"""HPCBench campaign helper functions
"""
import collections
from contextlib import contextmanager
import filecmp
import functools
import json
import logging
import operator
import os
import os.path as osp
import re
import shutil
import socket
import uuid
from cached_property import cached_property
from ClusterShell.NodeSet import NodeSet
import six
import yaml
import hpcbench
from hpcbench.api import Benchmark
from hpcbench.report import render
from .toolbox.collections_ext import Configuration, dict_map_kv, freeze, nameddict
from .toolbox.env import expandvars
from .toolbox.functools_ext import listify
from .toolbox.slurm import SlurmCluster
def pip_installer_url(version=None):
"""Get argument to give to ``pip`` to install HPCBench.
"""
version = version or hpcbench.__version__
version = str(version)
if '.dev' in version:
git_rev = 'master'
if 'TRAVIS_BRANCH' in os.environ:
git_rev = version.split('+', 1)[-1]
if '.' in git_rev: # get rid of date suffix
git_rev = git_rev.split('.', 1)[0]
git_rev = git_rev[1:] # get rid of scm letter
return 'git+{project_url}@{git_rev}#egg=hpcbench'.format(
project_url='http://github.com/BlueBrain/hpcbench',
git_rev=git_rev or 'master',
)
return 'hpcbench=={}'.format(version)
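# Illustrative usage (the version strings below are hypothetical):
#
#   pip_installer_url('0.8')
#   # -> 'hpcbench==0.8'
#   pip_installer_url('0.8.dev0+gabc123.d20180101')   # with TRAVIS_BRANCH set
#   # -> 'git+http://github.com/BlueBrain/hpcbench@abc123#egg=hpcbench'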
LOGGER = logging.getLogger('hpcbench')
JSON_METRICS_FILE = 'metrics.json'
SBATCH_JINJA_TEMPLATE = 'sbatch.jinja'
YAML_CAMPAIGN_FILE = 'campaign.yaml'
YAML_EXPANDED_CAMPAIGN_FILE = 'campaign.expanded.yaml'
YAML_REPORT_FILE = 'hpcbench.yaml'
DEFAULT_CAMPAIGN = dict(
output_dir="hpcbench-%Y%m%d-%H%M%S",
network=dict(
nodes=[socket.gethostname()],
tags=dict(),
ssh_config_file=None,
remote_work_dir='.hpcbench',
installer_template='ssh-installer.sh.jinja',
installer_prelude_file=None,
max_concurrent_runs=4,
pip_installer_url=pip_installer_url(),
slurm_blacklist_states=[
'down',
'down*',
'drain',
'drained',
'draining',
'error',
'fail',
'failing',
'future',
],
),
process=dict(
type='local',
config=dict(),
executor_template='executor.sh.jinja',
sbatch_template=SBATCH_JINJA_TEMPLATE,
),
tag=dict(),
benchmarks={'*': {}},
export=dict(
elasticsearch=dict(connection_params=dict(), index_name='hpcbench-{date}')
),
precondition=dict(),
)
class Generator(object):
"""Generate default campaign file"""
DEFAULT_TEMPLATE = 'hpcbench.yaml.jinja'
def __init__(self, template=None):
"""Jinja template to use (in hpcbench/templates/ directory)
"""
self.template = template or Generator.DEFAULT_TEMPLATE
def write(self, file):
"""Write YAML campaign template to the given open file
"""
render(
self.template,
file,
benchmarks=self.benchmarks,
hostname=socket.gethostname(),
)
@property
def benchmarks(self):
# instantiate all benchmarks
benches = [b() for b in Benchmark.__subclasses__()]
# filter benchmark whose API says they should be included
# in the template
benches = [b for b in benches if b.in_campaign_template]
# sort by name
benches = sorted(benches, key=lambda b: b.name)
# return payload for Jinja template
return [
dict(
name=b.name,
description=Generator._description(b),
attributes={
attr: dict(
doc=Generator._format_attrdoc(b.__class__, attr),
value=Generator._format_attrvalue(b.attributes[attr]),
)
for attr in b.attributes
},
)
for b in benches
]
@classmethod
def _format_attrdoc(cls, clazz, attr):
doc = getattr(clazz, attr).__doc__ or ''
doc = doc.strip()
doc = '# ' + doc
return doc.replace('\n ', '\n # ').strip()
@classmethod
def _format_attrvalue(cls, value):
if isinstance(value, set):
value = list(value)
if isinstance(value, list):
return yaml.dump(value, default_flow_style=True).rstrip()
return value
@classmethod
def _description(cls, benchmark):
desc = benchmark.__class__.__doc__
if desc is None:
msg = 'Missing %s benchmark class docstring' % benchmark.__class__
raise Exception(msg)
desc = desc.split('\n', 1)[0].strip()
desc = '# ' + desc
return desc.replace('\n ', '\n # ').strip()
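# Illustrative usage (the output filename is hypothetical): write a default
# campaign template listing every benchmark exposed to the template.
#
#   with open('campaign-template.yaml', 'w') as ostr:
#       Generator().write(ostr)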
def from_file(campaign_file, **kwargs):
"""Load campaign from YAML file
:return: memory representation of the YAML file
:rtype: dictionary
"""
realpath = osp.realpath(campaign_file)
if osp.isdir(realpath):
campaign_file = osp.join(campaign_file, YAML_CAMPAIGN_FILE)
campaign = Configuration.from_file(campaign_file)
return default_campaign(campaign, **kwargs)
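# Illustrative usage (the campaign file name is hypothetical): load a campaign
# description and get back a frozen, default-filled configuration.
#
#   campaign = from_file('campaign.yaml')
#   print(campaign.campaign_id, campaign.network.nodes)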
def default_campaign(
campaign=None, expandcampvars=True, exclude_nodes=None, frozen=True
):
"""Fill an existing campaign with default
values for optional keys
    :param campaign: dictionary to fill with default values
    :type campaign: dict
:param exclude_nodes: node set to exclude from allocations
:type exclude_nodes: str
:param expandcampvars: should env variables be expanded? True by default
:type expandcampvars: bool
:param frozen: whether the returned data-structure is immutable or not
:type frozen: bool
:return: object provided in parameter
:rtype: dictionary
"""
campaign = campaign or nameddict()
def _merger(_camp, _deft):
for key in _deft.keys():
if (
key in _camp
and isinstance(_camp[key], dict)
and isinstance(_deft[key], collections.Mapping)
):
_merger(_camp[key], _deft[key])
elif key not in _camp:
_camp[key] = _deft[key]
_merger(campaign, DEFAULT_CAMPAIGN)
campaign.setdefault('campaign_id', str(uuid.uuid4()))
for precondition in campaign.precondition.keys():
config = campaign.precondition[precondition]
if not isinstance(config, list):
campaign.precondition[precondition] = [config]
def _expandvars(value):
if isinstance(value, six.string_types):
return expandvars(value)
return value
if expandcampvars:
campaign = nameddict(dict_map_kv(campaign, _expandvars))
else:
campaign = nameddict(campaign)
if expandcampvars:
if campaign.network.get('tags') is None:
campaign.network['tags'] = {}
        NetworkConfig(campaign, exclude_nodes).expand()
return freeze(campaign) if frozen else campaign
class NetworkConfig(object):
"""Wrapper around network configuration
"""
def __init__(self, campaign, exclude_nodes=None):
self.campaign = campaign
self._exclude_nodes = NodeSet(exclude_nodes)
@property
def exclude_nodes(self):
return self._exclude_nodes
@property
def network(self):
"""Get network section of the campaign
"""
return self.campaign.network
@property
def slurm(self):
return self.campaign.network.get('cluster') == 'slurm'
def expand(self):
"""Perform node expansion of network section.
"""
if self.slurm:
self._introspect_slurm_cluster()
self.network.nodes = self._expand_nodes(self.network.nodes)
self._expand_tags()
@cached_property
def blacklist_states(self):
states = set(self.network.slurm_blacklist_states)
if self.campaign.process.type == 'slurm':
            if 'reservation' in (self.campaign.process.get('sbatch') or dict()):
states.discard('reserved')
return states
@cached_property
def _reserved_nodes(self):
if self.campaign.process.type == 'slurm':
            if 'reservation' in (self.campaign.process.get('sbatch') or {}):
rsv_name = self.campaign.process.sbatch.reservation
                try:
                    rsv = SlurmCluster.reservation(rsv_name)
                except KeyError:
                    return None
                else:
                    return rsv.nodes
return None
def _filter_node(self, node):
if node.state in self.blacklist_states:
return True
if self._reserved_nodes is not None:
return str(node) not in self._reserved_nodes
def _introspect_slurm_cluster(self):
cluster = SlurmCluster()
node_names = set()
tags = dict()
for node in cluster.nodes:
if self._filter_node(node):
continue
node_names.add(str(node))
for feature in node.active_features:
tag_name = node.partition + '_' + feature
tags.setdefault(tag_name, []).append(str(node))
tags.setdefault(feature, []).append(str(node))
for tag in tags:
tags[tag] = dict(nodes=tags[tag])
self.network.nodes = list(node_names)
LOGGER.info('Found nodes: %s', NodeSet.fromlist(self.network.nodes))
LOGGER.info('Found tags:')
for tag in iter(sorted(tags)):
LOGGER.info("{: >25} {}".format(tag, NodeSet.fromlist(tags[tag]['nodes'])))
prev_tags = self.network.tags
self.network.tags = tags
self.network.tags.update(prev_tags)
def _expand_nodes(self, nodes):
if isinstance(nodes, six.string_types):
nodes = [nodes]
if not isinstance(nodes, list):
            raise Exception('Invalid "nodes" value type, list expected')
eax = NodeSet()
for node in nodes:
eax.update(node)
eax -= self.exclude_nodes
return list(eax)
def _expand_tag_pattern(self, tag, pattern):
if len(pattern) > 1:
msg = "Tag '{tag}' is based on more than one criterion: {types}"
raise Exception(msg.format(tag=tag, types=', '.join(pattern)))
for mode in list(pattern):
if mode == 'match':
pattern[mode] = re.compile(pattern[mode])
elif mode == 'nodes':
pattern[mode] = self._expand_nodes(pattern[mode])
elif mode == 'constraint':
value = pattern[mode]
if not isinstance(value, six.string_types):
msg = "Constraint tag '{tag}' "
msg += "may be a string, not: {value}"
msg = msg.format(tag=tag, value=repr(value))
raise Exception(msg)
elif mode == 'tags':
pass # don't fail but ignore tags
else:
                raise Exception('Unknown tag association pattern: %s' % mode)
@classmethod
def _is_leaf(cls, config):
        # returns True if none of the patterns refers to other tags via 'tags'
return all(['tags' not in pat.keys() for pat in config])
def _resolve(self, tag, config, expanded, recursive, visited):
for pattern in config[:]:
# we work with a copy so we can modify the original
# first expand all the other modes
self._expand_tag_pattern(tag, pattern)
            # now go through the tags, if they exist in this pattern
if 'tags' in list(pattern):
tags = pattern['tags']
if isinstance(tags, six.string_types):
tags = [tags]
for rectag in tags:
if rectag in expanded:
config += expanded[rectag]
elif rectag in visited:
                        raise Exception(
                            'found circular dependency between %s and %s'
                            % (tag, rectag)
                        )
elif rectag in recursive:
recconfig = recursive.pop(rectag)
visited.add(rectag)
self._resolve(rectag, recconfig, expanded, recursive, visited)
else: # rectag is nowhere to be found
message = '"%s" refers to "%s", which is not defined.'
message = message % (tag, rectag)
raise Exception(message)
pattern.pop('tags') # we've expanded this, it can be deleted
config = [c for c in config if any(c)]
if len(config) >= 2:
for rectag in config:
if 'constraint' in rectag:
message = "Tag '%s': cannot combine constraint tags"
message = message % tag
raise Exception(message)
expanded[tag] = config
def _expand_tags(self):
expanded = {}
recursive = {}
for tag, config in self.network.tags.items():
if isinstance(config, dict):
config = [config]
if NetworkConfig._is_leaf(config):
for pattern in config:
self._expand_tag_pattern(tag, pattern)
expanded[tag] = config
else:
recursive[tag] = config
        # we finished all the leaves (tags without any recursive tag references)
visited = set(expanded)
while recursive:
tag, config = recursive.popitem()
visited.add(tag)
self._resolve(tag, config, expanded, recursive, visited)
self.network.tags = expanded
@listify(wrapper=set)
def get_benchmark_types(campaign):
"""Get of benchmarks referenced in the configuration
:return: benchmarks
:rtype: string generator
"""
for benchmarks in campaign.benchmarks.values():
for name, benchmark in benchmarks.items():
if name != 'sbatch': # exclude special sbatch name
yield benchmark.type
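# Illustrative usage (the campaign file name is hypothetical): list the
# distinct benchmark types declared in a campaign.
#
#   campaign = from_file('campaign.yaml')
#   print(get_benchmark_types(campaign))   # a set of benchmark type names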
def get_metrics(campaign, report, top=True):
"""Extract metrics from existing campaign
:param campaign: campaign loaded with `hpcbench.campaign.from_file`
:param report: instance of `hpcbench.campaign.ReportNode`
    :param top: this function is recursive. This parameter
    helps distinguish the top-level call from recursive ones.
"""
if top and campaign.process.type == 'slurm':
for path, _ in report.collect('jobid', with_path=True):
for child in ReportNode(path).children.values():
for metrics in get_metrics(campaign, child, top=False):
yield metrics
else:
def metrics_node_extract(report):
metrics_file = osp.join(report.path, JSON_METRICS_FILE)
if osp.exists(metrics_file):
with open(metrics_file) as istr:
return json.load(istr)
def metrics_iterator(report):
return filter(
lambda eax: eax[1] is not None,
report.map(metrics_node_extract, with_path=True),
)
for path, metrics in metrics_iterator(report):
yield report.path_context(path), metrics
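# Illustrative usage (directory names are hypothetical): walk a campaign output
# directory and print every metrics dictionary together with its context.
#
#   campaign = from_file('campaign.yaml')
#   report = ReportNode('hpcbench-20180101-120000')
#   for context, metrics in get_metrics(campaign, report):
#       print(context.benchmark, metrics)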
class CampaignMerge(object):
"""Merge 2 campaign directories
"""
def __init__(self, lhs, rhs):
"""Merge 2 campaign directories
:param lhs: path to campaign that will receive data
of the second campaign
:param rhs: campaign to merge data from
"""
self.lhs = lhs
self.rhs = rhs
self.serializers = dict(
json=CampaignMerge.SERIALIZER_CLASS(
reader=CampaignMerge._reader_json, writer=CampaignMerge._writer_json
),
yaml=CampaignMerge.SERIALIZER_CLASS(
reader=CampaignMerge._reader_yaml, writer=CampaignMerge._writer_yaml
),
)
def merge(self):
"""Perform merge operation between 2 campaign directories
"""
self.ensure_has_same_campaigns()
self._merge()
@staticmethod
def _reader_json(path):
with open(path) as istr:
return json.load(istr)
@staticmethod
def _reader_yaml(path):
with open(path) as istr:
return yaml.safe_load(istr)
@staticmethod
def _writer_json(data, path):
with open(path, 'w') as ostr:
json.dump(data, ostr, indent=2)
@staticmethod
def _writer_yaml(data, path):
with open(path, 'w') as ostr:
yaml.dump(data, ostr, default_flow_style=False)
DATA_FILE_EXTENSIONS = {'yaml', 'json'}
IGNORED_FILES = 'campaign.yaml'
SERIALIZER_CLASS = collections.namedtuple('serializer', ['reader', 'writer'])
def _merge_data_file(self, path, extension):
def _merge(lhs, rhs):
if isinstance(rhs, list):
lhs += rhs
return
for key in rhs.keys():
if key in lhs:
if isinstance(lhs[key], dict) and isinstance(
rhs[key], collections.Mapping
):
_merge(lhs[key], rhs[key])
elif isinstance(lhs[key], list) and isinstance(rhs[key], list):
lhs[key] += rhs[key]
elif key == 'elapsed':
lhs[key] += rhs[key]
elif key not in lhs:
lhs[key] = rhs[key]
lhs_file = osp.join(self.lhs, path)
rhs_file = osp.join(self.rhs, path)
assert osp.isfile(rhs_file)
assert osp.isfile(lhs_file)
lhs_data = self.serializers[extension].reader(lhs_file)
rhs_data = self.serializers[extension].reader(rhs_file)
_merge(lhs_data, rhs_data)
self.serializers[extension].writer(lhs_data, lhs_file)
def ensure_has_same_campaigns(self):
"""Ensure that the 2 campaigns to merge have been generated
from the same campaign.yaml
"""
lhs_yaml = osp.join(self.lhs, 'campaign.yaml')
rhs_yaml = osp.join(self.rhs, 'campaign.yaml')
assert osp.isfile(lhs_yaml)
assert osp.isfile(rhs_yaml)
assert filecmp.cmp(lhs_yaml, rhs_yaml)
def _merge(self):
for filename in os.listdir(self.rhs):
file_path = osp.join(self.rhs, filename)
if osp.isdir(file_path):
dest_path = osp.join(self.lhs, filename)
if not osp.isdir(dest_path):
shutil.copytree(file_path, dest_path)
else:
with self._push(filename):
self._merge()
else:
if CampaignMerge.IGNORED_FILES in file_path:
continue
extension = osp.splitext(filename)[1][1:]
if extension in CampaignMerge.DATA_FILE_EXTENSIONS:
self._merge_data_file(filename, extension)
@contextmanager
def _push(self, subdir):
lhs = self.lhs
rhs = self.rhs
try:
self.lhs = osp.join(self.lhs, subdir)
self.rhs = osp.join(self.rhs, subdir)
yield
finally:
self.lhs = lhs
self.rhs = rhs
def merge_campaigns(output_campaign, *campaigns):
"""Merge campaign directories
:param output_campaign: existing campaign directory
where data from others campaigns will be merged into
:param campaigns: existing campaigns to merge from
"""
for campaign in campaigns:
CampaignMerge(output_campaign, campaign).merge()
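# Illustrative usage (directory names are hypothetical): merge the results of
# two later runs into an existing campaign output directory.
#
#   merge_campaigns('hpcbench-run-1', 'hpcbench-run-2', 'hpcbench-run-3')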
class ReportNode(collections.Mapping):
"""Navigate across hpcbench.yaml files of a campaign
"""
CONTEXT_ATTRS = ['node', 'tag', 'benchmark', 'category', 'attempt']
def __init__(self, path):
"""
:param path: path to an existing campaign directory
:type path: str
"""
self._path = path
@property
def path(self):
"""get path given in constructor
:rtype: str
"""
return self._path
@listify(wrapper=nameddict)
def path_context(self, path):
"""Build of dictionary of fields extracted from
the given path"""
prefix = os.path.commonprefix([path, self._path])
relative_path = path[len(prefix) :]
relative_path = relative_path.strip(os.sep)
attrs = self.CONTEXT_ATTRS
for i, elt in enumerate(relative_path.split(os.sep)):
yield attrs[i], elt
yield 'path', path
@property
def report(self):
"""get path to the hpcbench.yaml report
:rtype: str
"""
return osp.join(self._path, YAML_REPORT_FILE)
@cached_property
def data(self):
"""get content of hpcbench.yaml
:rtype: dict
"""
with open(self.report) as istr:
return yaml.safe_load(istr)
@cached_property
@listify(wrapper=dict)
def children(self):
"""get children node referenced as `children` in the
report.
:rtype: dict with name (str) -> node (ReportNode)
"""
for child in self.data.get('children', []):
if osp.exists(osp.join(self.path, child, YAML_REPORT_FILE)):
yield child, self.__class__(osp.join(self.path, child))
def map(self, func, **kwargs):
"""Generator function returning result of
`func(self)`
:param func: callable object
:keyword recursive: if True, then apply map to every children nodes
        :keyword with_path: whether the yielded value is a tuple
        of 2 elements containing the report path and the `func(self)` result,
        or simply the `func(self)` result.
:rtype: generator
"""
if kwargs.get('with_path', False):
yield self.path, func(self)
if kwargs.get('recursive', True):
for child in self.children.values():
for value in child.map(func, **kwargs):
yield value
def collect(self, *keys, **kwargs):
"""Generator function traversing
tree structure to collect values of a specified key.
:param keys: the keys to look for in the report
:type key: str
:keyword recursive: look for key in children nodes
:type recursive: bool
        :keyword with_path: whether the yielded value is a tuple
        of 2 elements containing the report path and the value,
        or simply the value.
:type with_path: bool
:rtype: generator providing either values or
tuples of 2 elements containing report path and value
depending on with_path parameter
"""
if not keys:
raise Exception('Missing key')
has_values = functools.reduce(
operator.__and__, [key in self.data for key in keys], True
)
if has_values:
values = tuple([self.data[key] for key in keys])
if len(values) == 1:
values = values[0]
if kwargs.get('with_path', False):
yield self.path, values
else:
yield values
if kwargs.get('recursive', True):
for child in self.children.values():
for value in child.collect(*keys, **kwargs):
yield value
def collect_one(self, *args, **kwargs):
"""Same as `collect` but expects to have only one result.
:return: the only result directly, not the generator like `collect`.
"""
generator = self.collect(*args, **kwargs)
try:
value = next(generator)
except StopIteration:
raise Exception("Expected exactly one value don't have any")
try:
next(generator)
except StopIteration:
return value
        raise Exception('Expected exactly one value but got more than one')
def __getitem__(self, item):
return self.data[item]
def __len__(self):
return len(self.data)
def __iter__(self):
return iter(self.data)
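# Illustrative usage (the directory name is hypothetical): collect every
# 'elapsed' value recorded below a report node, with the path it came from.
#
#   root = ReportNode('hpcbench-20180101-120000')
#   for path, elapsed in root.collect('elapsed', with_path=True):
#       print(path, elapsed)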
import datetime
from cStringIO import StringIO
from django.core import mail
from django.conf import settings
from django.utils import timezone
from django.test.utils import override_settings
from funfactory.urlresolvers import reverse
import mock
from nose.tools import eq_, ok_
from airmozilla.base.tests.testbase import DjangoTestCase
from airmozilla.manage.archiver import archive
from airmozilla.main.models import Event, Template, VidlySubmission
SAMPLE_XML = (
'<?xml version="1.0"?>'
'<Response><Message>Action successful.</Message>'
'<MessageCode>4.1</MessageCode><Success><Task><UserID>1234</UserID>'
'<MediaShortLink>abc123</MediaShortLink>'
'<SourceFile>http://videos.mozilla.org/bla.f4v</SourceFile>'
'<BatchID>35402</BatchID>'
'<Status>Finished</Status>'
'<Private>false</Private>'
'<PrivateCDN>false</PrivateCDN><Created>2012-08-23 19:30:58</Created>'
'<Updated>2012-08-23 20:44:22</Updated>'
'<UserEmail>[email protected]</UserEmail>'
'</Task></Success></Response>'
)
SAMPLE_MEDIALIST_XML = (
'<?xml version="1.0"?>'
'<Response><Message>OK</Message><MessageCode>7.4</MessageCode><Success>'
'<Media><MediaShortLink>abc123</MediaShortLink><VanityLink/>'
'<Notify>[email protected]</Notify><Created>2011-12-25 18:45:56</Created>'
'<Updated>2012-11-28 14:05:07</Updated><Status>Finished</Status>'
'<IsDeleted>false</IsDeleted><IsPrivate>false</IsPrivate>'
'<IsPrivateCDN>false</IsPrivateCDN><CDN>AWS</CDN></Media>'
'<Media><MediaShortLink>xyz987</MediaShortLink><VanityLink/>'
'<Notify>[email protected]</Notify><Created>2011-12-25 19:41:05</Created>'
'<Updated>2012-11-28 14:04:57</Updated><Status>Error</Status>'
'<IsDeleted>false</IsDeleted><IsPrivate>false</IsPrivate>'
'<IsPrivateCDN>false</IsPrivateCDN><CDN>AWS</CDN></Media>'
'</Success></Response>'
)
class ArchiverTestCase(DjangoTestCase):
def _age_event_created(self, event, save=True):
extra_seconds = settings.PESTER_INTERVAL_DAYS * 24 * 60 * 60 + 1
now = timezone.now()
event.created = now - datetime.timedelta(seconds=extra_seconds)
save and event.save()
@mock.patch('airmozilla.manage.archiver.logging')
def test_a_bad_event_parameter_1(self, mocked_logging):
event = Event.objects.get(title='Test event')
archive(event)
mocked_logging.warn.assert_called_with(
'Event %r not a Vid.ly event', 'Test event'
)
@mock.patch('airmozilla.manage.archiver.logging')
def test_a_bad_event_parameter_2(self, mocked_logging):
event = Event.objects.get(title='Test event')
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.save()
archive(event)
mocked_logging.warn.assert_called_with(
'Event %r does not have a Vid.ly tag', u'Test event'
)
@override_settings(ADMINS=(('F', '[email protected]'), ('B', '[email protected]')))
@mock.patch('urllib2.urlopen')
def test_still_not_found(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_MEDIALIST_XML.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.template_environment = {'tag': 'NOTKNOWN'}
event.save()
archive(event)
sent_email = mail.outbox[-1]
eq_(sent_email.to, [x[1] for x in settings.ADMINS])
ok_('NOTKNOWN' in sent_email.subject)
ok_(reverse('manage:event_edit', args=(event.pk,)) in sent_email.body)
@override_settings(ADMINS=(('F', '[email protected]'), ('B', '[email protected]')))
@mock.patch('urllib2.urlopen')
def test_errored(self, p_urlopen):
def mocked_urlopen(request):
xml = SAMPLE_XML.replace(
'<Status>Finished</Status>',
'<Status>Error</Status>',
)
return StringIO(xml.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.template_environment = {'tag': 'abc123'}
event.save()
archive(event)
sent_email = mail.outbox[-1]
eq_(sent_email.to, [x[1] for x in settings.ADMINS])
ok_('Unable to archive event' in sent_email.subject)
ok_('abc123' in sent_email.subject)
ok_(reverse('manage:event_edit', args=(event.pk,)) in sent_email.body)
@override_settings(ADMINS=(('F', '[email protected]'),))
@mock.patch('urllib2.urlopen')
def test_errored_updating_vidly_submission(self, p_urlopen):
def mocked_urlopen(request):
xml = SAMPLE_XML.replace(
'<Status>Finished</Status>',
'<Status>Error</Status>',
)
return StringIO(xml.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.template_environment = {'tag': 'abc123'}
event.save()
vidly_submission = VidlySubmission.objects.create(
event=event,
url='https://example.com',
tag='abc123'
)
archive(event)
vidly_submission = VidlySubmission.objects.get(id=vidly_submission.id)
ok_(vidly_submission.errored)
ok_(not vidly_submission.finished)
@mock.patch('urllib2.urlopen')
def test_processing(self, p_urlopen):
def mocked_urlopen(request):
xml = SAMPLE_XML.replace(
'<Status>Finished</Status>',
'<Status>Processing</Status>',
)
return StringIO(xml.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.template_environment = {'tag': 'abc123'}
event.save()
archive(event)
eq_(len(mail.outbox), 0)
@mock.patch('urllib2.urlopen')
def test_finished(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_XML.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
event.status = Event.STATUS_PENDING
event.archive_time = None
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.template_environment = {'tag': 'abc123'}
event.save()
archive(event)
eq_(len(mail.outbox), 0)
event = Event.objects.get(pk=event.pk)
now = timezone.now()
eq_(
event.archive_time.strftime('%Y%m%d %H%M'),
now.strftime('%Y%m%d %H%M'),
)
eq_(event.status, Event.STATUS_SCHEDULED)
@mock.patch('urllib2.urlopen')
def test_finished_updating_vidly_submission(self, p_urlopen):
def mocked_urlopen(request):
return StringIO(SAMPLE_XML.strip())
p_urlopen.side_effect = mocked_urlopen
event = Event.objects.get(title='Test event')
event.status = Event.STATUS_PENDING
event.archive_time = None
vidly_template = Template.objects.create(name='Vid.ly Test')
event.template = vidly_template
event.template_environment = {'tag': 'abc123'}
event.save()
vidly_submission = VidlySubmission.objects.create(
event=event,
url='https://example.com',
tag='abc123'
)
archive(event)
vidly_submission = VidlySubmission.objects.get(id=vidly_submission.id)
ok_(not vidly_submission.errored)
ok_(vidly_submission.finished)
#!/usr/bin/env python
#
# Copyright (c) 2013, ReMake Electric ehf
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# This file implements the "malaria publish" command
"""
Publish a stream of messages and capture statistics on their timing.
"""
import argparse
import multiprocessing
import os
import random
import socket
import time
import beem.load
import beem.bridge
import beem.msgs
def my_custom_msg_generator(sequence_length):
"""
An example of a custom msg generator.
You must return a tuple of sequence number, topic and payload
on each iteration.
"""
seq = 0
while seq < sequence_length:
yield (seq, "magic_topic", "very boring payload")
seq += 1
def _worker(options, proc_num, auth=None):
"""
Wrapper to run a test and push the results back onto a queue.
Modify this to provide custom message generation routines.
"""
# Make a new clientid with our worker process number
cid = "%s-%d" % (options.clientid, proc_num)
if options.bridge:
ts = beem.bridge.BridgingSender(options.host, options.port, cid, auth)
# This is _probably_ what you want if you are specifying a key file
# This would correspond with using ids as clientids, and acls
if auth:
cid = auth.split(":")[0]
else:
# FIXME - add auth support here too dummy!
ts = beem.load.TrackingSender(options.host, options.port, cid)
# Provide a custom generator
#msg_gen = my_custom_msg_generator(options.msg_count)
msg_gen = beem.msgs.createGenerator(cid, options)
# This helps introduce jitter so you don't have many threads all in sync
time.sleep(random.uniform(1, 10))
ts.run(msg_gen, qos=options.qos)
return ts.stats()
def _worker_threaded(options, proc_num, auth=None):
ts = beem.bridge.ThreadedBridgingSender(options, proc_num, auth)
ts.run()
return ts.stats
def add_args(subparsers):
parser = subparsers.add_parser(
"publish",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
description=__doc__,
help="Publish a stream of messages")
parser.add_argument(
"-c", "--clientid",
default="beem.loadr-%s-%d" % (socket.gethostname(), os.getpid()),
help="""Set the client id of the publisher, can be useful for acls.
Default includes host and pid information, unless a keyfile was
specified, in which case the "user/identity" part is used as the
client id. The clientid is also used in the default topics.
""")
parser.add_argument(
"-H", "--host", default="localhost",
help="MQTT host to connect to")
parser.add_argument(
"-p", "--port", type=int, default=1883,
help="Port for remote MQTT host")
parser.add_argument(
"-q", "--qos", type=int, choices=[0, 1, 2],
help="set the mqtt qos for messages published", default=1)
parser.add_argument(
"-n", "--msg_count", type=int, default=10,
help="How many messages to send")
parser.add_argument(
"-s", "--msg_size", type=int, default=100,
help="Size of messages to send. This will be gaussian at (x, x/20)")
parser.add_argument(
"-t", "--timing", action="store_true",
help="""Message bodies will contain timing information instead of
random hex characters. This can be combined with --msg-size option""")
parser.add_argument(
"-T", "--msgs_per_second", type=float, default=0,
help="""Each publisher should target sending this many msgs per second,
useful for simulating real devices.""")
parser.add_argument(
"--jitter", type=float, default=0.1,
help="""Percentage jitter to use when rate limiting via --msgs_per_sec,
Can/may help avoid processes sawtoothing and becoming synchronized""")
parser.add_argument(
"-P", "--processes", type=int, default=1,
help="How many separate processes to spin up (multiprocessing)")
parser.add_argument(
"--thread_ratio", type=int, default=1,
help="Threads per process (bridged multiprocessing) WARNING! VERY ALPHA!")
parser.add_argument(
"-b", "--bridge", action="store_true",
help="""Instead of connecting directly to the target, fire up a
separate mosquitto instance configured to bridge to the target""")
# See http://stackoverflow.com/questions/4114996/python-argparse-nargs-or-depending-on-prior-argument
# we shouldn't allow psk-file without bridging, as python doesn't let us use psk
parser.add_argument(
"--psk_file", type=argparse.FileType("r"),
help="""A file of psk 'identity:key' pairs, as you would pass to
mosquitto's psk_file configuration option. Each process will use a single
line from the file. Only as many processes will be made as there are keys""")
parser.add_argument(
"--json", type=str, default=None,
help="""Dump the collected stats into the given JSON file.""")
parser.set_defaults(handler=run)
def run(options):
time_start = time.time()
# This should be pretty easy to use for passwords as well as PSK....
if options.psk_file:
assert options.bridge, "PSK is only supported with bridging due to python limitations, sorry about that"
auth_pairs = options.psk_file.readlines()
# Can only fire up as many processes as we have keys!
# FIXME - not true with threading!!
assert (options.thread_ratio * options.processes) <= len(auth_pairs), "can't handle more threads*procs than keys!"
options.processes = min(options.processes, len(auth_pairs))
print("Using first %d keys from: %s"
% (options.processes, options.psk_file.name))
pool = multiprocessing.Pool(processes=options.processes)
if options.thread_ratio == 1:
auth_pairs = auth_pairs[:options.processes]
result_set = [pool.apply_async(_worker, (options, x, auth.strip())) for x, auth in enumerate(auth_pairs)]
else:
# need to slice auth_pairs up into thread_ratio sized chunks for each one.
result_set = []
for x in range(options.processes):
ll = options.thread_ratio
keyset = auth_pairs[x*ll:x*ll + options.thread_ratio]
print("process number: %d using keyset: %s" % (x, keyset))
result_set.append(pool.apply_async(_worker_threaded, (options, x, keyset)))
else:
pool = multiprocessing.Pool(processes=options.processes)
if options.thread_ratio == 1:
result_set = [pool.apply_async(_worker, (options, x)) for x in range(options.processes)]
else:
result_set = [pool.apply_async(_worker_threaded, (options, x)) for x in range(options.processes)]
completed_set = []
while len(completed_set) < options.processes:
hold_set = []
for result in result_set:
if result.ready():
completed_set.append(result)
else:
hold_set.append(result)
result_set = hold_set
print("Completed workers: %d/%d"
% (len(completed_set), options.processes))
if len(result_set) > 0:
time.sleep(1)
time_end = time.time()
stats_set = []
for result in completed_set:
s = result.get()
if options.thread_ratio == 1:
beem.print_publish_stats(s)
stats_set.append(s)
if options.thread_ratio == 1:
agg_stats = beem.aggregate_publish_stats(stats_set)
agg_stats["time_total"] = time_end - time_start
beem.print_publish_stats(agg_stats)
if options.json is not None:
beem.json_dump_stats(agg_stats, options.json)
else:
agg_stats_set = [beem.aggregate_publish_stats(x) for x in stats_set]
for x in agg_stats_set:
x["time_total"] = time_end - time_start
[beem.print_publish_stats(x) for x in agg_stats_set]
if options.json is not None:
beem.json_dump_stats(agg_stats_set, options.json)
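# Illustrative invocation, assuming the "malaria" entry point registers this
# sub-command (the host name and figures are hypothetical):
#
#   malaria publish -H broker.example.org -n 1000 -P 4 -q 1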
# Copyright (c) 2018 PaddlePaddle Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import unittest
import numpy as np
from op_test import OpTest
import paddle.fluid.core as core
import paddle.fluid as fluid
from paddle.fluid import compiler, Program, program_guard
import paddle
import paddle.nn.functional as F
np.random.seed(10)
def stable_softmax(x):
"""Compute the softmax of vector x in a numerically stable way."""
# clip to shiftx, otherwise, when calc loss with
# log(exp(shiftx)), may get log(0)=INF
shiftx = (x - np.max(x)).clip(-64.)
exps = np.exp(shiftx)
return exps / np.sum(exps)
def ref_softmax(x, axis=None, dtype=None):
x_t = x.copy()
if dtype is not None:
x_t = x_t.astype(dtype)
if axis is None:
axis = -1
return np.apply_along_axis(stable_softmax, axis, x_t)
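# Illustrative sanity check (not part of the test suite): stable_softmax agrees
# with the direct formula exp(x) / sum(exp(x)) on a small vector.
#
#   x = np.array([1.0, 2.0, 3.0])
#   np.allclose(stable_softmax(x), np.exp(x) / np.exp(x).sum())   # -> True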
class TestSoftmaxOp(OpTest):
def get_x_shape(self):
return [10, 10]
def get_axis(self):
return -1
def setUp(self):
self.op_type = "softmax"
self.use_cudnn = False
self.use_mkldnn = False
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
self.dtype = np.float32 if core.is_compiled_with_rocm() else np.float64
self.init_kernel_type()
self.shape = self.get_x_shape()
self.axis = self.get_axis()
np.random.seed(0)
x = np.random.uniform(0.1, 1, self.shape).astype(self.dtype)
out = np.apply_along_axis(stable_softmax, self.axis, x)
self.inputs = {'X': OpTest.np_dtype_to_fluid_dtype(x)}
self.outputs = {'Out': out}
self.attrs = {
'axis': self.axis,
'use_cudnn': self.use_cudnn,
'use_mkldnn': self.use_mkldnn
}
def init_kernel_type(self):
pass
def test_check_output(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.use_cudnn:
place = core.CUDAPlace(0)
self.check_output_with_place(
place, atol=1e-5, check_dygraph=(self.use_mkldnn == False))
else:
self.check_output(check_dygraph=(self.use_mkldnn == False))
def test_check_grad(self):
# TODO(wangzhongpu): support mkldnn op in dygraph mode
if self.use_cudnn or self.dtype == np.float16:
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_grad_with_place(
place, ["X"],
"Out",
max_relative_error=0.01,
check_dygraph=(self.use_mkldnn == False))
else:
self.check_grad(
["X"],
"Out",
max_relative_error=0.01,
check_dygraph=(self.use_mkldnn == False))
class TestSoftmaxOp2(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
class TestSoftmaxOp3(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 0
class TestSoftmaxOp4(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 1
class TestSoftmaxOp5(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 2
class TestSoftmaxOp6(TestSoftmaxOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 3
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp(TestSoftmaxOp):
def init_kernel_type(self):
self.use_cudnn = True
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp2(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp3(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 0
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp4(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 1
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp5(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 2
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp6(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
def get_axis(self):
return 3
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp7(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5, 6]
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp8(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5, 6]
def get_axis(self):
return 0
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp9(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5, 6]
def get_axis(self):
return 1
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp10(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5, 6]
def get_axis(self):
return 2
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp11(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5, 6]
def get_axis(self):
return 3
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxCUDNNOp12(TestSoftmaxCUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5, 6]
def get_axis(self):
return 4
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16Op(TestSoftmaxOp):
def init_kernel_type(self):
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
# FIXME: If the x_shape is [10, 10], gradient failed.
def test_check_grad(self):
pass
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16Op2(TestSoftmaxFP16Op):
def get_x_shape(self):
return [2, 3, 4, 10]
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp(TestSoftmaxOp):
def init_kernel_type(self):
self.use_cudnn = True
self.dtype = np.float16
def test_check_output(self):
if core.is_compiled_with_cuda():
place = core.CUDAPlace(0)
if core.is_float16_supported(place):
self.check_output_with_place(place, atol=1e-3)
@unittest.skipIf(not core.is_compiled_with_cuda(),
"core is not compiled with CUDA")
class TestSoftmaxFP16CUDNNOp2(TestSoftmaxFP16CUDNNOp):
def get_x_shape(self):
return [2, 3, 4, 5]
class TestSoftmaxAPI(unittest.TestCase):
def setUp(self):
self.place = paddle.CUDAPlace(0) if core.is_compiled_with_cuda(
) else paddle.CPUPlace()
self.x_np = np.random.uniform(-1., 1., [2, 3, 4, 5]).astype('float32')
self.out_ref = np.apply_along_axis(stable_softmax, -1, self.x_np)
self.executed_api()
def executed_api(self):
self.softmax = F.softmax
def test_static_check(self):
with paddle.static.program_guard(paddle.static.Program()):
x = paddle.fluid.data('X', self.x_np.shape, 'float32')
out1 = self.softmax(x)
m = paddle.nn.Softmax()
out2 = m(x)
exe = paddle.static.Executor(self.place)
res = exe.run(feed={'X': self.x_np}, fetch_list=[out1, out2])
out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
for r in res:
self.assertEqual(np.allclose(out_ref, r), True)
def test_dygraph_check(self):
paddle.disable_static(self.place)
x = paddle.to_tensor(self.x_np)
out1 = self.softmax(x)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.Softmax()
out2 = m(x)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=None)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
out1 = self.softmax(x, axis=0)
x = paddle.to_tensor(self.x_np)
m = paddle.nn.Softmax(axis=0)
out2 = m(x)
out_ref = ref_softmax(self.x_np, axis=0, dtype=None)
for r in [out1, out2]:
self.assertEqual(np.allclose(out_ref, r.numpy()), True)
        # explicitly use float32 for ROCm, as MIOpen does not yet support float64
if core.is_compiled_with_rocm():
out = self.softmax(x, dtype=np.float32)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float32)
else:
out = self.softmax(x, dtype=np.float64)
out_ref = ref_softmax(self.x_np, axis=-1, dtype=np.float64)
self.assertEqual(np.allclose(out_ref, out.numpy()), True)
paddle.enable_static()
def test_error(self):
with paddle.static.program_guard(paddle.static.Program()):
# The input type must be Variable.
self.assertRaises(TypeError, self.softmax, 1)
# The input dtype must be float16, float32, float64.
x_int32 = paddle.fluid.data(
name='x_int32', shape=[2, 3], dtype='int32')
self.assertRaises(TypeError, self.softmax, x_int32)
# support the input dtype is float16
x_fp16 = paddle.fluid.data(
name='x_fp16', shape=[2, 3], dtype='float16')
self.softmax(x_fp16)
class TestSoftmaxInplaceAPI(TestSoftmaxAPI):
def executed_api(self):
self.softmax = F.softmax_
if __name__ == "__main__":
unittest.main()
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import contextlib
import mock
import six
from heat.common import exception as exc
from heat.common import template_format
from heat.engine import stack
from heat.engine import template
from heat.tests import common
from heat.tests import utils
class SoftwareComponentTest(common.HeatTestCase):
def setUp(self):
super(SoftwareComponentTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = '''
heat_template_version: 2013-05-23
resources:
mysql_component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo "Create MySQL"
tool: script
- actions: [UPDATE]
config: |
#!/bin/bash
echo "Update MySQL"
tool: script
inputs:
- name: mysql_port
outputs:
- name: root_password
'''
self.template = template_format.parse(tpl)
self.stack = stack.Stack(
self.ctx, 'software_component_test_stack',
template.Template(self.template))
self.component = self.stack['mysql_component']
self.rpc_client = mock.MagicMock()
self.component._rpc_client = self.rpc_client
@contextlib.contextmanager
def exc_filter(*args):
try:
yield
except exc.NotFound:
pass
self.rpc_client.ignore_error_by_name.side_effect = exc_filter
def test_handle_create(self):
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
value = {'id': config_id}
self.rpc_client.create_software_config.return_value = value
props = dict(self.component.properties)
self.component.handle_create()
self.rpc_client.create_software_config.assert_called_with(
self.ctx,
group='component',
name=None,
inputs=props['inputs'],
outputs=props['outputs'],
config={'configs': props['configs']},
options=None)
self.assertEqual(config_id, self.component.resource_id)
def test_handle_delete(self):
        self.component.resource_id = None
self.assertIsNone(self.component.handle_delete())
config_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
self.component.resource_id = config_id
self.rpc_client.delete_software_config.return_value = None
self.assertIsNone(self.component.handle_delete())
self.rpc_client.delete_software_config.side_effect = exc.NotFound
self.assertIsNone(self.component.handle_delete())
def test_resolve_attribute(self):
self.assertIsNone(self.component._resolve_attribute('others'))
self.component.resource_id = None
self.assertIsNone(self.component._resolve_attribute('configs'))
self.component.resource_id = 'c8a19429-7fde-47ea-a42f-40045488226c'
configs = self.template['resources']['mysql_component'
]['properties']['configs']
# configs list is stored in 'config' property of SoftwareConfig
value = {'config': {'configs': configs}}
self.rpc_client.show_software_config.return_value = value
self.assertEqual(configs, self.component._resolve_attribute('configs'))
self.rpc_client.show_software_config.side_effect = exc.NotFound
self.assertIsNone(self.component._resolve_attribute('configs'))
class SoftwareComponentValidationTest(common.HeatTestCase):
scenarios = [
(
'component_full',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo CREATE $foo
tool: script
inputs:
- name: foo
outputs:
- name: bar
options:
opt1: blah
''',
err=None,
err_msg=None)
),
(
'no_input_output_options',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo CREATE $foo
tool: script
''',
err=None,
err_msg=None)
),
(
'wrong_property_config',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
config: #!/bin/bash
configs:
- actions: [CREATE]
config: |
#!/bin/bash
echo CREATE $foo
tool: script
''',
err=exc.StackValidationFailed,
err_msg='Unknown Property config')
),
(
'missing_configs',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
inputs:
- name: foo
''',
err=exc.StackValidationFailed,
err_msg='Property configs not assigned')
),
(
'empty_configs',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
''',
err=exc.StackValidationFailed,
err_msg='resources.component.properties.configs: '
'length (0) is out of range (min: 1, max: None)')
),
(
'invalid_configs',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
actions: [CREATE]
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='is not a list')
),
(
'config_empty_actions',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: []
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='component.properties.configs[0].actions: '
'length (0) is out of range (min: 1, max: None)')
),
(
'multiple_configs_per_action_single',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE]
config: #!/bin/bash
tool: script
- actions: [CREATE]
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='Defining more than one configuration for the same '
'action in SoftwareComponent "component" is not '
'allowed.')
),
(
'multiple_configs_per_action_overlapping_list',
dict(snippet='''
component:
type: OS::Heat::SoftwareComponent
properties:
configs:
- actions: [CREATE, UPDATE, RESUME]
config: #!/bin/bash
tool: script
- actions: [UPDATE]
config: #!/bin/bash
tool: script
''',
err=exc.StackValidationFailed,
err_msg='Defining more than one configuration for the same '
'action in SoftwareComponent "component" is not '
'allowed.')
),
]
def setUp(self):
super(SoftwareComponentValidationTest, self).setUp()
self.ctx = utils.dummy_context()
tpl = '''
heat_template_version: 2013-05-23
resources:
%s
''' % self.snippet
self.template = template_format.parse(tpl)
self.stack = stack.Stack(
self.ctx, 'software_component_test_stack',
template.Template(self.template))
self.component = self.stack['component']
self.component._rpc_client = mock.MagicMock()
def test_properties_schema(self):
if self.err:
err = self.assertRaises(self.err, self.stack.validate)
if self.err_msg:
self.assertIn(self.err_msg, six.text_type(err))
else:
self.assertIsNone(self.stack.validate())
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright 2015 Nikola Kovacevic <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import glob
import os
import sys
from logbook import FileHandler, Logger, CRITICAL
import time
from browsers import Chrome, Chromium, Firefox, Safari, Canary
import json
from appdirs import AppDirs
import arrow
log = Logger("CLI")
class CommandLine(object):
def __init__(self):
self.backups = {}
self.chrome = Chrome()
self.firefox = Firefox()
self.safari = Safari()
self.chromium = Chromium()
self.canary = Canary()
self.all_browsers = {'chrome' : self.chrome,
'firefox' : self.firefox,
'safari' : self.safari,
'chromium': self.chromium,
'canary' : self.canary}
self.dirs = AppDirs("RESTool", "nikolak")
default_config = {
"sys_dir_bak" : False,
"bak_format" : "%Y-%m-%d",
"bak_folder" : "res_backups",
"portable_config" : True,
"auto_update_check": False
}
self.config = None
if os.path.exists('settings.json'):
with open('settings.json') as settings:
self.config = json.load(settings)
else:
self.config = default_config
if self.config['sys_dir_bak']:
self.backup_folder = self.dirs.user_data_dir
elif self.config['bak_folder']:
self.backup_folder = self.config['bak_folder']
else:
log.debug("No custom backup folder set even though settings.json exists")
log.debug("Defaulting to res_backups")
self.backup_folder = "res_backups"
self.date_format = self.config.get('bak_format', "%Y-%m-%d")
log.info('Backup/restore folder set to {}'.format(self.backup_folder))
def list_browsers(self):
print "Browsers and profiles that contain RES:"
print '-' * 40
for browser in self.all_browsers.values():
if browser.res_exists:
if hasattr(browser, 'available_profiles'):
print "'{}' Profiles:".format(browser.name.title())
for profile_name in browser.available_profiles:
browser.change_profile(profile_name)
if browser.res_exists:
print "\t Profile: '{}' [RES FOUND]".format(
profile_name)
else:
print "\t Profile: '{}' [RES NOT FOUND]".format(
profile_name)
else:
if browser.res_exists:
print browser.name.title() + " [RES FOUND] (Profiles not supported)"
else:
print browser.name.title() + " [RES NOT FOUND] (Profiles not supported)"
def _get_browser_instance(self, browser_name, profile_name=None):
if browser_name.lower() not in self.all_browsers.keys():
print "Browser not found, did you enter the name correctly?"
return None
browser = self.all_browsers[browser_name.lower()]
if profile_name:
if not hasattr(browser, 'available_profiles'):
print "{} does not support profiles".format(browser_name)
return None
elif profile_name not in browser.available_profiles:
print "{} is not in list of available profiles for {}".format(
profile_name, browser_name
)
return None
else:
log.debug('Setting profile for {} to {}'.format(browser_name,
profile_name))
browser.change_profile(profile_name)
else:
if hasattr(browser, 'available_profiles'):
print "Please specify profile name you want to backup."
print "Avialable profiles: {}".format(
','.join(browser.available_profiles.keys()))
return None
if not browser.res_exists:
print "RES was not found in the specified browser and/or profile"
return None
if browser:
return browser
else:
log.debug("Browser was not set in the get browser instance function, "
"but no error was returned")
log.debug(browser)
return None
def backup(self, browser_name, profile_name=None):
browser = self._get_browser_instance(browser_name, profile_name)
if not browser:
print "Couldn't get the browser and or browser specified, aborting backup operation."
return
if browser.backup(self.backup_folder, self.date_format):
print "{} backed up to {} successfully".format(browser_name,
self.backup_folder)
else:
print "Backing up {} to {} failed.".format(browser_name,
self.backup_folder)
def _load_backups(self):
all_files = glob.glob(self.backup_folder + os.sep + "*.resbak")
for backup in all_files:
date_string = backup.split('.')[-3:][:-2][0]
backup_datetime = arrow.get(date_string)
self.backups[backup_datetime] = backup
def _list_backups(self):
print "-" * 50
print "Listing all available RES backup files"
self._load_backups()
if not self.backups:
print "No backups were found in {}".format(self.backup_folder)
return
print "[LATEST] {}".format(self.backups[max(self.backups.keys())])
for date in sorted(self.backups.keys(), reverse=True)[1:]:
print self.backups[date]
def restore(self, browser_name=None, profile_name=None, backup_file=None):
if backup_file == "list":
self._list_backups()
return
browser = self._get_browser_instance(browser_name, profile_name)
if not browser:
print "Couldn't get the browser and or browser specified, aborting restore operation."
return
if not os.path.exists(backup_file):
restore_path = os.path.join(self.backup_folder, backup_file)
else:
restore_path = backup_file
if not os.path.exists(restore_path):
print "Backup file at {} could not be found.".format(restore_path)
return
backup_browser = os.path.basename(restore_path)[0].lower()
if backup_browser == browser.name:
browser.restore_from_self(restore_path)
else:
restore_browser = self.all_browsers[backup_browser]
restore_data = restore_browser.get_data(restore_path)
browser.set_data(restore_data)
if __name__ == '__main__':
if os.path.exists("application.log"):
log_handler = FileHandler('application.log')
log_handler.push_application()
else:
log.level = CRITICAL
parser = argparse.ArgumentParser(prog='RESTool Command Line Interface. '
'See github for usage examples.')
parser.add_argument('-l', '--list', action='store_true',
help="List all available browsers and profiles to use for other commands.")
parser.add_argument('-b', '--backup', action='store_true',
help="Backup the specified browser and profile RES settings to the backup folder "
"specified in the settings.json if it exists, otherwise backup to local 'res_backups' folder")
parser.add_argument('-r', '--restore', type=str,
help="Restore the backup from the selected file. You can use 'latest' to restore backup "
"from the latest file found in the backups folder. RES Tool will search "
"for previous backup files in the res backups folder specified in settings.json or "
"the default location res_backups in the current directory. "
"Enter 'list' as restore name to list all available backups and see the latest one")
parser.add_argument('-w', '--browser', type=str,
help="Name of the browser to execute specified command on.")
parser.add_argument('-p', '--profile', type=str,
help="Name of the profile in the specified browser to run the command on. (optional, case sensitive) "
"Note: Not all browsers support profiles. Run --list to see supported browsers and their profiles.")
parser.add_argument('-d', '--debug', action='store_true',
help="Create log file for debugging.")
args = parser.parse_args()
if args.debug:
with open("application.log", "w") as _:
print "application.log file created, please run the commands again" \
"and see the github page on how to properly submit a bug report." \
"https://github.com/Nikola-K/RESTool"
exit()
app = CommandLine()
if args.list:
app.list_browsers()
elif args.backup:
if not args.browser:
print "You need to specify which browser to backup"
exit()
app.backup(args.browser, args.profile)
elif args.restore:
if args.restore == "list":
app.restore(backup_file="list")
exit()
if not args.browser:
print "You need to specify which browser to restore to"
exit()
app.restore(args.browser, args.profile, args.restore)
else:
parser.print_help()
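# Hedged usage sketch (not part of the original file; the script name below is
# an assumption). The flags come from the argparse definitions above:
#   python restool_cli.py --list
#   python restool_cli.py --backup --browser firefox --profile default
#   python restool_cli.py --restore list
#   python restool_cli.py --restore <backup-file.resbak> --browser chrome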
|
|
import unittest
from mahjong import algo
from mahjong.types import GameContext, Tile
class TestCanWin(unittest.TestCase):
def setUp(self):
self.context = GameContext()
# wait for BAMBOO5
self.context.players[0].hand.add_free_tiles([
Tile.CHAR1, Tile.CHAR1, Tile.CHAR1,
Tile.CHAR2, Tile.CHAR2, Tile.CHAR2,
Tile.CHAR3, Tile.CHAR3, Tile.CHAR3,
Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3, Tile.BAMBOO5
])
# wait for BAMBOO1
self.context.players[1].hand.add_free_tiles([
Tile.BAMBOO1,
Tile.CIRCLE4, Tile.CIRCLE4, Tile.CIRCLE4,
Tile.BAMBOO5, Tile.BAMBOO5, Tile.BAMBOO5,
Tile.BAMBOO7, Tile.BAMBOO7, Tile.BAMBOO7,
Tile.CIRCLE8, Tile.CIRCLE8, Tile.CIRCLE8
])
# wait for CHAR9 and BAMBOO1
self.context.players[2].hand.add_free_tiles([
Tile.CHAR1, Tile.CHAR1, Tile.CHAR1,
Tile.CHAR9, Tile.CHAR9,
Tile.BAMBOO1, Tile.BAMBOO1,
Tile.WHITE, Tile.WHITE, Tile.WHITE,
Tile.WEST, Tile.WEST, Tile.WEST
])
# not ready
self.context.players[3].hand.add_free_tiles([
Tile.RED, Tile.WHITE, Tile.RED,
Tile.GREEN, Tile.EAST, Tile.GREEN,
Tile.SOUTH, Tile.SOUTH, Tile.SOUTH,
Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.CHAR9, Tile.CHAR3,
Tile.CHAR4, Tile.CIRCLE1
])
def test_declared_ready(self):
self.context.players[0].extra['declared_ready'] = True
self.context.players[0].extra['waiting_tiles'] = [Tile.EAST, Tile.NORTH]
self.assertTrue(algo.can_win(self.context, player_idx=0, incoming_tile=Tile.EAST))
self.assertTrue(algo.can_win(self.context, player_idx=0, incoming_tile=Tile.NORTH))
self.assertTrue(algo.can_win(self.context, player_idx=0, incoming_tile=Tile.BAMBOO5))
self.assertFalse(algo.can_win(self.context, player_idx=0, incoming_tile=Tile.BAMBOO4))
self.assertFalse(algo.can_win(self.context, player_idx=0, incoming_tile=Tile.WEST))
def test_general_winning_pattern(self):
self.context.players[0].hand.last_tile = Tile.BAMBOO5
self.assertTrue(algo.can_win(self.context, player_idx=0))
self.context.players[0].hand.last_tile = Tile.BAMBOO1
self.assertFalse(algo.can_win(self.context, player_idx=0))
self.assertFalse(algo.can_win(self.context, player_idx=0, incoming_tile=Tile.RED))
self.assertFalse(algo.can_win(self.context, 1, Tile.BAMBOO3))
self.assertFalse(algo.can_win(self.context, 1, Tile.BAMBOO2))
self.assertTrue(algo.can_win(self.context, 1, Tile.BAMBOO1))
self.assertTrue(algo.can_win(self.context, 2, Tile.BAMBOO1))
self.assertTrue(algo.can_win(self.context, 2, Tile.CHAR9))
self.assertFalse(algo.can_win(self.context, 2, Tile.CIRCLE1))
self.assertFalse(algo.can_win(self.context, 3, Tile.CHAR1))
self.assertFalse(algo.can_win(self.context, 3, Tile.CIRCLE1))
def test_winning_restriction(self):
self.context.settings.patterns_win_filter += [
'lack-a-suit', 'self-picked'
]
self.assertFalse(algo.can_win(self.context, 0, Tile.BAMBOO5))
self.context.players[0].hand.last_tile = Tile.BAMBOO5
self.assertTrue(algo.can_win(self.context, 0))
self.context.players[1].hand.last_tile = Tile.BAMBOO1
self.assertTrue(algo.can_win(self.context, 1))
self.context.players[1].hand.last_tile = None
self.assertFalse(algo.can_win(self.context, 2, Tile.BAMBOO1))
self.context.players[2].hand.last_tile = Tile.CHAR9
self.assertFalse(algo.can_win(self.context, 2))
self.context.players[2].hand.last_tile = Tile.BAMBOO1
self.assertFalse(algo.can_win(self.context, 2))
hand = self.context.players[2].hand
hand.free_tiles = hand.free_tiles[:7]
hand.last_tile = Tile.CHAR9
self.assertTrue(algo.can_win(self.context, 2))
hand.last_tile = Tile.BAMBOO1
self.assertTrue(algo.can_win(self.context, 2))
hand.last_tile = Tile.BAMBOO2
self.assertFalse(algo.can_win(self.context, 2))
def test_special_pattern(self):
# eight- or seven-flowers
self.assertFalse(algo.can_win(self.context, 2, Tile.RED))
self.context.players[2].hand.flowers = Tile.FLOWERS
self.assertTrue(algo.can_win(self.context, 2, Tile.RED))
    def test_illegal_arguments(self):
self.context.cur_player_idx = None
with self.assertRaises(ValueError):
algo.can_win(self.context)
self.context.cur_player_idx = 0
with self.assertRaises(ValueError):
algo.can_win(self.context)
class TestWaitingTiles(unittest.TestCase):
def setUp(self):
self.context = GameContext()
# wait for BAMBOO5
self.context.players[0].hand.add_free_tiles([
Tile.CHAR1, Tile.CHAR1, Tile.CHAR1,
Tile.CHAR2, Tile.CHAR2, Tile.CHAR2,
Tile.CHAR3, Tile.CHAR3, Tile.CHAR3,
Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3, Tile.BAMBOO5
])
# wait for BAMBOO1
self.context.players[1].hand.add_free_tiles([
Tile.BAMBOO1,
Tile.CIRCLE4, Tile.CIRCLE4, Tile.CIRCLE4,
Tile.BAMBOO5, Tile.BAMBOO5, Tile.BAMBOO5,
Tile.BAMBOO7, Tile.BAMBOO7, Tile.BAMBOO7,
Tile.CIRCLE8, Tile.CIRCLE8, Tile.CIRCLE8
])
# wait for CHAR9 and BAMBOO1
self.context.players[2].hand.add_free_tiles([
Tile.CHAR1, Tile.CHAR1, Tile.CHAR1,
Tile.CHAR9, Tile.CHAR9,
Tile.BAMBOO1, Tile.BAMBOO1,
Tile.WHITE, Tile.WHITE, Tile.WHITE,
Tile.WEST, Tile.WEST, Tile.WEST
])
# not ready
self.context.players[3].hand.add_free_tiles([
Tile.RED, Tile.WHITE, Tile.RED,
Tile.GREEN, Tile.EAST, Tile.GREEN,
Tile.SOUTH, Tile.SOUTH, Tile.SOUTH,
Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.CHAR9, Tile.CHAR3,
Tile.CHAR4, Tile.CIRCLE1
])
def test_waiting_tiles(self):
self.assertEqual(algo.waiting_tiles(self.context, 0), [Tile.BAMBOO5])
self.assertEqual(algo.waiting_tiles(self.context, 1), [Tile.BAMBOO1])
self.assertEqual(algo.waiting_tiles(self.context, 2), [Tile.CHAR9, Tile.BAMBOO1])
self.assertFalse(algo.waiting_tiles(self.context, 3))
def test_special_pattern(self):
# seven flowers
self.context.players[2].hand.flowers = Tile.FLOWERS[0:7]
self.assertEqual(algo.waiting_tiles(self.context, 2), [Tile.CHAR9, Tile.BAMBOO1] + Tile.FLOWERS)
class TestReady(unittest.TestCase):
def setUp(self):
self.context = GameContext()
# wait for BAMBOO5
self.context.players[0].hand.add_free_tiles([
Tile.CHAR1, Tile.CHAR1, Tile.CHAR1,
Tile.CHAR2, Tile.CHAR2, Tile.CHAR2,
Tile.CHAR3, Tile.CHAR3, Tile.CHAR3,
Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3, Tile.BAMBOO5
])
# wait for BAMBOO1
self.context.players[1].hand.add_free_tiles([
Tile.BAMBOO1,
Tile.CIRCLE4, Tile.CIRCLE4, Tile.CIRCLE4,
Tile.BAMBOO5, Tile.BAMBOO5, Tile.BAMBOO5,
Tile.BAMBOO7, Tile.BAMBOO7, Tile.BAMBOO7,
Tile.CIRCLE8, Tile.CIRCLE8, Tile.CIRCLE8
])
# wait for CHAR9 and BAMBOO1
self.context.players[2].hand.add_free_tiles([
Tile.CHAR1, Tile.CHAR1, Tile.CHAR1,
Tile.CHAR9, Tile.CHAR9,
Tile.BAMBOO1, Tile.BAMBOO1,
Tile.WHITE, Tile.WHITE, Tile.WHITE,
Tile.WEST, Tile.WEST, Tile.WEST
])
# not ready
self.context.players[3].hand.add_free_tiles([
Tile.RED, Tile.WHITE, Tile.RED,
Tile.GREEN, Tile.EAST, Tile.GREEN,
Tile.SOUTH, Tile.SOUTH, Tile.SOUTH,
Tile.BAMBOO1, Tile.BAMBOO2, Tile.BAMBOO3,
Tile.CHAR9, Tile.CHAR3,
Tile.CHAR4, Tile.CIRCLE1
])
def test_ready(self):
self.assertTrue(algo.ready(self.context, 0))
self.assertTrue(algo.ready(self.context, 1))
self.assertTrue(algo.ready(self.context, 2))
self.assertFalse(algo.ready(self.context, 3))
class TestSelectMelders(unittest.TestCase):
def test_none_viable(self):
viable_decisions = [None, None, None, None]
player_decisions = [None, None, None, None]
self.assertFalse(algo.select_melders(viable_decisions, player_decisions))
def test_everybody_skips(self):
viable_decisions = [
['win', 'chow', 'skip'],
['pong', 'skip'],
['win', 'skip'],
['win', 'skip']
]
player_decisions = ['skip', 'skip', 'skip', 'skip']
self.assertFalse(algo.select_melders(viable_decisions, player_decisions))
def test_priority(self):
viable_decisions = [
None,
['win', 'chow', 'skip'],
['kong', 'skip'],
['win', 'skip']
]
player_decisions = [None, 'chow', 'kong', 'win']
self.assertEqual(algo.select_melders(viable_decisions, player_decisions),
[(3, 'win')])
player_decisions = [None, 'chow', 'kong', 'skip']
self.assertEqual(algo.select_melders(viable_decisions, player_decisions),
[(2, 'kong')])
player_decisions = [None, 'chow', 'skip', 'skip']
self.assertEqual(algo.select_melders(viable_decisions, player_decisions),
[(1, 'chow')])
def test_multi_win(self):
viable_decisions = [
None,
['win', 'skip'],
['kong', 'skip'],
['win', 'skip']
]
player_decisions = [None, 'win', 'kong', 'win']
self.assertEqual(algo.select_melders(viable_decisions, player_decisions),
[(1, 'win'), (3, 'win')])
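# Assumed convenience entry point (not in the original file) so the suites above
# can be run directly with `python <this_file>.py`; the project may instead rely
# on an external test runner.
if __name__ == '__main__':
    unittest.main()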
|
|
"""
multiRobotworld.py
Author: Ariel Anders, [email protected]
this program creates the worldmodel and defines the move primitives
for the robots. It has a successor function to use for planning
with a single robot or multiple robots
"""
import time
import copy
import gridMap
from itertools import product
dirs = ["north", "south", "east", "west"]
subdirs = ["north","south"]
names = "abcdefghijklmnpqrstuvwxyz"
actions = dirs + ['nop']
class Primitive:
def __init__(self):
pass
def __str__(self):
return "Empty primitive"
class MovePrimitive(Primitive):
def __init__(self, direction):
Primitive.__init__(self)
self.direction = direction
def __str__(self):
return self.direction
class WorldModel:
def __init__(self, xMax, yMax, obstacles, robotLocs, goalLocs, display=True):
self.xMax = xMax
self.yMax = yMax
self.robotLocs = [list(rL) for rL in robotLocs]
self.goalLocs = [list(rL) for rL in goalLocs]
self.home = [list(rL) for rL in robotLocs]
self.obstacles = [list(o) for o in obstacles] # list of (x,y) pairs
self.display = display
if self.display:
self.gridMap = gridMap.GridMap(xMax, yMax)
def legalLoc(self, (x, y)):
return x >= 0 and y >= 0 and x < self.xMax and y < self.yMax
def blockedByObst(self, loc):
return list(loc) in self.obstacles
def moveLocByDirection(self, loc, direction):
loc = copy.copy(loc)
if direction == 'north':
loc[1] += 1
if direction == 'south':
loc[1] -= 1
if direction == 'east':
loc[0] += 1
if direction == 'west':
loc[0] -= 1
return loc
def move(self, loc, direction):
if direction == 'north':
loc[1] += 1
if direction == 'south':
loc[1] -= 1
if direction == 'east':
loc[0] += 1
if direction == 'west':
loc[0] -= 1
if direction != None: print 'PRIM: Moved', direction, 'to', loc
def doi(self,i, prim, clear="noDraw"):
if isinstance(prim, MovePrimitive):
self.move(self.robotLocs[i], prim.direction)
else:
            raise Exception, 'Unknown primitive: ' + str(prim)
self.draw()
return True
def do(self, prims, clear = 'noDraw'):
for i, prim in enumerate(prims):
# Motion primitives
if isinstance(prim, MovePrimitive):
self.move(self.robotLocs[i], prim.direction)
else:
                raise Exception, 'Unknown primitive: ' + str(prim)
self.draw()
return True
def draw(self, color = 'cyan'):
robot_colors =lambda x: ['red', 'purple', 'blue', 'yellow', 'green'][x % 5]
objects = [('', loc, 'black') for loc in self.obstacles]
for i, goalLoc in enumerate(self.goalLocs):
objects.append(('g%d' %i , goalLoc, 'grey'))
for i, robotLoc in enumerate(self.robotLocs):
objects.append(('r%d' %i , robotLoc, robot_colors(i)))
self.gridMap.drawWorld(objects)
#XXXtime.sleep(1)
def moveLocByDirection(loc, direction):
loc = copy.copy(loc)
if direction == 'north':
loc[1] += 1
if direction == 'south':
loc[1] -= 1
if direction == 'east':
loc[0] += 1
if direction == 'west':
loc[0] -= 1
return loc
def successors(wm, single, extra_obstacles=[None], alpha=None):
# only test if loc inside grid and not at static obstacle
def legalMove(loc):
if wm.legalLoc(loc):
return not (wm.blockedByObst(loc) or tuple(loc) in extra_obstacles)
else: return False
def applyAction(action, robotLoc):
if alpha != None and robotLoc[1] != alpha:
valid_dirs = subdirs
else:
valid_dirs = dirs
if not action in valid_dirs:
return robotLoc
if action =="nop":
return robotLoc
else:
rl = moveLocByDirection(list(robotLoc), action)
if not legalMove(rl) :
return None
return tuple(rl)
def get_successors(robotStates):
joint_actions = list(product(actions, repeat=len(wm.robotLocs)))
bots = range(len(robotStates))
next_states = []
for joint_action in joint_actions:
if all([act == "nop" for act in joint_action]):
continue
robotLocs = list(robotStates)
for robot in bots:
action = joint_action[robot]
robotLoc = robotStates[robot]
robotLocs[robot] = applyAction(action, robotLoc)
if robotLocs[robot] == None: break
if None in robotLocs: continue
# check for robot in same location
robot_duplicates = len(set(robotLocs)) < len(robotLocs)
if robot_duplicates: continue
swap = False
for i in bots:
for j in range(i+1,len(robotLocs)):
if robotStates[i]==robotLocs[j]:
swap = True
break
if swap: continue
nops = sum(["nop"==x for x in joint_action])
cost = 1 + len(joint_action) - nops
next_states.append( (tuple(robotLocs) , cost))
return next_states
def single_get_successors(robotLoc):
if alpha!=None and robotLoc[1] != alpha:
valid_dirs = subdirs
else:
valid_dirs = dirs
next_states = []
for act in valid_dirs:
rl = moveLocByDirection(list(robotLoc), act)
if legalMove(rl):
next_states.append((tuple(rl),1))
return next_states
if single:
return single_get_successors
else:
return get_successors
def getPrimitives(state, next_state):
prims = [getMotionPrimitive(state[i], next_state[i])[0] \
for i in range(len(state))]
return prims
def getMotionPrimitive( (x,y), (nx,ny)):
move=None
if ny - y == 1: move = "north"
elif nx - x == 1: move = "east"
elif ny - y == -1: move = "south"
elif nx - x == -1: move ="west"
return MovePrimitive(move), move
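# Hedged usage sketch (not part of the original module). The 3x3 grid, obstacle
# and goal locations below are made-up example values; display is disabled so
# gridMap drawing is not exercised here.
if __name__ == "__main__":
    wm = WorldModel(3, 3, obstacles=[(1, 1)],
                    robotLocs=[(0, 0), (2, 2)],
                    goalLocs=[(2, 0), (0, 2)],
                    display=False)
    # Single-robot successor function: ((x, y), cost) pairs reachable in one move.
    single_succ = successors(wm, single=True)
    print single_succ((0, 0))
    # Joint successor function for both robots, with collision/swap checks applied.
    joint_succ = successors(wm, single=False)
    print len(joint_succ(((0, 0), (2, 2))))
    # Recover the move primitives for one joint transition.
    print [str(p) for p in getPrimitives(((0, 0), (2, 2)), ((1, 0), (2, 1)))]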
|
|
# -*- coding: utf-8 -*-
'''
The Saltutil module is used to manage the state of the salt minion itself. It is used to manage minion modules as well as automate updates to the salt minion.
:depends: - esky Python module for update functionality
'''
# Import python libs
import os
import hashlib
import shutil
import signal
import logging
import fnmatch
import time
import sys
# Import salt libs
import salt.payload
import salt.state
import salt.client
import salt.utils
import salt.crypt
from salt.exceptions import SaltReqTimeoutError
from salt._compat import string_types
# Import third party libs
try:
import esky
HAS_ESKY = True
except ImportError:
HAS_ESKY = False
log = logging.getLogger(__name__)
def _sync(form, env=None):
'''
Sync the given directory in the given environment
'''
if env is None:
# No environment passed, detect them based on gathering the top files
# from the master
env = 'base'
st_ = salt.state.HighState(__opts__)
top = st_.get_top()
if top:
env = st_.top_matches(top).keys()
if isinstance(env, string_types):
env = env.split(',')
ret = []
remote = set()
source = os.path.join('salt://_{0}'.format(form))
mod_dir = os.path.join(__opts__['extension_modules'], '{0}'.format(form))
if not os.path.isdir(mod_dir):
log.info('Creating module dir \'{0}\''.format(mod_dir))
os.makedirs(mod_dir)
for sub_env in env:
log.info('Syncing {0} for environment \'{1}\''.format(form, sub_env))
cache = []
        log.info('Loading cache from {0}, for {1}'.format(source, sub_env))
cache.extend(__salt__['cp.cache_dir'](source, sub_env))
local_cache_dir = os.path.join(
__opts__['cachedir'],
'files',
sub_env,
'_{0}'.format(form)
)
log.debug('Local cache dir: \'{0}\''.format(local_cache_dir))
for fn_ in cache:
if __opts__.get('file_client', '') == 'local':
for fn_root in __opts__['file_roots'].get(sub_env, []):
if fn_.startswith(fn_root):
relpath = os.path.relpath(fn_, fn_root)
relpath = relpath[relpath.index('/') + 1:]
relname = os.path.splitext(relpath)[0].replace(
os.sep,
'.')
remote.add(relpath)
dest = os.path.join(mod_dir, relpath)
else:
relpath = os.path.relpath(fn_, local_cache_dir)
relname = os.path.splitext(relpath)[0].replace(os.sep, '.')
remote.add(relpath)
dest = os.path.join(mod_dir, relpath)
log.info('Copying \'{0}\' to \'{1}\''.format(fn_, dest))
if os.path.isfile(dest):
# The file is present, if the sum differs replace it
srch = hashlib.md5(
salt.utils.fopen(fn_, 'r').read()
).hexdigest()
dsth = hashlib.md5(
salt.utils.fopen(dest, 'r').read()
).hexdigest()
if srch != dsth:
# The downloaded file differs, replace!
shutil.copyfile(fn_, dest)
ret.append('{0}.{1}'.format(form, relname))
else:
dest_dir = os.path.dirname(dest)
if not os.path.isdir(dest_dir):
os.makedirs(dest_dir)
shutil.copyfile(fn_, dest)
ret.append('{0}.{1}'.format(form, relname))
touched = bool(ret)
if __opts__.get('clean_dynamic_modules', True):
current = set(_listdir_recursively(mod_dir))
for fn_ in current - remote:
full = os.path.join(mod_dir, fn_)
if os.path.isfile(full):
touched = True
os.remove(full)
#cleanup empty dirs
while True:
emptydirs = _list_emptydirs(mod_dir)
if not emptydirs:
break
for emptydir in emptydirs:
touched = True
os.rmdir(emptydir)
#dest mod_dir is touched? trigger reload if requested
if touched:
mod_file = os.path.join(__opts__['cachedir'], 'module_refresh')
with salt.utils.fopen(mod_file, 'a+') as ofile:
ofile.write('')
return ret
def _listdir_recursively(rootdir):
file_list = []
for root, dirs, files in os.walk(rootdir):
for filename in files:
relpath = os.path.relpath(root, rootdir).strip('.')
file_list.append(os.path.join(relpath, filename))
return file_list
def _list_emptydirs(rootdir):
emptydirs = []
for root, dirs, files in os.walk(rootdir):
if not files and not dirs:
emptydirs.append(root)
return emptydirs
def update(version=None):
'''
Update the salt minion from the URL defined in opts['update_url']
This feature requires the minion to be running a bdist_esky build.
The version number is optional and will default to the most recent version
available at opts['update_url'].
Returns details about the transaction upon completion.
CLI Example:
.. code-block:: bash
salt '*' saltutil.update 0.10.3
'''
if not HAS_ESKY:
return 'Esky not available as import'
if not getattr(sys, 'frozen', False):
return 'Minion is not running an Esky build'
if not __salt__['config.option']('update_url'):
return '"update_url" not configured on this minion'
app = esky.Esky(sys.executable, __opts__['update_url'])
oldversion = __grains__['saltversion']
try:
if not version:
version = app.find_update()
if not version:
return 'No updates available'
app.fetch_version(version)
app.install_version(version)
app.cleanup()
except Exception as err:
return err
restarted = {}
for service in __opts__['update_restart_services']:
restarted[service] = __salt__['service.restart'](service)
return {'comment': 'Updated from {0} to {1}'.format(oldversion, version),
'restarted': restarted}
def sync_modules(env=None, refresh=True):
'''
Sync the modules from the _modules directory on the salt master file
server. This function is environment aware, pass the desired environment
to grab the contents of the _modules directory, base is the default
environment.
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_modules
'''
ret = _sync('modules', env)
if refresh:
refresh_modules()
return ret
def sync_states(env=None, refresh=True):
'''
Sync the states from the _states directory on the salt master file
server. This function is environment aware, pass the desired environment
to grab the contents of the _states directory, base is the default
environment.
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_states
'''
ret = _sync('states', env)
if refresh:
refresh_modules()
return ret
def sync_grains(env=None, refresh=True):
'''
Sync the grains from the _grains directory on the salt master file
server. This function is environment aware, pass the desired environment
to grab the contents of the _grains directory, base is the default
environment.
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_grains
'''
ret = _sync('grains', env)
if refresh:
refresh_modules()
refresh_pillar()
return ret
def sync_renderers(env=None, refresh=True):
'''
Sync the renderers from the _renderers directory on the salt master file
server. This function is environment aware, pass the desired environment
to grab the contents of the _renderers directory, base is the default
environment.
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_renderers
'''
ret = _sync('renderers', env)
if refresh:
refresh_modules()
return ret
def sync_returners(env=None, refresh=True):
'''
Sync the returners from the _returners directory on the salt master file
server. This function is environment aware, pass the desired environment
to grab the contents of the _returners directory, base is the default
environment.
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_returners
'''
ret = _sync('returners', env)
if refresh:
refresh_modules()
return ret
def sync_outputters(env=None, refresh=True):
'''
Sync the outputters from the _outputters directory on the salt master file
server. This function is environment aware, pass the desired environment
to grab the contents of the _outputters directory, base is the default
environment.
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_outputters
'''
ret = _sync('outputters', env)
if refresh:
refresh_modules()
return ret
def sync_all(env=None, refresh=True):
'''
Sync down all of the dynamic modules from the file server for a specific
environment
CLI Example:
.. code-block:: bash
salt '*' saltutil.sync_all
'''
log.debug('Syncing all')
ret = {}
ret['modules'] = sync_modules(env, False)
ret['states'] = sync_states(env, False)
ret['grains'] = sync_grains(env, False)
ret['renderers'] = sync_renderers(env, False)
ret['returners'] = sync_returners(env, False)
ret['outputters'] = sync_outputters(env, False)
if refresh:
refresh_modules()
return ret
def refresh_pillar():
'''
Signal the minion to refresh the pillar data.
CLI Example:
.. code-block:: bash
salt '*' saltutil.refresh_pillar
'''
__salt__['event.fire']({}, 'pillar_refresh')
def refresh_modules():
'''
Signal the minion to refresh the module and grain data
CLI Example:
.. code-block:: bash
salt '*' saltutil.refresh_modules
'''
__salt__['event.fire']({}, 'module_refresh')
def is_running(fun):
'''
If the named function is running return the data associated with it/them.
The argument can be a glob
CLI Example:
.. code-block:: bash
salt '*' saltutil.is_running state.highstate
'''
run = running()
ret = []
for data in run:
if fnmatch.fnmatch(data.get('fun', ''), fun):
ret.append(data)
return ret
def running():
'''
Return the data on all running salt processes on the minion
CLI Example:
.. code-block:: bash
salt '*' saltutil.running
'''
ret = []
serial = salt.payload.Serial(__opts__)
pid = os.getpid()
proc_dir = os.path.join(__opts__['cachedir'], 'proc')
if not os.path.isdir(proc_dir):
return []
for fn_ in os.listdir(proc_dir):
path = os.path.join(proc_dir, fn_)
with salt.utils.fopen(path, 'rb') as fp_:
buf = fp_.read()
fp_.close()
if buf:
data = serial.loads(buf)
else:
# Proc file is empty, remove
os.remove(path)
continue
if not isinstance(data, dict):
# Invalid serial object
continue
if not salt.utils.process.os_is_running(data['pid']):
# The process is no longer running, clear out the file and
# continue
os.remove(path)
continue
if data.get('pid') == pid:
continue
ret.append(data)
return ret
def find_job(jid):
'''
Return the data for a specific job id
CLI Example:
.. code-block:: bash
salt '*' saltutil.find_job <job id>
'''
for data in running():
if data['jid'] == jid:
return data
return {}
def signal_job(jid, sig):
'''
Sends a signal to the named salt job's process
CLI Example:
.. code-block:: bash
salt '*' saltutil.signal_job <job id> 15
'''
for data in running():
if data['jid'] == jid:
try:
os.kill(int(data['pid']), sig)
return 'Signal {0} sent to job {1} at pid {2}'.format(
int(sig),
jid,
data['pid']
)
except OSError:
path = os.path.join(__opts__['cachedir'], 'proc', str(jid))
if os.path.isfile(path):
os.remove(path)
                return ('Job {0} was not running and job data has been '
                        'cleaned up').format(jid)
return ''
def term_job(jid):
'''
Sends a termination signal (SIGTERM 15) to the named salt job's process
CLI Example:
.. code-block:: bash
salt '*' saltutil.term_job <job id>
'''
return signal_job(jid, signal.SIGTERM)
def kill_job(jid):
'''
Sends a kill signal (SIGKILL 9) to the named salt job's process
CLI Example:
.. code-block:: bash
salt '*' saltutil.kill_job <job id>
'''
return signal_job(jid, signal.SIGKILL)
def regen_keys():
'''
Used to regenerate the minion keys.
CLI Example:
.. code-block:: bash
salt '*' saltutil.regen_keys
'''
for fn_ in os.listdir(__opts__['pki_dir']):
path = os.path.join(__opts__['pki_dir'], fn_)
try:
os.remove(path)
except os.error:
pass
time.sleep(60)
sreq = salt.payload.SREQ(__opts__['master_uri'])
auth = salt.crypt.SAuth(__opts__)
def revoke_auth():
'''
The minion sends a request to the master to revoke its own key.
Note that the minion session will be revoked and the minion may
not be able to return the result of this command back to the master.
CLI Example:
.. code-block:: bash
salt '*' saltutil.revoke_auth
'''
sreq = salt.payload.SREQ(__opts__['master_uri'])
auth = salt.crypt.SAuth(__opts__)
tok = auth.gen_token('salt')
load = {'cmd': 'revoke_auth',
'id': __opts__['id'],
'tok': tok}
try:
return auth.crypticle.loads(
sreq.send('aes', auth.crypticle.dumps(load), 1))
except SaltReqTimeoutError:
return False
return False
def cmd(tgt,
fun,
arg=(),
timeout=None,
expr_form='glob',
ret='',
kwarg=None,
ssh=False,
**kwargs):
'''
Assuming this minion is a master, execute a salt command
CLI Example:
.. code-block:: bash
salt '*' saltutil.cmd
'''
if ssh:
client = salt.client.SSHClient(
os.path.dirname(__opts__['conf_file']))
else:
client = salt.client.LocalClient(
os.path.dirname(__opts__['conf_file']))
    ret_dict = {}
    for ret_comp in client.cmd_iter(
            tgt,
            fun,
            arg,
            timeout,
            expr_form,
            ret,
            kwarg,
            **kwargs):
        ret_dict.update(ret_comp)
    return ret_dict
def cmd_iter(tgt,
fun,
arg=(),
timeout=None,
expr_form='glob',
ret='',
kwarg=None,
ssh=False,
**kwargs):
'''
Assuming this minion is a master, execute a salt command
CLI Example:
.. code-block:: bash
salt '*' saltutil.cmd
'''
if ssh:
client = salt.client.SSHClient(
os.path.dirname(__opts__['conf_file']))
else:
client = salt.client.LocalClient(
os.path.dirname(__opts__['conf_file']))
for ret in client.cmd_iter(
tgt,
fun,
arg,
timeout,
expr_form,
ret,
kwarg,
**kwargs):
yield ret
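# Hedged CLI sketch for the master-side helpers above; the target and function
# names are illustrative assumptions, not values taken from the docstrings:
#   salt 'master-minion' saltutil.cmd '*' test.ping
#   salt 'master-minion' saltutil.cmd_iter '*' test.ping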
|
|
import itertools
from inspect import formatargspec, getargspec
from types import FunctionType, MethodType
from pydot import Dot, Edge, Node
from sqlalchemy.orm import class_mapper
from sqlalchemy.orm.exc import UnmappedClassError
from sqlalchemy.orm.properties import RelationshipProperty
NODE_TABLE = (
'<<TABLE BORDER="0" CELLBORDER="1" CELLPADDING="1" CELLSPACING="0">'
'<TR><TD BGCOLOR="{bgcolor}" VALIGN="BOTTOM">'
'<FONT POINT-SIZE="{top_margin}"><BR ALIGN="LEFT" /></FONT>'
'<FONT COLOR="{color}" POINT-SIZE="{fontsize}"><B>{title}</B></FONT>'
'</TD></TR>{table_content}</TABLE>>')
NODE_BLOCK_START = '<TR><TD><TABLE BORDER="0" CELLSPACING="0" CELLPADDING="1">'
NODE_BLOCK_END = '</TABLE></TD></TR>'
DEFAULT_STYLE = {
'edge': {
'arrowsize': 0.8,
'fontname': 'Bitstream Vera Sans',
'fontsize': 8,
'labelfloat': 'true',
'penwidth': 1},
'inheritance': {
'arrowhead': 'none',
'arrowtail': 'empty'},
'relationship': {
'arrowhead': 'vee',
'arrowtail': 'vee'},
'relationship-viewonly': {
'style': 'dashed'},
'node': {
'fontname': 'Bitstream Vera Sans',
'fontsize': 8,
'shape': 'plaintext'},
'node_table_header': {
'bgcolor': '#707070',
'color': '#FFFFFF',
'fontsize': 10,
'top_margin': 2}}
def calculate_style(style):
def collapse(*keys):
result = {}
for key in keys:
result.update(DEFAULT_STYLE[key])
result.update(style.get(key, {}))
return result
return {
'edge': collapse('edge'),
'inheritance': collapse('edge', 'inheritance'),
'relationship': collapse('edge', 'relationship'),
'relationship-viewonly': collapse('relationship-viewonly'),
'node': collapse('node'),
'node_table_header': collapse('node_table_header')}
class Grapher(object):
GRAPH_OPTIONS = {}
def __init__(self, graph_options, name_mangler, style):
self.graph_options = self.GRAPH_OPTIONS.copy()
if graph_options is not None:
self.graph_options.update(graph_options)
self.renamer = name_mangler or (lambda obj: obj)
self.style = calculate_style(style or {})
@staticmethod
def node_row(content, port=''):
"""Renders a content row for a node table."""
if isinstance(content, (list, tuple)):
content = ''.join(content)
return '<TR><TD ALIGN="LEFT" PORT="{port}">{content}</TD></TR>'.format(
port=port, content=content)
def node_table(self, title, *content_iterators):
"""Returns an HTML table label for a Node."""
return NODE_TABLE.format(
table_content=''.join(itertools.chain(*content_iterators)),
title=self.renamer(title),
**self.style['node_table_header'])
@staticmethod
def quote(name):
"""Returns the name in quotes, preventing reserved keyword issues."""
return '"{}"'.format(name)
class ModelGrapher(Grapher):
GRAPH_OPTIONS = {'mclimit': 1000}
def __init__(
self,
show_attributes=True,
show_datatypes=True,
show_inherited=True,
show_operations=False,
show_multiplicity_one=False,
graph_options=None,
name_mangler=None,
style=None):
super(ModelGrapher, self).__init__(graph_options, name_mangler, style)
self.show_attributes = show_attributes
self.show_datatypes = show_datatypes
self.show_inherited = show_inherited
self.show_operations = show_operations
self.show_multiplicity_one = show_multiplicity_one
def graph(self, model_classes):
graph = Dot(**self.graph_options)
relations = set()
# Create nodes from mappers
mappers = map(class_mapper, model_classes)
for mapper in mappers:
graph.add_node(Node(
self.quote(mapper),
label=self.node_table(
mapper.class_.__name__,
self._model_columns(mapper),
self._model_operations(mapper)),
**self.style['node']))
if mapper.inherits:
graph.add_edge(Edge(
*list(map(self.quote, (mapper.inherits, mapper))),
**self.style['inheritance']))
for loader in mapper.iterate_properties:
if (isinstance(loader, RelationshipProperty) and
loader.mapper in mappers):
reverse = getattr(loader, '_reverse_property')
if len(reverse) == 1:
relations.add(frozenset((loader, next(iter(reverse)))))
else:
relations.add((loader,))
# Create edges from relationships between mappers
for relation in relations:
options = self.style['relationship'].copy()
if len(relation) == 2:
src, dest = relation
if src.viewonly and dest.viewonly:
options.update(self.style['relationship-viewonly'])
between = src.parent, dest.parent
options['headlabel'] = self._format_relationship(src)
options['taillabel'] = self._format_relationship(dest)
options['dir'] = 'both'
else:
prop, = relation
between = prop.parent, prop.mapper
options['headlabel'] = self._format_relationship(prop)
if prop.viewonly:
options.update(self.style['relationship-viewonly'])
graph.add_edge(Edge(*list(map(self.quote, between)), **options))
return graph
def quote(self, mapper):
"""Returns the quoted model name."""
return super(ModelGrapher, self).quote(mapper.class_.__name__)
def _model_columns(self, mapper):
if self.show_attributes:
yield NODE_BLOCK_START
for column in mapper.columns:
if self.show_inherited or column.table is mapper.tables[0]:
yield self.node_row(self._column_label(column))
yield NODE_BLOCK_END
def _model_operations(self, mapper):
model = mapper.class_
operations = filter(self._is_local_class_method(model), vars(model))
if operations and self.show_operations:
yield NODE_BLOCK_START
for name in sorted(operations):
func = getattr(model, name)
oper = [self.renamer(name), self._format_argspec(func)]
if not isinstance(func, MethodType):
oper.insert(0, '*') # Non-instancemethod indicator
yield self.node_row(oper)
yield NODE_BLOCK_END
def _column_label(self, column):
"""Returns the column name with type if so configured."""
if self.show_datatypes:
return '{}: {}'.format(
*list(map(self.renamer, (column.name, type(column.type).__name__))))
return self.renamer(column.name)
def _format_argspec(self, function):
"""Returns a formatted argument spec exluding a method's 'self'."""
argspec = list(getargspec(function))
if argspec[0][0] == 'self':
argspec[0].pop(0)
for index, content in enumerate(argspec):
if isinstance(content, (list, tuple)):
argspec[index] = list(map(self.renamer, content))
elif isinstance(content, str):
argspec[index] = self.renamer(content)
return formatargspec(*argspec)
def _format_multiplicity(self, prop):
"""Returns a string with a multiplicity indicator."""
if prop.uselist:
return '+'
if hasattr(prop, 'local_side'):
cols = prop.local_side
else:
cols = prop.local_columns
if any(col.nullable for col in cols):
return '0..1 '
if self.show_multiplicity_one:
return '1 '
return ''
def _format_relationship(self, rel):
"""Returns the relationship name with multiplicity prefix."""
return ' {}{} '.format(
self._format_multiplicity(rel), self.renamer(rel.key))
@staticmethod
def _is_local_class_method(class_):
"""Test whether attr name is a method defined on the provided class."""
def _checker(attribute):
obj = getattr(class_, attribute)
return (isinstance(obj, (FunctionType, MethodType)) and
obj.__module__ is class_.__module__)
return _checker
class TableGrapher(Grapher):
GRAPH_OPTIONS = {
'concentrate': 'true',
'mclimit': 1000,
'rankdir': 'TB'}
def __init__(
self,
show_datatypes=True,
show_indexes=True,
graph_options=None,
name_mangler=None,
style=None):
super(TableGrapher, self).__init__(graph_options, name_mangler, style)
self.show_datatypes = show_datatypes
self.show_indexes = show_indexes
def graph(self, tables, skip_tables=()):
graph = Dot(**self.graph_options)
for table in tables:
if table.name in skip_tables:
continue
graph.add_node(Node(
self.quote(table.name),
label=self.node_table(
table.name,
self._table_columns(table),
self._table_indices(table)),
**self.style['node']))
for fk in table.foreign_keys:
fk_table = fk.column.table
if fk_table not in tables or fk_table.name in skip_tables:
continue
is_single_parent = fk.parent.primary_key or fk.parent.unique
options = self.style['edge'].copy()
options['arrowtail'] = 'empty' if is_single_parent else 'crow'
options['dir'] = 'both'
if fk.parent.primary_key and fk.column.primary_key:
# Inheritance relationship
edge = fk_table.name, table.name
options['arrowhead'] = 'none'
options['tailport'] = fk.column.name
options['headport'] = fk.parent.name
else:
edge = table.name, fk_table.name
options['arrowhead'] = 'odot'
options['tailport'] = fk.parent.name
options['headport'] = fk.column.name
graph.add_edge(Edge(*list(map(self.quote, edge)), **options))
return graph
def _table_columns(self, table):
yield (NODE_BLOCK_START)
for col in table.columns:
yield self.node_row(self._format_column(col), port=col.name)
yield (NODE_BLOCK_END)
def _table_indices(self, table):
if self.show_indexes and (table.indexes or table.primary_key):
yield NODE_BLOCK_START
if table.primary_key:
yield self.node_row(self._format_index(
'PRIMARY', table.primary_key.columns))
for index in table.indexes:
yield self.node_row(self._format_index(
'UNIQUE' if index.unique else 'INDEX', index.columns))
yield NODE_BLOCK_END
def _format_column(self, col):
if self.show_datatypes:
return '{}: {}'.format(
*list(map(self.renamer, (col.name, str(col.type)))))
return self.renamer(col.name)
def _format_index(self, idx_type, cols):
return '{} ({})'.format(
idx_type, ', '.join(self.renamer(col.name) for col in cols))
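# Hedged usage sketch (not part of the original module). "User", "Address" and
# "Base" are hypothetical mapped classes/metadata, and the output paths are
# arbitrary; any pydot Dot output method could be used instead of write_png.
#
#   graph = ModelGrapher(show_operations=True).graph([User, Address])
#   graph.write_png('models.png')
#
#   TableGrapher().graph(Base.metadata.tables.values()).write_png('schema.png')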
|
|
#! /usr/bin/env python
from api.util import settings
from dataprovider.dataprovider import RedisLiveDataProvider
from threading import Timer
import redis
import datetime
import threading
import traceback
import argparse
import time
class Monitor(object):
"""Monitors a given Redis server using the MONITOR command.
"""
def __init__(self, connection_pool):
"""Initializes the Monitor class.
Args:
connection_pool (redis.ConnectionPool): Connection pool for the \
Redis server to monitor.
"""
self.connection_pool = connection_pool
self.connection = None
def __del__(self):
try:
self.reset()
except:
pass
def reset(self):
"""If we have a connection, release it back to the connection pool.
"""
if self.connection:
self.connection_pool.release(self.connection)
self.connection = None
def monitor(self):
"""Kicks off the monitoring process and returns a generator to read the
response stream.
"""
if self.connection is None:
self.connection = self.connection_pool.get_connection('monitor', None)
self.connection.send_command("monitor")
return self.listen()
def parse_response(self):
"""Parses the most recent responses from the current connection.
"""
return self.connection.read_response()
def listen(self):
"""A generator which yields responses from the MONITOR command.
"""
while True:
yield self.parse_response()
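# Hedged usage sketch for Monitor (host/port below are illustrative assumptions):
#
#   pool = redis.ConnectionPool(host="localhost", port=6379, db=0)
#   for response in Monitor(pool).monitor():
#       print response  # one raw line of MONITOR output per iteration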
class MonitorThread(threading.Thread):
"""Runs a thread to execute the MONITOR command against a given Redis server
and store the resulting aggregated statistics in the configured stats
provider.
"""
def __init__(self, server, port, password=None):
"""Initializes a MontitorThread.
Args:
server (str): The host name or IP of the Redis server to monitor.
port (int): The port to contact the Redis server on.
Kwargs:
password (str): The password to access the Redis host. Default: None
"""
super(MonitorThread, self).__init__()
self.server = server
self.port = port
self.password = password
self.id = self.server + ":" + str(self.port)
self._stop = threading.Event()
def stop(self):
"""Stops the thread.
"""
self._stop.set()
def stopped(self):
"""Returns True if the thread is stopped, False otherwise.
"""
return self._stop.is_set()
def run(self):
"""Runs the thread.
"""
stats_provider = RedisLiveDataProvider.get_provider()
pool = redis.ConnectionPool(host=self.server, port=self.port, db=0,
password=self.password)
monitor = Monitor(pool)
commands = monitor.monitor()
for command in commands:
try:
parts = command.split(" ")
if len(parts) == 1:
continue
epoch = float(parts[0].strip())
timestamp = datetime.datetime.fromtimestamp(epoch)
# Strip '(db N)' and '[N x.x.x.x:xx]' out of the monitor str
if (parts[1] == "(db") or (parts[1][0] == "["):
parts = [parts[0]] + parts[3:]
command = parts[1].replace('"', '').upper()
if len(parts) > 2:
keyname = parts[2].replace('"', '').strip()
else:
keyname = None
if len(parts) > 3:
# TODO: This is probably more efficient as a list
# comprehension wrapped in " ".join()
arguments = ""
for x in xrange(3, len(parts)):
arguments += " " + parts[x].replace('"', '')
arguments = arguments.strip()
else:
arguments = None
if not command == 'INFO' and not command == 'MONITOR':
stats_provider.save_monitor_command(self.id,
timestamp,
command,
str(keyname),
str(arguments))
except Exception, e:
tb = traceback.format_exc()
print "==============================\n"
print datetime.datetime.now()
print tb
print command
print "==============================\n"
if self.stopped():
break
class InfoThread(threading.Thread):
"""Runs a thread to execute the INFO command against a given Redis server
and store the resulting statistics in the configured stats provider.
"""
def __init__(self, server, port, password=None):
"""Initializes an InfoThread instance.
Args:
            server (str): The host name or IP of the Redis server to monitor.
port (int): The port number of the Redis server to monitor.
Kwargs:
password (str): The password to access the Redis server. \
Default: None
"""
threading.Thread.__init__(self)
self.server = server
self.port = port
self.password = password
self.id = self.server + ":" + str(self.port)
self._stop = threading.Event()
def stop(self):
"""Stops the thread.
"""
self._stop.set()
def stopped(self):
"""Returns True if the thread is stopped, False otherwise.
"""
return self._stop.is_set()
def run(self):
"""Does all the work.
"""
stats_provider = RedisLiveDataProvider.get_provider()
redis_client = redis.StrictRedis(host=self.server, port=self.port, db=0,
password=self.password)
# process the results from redis
while not self.stopped():
try:
redis_info = redis_client.info()
current_time = datetime.datetime.now()
used_memory = int(redis_info['used_memory'])
# used_memory_peak not available in older versions of redis
try:
peak_memory = int(redis_info['used_memory_peak'])
except:
peak_memory = used_memory
stats_provider.save_memory_info(self.id, current_time,
used_memory, peak_memory)
stats_provider.save_info_command(self.id, current_time,
redis_info)
# databases=[]
# for key in sorted(redis_info.keys()):
# if key.startswith("db"):
# database = redis_info[key]
# database['name']=key
# databases.append(database)
# expires=0
# persists=0
# for database in databases:
# expires+=database.get("expires")
# persists+=database.get("keys")-database.get("expires")
# stats_provider.SaveKeysInfo(self.id, current_time, expires, persists)
time.sleep(1)
except Exception, e:
tb = traceback.format_exc()
print "==============================\n"
print datetime.datetime.now()
print tb
print "==============================\n"
class RedisMonitor(object):
def __init__(self):
self.threads = []
self.active = True
def run(self, duration):
"""Monitors all redis servers defined in the config for a certain number
of seconds.
Args:
duration (int): The number of seconds to monitor for.
"""
redis_servers = settings.get_redis_servers()
for redis_server in redis_servers:
redis_password = redis_server.get("password")
monitor = MonitorThread(redis_server["server"], redis_server["port"], redis_password)
self.threads.append(monitor)
monitor.setDaemon(True)
monitor.start()
info = InfoThread(redis_server["server"], redis_server["port"], redis_password)
self.threads.append(info)
info.setDaemon(True)
info.start()
t = Timer(duration, self.stop)
t.start()
try:
while self.active:
time.sleep(1)
except (KeyboardInterrupt, SystemExit):
self.stop()
t.cancel()
def stop(self):
"""Stops the monitor and all associated threads.
"""
if args.quiet==False:
print "shutting down..."
for t in self.threads:
t.stop()
self.active = False
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Monitor redis.')
parser.add_argument('--duration',
type=int,
help="duration to run the monitor command (in seconds)",
required=True)
parser.add_argument('--quiet',
help="do not write anything to standard output",
required=False,
action='store_true')
args = parser.parse_args()
duration = args.duration
monitor = RedisMonitor()
monitor.run(duration)
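# Hedged invocation examples (the script name is an assumption; the flags are
# defined by the argparse setup above):
#   python redis-monitor.py --duration 120
#   python redis-monitor.py --duration 3600 --quiet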
|
|
import os
import tempfile
import zipfile
import shutil
import requests
from lxml import etree
import logging
from django.template import RequestContext
from django.contrib.auth.decorators import login_required
from django.http import HttpResponse, HttpResponseRedirect, HttpResponseNotFound, FileResponse
from django.shortcuts import render
from rest_framework.decorators import api_view
from hs_core import hydroshare
from hs_core.views.utils import authorize, ACTION_TO_AUTHORIZE, json_or_jsonp
from django_irods.views import download as download_bag_from_irods
from . import ts_utils
from .forms import ReferencedSitesForm, ReferencedVariablesForm, GetTSValuesForm, \
VerifyRestUrlForm, CreateRefTimeSeriesForm
from drf_yasg.utils import swagger_auto_schema
PREVIEW_NAME = "preview.png"
HIS_CENTRAL_URL = 'https://hiscentral.cuahsi.org/webservices/hiscentral.asmx/GetWaterOneFlowServiceInfo'
logger = logging.getLogger(__name__)
# query HIS central to get all available HydroServer urls
def get_his_urls(request):
try:
r = requests.get(HIS_CENTRAL_URL)
if r.status_code == 200:
response = r.text.encode()
root = etree.XML(response)
else:
raise Exception("Query HIS central error.")
url_list = []
for element in root.iter():
if "servURL" in element.tag:
url_list.append(element.text)
return json_or_jsonp(request, {"status": "success", "url_list": url_list})
except Exception as e:
logger.exception("get_his_urls: " + str(e))
return json_or_jsonp(request, {"status": "error"})
def search_sites(request):
try:
f = ReferencedSitesForm(request.GET)
if f.is_valid():
params = f.cleaned_data
url = params['url']
sites = ts_utils.sites_from_soap(url)
return json_or_jsonp(request, {"status": "success", "sites": sites})
else:
raise Exception("search_sites form validation failed.")
except Exception as e:
logger.exception("search_sites: " + str(e))
return json_or_jsonp(request, {"status": "error"})
def search_variables(request):
try:
f = ReferencedVariablesForm(request.GET)
if f.is_valid():
params = f.cleaned_data
url = params['url']
site = params['site']
variables = ts_utils.site_info_from_soap(url, site=site)
return json_or_jsonp(request, {"status": "success", "variables": variables})
else:
raise Exception("search_variables form validation failed.")
except Exception as e:
logger.exception("search_variables: %s" % (str(e)))
return json_or_jsonp(request, {"status": "error"})
def time_series_from_service(request):
tempdir = None
try:
f = GetTSValuesForm(request.GET)
if f.is_valid():
params = f.cleaned_data
ref_type = params['ref_type']
url = params['service_url']
site = params.get('site')
variable = params.get('variable')
if ref_type == 'rest':
ts = ts_utils.QueryHydroServerGetParsedWML(service_url=url, soap_or_rest=ref_type)
else:
index = site.rfind(" [")
site = site[index+2:len(site)-1]
site_code = site
index = variable.rfind(" [")
variable_code = variable[index+2:len(variable)-1]
ts = ts_utils.QueryHydroServerGetParsedWML(service_url=url, soap_or_rest=ref_type, site_code=site_code, \
variable_code=variable_code)
ts['url'] = url
ts['ref_type'] = ref_type
ts_session = request.session.get('ts', None)
if ts_session is not None:
del request.session['ts']
request.session['ts'] = ts
data = ts['data']
units = ts['unit_abbr']
if units is None:
units = ts['unit_name']
if units is None:
units = "Unknown"
variable_name = ts['variable_name']
noDataValue = ts['noDataValue']
tempdir = tempfile.mkdtemp()
ts_utils.create_vis_2(path=tempdir, data=data, xlabel='Date',
variable_name=variable_name, units=units, noDataValue=noDataValue,
predefined_name=PREVIEW_NAME)
tempdir_last_six_chars = tempdir[-6:]
preview_url = "/hsapi/_internal/refts/preview-figure/%s/" % (tempdir_last_six_chars)
return json_or_jsonp(request, {'status': "success", 'preview_url': preview_url})
else:
raise Exception("GetTSValuesForm form validation failed.")
except Exception as e:
logger.exception("time_series_from_service: %s" % (str(e)))
if tempdir is not None:
shutil.rmtree(tempdir)
return json_or_jsonp(request, {'status': "error"})
def preview_figure(request, preview_code, *args, **kwargs):
response = HttpResponse()
preview_str = None
tempdir_preview = None
try:
tempdir_base_path = tempfile.gettempdir()
tempdir_preview = tempdir_base_path + "/" + "tmp" + preview_code
preview_full_path = tempdir_preview + "/" + PREVIEW_NAME
preview_fhandle = open(preview_full_path,'rb')
preview_str = str(preview_fhandle.read())
preview_fhandle.close()
if preview_str is None:
            raise Exception("Failed to read preview image: " + preview_full_path)
except Exception as e:
module_dir = os.path.dirname(__file__)
error_location = os.path.join(module_dir, "static/ref_ts/img/warning.png")
err_hdl = open(error_location, 'rb')
preview_str = str(err_hdl.read())
err_hdl.close()
finally:
if tempdir_preview is not None and os.path.isdir(tempdir_preview):
shutil.rmtree(tempdir_preview)
response.content_type = "image/png"
response.write(preview_str)
return response
def verify_rest_url(request):
try:
f = VerifyRestUrlForm(request.GET)
if f.is_valid():
params = f.cleaned_data
url = params['url']
ts = requests.get(url, verify=False)
ts_xml = etree.XML(ts.text.encode())
if ts.status_code == 200 and 'timeseriesresponse' in ts_xml.tag.lower():
return json_or_jsonp(request, {"status": "success"})
elif ts.status_code == 200 and 'collection' in ts_xml.tag.lower():
return json_or_jsonp(request, {"status": "success"})
else:
raise Exception("Test REST url failed.")
else:
raise Exception("Invalid REST url.")
except:
return json_or_jsonp(request, {"status": "error"})
def download_refts_resource_bag(request, shortkey, *args, **kwargs):
tempdir = None
try:
_, authorized, _ = authorize(request, shortkey,
needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE,
raises_exception=False)
if not authorized:
response = HttpResponse(status=401)
response.content = "<h3>You do not have permission to download this resource!</h3>"
return response
path = "bags/" + str(shortkey) + ".zip"
response_irods = download_bag_from_irods(request, path, use_async=False,
use_reverse_proxy=False)
tempdir = tempfile.mkdtemp()
response = assemble_refts_bag(shortkey, response_irods.streaming_content,
temp_dir=tempdir)
return response
except Exception as e:
logger.exception("download_refts_resource_bag: %s" % (str(e)))
response = HttpResponse(status=503)
response.content = "<h3>Failed to download this resource!</h3>"
return response
finally:
if tempdir is not None:
shutil.rmtree(tempdir)
@swagger_auto_schema(method='get', auto_schema=None)
@api_view(['GET'])
def rest_download_refts_resource_bag(request, shortkey, *args, **kwargs):
tempdir = None
_, authorized, _ = authorize(request, shortkey,
needed_permission=ACTION_TO_AUTHORIZE.VIEW_RESOURCE,
raises_exception=True)
try:
path = "bags/" + str(shortkey) + ".zip"
response_irods = download_bag_from_irods(request, path, rest_call=True, use_async=False,
use_reverse_proxy=False)
if not response_irods.streaming:
raise Exception("Failed to stream RefTS bag")
else:
tempdir = tempfile.mkdtemp()
response = assemble_refts_bag(shortkey, response_irods.streaming_content,
temp_dir=tempdir)
return response
except Exception as e:
logger.exception("rest_download_refts_resource_bag: %s" % (str(e)))
response = HttpResponse(status=503)
response.content = "Failed to download this resource!"
return response
finally:
if tempdir is not None:
shutil.rmtree(tempdir)
def assemble_refts_bag(res_id, empty_bag_stream, temp_dir=None):
"""
save empty_bag_stream to local; download latest wml;
put wml into empty bag; return filled-in bag in FileResponse
:param res_id: the resource id of the RefTS resource
    :param empty_bag_stream: the stream of the empty bag
:param temp_dir: a folder to store files locally
:return: FileResponse obj
"""
if temp_dir is None:
temp_dir = tempfile.mkdtemp()
bag_save_to_path = temp_dir + "/" + str(res_id) + ".zip"
with open(bag_save_to_path, 'wb+') as f:
for chunk in empty_bag_stream:
f.write(chunk)
res_files_fp_arr = ts_utils.generate_resource_files(res_id, temp_dir)
bag_zip_obj = zipfile.ZipFile(bag_save_to_path, "a", zipfile.ZIP_DEFLATED)
bag_content_base_folder = str(res_id) + "/data/contents/" # _RESOURCE_ID_/data/contents/
for fn_fp in res_files_fp_arr:
fh = open(fn_fp['fullpath'], 'r')
bag_zip_obj.writestr(bag_content_base_folder + fn_fp['fname'], fh.read())
fh.close()
bag_zip_obj.close()
response = FileResponse(open(bag_save_to_path, 'rb'), content_type='application/zip')
response['Content-Disposition'] = 'attachment; filename="' + str(res_id) + '.zip"'
response['Content-Length'] = os.path.getsize(bag_save_to_path)
return response
|
|
from __future__ import absolute_import, division, print_function
from collections import deque
from dask.core import istask, subs
def head(task):
"""Return the top level node of a task"""
if istask(task):
return task[0]
elif isinstance(task, list):
return list
else:
return task
def args(task):
"""Get the arguments for the current task"""
if istask(task):
return task[1:]
elif isinstance(task, list):
return task
else:
return ()
class Traverser(object):
"""Traverser interface for tasks.
Class for storing the state while performing a preorder-traversal of a
task.
Parameters
----------
term : task
The task to be traversed
Attributes
----------
term
The current element in the traversal
current
The head of the current element in the traversal. This is simply `head`
applied to the attribute `term`.
"""
def __init__(self, term, stack=None):
self.term = term
if not stack:
self._stack = deque([END])
else:
self._stack = stack
def __iter__(self):
while self.current is not END:
yield self.current
self.next()
def copy(self):
"""Copy the traverser in its current state.
This allows the traversal to be pushed onto a stack, for easy
backtracking."""
return Traverser(self.term, deque(self._stack))
def next(self):
"""Proceed to the next term in the preorder traversal."""
subterms = args(self.term)
if not subterms:
# No subterms, pop off stack
self.term = self._stack.pop()
else:
self.term = subterms[0]
self._stack.extend(reversed(subterms[1:]))
@property
def current(self):
return head(self.term)
def skip(self):
"""Skip over all subterms of the current level in the traversal"""
self.term = self._stack.pop()
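# Hedged illustration (add/mul are stand-in callables, not defined in this
# module): a preorder traversal of the task (add, 1, (mul, 2, 3)) visits the
# heads in order, i.e.
#   list(Traverser((add, 1, (mul, 2, 3)))) == [add, 1, mul, 2, 3]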
class Token(object):
"""A token object.
Used to express certain objects in the traversal of a task or pattern."""
def __init__(self, name):
self.name = name
def __repr__(self):
return self.name
# A variable to represent *all* variables in a discrimination net
VAR = Token('?')
# Represents the end of the traversal of an expression. We can't use `None`,
# 'False', etc... here, as anything may be an argument to a function.
END = Token('end')
class Node(tuple):
"""A Discrimination Net node."""
__slots__ = ()
def __new__(cls, edges=None, patterns=None):
edges = edges if edges else {}
patterns = patterns if patterns else []
return tuple.__new__(cls, (edges, patterns))
@property
def edges(self):
"""A dictionary, where the keys are edges, and the values are nodes"""
return self[0]
@property
def patterns(self):
"""A list of all patterns that currently match at this node"""
return self[1]
class RewriteRule(object):
"""A rewrite rule.
Expresses `lhs` -> `rhs`, for variables `vars`.
Parameters
----------
lhs : task
The left-hand-side of the rewrite rule.
rhs : task or function
The right-hand-side of the rewrite rule. If it's a task, variables in
`rhs` will be replaced by terms in the subject that match the variables
in `lhs`. If it's a function, the function will be called with a dict
of such matches.
vars: tuple, optional
Tuple of variables found in the lhs. Variables can be represented as
any hashable object; a good convention is to use strings. If there are
no variables, this can be omitted.
Examples
--------
Here's a `RewriteRule` to replace all nested calls to `list`, so that
`(list, (list, 'x'))` is replaced with `(list, 'x')`, where `'x'` is a
variable.
>>> lhs = (list, (list, 'x'))
>>> rhs = (list, 'x')
>>> variables = ('x',)
>>> rule = RewriteRule(lhs, rhs, variables)
Here's a more complicated rule that uses a callable right-hand-side. A
callable `rhs` takes in a dictionary mapping variables to their matching
values. This rule replaces all occurrences of `(list, 'x')` with `'x'` if
`'x'` is a list itself.
>>> lhs = (list, 'x')
>>> def repl_list(sd):
... x = sd['x']
... if isinstance(x, list):
... return x
... else:
... return (list, x)
>>> rule = RewriteRule(lhs, repl_list, variables)
"""
def __init__(self, lhs, rhs, vars=()):
if not isinstance(vars, tuple):
raise TypeError("vars must be a tuple of variables")
self.lhs = lhs
if callable(rhs):
self.subs = rhs
else:
self.subs = self._apply
self.rhs = rhs
self._varlist = [t for t in Traverser(lhs) if t in vars]
# Reduce vars down to just variables found in lhs
self.vars = tuple(sorted(set(self._varlist)))
def _apply(self, sub_dict):
term = self.rhs
for key, val in sub_dict.items():
term = subs(term, key, val)
return term
def __str__(self):
return "RewriteRule({0}, {1}, {2})".format(self.lhs, self.rhs,
self.vars)
def __repr__(self):
return str(self)
class RuleSet(object):
"""A set of rewrite rules.
Forms a structure for fast rewriting over a set of rewrite rules. This
allows for syntactic matching of terms to patterns for many patterns at
the same time.
Examples
--------
>>> def f(*args): pass
>>> def g(*args): pass
>>> def h(*args): pass
>>> from operator import add
>>> rs = RuleSet( # Make RuleSet with two Rules
... RewriteRule((add, 'x', 0), 'x', ('x',)),
... RewriteRule((f, (g, 'x'), 'y'),
... (h, 'x', 'y'),
... ('x', 'y')))
>>> rs.rewrite((add, 2, 0)) # Apply ruleset to single task
2
>>> rs.rewrite((f, (g, 'a', 3))) # doctest: +SKIP
(h, 'a', 3)
>>> dsk = {'a': (add, 2, 0), # Apply ruleset to full dask graph
... 'b': (f, (g, 'a', 3))}
>>> from toolz import valmap
>>> valmap(rs.rewrite, dsk) # doctest: +SKIP
{'a': 2,
'b': (h, 'a', 3)}
Attributes
----------
rules : list
A list of `RewriteRule`s included in the `RuleSet`.
"""
def __init__(self, *rules):
"""Create a `RuleSet` for a number of rules
Parameters
----------
rules
One or more instances of RewriteRule
"""
self._net = Node()
self.rules = []
for p in rules:
self.add(p)
def add(self, rule):
"""Add a rule to the RuleSet.
Parameters
----------
rule : RewriteRule
"""
if not isinstance(rule, RewriteRule):
raise TypeError("rule must be instance of RewriteRule")
vars = rule.vars
curr_node = self._net
ind = len(self.rules)
# List of variables, in order they appear in the POT of the term
for t in Traverser(rule.lhs):
prev_node = curr_node
if t in vars:
t = VAR
if t in curr_node.edges:
curr_node = curr_node.edges[t]
else:
curr_node.edges[t] = Node()
curr_node = curr_node.edges[t]
# We've reached a leaf node. Add the term index to this leaf.
prev_node.edges[t].patterns.append(ind)
self.rules.append(rule)
def iter_matches(self, term):
"""A generator that lazily finds matchings for term from the RuleSet.
Parameters
----------
term : task
Yields
------
Tuples of `(rule, subs)`, where `rule` is the rewrite rule being
matched, and `subs` is a dictionary mapping the variables in the lhs
of the rule to their matching values in the term."""
S = Traverser(term)
for m, syms in _match(S, self._net):
for i in m:
rule = self.rules[i]
subs = _process_match(rule, syms)
if subs is not None:
yield rule, subs
def _rewrite(self, term):
"""Apply the rewrite rules in RuleSet to top level of term"""
for rule, sd in self.iter_matches(term):
# We use for (...) because it's fast in all cases for getting the
# first element from the match iterator. As we only want that
# element, we break here
term = rule.subs(sd)
break
return term
def rewrite(self, task, strategy="bottom_up"):
"""Apply the `RuleSet` to `task`.
This applies the most specific matching rule in the RuleSet to the
task, using the provided strategy.
Parameters
----------
term: a task
The task to be rewritten
strategy: str, optional
The rewriting strategy to use. Options are "bottom_up" (default),
or "top_level".
Examples
--------
Suppose there was a function `add` that returned the sum of 2 numbers,
and another function `double` that returned twice its input:
>>> add = lambda x, y: x + y
>>> double = lambda x: 2*x
Now suppose `double` was *significantly* faster than `add`, so
you'd like to replace all expressions `(add, x, x)` with `(double,
x)`, where `x` is a variable. This can be expressed as a rewrite rule:
>>> rule = RewriteRule((add, 'x', 'x'), (double, 'x'), ('x',))
>>> rs = RuleSet(rule)
This can then be applied to terms to perform the rewriting:
>>> term = (add, (add, 2, 2), (add, 2, 2))
>>> rs.rewrite(term) # doctest: +SKIP
(double, (double, 2))
If we only wanted to apply this to the top level of the term, the
`strategy` kwarg can be set to "top_level".
        >>> rs.rewrite(term, strategy="top_level") # doctest: +SKIP
(double, (add, 2, 2))
"""
return strategies[strategy](self, task)
def _top_level(net, term):
return net._rewrite(term)
def _bottom_up(net, term):
if istask(term):
term = (head(term),) + tuple(_bottom_up(net, t) for t in args(term))
elif isinstance(term, list):
term = [_bottom_up(net, t) for t in args(term)]
return net._rewrite(term)
strategies = {'top_level': _top_level,
'bottom_up': _bottom_up}
def _match(S, N):
"""Structural matching of term S to discrimination net node N."""
stack = deque()
restore_state_flag = False
# matches are stored in a tuple, because all mutations result in a copy,
# preventing operations from changing matches stored on the stack.
matches = ()
while True:
if S.current is END:
yield N.patterns, matches
try:
# This try-except block is to catch hashing errors from un-hashable
# types. This allows for variables to be matched with un-hashable
# objects.
n = N.edges.get(S.current, None)
if n and not restore_state_flag:
stack.append((S.copy(), N, matches))
N = n
S.next()
continue
except TypeError:
pass
n = N.edges.get(VAR, None)
if n:
restore_state_flag = False
matches = matches + (S.term,)
S.skip()
N = n
continue
try:
# Backtrack here
(S, N, matches) = stack.pop()
restore_state_flag = True
        except IndexError:
return
def _process_match(rule, syms):
"""Process a match to determine if it is correct, and to find the correct
substitution that will convert the term into the pattern.
Parameters
----------
rule : RewriteRule
syms : iterable
Iterable of subterms that match a corresponding variable.
Returns
-------
A dictionary of {vars : subterms} describing the substitution to make the
pattern equivalent with the term. Returns `None` if the match is
invalid."""
subs = {}
varlist = rule._varlist
    if len(varlist) != len(syms):
raise RuntimeError("length of varlist doesn't match length of syms.")
for v, s in zip(varlist, syms):
if v in subs and subs[v] != s:
return None
else:
subs[v] = s
return subs
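
# --- Illustrative sketch (not part of the original module) ---
# A minimal end-to-end use of RewriteRule and RuleSet as defined above:
# simplify ``(add, x, 0)`` to ``x`` throughout a small task graph. Only names
# defined in this module plus ``operator.add`` are used.
def _demo_ruleset():
    from operator import add
    rule = RewriteRule((add, 'x', 0), 'x', ('x',))
    rs = RuleSet(rule)
    dsk = {'a': (add, 1, 0), 'b': (add, (add, 2, 0), 3)}
    # The default "bottom_up" strategy also simplifies nested occurrences.
    # Expected result: {'a': 1, 'b': (add, 2, 3)}
    return {k: rs.rewrite(v) for k, v in dsk.items()}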
|
|
import json
import os
import threading
import time
import traceback
import uuid
from urllib.parse import urljoin
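# errors and marionette are module globals that are filled in lazily by
# do_delayed_imports(), so merely importing this module does not require
# marionette_driver to be installed.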
errors = None
marionette = None
pytestrunner = None
here = os.path.dirname(__file__)
from .base import (CallbackHandler,
CrashtestExecutor,
RefTestExecutor,
RefTestImplementation,
TestharnessExecutor,
TimedRunner,
WdspecExecutor,
WdspecProtocol,
get_pages,
strip_server)
from .protocol import (ActionSequenceProtocolPart,
AssertsProtocolPart,
BaseProtocolPart,
TestharnessProtocolPart,
PrefsProtocolPart,
Protocol,
StorageProtocolPart,
SelectorProtocolPart,
ClickProtocolPart,
CookiesProtocolPart,
SendKeysProtocolPart,
TestDriverProtocolPart,
CoverageProtocolPart,
GenerateTestReportProtocolPart,
VirtualAuthenticatorProtocolPart,
SetPermissionProtocolPart,
PrintProtocolPart,
DebugProtocolPart)
def do_delayed_imports():
global errors, marionette, Addons
from marionette_driver import marionette, errors
from marionette_driver.addons import Addons
def _switch_to_window(marionette, handle):
"""Switch to the specified window; subsequent commands will be
directed at the new window.
This is a workaround for issue 24924[0]; marionettedriver 3.1.0 dropped the
'name' parameter from its switch_to_window command, but it is still needed
for at least Firefox 79.
[0]: https://github.com/web-platform-tests/wpt/issues/24924
:param marionette: The Marionette instance
:param handle: The id of the window to switch to.
"""
marionette._send_message("WebDriver:SwitchToWindow",
{"handle": handle, "name": handle, "focus": True})
marionette.window = handle
class MarionetteBaseProtocolPart(BaseProtocolPart):
def __init__(self, parent):
super(MarionetteBaseProtocolPart, self).__init__(parent)
self.timeout = None
def setup(self):
self.marionette = self.parent.marionette
def execute_script(self, script, asynchronous=False):
method = self.marionette.execute_async_script if asynchronous else self.marionette.execute_script
return method(script, new_sandbox=False, sandbox=None)
def set_timeout(self, timeout):
"""Set the Marionette script timeout.
:param timeout: Script timeout in seconds
"""
if timeout != self.timeout:
self.marionette.timeout.script = timeout
self.timeout = timeout
@property
def current_window(self):
return self.marionette.current_window_handle
def set_window(self, handle):
_switch_to_window(self.marionette, handle)
def window_handles(self):
return self.marionette.window_handles
def load(self, url):
self.marionette.navigate(url)
def wait(self):
try:
socket_timeout = self.marionette.client.socket_timeout
except AttributeError:
# This can happen if there was a crash
return
if socket_timeout:
try:
self.marionette.timeout.script = socket_timeout / 2
except IOError:
self.logger.debug("Socket closed")
return
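        # Block in the runner window until the harness dispatches a
        # "__test_restart" event; any of the exceptions below means the window
        # went away, the script timed out, or the connection was lost.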
while True:
try:
return self.marionette.execute_async_script("""let callback = arguments[arguments.length - 1];
addEventListener("__test_restart", e => {e.preventDefault(); callback(true)})""")
except errors.NoSuchWindowException:
# The window closed
break
except errors.ScriptTimeoutException:
self.logger.debug("Script timed out")
pass
except errors.JavascriptException as e:
# This can happen if we navigate, but just keep going
self.logger.debug(e)
pass
except IOError:
self.logger.debug("Socket closed")
break
except Exception:
self.logger.warning(traceback.format_exc())
break
return False
class MarionetteTestharnessProtocolPart(TestharnessProtocolPart):
def __init__(self, parent):
super(MarionetteTestharnessProtocolPart, self).__init__(parent)
self.runner_handle = None
with open(os.path.join(here, "runner.js")) as f:
self.runner_script = f.read()
with open(os.path.join(here, "window-loaded.js")) as f:
self.window_loaded_script = f.read()
def setup(self):
self.marionette = self.parent.marionette
def load_runner(self, url_protocol):
# Check if we previously had a test window open, and if we did make sure it's closed
if self.runner_handle:
self._close_windows()
url = urljoin(self.parent.executor.server_url(url_protocol), "/testharness_runner.html")
self.logger.debug("Loading %s" % url)
try:
self.dismiss_alert(lambda: self.marionette.navigate(url))
except Exception:
self.logger.critical(
"Loading initial page %s failed. Ensure that the "
"there are no other programs bound to this port and "
"that your firewall rules or network setup does not "
"prevent access.\n%s" % (url, traceback.format_exc()))
raise
self.runner_handle = self.marionette.current_window_handle
format_map = {"title": threading.current_thread().name.replace("'", '"')}
self.parent.base.execute_script(self.runner_script % format_map)
def _close_windows(self):
handles = self.marionette.window_handles
runner_handle = None
try:
handles.remove(self.runner_handle)
runner_handle = self.runner_handle
except ValueError:
# The runner window probably changed id but we can restore it
# This isn't supposed to happen, but marionette ids are not yet stable
# We assume that the first handle returned corresponds to the runner,
# but it hopefully doesn't matter too much if that assumption is
# wrong since we reload the runner in that tab anyway.
runner_handle = handles.pop(0)
self.logger.info("Changing harness_window to %s" % runner_handle)
for handle in handles:
try:
self.logger.info("Closing window %s" % handle)
_switch_to_window(self.marionette, handle)
self.dismiss_alert(lambda: self.marionette.close())
except errors.NoSuchWindowException:
# We might have raced with the previous test to close this
# window, skip it.
pass
_switch_to_window(self.marionette, runner_handle)
return runner_handle
def close_old_windows(self, url_protocol):
runner_handle = self._close_windows()
if runner_handle != self.runner_handle:
self.load_runner(url_protocol)
return self.runner_handle
def dismiss_alert(self, f):
while True:
try:
f()
except errors.UnexpectedAlertOpen:
alert = self.marionette.switch_to_alert()
try:
alert.dismiss()
except errors.NoAlertPresentException:
pass
else:
break
def get_test_window(self, window_id, parent, timeout=5):
"""Find the test window amongst all the open windows.
This is assumed to be either the named window or the one after the parent in the list of
window handles
:param window_id: The DOM name of the Window
:param parent: The handle of the runner window
:param timeout: The time in seconds to wait for the window to appear. This is because in
some implementations there's a race between calling window.open and the
window being added to the list of WebDriver accessible windows."""
test_window = None
end_time = time.time() + timeout
while time.time() < end_time:
if window_id:
try:
# Try this, it's in Level 1 but nothing supports it yet
                    win_s = self.parent.base.execute_script("return window['%s'];" % window_id)
win_obj = json.loads(win_s)
test_window = win_obj["window-fcc6-11e5-b4f8-330a88ab9d7f"]
except Exception:
pass
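            # Fall back to diffing the handle list against the runner window:
            # with exactly two handles the non-parent one must be the test
            # window; otherwise assume it is the handle right after the parent.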
if test_window is None:
handles = self.marionette.window_handles
if len(handles) == 2:
test_window = next(iter(set(handles) - {parent}))
elif len(handles) > 2 and handles[0] == parent:
# Hope the first one here is the test window
test_window = handles[1]
if test_window is not None:
assert test_window != parent
return test_window
time.sleep(0.1)
raise Exception("unable to find test window")
def test_window_loaded(self):
"""Wait until the page in the new window has been loaded.
        Ignores JavaScript exceptions that are thrown when the document has
        been unloaded due to a process change.
"""
while True:
try:
self.parent.base.execute_script(self.window_loaded_script, asynchronous=True)
break
except errors.JavascriptException:
pass
class MarionettePrefsProtocolPart(PrefsProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def set(self, name, value):
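        # Pref values arrive as strings; turn them into the JavaScript literal
        # embedded in the chrome script below: booleans are lowercased,
        # integers pass through unquoted, and anything else is quoted.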
if value.lower() not in ("true", "false"):
try:
int(value)
except ValueError:
value = "'%s'" % value
else:
value = value.lower()
self.logger.info("Setting pref %s (%s)" % (name, value))
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
let type = prefInterface.getPrefType(pref);
let value = %s;
switch(type) {
case prefInterface.PREF_STRING:
prefInterface.setCharPref(pref, value);
break;
case prefInterface.PREF_BOOL:
prefInterface.setBoolPref(pref, value);
break;
case prefInterface.PREF_INT:
prefInterface.setIntPref(pref, value);
break;
case prefInterface.PREF_INVALID:
// Pref doesn't seem to be defined already; guess at the
// right way to set it based on the type of value we have.
switch (typeof value) {
case "boolean":
prefInterface.setBoolPref(pref, value);
break;
case "string":
prefInterface.setCharPref(pref, value);
break;
case "number":
prefInterface.setIntPref(pref, value);
break;
default:
throw new Error("Unknown pref value type: " + (typeof value));
}
break;
default:
throw new Error("Unknown pref type " + type);
}
""" % (name, value)
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
def clear(self, name):
self.logger.info("Clearing pref %s" % (name))
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
prefInterface.clearUserPref(pref);
""" % name
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
def get(self, name):
script = """
let prefInterface = Components.classes["@mozilla.org/preferences-service;1"]
.getService(Components.interfaces.nsIPrefBranch);
let pref = '%s';
let type = prefInterface.getPrefType(pref);
switch(type) {
case prefInterface.PREF_STRING:
return prefInterface.getCharPref(pref);
case prefInterface.PREF_BOOL:
return prefInterface.getBoolPref(pref);
case prefInterface.PREF_INT:
return prefInterface.getIntPref(pref);
case prefInterface.PREF_INVALID:
return null;
}
""" % name
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
            return self.marionette.execute_script(script)
class MarionetteStorageProtocolPart(StorageProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def clear_origin(self, url):
self.logger.info("Clearing origin %s" % (url))
script = """
let url = '%s';
let uri = Components.classes["@mozilla.org/network/io-service;1"]
.getService(Ci.nsIIOService)
.newURI(url);
let ssm = Components.classes["@mozilla.org/scriptsecuritymanager;1"]
.getService(Ci.nsIScriptSecurityManager);
let principal = ssm.createContentPrincipal(uri, {});
let qms = Components.classes["@mozilla.org/dom/quota-manager-service;1"]
.getService(Components.interfaces.nsIQuotaManagerService);
qms.clearStoragesForPrincipal(principal, "default", null, true);
""" % url
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.marionette.execute_script(script)
class MarionetteAssertsProtocolPart(AssertsProtocolPart):
def setup(self):
self.assert_count = {"chrome": 0, "content": 0}
self.chrome_assert_count = 0
self.marionette = self.parent.marionette
def get(self):
script = """
debug = Cc["@mozilla.org/xpcom/debug;1"].getService(Ci.nsIDebug2);
if (debug.isDebugBuild) {
return debug.assertionCount;
}
return 0;
"""
def get_count(context, **kwargs):
try:
context_count = self.marionette.execute_script(script, **kwargs)
if context_count:
self.parent.logger.info("Got %s assert count %s" % (context, context_count))
test_count = context_count - self.assert_count[context]
self.assert_count[context] = context_count
return test_count
except errors.NoSuchWindowException:
# If the window was already closed
self.parent.logger.warning("Failed to get assertion count; window was closed")
except (errors.MarionetteException, IOError):
# This usually happens if the process crashed
pass
counts = []
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
counts.append(get_count("chrome"))
if self.parent.e10s:
counts.append(get_count("content", sandbox="system"))
counts = [item for item in counts if item is not None]
if not counts:
return None
return sum(counts)
class MarionetteSelectorProtocolPart(SelectorProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def elements_by_selector(self, selector):
return self.marionette.find_elements("css selector", selector)
class MarionetteClickProtocolPart(ClickProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def element(self, element):
return element.click()
class MarionetteCookiesProtocolPart(CookiesProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def delete_all_cookies(self):
self.logger.info("Deleting all cookies")
return self.marionette.delete_all_cookies()
class MarionetteSendKeysProtocolPart(SendKeysProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def send_keys(self, element, keys):
return element.send_keys(keys)
class MarionetteActionSequenceProtocolPart(ActionSequenceProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def send_actions(self, actions):
actions = self.marionette._to_json(actions)
self.logger.info(actions)
self.marionette._send_message("WebDriver:PerformActions", actions)
class MarionetteTestDriverProtocolPart(TestDriverProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def send_message(self, cmd_id, message_type, status, message=None):
obj = {
"cmd_id": cmd_id,
"type": "testdriver-%s" % str(message_type),
"status": str(status)
}
if message:
obj["message"] = str(message)
self.parent.base.execute_script("window.postMessage(%s, '*')" % json.dumps(obj))
def _switch_to_frame(self, index_or_elem):
try:
self.marionette.switch_to_frame(index_or_elem)
except (errors.NoSuchFrameException,
errors.StaleElementException) as e:
raise ValueError from e
def _switch_to_parent_frame(self):
self.marionette.switch_to_parent_frame()
class MarionetteCoverageProtocolPart(CoverageProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
if not self.parent.ccov:
self.is_enabled = False
return
script = """
const {PerTestCoverageUtils} = ChromeUtils.import("chrome://remote/content/marionette/PerTestCoverageUtils.jsm");
return PerTestCoverageUtils.enabled;
"""
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.is_enabled = self.marionette.execute_script(script)
def reset(self):
script = """
var callback = arguments[arguments.length - 1];
const {PerTestCoverageUtils} = ChromeUtils.import("chrome://remote/content/marionette/PerTestCoverageUtils.jsm");
PerTestCoverageUtils.beforeTest().then(callback, callback);
"""
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
try:
error = self.marionette.execute_async_script(script)
if error is not None:
raise Exception('Failure while resetting counters: %s' % json.dumps(error))
except (errors.MarionetteException, IOError):
# This usually happens if the process crashed
pass
def dump(self):
if len(self.marionette.window_handles):
handle = self.marionette.window_handles[0]
_switch_to_window(self.marionette, handle)
script = """
var callback = arguments[arguments.length - 1];
const {PerTestCoverageUtils} = ChromeUtils.import("chrome://remote/content/marionette/PerTestCoverageUtils.jsm");
PerTestCoverageUtils.afterTest().then(callback, callback);
"""
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
try:
error = self.marionette.execute_async_script(script)
if error is not None:
raise Exception('Failure while dumping counters: %s' % json.dumps(error))
except (errors.MarionetteException, IOError):
# This usually happens if the process crashed
pass
class MarionetteGenerateTestReportProtocolPart(GenerateTestReportProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def generate_test_report(self, config):
raise NotImplementedError("generate_test_report not yet implemented")
class MarionetteVirtualAuthenticatorProtocolPart(VirtualAuthenticatorProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def add_virtual_authenticator(self, config):
raise NotImplementedError("add_virtual_authenticator not yet implemented")
def remove_virtual_authenticator(self, authenticator_id):
raise NotImplementedError("remove_virtual_authenticator not yet implemented")
def add_credential(self, authenticator_id, credential):
raise NotImplementedError("add_credential not yet implemented")
def get_credentials(self, authenticator_id):
raise NotImplementedError("get_credentials not yet implemented")
def remove_credential(self, authenticator_id, credential_id):
raise NotImplementedError("remove_credential not yet implemented")
def remove_all_credentials(self, authenticator_id):
raise NotImplementedError("remove_all_credentials not yet implemented")
def set_user_verified(self, authenticator_id, uv):
raise NotImplementedError("set_user_verified not yet implemented")
class MarionetteSetPermissionProtocolPart(SetPermissionProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def set_permission(self, descriptor, state, one_realm):
body = {
"descriptor": descriptor,
"state": state,
}
if one_realm is not None:
body["oneRealm"] = one_realm
try:
self.marionette._send_message("WebDriver:SetPermission", body)
except errors.UnsupportedOperationException:
raise NotImplementedError("set_permission not yet implemented")
class MarionettePrintProtocolPart(PrintProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
self.runner_handle = None
def load_runner(self):
url = urljoin(self.parent.executor.server_url("http"), "/print_reftest_runner.html")
self.logger.debug("Loading %s" % url)
try:
self.marionette.navigate(url)
        except Exception:
            self.logger.critical(
                "Loading initial page %s failed. Ensure that "
                "there are no other programs bound to this port and "
                "that your firewall rules or network setup does not "
                "prevent access.\n%s" % (url, traceback.format_exc()))
raise
self.runner_handle = self.marionette.current_window_handle
def render_as_pdf(self, width, height):
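        # WebDriver:Print takes its lengths in centimetres; 0.5 * 2.54 is a
        # half-inch margin on each side.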
margin = 0.5 * 2.54
body = {
"page": {
"width": width,
"height": height
},
"margin": {
"left": margin,
"right": margin,
"top": margin,
"bottom": margin,
},
"shrinkToFit": False,
"printBackground": True,
}
return self.marionette._send_message("WebDriver:Print", body, key="value")
def pdf_to_png(self, pdf_base64, page_ranges):
handle = self.marionette.current_window_handle
_switch_to_window(self.marionette, self.runner_handle)
try:
rv = self.marionette.execute_async_script("""
let callback = arguments[arguments.length - 1];
render('%s').then(result => callback(result))""" % pdf_base64, new_sandbox=False, sandbox=None)
page_numbers = get_pages(page_ranges, len(rv))
rv = [item for i, item in enumerate(rv) if i + 1 in page_numbers]
return rv
finally:
_switch_to_window(self.marionette, handle)
class MarionetteDebugProtocolPart(DebugProtocolPart):
def setup(self):
self.marionette = self.parent.marionette
def load_devtools(self):
with self.marionette.using_context(self.marionette.CONTEXT_CHROME):
self.parent.base.execute_script("""
const { require } = ChromeUtils.import("resource://devtools/shared/Loader.jsm");
const { TargetFactory } = require("devtools/client/framework/target");
const { gDevTools } = require("devtools/client/framework/devtools");
const callback = arguments[arguments.length - 1];
async function loadDevTools() {
const target = await TargetFactory.forTab(window.gBrowser.selectedTab);
await gDevTools.showToolbox(target, "webconsole", "window");
}
loadDevTools().catch(() => dump("Devtools failed to load"))
.then(callback);
""", asynchronous=True)
class MarionetteProtocol(Protocol):
implements = [MarionetteBaseProtocolPart,
MarionetteTestharnessProtocolPart,
MarionettePrefsProtocolPart,
MarionetteStorageProtocolPart,
MarionetteSelectorProtocolPart,
MarionetteClickProtocolPart,
MarionetteCookiesProtocolPart,
MarionetteSendKeysProtocolPart,
MarionetteActionSequenceProtocolPart,
MarionetteTestDriverProtocolPart,
MarionetteAssertsProtocolPart,
MarionetteCoverageProtocolPart,
MarionetteGenerateTestReportProtocolPart,
MarionetteVirtualAuthenticatorProtocolPart,
MarionetteSetPermissionProtocolPart,
MarionettePrintProtocolPart,
MarionetteDebugProtocolPart]
def __init__(self, executor, browser, capabilities=None, timeout_multiplier=1, e10s=True, ccov=False):
do_delayed_imports()
super(MarionetteProtocol, self).__init__(executor, browser)
self.marionette = None
self.marionette_port = browser.marionette_port
self.capabilities = capabilities
self.timeout_multiplier = timeout_multiplier
self.runner_handle = None
self.e10s = e10s
self.ccov = ccov
def connect(self):
self.logger.debug("Connecting to Marionette on port %i" % self.marionette_port)
startup_timeout = marionette.Marionette.DEFAULT_STARTUP_TIMEOUT * self.timeout_multiplier
self.marionette = marionette.Marionette(host='127.0.0.1',
port=self.marionette_port,
socket_timeout=None,
startup_timeout=startup_timeout)
self.logger.debug("Waiting for Marionette connection")
while True:
try:
self.marionette.raise_for_port()
break
except IOError:
# When running in a debugger wait indefinitely for Firefox to start
if self.executor.debug_info is None:
raise
self.logger.debug("Starting Marionette session")
self.marionette.start_session(self.capabilities)
self.logger.debug("Marionette session started")
def after_connect(self):
pass
def teardown(self):
if self.marionette and self.marionette.session_id:
try:
self.marionette._request_in_app_shutdown()
self.marionette.delete_session(send_request=False)
self.marionette.cleanup()
except Exception:
# This is typically because the session never started
pass
if self.marionette is not None:
self.marionette = None
super(MarionetteProtocol, self).teardown()
def is_alive(self):
try:
self.marionette.current_window_handle
except Exception:
return False
return True
def on_environment_change(self, old_environment, new_environment):
        # Unset all the old prefs
for name in old_environment.get("prefs", {}).keys():
value = self.executor.original_pref_values[name]
if value is None:
self.prefs.clear(name)
else:
self.prefs.set(name, value)
for name, value in new_environment.get("prefs", {}).items():
self.executor.original_pref_values[name] = self.prefs.get(name)
self.prefs.set(name, value)
class ExecuteAsyncScriptRun(TimedRunner):
def set_timeout(self):
timeout = self.timeout
try:
if timeout is not None:
self.protocol.base.set_timeout(timeout + self.extra_timeout)
else:
# We just want it to never time out, really, but marionette doesn't
# make that possible. It also seems to time out immediately if the
# timeout is set too high. This works at least.
self.protocol.base.set_timeout(2**28 - 1)
except IOError:
msg = "Lost marionette connection before starting test"
self.logger.error(msg)
return ("INTERNAL-ERROR", msg)
def before_run(self):
index = self.url.rfind("/storage/")
if index != -1:
# Clear storage
self.protocol.storage.clear_origin(self.url)
def run_func(self):
try:
self.result = True, self.func(self.protocol, self.url, self.timeout)
except errors.ScriptTimeoutException:
self.logger.debug("Got a marionette timeout")
self.result = False, ("EXTERNAL-TIMEOUT", None)
except IOError:
# This can happen on a crash
# Also, should check after the test if the firefox process is still running
# and otherwise ignore any other result and set it to crash
self.logger.info("IOError on command, setting status to CRASH")
self.result = False, ("CRASH", None)
except errors.NoSuchWindowException:
self.logger.info("NoSuchWindowException on command, setting status to CRASH")
self.result = False, ("CRASH", None)
except Exception as e:
if isinstance(e, errors.JavascriptException) and str(e).startswith("Document was unloaded"):
message = "Document unloaded; maybe test navigated the top-level-browsing context?"
else:
message = getattr(e, "message", "")
if message:
message += "\n"
message += traceback.format_exc()
self.logger.warning(traceback.format_exc())
self.result = False, ("INTERNAL-ERROR", message)
finally:
self.result_flag.set()
class MarionetteTestharnessExecutor(TestharnessExecutor):
supports_testdriver = True
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
close_after_done=True, debug_info=None, capabilities=None,
debug=False, ccov=False, debug_test=False, **kwargs):
"""Marionette-based executor for testharness.js tests"""
TestharnessExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self,
browser,
capabilities,
timeout_multiplier,
kwargs["e10s"],
ccov)
with open(os.path.join(here, "testharness_webdriver_resume.js")) as f:
self.script_resume = f.read()
self.close_after_done = close_after_done
self.window_id = str(uuid.uuid4())
self.debug = debug
self.debug_test = debug_test
self.install_extensions = browser.extensions
self.original_pref_values = {}
if marionette is None:
do_delayed_imports()
def setup(self, runner):
super(MarionetteTestharnessExecutor, self).setup(runner)
for extension_path in self.install_extensions:
self.logger.info("Installing extension from %s" % extension_path)
addons = Addons(self.protocol.marionette)
addons.install(extension_path)
self.protocol.testharness.load_runner(self.last_environment["protocol"])
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
if new_environment["protocol"] != self.last_environment["protocol"]:
self.protocol.testharness.load_runner(new_environment["protocol"])
def do_test(self, test):
timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
else None)
success, data = ExecuteAsyncScriptRun(self.logger,
self.do_testharness,
self.protocol,
self.test_url(test),
timeout,
self.extra_timeout).run()
# The format of data depends on whether the test ran to completion or not
# For asserts we only care about the fact that if it didn't complete, the
# status is in the first field.
status = None
if not success:
status = data[0]
extra = None
if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
assertion_count = self.protocol.asserts.get()
if assertion_count is not None:
extra = {"assertion_count": assertion_count}
if success:
return self.convert_result(test, data, extra=extra)
return (test.result_cls(extra=extra, *data), [])
def do_testharness(self, protocol, url, timeout):
parent_window = protocol.testharness.close_old_windows(protocol)
if self.protocol.coverage.is_enabled:
self.protocol.coverage.reset()
format_map = {"url": strip_server(url)}
protocol.base.execute_script("window.open('about:blank', '%s', 'noopener')" % self.window_id)
test_window = protocol.testharness.get_test_window(self.window_id, parent_window,
timeout=10 * self.timeout_multiplier)
self.protocol.base.set_window(test_window)
protocol.testharness.test_window_loaded()
if self.debug_test:
self.protocol.debug.load_devtools()
handler = CallbackHandler(self.logger, protocol, test_window)
protocol.marionette.navigate(url)
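        # Pump the harness: each pass re-runs the resume script, which blocks
        # until the page posts either a testdriver action or the final result;
        # the callback handler reports when the test is done.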
while True:
result = protocol.base.execute_script(
self.script_resume % format_map, asynchronous=True)
if result is None:
                # This can happen if we get a content process crash
return None
done, rv = handler(result)
if done:
break
if self.protocol.coverage.is_enabled:
self.protocol.coverage.dump()
return rv
class MarionetteRefTestExecutor(RefTestExecutor):
is_print = False
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, reftest_internal=False,
reftest_screenshot="unexpected", ccov=False,
group_metadata=None, capabilities=None, debug=False,
browser_version=None, debug_test=False, **kwargs):
"""Marionette-based executor for reftests"""
RefTestExecutor.__init__(self,
logger,
browser,
server_config,
screenshot_cache=screenshot_cache,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self, browser, capabilities,
timeout_multiplier, kwargs["e10s"],
ccov)
self.implementation = self.get_implementation(reftest_internal)
self.implementation_kwargs = {}
if reftest_internal:
self.implementation_kwargs["screenshot"] = reftest_screenshot
self.implementation_kwargs["chrome_scope"] = (browser_version is not None and
int(browser_version.split(".")[0]) < 82)
self.close_after_done = close_after_done
self.has_window = False
self.original_pref_values = {}
self.group_metadata = group_metadata
self.debug = debug
self.debug_test = debug_test
with open(os.path.join(here, "reftest.js")) as f:
self.script = f.read()
with open(os.path.join(here, "test-wait.js")) as f:
self.wait_script = f.read() % {"classname": "reftest-wait"}
def get_implementation(self, reftest_internal):
return (InternalRefTestImplementation if reftest_internal
else RefTestImplementation)(self)
def setup(self, runner):
super(MarionetteRefTestExecutor, self).setup(runner)
self.implementation.setup(**self.implementation_kwargs)
def teardown(self):
try:
self.implementation.teardown()
if self.protocol.marionette and self.protocol.marionette.session_id:
handles = self.protocol.marionette.window_handles
if handles:
_switch_to_window(self.protocol.marionette, handles[0])
super(MarionetteRefTestExecutor, self).teardown()
except Exception:
# Ignore errors during teardown
self.logger.warning("Exception during reftest teardown:\n%s" %
traceback.format_exc())
def reset(self):
self.implementation.reset(**self.implementation_kwargs)
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
def do_test(self, test):
if not isinstance(self.implementation, InternalRefTestImplementation):
if self.close_after_done and self.has_window:
self.protocol.marionette.close()
_switch_to_window(self.protocol.marionette,
self.protocol.marionette.window_handles[-1])
self.has_window = False
if not self.has_window:
self.protocol.base.execute_script(self.script)
self.protocol.base.set_window(self.protocol.marionette.window_handles[-1])
self.has_window = True
self.protocol.testharness.test_window_loaded()
if self.protocol.coverage.is_enabled:
self.protocol.coverage.reset()
result = self.implementation.run_test(test)
if self.protocol.coverage.is_enabled:
self.protocol.coverage.dump()
if self.debug:
assertion_count = self.protocol.asserts.get()
if "extra" not in result:
result["extra"] = {}
if assertion_count is not None:
result["extra"]["assertion_count"] = assertion_count
if self.debug_test and result["status"] in ["PASS", "FAIL", "ERROR"] and "extra" in result:
self.protocol.base.set_window(self.protocol.base.window_handles()[0])
self.protocol.debug.load_reftest_analyzer(test, result)
return self.convert_result(test, result)
def screenshot(self, test, viewport_size, dpi, page_ranges):
# https://github.com/web-platform-tests/wpt/issues/7135
assert viewport_size is None
assert dpi is None
timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
test_url = self.test_url(test)
return ExecuteAsyncScriptRun(self.logger,
self._screenshot,
self.protocol,
test_url,
timeout,
self.extra_timeout).run()
def _screenshot(self, protocol, url, timeout):
protocol.marionette.navigate(url)
protocol.base.execute_script(self.wait_script, asynchronous=True)
screenshot = protocol.marionette.screenshot(full=False)
# strip off the data:img/png, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshot = screenshot.split(",", 1)[1]
return screenshot
class InternalRefTestImplementation(RefTestImplementation):
def __init__(self, executor):
self.timeout_multiplier = executor.timeout_multiplier
self.executor = executor
self.chrome_scope = False
@property
def logger(self):
return self.executor.logger
def setup(self, screenshot="unexpected", chrome_scope=False):
data = {"screenshot": screenshot, "isPrint": self.executor.is_print}
if self.executor.group_metadata is not None:
data["urlCount"] = {urljoin(self.executor.server_url(key[0]), key[1]):value
for key, value in self.executor.group_metadata.get("url_count", {}).items()
if value > 1}
self.chrome_scope = chrome_scope
if chrome_scope:
self.logger.debug("Using marionette Chrome scope for reftests")
self.executor.protocol.marionette.set_context(self.executor.protocol.marionette.CONTEXT_CHROME)
self.executor.protocol.marionette._send_message("reftest:setup", data)
def reset(self, **kwargs):
        # this is obviously wrong; it shouldn't be a no-op
# see https://github.com/web-platform-tests/wpt/issues/15604
pass
def run_test(self, test):
references = self.get_references(test, test)
timeout = (test.timeout * 1000) * self.timeout_multiplier
rv = self.executor.protocol.marionette._send_message("reftest:run",
{"test": self.executor.test_url(test),
"references": references,
"expected": test.expected(),
"timeout": timeout,
"width": 800,
"height": 600,
"pageRanges": test.page_ranges})["value"]
return rv
def get_references(self, root_test, node):
rv = []
for item, relation in node.references:
rv.append([self.executor.test_url(item), self.get_references(root_test, item), relation,
{"fuzzy": self.get_fuzzy(root_test, [node, item], relation)}])
return rv
def teardown(self):
try:
if self.executor.protocol.marionette and self.executor.protocol.marionette.session_id:
self.executor.protocol.marionette._send_message("reftest:teardown", {})
if self.chrome_scope:
self.executor.protocol.marionette.set_context(
self.executor.protocol.marionette.CONTEXT_CONTENT)
                # The reftest runner opens/closes a window with focus, so as
                # after closing a window we need to give a new window focus.
handles = self.executor.protocol.marionette.window_handles
if handles:
_switch_to_window(self.executor.protocol.marionette, handles[0])
except Exception:
# Ignore errors during teardown
self.logger.warning(traceback.format_exc())
class GeckoDriverProtocol(WdspecProtocol):
server_cls = None # To avoid circular imports we set this at runtime
class MarionetteWdspecExecutor(WdspecExecutor):
protocol_cls = GeckoDriverProtocol
def __init__(self, logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=1, capabilities=None,
debug_info=None, environ=None, stackfix_dir=None,
symbols_path=None, leak_report_file=None, asan=False,
group_metadata=None, browser_settings=None, **kwargs):
from ..browsers.firefox import GeckoDriverServer
super().__init__(logger, browser, server_config, webdriver_binary,
webdriver_args, timeout_multiplier=timeout_multiplier,
capabilities=capabilities, debug_info=debug_info,
environ=environ, **kwargs)
self.protocol_cls.server_cls = GeckoDriverServer
self.output_handler_kwargs = {"stackfix_dir": stackfix_dir,
"symbols_path": symbols_path,
"asan": asan,
"leak_report_file": leak_report_file}
self.output_handler_start_kwargs = {"group_metadata": group_metadata}
self.output_handler_start_kwargs.update(browser_settings)
class MarionetteCrashtestExecutor(CrashtestExecutor):
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
debug_info=None, capabilities=None, debug=False,
ccov=False, **kwargs):
"""Marionette-based executor for testharness.js tests"""
CrashtestExecutor.__init__(self, logger, browser, server_config,
timeout_multiplier=timeout_multiplier,
debug_info=debug_info)
self.protocol = MarionetteProtocol(self,
browser,
capabilities,
timeout_multiplier,
kwargs["e10s"],
ccov)
self.original_pref_values = {}
self.debug = debug
with open(os.path.join(here, "test-wait.js")) as f:
self.wait_script = f.read() % {"classname": "test-wait"}
if marionette is None:
do_delayed_imports()
def is_alive(self):
return self.protocol.is_alive()
def on_environment_change(self, new_environment):
self.protocol.on_environment_change(self.last_environment, new_environment)
def do_test(self, test):
timeout = (test.timeout * self.timeout_multiplier if self.debug_info is None
else None)
success, data = ExecuteAsyncScriptRun(self.logger,
self.do_crashtest,
self.protocol,
self.test_url(test),
timeout,
self.extra_timeout).run()
status = None
if not success:
status = data[0]
extra = None
if self.debug and (success or status not in ("CRASH", "INTERNAL-ERROR")):
assertion_count = self.protocol.asserts.get()
if assertion_count is not None:
extra = {"assertion_count": assertion_count}
if success:
return self.convert_result(test, data)
return (test.result_cls(extra=extra, *data), [])
def do_crashtest(self, protocol, url, timeout):
if self.protocol.coverage.is_enabled:
self.protocol.coverage.reset()
protocol.base.load(url)
protocol.base.execute_script(self.wait_script, asynchronous=True)
if self.protocol.coverage.is_enabled:
self.protocol.coverage.dump()
return {"status": "PASS",
"message": None}
class MarionettePrintRefTestExecutor(MarionetteRefTestExecutor):
is_print = True
def __init__(self, logger, browser, server_config, timeout_multiplier=1,
screenshot_cache=None, close_after_done=True,
debug_info=None, reftest_screenshot="unexpected", ccov=False,
group_metadata=None, capabilities=None, debug=False,
reftest_internal=False, **kwargs):
"""Marionette-based executor for reftests"""
MarionetteRefTestExecutor.__init__(self,
logger,
browser,
server_config,
timeout_multiplier=timeout_multiplier,
screenshot_cache=screenshot_cache,
close_after_done=close_after_done,
debug_info=debug_info,
reftest_screenshot=reftest_screenshot,
reftest_internal=reftest_internal,
ccov=ccov,
group_metadata=group_metadata,
capabilities=capabilities,
debug=debug,
**kwargs)
def setup(self, runner):
super(MarionettePrintRefTestExecutor, self).setup(runner)
if not isinstance(self.implementation, InternalRefTestImplementation):
self.protocol.pdf_print.load_runner()
def get_implementation(self, reftest_internal):
return (InternalRefTestImplementation if reftest_internal
else RefTestImplementation)(self)
def screenshot(self, test, viewport_size, dpi, page_ranges):
# https://github.com/web-platform-tests/wpt/issues/7140
assert dpi is None
self.viewport_size = viewport_size
timeout = self.timeout_multiplier * test.timeout if self.debug_info is None else None
test_url = self.test_url(test)
self.page_ranges = page_ranges.get(test.url)
return ExecuteAsyncScriptRun(self.logger,
self._render,
self.protocol,
test_url,
timeout,
self.extra_timeout).run()
def _render(self, protocol, url, timeout):
protocol.marionette.navigate(url)
protocol.base.execute_script(self.wait_script, asynchronous=True)
pdf = protocol.pdf_print.render_as_pdf(*self.viewport_size)
screenshots = protocol.pdf_print.pdf_to_png(pdf, self.page_ranges)
for i, screenshot in enumerate(screenshots):
# strip off the data:img/png, part of the url
if screenshot.startswith("data:image/png;base64,"):
screenshots[i] = screenshot.split(",", 1)[1]
return screenshots
|
|
import re
from django import forms
from django.template.loader import render_to_string
from django.conf import settings
from django.utils.translation import ugettext_lazy as _, ugettext
from django.utils.encoding import smart_unicode
from django.utils.hashcompat import sha_constructor
from pinax.core.utils import get_send_mail
send_mail = get_send_mail()
from django.contrib.auth import authenticate, login
from django.contrib.auth.models import User
from django.contrib.sites.models import Site
from emailconfirmation.models import EmailAddress
from account.models import Account
from timezones.forms import TimeZoneField
from account.models import PasswordReset
alnum_re = re.compile(r'^\w+$')
class LoginForm(forms.Form):
username = forms.CharField(label=_("Username"), max_length=30, widget=forms.TextInput())
password = forms.CharField(label=_("Password"), widget=forms.PasswordInput(render_value=False))
remember = forms.BooleanField(label=_("Remember Me"), help_text=_("If checked you will stay logged in for 3 weeks"), required=False)
user = None
def clean(self):
if self._errors:
return
user = authenticate(username=self.cleaned_data["username"], password=self.cleaned_data["password"])
if user:
if user.is_active:
self.user = user
else:
raise forms.ValidationError(_("This account is currently inactive."))
else:
raise forms.ValidationError(_("The username and/or password you specified are not correct."))
return self.cleaned_data
def login(self, request):
if self.is_valid():
login(request, self.user)
request.user.message_set.create(message=ugettext(u"Successfully logged in as %(username)s.") % {'username': self.user.username})
if self.cleaned_data['remember']:
request.session.set_expiry(60 * 60 * 24 * 7 * 3)
else:
request.session.set_expiry(0)
return True
return False
class SignupForm(forms.Form):
username = forms.CharField(label=_("Username"), max_length=30, widget=forms.TextInput())
password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput(render_value=False))
password2 = forms.CharField(label=_("Password (again)"), widget=forms.PasswordInput(render_value=False))
if settings.ACCOUNT_REQUIRED_EMAIL or settings.ACCOUNT_EMAIL_VERIFICATION:
email = forms.EmailField(
label = _("Email"),
required = True,
widget = forms.TextInput()
)
else:
email = forms.EmailField(
label = _("Email (optional)"),
required = False,
widget = forms.TextInput()
)
confirmation_key = forms.CharField(max_length=40, required=False, widget=forms.HiddenInput())
def clean_username(self):
if not alnum_re.search(self.cleaned_data["username"]):
raise forms.ValidationError(_("Usernames can only contain letters, numbers and underscores."))
try:
user = User.objects.get(username__iexact=self.cleaned_data["username"])
except User.DoesNotExist:
return self.cleaned_data["username"]
raise forms.ValidationError(_("This username is already taken. Please choose another."))
def clean(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data
def save(self):
username = self.cleaned_data["username"]
email = self.cleaned_data["email"]
password = self.cleaned_data["password1"]
if self.cleaned_data["confirmation_key"]:
from friends.models import JoinInvitation # @@@ temporary fix for issue 93
try:
join_invitation = JoinInvitation.objects.get(confirmation_key = self.cleaned_data["confirmation_key"])
confirmed = True
except JoinInvitation.DoesNotExist:
confirmed = False
else:
confirmed = False
# @@@ clean up some of the repetition below -- DRY!
if confirmed:
if email == join_invitation.contact.email:
new_user = User.objects.create_user(username, email, password)
join_invitation.accept(new_user) # should go before creation of EmailAddress below
new_user.message_set.create(message=ugettext(u"Your email address has already been verified"))
# already verified so can just create
EmailAddress(user=new_user, email=email, verified=True, primary=True).save()
else:
new_user = User.objects.create_user(username, "", password)
join_invitation.accept(new_user) # should go before creation of EmailAddress below
if email:
new_user.message_set.create(message=ugettext(u"Confirmation email sent to %(email)s") % {'email': email})
EmailAddress.objects.add_email(new_user, email)
else:
new_user = User.objects.create_user(username, "", password)
if email:
new_user.message_set.create(message=ugettext(u"Confirmation email sent to %(email)s") % {'email': email})
EmailAddress.objects.add_email(new_user, email)
if settings.ACCOUNT_EMAIL_VERIFICATION:
new_user.is_active = False
new_user.save()
return username, password # required for authenticate()
class OpenIDSignupForm(forms.Form):
username = forms.CharField(label="Username", max_length=30, widget=forms.TextInput())
if settings.ACCOUNT_REQUIRED_EMAIL or settings.ACCOUNT_EMAIL_VERIFICATION:
email = forms.EmailField(
label = _("Email"),
required = True,
widget = forms.TextInput()
)
else:
email = forms.EmailField(
label = _("Email (optional)"),
required = False,
widget = forms.TextInput()
)
def __init__(self, *args, **kwargs):
# remember provided (validated!) OpenID to attach it to the new user
# later.
self.openid = kwargs.pop("openid", None)
# pop these off since they are passed to this method but we can't
# pass them to forms.Form.__init__
kwargs.pop("reserved_usernames", [])
kwargs.pop("no_duplicate_emails", False)
super(OpenIDSignupForm, self).__init__(*args, **kwargs)
def clean_username(self):
if not alnum_re.search(self.cleaned_data["username"]):
raise forms.ValidationError(u"Usernames can only contain letters, numbers and underscores.")
try:
user = User.objects.get(username__iexact=self.cleaned_data["username"])
except User.DoesNotExist:
return self.cleaned_data["username"]
raise forms.ValidationError(u"This username is already taken. Please choose another.")
def save(self):
username = self.cleaned_data["username"]
email = self.cleaned_data["email"]
new_user = User.objects.create_user(username, "", "!")
if email:
new_user.message_set.create(message="Confirmation email sent to %s" % email)
EmailAddress.objects.add_email(new_user, email)
if self.openid:
# Associate openid with the new account.
new_user.openids.create(openid = self.openid)
return new_user
class UserForm(forms.Form):
def __init__(self, user=None, *args, **kwargs):
self.user = user
super(UserForm, self).__init__(*args, **kwargs)
class AccountForm(UserForm):
def __init__(self, *args, **kwargs):
super(AccountForm, self).__init__(*args, **kwargs)
try:
self.account = Account.objects.get(user=self.user)
except Account.DoesNotExist:
self.account = Account(user=self.user)
class AddEmailForm(UserForm):
email = forms.EmailField(label=_("Email"), required=True, widget=forms.TextInput(attrs={'size':'30'}))
def clean_email(self):
try:
EmailAddress.objects.get(user=self.user, email=self.cleaned_data["email"])
except EmailAddress.DoesNotExist:
return self.cleaned_data["email"]
raise forms.ValidationError(_("This email address already associated with this account."))
def save(self):
self.user.message_set.create(message=ugettext(u"Confirmation email sent to %(email)s") % {'email': self.cleaned_data["email"]})
return EmailAddress.objects.add_email(self.user, self.cleaned_data["email"])
class ChangePasswordForm(UserForm):
oldpassword = forms.CharField(label=_("Current Password"), widget=forms.PasswordInput(render_value=False))
password1 = forms.CharField(label=_("New Password"), widget=forms.PasswordInput(render_value=False))
password2 = forms.CharField(label=_("New Password (again)"), widget=forms.PasswordInput(render_value=False))
def clean_oldpassword(self):
if not self.user.check_password(self.cleaned_data.get("oldpassword")):
raise forms.ValidationError(_("Please type your current password."))
return self.cleaned_data["oldpassword"]
def clean_password2(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data["password2"]
def save(self):
self.user.set_password(self.cleaned_data['password1'])
self.user.save()
self.user.message_set.create(message=ugettext(u"Password successfully changed."))
class SetPasswordForm(UserForm):
password1 = forms.CharField(label=_("Password"), widget=forms.PasswordInput(render_value=False))
password2 = forms.CharField(label=_("Password (again)"), widget=forms.PasswordInput(render_value=False))
def clean_password2(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data["password2"]
def save(self):
self.user.set_password(self.cleaned_data["password1"])
self.user.save()
self.user.message_set.create(message=ugettext(u"Password successfully set."))
class ResetPasswordForm(forms.Form):
email = forms.EmailField(label=_("Email"), required=True, widget=forms.TextInput(attrs={'size':'30'}))
def clean_email(self):
if EmailAddress.objects.filter(email__iexact=self.cleaned_data["email"], verified=True).count() == 0:
raise forms.ValidationError(_("Email address not verified for any user account"))
return self.cleaned_data["email"]
def save(self):
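        # For every user registered with this address: derive an opaque key
        # from SECRET_KEY and the email, persist it as a PasswordReset record,
        # and mail the key to the user.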
for user in User.objects.filter(email__iexact=self.cleaned_data["email"]):
temp_key = sha_constructor("%s%s%s" % (
settings.SECRET_KEY,
user.email,
settings.SECRET_KEY,
)).hexdigest()
# save it to the password reset model
password_reset = PasswordReset(user=user, temp_key=temp_key)
password_reset.save()
current_site = Site.objects.get_current()
domain = unicode(current_site.domain)
#send the password reset email
subject = _("Password reset email sent")
message = render_to_string("account/password_reset_key_message.txt", {
"user": user,
"temp_key": temp_key,
"domain": domain,
})
send_mail(subject, message, settings.DEFAULT_FROM_EMAIL, [user.email], priority="high")
return self.cleaned_data["email"]
class ResetPasswordKeyForm(forms.Form):
password1 = forms.CharField(label=_("New Password"), widget=forms.PasswordInput(render_value=False))
password2 = forms.CharField(label=_("New Password (again)"), widget=forms.PasswordInput(render_value=False))
temp_key = forms.CharField(widget=forms.HiddenInput)
def clean_temp_key(self):
temp_key = self.cleaned_data.get("temp_key")
if not PasswordReset.objects.filter(temp_key=temp_key, reset=False).count() == 1:
raise forms.ValidationError(_("Temporary key is invalid."))
return temp_key
def clean_password2(self):
if "password1" in self.cleaned_data and "password2" in self.cleaned_data:
if self.cleaned_data["password1"] != self.cleaned_data["password2"]:
raise forms.ValidationError(_("You must type the same password each time."))
return self.cleaned_data["password2"]
def save(self):
# get the password_reset object
temp_key = self.cleaned_data.get("temp_key")
password_reset = PasswordReset.objects.get(temp_key__exact=temp_key)
# now set the new user password
user = User.objects.get(passwordreset__exact=password_reset)
user.set_password(self.cleaned_data["password1"])
user.save()
user.message_set.create(message=ugettext(u"Password successfully changed."))
# change all the password reset records to this person to be true.
for password_reset in PasswordReset.objects.filter(user=user):
password_reset.reset = True
password_reset.save()
class ChangeTimezoneForm(AccountForm):
timezone = TimeZoneField(label=_("Timezone"), required=True)
def __init__(self, *args, **kwargs):
super(ChangeTimezoneForm, self).__init__(*args, **kwargs)
self.initial.update({"timezone": self.account.timezone})
def save(self):
self.account.timezone = self.cleaned_data["timezone"]
self.account.save()
self.user.message_set.create(message=ugettext(u"Timezone successfully updated."))
class ChangeLanguageForm(AccountForm):
language = forms.ChoiceField(label=_("Language"), required=True, choices=settings.LANGUAGES)
def __init__(self, *args, **kwargs):
super(ChangeLanguageForm, self).__init__(*args, **kwargs)
self.initial.update({"language": self.account.language})
def save(self):
self.account.language = self.cleaned_data["language"]
self.account.save()
self.user.message_set.create(message=ugettext(u"Language successfully updated."))
# @@@ these should somehow be moved out of account or at least out of this module
from account.models import OtherServiceInfo, other_service, update_other_services
class TwitterForm(UserForm):
username = forms.CharField(label=_("Username"), required=True)
password = forms.CharField(label=_("Password"), required=True,
widget=forms.PasswordInput(render_value=False))
def __init__(self, *args, **kwargs):
super(TwitterForm, self).__init__(*args, **kwargs)
self.initial.update({"username": other_service(self.user, "twitter_user")})
def save(self):
from microblogging.utils import get_twitter_password
update_other_services(self.user,
twitter_user = self.cleaned_data['username'],
twitter_password = get_twitter_password(settings.SECRET_KEY, self.cleaned_data['password']),
)
self.user.message_set.create(message=ugettext(u"Successfully authenticated."))
|
|
# curio/test/kernel.py
import unittest
import time
from .. import *
# ---- Basic Kernel Functionality
class TestKernel(unittest.TestCase):
def test_hello(self):
kernel = get_kernel()
results = []
async def hello():
results.append('hello')
kernel.add_task(hello())
kernel.run()
self.assertEqual(results, [
'hello'
])
def test_sleep(self):
kernel = get_kernel()
results = []
async def main():
results.append('start')
await sleep(0.5)
results.append('end')
kernel.add_task(main())
start = time.time()
kernel.run()
end = time.time()
self.assertEqual(results, [
'start',
'end',
])
self.assertTrue((end-start) > 0.5)
def test_sleep_cancel(self):
kernel = get_kernel()
results = []
async def sleeper():
results.append('start')
try:
await sleep(1)
results.append('not here')
except CancelledError:
results.append('cancelled')
async def main():
task = await new_task(sleeper())
await sleep(0.5)
await task.cancel()
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'start',
'cancelled',
])
def test_task_join(self):
kernel = get_kernel()
results = []
async def child():
results.append('start')
await sleep(0.5)
results.append('end')
return 37
async def main():
task = await new_task(child())
await sleep(0.1)
results.append('joining')
r = await task.join()
results.append(r)
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'start',
'joining',
'end',
37
])
def test_task_join_error(self):
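        # A failure inside the child should surface at join() as TaskError,
        # with the original exception attached as __cause__.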
kernel = get_kernel()
results = []
async def child():
results.append('start')
int('bad')
async def main():
task = await new_task(child())
await sleep(0.1)
results.append('joining')
try:
r = await task.join()
results.append(r)
except TaskError as e:
results.append('task fail')
results.append(type(e))
results.append(type(e.__cause__))
kernel.add_task(main())
kernel.run(log_errors=False)
self.assertEqual(results, [
'start',
'joining',
'task fail',
TaskError,
ValueError,
])
def test_task_cancel(self):
kernel = get_kernel()
results = []
async def child():
results.append('start')
try:
await sleep(0.5)
results.append('end')
except CancelledError:
results.append('cancelled')
async def main():
task = await new_task(child())
results.append('cancel start')
await sleep(0.1)
results.append('cancelling')
await task.cancel()
results.append('done')
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'start',
'cancel start',
'cancelling',
'cancelled',
'done',
])
def test_task_child_cancel(self):
kernel = get_kernel()
results = []
async def child():
results.append('start')
try:
await sleep(0.5)
results.append('end')
except CancelledError:
results.append('child cancelled')
async def parent():
try:
child_task = await new_task(child())
await sleep(0.5)
results.append('end parent')
except CancelledError:
await child_task.cancel()
results.append('parent cancelled')
async def grandparent():
try:
parent_task = await new_task(parent())
await sleep(0.5)
results.append('end grandparent')
except CancelledError:
await parent_task.cancel()
results.append('grandparent cancelled')
async def main():
task = await new_task(grandparent())
await sleep(0.1)
results.append('cancel start')
await sleep(0.1)
results.append('cancelling')
await task.cancel()
results.append('done')
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'start',
'cancel start',
'cancelling',
'child cancelled',
'parent cancelled',
'grandparent cancelled',
'done',
])
def test_task_ready_cancel(self):
# This tests a tricky corner case of a task cancelling another task that's also
# on the ready queue.
kernel = get_kernel()
results = []
async def child():
try:
results.append('child sleep')
await sleep(1.0)
results.append('child slept')
await sleep(1.0)
results.append('should not see this')
except CancelledError:
results.append('child cancelled')
async def parent():
task = await new_task(child())
results.append('parent sleep')
await sleep(0.5)
results.append('cancel start')
await task.cancel()
results.append('cancel done')
async def main():
task = await new_task(parent())
await sleep(0.1)
time.sleep(1) # Forced block of the event loop. Both tasks should awake when we come back
await sleep(0.1)
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'child sleep',
'parent sleep',
'cancel start',
'child slept',
'child cancelled',
'cancel done'
])
class TestSignal(unittest.TestCase):
def test_task_signal(self):
import signal, os
kernel = get_kernel()
results = []
async def child():
async with SignalSet(signal.SIGUSR1, signal.SIGUSR2) as sig:
signo = await sig.wait()
results.append(signo)
signo = await sig.wait()
results.append(signo)
async def main():
task = await new_task(child())
await sleep(0.1)
results.append('sending USR1')
os.kill(os.getpid(), signal.SIGUSR1)
await sleep(0.1)
results.append('sending USR2')
os.kill(os.getpid(), signal.SIGUSR2)
await sleep(0.1)
results.append('done')
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'sending USR1',
signal.SIGUSR1,
'sending USR2',
signal.SIGUSR2,
'done',
])
def test_task_signal_waitone(self):
import signal, os
kernel = get_kernel()
results = []
async def child():
sig = SignalSet(signal.SIGUSR1, signal.SIGUSR2)
signo = await sig.wait()
results.append(signo)
signo = await sig.wait()
results.append(signo)
async def main():
task = await new_task(child())
await sleep(0.1)
results.append('sending USR1')
os.kill(os.getpid(), signal.SIGUSR1)
await sleep(0.1)
results.append('sending USR2')
os.kill(os.getpid(), signal.SIGUSR2)
await sleep(0.1)
results.append('done')
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'sending USR1',
signal.SIGUSR1,
'sending USR2',
signal.SIGUSR2,
'done',
])
def test_task_signal_ignore(self):
import signal, os
kernel = get_kernel()
results = []
async def child():
sig = SignalSet(signal.SIGUSR1, signal.SIGUSR2)
async with sig:
signo = await sig.wait()
results.append(signo)
with sig.ignore():
await sleep(1)
results.append('here')
async def main():
task = await new_task(child())
await sleep(0.1)
results.append('sending USR1')
os.kill(os.getpid(), signal.SIGUSR1)
await sleep(0.5)
results.append('sending USR1')
os.kill(os.getpid(), signal.SIGUSR1)
await sleep(0.1)
await task.join()
results.append('done')
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'sending USR1',
signal.SIGUSR1,
'sending USR1',
'here',
'done',
])
def test_task_signal_timeout(self):
import signal, os
kernel = get_kernel()
results = []
async def child():
async with SignalSet(signal.SIGUSR1, signal.SIGUSR2) as sig:
try:
signo = await sig.wait(timeout=0.5)
results.append(signo)
except TimeoutError:
results.append('timeout')
async def main():
task = await new_task(child())
await task.join()
results.append('done')
kernel.add_task(main())
kernel.run()
self.assertEqual(results, [
'timeout',
'done',
])
if __name__ == '__main__':
unittest.main()
|
|
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from tempest.lib.services.volume.v3 import types_client
from tempest.tests.lib import fake_auth_provider
from tempest.tests.lib.services import base
class TestTypesClient(base.BaseServiceTest):
FAKE_CREATE_VOLUME_TYPE = {
'volume_type': {
'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
'name': 'vol-type-001',
'description': 'volume type 0001',
'is_public': True,
'os-volume-type-access:is_public': True,
'extra_specs': {
'volume_backend_name': 'rbd'
}
}
}
FAKE_DEFAULT_VOLUME_TYPE_INFO = {
'volume_type': {
'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
'qos_specs_id': None,
'name': 'volume-type-test',
'description': 'default volume type',
'is_public': True,
'os-volume-type-access:is_public': True,
'extra_specs': {
'volume_backend_name': 'rbd'
}
}
}
FAKE_UPDATE_VOLUME_TYPE = {
'volume_type': {
'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
'name': 'volume-type-test',
'description': 'default volume type',
'is_public': True,
'extra_specs': {
'volume_backend_name': 'rbd'
}
}
}
FAKE_VOLUME_TYPES = {
'volume_types': [
{
'name': 'volume_type01',
'qos_specs_id': None,
'extra_specs': {
'volume_backend_name': 'lvmdriver-1'
},
'os-volume-type-access:is_public': True,
'is_public': True,
'id': '6685584b-1eac-4da6-b5c3-555430cf68ff',
'description': None
},
{
'name': 'volume_type02',
'qos_specs_id': None,
'extra_specs': {
'volume_backend_name': 'lvmdriver-1'
},
'os-volume-type-access:is_public': True,
'is_public': True,
'id': '8eb69a46-df97-4e41-9586-9a40a7533803',
'description': None
}
]
}
FAKE_VOLUME_TYPE_EXTRA_SPECS = {
'extra_specs': {
'capabilities': 'gpu'
}
}
FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS = {
'capabilities': 'gpu'
}
FAKE_VOLUME_TYPE_ACCESS = {
'volume_type_access': [{
'volume_type_id': '3c67e124-39ad-4ace-a507-8bb7bf510c26',
'project_id': 'f270b245cb11498ca4031deb7e141cfa'
}]
}
def setUp(self):
super(TestTypesClient, self).setUp()
fake_auth = fake_auth_provider.FakeAuthProvider()
self.client = types_client.TypesClient(fake_auth,
'volume',
'regionOne')
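# Each _test_* helper below stubs the named RestClient verb so it returns the matching FAKE_*
# payload and verifies the client method parses it; the public test_*_with_str_body /
# _with_bytes_body methods run each helper against both str and bytes response bodies.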
def _test_list_volume_types(self, bytes_body=False):
self.check_service_client_function(
self.client.list_volume_types,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_VOLUME_TYPES,
bytes_body)
def _test_show_volume_type(self, bytes_body=False):
self.check_service_client_function(
self.client.show_volume_type,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_DEFAULT_VOLUME_TYPE_INFO,
to_utf=bytes_body,
volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff")
def _test_create_volume_type(self, bytes_body=False):
self.check_service_client_function(
self.client.create_volume_type,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_CREATE_VOLUME_TYPE,
to_utf=bytes_body,
name='volume-type-test')
def _test_delete_volume_type(self):
self.check_service_client_function(
self.client.delete_volume_type,
'tempest.lib.common.rest_client.RestClient.delete',
{}, status=202,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
def _test_list_volume_types_extra_specs(self, bytes_body=False):
self.check_service_client_function(
self.client.list_volume_types_extra_specs,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
to_utf=bytes_body,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
def _test_show_volume_type_extra_specs(self, bytes_body=False):
self.check_service_client_function(
self.client.show_volume_type_extra_specs,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
extra_specs_name='capabilities',
to_utf=bytes_body)
def _test_create_volume_type_extra_specs(self, bytes_body=False):
self.check_service_client_function(
self.client.create_volume_type_extra_specs,
'tempest.lib.common.rest_client.RestClient.post',
self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
volume_type_id="6685584b-1eac-4da6-b5c3-555430cf68ff",
extra_specs=self.FAKE_VOLUME_TYPE_EXTRA_SPECS,
to_utf=bytes_body)
def _test_delete_volume_type_extra_specs(self):
self.check_service_client_function(
self.client.delete_volume_type_extra_specs,
'tempest.lib.common.rest_client.RestClient.delete',
{}, status=202,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
extra_spec_name='volume_backend_name')
def _test_update_volume_type(self, bytes_body=False):
self.check_service_client_function(
self.client.update_volume_type,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_UPDATE_VOLUME_TYPE,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
to_utf=bytes_body,
name='update-volume-type-test',
description='test update volume type description')
def _test_update_volume_type_extra_specs(self, bytes_body=False):
self.check_service_client_function(
self.client.update_volume_type_extra_specs,
'tempest.lib.common.rest_client.RestClient.put',
self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
extra_spec_name='capabilities',
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff',
extra_specs=self.FAKE_SHOW_VOLUME_TYPE_EXTRA_SPECS,
to_utf=bytes_body)
def _test_add_type_access(self):
self.check_service_client_function(
self.client.add_type_access,
'tempest.lib.common.rest_client.RestClient.post',
{}, status=202,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
def _test_remove_type_access(self):
self.check_service_client_function(
self.client.remove_type_access,
'tempest.lib.common.rest_client.RestClient.post',
{}, status=202,
volume_type_id='6685584b-1eac-4da6-b5c3-555430cf68ff')
def _test_list_type_access(self, bytes_body=False):
self.check_service_client_function(
self.client.list_type_access,
'tempest.lib.common.rest_client.RestClient.get',
self.FAKE_VOLUME_TYPE_ACCESS,
volume_type_id='3c67e124-39ad-4ace-a507-8bb7bf510c26',
to_utf=bytes_body)
def test_list_volume_types_with_str_body(self):
self._test_list_volume_types()
def test_list_volume_types_with_bytes_body(self):
self._test_list_volume_types(bytes_body=True)
def test_show_volume_type_with_str_body(self):
self._test_show_volume_type()
def test_show_volume_type_with_bytes_body(self):
self._test_show_volume_type(bytes_body=True)
def test_create_volume_type_str_body(self):
self._test_create_volume_type()
def test_create_volume_type_with_bytes_body(self):
self._test_create_volume_type(bytes_body=True)
def test_list_volume_types_extra_specs_with_str_body(self):
self._test_list_volume_types_extra_specs()
def test_list_volume_types_extra_specs_with_bytes_body(self):
self._test_list_volume_types_extra_specs(bytes_body=True)
def test_show_volume_type_extra_specs_with_str_body(self):
self._test_show_volume_type_extra_specs()
def test_show_volume_type_extra_specs_with_bytes_body(self):
self._test_show_volume_type_extra_specs(bytes_body=True)
def test_create_volume_type_extra_specs_with_str_body(self):
self._test_create_volume_type_extra_specs()
def test_create_volume_type_extra_specs_with_bytes_body(self):
self._test_create_volume_type_extra_specs(bytes_body=True)
def test_delete_volume_type_extra_specs(self):
self._test_delete_volume_type_extra_specs()
def test_update_volume_type_with_str_body(self):
self._test_update_volume_type()
def test_update_volume_type_with_bytes_body(self):
self._test_update_volume_type(bytes_body=True)
def test_delete_volume_type(self):
self._test_delete_volume_type()
def test_update_volume_type_extra_specs_with_str_body(self):
self._test_update_volume_type_extra_specs()
def test_update_volume_type_extra_specs_with_bytes_body(self):
self._test_update_volume_type_extra_specs(bytes_body=True)
def test_add_type_access(self):
self._test_add_type_access()
def test_remove_type_access(self):
self._test_remove_type_access()
def test_list_type_access_with_str_body(self):
self._test_list_type_access()
def test_list_type_access_with_bytes_body(self):
self._test_list_type_access(bytes_body=True)
|
|
# Copyright 2018 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
import datetime
import unittest
import json
import os
import re
from faker_schema.faker_schema import FakerSchema
from google.cloud import bigquery as bq
from data_generator.PrettyDataGenerator import DataGenerator, FakeRowGen
class TestPrettyDataGenerator(unittest.TestCase):
"""The test cases are focused on the business logic. In this case this is how we parse the
schemas, generate data and label images.
Execution Note:
This script is stored in professional-services/data-analytics/dataflow-python-examples/tests
but should be copied to professional-services/data-analytics/dataflow-python-examples/ and
run from there.
"""
@classmethod
def setUpClass(cls):
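# One BigQuery client is shared by all tests; DataGenerator receives it through the
# bq_cli argument in setUp below.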
cls._bq_cli = bq.Client()
def setUp(self):
# Note changed default for schema_file for ease of testing.
dir_path = os.path.dirname(os.path.realpath(''))
schema_file = os.path.join(dir_path, 'data-generator-pipeline',
'resources', 'lineorder-schema.json')
self.data_gen = DataGenerator(
bq_schema_filename=schema_file,
p_null=0.0,
n_keys=1000,
min_date='2000-01-01',
max_date=datetime.date.today().strftime('%Y-%m-%d'),
only_pos=True,
max_int=10**11,
max_float=float(10**11),
float_precision=2,
write_disp='WRITE_APPEND',
bq_cli=TestPrettyDataGenerator._bq_cli)
self.fakerowgen = FakeRowGen(self.data_gen)
logging.basicConfig(level=logging.INFO)
def test_get_faker_schema(self):
"""
This tests the get_faker_schema method of the DataGenerator class.
"""
expected_faker_schema = {
'lo_recieptfile':
'file_name', # This tests a field from special_map.
'lo_cust_key': 'word', # The rest of the fields test type_map.
'lo_order_key': 'word',
'lo_ordpriority': 'random_number',
'lo_supp_key': 'word',
'lo_quantity': 'random_number',
'lo_revenue': 'pyfloat',
'lo_orderdate': 'date_this_century',
'lo_extendedprice': 'pyfloat',
'lo_supplycost': 'pyfloat',
'lo_part_key': 'word',
'lo_discount': 'pyfloat',
'lo_shippriority': 'random_number',
'lo_shipmode': 'pyfloat',
'lo_ordtotalprice': 'pyfloat',
'lo_linenumber': 'random_number',
'lo_tax': 'pyfloat',
'lo_record_field': {
'name': 'name',
'email': 'email',
'time_sec': 'random_number',
'tz_offset': 'random_number',
'date': 'date_time_this_century'
}
}
actual_faker_schema = self.data_gen.get_faker_schema()
self.assertDictEqual(actual_faker_schema, expected_faker_schema)
def test_generate_fake(self):
"""
This tests the generate_fake method of the FakeRowGen class, which is called by process().
It uses an instance of the DataGenerator class and returns a fake JSON record that abides
by the rules specified in the attributes of that DataGenerator instance. Note that this is
a non-deterministic function, so the best we can do is spot-check that the values obey the
rules for this call; for thoroughness we could run the unit test many times.
"""
faker_schema = self.fakerowgen.data_gen.get_faker_schema()
actual_row = json.loads(self.fakerowgen.generate_fake(faker_schema))
# Check returns a dict representing a single record.
self.assertIsInstance(actual_row, dict)
# # Check the schema is correct.
# self.assertEquals(actual_row.keys(), faker_schema.keys())
# Check the date in range.
self.assertGreaterEqual(
datetime.datetime.strptime(actual_row['lo_orderdate'],
'%Y-%m-%d').date(),
self.data_gen.min_date)
self.assertLessEqual(
datetime.datetime.strptime(actual_row['lo_orderdate'],
'%Y-%m-%d').date(),
self.data_gen.max_date)
# Check the integer is in range.
self.assertLessEqual(actual_row['lo_linenumber'],
self.data_gen.max_int)
# Check the float is in range.
self.assertLessEqual(actual_row['lo_tax'], self.data_gen.max_float)
# Check int strictly positive
self.assertGreaterEqual(actual_row['lo_linenumber'], 0)
# Check float strictly positive
self.assertGreaterEqual(actual_row['lo_tax'], 0.0)
# Check string size was parsed and enforced from description fields of lo_recieptfile.
self.assertLessEqual(len(actual_row['lo_recieptfile']), 10)
# Check if record type nesting worked.
self.assertIsInstance(actual_row['lo_record_field'], list)
def test_get_field_dict(self):
"""
This tests the ability of the FakeRowGen.get_field_dict method to extract a single field
dictionary from a FakeRowGen.data_gen.schema
"""
expected_field_dict = {
'type': 'DATE',
'name': 'lo_orderdate',
'mode': 'NULLABLE'
}
actual_field_dict = self.fakerowgen.get_field_dict(
field_name='lo_orderdate')
self.assertDictEqual(actual_field_dict, expected_field_dict)
expected_record_dict = {
"name":
"lo_record_field",
"type":
"RECORD",
"mode":
"REPEATED",
"fields": [{
"mode": "NULLABLE",
"name": "name",
"type": "STRING"
}, {
"mode": "NULLABLE",
"name": "email",
"type": "STRING"
}, {
"mode": "NULLABLE",
"name": "time_sec",
"type": "INTEGER"
}, {
"mode": "NULLABLE",
"name": "tz_offset",
"type": "INTEGER"
}, {
"mode": "NULLABLE",
"name": "date",
"type": "TIMESTAMP"
}]
}
actual_record_dict = self.fakerowgen.get_field_dict(
field_name='lo_record_field')
self.assertDictEqual(actual_record_dict, expected_record_dict)
def test_sanity_check(self):
fschema = self.data_gen.get_faker_schema()
schema_faker = FakerSchema()
data = schema_faker.generate_fake(fschema, 1) # Generate one record.
# Note at this point data[u'lo_orderdate'] is a datetime.date object while BigQuery expects
# a string
self.assertIsInstance(data['lo_orderdate'], datetime.date)
data = self.fakerowgen.sanity_check(record=data,
fieldname='lo_orderdate')
# Check that the date was converted to a string
self.assertIsInstance(data['lo_orderdate'], str)
# Check that the date is in the correct format
_ = datetime.datetime.strptime(data['lo_orderdate'], '%Y-%m-%d')
# Check if sanity check enforces integers < data_args.max_int
data['lo_linenumber'] = 10**12 # Note that max_int is 10**11
data = self.fakerowgen.sanity_check(record=data,
fieldname='lo_linenumber')
self.assertLessEqual(data['lo_linenumber'], self.data_gen.max_int)
data = self.fakerowgen.sanity_check(record=data,
fieldname='lo_record_field')
self.assertIsInstance(data['lo_record_field'], list)
def test_get_skewed_key(self):
"""
This tests the get_skewed_key method of the FakeRowGen class.
"""
uniform_key = self.fakerowgen.get_skewed_key()
self.assertTrue(uniform_key)
self.assertLessEqual(uniform_key, self.data_gen.n_keys)
binomial_key = self.fakerowgen.get_skewed_key(distribution='binomial')
self.assertTrue(binomial_key)
self.assertLessEqual(binomial_key, self.data_gen.n_keys)
zipf_key = self.fakerowgen.get_skewed_key(distribution='zipf')
self.assertTrue(zipf_key)
self.assertLessEqual(zipf_key, self.data_gen.n_keys)
if __name__ == '__main__':
unittest.main()
|
|
import renderdoc as rd
import rdtest
# Not a direct test, re-used by API-specific tests
class Overlay_Test(rdtest.TestCase):
internal = True
def check_capture(self, base_event=0):
out: rd.ReplayOutput = self.controller.CreateOutput(rd.CreateHeadlessWindowingData(100, 100), rd.ReplayOutputType.Texture)
self.check(out is not None)
api: rd.GraphicsAPI = self.controller.GetAPIProperties().pipelineType
# Check the actual output is as expected first.
for is_msaa in [False, True]:
if is_msaa:
test_marker: rd.ActionDescription = self.find_action("MSAA Test", base_event)
else:
test_marker: rd.ActionDescription = self.find_action("Normal Test", base_event)
self.controller.SetFrameEvent(test_marker.next.eventId, True)
pipe: rd.PipeState = self.controller.GetPipelineState()
col_tex: rd.ResourceId = pipe.GetOutputTargets()[0].resourceId
tex = rd.TextureDisplay()
tex.resourceId = col_tex
tex.subresource.sample = 0
# Background around the outside
self.check_pixel_value(col_tex, 0.1, 0.1, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(col_tex, 0.8, 0.1, [0.2, 0.2, 0.2, 1.0])
self.check_pixel_value(col_tex, 0.5, 0.95, [0.2, 0.2, 0.2, 1.0])
# Large dark grey triangle
self.check_pixel_value(col_tex, 0.5, 0.1, [0.1, 0.1, 0.1, 1.0])
self.check_pixel_value(col_tex, 0.5, 0.9, [0.1, 0.1, 0.1, 1.0])
self.check_pixel_value(col_tex, 0.2, 0.9, [0.1, 0.1, 0.1, 1.0])
self.check_pixel_value(col_tex, 0.8, 0.9, [0.1, 0.1, 0.1, 1.0])
# Red upper half triangle
self.check_pixel_value(col_tex, 0.3, 0.4, [1.0, 0.0, 0.0, 1.0])
# Blue lower half triangle
self.check_pixel_value(col_tex, 0.3, 0.6, [0.0, 0.0, 1.0, 1.0])
# Floating clipped triangle
self.check_pixel_value(col_tex, 335, 140, [0.0, 0.0, 0.0, 1.0])
self.check_pixel_value(col_tex, 340, 140, [0.2, 0.2, 0.2, 1.0])
# Triangle size triangles
self.check_pixel_value(col_tex, 200, 51, [1.0, 0.5, 1.0, 1.0])
self.check_pixel_value(col_tex, 200, 65, [1.0, 1.0, 0.0, 1.0])
self.check_pixel_value(col_tex, 200, 79, [0.0, 1.0, 1.0, 1.0])
self.check_pixel_value(col_tex, 200, 93, [0.0, 1.0, 0.0, 1.0])
for overlay in rd.DebugOverlay:
if overlay == rd.DebugOverlay.NoOverlay:
continue
# These overlays are just display modes really, not actually separate overlays
if overlay == rd.DebugOverlay.NaN or overlay == rd.DebugOverlay.Clipping:
continue
# We'll test the clear-before-X overlays separately, for both colour and depth
if overlay == rd.DebugOverlay.ClearBeforeDraw or overlay == rd.DebugOverlay.ClearBeforePass:
continue
rdtest.log.print("Checking overlay {} in {} main action".format("MSAA" if is_msaa else "normal", str(overlay)))
tex.overlay = overlay
out.SetTextureDisplay(tex)
out.Display()
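# Tolerance of one 8-bit UNORM step (1/256) for the colour-based overlay comparisons below.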
eps = 1.0 / 256.0
overlay_id: rd.ResourceId = out.GetDebugOverlayTexID()
# We test a few key spots:
# 4 points along the left side of the big triangle, above/in/below its intersection with the tri behind
# 4 points outside all triangles
# The overlap between the big tri and the bottom tri, and between it and the right backface culled tri
# The bottom tri's part that sticks out
# The two parts of the backface culled tri that stick out
# The depth clipped tri, in and out of clipping
# The 4 triangle size test triangles
if overlay == rd.DebugOverlay.Drawcall:
self.check_pixel_value(overlay_id, 150, 90, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.5], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [0.0, 0.0, 0.0, 0.5], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.5], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [0.0, 0.0, 0.0, 0.5], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 135, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 340, 145, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 51, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [0.8, 0.1, 0.8, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.Wireframe:
# Wireframe we only test a limited set to avoid hitting implementation variations of line raster
# We also have to fudge a little because the lines might land on adjacent pixels
# Also to be safe we don't run this test on MSAA
if not is_msaa:
x = 142
picked: rd.PixelValue = self.controller.PickPixel(overlay_id, x, 150, rd.Subresource(), rd.CompType.Typeless)
if picked.floatValue[3] == 0.0:
x = 141
picked: rd.PixelValue = self.controller.PickPixel(overlay_id, x, 150, rd.Subresource(), rd.CompType.Typeless)
self.check_pixel_value(overlay_id, x, 90, [200.0/255.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, x, 130, [200.0/255.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, x, 160, [200.0/255.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, x, 200, [200.0/255.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [200.0/255.0, 1.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [200.0/255.0, 1.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [200.0/255.0, 1.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [200.0/255.0, 1.0, 0.0, 0.0], eps=eps)
y = 149
picked: rd.PixelValue = self.controller.PickPixel(overlay_id, 325, y, rd.Subresource(), rd.CompType.Typeless)
if picked.floatValue[3] == 0.0:
y = 150
self.check_pixel_value(overlay_id, 325, y, [200.0/255.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 340, y, [200.0/255.0, 1.0, 0.0, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.Depth:
self.check_pixel_value(overlay_id, 150, 90, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Intersection with lesser depth - depth fail
self.check_pixel_value(overlay_id, 150, 160, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Backface culled triangle
self.check_pixel_value(overlay_id, 285, 135, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Depth clipped part of tri
self.check_pixel_value(overlay_id, 340, 145, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 51, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [0.0, 1.0, 0.0, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.Stencil:
self.check_pixel_value(overlay_id, 150, 90, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Intersection with different stencil - stencil fail
self.check_pixel_value(overlay_id, 150, 130, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Backface culled triangle
self.check_pixel_value(overlay_id, 285, 135, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Depth clipped part of tri
self.check_pixel_value(overlay_id, 340, 145, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 51, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [0.0, 1.0, 0.0, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.BackfaceCull:
self.check_pixel_value(overlay_id, 150, 90, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [0.0, 1.0, 0.0, 1.0], eps=eps)
# Backface culled triangle
self.check_pixel_value(overlay_id, 285, 135, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 340, 145, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 51, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [0.0, 1.0, 0.0, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.ViewportScissor:
# Inside viewport
self.check_pixel_value(overlay_id, 50, 50, [0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], eps=eps)
self.check_pixel_value(overlay_id, 350, 50, [0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], eps=eps)
self.check_pixel_value(overlay_id, 50, 250, [0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], eps=eps)
self.check_pixel_value(overlay_id, 350, 250, [0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], eps=eps)
# Passing triangle inside the viewport
self.check_pixel_value(overlay_id, 200, 150,
[0.2 * 0.4, 1.0 * 0.6 + 0.2 * 0.4, 0.9 * 0.4, 1.0 * 0.6 + 0.4 * 0.4], eps=eps)
# Viewport border
self.check_pixel_value(overlay_id, 12, 12, [0.1, 0.1, 0.1, 1.0], eps=eps)
# Outside viewport (not on scissor border)
self.check_pixel_value(overlay_id, 6, 6, [0.0, 0.0, 0.0, 0.0], eps=eps)
# Scissor border
self.check_pixel_value(overlay_id, 0, 0, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 20, 0, [0.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 40, 0, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 60, 0, [0.0, 0.0, 0.0, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.QuadOverdrawDraw:
# This would require extreme command buffer patching to de-MSAA the framebuffer and renderpass
if api == rd.GraphicsAPI.Vulkan and is_msaa:
rdtest.log.print("Quad overdrawnot currently supported on MSAA on Vulkan")
continue
self.check_pixel_value(overlay_id, 150, 90, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 135, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 340, 145, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 51, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [1.0, 1.0, 1.0, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.QuadOverdrawPass:
# This would require extreme command buffer patching to de-MSAA the framebuffer and renderpass
if api == rd.GraphicsAPI.Vulkan and is_msaa:
rdtest.log.print("Quad overdraw not currently supported on MSAA on Vulkan")
continue
self.check_pixel_value(overlay_id, 150, 90, [1.0, 1.0, 1.0, 1.0], eps=eps)
# Do an extra tap here where we overlap with the extreme-background largest triangle, to show that
# overdraw from earlier draws in the pass is counted
self.check_pixel_value(overlay_id, 150, 100, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [2.0, 2.0, 2.0, 2.0], eps=eps)
# Two of these have overdraw from the pass due to the large background triangle
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [3.0, 3.0, 3.0, 3.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 135, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 340, 145, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 51, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [2.0, 2.0, 2.0, 2.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [2.0, 2.0, 2.0, 2.0], eps=eps)
elif overlay == rd.DebugOverlay.TriangleSizeDraw:
eps = 1.0
self.check_pixel_value(overlay_id, 150, 90, [10632.0, 10632.0, 10632.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [10632.0, 10632.0, 10632.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [2128.0, 2128.0, 2128.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [10632.0, 10632.0, 10632.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [2128.0, 2128.0, 2128.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 135, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [531.0, 531.0, 531.0, 531.0], eps=eps)
self.check_pixel_value(overlay_id, 340, 145, [0.0, 0.0, 0.0, 0.0], eps=eps)
eps = 0.01
self.check_pixel_value(overlay_id, 200, 51, [8.305, 8.305, 8.305, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [5.316, 5.316, 5.316, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [3.0, 3.0, 3.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [1.33, 1.33, 1.33, 1.0], eps=eps)
elif overlay == rd.DebugOverlay.TriangleSizePass:
eps = 1.0
self.check_pixel_value(overlay_id, 150, 90, [10632.0, 10632.0, 10632.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 130, [3324.0, 3324.0, 3324.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 160, [3324.0, 3324.0, 3324.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 150, 200, [10632.0, 10632.0, 10632.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 125, 250, [43072.0, 43072.0, 43072.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 60, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 250, [43072.0, 43072.0, 43072.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 175, [2128.0, 2128.0, 2128.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 250, 150, [10632.0, 10632.0, 10632.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 220, 190, [2128.0, 2128.0, 2128.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 135, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 285, 165, [43072.0, 43072.0, 43072.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 330, 145, [531.0, 531.0, 531.0, 531.0], eps=eps)
self.check_pixel_value(overlay_id, 340, 145, [0.0, 0.0, 0.0, 0.0], eps=eps)
eps = 0.01
self.check_pixel_value(overlay_id, 200, 51, [8.305, 8.305, 8.305, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 65, [5.316, 5.316, 5.316, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 79, [3.0, 3.0, 3.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 93, [1.33, 1.33, 1.33, 1.0], eps=eps)
rdtest.log.success("Picked pixels are as expected for {}".format(str(overlay)))
if is_msaa:
rdtest.log.success("All MSAA overlays are as expected")
else:
rdtest.log.success("All normal overlays are as expected")
# Check the viewport overlay especially
view_marker: rd.ActionDescription = self.find_action("Viewport Test", base_event)
self.controller.SetFrameEvent(view_marker.next.eventId, True)
pipe: rd.PipeState = self.controller.GetPipelineState()
col_tex: rd.ResourceId = pipe.GetOutputTargets()[0].resourceId
for overlay in rd.DebugOverlay:
if overlay == rd.DebugOverlay.NoOverlay:
continue
# These overlays are just display modes really, not actually separate overlays
if overlay == rd.DebugOverlay.NaN or overlay == rd.DebugOverlay.Clipping:
continue
# We'll test the clear-before-X overlays separately, for both colour and depth
if overlay == rd.DebugOverlay.ClearBeforeDraw or overlay == rd.DebugOverlay.ClearBeforePass:
continue
rdtest.log.print("Checking overlay {} in viewport action".format(str(overlay)))
tex.resourceId = col_tex
tex.overlay = overlay
out.SetTextureDisplay(tex)
out.Display()
eps = 1.0 / 256.0
overlay_id: rd.ResourceId = out.GetDebugOverlayTexID()
save_data = rd.TextureSave()
save_data.resourceId = overlay_id
save_data.destType = rd.FileType.PNG
self.controller.SaveTexture(save_data, rdtest.get_tmp_path('overlay.png'))
if overlay == rd.DebugOverlay.Drawcall:
# The action overlay will show up outside the scissor region
self.check_pixel_value(overlay_id, 50, 85, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 50, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 10, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 85, 85, [0.8, 0.1, 0.8, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 5, [0.0, 0.0, 0.0, 0.5], eps=eps)
self.check_pixel_value(overlay_id, 95, 85, [0.0, 0.0, 0.0, 0.5], eps=eps)
self.check_pixel_value(overlay_id, 80, 30, [0.0, 0.0, 0.0, 0.5], eps=eps)
elif overlay == rd.DebugOverlay.Wireframe:
# Wireframe we only test a limited set to avoid hitting implementation variations of line raster
# We also have to fudge a little because the lines might land on adjacent pixels
found = False
for delta in range(0, 5):
try:
self.check_pixel_value(overlay_id, 30 + delta, 32, [200.0 / 255.0, 1.0, 0.0, 1.0], eps=eps)
found = True
break
except rdtest.TestFailureException:
pass
if not found:
raise rdtest.TestFailureException("Couldn't find wireframe within scissor")
found = False
for delta in range(0, 5):
try:
self.check_pixel_value(overlay_id, 34 + delta, 22, [200.0 / 255.0, 1.0, 0.0, 1.0], eps=eps)
found = True
break
except rdtest.TestFailureException:
pass
if found:
raise rdtest.TestFailureException("Found wireframe outside of scissor")
elif overlay == rd.DebugOverlay.Depth or overlay == rd.DebugOverlay.Stencil or overlay == rd.DebugOverlay.BackfaceCull:
self.check_pixel_value(overlay_id, 50, 25, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 75, [0.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 20, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 80, [0.0, 0.0, 0.0, 0.0], eps=eps)
elif overlay == rd.DebugOverlay.ViewportScissor:
# Inside viewport and scissor, passing triangle
self.check_pixel_value(overlay_id, 50, 50,
[0.2 * 0.4, 1.0 * 0.6 + 0.2 * 0.4, 0.9 * 0.4, 1.0 * 0.6 + 0.4 * 0.4], eps=eps)
# Inside viewport and outside scissor
self.check_pixel_value(overlay_id, 50, 80,
[1.0 * 0.6 + 0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 1.0 * 0.6 + 0.4 * 0.4], eps=eps)
elif overlay == rd.DebugOverlay.QuadOverdrawDraw:
self.check_pixel_value(overlay_id, 50, 50, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 15, [0.0, 0.0, 0.0, 0.0], eps=eps)
elif overlay == rd.DebugOverlay.QuadOverdrawPass:
self.check_pixel_value(overlay_id, 50, 50, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 15, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 270, [1.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 280, [0.0, 0.0, 0.0, 0.0], eps=eps)
elif overlay == rd.DebugOverlay.TriangleSizeDraw:
eps = 1.0
self.check_pixel_value(overlay_id, 50, 50, [5408.0, 5408.0, 5408.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 15, [0.0, 0.0, 0.0, 0.0], eps=eps)
elif overlay == rd.DebugOverlay.TriangleSizePass:
eps = 1.0
self.check_pixel_value(overlay_id, 50, 50, [5408.0, 5408.0, 5408.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 50, 15, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 270, [43072.0, 43072.0, 43072.0, 1.0], eps=eps)
self.check_pixel_value(overlay_id, 200, 280, [0.0, 0.0, 0.0, 0.0], eps=eps)
rdtest.log.success("Picked pixels are as expected for {}".format(str(overlay)))
rdtest.log.success("Overlays are as expected around viewport/scissor behaviour")
test_marker: rd.ActionDescription = self.find_action("Normal Test", base_event)
# Now check clear-before-X by hand, for colour and for depth
self.controller.SetFrameEvent(test_marker.next.eventId, True)
depth_tex: rd.ResourceId = pipe.GetDepthTarget().resourceId
eps = 1.0/256.0
# Check colour and depth before-hand
self.check_pixel_value(col_tex, 250, 250, [0.1, 0.1, 0.1, 1.0], eps=eps)
self.check_pixel_value(col_tex, 125, 125, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 125, 175, [0.0, 0.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 50, 50, [0.2, 0.2, 0.2, 1.0], eps=eps)
self.check_pixel_value(col_tex, 291, 150, [0.977, 0.977, 0.977, 1.0], eps=0.075)
self.check_pixel_value(col_tex, 200, 51, [1.0, 0.5, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 65, [1.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 79, [0.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 93, [0.0, 1.0, 0.0, 1.0], eps=eps)
eps = 0.001
self.check_pixel_value(depth_tex, 160, 135, [0.9, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 160, 165, [0.0, 0.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 250, 150, [0.5, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 250, 250, [0.95, 0.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 50, 50, [1.0, 0.0/255.0, 0.0, 1.0], eps=eps)
rdtest.log.success("Colour and depth at end are correct")
# Check clear before pass
tex.resourceId = col_tex
tex.overlay = rd.DebugOverlay.ClearBeforePass
out.SetTextureDisplay(tex)
out.Display()
eps = 1.0/256.0
self.check_pixel_value(col_tex, 250, 250, [0.1, 0.1, 0.1, 1.0], eps=eps)
self.check_pixel_value(col_tex, 125, 125, [1.0, 0.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 125, 175, [0.0, 0.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 50, 50, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(col_tex, 291, 150, [0.977, 0.977, 0.977, 1.0], eps=0.075)
self.check_pixel_value(col_tex, 200, 51, [1.0, 0.5, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 65, [1.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 79, [0.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 93, [0.0, 1.0, 0.0, 1.0], eps=eps)
tex.resourceId = depth_tex
tex.overlay = rd.DebugOverlay.ClearBeforePass
out.SetTextureDisplay(tex)
out.Display()
eps = 0.001
self.check_pixel_value(depth_tex, 160, 135, [0.9, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 160, 165, [0.0, 0.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 250, 150, [0.5, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 250, 250, [0.95, 0.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 50, 50, [1.0, 0.0/255.0, 0.0, 1.0], eps=eps)
rdtest.log.success("Clear before pass colour and depth values as expected")
# Check clear before action
tex.resourceId = col_tex
tex.overlay = rd.DebugOverlay.ClearBeforeDraw
out.SetTextureDisplay(tex)
out.Display()
eps = 1.0/256.0
# These are all pass triangles, should be cleared
self.check_pixel_value(col_tex, 250, 250, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(col_tex, 125, 125, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(col_tex, 125, 175, [0.0, 0.0, 0.0, 0.0], eps=eps)
self.check_pixel_value(col_tex, 50, 50, [0.0, 0.0, 0.0, 0.0], eps=eps)
# These should be identical
self.check_pixel_value(col_tex, 291, 150, [0.977, 0.977, 0.977, 1.0], eps=0.075)
self.check_pixel_value(col_tex, 200, 51, [1.0, 0.5, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 65, [1.0, 1.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 79, [0.0, 1.0, 1.0, 1.0], eps=eps)
self.check_pixel_value(col_tex, 200, 93, [0.0, 1.0, 0.0, 1.0], eps=eps)
tex.resourceId = depth_tex
tex.overlay = rd.DebugOverlay.ClearBeforeDraw
out.SetTextureDisplay(tex)
out.Display()
eps = 0.001
# Without the pass, depth/stencil results are different
self.check_pixel_value(depth_tex, 160, 135, [0.5, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 160, 165, [0.5, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 250, 150, [0.5, 85.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 250, 250, [1.0, 0.0/255.0, 0.0, 1.0], eps=eps)
self.check_pixel_value(depth_tex, 50, 50, [1.0, 0.0/255.0, 0.0, 1.0], eps=eps)
rdtest.log.success("Clear before action colour and depth values as expected")
rdtest.log.success("All overlays as expected for main action")
# Now test overlays on a render-to-slice/mip case
for mip in [2, 3]:
sub_marker: rd.ActionDescription = self.find_action("Subresources mip {}".format(mip), base_event)
self.controller.SetFrameEvent(sub_marker.next.eventId, True)
pipe: rd.PipeState = self.controller.GetPipelineState()
col_tex = pipe.GetOutputTargets()[0].resourceId
sub = rd.Subresource(pipe.GetOutputTargets()[0].firstMip, pipe.GetOutputTargets()[0].firstSlice, 0)
for overlay in rd.DebugOverlay:
if overlay == rd.DebugOverlay.NoOverlay:
continue
# These overlays are just display modes really, not actually separate overlays
if overlay == rd.DebugOverlay.NaN or overlay == rd.DebugOverlay.Clipping:
continue
if overlay == rd.DebugOverlay.ClearBeforeDraw or overlay == rd.DebugOverlay.ClearBeforePass:
continue
rdtest.log.print("Checking overlay {} with mip/slice rendering".format(str(overlay)))
tex.resourceId = col_tex
tex.overlay = overlay
tex.subresource = sub
out.SetTextureDisplay(tex)
out.Display()
overlay_id: rd.ResourceId = out.GetDebugOverlayTexID()
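# Mip 3 is half the resolution of mip 2, so halve (shift right by one) the sample coordinates
# used in the checks below.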
shift = 0
if mip == 3:
shift = 1
# All values in mip 0 should be 0 for all overlays
self.check_pixel_value(overlay_id, 200 >> shift, 150 >> shift, [0.0, 0.0, 0.0, 0.0], sub=rd.Subresource(0, 0, 0))
self.check_pixel_value(overlay_id, 197 >> shift, 147 >> shift, [0.0, 0.0, 0.0, 0.0], sub=rd.Subresource(0, 0, 0))
self.check_pixel_value(overlay_id, 203 >> shift, 153 >> shift, [0.0, 0.0, 0.0, 0.0], sub=rd.Subresource(0, 0, 0))
# Also for array slice 0 on this mip
self.check_pixel_value(overlay_id, 200 >> shift, 150 >> shift, [0.0, 0.0, 0.0, 0.0], sub=rd.Subresource(mip, 0, 0))
self.check_pixel_value(overlay_id, 197 >> shift, 147 >> shift, [0.0, 0.0, 0.0, 0.0], sub=rd.Subresource(mip, 0, 0))
self.check_pixel_value(overlay_id, 203 >> shift, 153 >> shift, [0.0, 0.0, 0.0, 0.0], sub=rd.Subresource(mip, 0, 0))
rdtest.log.success("Other mips are empty as expected for overlay {}".format(str(overlay)))
if overlay == rd.DebugOverlay.Drawcall:
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [0.8, 0.1, 0.8, 1.0], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 30 >> shift, 36 >> shift, [0.0, 0.0, 0.0, 0.5], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 70 >> shift, 34 >> shift, [0.8, 0.1, 0.8, 1.0], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 70 >> shift, 20 >> shift, [0.0, 0.0, 0.0, 0.5], sub=sub, eps=eps)
elif overlay == rd.DebugOverlay.Wireframe:
self.check_pixel_value(overlay_id, 36 >> shift, 36 >> shift, [200.0 / 255.0, 1.0, 0.0, 1.0],
sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 36 >> shift, 50 >> shift, [200.0 / 255.0, 1.0, 0.0, 1.0],
sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [200.0 / 255.0, 1.0, 0.0, 0.0],
sub=sub, eps=eps)
elif overlay == rd.DebugOverlay.Depth or overlay == rd.DebugOverlay.Stencil:
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [0.0, 1.0, 0.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 30 >> shift, 36 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 34 >> shift, [1.0, 0.0, 0.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 20 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
elif overlay == rd.DebugOverlay.BackfaceCull:
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [0.0, 1.0, 0.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 30 >> shift, 36 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 34 >> shift, [1.0, 0.0, 0.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 20 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
elif overlay == rd.DebugOverlay.ViewportScissor:
self.check_pixel_value(overlay_id, 20 >> shift, 15 >> shift,
[0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 80 >> shift, 15 >> shift,
[0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 20 >> shift, 60 >> shift,
[0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 80 >> shift, 60 >> shift,
[0.2 * 0.4, 0.2 * 0.4, 0.9 * 0.4, 0.4 * 0.4], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift,
[0.2 * 0.4, 1.0 * 0.6 + 0.2 * 0.4, 0.9 * 0.4, 1.0 * 0.6 + 0.4 * 0.4],
sub=sub, eps=eps)
if mip == 2:
self.check_pixel_value(overlay_id, 6, 6, [0.1, 0.1, 0.1, 1.0], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 4, 4, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 0, 0, [1.0, 1.0, 1.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 20, 0, [0.0, 0.0, 0.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 40, 0, [1.0, 1.0, 1.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 60, 0, [0.0, 0.0, 0.0, 1.0], sub=sub)
else:
self.check_pixel_value(overlay_id, 4, 4, [0.1, 0.1, 0.1, 1.0], sub=sub, eps=eps)
self.check_pixel_value(overlay_id, 0, 0, [1.0, 1.0, 1.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 20, 0, [0.0, 0.0, 0.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 40, 0, [1.0, 1.0, 1.0, 1.0], sub=sub)
elif overlay == rd.DebugOverlay.QuadOverdrawDraw or overlay == rd.DebugOverlay.QuadOverdrawPass:
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [1.0, 1.0, 1.0, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 30 >> shift, 36 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 20 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 50 >> shift, 45 >> shift, [2.0, 2.0, 2.0, 2.0], sub=sub)
elif overlay == rd.DebugOverlay.TriangleSizeDraw or overlay == rd.DebugOverlay.TriangleSizePass:
if mip == 2:
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [585.0, 585.0, 585.0, 1.0], sub=sub)
else:
self.check_pixel_value(overlay_id, 50 >> shift, 36 >> shift, [151.75, 151.75, 151.75, 1.0], sub=sub)
self.check_pixel_value(overlay_id, 30 >> shift, 36 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 34 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
self.check_pixel_value(overlay_id, 70 >> shift, 20 >> shift, [0.0, 0.0, 0.0, 0.0], sub=sub)
if mip == 2:
self.check_pixel_value(overlay_id, 50 >> shift, 45 >> shift, [117.0, 117.0, 117.0, 1.0], sub=sub)
else:
self.check_pixel_value(overlay_id, 50 >> shift, 45 >> shift, [30.359375, 30.359375, 30.359375, 1.0], sub=sub)
rdtest.log.success("Picked values are correct for mip {} overlay {}".format(sub.mip, str(overlay)))
out.Shutdown()
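# Illustrative sketch (not part of the original suite): the class above is marked internal and,
# per its header comment, is re-used by API-specific tests. One plausible way a harness could
# reuse it is by subclassing; the class name here is hypothetical, and any capture-selection
# attributes a real harness would require are omitted.
class Example_API_Overlay(Overlay_Test):
    internal = False  # expose this variant as a runnable test in the hypothetical harness
    def check_capture(self):
        # Delegate to the shared overlay checks defined above, starting at the first event.
        super(Example_API_Overlay, self).check_capture(base_event=0)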
|
|
# -*- coding: utf-8 -*-
# Copyright 2010 Alexander Orlov <[email protected]>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from google.appengine.api import memcache
from google.appengine.api import users
from google.appengine.ext import db
from google.appengine.ext import webapp
from google.appengine.ext.webapp import template
from google.appengine.ext.webapp import util
from sol import settings
from google.appengine.api import mail
#from django.conf import settings
##settings.LANGUAGE_CODE = self.request.headers.get('Accept-Language')
#settings._target = None
#settings.LANGUAGE_CODE = 'de'
#from django.conf import settings
#import random
#i = random.choice(['de', 'ru', 'en-us', 'es', 'pt'])
##os.environ['DJANGO_SETTINGS_MODULE'] = 'settings'
#
#settings.configure(LANGUAGE_CODE = i)
class Caching():
@staticmethod
def flush_public_web_cmd_cache(cmds):
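# App Engine's memcache.delete() returns a status code; 2 (memcache.DELETE_SUCCESSFUL) means
# the key existed and was removed, which is what the check below requires.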
DELETE_SUCCESSFUL = 2
web_cmd_objs_delete_result = memcache.delete_multi(cmds, 0, WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_PUBLIC_CMD)
cmds_delete_result = memcache.delete(WEB_CMDS_MEMCACHE_KEY + IS_PUBLIC_CMD)
        if web_cmd_objs_delete_result and cmds_delete_result == DELETE_SUCCESSFUL:
return True
return False
@staticmethod
def flush_user_web_cmd_cache(cmds):
user = users.get_current_user()
DELETE_SUCCESSFUL = 2
web_cmd_objs_delete_result = memcache.delete_multi(cmds, 0, WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_USER_CMD + str(user) + '_')
cmds_delete_result = memcache.delete(WEB_CMDS_MEMCACHE_KEY + IS_USER_CMD + str(user) + '_')
        if web_cmd_objs_delete_result and cmds_delete_result == DELETE_SUCCESSFUL:
return True
return False
@staticmethod
def reset_public_web_cmd_cache(web_cmds):
web_cmd_objs = {}
cmds = []
for web_cmd in web_cmds:
web_cmd_objs[web_cmd.cmd] = web_cmd
cmds.append(web_cmd.cmd)
Caching().flush_public_web_cmd_cache(cmds)
memcache_add_web_cmd_objs_result = memcache.add_multi(web_cmd_objs, 0, WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_PUBLIC_CMD)
memcache_add_web_cmds_result = memcache.add(WEB_CMDS_MEMCACHE_KEY + IS_PUBLIC_CMD, cmds, 0)
        if memcache_add_web_cmds_result and len(memcache_add_web_cmd_objs_result) == 0:
web_cmd_objs_memcached = memcache.get_multi(cmds, WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_PUBLIC_CMD)
return web_cmd_objs_memcached
@staticmethod
def reset_user_web_cmd_cache(web_cmds):
user = users.get_current_user()
web_cmd_objs = {}
cmds = []
for web_cmd in web_cmds:
web_cmd_objs[web_cmd.cmd] = web_cmd
cmds.append(web_cmd.cmd)
Caching().flush_user_web_cmd_cache(cmds)
memcache_add_web_cmd_objs_result = memcache.add_multi(web_cmd_objs, 0, WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_USER_CMD + str(user) + '_')
memcache_add_web_cmds_result = memcache.add(WEB_CMDS_MEMCACHE_KEY + IS_USER_CMD + str(user) + '_', cmds, 0)
        if memcache_add_web_cmds_result and len(memcache_add_web_cmd_objs_result) == 0:
web_cmd_objs_memcached = memcache.get_multi(cmds, WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_USER_CMD + str(user) + '_')
return web_cmd_objs_memcached
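# Note (not part of the original source): the cache layout used above and in
# Exec.get() below is two-level -- one memcache entry per command, keyed by
# WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX plus the visibility prefix (plus '<user>_'
# for private commands) plus the command string, and one entry holding the full
# list of command names under WEB_CMDS_MEMCACHE_KEY plus the same prefix. This
# is what lets Exec.get() resolve a command with a single memcache.get() call.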
import urllib
class Exec(webapp.RequestHandler):
def get(self):
DEFAULT_QUERY_URL = 'http://www.google.com/search?q='
QUERY_DELIMITER = ' '
        # WORKAROUND (API bug): self.request.get(QUERY_DELIMITER) was broken and stripped all special characters in Firefox
request_query = urllib.unquote_plus(urllib.splitvalue(self.request.query)[1]).split(QUERY_DELIMITER)
cmd = request_query[0]
        # FIX: if cmd contains special characters, a UnicodeDecodeError exception is raised
user = users.get_current_user()
if user is None:
web_cmd_from_mc = memcache.get(WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_PUBLIC_CMD + cmd)
else:
web_cmd_from_mc = memcache.get(WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX + IS_USER_CMD + str(user) + '_' + cmd)
if web_cmd_from_mc is None:
# Web Command is not in the cache
if user is None:
web_cmd_from_db = WebCmd().get_public_web_cmd(cmd)
else:
web_cmd_from_db = WebCmd().get_user_web_cmd(cmd)
if web_cmd_from_db is None:
# default fallback Web Command
query_url = DEFAULT_QUERY_URL
cmd_query = request_query
else:
# if Web Command exists but is not in the cache
if user is None:
web_cmds = WebCmd().get_public_web_cmds()
Caching().reset_public_web_cmd_cache(web_cmds)
else:
web_cmds = WebCmd().get_user_web_cmds()
Caching().reset_user_web_cmd_cache(web_cmds)
query_url = web_cmd_from_db.url
cmd_query = request_query[1:]
else:
query_url = web_cmd_from_mc.url
cmd_query = request_query[1:]
# self.redirect(query_url + str.join(QUERY_DELIMITER, cmd_query))
self.redirect(query_url + urllib.quote(str.join(QUERY_DELIMITER, cmd_query)))
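# Worked example (illustrative, not taken from the original code): a request to
# SERVICE_PATH + '/exec?cmd=w%20minor%20planet' yields request_query
# ['w', 'minor', 'planet']; 'w' selects the Web Command and the browser is
# redirected to web_cmd.url + 'minor%20planet', or to the Google fallback URL
# when 'w' is unknown.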
class WebCmd(db.Model):
""" definition of the Web Command model """
name = db.StringProperty()
cmd = db.StringProperty()
url = db.LinkProperty()
created = db.DateTimeProperty(auto_now_add = True)
updated = db.DateTimeProperty(auto_now = True)
is_public = db.BooleanProperty(default = users.is_current_user_admin()) # : see also create method (fix DOUBLE check)
owner = db.UserProperty(auto_current_user_add = True)
@staticmethod
def is_key_owner(key):
''' check current user the owner of the Web Command '''
web_cmd = db.get(key)
if web_cmd.owner == users.get_current_user():
return True
@staticmethod
def get_public_web_cmds():
''' get all public web commands '''
web_cmds = WebCmd.all().filter(IS_PUBLIC, True).order(SERVICE_URL_QUERY).fetch(99)
return web_cmds
@staticmethod
def get_public_web_cmd(cmd):
''' get a certain public web command '''
web_cmd = WebCmd.all().filter(IS_PUBLIC, True).filter(SERVICE_URL_QUERY, cmd).get()
return web_cmd
@staticmethod
def is_public_web_cmd(web_cmd):
''' True if a web command is already available to the public '''
web_cmd_cmd = WebCmd.all().filter(IS_PUBLIC, True).filter(SERVICE_URL_QUERY, web_cmd.cmd).get()
web_cmd_name = WebCmd.all().filter(IS_PUBLIC, True).filter('name', web_cmd.name).get()
web_cmd_url = WebCmd.all().filter(IS_PUBLIC, True).filter('url', web_cmd.url).get()
if web_cmd_cmd is not None or web_cmd_name is not None or web_cmd_url is not None:
return True
@staticmethod
def get_user_web_cmds():
''' get all web commands of the current user '''
web_cmds = WebCmd.all().filter('owner', users.get_current_user()).order(SERVICE_URL_QUERY).fetch(99)
return web_cmds
@staticmethod
def get_user_web_cmd(cmd):
''' get a certain web command of the current user '''
web_cmd = WebCmd.all().filter('owner', users.get_current_user()).filter(SERVICE_URL_QUERY, cmd).get()
return web_cmd
@staticmethod
def create_initial_user_web_cmd_set():
''' copy all public web commands to the initially created user '''
public_web_cmds = WebCmd.all().filter(IS_PUBLIC, True).fetch(99)
user_web_cmds = []
for public_web_cmd in public_web_cmds:
user_web_cmd = WebCmd()
user_web_cmd.cmd = public_web_cmd.cmd
user_web_cmd.name = public_web_cmd.name
user_web_cmd.url = public_web_cmd.url
user_web_cmd.is_public = False
user_web_cmd.owner = users.get_current_user()
user_web_cmds.append(user_web_cmd)
db.put(user_web_cmds)
@staticmethod
def is_initial_user_login():
''' check if there is any web command assigned to the current user '''
is_any_user_web_cmd = WebCmd.all().filter('owner', users.get_current_user()).get()
if is_any_user_web_cmd is None:
return True
class Delete(webapp.RequestHandler):
def get(self):
key = self.request.get('key')
if WebCmd().is_key_owner(key):
cmd = db.get(key).cmd
self.redirect('?key=%s&cmd=%s&confirm_delete=.' % (key, cmd))
else:
self.redirect(SERVICE_PATH)
def post(self):
is_confirmed = self.request.get('ok')
key = self.request.get('key')
if is_confirmed and WebCmd().is_key_owner(key):
cmd = db.get(key).cmd
db.delete(key)
self.redirect('%s?cmd=%s&info_success=.' % (SERVICE_PATH, cmd))
else:
self.redirect(SERVICE_PATH)
class Suggest(webapp.RequestHandler):
def get(self):
''' suggest a command to public via email '''
web_cmd = db.get(self.request.get('key'))
if WebCmd().is_key_owner(web_cmd.key()) and not WebCmd().is_public_web_cmd(web_cmd):
Suggest().send_suggestion(web_cmd)
self.redirect('%s?info_success=.' % SERVICE_PATH)
else:
self.redirect('%s?info_warning=.' % SERVICE_PATH)
@staticmethod
def send_suggestion(web_cmd):
''' send web command suggestion via email '''
msg = mail.EmailMessage()
msg.to = properties.MAIL_RECEIVER
subject = _('sol Notification: Web Commander - Suggestion')
msg.subject = subject.encode(ENCODING)
sender = '"' + str(users.get_current_user().nickname()) + '" <' + str(users.get_current_user().email()) + '>'
msg.sender = properties.MAIL_GAE_ACCOUNT
body = '''
user: \t%s\n
key: \t%s
cmd: \t%s
name: \t%s
url: \t%s
''' % (sender, web_cmd.key(), web_cmd.cmd, web_cmd.name, web_cmd.url)
msg.body = body.encode(ENCODING)
if msg.is_initialized():
# send_mail_to_admins(sender, subject, body, **kw)
msg.send()
class Edit(webapp.RequestHandler):
@staticmethod
def edit_cmd(key, name, url):
web_cmd = db.get(key)
web_cmd.name = name
web_cmd.url = url
db.put(web_cmd)
def post(self):
is_ok = self.request.get('ok')
key = self.request.get('key')
if is_ok and WebCmd().is_key_owner(key):
Edit().edit_cmd(key, self.request.get('name'), self.request.get('url'))
self.redirect(SERVICE_PATH)
class Create(webapp.RequestHandler):
@staticmethod
def is_user_admin():
if users.get_current_user().email() == '[email protected]':
return True
else:
return False
@staticmethod
def create(cmd, name, url):
""" create a new web command"""
web_cmd = WebCmd()
web_cmd.name = name
web_cmd.cmd = cmd
web_cmd.url = url
# web_cmd.is_public = users.is_current_user_admin()
web_cmd.is_public = Create().is_user_admin()
web_cmd.put()
def post(self):
cmd = self.request.get('cmd')
name = self.request.get('name')
url = self.request.get('url').encode(ENCODING)
is_cmd = WebCmd().get_user_web_cmd(cmd)
if is_cmd is None:
Create().create(cmd, name, url)
self.redirect(SERVICE_PATH)
else:
if self.request.get('action.edit'):
Edit().edit_cmd(is_cmd.key(), name, url)
self.redirect(SERVICE_PATH)
else:
self.redirect(SERVICE_PATH + '?key=%s&cmd=%s&name=%s&url=%s&edit=.' % (is_cmd.key(), cmd, name, url))
import os
from sol import properties
from google.appengine.ext import webapp
class Default(webapp.RequestHandler):
def get(self):
if users.get_current_user():
if WebCmd().is_initial_user_login():
WebCmd().create_initial_user_web_cmd_set()
web_cmds = WebCmd.get_user_web_cmds()
else:
web_cmds = WebCmd.get_public_web_cmds()
from sol import template_filters
from datetime import datetime
template_properties = {
'properties' : properties.template,
'web_cmds' : web_cmds,
'is_cmd' : self.request.get('is_cmd'),
'key' : self.request.get('key'),
'cmd' : self.request.get('cmd').encode(ENCODING),
'name' : self.request.get('name'),
'url' : self.request.get('url'),
'confirm_delete' : self.request.get('confirm_delete'),
'info_success' : self.request.get('info_success'),
'info_warning' : self.request.get('info_warning'),
'edit' : self.request.get('edit'),
'edit_instant' : self.request.get('edit_instant'),
'service_path' : SERVICE_PATH,
# 'service_url' : 'http://%s%s%s?%s=' % (self.request.headers.get('Host'), SERVICE_PATH, SERVICE_URL_SUFFIX, SERVICE_URL_QUERY),
'user_agent' : self.request.headers.get('User-Agent'),
'req_url' : self.request.url,
'user' : users.get_current_user(),
'is_user_admin' : users.is_current_user_admin(),
}
req_path = self.request.path
# handling: entry page "exclusion"
if self.request.path.endswith('/'):
req_path = SERVICE_PATH
template_path = properties.TPL_DIR + req_path + '.html'
# handling: "Error: 404 Not Found"
if not os.path.isfile(template_path):
template_path = properties.TPL_DIR + properties.TPL_404_NOT_FOUND
path = os.path.join(os.path.dirname(__file__), template_path)
self.response.out.write(template.render(path, template_properties))
ENCODING = 'utf-8'
SERVICE_PATH = '/service/web-commander'
SERVICE_URL_SUFFIX = '/exec'
SERVICE_URL_QUERY = 'cmd'
WEB_CMD_OBJS_MEMCACHE_KEY_PREFIX = 'web_cmds_objs_'
WEB_CMDS_MEMCACHE_KEY = 'web_cmds'
IS_PUBLIC_CMD = 'is_public_cmd_'
IS_USER_CMD = 'is_user_cmd_'
IS_PUBLIC = 'is_public'
def main():
app = webapp.WSGIApplication([
(SERVICE_PATH + '/create', Create),
(SERVICE_PATH + '/delete', Delete),
(SERVICE_PATH + '/edit', Edit),
(SERVICE_PATH + '/suggest', Suggest),
(SERVICE_PATH + SERVICE_URL_SUFFIX, Exec),
('.*', Default),
], debug = settings.DEBUG)
util.run_wsgi_app(app)
if __name__ == '__main__':
main()
|
|
import numpy as np
import pytest
import pandas as pd
from pandas import DataFrame, Series, concat
import pandas._testing as tm
from pandas.core.base import DataError
def test_rank_apply():
lev1 = tm.rands_array(10, 100)
lev2 = tm.rands_array(10, 130)
lab1 = np.random.randint(0, 100, size=500)
lab2 = np.random.randint(0, 130, size=500)
df = DataFrame(
{
"value": np.random.randn(500),
"key1": lev1.take(lab1),
"key2": lev2.take(lab2),
}
)
result = df.groupby(["key1", "key2"]).value.rank()
expected = [piece.value.rank() for key, piece in df.groupby(["key1", "key2"])]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
result = df.groupby(["key1", "key2"]).value.rank(pct=True)
expected = [
piece.value.rank(pct=True) for key, piece in df.groupby(["key1", "key2"])
]
expected = concat(expected, axis=0)
expected = expected.reindex(result.index)
tm.assert_series_equal(result, expected)
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
np.array([2, 2, 8, 2, 6], dtype=dtype)
for dtype in ["i8", "i4", "i2", "i1", "u8", "u4", "u2", "u1", "f8", "f4", "f2"]
]
+ [
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
],
[
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-08", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-06", tz="US/Pacific"),
],
[
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-08") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-06") - pd.Timestamp(0),
],
[
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-08").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-06").to_period("D"),
],
],
ids=lambda x: type(x[0]),
)
@pytest.mark.parametrize(
"ties_method,ascending,pct,exp",
[
("average", True, False, [2.0, 2.0, 5.0, 2.0, 4.0]),
("average", True, True, [0.4, 0.4, 1.0, 0.4, 0.8]),
("average", False, False, [4.0, 4.0, 1.0, 4.0, 2.0]),
("average", False, True, [0.8, 0.8, 0.2, 0.8, 0.4]),
("min", True, False, [1.0, 1.0, 5.0, 1.0, 4.0]),
("min", True, True, [0.2, 0.2, 1.0, 0.2, 0.8]),
("min", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("min", False, True, [0.6, 0.6, 0.2, 0.6, 0.4]),
("max", True, False, [3.0, 3.0, 5.0, 3.0, 4.0]),
("max", True, True, [0.6, 0.6, 1.0, 0.6, 0.8]),
("max", False, False, [5.0, 5.0, 1.0, 5.0, 2.0]),
("max", False, True, [1.0, 1.0, 0.2, 1.0, 0.4]),
("first", True, False, [1.0, 2.0, 5.0, 3.0, 4.0]),
("first", True, True, [0.2, 0.4, 1.0, 0.6, 0.8]),
("first", False, False, [3.0, 4.0, 1.0, 5.0, 2.0]),
("first", False, True, [0.6, 0.8, 0.2, 1.0, 0.4]),
("dense", True, False, [1.0, 1.0, 3.0, 1.0, 2.0]),
("dense", True, True, [1.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 2.0 / 3.0]),
("dense", False, False, [3.0, 3.0, 1.0, 3.0, 2.0]),
("dense", False, True, [3.0 / 3.0, 3.0 / 3.0, 1.0 / 3.0, 3.0 / 3.0, 2.0 / 3.0]),
],
)
def test_rank_args(grps, vals, ties_method, ascending, pct, exp):
key = np.repeat(grps, len(vals))
orig_vals = vals
vals = list(vals) * len(grps)
if isinstance(orig_vals, np.ndarray):
vals = np.array(vals, dtype=orig_vals.dtype)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(method=ties_method, ascending=ascending, pct=pct)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
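# Worked example for one parametrization above (illustrative): for
# vals = [2, 2, 8, 2, 6] with method "average" and ascending=True, the three 2s
# share ranks 1-3 and average to 2.0, 6 takes rank 4 and 8 takes rank 5, giving
# the expected [2.0, 2.0, 5.0, 2.0, 4.0].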
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals", [[-np.inf, -np.inf, np.nan, 1.0, np.nan, np.inf, np.inf]]
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,exp",
[
("average", True, "keep", [1.5, 1.5, np.nan, 3, np.nan, 4.5, 4.5]),
("average", True, "top", [3.5, 3.5, 1.5, 5.0, 1.5, 6.5, 6.5]),
("average", True, "bottom", [1.5, 1.5, 6.5, 3.0, 6.5, 4.5, 4.5]),
("average", False, "keep", [4.5, 4.5, np.nan, 3, np.nan, 1.5, 1.5]),
("average", False, "top", [6.5, 6.5, 1.5, 5.0, 1.5, 3.5, 3.5]),
("average", False, "bottom", [4.5, 4.5, 6.5, 3.0, 6.5, 1.5, 1.5]),
("min", True, "keep", [1.0, 1.0, np.nan, 3.0, np.nan, 4.0, 4.0]),
("min", True, "top", [3.0, 3.0, 1.0, 5.0, 1.0, 6.0, 6.0]),
("min", True, "bottom", [1.0, 1.0, 6.0, 3.0, 6.0, 4.0, 4.0]),
("min", False, "keep", [4.0, 4.0, np.nan, 3.0, np.nan, 1.0, 1.0]),
("min", False, "top", [6.0, 6.0, 1.0, 5.0, 1.0, 3.0, 3.0]),
("min", False, "bottom", [4.0, 4.0, 6.0, 3.0, 6.0, 1.0, 1.0]),
("max", True, "keep", [2.0, 2.0, np.nan, 3.0, np.nan, 5.0, 5.0]),
("max", True, "top", [4.0, 4.0, 2.0, 5.0, 2.0, 7.0, 7.0]),
("max", True, "bottom", [2.0, 2.0, 7.0, 3.0, 7.0, 5.0, 5.0]),
("max", False, "keep", [5.0, 5.0, np.nan, 3.0, np.nan, 2.0, 2.0]),
("max", False, "top", [7.0, 7.0, 2.0, 5.0, 2.0, 4.0, 4.0]),
("max", False, "bottom", [5.0, 5.0, 7.0, 3.0, 7.0, 2.0, 2.0]),
("first", True, "keep", [1.0, 2.0, np.nan, 3.0, np.nan, 4.0, 5.0]),
("first", True, "top", [3.0, 4.0, 1.0, 5.0, 2.0, 6.0, 7.0]),
("first", True, "bottom", [1.0, 2.0, 6.0, 3.0, 7.0, 4.0, 5.0]),
("first", False, "keep", [4.0, 5.0, np.nan, 3.0, np.nan, 1.0, 2.0]),
("first", False, "top", [6.0, 7.0, 1.0, 5.0, 2.0, 3.0, 4.0]),
("first", False, "bottom", [4.0, 5.0, 6.0, 3.0, 7.0, 1.0, 2.0]),
("dense", True, "keep", [1.0, 1.0, np.nan, 2.0, np.nan, 3.0, 3.0]),
("dense", True, "top", [2.0, 2.0, 1.0, 3.0, 1.0, 4.0, 4.0]),
("dense", True, "bottom", [1.0, 1.0, 4.0, 2.0, 4.0, 3.0, 3.0]),
("dense", False, "keep", [3.0, 3.0, np.nan, 2.0, np.nan, 1.0, 1.0]),
("dense", False, "top", [4.0, 4.0, 1.0, 3.0, 1.0, 2.0, 2.0]),
("dense", False, "bottom", [3.0, 3.0, 4.0, 2.0, 4.0, 1.0, 1.0]),
],
)
def test_infs_n_nans(grps, vals, ties_method, ascending, na_option, exp):
# GH 20561
key = np.repeat(grps, len(vals))
vals = vals * len(grps)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
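# Worked example for na_option handling above (illustrative): with
# method "average", ascending=True and na_option="top", the two NaNs are ranked
# ahead of everything (ranks 1-2, average 1.5), the two -inf values average to
# 3.5, the 1.0 ranks 5th and the two inf values average to 6.5.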
@pytest.mark.parametrize("grps", [["qux"], ["qux", "quux"]])
@pytest.mark.parametrize(
"vals",
[
np.array([2, 2, np.nan, 8, 2, 6, np.nan, np.nan], dtype=dtype)
for dtype in ["f8", "f4", "f2"]
]
+ [
[
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-02"),
np.nan,
pd.Timestamp("2018-01-08"),
pd.Timestamp("2018-01-02"),
pd.Timestamp("2018-01-06"),
np.nan,
np.nan,
],
[
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
np.nan,
pd.Timestamp("2018-01-08", tz="US/Pacific"),
pd.Timestamp("2018-01-02", tz="US/Pacific"),
pd.Timestamp("2018-01-06", tz="US/Pacific"),
np.nan,
np.nan,
],
[
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
np.nan,
pd.Timestamp("2018-01-08") - pd.Timestamp(0),
pd.Timestamp("2018-01-02") - pd.Timestamp(0),
pd.Timestamp("2018-01-06") - pd.Timestamp(0),
np.nan,
np.nan,
],
[
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
np.nan,
pd.Timestamp("2018-01-08").to_period("D"),
pd.Timestamp("2018-01-02").to_period("D"),
pd.Timestamp("2018-01-06").to_period("D"),
np.nan,
np.nan,
],
],
ids=lambda x: type(x[0]),
)
@pytest.mark.parametrize(
"ties_method,ascending,na_option,pct,exp",
[
(
"average",
True,
"keep",
False,
[2.0, 2.0, np.nan, 5.0, 2.0, 4.0, np.nan, np.nan],
),
(
"average",
True,
"keep",
True,
[0.4, 0.4, np.nan, 1.0, 0.4, 0.8, np.nan, np.nan],
),
(
"average",
False,
"keep",
False,
[4.0, 4.0, np.nan, 1.0, 4.0, 2.0, np.nan, np.nan],
),
(
"average",
False,
"keep",
True,
[0.8, 0.8, np.nan, 0.2, 0.8, 0.4, np.nan, np.nan],
),
("min", True, "keep", False, [1.0, 1.0, np.nan, 5.0, 1.0, 4.0, np.nan, np.nan]),
("min", True, "keep", True, [0.2, 0.2, np.nan, 1.0, 0.2, 0.8, np.nan, np.nan]),
(
"min",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
("min", False, "keep", True, [0.6, 0.6, np.nan, 0.2, 0.6, 0.4, np.nan, np.nan]),
("max", True, "keep", False, [3.0, 3.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan]),
("max", True, "keep", True, [0.6, 0.6, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan]),
(
"max",
False,
"keep",
False,
[5.0, 5.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
("max", False, "keep", True, [1.0, 1.0, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan]),
(
"first",
True,
"keep",
False,
[1.0, 2.0, np.nan, 5.0, 3.0, 4.0, np.nan, np.nan],
),
(
"first",
True,
"keep",
True,
[0.2, 0.4, np.nan, 1.0, 0.6, 0.8, np.nan, np.nan],
),
(
"first",
False,
"keep",
False,
[3.0, 4.0, np.nan, 1.0, 5.0, 2.0, np.nan, np.nan],
),
(
"first",
False,
"keep",
True,
[0.6, 0.8, np.nan, 0.2, 1.0, 0.4, np.nan, np.nan],
),
(
"dense",
True,
"keep",
False,
[1.0, 1.0, np.nan, 3.0, 1.0, 2.0, np.nan, np.nan],
),
(
"dense",
True,
"keep",
True,
[
1.0 / 3.0,
1.0 / 3.0,
np.nan,
3.0 / 3.0,
1.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
(
"dense",
False,
"keep",
False,
[3.0, 3.0, np.nan, 1.0, 3.0, 2.0, np.nan, np.nan],
),
(
"dense",
False,
"keep",
True,
[
3.0 / 3.0,
3.0 / 3.0,
np.nan,
1.0 / 3.0,
3.0 / 3.0,
2.0 / 3.0,
np.nan,
np.nan,
],
),
("average", True, "bottom", False, [2.0, 2.0, 7.0, 5.0, 2.0, 4.0, 7.0, 7.0]),
(
"average",
True,
"bottom",
True,
[0.25, 0.25, 0.875, 0.625, 0.25, 0.5, 0.875, 0.875],
),
("average", False, "bottom", False, [4.0, 4.0, 7.0, 1.0, 4.0, 2.0, 7.0, 7.0]),
(
"average",
False,
"bottom",
True,
[0.5, 0.5, 0.875, 0.125, 0.5, 0.25, 0.875, 0.875],
),
("min", True, "bottom", False, [1.0, 1.0, 6.0, 5.0, 1.0, 4.0, 6.0, 6.0]),
(
"min",
True,
"bottom",
True,
[0.125, 0.125, 0.75, 0.625, 0.125, 0.5, 0.75, 0.75],
),
("min", False, "bottom", False, [3.0, 3.0, 6.0, 1.0, 3.0, 2.0, 6.0, 6.0]),
(
"min",
False,
"bottom",
True,
[0.375, 0.375, 0.75, 0.125, 0.375, 0.25, 0.75, 0.75],
),
("max", True, "bottom", False, [3.0, 3.0, 8.0, 5.0, 3.0, 4.0, 8.0, 8.0]),
("max", True, "bottom", True, [0.375, 0.375, 1.0, 0.625, 0.375, 0.5, 1.0, 1.0]),
("max", False, "bottom", False, [5.0, 5.0, 8.0, 1.0, 5.0, 2.0, 8.0, 8.0]),
(
"max",
False,
"bottom",
True,
[0.625, 0.625, 1.0, 0.125, 0.625, 0.25, 1.0, 1.0],
),
("first", True, "bottom", False, [1.0, 2.0, 6.0, 5.0, 3.0, 4.0, 7.0, 8.0]),
(
"first",
True,
"bottom",
True,
[0.125, 0.25, 0.75, 0.625, 0.375, 0.5, 0.875, 1.0],
),
("first", False, "bottom", False, [3.0, 4.0, 6.0, 1.0, 5.0, 2.0, 7.0, 8.0]),
(
"first",
False,
"bottom",
True,
[0.375, 0.5, 0.75, 0.125, 0.625, 0.25, 0.875, 1.0],
),
("dense", True, "bottom", False, [1.0, 1.0, 4.0, 3.0, 1.0, 2.0, 4.0, 4.0]),
("dense", True, "bottom", True, [0.25, 0.25, 1.0, 0.75, 0.25, 0.5, 1.0, 1.0]),
("dense", False, "bottom", False, [3.0, 3.0, 4.0, 1.0, 3.0, 2.0, 4.0, 4.0]),
("dense", False, "bottom", True, [0.75, 0.75, 1.0, 0.25, 0.75, 0.5, 1.0, 1.0]),
],
)
def test_rank_args_missing(grps, vals, ties_method, ascending, na_option, pct, exp):
key = np.repeat(grps, len(vals))
orig_vals = vals
vals = list(vals) * len(grps)
if isinstance(orig_vals, np.ndarray):
vals = np.array(vals, dtype=orig_vals.dtype)
df = DataFrame({"key": key, "val": vals})
result = df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
)
exp_df = DataFrame(exp * len(grps), columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.parametrize(
"pct,exp", [(False, [3.0, 3.0, 3.0, 3.0, 3.0]), (True, [0.6, 0.6, 0.6, 0.6, 0.6])]
)
def test_rank_resets_each_group(pct, exp):
df = DataFrame(
{"key": ["a", "a", "a", "a", "a", "b", "b", "b", "b", "b"], "val": [1] * 10}
)
result = df.groupby("key").rank(pct=pct)
exp_df = DataFrame(exp * 2, columns=["val"])
tm.assert_frame_equal(result, exp_df)
def test_rank_avg_even_vals():
df = DataFrame({"key": ["a"] * 4, "val": [1] * 4})
result = df.groupby("key").rank()
exp_df = DataFrame([2.5, 2.5, 2.5, 2.5], columns=["val"])
tm.assert_frame_equal(result, exp_df)
@pytest.mark.xfail(reason="Works now, needs tests")
@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("na_option", ["keep", "top", "bottom"])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize(
"vals", [["bar", "bar", "foo", "bar", "baz"], ["bar", np.nan, "foo", np.nan, "baz"]]
)
def test_rank_object_raises(ties_method, ascending, na_option, pct, vals):
df = DataFrame({"key": ["foo"] * 5, "val": vals})
with pytest.raises(DataError, match="No numeric types to aggregate"):
df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
)
@pytest.mark.parametrize("na_option", [True, "bad", 1])
@pytest.mark.parametrize("ties_method", ["average", "min", "max", "first", "dense"])
@pytest.mark.parametrize("ascending", [True, False])
@pytest.mark.parametrize("pct", [True, False])
@pytest.mark.parametrize(
"vals",
[
["bar", "bar", "foo", "bar", "baz"],
["bar", np.nan, "foo", np.nan, "baz"],
[1, np.nan, 2, np.nan, 3],
],
)
def test_rank_naoption_raises(ties_method, ascending, na_option, pct, vals):
df = DataFrame({"key": ["foo"] * 5, "val": vals})
msg = "na_option must be one of 'keep', 'top', or 'bottom'"
with pytest.raises(ValueError, match=msg):
df.groupby("key").rank(
method=ties_method, ascending=ascending, na_option=na_option, pct=pct
)
def test_rank_empty_group():
# see gh-22519
column = "A"
df = DataFrame({"A": [0, 1, 0], "B": [1.0, np.nan, 2.0]})
result = df.groupby(column).B.rank(pct=True)
expected = Series([0.5, np.nan, 1.0], name="B")
tm.assert_series_equal(result, expected)
result = df.groupby(column).rank(pct=True)
expected = DataFrame({"B": [0.5, np.nan, 1.0]})
tm.assert_frame_equal(result, expected)
@pytest.mark.parametrize(
"input_key,input_value,output_value",
[
([1, 2], [1, 1], [1.0, 1.0]),
([1, 1, 2, 2], [1, 2, 1, 2], [0.5, 1.0, 0.5, 1.0]),
([1, 1, 2, 2], [1, 2, 1, np.nan], [0.5, 1.0, 1.0, np.nan]),
([1, 1, 2], [1, 2, np.nan], [0.5, 1.0, np.nan]),
],
)
def test_rank_zero_div(input_key, input_value, output_value):
# GH 23666
df = DataFrame({"A": input_key, "B": input_value})
result = df.groupby("A").rank(method="dense", pct=True)
expected = DataFrame({"B": output_value})
tm.assert_frame_equal(result, expected)
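# Worked example for the zero-division fix above (illustrative): in the third
# parametrization the group A == 2 contains the values [1, np.nan]; with
# method="dense" and pct=True the 1 is ranked 1 out of 1 distinct non-NaN
# value, giving 1.0 instead of a division by zero, and the NaN stays NaN.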
|
|
from functools import wraps, partial
import inspect
from json import JSONEncoder
from threading import local as threadlocal
from typing import AnyStr, Tuple, Optional
import warnings
import copy
import logging
from datetime import datetime, timedelta
from urllib.parse import urlparse, urlunsplit
MAX_PAYLOAD_LENGTH = 128 * 1024
MAX_STRING_LENGTH = 1024
__all__ = [] # type: ignore
class SanitizingJSONEncoder(JSONEncoder):
"""
A JSON encoder which handles filtering and conversion from JSON-
incompatible types to strings.
>>> import logging
>>> from json import loads
>>> logger = logging.getLogger(__name__)
>>> encoder = SanitizingJSONEncoder(logger, keyword_filters=['bananas'])
>>> items = loads(encoder.encode(FilterDict({'carrots': 4, 'bananas': 5})))
>>> items['bananas']
'[FILTERED]'
>>> items['carrots']
4
"""
filtered_value = '[FILTERED]'
recursive_value = '[RECURSIVE]'
unencodeable_value = '[BADENCODING]'
def __init__(self, logger: logging.Logger, keyword_filters=None, **kwargs):
self.logger = logger
self.filters = list(map(str.lower, keyword_filters or []))
self.bytes_filters = [x.encode('utf-8') for x in self.filters]
super(SanitizingJSONEncoder, self).__init__(**kwargs)
def encode(self, obj):
safe_obj = self._sanitize(obj, False)
payload = super(SanitizingJSONEncoder, self).encode(safe_obj)
if len(payload) > MAX_PAYLOAD_LENGTH:
safe_obj = self._sanitize(safe_obj, True)
return super(SanitizingJSONEncoder, self).encode(safe_obj)
else:
return payload
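    # Note (not part of the original source): encode() only makes the second,
    # string-trimming sanitizing pass when the first rendering of the payload
    # exceeds MAX_PAYLOAD_LENGTH; long strings are then cut to
    # MAX_STRING_LENGTH characters by _sanitize().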
def filter_string_values(self, obj, ignored=None, seen=None):
"""
        Replace any value in the dictionary whose key matches the key filters
"""
if not ignored:
ignored = set()
# Keep track of nested objects to avoid having references garbage
        # collected (which would cause id reuse and false positive recursion)
if seen is None:
seen = []
if type(ignored) is list:
ignored = set(ignored)
if id(obj) in ignored:
return self.recursive_value
if isinstance(obj, dict):
ignored.add(id(obj))
seen.append(obj)
clean_dict = {}
for key, value in obj.items():
if self._should_filter(key):
clean_dict[key] = self.filtered_value
else:
clean_dict[key] = self.filter_string_values(
value, ignored, seen)
return clean_dict
return obj
def default(self, obj):
"""
Coerce values to strings if possible, otherwise replace with
'[BADENCODING]'
"""
try:
if isinstance(obj, bytes):
return str(obj, encoding='utf-8', errors='replace')
else:
return str(obj)
except Exception:
self.logger.exception('Could not add object to payload')
return self.unencodeable_value
def _sanitize(self, obj, trim_strings, ignored=None, seen=None):
"""
Replace recursive values and trim strings longer than
MAX_STRING_LENGTH
"""
if not ignored:
ignored = set()
# Keep track of nested objects to avoid having references garbage
# collected (which would cause id reuse and false positive recursion)
if seen is None:
seen = []
if type(ignored) is list:
ignored = set(ignored)
if id(obj) in ignored:
return self.recursive_value
elif isinstance(obj, dict):
ignored.add(id(obj))
seen.append(obj)
return self._sanitize_dict(obj, trim_strings, ignored, seen)
elif isinstance(obj, (set, tuple, list)):
ignored.add(id(obj))
seen.append(obj)
items = []
for value in obj:
items.append(
self._sanitize(value, trim_strings, ignored, seen))
return items
elif trim_strings and isinstance(obj, str):
return obj[:MAX_STRING_LENGTH]
else:
return obj
def _sanitize_dict_key_value(self, clean_dict, key, clean_value):
"""
Safely sets the provided key on the dictionary by coercing the key
to a string
"""
if isinstance(key, bytes):
try:
key = str(key, encoding='utf-8', errors='replace')
clean_dict[key] = clean_value
except Exception:
self.logger.exception(
'Could not add sanitize key for dictionary, '
'dropping value.')
if isinstance(key, str):
clean_dict[key] = clean_value
else:
try:
clean_dict[str(key)] = clean_value
except Exception:
self.logger.exception(
'Could not add sanitize key for dictionary, '
'dropping value.')
def _sanitize_dict(self, obj, trim_strings, ignored, seen):
"""
Trim individual values in an object, applying filtering if the object
is a FilterDict
"""
if isinstance(obj, FilterDict):
obj = self.filter_string_values(obj)
clean_dict = {}
for key, value in obj.items():
clean_value = self._sanitize(value, trim_strings, ignored, seen)
self._sanitize_dict_key_value(clean_dict, key, clean_value)
return clean_dict
def _should_filter(self, key):
if isinstance(key, str):
key_lower = key.lower()
return any(f in key_lower for f in self.filters)
if isinstance(key, bytes):
key_lower = key.lower()
return any(f in key_lower for f in self.bytes_filters)
return False
class FilterDict(dict):
"""
Object which will be filtered when encoded
"""
pass
ContentType = Tuple[str, Optional[str], Optional[str], Optional[str]]
def parse_content_type(value: str) -> ContentType:
"""
Generate a tuple of (type, subtype, suffix, parameters) from a type based
on RFC 6838
>>> parse_content_type("text/plain")
('text', 'plain', None, None)
>>> parse_content_type("application/hal+json")
('application', 'hal', 'json', None)
>>> parse_content_type("application/json;schema=\\"ftp://example.com/a\\"")
('application', 'json', None, 'schema="ftp://example.com/a"')
"""
parameters = None # type: Optional[str]
if ';' in value:
types, parameters = value.split(';', 1)
else:
types = value
if '/' in types:
maintype, subtype = types.split('/', 1)
if '+' in subtype:
subtype, suffix = subtype.split('+', 1)
return (maintype, subtype, suffix, parameters)
else:
return (maintype, subtype, None, parameters)
else:
return (types, None, None, parameters)
def is_json_content_type(value: str) -> bool:
"""
Check if a content type is JSON-parseable
>>> is_json_content_type('text/plain')
False
>>> is_json_content_type('application/schema+json')
True
>>> is_json_content_type('application/json')
True
"""
type, subtype, suffix, _ = parse_content_type(value.lower())
return type == 'application' and (subtype == 'json' or suffix == 'json')
def fully_qualified_class_name(obj):
module = inspect.getmodule(obj)
if module is not None and module.__name__ != "__main__":
return module.__name__ + "." + obj.__class__.__name__
else:
return obj.__class__.__name__
def package_version(package_name):
try:
import pkg_resources
except ImportError:
return None
else:
try:
return pkg_resources.get_distribution(package_name).version
except pkg_resources.DistributionNotFound:
return None
def _validate_setter(types, func, should_error=False):
"""
Check that the first argument of a function is of a provided set of types
before calling the body of the wrapped function, printing a runtime warning
(or raising a TypeError) if the validation fails.
"""
@wraps(func)
def wrapper(obj, value):
option_name = func.__name__
if value is None or isinstance(value, types):
func(obj, value)
else:
error_format = '{0} should be {1}, got {2}'
actual = type(value).__name__
requirement = ' or '.join([t.__name__ for t in types])
message = error_format.format(option_name, requirement, actual)
if should_error:
raise TypeError(message)
else:
warnings.warn(message, RuntimeWarning)
return wrapper
validate_str_setter = partial(_validate_setter, (str,))
validate_required_str_setter = partial(_validate_setter, (str,),
should_error=True)
validate_bool_setter = partial(_validate_setter, (bool,))
validate_iterable_setter = partial(_validate_setter, (list, tuple))
validate_int_setter = partial(_validate_setter, (int,))
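# A minimal usage sketch (not part of the original module): these partials are
# intended to wrap property setters so that assigning a wrongly-typed value
# warns at runtime, or raises a TypeError for the "required" variant. The class
# below is a made-up example, not a real configuration object.
class _ExampleOptions:
    @property
    def api_key(self):
        return getattr(self, '_api_key', None)

    @api_key.setter
    @validate_required_str_setter
    def api_key(self, value):
        self._api_key = value
# e.g. _ExampleOptions().api_key = 42 raises
# TypeError('api_key should be str, got int')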
class ThreadContextVar:
"""
A wrapper around thread-local variables to mimic the API of contextvars
"""
LOCALS = None
@classmethod
def local_context(cls):
if not ThreadContextVar.LOCALS:
ThreadContextVar.LOCALS = threadlocal()
return ThreadContextVar.LOCALS
def __init__(self, name, **kwargs):
self.name = name
# Mimic the behaviour of ContextVar - if a default has been explicitly
# passed then we will use it, otherwise don't set an initial value
# This allows 'get' to know when to raise a LookupError
if 'default' in kwargs:
self.default = kwargs['default']
# Make a deep copy so this thread starts with a fresh default
self.set(copy.deepcopy(self.default))
def get(self):
local = ThreadContextVar.local_context()
if hasattr(local, self.name):
return getattr(local, self.name)
if hasattr(self, 'default'):
# Make a deep copy so that each thread starts with a fresh default
result = copy.deepcopy(self.default)
self.set(result)
return result
raise LookupError("No value for '{}'".format(self.name))
def set(self, new_value):
setattr(ThreadContextVar.local_context(), self.name, new_value)
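# A minimal sketch of the intended use (assumption, not from the original
# module): each thread that uses the variable works on its own deep copy of the
# default value.
def _thread_context_var_example():
    breadcrumbs = ThreadContextVar('example_breadcrumbs', default=[])
    breadcrumbs.get().append('loaded configuration')
    return breadcrumbs.get()  # ['loaded configuration'] in this thread only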
def sanitize_url(url_to_sanitize: AnyStr) -> Optional[AnyStr]:
try:
parsed = urlparse(url_to_sanitize)
sanitized_url = urlunsplit(
# urlunsplit always requires 5 elements in this tuple
(parsed.scheme, parsed.netloc, parsed.path, None, None)
).strip()
except Exception:
return None
# If the sanitized url is empty then it did not have any of the components
# we are interested in, so return None to indicate failure
if not sanitized_url:
return None
return sanitized_url
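# Illustrative behaviour (not from the original file): the query string and
# fragment are stripped while scheme, host and path are kept, so
# sanitize_url('https://example.com/login?token=abc#top') returns
# 'https://example.com/login'; an unparseable or empty URL returns None.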
# to_rfc3339: format a datetime instance as an RFC 3339 / ISO 8601 string with
# millisecond precision
# Python can do this natively from version 3.6, but we need to include a
# fallback implementation for Python 3.5
try:
# this will raise if 'timespec' isn't supported
datetime.utcnow().isoformat(timespec='milliseconds') # type: ignore
def to_rfc3339(dt: datetime) -> str:
return dt.isoformat(timespec='milliseconds') # type: ignore
except Exception:
def _get_timezone_offset(dt: datetime) -> str:
if dt.tzinfo is None:
return ''
utc_offset = dt.tzinfo.utcoffset(dt)
if utc_offset is None:
return ''
sign = '+'
if utc_offset.days < 0:
sign = '-'
utc_offset = -utc_offset
hours_offset, minutes = divmod(utc_offset, timedelta(hours=1))
minutes_offset, seconds = divmod(minutes, timedelta(minutes=1))
return '{:s}{:02d}:{:02d}'.format(sign, hours_offset, minutes_offset)
def to_rfc3339(dt: datetime) -> str:
return '{:04d}-{:02d}-{:02d}T{:02d}:{:02d}:{:02d}.{:03d}{:s}'.format(
dt.year,
dt.month,
dt.day,
dt.hour,
dt.minute,
dt.second,
int(dt.microsecond / 1000),
_get_timezone_offset(dt)
)
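# Example of the expected output (illustrative): both implementations format
# datetime(2013, 1, 2, 3, 4, 5, 6000) as '2013-01-02T03:04:05.006', with a
# '+HH:MM'/'-HH:MM' offset appended when the datetime is timezone-aware.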
|
|
# Copyright (c) 2013 ARM Limited
# All rights reserved
#
# The license below extends only to copyright in the software and shall
# not be construed as granting a license to any other intellectual
# property including but not limited to intellectual property relating
# to a hardware implementation of the functionality of the software
# licensed hereunder. You may use the software subject to the license
# terms below provided that you ensure that this notice is replicated
# unmodified and in its entirety in all distributions of the software,
# modified or unmodified, in source code or in binary form.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met: redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer;
# redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution;
# neither the name of the copyright holders nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
# Authors: Andrew Bardsley
import parse
import colours
from colours import unknownColour
from point import Point
import re
import blobs
from time import time as wall_time
import os
id_parts = "TSPLFE"
all_ids = set(id_parts)
no_ids = set([])
class BlobDataSelect(object):
"""Represents which data is displayed for Ided object"""
def __init__(self):
# Copy all_ids
self.ids = set(all_ids)
def __and__(self, rhs):
"""And for filtering"""
ret = BlobDataSelect()
ret.ids = self.ids.intersection(rhs.ids)
return ret
class BlobVisualData(object):
"""Super class for block data colouring"""
def to_striped_block(self, select):
"""Return an array of colours to use for a striped block"""
return unknownColour
def get_inst(self):
"""Get an instruction Id (if any) from this data"""
return None
def get_line(self):
"""Get a line Id (if any) from this data"""
return None
def __repr__(self):
return self.__class__.__name__ + '().from_string(' + \
self.__str__() + ')'
def __str__(self):
return ''
class Id(BlobVisualData):
"""A line or instruction id"""
def __init__(self):
self.isFault = False
self.threadId = 0
self.streamSeqNum = 0
self.predictionSeqNum = 0
self.lineSeqNum = 0
self.fetchSeqNum = 0
self.execSeqNum = 0
def as_list(self):
return [self.threadId, self.streamSeqNum, self.predictionSeqNum,
self.lineSeqNum, self.fetchSeqNum, self.execSeqNum]
def __cmp__(self, right):
return cmp(self.as_list(), right.as_list())
def from_string(self, string):
m = re.match('^(F;)?(\d+)/(\d+)\.(\d+)/(\d+)(/(\d+)(\.(\d+))?)?',
string)
def seqnum_from_string(string):
if string is None:
return 0
else:
return int(string)
if m is None:
print 'Invalid Id string', string
else:
elems = m.groups()
if elems[0] is not None:
self.isFault = True
else:
self.isFault = False
self.threadId = seqnum_from_string(elems[1])
self.streamSeqNum = seqnum_from_string(elems[2])
self.predictionSeqNum = seqnum_from_string(elems[3])
self.lineSeqNum = seqnum_from_string(elems[4])
self.fetchSeqNum = seqnum_from_string(elems[6])
self.execSeqNum = seqnum_from_string(elems[8])
return self
def get_inst(self):
if self.fetchSeqNum != 0:
return self
else:
return None
def get_line(self):
return self
def __str__(self):
"""Returns the usual id T/S.P/L/F.E string"""
return (
str(self.threadId) + '/' +
str(self.streamSeqNum) + '.' +
str(self.predictionSeqNum) + '/' +
str(self.lineSeqNum) + '/' +
str(self.fetchSeqNum) + '.' +
str(self.execSeqNum))
def to_striped_block(self, select):
ret = []
if self.isFault:
ret.append(colours.faultColour)
if 'T' in select.ids:
ret.append(colours.number_to_colour(self.threadId))
if 'S' in select.ids:
ret.append(colours.number_to_colour(self.streamSeqNum))
if 'P' in select.ids:
ret.append(colours.number_to_colour(self.predictionSeqNum))
if 'L' in select.ids:
ret.append(colours.number_to_colour(self.lineSeqNum))
if self.fetchSeqNum != 0 and 'F' in select.ids:
ret.append(colours.number_to_colour(self.fetchSeqNum))
if self.execSeqNum != 0 and 'E' in select.ids:
ret.append(colours.number_to_colour(self.execSeqNum))
if len(ret) == 0:
ret = [colours.unknownColour]
if self.isFault:
ret.append(colours.faultColour)
return ret
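# Worked example of the id format (illustrative, not from a real trace):
# Id().from_string('1/20.2/15/40.50') gives threadId=1, streamSeqNum=20,
# predictionSeqNum=2, lineSeqNum=15, fetchSeqNum=40 and execSeqNum=50; a
# leading 'F;' marks the id as a fault.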
class Branch(BlobVisualData):
"""Branch data new stream and prediction sequence numbers, a branch
reason and a new PC"""
def __init__(self):
self.newStreamSeqNum = 0
self.newPredictionSeqNum = 0
self.newPC = 0
self.reason = "NoBranch"
self.id = Id()
def from_string(self, string):
m = re.match('^(\w+);(\d+)\.(\d+);([0-9a-fA-Fx]+);(.*)$', string)
if m is not None:
self.reason, newStreamSeqNum, newPredictionSeqNum, \
newPC, id = m.groups()
self.newStreamSeqNum = int(newStreamSeqNum)
self.newPredictionSeqNum = int(newPredictionSeqNum)
self.newPC = int(newPC, 0)
self.id = special_view_decoder(Id)(id)
# self.branch = special_view_decoder(Branch)(branch)
else:
print "Bad Branch data:", string
return self
def to_striped_block(self, select):
return [colours.number_to_colour(self.newStreamSeqNum),
colours.number_to_colour(self.newPredictionSeqNum),
colours.number_to_colour(self.newPC)]
class Counts(BlobVisualData):
"""Treat the input data as just a /-separated list of count values (or
just a single value)"""
def __init__(self):
self.counts = []
def from_string(self, string):
self.counts = map(int, re.split('/', string))
return self
def to_striped_block(self, select):
return map(colours.number_to_colour, self.counts)
class Colour(BlobVisualData):
"""A fixed colour block, used for special colour decoding"""
def __init__(self, colour):
self.colour = colour
def to_striped_block(self, select):
return [self.colour]
class DcacheAccess(BlobVisualData):
"""Data cache accesses [RW];id"""
def __init__(self):
self.direc = 'R'
self.id = Id()
def from_string(self, string):
self.direc, id = re.match('^([RW]);([^;]*);.*$', string).groups()
self.id.from_string(id)
return self
def get_inst(self):
return self.id
def to_striped_block(self, select):
if self.direc == 'R':
direc_colour = colours.readColour
        elif self.direc == 'W':
direc_colour = colours.writeColour
else:
direc_colour = colours.errorColour
return [direc_colour] + self.id.to_striped_block(select)
class ColourPattern(object):
"""Super class for decoders that make 2D grids rather than just single
striped blocks"""
def elems(self):
return []
def to_striped_block(self, select):
return [[[colours.errorColour]]]
def special_view_decoder(class_):
"""Generate a decode function that checks for special character
arguments first (and generates a fixed colour) before building a
BlobVisualData of the given class"""
def decode(symbol):
if symbol in special_state_colours:
return Colour(special_state_colours[symbol])
else:
return class_().from_string(symbol)
return decode
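# Illustrative example (not from the original file): special_view_decoder(Id)
# first checks the single-character special states defined further below, so
# 'B' yields a fixed Colour(colours.blockedColour) block, while a string like
# '1/20.2/15/40.50' falls through to Id().from_string(...).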
class TwoDColours(ColourPattern):
"""A 2D grid pattern decoder"""
def __init__(self, blockss):
self.blockss = blockss
@classmethod
def decoder(class_, elemClass, dataName):
"""Factory for making decoders for particular block types"""
def decode(pairs):
if dataName not in pairs:
print 'TwoDColours: no event data called:', \
dataName, 'in:', pairs
return class_([[Colour(colours.errorColour)]])
else:
parsed = parse.list_parser(pairs[dataName])
return class_(parse.map2(special_view_decoder(elemClass), \
parsed))
return decode
@classmethod
def indexed_decoder(class_, elemClass, dataName, picPairs):
"""Factory for making decoders for particular block types but
where the list elements are pairs of (index, data) and
strip and stripelems counts are picked up from the pair
data on the decoder's picture file. This gives a 2D layout
of the values with index 0 at strip=0, elem=0 and index 1
at strip=0, elem=1"""
def decode(pairs):
if dataName not in pairs:
print 'TwoDColours: no event data called:', \
dataName, 'in:', pairs
return class_([[Colour(colours.errorColour)]])
else:
strips = int(picPairs['strips'])
strip_elems = int(picPairs['stripelems'])
raw_iv_pairs = pairs[dataName]
parsed = parse.parse_indexed_list(raw_iv_pairs)
array = [[Colour(colours.emptySlotColour)
for i in xrange(0, strip_elems)]
for j in xrange(0, strips)]
for index, value in parsed:
try:
array[index % strips][index / strips] = \
special_view_decoder(elemClass)(value)
except:
print "Element out of range strips: %d," \
" stripelems %d, index: %d" % (strips,
strip_elems, index)
# return class_(array)
return class_(array)
return decode
def elems(self):
"""Get a flat list of all elements"""
ret = []
for blocks in self.blockss:
ret += blocks
return ret
def to_striped_block(self, select):
return parse.map2(lambda d: d.to_striped_block(select), self.blockss)
class FrameColours(ColourPattern):
"""Decode to a 2D grid which has a single occupied row from the event
data and some blank rows forming a frame with the occupied row as a
'title' coloured stripe"""
def __init__(self, block, numBlankSlots):
self.numBlankSlots = numBlankSlots
self.block = block
@classmethod
def decoder(class_, elemClass, numBlankSlots, dataName):
"""Factory for element type"""
def decode(pairs):
if dataName not in pairs:
print 'FrameColours: no event data called:', dataName, \
'in:', pairs
return class_([Colour(colours.errorColour)])
else:
parsed = parse.list_parser(pairs[dataName])
return class_(special_view_decoder(elemClass)
(parsed[0][0]), numBlankSlots)
return decode
def elems(self):
return [self.block]
def to_striped_block(self, select):
return ([[self.block.to_striped_block(select)]] +
(self.numBlankSlots * [[[colours.backgroundColour]]]))
special_state_colours = {
'U': colours.unknownColour,
'B': colours.blockedColour,
'-': colours.bubbleColour,
'': colours.emptySlotColour,
'E': colours.emptySlotColour,
'R': colours.reservedSlotColour,
'X': colours.errorColour,
'F': colours.faultColour,
'r': colours.readColour,
'w': colours.writeColour
}
special_state_names = {
'U': '(U)nknown',
'B': '(B)locked',
'-': '(-)Bubble',
'': '()Empty',
'E': '(E)mpty',
'R': '(R)eserved',
'X': '(X)Error',
'F': '(F)ault',
'r': '(r)ead',
'w': '(w)rite'
}
special_state_chars = special_state_colours.keys()
# The complete set of available block data types
decoder_element_classes = {
'insts': Id,
'lines': Id,
'branch': Branch,
'dcache': DcacheAccess,
'counts': Counts
}
indexed_decoder_element_classes = {
'indexedCounts' : Counts
}
def find_colour_decoder(stripSpace, decoderName, dataName, picPairs):
"""Make a colour decoder from some picture file blob attributes"""
if decoderName == 'frame':
return FrameColours.decoder(Counts, stripSpace, dataName)
elif decoderName in decoder_element_classes:
return TwoDColours.decoder(decoder_element_classes[decoderName],
dataName)
elif decoderName in indexed_decoder_element_classes:
return TwoDColours.indexed_decoder(
indexed_decoder_element_classes[decoderName], dataName, picPairs)
else:
return None
class IdedObj(object):
"""An object identified by an Id carrying paired data.
The super class for Inst and Line"""
def __init__(self, id, pairs={}):
self.id = id
self.pairs = pairs
def __cmp__(self, right):
return cmp(self.id, right.id)
def table_line(self):
"""Represent the object as a list of table row data"""
return []
# FIXME, add a table column titles?
def __repr__(self):
return ' '.join(self.table_line())
class Inst(IdedObj):
"""A non-fault instruction"""
def __init__(self, id, disassembly, addr, pairs={}):
super(Inst,self).__init__(id, pairs)
if 'nextAddr' in pairs:
self.nextAddr = int(pairs['nextAddr'], 0)
del pairs['nextAddr']
else:
self.nextAddr = None
self.disassembly = disassembly
self.addr = addr
def table_line(self):
if self.nextAddr is not None:
addrStr = '0x%x->0x%x' % (self.addr, self.nextAddr)
else:
addrStr = '0x%x' % self.addr
ret = [addrStr, self.disassembly]
for name, value in self.pairs.iteritems():
ret.append("%s=%s" % (name, str(value)))
return ret
class InstFault(IdedObj):
"""A fault instruction"""
def __init__(self, id, fault, addr, pairs={}):
super(InstFault,self).__init__(id, pairs)
self.fault = fault
self.addr = addr
def table_line(self):
ret = ["0x%x" % self.addr, self.fault]
        for name, value in self.pairs.iteritems():
            ret.append("%s=%s" % (name, str(value)))
return ret
class Line(IdedObj):
"""A fetched line"""
def __init__(self, id, vaddr, paddr, size, pairs={}):
super(Line,self).__init__(id, pairs)
self.vaddr = vaddr
self.paddr = paddr
self.size = size
def table_line(self):
ret = ["0x%x/0x%x" % (self.vaddr, self.paddr), "%d" % self.size]
        for name, value in self.pairs.iteritems():
            ret.append("%s=%s" % (name, str(value)))
return ret
class LineFault(IdedObj):
"""A faulting line"""
def __init__(self, id, fault, vaddr, pairs={}):
super(LineFault,self).__init__(id, pairs)
self.vaddr = vaddr
self.fault = fault
def table_line(self):
ret = ["0x%x" % self.vaddr, self.fault]
        for name, value in self.pairs.iteritems():
            ret.append("%s=%s" % (name, str(value)))
return ret
class BlobEvent(object):
"""Time event for a single blob"""
def __init__(self, unit, time, pairs = {}):
# blob's unit name
self.unit = unit
self.time = time
# dict of picChar (blob name) to visual data
self.visuals = {}
# Miscellaneous unparsed MinorTrace line data
self.pairs = pairs
# Non-MinorTrace debug printout for this unit at this time
self.comments = []
def find_ided_objects(self, model, picChar, includeInstLines):
"""Find instructions/lines mentioned in the blob's event
data"""
ret = []
if picChar in self.visuals:
blocks = self.visuals[picChar].elems()
def find_inst(data):
instId = data.get_inst()
lineId = data.get_line()
if instId is not None:
inst = model.find_inst(instId)
line = model.find_line(instId)
if inst is not None:
ret.append(inst)
if includeInstLines and line is not None:
ret.append(line)
elif lineId is not None:
line = model.find_line(lineId)
if line is not None:
ret.append(line)
map(find_inst, blocks)
return sorted(ret)
class BlobModel(object):
"""Model bringing together blob definitions and parsed events"""
def __init__(self, unitNamePrefix=''):
self.blobs = []
self.unitNameToBlobs = {}
self.unitEvents = {}
self.clear_events()
self.picSize = Point(20,10)
self.lastTime = 0
self.unitNamePrefix = unitNamePrefix
def clear_events(self):
"""Drop all events and times"""
self.lastTime = 0
self.times = []
self.insts = {}
self.lines = {}
self.numEvents = 0
for unit, events in self.unitEvents.iteritems():
self.unitEvents[unit] = []
def add_blob(self, blob):
"""Add a parsed blob to the model"""
self.blobs.append(blob)
if blob.unit not in self.unitNameToBlobs:
self.unitNameToBlobs[blob.unit] = []
self.unitNameToBlobs[blob.unit].append(blob)
def add_inst(self, inst):
"""Add a MinorInst instruction definition to the model"""
        # Is this a non micro-op instruction? Microops (usually) get their
        # fetchSeqNum == 0 variant stored first
macroop_key = (inst.id.fetchSeqNum, 0)
full_key = (inst.id.fetchSeqNum, inst.id.execSeqNum)
if inst.id.execSeqNum != 0 and macroop_key not in self.insts:
self.insts[macroop_key] = inst
self.insts[full_key] = inst
def find_inst(self, id):
"""Find an instruction either as a microop or macroop"""
macroop_key = (id.fetchSeqNum, 0)
full_key = (id.fetchSeqNum, id.execSeqNum)
if full_key in self.insts:
return self.insts[full_key]
elif macroop_key in self.insts:
return self.insts[macroop_key]
else:
return None
def add_line(self, line):
"""Add a MinorLine line to the model"""
self.lines[line.id.lineSeqNum] = line
def add_unit_event(self, event):
"""Add a single event to the model. This must be an event at a
time >= the current maximum time"""
if event.unit in self.unitEvents:
events = self.unitEvents[event.unit]
if len(events) > 0 and events[len(events)-1].time > event.time:
print "Bad event ordering"
events.append(event)
self.numEvents += 1
self.lastTime = max(self.lastTime, event.time)
def extract_times(self):
"""Extract a list of all the times from the seen events. Call after
reading events to give a safe index list to use for time indices"""
times = {}
for unitEvents in self.unitEvents.itervalues():
for event in unitEvents:
times[event.time] = 1
self.times = times.keys()
self.times.sort()
def find_line(self, id):
"""Find a line by id"""
key = id.lineSeqNum
return self.lines.get(key, None)
def find_event_bisection(self, unit, time, events,
lower_index, upper_index):
"""Find an event by binary search on time indices"""
while lower_index <= upper_index:
pivot = (upper_index + lower_index) / 2
pivotEvent = events[pivot]
event_equal = (pivotEvent.time == time or
(pivotEvent.time < time and
(pivot == len(events) - 1 or
events[pivot + 1].time > time)))
if event_equal:
return pivotEvent
elif time > pivotEvent.time:
if pivot == upper_index:
return None
else:
lower_index = pivot + 1
elif time < pivotEvent.time:
if pivot == lower_index:
return None
else:
upper_index = pivot - 1
else:
return None
return None
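    # Note (illustrative, not from the original source): find_event_bisection
    # returns the latest event whose time is <= the requested time by binary
    # searching the unit's time-ordered event list; find_unit_event_by_time
    # below is the public entry point.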
def find_unit_event_by_time(self, unit, time):
"""Find the last event for the given unit at time <= time"""
if unit in self.unitEvents:
events = self.unitEvents[unit]
ret = self.find_event_bisection(unit, time, events,
0, len(events)-1)
return ret
else:
return None
def find_time_index(self, time):
"""Find a time index close to the given time (where
times[return] <= time and times[return+1] > time"""
ret = 0
lastIndex = len(self.times) - 1
while ret < lastIndex and self.times[ret + 1] <= time:
ret += 1
return ret
def add_minor_inst(self, rest):
"""Parse and add a MinorInst line to the model"""
pairs = parse.parse_pairs(rest)
other_pairs = dict(pairs)
id = Id().from_string(pairs['id'])
del other_pairs['id']
addr = int(pairs['addr'], 0)
del other_pairs['addr']
if 'inst' in other_pairs:
del other_pairs['inst']
# Collapse unnecessary spaces in disassembly
disassembly = re.sub(' *', ' ',
re.sub('^ *', '', pairs['inst']))
inst = Inst(id, disassembly, addr, other_pairs)
self.add_inst(inst)
elif 'fault' in other_pairs:
del other_pairs['fault']
inst = InstFault(id, pairs['fault'], addr, other_pairs)
self.add_inst(inst)
def add_minor_line(self, rest):
"""Parse and add a MinorLine line to the model"""
pairs = parse.parse_pairs(rest)
other_pairs = dict(pairs)
id = Id().from_string(pairs['id'])
del other_pairs['id']
vaddr = int(pairs['vaddr'], 0)
del other_pairs['vaddr']
if 'paddr' in other_pairs:
del other_pairs['paddr']
del other_pairs['size']
paddr = int(pairs['paddr'], 0)
size = int(pairs['size'], 0)
self.add_line(Line(id,
vaddr, paddr, size, other_pairs))
elif 'fault' in other_pairs:
del other_pairs['fault']
self.add_line(LineFault(id, pairs['fault'], vaddr, other_pairs))
def load_events(self, file, startTime=0, endTime=None):
"""Load an event file and add everything to this model"""
def update_comments(comments, time):
# Add a list of comments to an existing event, if there is one at
# the given time, or create a new, correctly-timed, event from
# the last event and attach the comments to that
for commentUnit, commentRest in comments:
event = self.find_unit_event_by_time(commentUnit, time)
# Find an event to which this comment can be attached
if event is None:
# No older event, make a new empty one
event = BlobEvent(commentUnit, time, {})
self.add_unit_event(event)
elif event.time != time:
# Copy the old event and make a new one with the right
# time and comment
newEvent = BlobEvent(commentUnit, time, event.pairs)
newEvent.visuals = dict(event.visuals)
event = newEvent
self.add_unit_event(event)
event.comments.append(commentRest)
self.clear_events()
# A negative time will *always* be different from an event time
time = -1
time_events = {}
last_time_lines = {}
minor_trace_line_count = 0
comments = []
default_colour = [[colours.unknownColour]]
next_progress_print_event_count = 1000
if not os.access(file, os.R_OK):
print 'Can\'t open file', file
exit(1)
else:
print 'Opening file', file
f = open(file)
start_wall_time = wall_time()
# Skip leading events
still_skipping = True
l = f.readline()
while l and still_skipping:
match = re.match('^\s*(\d+):', l)
if match is not None:
event_time = match.groups()
if int(event_time[0]) >= startTime:
still_skipping = False
else:
l = f.readline()
else:
l = f.readline()
match_line_re = re.compile(
'^\s*(\d+):\s*([\w\.]+):\s*(Minor\w+:)?\s*(.*)$')
# Parse each line of the events file, accumulating comments to be
# attached to MinorTrace events when the time changes
reached_end_time = False
while not reached_end_time and l:
match = match_line_re.match(l)
if match is not None:
event_time, unit, line_type, rest = match.groups()
event_time = int(event_time)
unit = re.sub('^' + self.unitNamePrefix + '\.?(.*)$',
'\\1', unit)
# When the time changes, resolve comments
if event_time != time:
if self.numEvents > next_progress_print_event_count:
print ('Parsed to time: %d' % event_time)
next_progress_print_event_count = (
self.numEvents + 1000)
update_comments(comments, time)
comments = []
time = event_time
if line_type is None:
# Treat this line as just a 'comment'
comments.append((unit, rest))
elif line_type == 'MinorTrace:':
minor_trace_line_count += 1
# Only insert this event if it's not the same as
# the last event we saw for this unit
if last_time_lines.get(unit, None) != rest:
event = BlobEvent(unit, event_time, {})
pairs = parse.parse_pairs(rest)
event.pairs = pairs
# Try to decode the colour data for this event
blobs = self.unitNameToBlobs.get(unit, [])
for blob in blobs:
if blob.visualDecoder is not None:
event.visuals[blob.picChar] = (
blob.visualDecoder(pairs))
self.add_unit_event(event)
last_time_lines[unit] = rest
elif line_type == 'MinorInst:':
self.add_minor_inst(rest)
elif line_type == 'MinorLine:':
self.add_minor_line(rest)
if endTime is not None and time > endTime:
reached_end_time = True
l = f.readline()
update_comments(comments, time)
self.extract_times()
f.close()
end_wall_time = wall_time()
print 'Total events:', minor_trace_line_count, 'unique events:', \
self.numEvents
print 'Time to parse:', end_wall_time - start_wall_time
def add_blob_picture(self, offset, pic, nameDict):
"""Add a parsed ASCII-art pipeline markup to the model"""
pic_width = 0
for line in pic:
pic_width = max(pic_width, len(line))
pic_height = len(pic)
# Number of horizontal characters per 'pixel'. Should be 2
charsPerPixel = 2
# Convert pic_width from characters to 'pixels', rounding up
pic_width = (pic_width + charsPerPixel - 1) // charsPerPixel
self.picSize = Point(pic_width, pic_height)
def pic_at(point):
"""Return the char pair at the given point.
Returns None for characters off the picture"""
x, y = point.to_pair()
x *= 2
if y >= len(pic) or x >= len(pic[y]):
return None
else:
return pic[y][x:x + charsPerPixel]
def clear_pic_at(point):
"""Clear the chars at point so we don't trip over them again"""
line = pic[point.y]
x = point.x * charsPerPixel
pic[point.y] = line[0:x] + (' ' * charsPerPixel) + \
line[x + charsPerPixel:]
def skip_same_char(start, increment):
"""Skip characters which match pic_at(start)"""
char = pic_at(start)
hunt = start
while pic_at(hunt) == char:
hunt += increment
return hunt
def find_size(start):
"""Find the size of a rectangle with top left hand corner at
start consisting of (at least) a -. shaped corner describing
the top right corner of a rectangle of the same char"""
char = pic_at(start)
hunt_x = skip_same_char(start, Point(1,0))
hunt_y = skip_same_char(start, Point(0,1))
off_bottom_right = (hunt_x * Point(1,0)) + (hunt_y * Point(0,1))
return off_bottom_right - start
def point_return(point):
"""Carriage return, line feed"""
return Point(0, point.y + 1)
def find_arrow(start):
"""Find a simple 1-char wide arrow"""
def body(endChar, contChar, direc):
arrow_point = start
arrow_point += Point(0, 1)
clear_pic_at(start)
while pic_at(arrow_point) == contChar:
clear_pic_at(arrow_point)
arrow_point += Point(0, 1)
if pic_at(arrow_point) == endChar:
clear_pic_at(arrow_point)
self.add_blob(blobs.Arrow('_', start + offset,
direc = direc,
size = (Point(1, 1) + arrow_point - start)))
else:
print 'Bad arrow', start
char = pic_at(start)
if char == '-\\':
body('-/', ' :', 'right')
elif char == '/-':
body('\\-', ': ', 'left')
blank_chars = [' ', ' :', ': ']
# Traverse the picture left to right, top to bottom to find blobs
seen_dict = {}
point = Point(0,0)
while pic_at(point) is not None:
while pic_at(point) is not None:
char = pic_at(point)
if char == '->':
self.add_blob(blobs.Arrow('_', point + offset,
direc = 'right'))
elif char == '<-':
self.add_blob(blobs.Arrow('_', point + offset,
direc = 'left'))
elif char == '-\\' or char == '/-':
find_arrow(point)
elif char in blank_chars:
pass
else:
if char not in seen_dict:
size = find_size(point)
topLeft = point + offset
if char not in nameDict:
# Unnamed blobs
self.add_blob(blobs.Block(char,
nameDict.get(char, '_'),
topLeft, size = size))
else:
# Named blobs, set visual info.
blob = nameDict[char]
blob.size = size
blob.topLeft = topLeft
self.add_blob(blob)
seen_dict[char] = True
point = skip_same_char(point, Point(1,0))
point = point_return(point)
def load_picture(self, filename):
"""Load a picture file into the model"""
def parse_blob_description(char, unit, macros, pairsList):
# Parse the name value pairs in a blob-describing line
def expand_macros(pairs, newPairs):
# Recursively expand macros
for name, value in newPairs:
if name in macros:
expand_macros(pairs, macros[name])
else:
pairs[name] = value
return pairs
pairs = expand_macros({}, pairsList)
ret = None
typ = pairs.get('type', 'block')
colour = colours.name_to_colour(pairs.get('colour', 'black'))
if typ == 'key':
ret = blobs.Key(char, unit, Point(0,0), colour)
elif typ == 'block':
ret = blobs.Block(char, unit, Point(0,0), colour)
else:
print "Bad picture blog type:", typ
if 'hideId' in pairs:
hide = pairs['hideId']
ret.dataSelect.ids -= set(hide)
if typ == 'block':
ret.displayName = pairs.get('name', unit)
ret.nameLoc = pairs.get('nameLoc', 'top')
ret.shape = pairs.get('shape', 'box')
ret.stripDir = pairs.get('stripDir', 'horiz')
ret.stripOrd = pairs.get('stripOrd', 'LR')
ret.blankStrips = int(pairs.get('blankStrips', '0'))
ret.shorten = int(pairs.get('shorten', '0'))
if 'decoder' in pairs:
decoderName = pairs['decoder']
dataElement = pairs.get('dataElement', decoderName)
decoder = find_colour_decoder(ret.blankStrips,
decoderName, dataElement, pairs)
if decoder is not None:
ret.visualDecoder = decoder
else:
print 'Bad visualDecoder requested:', decoderName
if 'border' in pairs:
border = pairs['border']
if border == 'thin':
ret.border = 0.2
elif border == 'mid':
ret.border = 0.5
else:
ret.border = 1.0
elif typ == 'key':
ret.colours = pairs.get('colours', ret.colours)
return ret
def line_is_comment(line):
"""Returns true if a line starts with #, returns False
for lines which are None"""
return line is not None \
and re.match('^\s*#', line) is not None
def get_line(f):
"""Get a line from file f extending that line if it ends in
'\' and dropping lines that start with '#'s"""
ret = f.readline()
# Discard comment lines
while line_is_comment(ret):
ret = f.readline()
if ret is not None:
extend_match = re.match('^(.*)\\\\$', ret)
while extend_match is not None:
new_line = f.readline()
if new_line is not None and not line_is_comment(new_line):
line_wo_backslash, = extend_match.groups()
ret = line_wo_backslash + new_line
extend_match = re.match('^(.*)\\\\$', ret)
else:
extend_match = None
return ret
# Macros are recursively expanded into name=value pairs
macros = {}
if not os.access(filename, os.R_OK):
print 'Can\'t open file', filename
exit(1)
else:
print 'Opening file', filename
f = open(filename)
l = get_line(f)
picture = []
blob_char_dict = {}
self.unitEvents = {}
self.clear_events()
# Actually parse the file
in_picture = False
while l:
l = parse.remove_trailing_ws(l)
l = re.sub('#.*', '', l)
if re.match("^\s*$", l) is not None:
pass
elif l == '<<<':
in_picture = True
elif l == '>>>':
in_picture = False
elif in_picture:
picture.append(re.sub('\s*$', '', l))
else:
line_match = re.match(
'^([a-zA-Z0-9][a-zA-Z0-9]):\s+([\w.]+)\s*(.*)', l)
macro_match = re.match('macro\s+(\w+):(.*)', l)
if macro_match is not None:
name, defn = macro_match.groups()
macros[name] = parse.parse_pairs_list(defn)
elif line_match is not None:
char, unit, pairs = line_match.groups()
blob = parse_blob_description(char, unit, macros,
parse.parse_pairs_list(pairs))
blob_char_dict[char] = blob
# Setup the events structure
self.unitEvents[unit] = []
else:
print 'Problem with Blob line:', l
l = get_line(f)
self.blobs = []
self.add_blob_picture(Point(0,1), picture, blob_char_dict)
|
|
"""
This module contains functions that convert code and equations to
PDF and PNG formats
"""
from __future__ import print_function
import os
import sys
import hashlib
import errno
import shutil
from xml.sax.saxutils import unescape
from lxml import etree
from termcolor import colored
from . import pstikz2png
from .pstikz2png import LatexPictureError
from . import utils
def get_code_hash(codetext):
'''
Calculate the hash for a given image, given the code as a string
'''
codetext = ''.join([c for c in codetext if ord(c) < 128])
code_hash = hashlib.md5(
''.join(codetext.encode('utf-8').split())).hexdigest()
return code_hash
def cleanup_after_latex(figpath):
''' clean up after the image generation
'''
tmpdir = os.path.dirname(figpath)
try:
shutil.rmtree(tmpdir)
except OSError as exc:
if exc.errno != errno.ENOENT: # ENOENT - no such file or directory
raise # re-raise exception
def run_latex(data):
''' Run the image generation for pstricks and tikz images
'''
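# `data` is the (pictype, codehash, codetext) tuple built by the
# _render_*_images helpers below, e.g. (illustrative values only):
#   ('tikzpicture', 'd41d8cd98f00b204e9800998ecf8427e',
#    r'\draw (0,0) -- (1,1);')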
pictype, codehash, codetext = data
# local image cache path: .bookbuilder/<pictype>/<hash>.png
image_cache_path = os.path.join('.bookbuilder',
pictype,
codehash+'.png')
rendered = False
# skip image generation if it exists
if os.path.exists(image_cache_path):
rendered = True
sys.stdout.write('s')
if not rendered:
sys.stdout.write('.')
# send this object to pstikz2png
try:
if pictype == 'pspicture':
figpath = pstikz2png.pspicture2png(codetext, iDpi=150)
elif pictype == 'tikzpicture':
figpath = pstikz2png.tikzpicture2png(codetext, iDpi=150)
elif pictype == 'equation':
figpath = pstikz2png.equation2png(codetext, iDpi=150)
except LatexPictureError as lpe:
print(colored("\nLaTeX failure", "red"))
print(unicode(lpe))
return None
if figpath:
# done. copy to image cache
utils.copy_if_newer(figpath, image_cache_path)
# copy the pdf also but run pdfcrop first
utils.copy_if_newer(figpath.replace('.png', '.pdf'),
image_cache_path.replace('.png', '.pdf'))
cleanup_after_latex(figpath)
else:
figpath = image_cache_path
sys.stdout.flush()
return image_cache_path
def _render_html_images(html, output_path, parallel=True):
''' Given an etree object of the html file, render images and change the
DOM to have image links. Returns the etree object and a validity flag.
'''
valid = True
# find all the pspicture and tikz elements
pspics = [p for p in html.findall('.//pre[@class="pspicture"]')]
tikzpics = [p for p in html.findall('.//pre[@class="tikzpicture"]')]
allpics = pspics + tikzpics
# create a data list for the Pool map to work on
pooldata = []
for pre in allpics:
pictype = pre.attrib['class']
# find the hash of the code content
codetext = pre.find('.//code').text
code_hash = get_code_hash(codetext)
# see if the output png exists at
# build/html/pspictures/hash.png OR
# build/html/tikzpictures/hash.png
pooldata.append((pictype, code_hash, codetext))
if pooldata:
# call Map (note: runs serially here, parallel is explicitly disabled)
image_cache_paths = utils.Map(run_latex, pooldata, parallel=False)
for (pre, pooldata, icp) in zip(allpics, pooldata, image_cache_paths):
image_cache_path = icp
if not image_cache_path:
valid = False
continue
pictype, code_hash, codetext = pooldata
pngpath = os.path.join(os.path.dirname(output_path), pictype,
code_hash+'.png')
utils.copy_if_newer(image_cache_path, pngpath)
# replace div.alternate with <img>
parent = pre.getparent().getparent()
img = etree.Element('img')
img.attrib['src'] = os.path.join(pictype, code_hash+'.png')
img.attrib['alt'] = code_hash + '.png'
parent.replace(pre.getparent(), img)
# figure.remove(pre.getparent())
return html, valid
def _render_tex_images(tex, output_path, parallel=True):
'''
Given TeX file as string, find pstricks and tikz images and generate
the PDF version, include in file as graphics, return as string
'''
valid = True
environments = ['pspicture', 'tikzpicture']
for pictype in environments:
texsplit = tex.split(r'\begin{{{env}}}'.format(env=pictype))
pooldata = []
for i, chunk in enumerate(texsplit[1:]):
env_end = chunk.find(r'\end{{{env}}}'.format(env=pictype))
# get code text and hash
codetext = chunk[0:env_end]
code_hash = get_code_hash(codetext)
pooldata.append((pictype, code_hash, codetext))
if pooldata:
# call parallel map
image_cache_paths = utils.Map(run_latex, pooldata,
parallel=parallel)
for i, (chunk, pooldata, icp) in enumerate(zip(texsplit[1:],
pooldata,
image_cache_paths)):
if not icp:
valid = False
# Image was not valid
continue
pictype, code_hash, codetext = pooldata
env_end = chunk.find(r'\end{{{env}}}'.format(env=pictype))
image_cache_path = icp
# place where image will go.
pdfpath = os.path.join(os.path.dirname(output_path),
pictype, code_hash+'.pdf')
# This returns the png path
pdf_cache_path = image_cache_path.replace('.png', '.pdf')
# copy generated pdf to tex folder.
utils.copy_if_newer(pdf_cache_path, pdfpath)
# replace environment with \includegraphics
newenv = \
r'\includegraphics{{{f}}}'.format(
f=os.path.join(pictype,
code_hash + '.pdf'))
endlength = len(r'\end{{{env}}}'.format(env=pictype))
texsplit[i+1] = newenv + chunk[env_end + endlength:]
tex = ''.join(texsplit)
return tex, valid
def _render_mobile_images(html, output_path, parallel=True):
'''
Given the HTML file as a string, find equations and generate PNG images for them.
'''
valid = True
pictype = 'equation'
environments = [(r'\(', r'\)'),
(r'\[', r'\]'),
(r'\begin{align*}', r'\end{align*}')]
for (env_start, env_end) in environments:
htmlsplit = html.split(env_start)
pooldata = []
for i, chunk in enumerate(htmlsplit[1:]):
# This finds stuff like
#
# \[ blah \\[8pt] \]
# ^ this is seen as start for new equation
# must avoid that corner case
env_end_pos = chunk.find(env_end)
# get code text and hash
codetext = chunk[0:env_end_pos]
code_hash = get_code_hash(codetext)
# unescape the code for latex generation
codetext = unescape(codetext)
pooldata.append((pictype,
code_hash,
env_start + codetext + env_end))
if pooldata:
# call parallel map
image_cache_paths = utils.Map(run_latex, pooldata,
parallel=parallel)
for i, (chunk, pooldata, icp) in enumerate(zip(htmlsplit[1:],
pooldata,
image_cache_paths)):
if not icp:
valid = False
# Image was not valid; skip this equation (mirrors the TeX path above)
continue
pictype, code_hash, codetext = pooldata
env_end_pos = chunk.find(env_end)
image_cache_path = icp
# place where image will go.
pngpath = os.path.join(os.path.dirname(output_path),
pictype, code_hash+'.png')
# This returns the png path
# copy generated pdf to tex folder.
utils.copy_if_newer(image_cache_path, pngpath)
# replace environment with img tag
# must specify block or inline
if env_start == r'\(':
imgclass = 'math-inline'
else:
imgclass = 'math-block'
newenv = \
r'<img class="{imgclass}" src="{f}"/>'.format(
imgclass=imgclass,
f=os.path.join(pictype,
code_hash + '.png'))
endlength = len(env_end)
htmlsplit[i+1] = newenv + chunk[env_end_pos + endlength:]
html = ''.join(htmlsplit)
return html, valid
def render_images(output_path, parallel=True):
''' Given an output path, find all the tikz and pstricks images and render
them as PDF and PNG. This function acts as a delegator for the pstikz2png
module.
'''
#
# html, xhtml and mobile output
#
if output_path.endswith('html'):
with open(output_path, 'r') as htmlout:
html = etree.HTML(htmlout.read().decode('utf-8'))
html, valid = _render_html_images(html, output_path, parallel=parallel)
with open(output_path, 'w') as htmlout:
htmlout.write(etree.tostring(html, method='xml').encode('utf-8'))
#
# TeX output
#
if output_path.endswith(".tex"):
with open(output_path, 'r') as texout:
tex = texout.read()
tex, valid = _render_tex_images(tex, output_path, parallel=parallel)
with open(output_path, 'w') as texout:
texout.write(tex)
#
# Mobile html, equations need to be rendered to images
#
if r'/mobile/' in output_path:
with open(output_path, 'r') as htmlout:
html = htmlout.read()
html, valid = _render_mobile_images(html, output_path,
parallel=parallel)
with open(output_path, 'w') as htmlout:
htmlout.write(html)
sys.stdout.write('\n')
sys.stdout.flush()
return valid
|
|
from test import support
from test.support import import_helper
support.requires('audio')
from test.support import findfile
ossaudiodev = import_helper.import_module('ossaudiodev')
import errno
import sys
import sunau
import time
import audioop
import unittest
# Arggh, AFMT_S16_NE not defined on all platforms -- seems to be a
# fairly recent addition to OSS.
try:
from ossaudiodev import AFMT_S16_NE
except ImportError:
if sys.byteorder == "little":
AFMT_S16_NE = ossaudiodev.AFMT_S16_LE
else:
AFMT_S16_NE = ossaudiodev.AFMT_S16_BE
def read_sound_file(path):
with open(path, 'rb') as fp:
au = sunau.open(fp)
rate = au.getframerate()
nchannels = au.getnchannels()
encoding = au._encoding
fp.seek(0)
data = fp.read()
if encoding != sunau.AUDIO_FILE_ENCODING_MULAW_8:
raise RuntimeError("Expect .au file with 8-bit mu-law samples")
# Convert the data to 16-bit signed.
data = audioop.ulaw2lin(data, 2)
return (data, rate, 16, nchannels)
class OSSAudioDevTests(unittest.TestCase):
def play_sound_file(self, data, rate, ssize, nchannels):
try:
dsp = ossaudiodev.open('w')
except OSError as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
# at least check that these methods can be invoked
dsp.bufsize()
dsp.obufcount()
dsp.obuffree()
dsp.getptr()
dsp.fileno()
# Make sure the read-only attributes work.
self.assertFalse(dsp.closed)
self.assertEqual(dsp.name, "/dev/dsp")
self.assertEqual(dsp.mode, "w", "bad dsp.mode: %r" % dsp.mode)
# And make sure they're really read-only.
for attr in ('closed', 'name', 'mode'):
try:
setattr(dsp, attr, 42)
except (TypeError, AttributeError):
pass
else:
self.fail("dsp.%s not read-only" % attr)
# Compute expected running time of sound sample (in seconds).
expected_time = float(len(data)) / (ssize/8) / nchannels / rate
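# Worked example (illustrative numbers only): 64000 bytes of 16-bit
# (ssize/8 == 2 bytes per sample) mono audio at 8000 Hz gives
# 64000 / 2 / 1 / 8000 = 4.0 seconds.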
# set parameters based on .au file headers
dsp.setparameters(AFMT_S16_NE, nchannels, rate)
self.assertTrue(abs(expected_time - 3.51) < 1e-2, expected_time)
t1 = time.monotonic()
dsp.write(data)
dsp.close()
t2 = time.monotonic()
elapsed_time = t2 - t1
percent_diff = (abs(elapsed_time - expected_time) / expected_time) * 100
self.assertTrue(percent_diff <= 10.0,
"elapsed time (%s) > 10%% off of expected time (%s)" %
(elapsed_time, expected_time))
def set_parameters(self, dsp):
# Two configurations for testing:
# config1 (8-bit, mono, 8 kHz) should work on even the most
# ancient and crufty sound card, but maybe not on special-
# purpose high-end hardware
# config2 (16-bit, stereo, 44.1kHz) should work on all but the
# most ancient and crufty hardware
config1 = (ossaudiodev.AFMT_U8, 1, 8000)
config2 = (AFMT_S16_NE, 2, 44100)
for config in [config1, config2]:
(fmt, channels, rate) = config
if (dsp.setfmt(fmt) == fmt and
dsp.channels(channels) == channels and
dsp.speed(rate) == rate):
break
else:
raise RuntimeError("unable to set audio sampling parameters: "
"you must have really weird audio hardware")
# setparameters() should be able to set this configuration in
# either strict or non-strict mode.
result = dsp.setparameters(fmt, channels, rate, False)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
result = dsp.setparameters(fmt, channels, rate, True)
self.assertEqual(result, (fmt, channels, rate),
"setparameters%r: returned %r" % (config, result))
def set_bad_parameters(self, dsp):
# Now try some configurations that are presumably bogus: eg. 300
# channels currently exceeds even Hollywood's ambitions, and
# negative sampling rate is utter nonsense. setparameters() should
# accept these in non-strict mode, returning something other than
# was requested, but should barf in strict mode.
fmt = AFMT_S16_NE
rate = 44100
channels = 2
for config in [(fmt, 300, rate), # ridiculous nchannels
(fmt, -5, rate), # impossible nchannels
(fmt, channels, -50), # impossible rate
]:
(fmt, channels, rate) = config
result = dsp.setparameters(fmt, channels, rate, False)
self.assertNotEqual(result, config,
"unexpectedly got requested configuration")
try:
result = dsp.setparameters(fmt, channels, rate, True)
except ossaudiodev.OSSAudioError as err:
pass
else:
self.fail("expected OSSAudioError")
def test_playback(self):
sound_info = read_sound_file(findfile('audiotest.au'))
self.play_sound_file(*sound_info)
def test_set_parameters(self):
dsp = ossaudiodev.open("w")
try:
self.set_parameters(dsp)
# Disabled because it fails under Linux 2.6 with ALSA's OSS
# emulation layer.
#self.set_bad_parameters(dsp)
finally:
dsp.close()
self.assertTrue(dsp.closed)
def test_mixer_methods(self):
# Issue #8139: ossaudiodev didn't initialize its types properly,
# therefore some methods were unavailable.
with ossaudiodev.openmixer() as mixer:
self.assertGreaterEqual(mixer.fileno(), 0)
def test_with(self):
with ossaudiodev.open('w') as dsp:
pass
self.assertTrue(dsp.closed)
def test_on_closed(self):
dsp = ossaudiodev.open('w')
dsp.close()
self.assertRaises(ValueError, dsp.fileno)
self.assertRaises(ValueError, dsp.read, 1)
self.assertRaises(ValueError, dsp.write, b'x')
self.assertRaises(ValueError, dsp.writeall, b'x')
self.assertRaises(ValueError, dsp.bufsize)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obufcount)
self.assertRaises(ValueError, dsp.obuffree)
self.assertRaises(ValueError, dsp.getptr)
mixer = ossaudiodev.openmixer()
mixer.close()
self.assertRaises(ValueError, mixer.fileno)
def test_main():
try:
dsp = ossaudiodev.open('w')
except (ossaudiodev.error, OSError) as msg:
if msg.args[0] in (errno.EACCES, errno.ENOENT,
errno.ENODEV, errno.EBUSY):
raise unittest.SkipTest(msg)
raise
dsp.close()
support.run_unittest(__name__)
if __name__ == "__main__":
test_main()
|
|
"""
Copyright (c) 2004-Present Pivotal Software, Inc.
This program and the accompanying materials are made available under
the terms of the under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import new
import traceback
import gc
from multiprocessing import Pool, Queue
import tinctest
from tinctest import TINCTestCase
import unittest2 as unittest
from unittest import TestResult
def remote_test_invoker(module_name, class_name, method_name, proc_name,
dargs=[], dxargs={}, setups = [], cleanups = []):
"""
A wrapper function that will execute a given test method in an external process.
@type module_name: string
@param module_name: Name of the module
@type class_name: string
@param class_name: Name of the class
@type method_name: string
@param method_name: Name of the test method
@type proc_name: string
@param proc_name: Name of the process that will be used in the logs
@type setups: list
@param setups: A list of (function, args, kwargs) tuples that will be executed as setups
@type cleanups: list
@param cleanups: A list of (function, args, kwargs) tuples that will be executed as cleanups
after the test execution
"""
tinctest.logger.info("Started remote test : %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name) )
try:
full_class_path = module_name + '.' + class_name
# Import the class
parts = full_class_path.split('.')
module = ".".join(parts[:-1])
klass = __import__( module )
for comp in parts[1:]:
klass = getattr(klass, comp)
test_klass_instance = klass(method_name)
# Execute all setups
while setups:
function, args, kwargs = setups.pop(0)
try:
setup_method = getattr(test_klass_instance, function)
tinctest.logger.debug("Calling setup_method %s" % setup_method)
setup_method(*args, **kwargs)
except unittest.case.SkipTest, st:
return [proc_name, tinctest._SKIP_TEST_MSG_PREFIX + str(st)]
except Exception, e:
tinctest.logger.exception("Setup failed: %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return [proc_name, "Setup failed: %s" %traceback.format_exc()]
# Execute the test method
try:
testMethod = getattr(test_klass_instance, method_name)
tinctest.logger.debug("Calling testMethod %s" % testMethod)
testMethod(*dargs, **dxargs)
except unittest.case.SkipTest, st:
return [proc_name, tinctest._SKIP_TEST_MSG_PREFIX + str(st)]
except Exception, e:
tinctest.logger.exception("Test failed: %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return [proc_name, "Test failed: %s" %traceback.format_exc()]
# Execute all cleanups in LIFO
while cleanups:
function, args, kwargs = cleanups.pop(-1)
try:
cleanup_method = getattr(test_klass_instance, function)
tinctest.logger.debug("Calling cleanup_method %s" % cleanup_method)
cleanup_method(*args, **kwargs)
except Exception, e:
tinctest.logger.exception("Cleanup failed: %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return [proc_name, "Cleanup failed: %s" %traceback.formact_exc()]
except Exception, e:
tinctest.logger.exception("Error during invocation: %s" %traceback.format_exc())
return [proc_name, "Error during invocation: %s" %traceback.format_exc()]
tinctest.logger.info("Finished remote test : %s - %s.%s.%s" % (proc_name, module_name, class_name, method_name))
return None
@tinctest.skipLoading("Test model. No tests loaded.")
class ConcurrencyTestCase(TINCTestCase):
"""
This model class should not be instantiated directly and should
be extended for adding test methods.
ConcurrencyTestCase provides an implementation where the test method
will be run concurrently based on the metadata 'concurrency'
@metadata: concurrency: number of concurrent executions of the test case (default: 1)
@metadata: iterations: number of times the concurrent executions are run (default: 1)
"""
def __init__(self, methodName="runTest", baseline_result = None):
self.iterations = None
self.concurrency = None
super(ConcurrencyTestCase, self).__init__(methodName)
def _infer_metadata(self):
super(ConcurrencyTestCase, self)._infer_metadata()
self.iterations = int(self._metadata.get('iterations', '1'))
self.concurrency = int(self._metadata.get('concurrency', '1'))
def run(self, result=None, pool = None):
"""
This method is overridden to implement concurrency for a test method. The default
implementation of unittest's run method will just run the test method directly.
In the implementation, we construct a supplementary test method that will run the
actual test method concurrently based on self.concurrency.
In addition, this accepts an optional 'pool' argument which is passed when a ConcurrencyTestCase
is used within a ScenarioTestCase.
@type result: TINCTextTestResult
@param result: The result object to be used for this test
@type pool: TINCWorkerPool
@param pool: The worker pool to be used to submit external tests. If not provided, a new worker pool will be created.
This is to enable ScenarioTestCase and ConcurrencyTestCase share the same worker pool.
"""
# RB: For ConcurrencyTestCase, we should run the test method for
# 'self.iterations' number of times. So, we create a new instance
# method that runs self._testMethodName the desired number of times
# concurrently using a worker pool of size self.concurrency
# and set self._testMethodName to the new method before calling super.run().
# Note - The test will be reported using the new method instead of the original
# test method. We will re-visit this later.
self._orig_testMethodName = self._testMethodName
worker_pool = pool
def test_function(my_self):
my_class = my_self.__class__.__name__
my_module = my_self.__module__
my_method_name = my_self._orig_testMethodName
for iter in xrange(my_self.iterations):
tinctest.logger.info("Starting iteration# %s of total %s..." % (str(iter + 1), str(my_self.iterations)))
should_wait = True
# TODO - Parameterize maximum pool size
if worker_pool is None:
pool = TINCTestWorkerPool(100)
else:
# This means that this test is being executed through a ScenarioTestCase
# and we should defer inspecting the results to the scenario test case.
pool = worker_pool
should_wait = False
for i in xrange(my_self.concurrency):
proc_prefix = "%s_proc_" %my_self._testMethodName
proc_postfix = "_iter_%s_proc_%s" %(str(iter + 1), str(i + 1))
proc_name = proc_prefix + proc_postfix
# We use 'run_worker' here that will simply call out to the
# super class' run method. ConcurrencyTestCase.run method has
# the logic to create a new test method and we would not want this to be done twice.
pool.submit_test(my_module, my_class, my_method_name, proc_name)
# Wait and inspect only when the concurrency test case is executed by itself.
# Defer result inspection when concurrency test case is executed through
# a scenario test case.
if should_wait:
pool.join()
# Inspect the result_queue for failures or errors
try:
if pool.has_failures():
failure_string = pool.get_failure_string()
failure_index = failure_string.find(" failed execution")
if failure_index != -1:
failure_string = failure_string[:failure_index]
self.fail("Workers encountered errors or failures: %s" % failure_string)
finally:
pool.terminate()
test_method = new.instancemethod(test_function,
self, self.__class__)
self.__dict__[ self._testMethodName + "*"] = test_method
self._testMethodName = self._testMethodName + "*"
super(ConcurrencyTestCase, self).run(result)
class TINCTestWorkerPool(object):
"""
A wrapper around multiprocessing.pool for handling concurrency in TINC. Used from within
ConcurrencyTestCase and ScenarioTestCase
"""
def __init__(self, worker_pool_size = 100):
"""
Initialize a multiprocessing.pool
@param worker_pool_size: Size of the worker pool
@type worker_pool_size: integer
"""
tinctest.logger.info("Initializing worker pool with %d workers" % worker_pool_size)
# The Queue object that will be shared between the current process and the process
# that will run the test method. The external process will update the queue with
# failure information which will be inspected from the runner for failures.
self.result_queue = Queue()
self.skipped_queue = Queue()
self.total_tests = 0
# A list of two-tuples containing the name of the worker that failed and the traceback
# as a string object from the remote process
self.failed_workers = []
# String containing worker name that failed and complete traceback string for each failed worker
self._failure_info = ''
# String containing worker name that failed and just the failure message from the traceback string for each failed worker
self._brief_failure_info = ''
gc.disable()
self.pool = Pool(worker_pool_size)
gc.enable()
# callback function for each spawned process in the pool.
# the ret parameter is the return value from the process's executor function
def remote_test_cb(self, ret):
# keep track of the total number of tests in the future we may need
# to find out if all tests in the concurrency/scenario suite were skipped
# this variable will be helpful to decide that
self.total_tests += 1
if ret:
if ret[1].find(tinctest._SKIP_TEST_MSG_PREFIX) != -1:
self.skipped_queue.put(ret)
else:
self.result_queue.put(ret)
def submit_test(self, module_name, class_name, method_name, proc_name = 'remote_test_process', dargs=[], dxargs={}, setups = [], cleanups = []):
"""
Submit a test case asynchronously for remote execution
@param module_name: Name of the module where the test resides
@type module_name: string
@param class_name: Name of the class where the test resides
@type class_name: string
@param method_name: Name of the test method to be executed remotely through this worker pool
@type method_name: string
@param proc_name: Name to be used for the process that is started for this test submitted
@type proc_name: string
@param dargs: A list of non-keyword arguments to be passed to the submitted test
@type dargs: list
@param dxargs: A dict of keyword arguments to be passed to the test while invoking
@type dxargs: dict
@param setups: A list of method names that should be run before the actual test is executed
@type setups: list
@param cleanups: A list of method names that should be run after the actual test is executed
@type cleanups: list
"""
self.pool.apply_async(remote_test_invoker, [module_name, class_name, method_name, proc_name, dargs, dxargs, setups, cleanups], callback=self.remote_test_cb)
def join(self):
"""
Join the worker pool. Will wait till all the tasks in the pool finish execution
"""
self.pool.close()
self.pool.join()
# Find failed workers
self._find_failed_workers()
def _find_failed_workers(self):
"""
Inspect the result queue that will contain the failed workers and populate self.failed_workers
"""
while not self.result_queue.empty():
tinctest.logger.error("Failures encountered in at least one of the test workers.")
worker_info = self.result_queue.get()
self.failed_workers.append((worker_info[0], worker_info[1]))
def has_failures(self):
"""
Returns True / False depending on whether there are failures in the tasks submitted through this instance
of the pool
@rtype: boolean
@return: True if there are failures in the submitted tasks, False otherwise
"""
return len(self.failed_workers) > 0
def inspect(self):
"""
Inspect the failed workers and log an error if any of the submitted
tasks failed or errored.
"""
if self.has_failures():
tinctest.logger.error("Failures encountered in at least one of the test workers.")
def get_failure_string(self):
"""
Return an aggregated failure string for all the tasks submitted through this instance of the worker pool
"""
for failed_worker in self.failed_workers:
self._failure_info += "Worker %s failed execution : \n %s\n" %(failed_worker[0], failed_worker[1])
return self._failure_info
def get_brief_failure_string(self):
"""
Similar to get_failure_string(), however, returns worker names and just the error message from the exception
instead of the whole stack trace
"""
for failed_worker in self.failed_workers:
error_msg = ''
if failed_worker[1] and len(failed_worker[1].split('\n')) >=2:
error_msg = failed_worker[1].split('\n')[-2]
self._brief_failure_info += "Worker %s failed execution: %s\n" %(failed_worker[0], error_msg)
return self._brief_failure_info
def terminate(self):
"""
Terminates the worker pool. Disables gc to avoid hangs
"""
gc.disable()
self.pool.terminate()
gc.enable()
|
|
#!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright (c), 2018-2019, SISSA (International School for Advanced Studies).
# All rights reserved.
# This file is distributed under the terms of the MIT License.
# See the file 'LICENSE' in the root directory of the present
# distribution, or http://opensource.org/licenses/MIT.
#
# @author Davide Brunato <[email protected]>
#
import unittest
import glob
import fileinput
import json
import os
import re
import importlib
import platform
import sys
import decimal
import subprocess
try:
import memory_profiler
except ImportError:
memory_profiler = None
@unittest.skipIf(sys.version_info < (3,), "In Python 2 ElementTree is not overwritten by cElementTree")
class TestElementTree(unittest.TestCase):
def test_element_string_serialization(self):
ElementTree = importlib.import_module('xml.etree.ElementTree')
xmlschema_etree = importlib.import_module('xmlschema.etree')
elem = ElementTree.Element('element')
self.assertEqual(xmlschema_etree.etree_tostring(elem), '<element />')
elem = xmlschema_etree.ElementTree.Element('element')
self.assertEqual(xmlschema_etree.etree_tostring(elem), '<element />')
elem = xmlschema_etree.PyElementTree.Element('element')
self.assertEqual(xmlschema_etree.etree_tostring(elem), '<element />')
def test_import_element_tree_before(self):
ElementTree = importlib.import_module('xml.etree.ElementTree')
xmlschema_etree = importlib.import_module('xmlschema.etree')
self.assertIsNot(ElementTree.Element, ElementTree._Element_Py, msg="cElementTree not available!")
elem = xmlschema_etree.PyElementTree.Element('element')
self.assertEqual(xmlschema_etree.etree_tostring(elem), '<element />')
self.assertIs(importlib.import_module('xml.etree.ElementTree'), ElementTree)
self.assertIs(xmlschema_etree.ElementTree, ElementTree)
def test_import_element_tree_after(self):
xmlschema_etree = importlib.import_module('xmlschema.etree')
ElementTree = importlib.import_module('xml.etree.ElementTree')
self.assertIsNot(ElementTree.Element, ElementTree._Element_Py, msg="cElementTree not available!")
elem = xmlschema_etree.PyElementTree.Element('element')
self.assertEqual(xmlschema_etree.etree_tostring(elem), '<element />')
self.assertIs(importlib.import_module('xml.etree.ElementTree'), ElementTree)
self.assertIs(xmlschema_etree.ElementTree, ElementTree)
def test_element_tree_import_script(self):
test_dir = os.path.dirname(__file__) or '.'
cmd = [os.path.join(test_dir, 'check_etree_import.py')]
process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.stdout.decode('utf-8')
self.assertTrue("\nTest OK:" in output, msg="Wrong import of ElementTree after xmlschema")
cmd.append('--before')
process = subprocess.run(cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
output = process.stdout.decode('utf-8')
self.assertTrue("\nTest OK:" in output, msg="Wrong import of ElementTree before xmlschema")
def test_safe_xml_parser(self):
test_dir = os.path.dirname(__file__) or '.'
xmlschema_etree = importlib.import_module('xmlschema.etree')
parser = xmlschema_etree.SafeXMLParser(target=xmlschema_etree.PyElementTree.TreeBuilder())
PyElementTree = xmlschema_etree.PyElementTree
xml_file = os.path.join(test_dir, 'test_cases/resources/with_entity.xml')
elem = xmlschema_etree.ElementTree.parse(xml_file).getroot()
self.assertEqual(elem.text, 'abc')
self.assertRaises(
PyElementTree.ParseError, xmlschema_etree.ElementTree.parse, xml_file, parser=parser
)
xml_file = os.path.join(test_dir, 'test_cases/resources/unused_external_entity.xml')
elem = xmlschema_etree.ElementTree.parse(xml_file).getroot()
self.assertEqual(elem.text, 'abc')
self.assertRaises(
PyElementTree.ParseError, xmlschema_etree.ElementTree.parse, xml_file, parser=parser
)
xml_file = os.path.join(test_dir, 'test_cases/resources/external_entity.xml')
self.assertRaises(xmlschema_etree.ParseError, xmlschema_etree.ElementTree.parse, xml_file)
self.assertRaises(
PyElementTree.ParseError, xmlschema_etree.ElementTree.parse, xml_file, parser=parser
)
@unittest.skipIf(memory_profiler is None or sys.version_info[:2] != (3, 7), "Test only with Python 3.7")
class TestMemoryUsage(unittest.TestCase):
@staticmethod
def check_memory_profile(output):
"""Check the output of a memory memory profile run on a function."""
mem_usage = []
func_num = 0
for line in output.split('\n'):
parts = line.split()
if 'def' in parts:
func_num += 1
if not parts or not parts[0].isdigit() or len(parts) == 1 \
or not parts[1].replace('.', '').isdigit():
continue
mem_usage.append(decimal.Decimal(parts[1]))
if func_num > 1:
raise ValueError("Cannot the a memory profile output of more than one function!")
return max(v - mem_usage[0] for v in mem_usage[1:])
@unittest.skip
def test_package_memory_usage(self):
test_dir = os.path.dirname(__file__) or '.'
cmd = [os.path.join(test_dir, 'check_memory.py'), '1']
output = subprocess.check_output(cmd, universal_newlines=True)
package_mem = self.check_memory_profile(output)
self.assertLess(package_mem, 20)
def test_element_tree_memory_usage(self):
test_dir = os.path.dirname(__file__) or '.'
xsd10_schema_file = os.path.join(
os.path.dirname(os.path.abspath(test_dir)), 'validators/schemas/XSD_1.0/XMLSchema.xsd'
)
cmd = [os.path.join(test_dir, 'check_memory.py'), '2', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
parse_mem = self.check_memory_profile(output)
cmd = [os.path.join(test_dir, 'check_memory.py'), '3', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
iterparse_mem = self.check_memory_profile(output)
cmd = [os.path.join(test_dir, 'check_memory.py'), '4', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
lazy_iterparse_mem = self.check_memory_profile(output)
self.assertLess(parse_mem, 2)
self.assertLessEqual(lazy_iterparse_mem, parse_mem / 2)
self.assertLessEqual(lazy_iterparse_mem, iterparse_mem)
def test_decode_memory_usage(self):
test_dir = os.path.dirname(__file__) or '.'
xsd10_schema_file = os.path.join(
os.path.dirname(os.path.abspath(test_dir)), 'validators/schemas/XSD_1.0/XMLSchema.xsd'
)
cmd = [os.path.join(test_dir, 'check_memory.py'), '5', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
decode_mem = self.check_memory_profile(output)
cmd = [os.path.join(test_dir, 'check_memory.py'), '6', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
lazy_decode_mem = self.check_memory_profile(output)
self.assertLess(decode_mem, 2)
self.assertLessEqual(lazy_decode_mem, decode_mem / decimal.Decimal(1.5))
def test_validate_memory_usage(self):
test_dir = os.path.dirname(__file__) or '.'
xsd10_schema_file = os.path.join(
os.path.dirname(os.path.abspath(test_dir)), 'validators/schemas/XSD_1.0/XMLSchema.xsd'
)
cmd = [os.path.join(test_dir, 'check_memory.py'), '7', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
validate_mem = self.check_memory_profile(output)
cmd = [os.path.join(test_dir, 'check_memory.py'), '8', xsd10_schema_file]
output = subprocess.check_output(cmd, universal_newlines=True)
lazy_validate_mem = self.check_memory_profile(output)
self.assertLess(validate_mem, 2)
self.assertLessEqual(lazy_validate_mem, validate_mem / 2)
@unittest.skipIf(platform.system() == 'Windows', "Skip packaging test on Windows platform.")
class TestPackaging(unittest.TestCase):
@classmethod
def setUpClass(cls):
cls.test_dir = os.path.dirname(os.path.abspath(__file__))
cls.source_dir = os.path.dirname(cls.test_dir)
cls.package_dir = os.path.dirname(cls.source_dir)
if not cls.package_dir.endswith('/xmlschema'):
cls.package_dir = None
cls.missing_debug = re.compile(
r"(\bimport\s+pdb\b|\bpdb\s*\.\s*set_trace\(\s*\)|\bprint\s*\()|\bbreakpoint\s*\("
)
cls.get_version = re.compile(r"(?:\brelease|__version__)(?:\s*=\s*)(\'[^\']*\'|\"[^\"]*\")")
def test_missing_debug_statements(self):
# Exclude explicit debug statements written in the code
exclude = {
'regex.py': [240, 241],
'codepoints.py': [543],
}
message = "\nFound a debug missing statement at line %d or file %r: %r"
filename = None
file_excluded = []
files = glob.glob(os.path.join(self.source_dir, '*.py')) + \
glob.glob(os.path.join(self.source_dir, 'validators/*.py'))
for line in fileinput.input(files):
if fileinput.isfirstline():
filename = fileinput.filename()
file_excluded = exclude.get(os.path.basename(filename), [])
lineno = fileinput.filelineno()
if lineno in file_excluded:
continue
match = self.missing_debug.search(line)
self.assertIsNone(match, message % (lineno, filename, match.group(0) if match else None))
def test_version(self):
message = "\nFound a different version at line %d or file %r: %r (may be %r)."
files = [os.path.join(self.source_dir, '__init__.py')]
if self.package_dir is not None:
files.extend([
os.path.join(self.package_dir, 'setup.py'),
os.path.join(self.package_dir, 'doc/conf.py'),
])
version = filename = None
for line in fileinput.input(files):
if fileinput.isfirstline():
filename = fileinput.filename()
lineno = fileinput.filelineno()
match = self.get_version.search(line)
if match is not None:
if version is None:
version = match.group(1).strip('\'\"')
else:
self.assertTrue(
version == match.group(1).strip('\'\"'),
message % (lineno, filename, match.group(1).strip('\'\"'), version)
)
def test_json_unicode_categories(self):
filename = os.path.join(self.source_dir, 'unicode_categories.json')
self.assertTrue(os.path.isfile(filename), msg="file %r is missing!" % filename)
with open(filename, 'r') as fp:
self.assertIsInstance(json.load(fp), dict, msg="file %r is not encoded in JSON format!" % filename)
def test_base_schema_files(self):
et = importlib.import_module('xml.etree.ElementTree')
schemas_dir = os.path.join(self.source_dir, 'validators/schemas')
base_schemas = [
'XSD_1.0/XMLSchema.xsd', 'XSD_1.1/XMLSchema.xsd', 'xhtml1-strict.xsd', 'xlink.xsd',
'xml_minimal.xsd', 'XMLSchema-hasFacetAndProperty_minimal.xsd', 'XMLSchema-instance_minimal.xsd'
]
for rel_path in base_schemas:
filename = os.path.join(schemas_dir, rel_path)
self.assertTrue(os.path.isfile(filename), msg="schema file %r is missing!" % filename)
self.assertIsInstance(et.parse(filename), et.ElementTree)
if __name__ == '__main__':
header1 = "Test package %r installation" % os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
header2 = "with Python {} on platform {}".format(platform.python_version(), platform.platform())
print('{0}\n{1}\n{2}\n{0}'.format("*" * max(len(header1), len(header2)), header1, header2))
unittest.main()
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# pytype: skip-file
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import datetime
import os
import sys
import tempfile
import urllib
import apache_beam as beam
from apache_beam import coders
from apache_beam.io import filesystems
from apache_beam.io import textio
from apache_beam.io import tfrecordio
from apache_beam.transforms import combiners
if sys.version_info[0] > 2:
unquote_to_bytes = urllib.parse.unquote_to_bytes
quote = urllib.parse.quote
else:
unquote_to_bytes = urllib.unquote # pylint: disable=deprecated-urllib-function
quote = urllib.quote # pylint: disable=deprecated-urllib-function
class CacheManager(object):
"""Abstract class for caching PCollections.
A PCollection cache is identified by labels, which consist of a prefix (either
'full' or 'sample') and a cache_label which is a hash of the PCollection
derivation.
"""
def exists(self, *labels):
# type (*str) -> bool
"""Returns if the PCollection cache exists."""
raise NotImplementedError
def is_latest_version(self, version, *labels):
# type (str, *str) -> bool
"""Returns if the given version number is the latest."""
return version == self._latest_version(*labels)
def _latest_version(self, *labels):
# type (*str) -> str
"""Returns the latest version number of the PCollection cache."""
raise NotImplementedError
def read(self, *labels):
# type (*str) -> Tuple[str, Generator[Any]]
"""Return the PCollection as a list as well as the version number.
Args:
*labels: List of labels for PCollection instance.
Returns:
A tuple containing an iterator for the items in the PCollection and the
version number.
It is possible that the version numbers from read() and _latest_version()
are different. This usually means that the cache's been evicted (thus
unavailable => read() returns version = -1), but it had reached version n
before eviction.
"""
raise NotImplementedError
def write(self, value, *labels):
# type (Any, *str) -> None
"""Writes the value to the given cache.
Args:
value: An encodable (with corresponding PCoder) value
*labels: List of labels for PCollection instance
"""
raise NotImplementedError
def source(self, *labels):
# type (*str) -> ptransform.PTransform
"""Returns a PTransform that reads the PCollection cache."""
raise NotImplementedError
def sink(self, labels, is_capture=False):
# type (*str, bool) -> ptransform.PTransform
"""Returns a PTransform that writes the PCollection cache."""
raise NotImplementedError
def save_pcoder(self, pcoder, *labels):
# type (coders.Coder, *str) -> None
"""Saves pcoder for given PCollection.
Correct reading of PCollection from Cache requires PCoder to be known.
This method saves desired PCoder for PCollection that will subsequently
be used by sink(...), source(...), and, most importantly, read(...) method.
The latter must be able to read a PCollection written by Beam using
non-Beam IO.
Args:
pcoder: A PCoder to be used for reading and writing a PCollection.
*labels: List of labels for PCollection instance.
"""
raise NotImplementedError
def load_pcoder(self, *labels):
# type (*str) -> coders.Coder
"""Returns previously saved PCoder for reading and writing PCollection."""
raise NotImplementedError
def cleanup(self):
# type () -> None
"""Cleans up all the PCollection caches."""
raise NotImplementedError
class FileBasedCacheManager(CacheManager):
"""Maps PCollections to local temp files for materialization."""
_available_formats = {
'text': (textio.ReadFromText, textio.WriteToText),
'tfrecord': (tfrecordio.ReadFromTFRecord, tfrecordio.WriteToTFRecord)
}
def __init__(self, cache_dir=None, cache_format='text'):
if cache_dir:
self._cache_dir = filesystems.FileSystems.join(
cache_dir,
datetime.datetime.now().strftime("cache-%y-%m-%d-%H_%M_%S"))
else:
self._cache_dir = tempfile.mkdtemp(
prefix='interactive-temp-', dir=os.environ.get('TEST_TMPDIR', None))
self._versions = collections.defaultdict(lambda: self._CacheVersion())
if cache_format not in self._available_formats:
raise ValueError("Unsupported cache format: '%s'." % cache_format)
self._reader_class, self._writer_class = self._available_formats[
cache_format]
self._default_pcoder = (
SafeFastPrimitivesCoder() if cache_format == 'text' else None)
# List of saved pcoders keyed by PCollection path. It is OK to keep this
# list in memory because once FileBasedCacheManager object is
# destroyed/re-created it loses the access to previously written cache
# objects anyways even if cache_dir already exists. In other words,
# it is not possible to resume execution of Beam pipeline from the
# saved cache if FileBasedCacheManager has been reset.
#
# However, if we are to implement better cache persistence, one needs
# to take care of keeping consistency between the cached PCollection
# and its PCoder type.
self._saved_pcoders = {}
def exists(self, *labels):
return bool(self._match(*labels))
def _latest_version(self, *labels):
timestamp = 0
for path in self._match(*labels):
timestamp = max(timestamp, filesystems.FileSystems.last_updated(path))
result = self._versions["-".join(labels)].get_version(timestamp)
return result
def save_pcoder(self, pcoder, *labels):
self._saved_pcoders[self._path(*labels)] = pcoder
def load_pcoder(self, *labels):
return (
self._default_pcoder if self._default_pcoder is not None else
self._saved_pcoders[self._path(*labels)])
def read(self, *labels):
# Return an iterator to an empty list if it doesn't exist.
if not self.exists(*labels):
return iter([]), -1
# Otherwise, return a generator to the cached PCollection.
source = self.source(*labels)._source
range_tracker = source.get_range_tracker(None, None)
reader = source.read(range_tracker)
version = self._latest_version(*labels)
return reader, version
def write(self, values, *labels):
sink = self.sink(labels)._sink
path = self._path(*labels)
init_result = sink.initialize_write()
writer = sink.open_writer(init_result, path)
for v in values:
writer.write(v)
writer.close()
def source(self, *labels):
return self._reader_class(
self._glob_path(*labels), coder=self.load_pcoder(*labels))
def sink(self, labels, is_capture=False):
return self._writer_class(
self._path(*labels), coder=self.load_pcoder(*labels))
def cleanup(self):
if filesystems.FileSystems.exists(self._cache_dir):
filesystems.FileSystems.delete([self._cache_dir])
self._saved_pcoders = {}
def _glob_path(self, *labels):
return self._path(*labels) + '-*-of-*'
def _path(self, *labels):
return filesystems.FileSystems.join(self._cache_dir, *labels)
def _match(self, *labels):
match = filesystems.FileSystems.match([self._glob_path(*labels)])
assert len(match) == 1
return [metadata.path for metadata in match[0].metadata_list]
class _CacheVersion(object):
"""This class keeps track of the timestamp and the corresponding version."""
def __init__(self):
self.current_version = -1
self.current_timestamp = 0
def get_version(self, timestamp):
"""Updates version if necessary and returns the version number.
Args:
timestamp: (int) unix timestamp when the cache is updated. This value is
zero if the cache has been evicted or doesn't exist.
"""
# Do not update timestamp if the cache's been evicted.
if timestamp != 0 and timestamp != self.current_timestamp:
assert timestamp > self.current_timestamp
self.current_version = self.current_version + 1
self.current_timestamp = timestamp
return self.current_version
class ReadCache(beam.PTransform):
"""A PTransform that reads the PCollections from the cache."""
def __init__(self, cache_manager, label):
self._cache_manager = cache_manager
self._label = label
def expand(self, pbegin):
# pylint: disable=expression-not-assigned
return pbegin | 'Read' >> self._cache_manager.source('full', self._label)
class WriteCache(beam.PTransform):
"""A PTransform that writes the PCollections to the cache."""
def __init__(
self,
cache_manager,
label,
sample=False,
sample_size=0,
is_capture=False):
self._cache_manager = cache_manager
self._label = label
self._sample = sample
self._sample_size = sample_size
self._is_capture = is_capture
def expand(self, pcoll):
prefix = 'sample' if self._sample else 'full'
# We save pcoder that is necessary for proper reading of
# cached PCollection. _cache_manager.sink(...) call below
# should be using this saved pcoder.
self._cache_manager.save_pcoder(
coders.registry.get_coder(pcoll.element_type), prefix, self._label)
if self._sample:
pcoll |= 'Sample' >> (
combiners.Sample.FixedSizeGlobally(self._sample_size)
| beam.FlatMap(lambda sample: sample))
# pylint: disable=expression-not-assigned
return pcoll | 'Write' >> self._cache_manager.sink(
(prefix, self._label), is_capture=self._is_capture)
class SafeFastPrimitivesCoder(coders.Coder):
"""This class add an quote/unquote step to escape special characters."""
# pylint: disable=deprecated-urllib-function
def encode(self, value):
return quote(
coders.coders.FastPrimitivesCoder().encode(value)).encode('utf-8')
def decode(self, value):
return coders.coders.FastPrimitivesCoder().decode(unquote_to_bytes(value))
|
|
#
# This source file is part of the EdgeDB open source project.
#
# Copyright 2016-present MagicStack Inc. and the EdgeDB authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Provides a `profile()` decorator with aggregation capabilities.
See README.md in this package for more details.
"""
from __future__ import annotations
from typing import *
import ast
import atexit
import builtins
import cProfile
import dataclasses
import functools
import hashlib
import linecache
import os
import pathlib
import pickle
import pstats
import re
import sys
import tempfile
from xml.sax import saxutils
from edb.tools.profiling import tracing_singledispatch
CURRENT_DIR = pathlib.Path(__file__).resolve().parent
EDGEDB_DIR = CURRENT_DIR.parent.parent.parent
PROFILING_JS = CURRENT_DIR / "svg_helpers.js"
PREFIX = "edgedb_"
STAT_SUFFIX = ".pstats"
PROF_SUFFIX = ".prof"
SVG_SUFFIX = ".svg"
SINGLEDISPATCH_SUFFIX = ".singledispatch"
T = TypeVar("T", bound=Callable[..., Any])
if TYPE_CHECKING:
ModulePath = str
LineNo = int
FunctionName = str
FunctionID = Tuple[ModulePath, LineNo, FunctionName]
LineID = Tuple[ModulePath, LineNo]
# cc, nc, tt, ct, callers
PrimitiveCallCount = int # without recursion
CallCount = int # with recursion
TotalTime = float
CumulativeTime = float
Stat = Tuple[PrimitiveCallCount, CallCount, TotalTime, CumulativeTime]
StatWithCallers = Tuple[
PrimitiveCallCount,
CallCount,
TotalTime,
CumulativeTime,
Dict[FunctionID, Stat], # callers
]
Stats = Dict[FunctionID, StatWithCallers]
Caller = FunctionID
Callee = FunctionID
Call = Tuple[Caller, Optional[Callee]]
CallCounts = Dict[Caller, Dict[Callee, CallCount]]
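# For example (illustrative values only): a FunctionID looks like
#   ("edb/server/compiler.py", 1423, "compile")
# and the corresponding Stat is (primitive call count, call count,
# total time, cumulative time), e.g. (10, 12, 0.004, 1.25).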
class profile:
"""A decorator for CPU profiling."""
def __init__(
self,
*,
prefix: str = PREFIX,
suffix: str = PROF_SUFFIX,
dir: Optional[str] = None,
save_every_n_calls: int = 1,
):
"""Create the decorator.
If `save_every_n_calls` is greater than 1, the profiler will not
dump data to files on every call to the profiled function. This speeds
up the running program but risks incomplete data if the process is
terminated non-gracefully.
`dir`, `prefix`, and `suffix` are used as in `tempfile.mkstemp`.
"""
self.prefix = prefix
self.suffix = suffix
self.save_every_n_calls = save_every_n_calls
self.n_calls = 0
self._dir: Union[str, pathlib.Path, None] = dir
self._profiler: Optional[cProfile.Profile] = None
self._dump_file_path: Optional[str] = None
def __call__(self, func: T) -> T:
"""Apply decorator to a function."""
@functools.wraps(func)
def wrapper(*args, **kwargs):
tracing_singledispatch.profiling_in_progress.set()
self.n_calls += 1
self.profiler.enable()
try:
return func(*args, **kwargs)
finally:
self.profiler.disable()
if self.n_calls % self.save_every_n_calls == 0:
self.dump_stats()
tracing_singledispatch.profiling_in_progress.clear()
return cast(T, wrapper)
@property
def dir(self) -> pathlib.Path:
if self._dir is None:
with tempfile.NamedTemporaryFile() as tmp:
self._dir = pathlib.Path(tmp.name).parent
return pathlib.Path(self._dir)
@property
def profiler(self) -> cProfile.Profile:
if self._profiler is None:
self._profiler = cProfile.Profile()
if self.save_every_n_calls > 1:
# This is attached here so the registration is in the right
# process (relevant for multiprocessing workers). This is
# still sadly flimsy, hence the `save every n calls`.
atexit.register(self.dump_stats)
return self._profiler
@property
def dump_file(self) -> str:
"""Return a path to a new, empty, existing named temporary file."""
if self._dump_file_path is None:
file = tempfile.NamedTemporaryFile(
dir=self.dir,
prefix=self.prefix,
suffix=self.suffix,
delete=False,
)
file.close()
self._dump_file_path = file.name
return self._dump_file_path
def dump_stats(self) -> None:
self.profiler.dump_stats(self.dump_file)
try:
done_dispatches = tracing_singledispatch.done_dispatches
except AttributeError:
return # we're at program exit; `tracing_singledispatch` went away
if done_dispatches:
with open(self.dump_file + ".singledispatch", "wb") as sd_file:
pickle.dump(done_dispatches, sd_file, pickle.HIGHEST_PROTOCOL)
def aggregate(
self,
out_path: pathlib.Path,
*,
sort_by: str = "",
width: int = 1920,
threshold: float = 0.0001, # 1.0 is 100%
quiet: bool = False,
) -> Tuple[int, int]:
"""Read all pstats in `self.dir` and write a summary to `out_path`.
`sort_by` after `pstats.sort_stats()`. Files identified by `self.dir`,
`self.prefix`, and `self.suffix`.
`width` selects the width of the generated SVG.
Functions whose runtime is below `threshold` percentage are not
included.
Returns a tuple with the number of successfully and unsuccessfully
aggregated files.
"""
print = builtins.print
if quiet:
print = lambda *args, **kwargs: None
if out_path.is_dir():
out_path = out_path / "profile_analysis"
prof_path = out_path.with_suffix(PROF_SUFFIX)
pstats_path = out_path.with_suffix(STAT_SUFFIX)
call_svg_path = out_path.with_suffix(".call_stack" + SVG_SUFFIX)
usage_svg_path = out_path.with_suffix(".usage" + SVG_SUFFIX)
files = list(
str(f) for f in self.dir.glob(self.prefix + "*" + self.suffix)
)
if not files:
print(f"warning: no files to process", file=sys.stderr)
return 0, 0
success = 0
failure = 0
with open(pstats_path, "w") as out:
ps = pstats.Stats(stream=out)
for file in files:
try:
ps.add(file)
except TypeError as te:
# Probably the profile file is empty.
print(te, file=sys.stderr)
failure += 1
else:
success += 1
ps.dump_stats(str(prof_path))
if sort_by:
ps.sort_stats(sort_by)
ps.print_stats()
singledispatch_traces = self.accumulate_singledispatch_traces()
if singledispatch_traces:
singledispatch_path = out_path.with_suffix(SINGLEDISPATCH_SUFFIX)
with singledispatch_path.open("wb") as sd_file:
pickle.dump(
singledispatch_traces, sd_file, pickle.HIGHEST_PROTOCOL
)
# Mypy is wrong below, `stats` is there on all pstats.Stats objects
stats = ps.stats # type: ignore
filter_singledispatch_in_place(stats, singledispatch_traces)
try:
render_svg(
stats,
call_svg_path,
usage_svg_path,
width=width,
threshold=threshold,
)
except ValueError as ve:
print(f"Cannot display flame graph: {ve}", file=sys.stderr)
print(
f"Processed {success + failure} files, {failure} failed.",
file=sys.stderr,
)
return success, failure
def accumulate_singledispatch_traces(self) -> Dict[FunctionID, CallCounts]:
result: Dict[FunctionID, CallCounts] = {}
d = self.dir.glob(
self.prefix + "*" + self.suffix + SINGLEDISPATCH_SUFFIX
)
for f in d:
with open(str(f), "rb") as file:
dispatches = pickle.load(file)
for singledispatch_funcid, call_counts in dispatches.items():
for caller, calls in call_counts.items():
for impl, call_count in calls.items():
r_d = result.setdefault(singledispatch_funcid, {})
c_d = r_d.setdefault(caller, {})
c_d[impl] = c_d.get(impl, 0) + call_count
return result
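# Usage sketch (illustrative only; `handle_request` is a hypothetical
# function): wrap a hot entry point so cProfile data is dumped to the
# temporary directory every 10th call, then aggregate the dumps into a
# summary and SVG flame graphs afterwards.
#
#     profiled = profile(prefix="edgedb_", save_every_n_calls=10)
#
#     @profiled
#     def handle_request(payload):
#         ...
#
#     # later, e.g. from a separate tool:
#     profiled.aggregate(pathlib.Path("/tmp/out"), sort_by="cumulative")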
def profile_memory(func: Callable[[], Any]) -> MemoryFrame:
"""Profile memory and return a tree of statistics.
Feed those to `render_memory_svg()` to write an SVG.
"""
import tracemalloc
tracemalloc.start(1024)
try:
func()
finally:
snap = tracemalloc.take_snapshot()
tracemalloc.stop()
stats = snap.statistics("traceback")
root = MemoryFrame(blocks=0, size=0)
for stat in stats:
blocks = stat.count
size = stat.size
callee = root
callee.blocks += blocks
callee.size += size
for frame in stat.traceback:
lineid = (frame.filename, frame.lineno)
callee = callee.callers.setdefault(
lineid, MemoryFrame(blocks=0, size=0)
)
callee.blocks += blocks
callee.size += size
while len(root.callers) == 1:
root = next(iter(root.callers.values()))
return root
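# Usage sketch (illustrative only): profile allocations made by a
# zero-argument callable and render the resulting tree; "memory.svg" is an
# arbitrary output path chosen for the example.
#
#     frame = profile_memory(lambda: [bytearray(1024) for _ in range(100)])
#     render_memory_svg(frame, "memory.svg")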
@dataclasses.dataclass
class Function:
id: FunctionID
calls: List[FunctionID]
calledby: List[FunctionID]
stat: Stat
ROOT_ID: FunctionID = ("<root>", 0, "<root>")
class RGB(NamedTuple):
r: int
g: int
b: int
def gen_colors(s: RGB, e: RGB, size: int) -> Iterator[RGB]:
"""Generate a gradient of `size` colors between `s` and `e`."""
for i in range(size):
yield RGB(
s.r + (e.r - s.r) * i // size,
s.g + (e.g - s.g) * i // size,
s.b + (e.b - s.b) * i // size,
)
COLORS = list(gen_colors(RGB(255, 240, 141), RGB(255, 65, 34), 7))
CCOLORS = list(gen_colors(RGB(44, 255, 210), RGB(113, 194, 0), 5))
ECOLORS = list(gen_colors(RGB(230, 230, 255), RGB(150, 150, 255), 5))
DCOLORS = list(gen_colors(RGB(190, 190, 190), RGB(240, 240, 240), 7))
def gradient_from_name(name: str) -> float:
v = int(hashlib.sha1(name.encode("utf8")).hexdigest()[:8], base=16)
return v / (0xFFFFFFFF + 1.0)
def calc_callers(
stats: Stats, threshold: float,
) -> Tuple[Dict[FunctionID, Function], Dict[Call, Stat]]:
"""Calculate flattened stats of calls between functions."""
roots: List[FunctionID] = []
funcs: Dict[FunctionID, Function] = {}
calls: Dict[Call, Stat] = {}
for func, (cc, nc, tt, ct, callers) in stats.items():
funcs[func] = Function(
id=func, calls=[], calledby=[], stat=(cc, nc, tt, ct)
)
if not callers:
roots.append(func)
calls[ROOT_ID, func] = funcs[func].stat
for func, (_, _, _, _, callers) in stats.items():
for caller, t in callers.items():
assert (caller, func) not in calls
funcs[caller].calls.append(func)
funcs[func].calledby.append(caller)
calls[caller, func] = t
total = sum(funcs[r].stat[3] for r in roots)
ttotal = sum(funcs[r].stat[2] for r in funcs)
# Try to find suitable root
newroot = max(
(r for r in funcs if r not in roots), key=lambda r: funcs[r].stat[3]
)
nstat = funcs[newroot].stat
ntotal = total + nstat[3]
if 0.8 < ntotal / ttotal < 1.2:
roots.append(newroot)
calls[ROOT_ID, newroot] = nstat
total = ntotal
else:
total = ttotal
funcs[ROOT_ID] = Function(
id=ROOT_ID, calls=roots, calledby=[], stat=(1, 1, 0, total),
)
return funcs, calls
@dataclasses.dataclass
class Block:
func: FunctionID
call_stack: Tuple[FunctionID, ...]
color: int
level: int
tooltip: str
w: float
x: float
@property
def id(self) -> str:
return repr(self.func)
@property
def name(self) -> FunctionName:
result = self.func[2]
if result.startswith("<built-in method builtins."):
result = result[len("<built-in method ") : -1]
return result
@property
def module(self) -> str:
result = self.func[0]
edgedb = str(EDGEDB_DIR) + os.sep
if result.startswith(edgedb):
return result[len(edgedb) :]
parts = []
maybe_stdlib = False
for part in pathlib.Path(result).parts[::-1]:
parts.append(part)
if part in {"python3.6", "python3.7", "python3.8", "python3.9"}:
maybe_stdlib = True
elif maybe_stdlib:
if part == "lib":
parts.pop()
return os.sep.join(parts[::-1])
break
return result
@property
def full_name(self) -> str:
result = ""
if self.func[0] not in {"~", "", None}:
result += self.module
result += ":"
if self.func[1] not in (0, None):
result += str(self.func[1])
result += ":"
result += self.name
return f"{result} {self.tooltip}"
@dataclasses.dataclass
class MemoryFrame:
"""A node of a tree of calls.
Leaves are where memory allocations actually happened.
"""
blocks: int
size: int # in bytes
callers: Dict[LineID, MemoryFrame] = dataclasses.field(
default_factory=dict
)
class ScopeRecorder(ast.NodeVisitor):
"""A nifty AST visitor that records all scope changes in the file."""
# the value is a qualified name (without the module), e.g. "Class.method"
scopes: Dict[LineNo, str]
# internal stack for correct naming in the scope
stack: List[FunctionName]
def __init__(self):
self.reset()
self.visit_FunctionDef = self.handle_scopes
self.visit_AsyncFunctionDef = self.handle_scopes
self.visit_ClassDef = self.handle_scopes
super().__init__()
def reset(self) -> None:
self.stack = []
self.scopes = {}
def handle_scopes(
self, node: Union[ast.FunctionDef, ast.AsyncFunctionDef, ast.ClassDef]
) -> None:
self.stack.append(node.name)
self.scopes[node.lineno] = ".".join(self.stack)
self.generic_visit(node)
self.stack.pop()
class ScopeCache:
"""Returns qualified names of scopes for a given module path and lineno.
Caches both scopes from ScopeRecorder and queries about a given line number
(which is likely *inside* the function body).
"""
def __init__(self):
self.recorder = ScopeRecorder()
self.scopes: Dict[ModulePath, Dict[LineNo, str]] = {}
self.cache: Dict[Tuple[ModulePath, LineNo], str] = {}
def __getitem__(self, key: Tuple[ModulePath, LineNo]) -> str:
if key not in self.cache:
try:
self.cache[key] = self._get_scope(key[0], key[1])
except FileNotFoundError:
self.cache[key] = ""
return self.cache[key]
def _get_scope(self, mod_path: ModulePath, wanted_lineno: LineNo) -> str:
if mod_path not in self.scopes:
with open(mod_path) as mod:
self.recorder.visit(ast.parse(mod.read(), mod_path))
self.scopes[mod_path] = self.recorder.scopes
self.recorder.reset()
last_scope = ""
for lineno, scope in sorted(self.scopes[mod_path].items()):
if lineno > wanted_lineno:
return last_scope
last_scope = scope
return last_scope
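# Usage sketch (illustrative only; the module path and line numbers are
# made up): assuming "pkg/mod.py" defines `class C` with a method `m`
# starting at line 12, a lookup anywhere inside the method body resolves
# to the qualified scope name.
#
#     scopes = ScopeCache()
#     scopes["pkg/mod.py", 15]   # => "C.m"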
def count_calls(funcs: Dict[FunctionID, Function]) -> Counter[Call]:
call_counter: Counter[Call] = Counter()
def _counts(
caller: FunctionID, visited: Set[Call], level: int = 0
) -> None:
for callee in funcs[caller].calls:
call = caller, callee
call_counter[call] += 1
if call_counter[call] < 2 and call not in visited:
_counts(callee, visited | {call}, level + 1)
_counts(ROOT_ID, set())
return call_counter
def find_singledispatch_wrapper(
stats: Stats, *, regular_location: bool = False
) -> FunctionID:
"""Returns the singledispatch wrapper function ID tuple.
Raises LookupError if not found.
"""
if regular_location:
functools_path = re.compile(r"python3.\d+/functools.py$")
dispatch_name = "dispatch"
wrapper_name = "wrapper"
else:
functools_path = re.compile(r"profiling/tracing_singledispatch.py$")
dispatch_name = "dispatch"
wrapper_name = "sd_wrapper"
for (modpath, _lineno, funcname), (_, _, _, _, callers) in stats.items():
if funcname != dispatch_name:
continue
m = functools_path.search(modpath)
if not m:
continue
# Using this opportunity, we're figuring out which `wrapper` from
# functools in the trace is the singledispatch `wrapper` (there
# are three more others in functools.py).
for caller_modpath, caller_lineno, caller_funcname in callers:
if caller_funcname == wrapper_name:
m = functools_path.search(caller_modpath)
if not m:
continue
return (caller_modpath, caller_lineno, caller_funcname)
raise LookupError("singledispatch.dispatch without wrapper?")
raise LookupError("No singledispatch use in provided stats")
def filter_singledispatch_in_place(
stats: Stats,
dispatches: Dict[FunctionID, CallCounts],
regular_location: bool = False,
) -> None:
"""Removes singledispatch `wrapper` from the `stats.`
Given that:
- W is a wrapper function hiding original function O;
- D is the internal dispatching function of singledispatch;
- W calls D first to select which function to call;
- then, W calls the concrete registered implementations F1, F2, F3, and
rather rarely, O.
This filter changes this ( -> means "calls"):
A -> W -> F1
A -> W -> D
into this:
A -> F1
A -> D
"""
try:
wrapper = find_singledispatch_wrapper(
stats, regular_location=regular_location
)
except LookupError:
return
# Delete the function from stats
del stats[wrapper]
# Fix up all "callers" stats
singledispatch_functions = {d: (0, 0, 0, 0) for d in dispatches}
for funcid, (_, _, _, _, callers) in stats.items():
if wrapper not in callers:
continue
new_direct_calls = {}
for call_counts in dispatches.values():
for caller, calls in call_counts.items():
if funcid not in calls:
continue
new_direct_calls[caller] = calls[funcid]
pcc, cc, tottime, cumtime = callers.pop(wrapper)
all_calls = sum(new_direct_calls.values())
if all_calls == 0:
count = len(singledispatch_functions)
for sdfid, old_stats in singledispatch_functions.items():
cur_stats = (
round(pcc / count),
round(cc / count),
tottime / count,
cumtime / count,
)
callers[sdfid] = cur_stats
new_stats = tuple(
old_stats[i] + cur_stats[i] for i in range(4)
)
singledispatch_functions[sdfid] = new_stats # type: ignore
continue
factor = all_calls / cc
pcc_fl = pcc * factor
cc_fl = cc * factor
tottime *= factor
cumtime *= factor
for caller, count in new_direct_calls.items():
factor = count / cc_fl
callers[caller] = (
round(pcc_fl * factor),
count,
tottime * factor,
cumtime * factor,
)
# Insert original single dispatch generic functions back
for sdfid, sd_stats in singledispatch_functions.items():
o_pcc, o_cc, o_tottime, o_cumtime, callers = stats.get(
sdfid, (0, 0, 0, 0, {})
)
stats[sdfid] = (
sd_stats[0] + o_pcc,
sd_stats[1] + o_cc,
sd_stats[2] + o_tottime,
sd_stats[3] + o_cumtime,
callers,
)
def build_svg_blocks(
funcs: Dict[FunctionID, Function],
calls: Dict[Call, Stat],
threshold: float,
) -> Tuple[List[Block], List[Block], float]:
call_stack_blocks: List[Block] = []
usage_blocks: List[Block] = []
counts: Counter[Call] = count_calls(funcs)
maxw = float(funcs[ROOT_ID].stat[3])
def _build_blocks_by_call_stack(
func: FunctionID,
scaled_timings: Stat,
*,
visited: AbstractSet[Call] = frozenset(),
level: int = 0,
origin: float = 0,
call_stack: Tuple[FunctionID, ...] = (),
parent_call_count: int = 1,
parent_block: Optional[Block] = None,
) -> None:
_, _, func_tt, func_tc = scaled_timings
pcc = parent_call_count
fchildren = [
(f, funcs[f], calls[func, f], max(counts[func, f], pcc))
for f in funcs[func].calls
]
fchildren.sort(key=lambda elem: elem[0])
gchildren = [elem for elem in fchildren if elem[3] == 1]
bchildren = [elem for elem in fchildren if elem[3] > 1]
if bchildren:
gchildren_tc_sum = sum(r[2][3] for r in gchildren)
bchildren_tc_sum = sum(r[2][3] for r in bchildren)
rest = func_tc - func_tt - gchildren_tc_sum
if bchildren_tc_sum > 0:
factor = rest / bchildren_tc_sum
else:
factor = 1
# Round up and scale times and call counts.
bchildren = [
(
f,
ff,
(
round(cc * factor),
round(nc * factor),
tt * factor,
tc * factor,
),
ccnt,
)
for f, ff, (cc, nc, tt, tc), ccnt in bchildren
]
for child, _, (cc, nc, tt, tc), call_count in gchildren + bchildren:
if tc / maxw < threshold:
origin += tc
continue
child_call_stack = call_stack + (child,)
tooltip = TOOLTIP.format(tc / maxw, cc, nc, tt, tc)
block = Block(
func=child,
call_stack=child_call_stack,
color=(parent_call_count == 1 and call_count > 1),
level=level,
tooltip=tooltip,
w=tc,
x=origin,
)
call_stack_blocks.append(block)
call = func, child
if call not in visited:
_build_blocks_by_call_stack(
child,
(cc, nc, tt, tc),
level=level + 1,
origin=origin,
visited=visited | {call},
call_stack=child_call_stack,
parent_call_count=call_count,
parent_block=block,
)
origin += tc
def _build_blocks_by_usage(
ids: Sequence[FunctionID],
*,
level: int = 0,
to: Optional[FunctionID] = None,
origin: float = 0,
visited: AbstractSet[Call] = frozenset(),
parent_width: float = 0,
) -> None:
factor = 1.0
if ids and to is not None:
calls_tottime = sum(calls[fid, to][3] for fid in ids)
if calls_tottime:
factor = parent_width / calls_tottime
for fid in sorted(ids):
call = fid, to
if to is not None:
cc, nc, tt, tc = calls[call] # type: ignore
ttt = tc * factor
else:
cc, nc, tt, tc = funcs[fid].stat
ttt = tt * factor
if ttt / maxw < threshold:
origin += ttt
continue
tooltip = TOOLTIP.format(tt / maxw, cc, nc, tt, tc)
block = Block(
func=fid,
call_stack=(),
color=2 if level > 0 else not funcs[fid].calls,
level=level,
tooltip=tooltip,
w=ttt,
x=origin,
)
usage_blocks.append(block)
if call not in visited:
_build_blocks_by_usage(
funcs[fid].calledby,
level=level + 1,
to=fid,
origin=origin,
visited=visited | {call},
parent_width=ttt,
)
origin += ttt
_build_blocks_by_call_stack(ROOT_ID, scaled_timings=(1, 1, maxw, maxw))
_build_blocks_by_usage([fid for fid in funcs if fid != ROOT_ID])
return call_stack_blocks, usage_blocks, maxw
def build_svg_blocks_by_memory(
root: MemoryFrame,
*,
maxw: int,
level: int = 0,
x: int = 0,
scope_cache: Optional[ScopeCache] = None,
) -> Iterator[Block]:
if scope_cache is None:
scope_cache = ScopeCache()
for (mod_path, lineno), caller in root.callers.items():
func_name = scope_cache[mod_path, lineno]
line = linecache.getline(mod_path, lineno).strip()
if len(caller.callers) == 0 or level == 0:
color = 0
elif len(caller.callers) >= 2:
color = 1
else:
color = 2
yield Block(
func=(mod_path, lineno, func_name),
call_stack=(),
color=color,
level=level,
tooltip=(
f"{caller.size / 1024 :.2f} KiB / {caller.blocks}"
f" blocks \N{RIGHTWARDS DOUBLE ARROW} {line}"
),
w=caller.size,
x=x,
)
yield from build_svg_blocks_by_memory(
caller, maxw=maxw, level=level + 1, x=x, scope_cache=scope_cache,
)
x += caller.size
def render_svg_section(
blocks: List[Block],
maxw: float,
colors: List[List[RGB]],
block_height: int,
font_size: int,
width: int,
javascript: str = "",
invert: bool = False,
) -> str:
maxlevel = max(r.level for r in blocks) + 1
height = (maxlevel + 1) * block_height
top = 0 if not invert else 3 * block_height
content = []
for b in blocks:
x = b.x * width / maxw
tx = block_height / 6
y = b.level
if invert:
y = maxlevel - y
y = top + height - y * block_height - block_height
ty = block_height / 2
w = max(1, b.w * width / maxw - 1)
bcolors = colors[b.color]
fill = bcolors[int(len(bcolors) * gradient_from_name(b.id))]
content.append(
ELEM.format(
w=w,
x=x,
y=y,
tx=tx,
ty=ty,
name=saxutils.escape(b.name),
full_name=saxutils.escape(b.full_name),
font_size=font_size,
h=block_height - 1,
fill=fill,
upsidedown="true" if invert else "false",
)
)
height += block_height
content.append(
DETAILS.format(
font_size=font_size, y=2 * block_height if invert else height
)
)
result = SVG.format(
"\n".join(content),
javascript=javascript,
width=width,
height=top + height + block_height,
unzoom_button_x=width - 100,
ui_font_size=1.33 * font_size,
)
return result
def render_svg(
stats: Stats,
call_out: Union[pathlib.Path, str],
usage_out: Union[pathlib.Path, str],
*,
threshold: float = 0.00001, # 1.0 is 100%
width: int = 1920, # in pixels
block_height: int = 24, # in pixels
font_size: int = 12,
raw: bool = False,
) -> None:
"""Render an SVG file to `call_out` and `usage_out`.
Raises ValueError if rendering cannot be done with the given `stats`.
Functions whose runtime is below `threshold` percentage are not included.
Unless `raw` is True, functions are filtered to exclude common wrappers
that make the resulting SVG too busy but are themselves harmless.
"""
funcs, calls = calc_callers(stats, threshold)
call_blocks, usage_blocks, maxw = build_svg_blocks(
funcs, calls, threshold=threshold
)
with PROFILING_JS.open() as js_file:
javascript = js_file.read()
if call_blocks:
call_svg = render_svg_section(
call_blocks,
maxw,
[COLORS, CCOLORS],
block_height=block_height,
font_size=font_size,
width=width,
javascript=javascript,
)
with open(call_out, "w") as outf:
outf.write(call_svg)
if usage_blocks:
usage_svg = render_svg_section(
usage_blocks,
maxw,
[COLORS, ECOLORS, DCOLORS],
block_height=block_height,
font_size=font_size,
width=width,
javascript=javascript,
)
with open(usage_out, "w") as outf:
outf.write(usage_svg)
def render_memory_svg(
stats: MemoryFrame,
out: Union[pathlib.Path, str],
*,
width: int = 1920, # in pixels
block_height: int = 24, # in pixels
font_size: int = 12,
):
with PROFILING_JS.open() as js_file:
javascript = js_file.read()
maxw = stats.size
mem_blocks = list(build_svg_blocks_by_memory(stats, maxw=maxw))
mem_svg = render_svg_section(
mem_blocks,
maxw,
[COLORS, CCOLORS, DCOLORS],
block_height=block_height,
font_size=font_size,
width=width,
javascript=javascript,
invert=True,
)
with open(out, "w") as outf:
outf.write(mem_svg)
SVG = """\
<?xml version="1.0" standalone="no"?>
<!DOCTYPE svg PUBLIC "-//W3C//DTD SVG 1.1//EN" \
"http://www.w3.org/Graphics/SVG/1.1/DTD/svg11.dtd">
<svg version="1.1" width="{width}" height="{height}"
xmlns="http://www.w3.org/2000/svg"
xmlns:xlink="http://www.w3.org/1999/xlink"
onload="init(evt)"
class="default">
<style type="text/css">
.func_g {{ font-family: arial }}
.func_g:hover {{ stroke:black; stroke-width:0.5; cursor:pointer; }}
</style>
<script type="text/ecmascript">
<![CDATA[
{javascript}
]]>
</script>
<text id="unzoom" onclick="unzoom()"
text-anchor="" x="{unzoom_button_x}" y="24"
font-size="{ui_font_size}" font-family="arial"
fill="rgb(0,0,0)" style="opacity:0.0;cursor:pointer" >Reset Zoom</text>
<text id="search"
onmouseover="searchover()" onmouseout="searchout()" onclick="search_prompt()"
text-anchor="" x="10" y="24"
font-size="{ui_font_size}" font-family="arial"
fill="rgb(0,0,0)" style="opacity:0.1;cursor:pointer" >Search</text>
{}
</svg>"""
ELEM = """\
<svg class="func_g" x="{x}" y="{y}" width="{w}" height="{h}"
onclick="zoom(this, {upsidedown})" onmouseover="s(this)" onmouseout="s()">
<title>{full_name}</title>
<rect height="100%" width="100%" fill="rgb({fill.r},{fill.g},{fill.b})"
rx="2" ry="2" />
<text text-anchor="" x="{tx}" y="{ty}"
font-size="{font_size}px" fill="rgb(0,0,0)">{name}</text>
</svg>"""
DETAILS = """
<text id="details" text-anchor="" x="10.00" y="{y}"
font-family="arial" font-size="{font_size}" font-weight="bold"
fill="rgb(0,0,0)"> </text>
"""
TOOLTIP = "{0:.2%} (calls={1} pcalls={2} tottime={3:.2f} cumtime={4:.2f})"
|
|
#
# Copyright (c) 2014 Juniper Networks, Inc. All rights reserved.
#
"""
This file contains implementation of data model for SVC monitor
"""
from pysandesh.gen_py.sandesh.ttypes import SandeshLevel
from cfgm_common.vnc_db import DBBase
from cfgm_common import svc_info
class DBBaseSM(DBBase):
obj_type = __name__
class LoadbalancerPoolSM(DBBaseSM):
_dict = {}
obj_type = 'loadbalancer_pool'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.members = set()
self.loadbalancer_healthmonitors = set()
self.service_instance = None
self.virtual_machine_interface = None
self.virtual_ip = None
self.update(obj_dict)
self.last_sent = None
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.params = obj.get('loadbalancer_pool_properties', None)
self.provider = obj.get('loadbalancer_pool_provider', None)
self.members = set([lm['uuid'] for lm in obj.get('loadbalancer_members', [])])
self.id_perms = obj.get('id_perms', None)
self.parent_uuid = obj['parent_uuid']
self.display_name = obj.get('display_name', None)
self.update_single_ref('service_instance', obj)
self.update_single_ref('virtual_ip', obj)
self.update_single_ref('virtual_machine_interface', obj)
self.update_multiple_refs('loadbalancer_healthmonitor', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
cls._manager.loadbalancer_agent.delete_loadbalancer_pool(obj)
obj.update_single_ref('service_instance', {})
obj.update_single_ref('virtual_ip', {})
obj.update_single_ref('virtual_machine_interface', {})
obj.update_multiple_refs('loadbalancer_healthmonitor', {})
del cls._dict[uuid]
# end delete
def add(self):
self.last_sent = \
self._manager.loadbalancer_agent.loadbalancer_pool_add(self)
if len(self.members):
for member in self.members:
member_obj = LoadbalancerMemberSM.get(member)
if member_obj:
member_obj.last_sent = \
self._manager.loadbalancer_agent.loadbalancer_member_add(member_obj)
if self.virtual_ip:
vip_obj = VirtualIpSM.get(self.virtual_ip)
if vip_obj:
vip_obj.last_sent = \
self._manager.loadbalancer_agent.virtual_ip_add(vip_obj)
# end add
# end class LoadbalancerPoolSM
class LoadbalancerMemberSM(DBBaseSM):
_dict = {}
obj_type = 'loadbalancer_member'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.loadbalancer_pool = {}
self.update(obj_dict)
self.last_sent = None
if self.loadbalancer_pool:
parent = LoadbalancerPoolSM.get(self.loadbalancer_pool)
parent.members.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.params = obj.get('loadbalancer_member_properties', None)
self.loadbalancer_pool = self.get_parent_uuid(obj)
self.id_perms = obj.get('id_perms', None)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
cls._manager.loadbalancer_agent.delete_loadbalancer_member(obj)
if obj.loadbalancer_pool:
parent = LoadbalancerPoolSM.get(obj.loadbalancer_pool)
if parent:
parent.members.discard(obj.uuid)
del cls._dict[uuid]
# end delete
# end class LoadbalancerMemberSM
class VirtualIpSM(DBBaseSM):
_dict = {}
obj_type = 'virtual_ip'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interface = None
self.loadbalancer_pool = None
self.update(obj_dict)
self.last_sent = None
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.params = obj.get('virtual_ip_properties', None)
self.update_single_ref('virtual_machine_interface', obj)
self.update_single_ref('loadbalancer_pool', obj)
self.id_perms = obj.get('id_perms', None)
self.parent_uuid = obj['parent_uuid']
self.display_name = obj.get('display_name', None)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
cls._manager.loadbalancer_agent.delete_virtual_ip(obj)
obj.update_single_ref('virtual_machine_interface', {})
obj.update_single_ref('loadbalancer_pool', {})
del cls._dict[uuid]
# end delete
# end class VirtualIpSM
class HealthMonitorSM(DBBaseSM):
_dict = {}
obj_type = 'loadbalancer_healthmonitor'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.loadbalancer_pools = set()
self.last_sent = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.params = obj.get('loadbalancer_healthmonitor_properties', None)
self.update_multiple_refs('loadbalancer_pool', obj)
self.id_perms = obj.get('id_perms', None)
self.parent_uuid = obj['parent_uuid']
self.display_name = obj.get('display_name', None)
self.last_sent = self._manager.loadbalancer_agent.update_hm(self)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('loadbalancer_pool', {})
del cls._dict[uuid]
# end delete
# end class HealthMonitorSM
class VirtualMachineSM(DBBaseSM):
_dict = {}
obj_type = 'virtual_machine'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_instance = None
self.virtual_router = None
self.virtual_machine_interfaces = set()
self.virtualization_type = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.update_single_ref('service_instance', obj)
self.update_single_ref('virtual_router', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
self.display_name = obj.get('display_name', None)
if self.display_name is None:
return
display_list = self.display_name.split('__')
if self.service_instance and len(display_list) == 5:
self.virtualization_type = display_list[-1]
self.proj_fq_name = display_list[0:2]
self.index = int(display_list[-2]) - 1
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('service_instance', {})
obj.update_single_ref('virtual_router', {})
obj.update_multiple_refs('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end VirtualMachineSM
class VirtualRouterSM(DBBaseSM):
_dict = {}
obj_type = 'virtual_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machines = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.update_multiple_refs('virtual_machine', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine', {})
del cls._dict[uuid]
# end delete
# end VirtualRouterSM
class VirtualMachineInterfaceSM(DBBaseSM):
_dict = {}
obj_type = 'virtual_machine_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.params = None
self.if_type = None
self.virtual_ip = None
self.virtual_network = None
self.virtual_machine = None
self.loadbalancer_pool = None
self.logical_interface = None
self.instance_ip = None
self.floating_ip = None
self.interface_route_table = None
self.security_group = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
if obj.get('virtual_machine_interface_properties', None):
self.params = obj['virtual_machine_interface_properties']
self.if_type = self.params.get('service_interface_type', None)
self.update_single_ref('virtual_ip', obj)
self.update_single_ref('loadbalancer_pool', obj)
self.update_single_ref('instance_ip', obj)
self.update_single_ref('floating_ip', obj)
self.update_single_ref('virtual_network', obj)
self.update_single_ref('virtual_machine', obj)
self.update_single_ref('logical_interface', obj)
self.update_single_ref('interface_route_table', obj)
self.update_single_ref('security_group', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('virtual_ip', {})
obj.update_single_ref('loadbalancer_pool', {})
obj.update_single_ref('instance_ip', {})
obj.update_single_ref('floating_ip', {})
obj.update_single_ref('virtual_network', {})
obj.update_single_ref('virtual_machine', {})
obj.update_single_ref('logical_interface', {})
obj.update_single_ref('interface_route_table', {})
obj.update_single_ref('security_group', {})
del cls._dict[uuid]
# end delete
# end VirtualMachineInterfaceSM
class ServiceInstanceSM(DBBaseSM):
_dict = {}
obj_type = 'service_instance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_template = None
self.loadbalancer_pool = None
self.virtual_machines = set()
self.params = None
self.state = 'init'
self.launch_count = 0
self.image = None
self.flavor = None
self.max_instances = 0
self.availability_zone = None
self.ha_mode = None
self.vr_id = None
self.vn_changed = False
self.local_preference = [None, None]
self.vn_info = []
self.update(obj_dict)
if self.ha_mode == 'active-standby':
self.max_instances = 2
self.local_preference = [svc_info.get_active_preference(),
svc_info.get_standby_preference()]
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.proj_name = obj['fq_name'][-2]
self.check_vn_changes(obj)
self.params = obj.get('service_instance_properties', None)
self.update_single_ref('service_template', obj)
self.update_single_ref('loadbalancer_pool', obj)
self.update_multiple_refs('virtual_machine', obj)
self.id_perms = obj.get('id_perms', None)
if not self.params:
return
self.vr_id = self.params.get('virtual_router_id', None)
self.ha_mode = self.params.get('ha_mode', None)
if self.ha_mode != 'active-standby':
scale_out = self.params.get('scale_out', None)
if scale_out:
self.max_instances = scale_out.get('max_instances', 1)
# end update
def check_vn_changes(self, obj):
self.vn_changed = False
if not self.params or not obj.get('service_instance_properties'):
return
old_ifs = self.params.get('interface_list', [])
new_ifs = obj['service_instance_properties'].get('interface_list', [])
for index in range(0, len(old_ifs)):
try:
old_if = old_ifs[index]
new_if = new_ifs[index]
except IndexError:
continue
if not old_if['virtual_network'] or not new_if['virtual_network']:
continue
if old_if['virtual_network'] != new_if['virtual_network']:
self.vn_changed = True
return
#end check_vn_changes
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_single_ref('service_template', {})
obj.update_single_ref('loadbalancer_pool', {})
obj.update_multiple_refs('virtual_machine', {})
del cls._dict[uuid]
# end delete
# end class ServiceInstanceSM
class ServiceTemplateSM(DBBaseSM):
_dict = {}
obj_type = 'service_template'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_instances = set()
self.virtualization_type = 'virtual-machine'
self.service_appliance_sets = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.params = obj.get('service_template_properties')
if self.params:
self.virtualization_type = self.params.get(
'service_virtualization_type') or 'virtual-machine'
self.update_multiple_refs('service_instance', obj)
self.update_single_ref('service_appliance_set', obj)
self.id_perms = obj.get('id_perms', None)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('service_instance', {})
obj.update_single_ref('service_appliance_set', {})
del cls._dict[uuid]
# end delete
# end class ServiceTemplateSM
class VirtualNetworkSM(DBBaseSM):
_dict = {}
obj_type = 'virtual_network'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interfaces = set()
obj_dict = self.update(obj_dict)
self.add_to_parent(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.update_multiple_refs('virtual_machine_interface', obj)
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
obj.remove_from_parent()
del cls._dict[uuid]
# end delete
# end class VirtualNetworkSM
class FloatingIpSM(DBBaseSM):
_dict = {}
obj_type = 'floating_ip'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.address = None
self.virtual_machine_interfaces = set()
self.virtual_ip = None
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.address = obj['floating_ip_address']
self.update_multiple_refs('virtual_machine_interface', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end class FloatingIpSM
class InstanceIpSM(DBBaseSM):
_dict = {}
obj_type = 'instance_ip'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.address = None
self.virtual_machine_interfaces = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.address = obj.get('instance_ip_address', None)
self.update_multiple_refs('virtual_machine_interface', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end class InstanceIpSM
class LogicalInterfaceSM(DBBaseSM):
_dict = {}
obj_type = 'logical_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interface = None
self.logical_interface_vlan_tag = 0
self.update(obj_dict)
if self.physical_interface:
parent = PhysicalInterfaceSM.get(self.physical_interface)
elif self.physical_router:
parent = PhysicalRouterSM.get(self.physical_router)
if parent:
parent.logical_interfaces.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
if obj['parent_type'] == 'physical-router':
self.physical_router = self.get_parent_uuid(obj)
self.physical_interface = None
else:
self.physical_interface = self.get_parent_uuid(obj)
self.physical_router = None
self.update_single_ref('virtual_machine_interface', obj)
self.name = obj['fq_name'][-1]
self.logical_interface_vlan_tag = obj.get('logical_interface_vlan_tag', 0)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
if obj.physical_interface:
parent = PhysicalInterfaceSM.get(obj.physical_interface)
elif obj.physical_router:
parent = PhysicalRouterSM.get(obj.physical_router)
if parent:
parent.logical_interfaces.discard(obj.uuid)
obj.update_single_ref('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end LogicalInterfaceSM
class PhysicalInterfaceSM(DBBaseSM):
_dict = {}
obj_type = 'physical_interface'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.update(obj_dict)
pr = PhysicalRouterSM.get(self.physical_router)
if pr:
pr.physical_interfaces.add(self.uuid)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.physical_router = self.get_parent_uuid(obj)
self.logical_interfaces = set([li['uuid'] for li in
obj.get('logical_interfaces', [])])
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
pr = PhysicalRouterSM.get(obj.physical_router)
if pr:
pr.physical_interfaces.discard(obj.uuid)
del cls._dict[uuid]
# end delete
# end PhysicalInterfaceSM
class PhysicalRouterSM(DBBaseSM):
_dict = {}
obj_type = 'physical_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.management_ip = obj.get('physical_router_management_ip')
self.vendor = obj.get('physical_router_vendor_name')
self.physical_interfaces = set([pi['uuid'] for pi in
obj.get('physical_interfaces', [])])
self.logical_interfaces = set([li['uuid'] for li in
obj.get('logical_interfaces', [])])
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
del cls._dict[uuid]
# end delete
# end PhysicalRouterSM
class ProjectSM(DBBaseSM):
_dict = {}
obj_type = 'project'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_instances = set()
self.virtual_networks = set()
obj_dict = self.update(obj_dict)
self.set_children('virtual_network', obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.update_multiple_refs('service_instance', obj)
return obj
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('service_instance', {})
del cls._dict[uuid]
# end delete
# end ProjectSM
class DomainSM(DBBaseSM):
_dict = {}
obj_type = 'domain'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.fq_name = obj['fq_name']
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
del cls._dict[uuid]
# end delete
# end DomainSM
class SecurityGroupSM(DBBaseSM):
_dict = {}
obj_type = 'security_group'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
del cls._dict[uuid]
# end delete
# end SecurityGroupSM
class InterfaceRouteTableSM(DBBaseSM):
_dict = {}
obj_type = 'interface_route_table'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.virtual_machine_interfaces = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.update_multiple_refs('virtual_machine_interface', obj)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
obj.update_multiple_refs('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end InterfaceRouteTableSM
class ServiceApplianceSM(DBBaseSM):
_dict = {}
obj_type = 'service_appliance'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_appliance_set = None
self.physical_interfaces = set()
self.kvpairs = []
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
kvpairs = obj.get('service_appliance_properties', None)
if kvpairs:
self.kvpairs = kvpairs.get('key_value_pair', [])
self.user_credential = obj.get('service_appliance_user_credentials', None)
self.ip_address = obj.get('service_appliance_ip_address', None)
self.service_appliance_set = self.get_parent_uuid(obj)
self.update_multiple_refs('physical_interface', obj)
if self.service_appliance_set:
parent = ServiceApplianceSetSM.get(self.service_appliance_set)
parent.service_appliances.add(self.uuid)
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
if obj.service_appliance_set:
parent = ServiceApplianceSetSM.get(obj.service_appliance_set)
if parent:
parent.service_appliances.discard(obj.uuid)
obj.update_multiple_refs('physical_interface', {})
del cls._dict[uuid]
# end delete
# end ServiceApplianceSM
class ServiceApplianceSetSM(DBBaseSM):
_dict = {}
obj_type = 'service_appliance_set'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_appliances = set()
self.kvpairs = []
self.ha_mode = "standalone"
self.update(obj_dict)
# end __init__
def add(self):
self._manager.loadbalancer_agent.load_driver(self)
# end add
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.name = obj['fq_name'][-1]
self.fq_name = obj['fq_name']
self.driver = obj.get('service_appliance_driver', None)
kvpairs = obj.get('service_appliance_set_properties', None)
if kvpairs:
self.kvpairs = kvpairs.get('key_value_pair', [])
self.service_appliances = set([sa['uuid'] for sa in obj.get('service_appliances', [])])
if 'service_appliance_ha_mode' in obj:
self.ha_mode = obj['service_appliance_ha_mode']
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
cls._manager.loadbalancer_agent.unload_driver(obj)
del cls._dict[uuid]
# end delete
# end ServiceApplianceSetSM
class LogicalRouterSM(DBBaseSM):
_dict = {}
obj_type = 'logical_router'
def __init__(self, uuid, obj_dict=None):
self.uuid = uuid
self.service_instance = None
self.virtual_network = None
self.virtual_machine_interfaces = set()
self.last_virtual_machine_interfaces = set()
self.update(obj_dict)
# end __init__
def update(self, obj=None):
if obj is None:
obj = self.read_obj(self.uuid)
self.parent_uuid = obj['parent_uuid']
self.update_single_ref('service_instance', obj)
self.update_multiple_refs('virtual_machine_interface', obj)
self.update_single_ref('virtual_network', obj)
self.name = obj['fq_name'][-1]
# end update
@classmethod
def delete(cls, uuid):
if uuid not in cls._dict:
return
obj = cls._dict[uuid]
cls._manager.snat_agent.delete_snat_instance(obj)
obj.update_single_ref('service_instance', {})
obj.update_single_ref('virtual_network', {})
obj.update_multiple_refs('virtual_machine_interface', {})
del cls._dict[uuid]
# end delete
# end LogicalRouterSM
|
|
###################################
# Category theoretic monadic types.
###################################
class Monad(object):
"A simple base class for any monadic type to inherit from"
def is_monad(self): return True
class Maybe(Monad):
"The base class for the Maybe monadic type"
def __init__(self):
super(Maybe, self).__init__()
class Just(Maybe):
"Just represents a successful computation containing a result"
def __init__(self, value):
super(Just, self).__init__()
self.value = value
def __str__(self):
return "Just(" + str(self.value) + ")"
def __repr__(self):
return self.__str__()
def returnM(self, x):
return Just(x)
def bind(self, f):
return f(self.value)
class Nothing(Maybe):
"Nothing represents a failed computation which cannot be operated on"
def __init__(self):
super(Nothing, self).__init__()
def __str__(self):
return "Nothing"
def __repr__(self):
return self.__str__()
def returnM(self, _):
return nothing
def bind(self, _):
return nothing
# Create an instance of nothing globally to be compared to.
nothing = Nothing()
def returnM(m, v):
return m.returnM(v)
def lift(m, f):
"Lifts a regular function into the domain of a monadic type"
def lifted(i):
if not isinstance(i, m):
raise TypeError("Instance passed to lifted function is not the correct monadic type.")
def bound(v):
return returnM(i, f(v))
return i.bind(bound)
return lifted
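# Usage sketch: lifting an ordinary function into the Maybe monad. The
# lifted function applies f inside Just and passes Nothing through
# untouched.
#
#     inc = lift(Maybe, lambda x: x + 1)
#     inc(Just(41))    # => Just(42)
#     inc(nothing)     # => nothing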
#############################
# Categorical type contracts.
#############################
class Contract(object):
"A class that provides pretty-printing and handles application for contracts"
def __init__(self, name, contract):
self.name = name
self.contract = contract
def __str__(self):
return self.name
def __repr__(self):
return self.__str__()
def __call__(self, *args):
return self.contract(*args)
def Any(x):
return x
Any = Contract("Any", Any)
def UntypedFunction(f):
def fn(): return 1
if not(isinstance(f, type(map)) or isinstance(f, type(fn))):
raise TypeError("Expected a function, got {0}".format(type(f)))
return f
UntypedFunction = Contract("UntypedFunction", UntypedFunction)
def ContractType(c):
if not isinstance(c, Contract):
raise TypeError("Expected a contract, got {0}".format(type(c)))
return c
ContractType = Contract("Contract", ContractType)
def MonadicType(m):
if not issubclass(m.__class__, Monad):
raise TypeError("Expected a monadic type, got {0}".format(type(m)))
return m
MonadicType = Contract("MonadicType", MonadicType)
def type_of(t):
"A helper function that creates contract functions for builtin types"
def contract(v):
if not isinstance(v, t):
raise TypeError("Expected {0}, got {1}".format(type(t), type(v)))
return v
return contract
List = Contract("List", type_of(list))
Tuple = Contract("Tuple", type_of(tuple))
Set = Contract("Set", type_of(set))
UntypedDictionary = Contract("Dictionary", type_of(dict))
String = Contract("String", type_of(str))
Object = Contract("Object", type_of(object))
Boolean = Contract("Boolean", type_of(bool))
Integer = Contract("Integer", type_of(int))
Float = Contract("Float", type_of(float))
Long = Contract("Long", type_of(long))
Complex = Contract("Complex", type_of(complex))
def Numeric(n):
if not any(map(lambda t: isinstance(n, t), [int, float, long, complex])):
raise TypeError("Expected a numeric type, got {0}".format(type(n)))
return n
Numeric = Contract("Numeric", Numeric)
def ListOf(c):
ContractType(c)
def contract(ls):
return map(c, List(ls))
return Contract("List<" + str(c) + ">", contract)
ListOf = Contract("List Of", ListOf)
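# Usage sketch: a contract for homogeneous lists; violations raise
# TypeError via the element contract.
#
#     Integers = ListOf(Integer)
#     Integers([1, 2, 3])      # => [1, 2, 3]
#     Integers([1, "two"])     # raises TypeError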
def TupleOf(*cs):
ListOf(ContractType)(list(cs))
def contract(tp):
return tuple([cs[i](tp[i]) for i in range(len(cs))])
return Contract("(" + ", ".join(str(c) for c in cs) + ")", contract)
TupleOf = Contract("Tuple Of", TupleOf)
def SetOf(c):
ContractType(c)
def contract(s):
Set(s)
for x in s:
c(x)
return s
return Contract("Set<" + str(c) + ">", contract)
SetOf = Contract("Set Of", SetOf)
def Dictionary(ck, cv):
ContractType(ck)
ContractType(cv)
def contract(d):
return {ck(k) : cv(d[k]) for k in d}
return Contract("Dictionary<" + str(ck) + " -> " + str(cv) + ">", contract)
Dictionary = Contract("Dictionary", Dictionary)
def Prodn(cs):
"Creates a contract for lists of elements who satisfy corresponding contracts in a list(cs)"
ListOf(ContractType)(cs)
length = len(cs)
def contract(args):
List(args)
if len(args) != length:
raise TypeError("Expected {0} arguments.".format(length))
result = []
for i in range(length):
result.append(cs[i](args[i]))
return result
return Contract("Prodn<" + ",".join(map(str, cs)) + ">", contract)
def typed (last, *types):
"Generates a a decorator which is a contract for a function with a type signature"
types = list(types)
if len(types) > 0:
last, types[0], types[1:] = types[-1], last, types[:-1]
def decorator(fn):
def caller(*args):
args = list(args)
Prodn(types)(args)
return last(fn(*args))
return caller
return decorator
def Function(*types):
def contract(f):
return typed (*types)(f)
return Contract("Function<" + " -> ".join(map(str, types)) + ">", contract)
def alias(name, c):
return Contract(name, c.contract)
######################
# Functional niceties.
######################
@typed (Function(Any, Boolean), List, List)
def take_while(f, ls):
i = 0
while i < len(ls) and f(ls[i]): i += 1
return ls[:i]
@typed (Function(Any, Boolean), List, List)
def drop_while(f, ls):
i = 0
while i < len(ls) and f(ls[i]): i += 1
return ls[i:]
@typed (Function(Any, List), List, List)
def flat_map(f, ls):
return reduce(lambda a, b: a + b, map(f, ls), [])
@typed (Tuple, Any)
def first(tp):
return tp[0]
@typed (Tuple, Any)
def second(tp):
return tp[1]
@typed (Tuple, Any)
def third(tp):
return tp[2]
@typed (List, Any)
def head(ls):
return ls[0]
@typed (List, List)
def tail(ls):
return ls[1:]
@typed (List, Any)
def last(ls):
return ls[-1]
@typed (Integer, List, List)
def take(n, ls):
return ls[:n]
@typed (Integer, List, List)
def drop(n, ls):
return ls[n:]
def partial(f, *args):
"""Produces a partial function that has some arguments already supplied to f"""
def wrapper(*bargs):
return f(*(args + bargs))
return wrapper
p_ = partial
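# Usage sketch: `p_` pre-supplies leading arguments; the lambda here is
# purely illustrative.
#
#     greet = p_(lambda greeting, name: greeting + ", " + name, "Hello")
#     greet("world")   # => "Hello, world"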
def compose(*fns):
"""Composes any number of functions together into one function"""
def composed(f, g):
def caller(*args):
return f(g(*args))
return caller
return reduce(lambda f, g: composed(f, g), fns[1:], fns[0])
c_ = compose
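# Usage sketch: `c_` composes the left function over the right, so the
# rightmost function is applied first.
#
#     f = c_(lambda x: x * 2, lambda x: x + 1)
#     f(3)   # => 8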
def demultiplexed(f):
"""Calls a function with arity n with a list/tuple of length n"""
def caller(args):
return f(*tuple(args))
return caller
d_ = demultiplexed
@typed (Numeric, Numeric, Numeric)
def add(x, y):
return x + y
@typed (Numeric, Numeric, Numeric)
def sub(x, y):
return x - y
@typed (Numeric, Numeric, Numeric)
def mul(x, y):
return x * y
@typed (Numeric, Numeric, Numeric)
def div(x, y):
return x / y
@typed (String, String, String)
def strcat(a, b):
return a + b
@typed (List, List, List)
def concat(a, b):
return a + b
@typed (ListOf(List), List)
def flatten(lls):
return reduce(concat, lls, [])
|
|
import base64
import json
import os
import shutil
import tempfile
import zipfile
import docopt
import pkg_resources
import six
import dcoscli
from dcos import cmds, emitting, http, options, servicemanager, util
from dcos.errors import DCOSException
from dcos.package import get_package_manager
from dcos.util import md5_hash_file
from dcoscli.subcommand import default_command_info, default_doc
from dcoscli.util import decorate_docopt_usage
logger = util.get_logger(__name__)
emitter = emitting.FlatEmitter()
def main(argv):
try:
return _main(argv)
except DCOSException as e:
emitter.publish(e)
return 1
@decorate_docopt_usage
def _main(argv):
args = docopt.docopt(
default_doc("experimental"),
argv=argv,
version='dcos-experimental version {}'.format(dcoscli.version))
http.silence_requests_warnings()
return cmds.execute(_cmds(), args)
def _cmds():
"""
:returns: All of the supported commands
:rtype: [dcos.cmds.Command]
"""
return [
cmds.Command(
hierarchy=['experimental', 'package', 'add'],
arg_keys=['--json', '--dcos-package',
'--package-name', '--package-version'],
function=_add),
cmds.Command(
hierarchy=['experimental', 'package', 'build'],
arg_keys=['--json', '<build-definition>', '--output-directory'],
function=_build,
),
cmds.Command(
hierarchy=['experimental', 'service', 'start'],
arg_keys=['--json', '<package-name>',
'--package-version', '--options'],
function=_service_start),
cmds.Command(
hierarchy=['experimental'],
arg_keys=['--info'],
function=_experimental),
]
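# The commands above correspond to CLI invocations of roughly the
# following shape (option values are placeholders, inferred from the
# hierarchies and arg_keys declared here):
#
#   dcos experimental package add --dcos-package=<path-to-package>
#   dcos experimental package build <build-definition> --output-directory=<dir>
#   dcos experimental service start <package-name> --package-version=<version>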
def _experimental(info):
"""
:returns: process status
:rtype: int
"""
if info:
emitter.publish(default_command_info("experimental"))
else:
doc = default_doc("package")
emitter.publish(options.make_generic_usage_message(doc))
return 1
return 0
def _add(json, dcos_package, package_name, package_version):
"""
Add a DC/OS package to DC/OS
:param json: whether to output json
:type json: bool
:param dcos_package: path to the DC/OS package
:type dcos_package: None | str
:param package_name: the name of a remote DC/OS package
:type package_name: None | str
:param package_version: the version of a remote DC/OS package
:type package_version: None | str
:return: process status
:rtype: int
"""
package_manager = get_package_manager()
if dcos_package:
response = package_manager.package_add_local(dcos_package)
else:
response = (package_manager
.package_add_remote(package_name, package_version))
response_json = response.json()
if json:
emitter.publish(response_json)
else:
message = (
'The package [{}] version [{}] has been added to DC/OS'.format(
response_json['name'], response_json['version']))
emitter.publish(message)
return 0
def _build(output_json,
build_definition,
output_directory):
""" Creates a DC/OS Package from a DC/OS Package Build Definition
:param output_json: whether to output json
:type output_json: None | bool
:param build_definition: The Path to a DC/OS package build definition
:type build_definition: str
:param output_directory: The directory where the DC/OS Package
will be stored
:type output_directory: str
:returns: The process status
:rtype: int
"""
# get the path of the build definition
cwd = os.getcwd()
build_definition_path = build_definition
if not os.path.isabs(build_definition_path):
build_definition_path = os.path.join(cwd, build_definition_path)
build_definition_directory = os.path.dirname(build_definition_path)
if not os.path.exists(build_definition_path):
raise DCOSException(
"The file [{}] does not exist".format(build_definition_path))
# get the path to the output directory
if output_directory is None:
output_directory = cwd
if not os.path.exists(output_directory):
raise DCOSException(
"The output directory [{}]"
" does not exist".format(output_directory))
logger.debug("Using [%s] as output directory", output_directory)
# load raw build definition
with util.open_file(build_definition_path) as bd:
build_definition_raw = util.load_json(bd, keep_order=True)
# validate DC/OS Package Build Definition with local references
build_definition_schema_path = "data/schemas/build-definition-schema.json"
build_definition_schema = util.load_jsons(
pkg_resources.resource_string(
"dcoscli", build_definition_schema_path).decode())
errs = util.validate_json(build_definition_raw, build_definition_schema)
if errs:
logger.debug("Failed before resolution: \n"
"\tbuild definition: {}"
"".format(build_definition_raw))
raise DCOSException(_validation_error(build_definition_path))
# resolve local references in build definition
_resolve_local_references(
build_definition_raw,
build_definition_schema,
build_definition_directory
)
# at this point all the local references have been resolved
build_definition_resolved = build_definition_raw
# validate resolved build definition
metadata_schema_path = "data/schemas/metadata-schema.json"
metadata_schema = util.load_jsons(
pkg_resources.resource_string(
"dcoscli", metadata_schema_path).decode())
errs = util.validate_json(build_definition_resolved, metadata_schema)
if errs:
logger.debug("Failed after resolution: \n"
"\tbuild definition: {}"
"".format(build_definition_resolved))
raise DCOSException('Error validating package: '
'there was a problem resolving '
'the local references in '
'[{}]'.format(build_definition_path))
# create the manifest
manifest_json = {
'built-by': "dcoscli.version={}".format(dcoscli.version)
}
# create the metadata
metadata_json = build_definition_resolved
# create zip file
with tempfile.NamedTemporaryFile() as temp_file:
with zipfile.ZipFile(
temp_file.file,
mode='w',
compression=zipfile.ZIP_DEFLATED,
allowZip64=True) as zip_file:
metadata = json.dumps(metadata_json, indent=2).encode()
zip_file.writestr("metadata.json", metadata)
manifest = json.dumps(manifest_json, indent=2).encode()
zip_file.writestr("manifest.json", manifest)
# name the package appropriately
temp_file.file.seek(0)
dcos_package_name = '{}-{}-{}.dcos'.format(
metadata_json['name'],
metadata_json['version'],
md5_hash_file(temp_file.file))
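    # e.g. "<name>-<version>-<md5 of the zip contents>.dcos"; the hash keeps
    # distinct build outputs from colliding on the same file name.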
# get the dcos package path
dcos_package_path = os.path.join(output_directory, dcos_package_name)
if os.path.exists(dcos_package_path):
raise DCOSException(
'Output file [{}] already exists'.format(
dcos_package_path))
# create a new file to contain the package
temp_file.file.seek(0)
with util.open_file(dcos_package_path, 'w+b') as dcos_package:
shutil.copyfileobj(temp_file.file, dcos_package)
if output_json:
message = {'package_path': dcos_package_path}
else:
message = 'Created DC/OS Universe Package [{}]'.format(
dcos_package_path)
emitter.publish(message)
return 0
def _resolve_local_references(build_definition,
build_schema,
build_definition_directory):
""" Resolves all local references in a DC/OS Package Build Definition
:param build_definition: The DC/OS Package Build Definition that may
contain local references
:type build_definition: dict
:param build_definition_directory: The directory of the Build Definition
:type build_definition_directory: str
:param build_schema: The schema for the Build Definition
:type build_schema: dict
"""
_replace_marathon(build_definition,
build_schema,
build_definition_directory)
_replace_directly(build_definition,
build_schema,
build_definition_directory,
"config")
_replace_directly(build_definition,
build_schema,
build_definition_directory,
"resource")
def _replace_directly(build_definition,
build_schema,
build_definition_directory,
ref):
""" Replaces the local reference ref with the contents of
the file pointed to by ref
:param build_definition: The DC/OS Package Build Definition that
may contain local references
:type build_definition: dict
:param build_definition_directory: The directory of the Build Definition
:type build_definition_directory: str
:param build_schema: The schema for the Build Definition
:type build_schema: dict
:param ref: The key in build_definition that will be replaced
:type ref: str
"""
if ref in build_definition and _is_local_reference(build_definition[ref]):
location = build_definition[ref][1:]
if not os.path.isabs(location):
location = os.path.join(build_definition_directory, location)
with util.open_file(location) as f:
contents = util.load_json(f, True)
build_definition[ref] = contents
errs = util.validate_json(build_definition, build_schema)
if errs:
logger.debug("Failed during resolution of {}: \n"
"\tbuild definition: {}"
"".format(ref, build_definition))
raise DCOSException(_validation_error(location))
def _replace_marathon(build_definition,
build_schema,
build_definition_directory):
""" Replaces the marathon v2AppMustacheTemplate ref with
the base64 encoding of the file pointed to by the reference
:param build_definition: The DC/OS Package Build Definition that
may contain local references
:type build_definition: dict
:param build_definition_directory: The directory of the Build Definition
:type build_definition_directory: str
:param build_schema: The schema for the Build Definition
:type build_schema: dict
"""
ref = "marathon"
template = "v2AppMustacheTemplate"
if ref in build_definition and \
_is_local_reference(build_definition[ref][template]):
location = (build_definition[ref])[template][1:]
if not os.path.isabs(location):
location = os.path.join(build_definition_directory, location)
# convert the contents of the marathon file into base64
with util.open_file(location) as f:
contents = base64.b64encode(
f.read().encode()).decode()
build_definition[ref][template] = contents
errs = util.validate_json(build_definition, build_schema)
if errs:
logger.debug("Failed during resolution of marathon: \n"
"\tbuild definition: {}"
"".format(build_definition))
raise DCOSException(_validation_error(location))
def _validation_error(filename):
"""Renders a human readable validation error
:param filename: the file that failed to validate
:type filename: str
:returns: validation error message
:rtype: str
"""
return 'Error validating package: ' \
'[{}] does not conform to the' \
' specified schema'.format(filename)
def _is_local_reference(item):
"""Checks if an object is a local reference
:param item: the object that may be a reference
:type item: str
:returns: true if item is a local reference else false
:rtype: bool
"""
return isinstance(item, six.string_types) and item.startswith("@")
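# Illustrative sketch (hypothetical file names, not part of the CLI): a build
# definition can point at sibling files through "@"-prefixed local references,
# which _resolve_local_references inlines before validation, e.g.
#
#   {
#     "name": "hello-world",
#     "version": "0.1.0",
#     "marathon": {"v2AppMustacheTemplate": "@marathon.json.mustache"},
#     "config": "@config.json",
#     "resource": "@resource.json"
#   }
#
# After resolution, "config" and "resource" hold the parsed JSON contents of the
# referenced files and the marathon template holds the base64-encoded template.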
def _service_start(json, package_name, package_version, options_path):
"""Starts a DC/OS service from a package that has been added
:param json: whether to output json
:type json: bool
:param package_name: the name of the DC/OS package
:type package_name: str
:param package_version: the version of the DC/OS package
:type package_version: None | str
:param options_path: path to a JSON file containing the package options
:type options_path: None | str
:return: process status
:rtype: int
"""
manager = servicemanager.ServiceManager()
options = util.read_file_json(options_path) if options_path else None
response = manager.start_service(
package_name, package_version, options)
response_json = response.json()
if json:
emitter.publish(response_json)
else:
message = (
"The service [{}] version [{}] has been started".format(
response_json['packageName'], response_json['packageVersion']
))
emitter.publish(message)
return 0
|
|
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""Module of Interactive Beam features that can be used in notebook.
The purpose of the module is to reduce the learning curve of Interactive Beam
users, provide a single place for importing, and add syntactic sugar for all
Interactive Beam components. It gives users the capability to interact with the
existing environment/session/context for Interactive Beam and to visualize
PCollections as bounded datasets. In the meantime, it hides the interactivity
implementation from users so that they can focus on developing Beam pipelines
without worrying
about how hidden states in the interactive session are managed.
Note: If you want backward-compatibility, only invoke interfaces provided by
this module in your notebook or application code.
"""
# pytype: skip-file
from __future__ import absolute_import
import logging
import warnings
import apache_beam as beam
from apache_beam.runners.interactive import background_caching_job as bcj
from apache_beam.runners.interactive import interactive_environment as ie
from apache_beam.runners.interactive import interactive_runner as ir
from apache_beam.runners.interactive import pipeline_fragment as pf
from apache_beam.runners.interactive import pipeline_instrument as pi
from apache_beam.runners.interactive.display import pipeline_graph
from apache_beam.runners.interactive.display.pcoll_visualization import visualize
from apache_beam.runners.interactive.options import interactive_options
from apache_beam.runners.interactive.utils import elements_to_df
from apache_beam.runners.interactive.utils import progress_indicated
from apache_beam.runners.interactive.utils import to_element_list
_LOGGER = logging.getLogger(__name__)
class Options(interactive_options.InteractiveOptions):
"""Options that guide how Interactive Beam works."""
@property
def enable_capture_replay(self):
"""Whether replayable source data capture should be replayed for multiple
PCollection evaluations and pipeline runs as long as the data captured is
still valid."""
return self.capture_control._enable_capture_replay
@enable_capture_replay.setter
def enable_capture_replay(self, value):
"""Sets whether source data capture should be replayed. True - Enables
capture of replayable source data so that following PCollection evaluations
and pipeline runs always use the same data captured; False - Disables
capture of replayable source data so that following PCollection evaluation
and pipeline runs always use new data from sources."""
# This makes sure the log handler is configured correctly in case the
# options are configured in an early stage.
_ = ie.current_env()
if value:
_LOGGER.info(
'Capture replay is enabled. When a PCollection is evaluated or the '
'pipeline is executed, existing data captured from previous '
'computations will be replayed for consistent results. If no '
'captured data is available, new data from capturable sources will '
'be captured.')
else:
_LOGGER.info(
'Capture replay is disabled. The next time a PCollection is '
'evaluated or the pipeline is executed, new data will always be '
'consumed from sources in the pipeline. You will not have '
'replayability until re-enabling this option.')
self.capture_control._enable_capture_replay = value
@property
def capturable_sources(self):
"""Interactive Beam automatically captures data from sources in this set.
"""
return self.capture_control._capturable_sources
@property
def capture_duration(self):
"""The data capture of sources ends as soon as the background caching job
has run for this long."""
return self.capture_control._capture_duration
@capture_duration.setter
def capture_duration(self, value):
"""Sets the capture duration as a timedelta.
Example::
# Sets the capture duration limit to 10 seconds.
interactive_beam.options.capture_duration = timedelta(seconds=10)
# Evicts all captured data if there is any.
interactive_beam.evict_captured_data()
# The next PCollection evaluation will capture fresh data from sources,
# and the data captured will be replayed until another eviction.
interactive_beam.collect(some_pcoll)
"""
assert value.total_seconds() > 0, 'Duration must be a positive value.'
if self.capture_control._capture_duration.total_seconds(
) != value.total_seconds():
_ = ie.current_env()
_LOGGER.info(
'You have changed capture duration from %s seconds to %s seconds. '
'To allow new data to be captured for the updated duration, the '
'next time a PCollection is evaluated or the pipeline is executed, '
'please invoke evict_captured_data().',
self.capture_control._capture_duration.total_seconds(),
value.total_seconds())
self.capture_control._capture_duration = value
@property
def capture_size_limit(self):
"""The data capture of sources ends as soon as the size (in bytes) of data
captured from capturable sources reaches the limit."""
return self.capture_control._capture_size_limit
@capture_size_limit.setter
def capture_size_limit(self, value):
"""Sets the capture size in bytes.
Example::
# Sets the capture size limit to 1GB.
interactive_beam.options.capture_size_limit = 1e9
"""
if self.capture_control._capture_size_limit != value:
_ = ie.current_env()
_LOGGER.info(
'You have changed capture size limit from %s bytes to %s bytes. To '
'allow new data to be captured under the updated size limit, the '
'next time a PCollection is evaluated or the pipeline is executed, '
'please invoke evict_captured_data().',
self.capture_control._capture_size_limit,
value)
self.capture_control._capture_size_limit = value
@property
def display_timestamp_format(self):
"""The format in which timestamps are displayed.
Default is '%Y-%m-%d %H:%M:%S.%f%z', e.g. 2020-02-01 15:05:06.000015-08:00.
"""
return self._display_timestamp_format
@display_timestamp_format.setter
def display_timestamp_format(self, value):
"""Sets the format in which timestamps are displayed.
Default is '%Y-%m-%d %H:%M:%S.%f%z', e.g. 2020-02-01 15:05:06.000015-08:00.
Example::
# Sets the format to not display the timezone or microseconds.
interactive_beam.options.display_timestamp_format = '%Y-%m-%d %H:%M:%S'
"""
self._display_timestamp_format = value
@property
def display_timezone(self):
"""The timezone in which timestamps are displayed.
Defaults to local timezone.
"""
return self._display_timezone
@display_timezone.setter
def display_timezone(self, value):
"""Sets the timezone (datetime.tzinfo) in which timestamps are displayed.
Defaults to local timezone.
Example::
# Imports the timezone library.
from pytz import timezone
# Will display all timestamps in the US/Eastern time zone.
tz = timezone('US/Eastern')
# You can also use dateutil.tz to get a timezone.
tz = dateutil.tz.gettz('US/Eastern')
interactive_beam.options.display_timezone = tz
"""
self._display_timezone = value
# Users can set options to guide how Interactive Beam works.
# Examples:
# from datetime import timedelta
# from apache_beam.runners.interactive import interactive_beam as ib
# ib.options.enable_capture_replay = False/True
# ib.options.capture_duration = timedelta(seconds=60)
# ib.options.capturable_sources.add(SourceClass)
# Check the docstrings for detailed usages.
options = Options()
def watch(watchable):
"""Monitors a watchable.
This allows Interactive Beam to implicitly pass on the information about the
location of your pipeline definition.
Current implementation mainly watches for PCollection variables defined in
user code. A watchable can be a dictionary of variable metadata such as
locals(), a str name of a module, a module object or an instance of a class.
The variable can come from any scope even local variables in a method of a
class defined in a module.
Below are all valid::
watch(__main__) # if import __main__ is already invoked
watch('__main__') # does not require invoking import __main__ beforehand
watch(self) # inside a class
watch(SomeInstance()) # an instance of a class
watch(locals()) # inside a function, watching local variables within
If you write a Beam pipeline in the __main__ module directly, since the
__main__ module is always watched, you don't have to instruct Interactive
Beam. If your Beam pipeline is defined in some module other than __main__,
such as inside a class function or a unit test, you can watch() the scope.
For example::
class Foo(object):
def run_pipeline(self):
with beam.Pipeline() as p:
init_pcoll = p | 'Init Create' >> beam.Create(range(10))
watch(locals())
return init_pcoll
init_pcoll = Foo().run_pipeline()
Interactive Beam caches init_pcoll for the first run.
Then you can use::
show(init_pcoll)
To visualize data from init_pcoll once the pipeline is executed.
"""
ie.current_env().watch(watchable)
# TODO(BEAM-8288): Change the signature of this function to
# `show(*pcolls, include_window_info=False, visualize_data=False)` once Python 2
# is completely deprecated from Beam.
@progress_indicated
def show(*pcolls, **configs):
# type: (*Union[Dict[Any, PCollection], Iterable[PCollection], PCollection], **bool) -> None
"""Shows given PCollections in an interactive exploratory way if used within
a notebook, or prints a heading and sampled data if used within an ipython shell.
No-op if used in a non-interactive environment.
The given pcolls can be a dictionary of PCollections (as values), an iterable
of PCollections, or plain PCollection values.
There are 2 boolean configurations:
#. include_window_info=<True/False>. If True, windowing information of the
data will be visualized too. Default is false.
#. visualize_data=<True/False>. By default, the visualization contains data
tables rendering data from the given pcolls separately as if they were
converted into dataframes. If visualize_data is True, a more detailed
dive-in widget and a statistical overview widget of the data are also
displayed; otherwise, those 2 data visualization widgets are not displayed.
Under the hood, this builds an ad hoc pipeline fragment including only the
transforms necessary to produce data for the given PCollections pcolls, runs
that fragment to compute the data, and then visualizes it.
The function is always blocking. If used within a notebook, the data
visualized might be dynamically updated before the function returns as more
and more data gets processed and emitted while the pipeline fragment is
executing. If used within an ipython shell, there is no dynamic plotting, only
a static plot at the end of the pipeline fragment's execution.
The PCollections given must belong to the same pipeline.
For example::
p = beam.Pipeline(InteractiveRunner())
init = p | 'Init' >> beam.Create(range(1000))
square = init | 'Square' >> beam.Map(lambda x: x * x)
cube = init | 'Cube' >> beam.Map(lambda x: x ** 3)
# Below builds a pipeline fragment from the defined pipeline `p` that
# contains only applied transforms of `Init` and `Square`. Then the
# interactive runner runs the pipeline fragment implicitly to compute data
# represented by PCollection `square` and visualizes it.
show(square)
# This is equivalent to `show(square)` because `square` depends on `init`
# and `init` is included in the pipeline fragment and computed anyway.
show(init, square)
# Below is similar to running `p.run()`. It computes data for both
# PCollection `square` and PCollection `cube`, then visualizes them.
show(square, cube)
"""
flatten_pcolls = []
for pcoll_container in pcolls:
if isinstance(pcoll_container, dict):
flatten_pcolls.extend(pcoll_container.values())
elif isinstance(pcoll_container, beam.pvalue.PCollection):
flatten_pcolls.append(pcoll_container)
else:
try:
flatten_pcolls.extend(iter(pcoll_container))
except TypeError:
raise ValueError(
'The given pcoll %s is not a dict, an iterable or a PCollection.' %
pcoll_container)
pcolls = flatten_pcolls
assert len(pcolls) > 0, (
'Need at least 1 PCollection to show data visualization.')
for pcoll in pcolls:
assert isinstance(pcoll, beam.pvalue.PCollection), (
'{} is not an apache_beam.pvalue.PCollection.'.format(pcoll))
user_pipeline = pcolls[0].pipeline
for pcoll in pcolls:
assert pcoll.pipeline is user_pipeline, (
'{} belongs to a different user-defined pipeline ({}) than that of'
' other PCollections ({}).'.format(
pcoll, pcoll.pipeline, user_pipeline))
# TODO(BEAM-8288): Remove below pops and assertion once Python 2 is
# deprecated from Beam.
include_window_info = configs.pop('include_window_info', False)
visualize_data = configs.pop('visualize_data', False)
# This assertion is to protect the backward compatibility for function
# signature change after Python 2 deprecation.
assert not configs, (
'The only configs supported are include_window_info and '
'visualize_data.')
runner = user_pipeline.runner
if isinstance(runner, ir.InteractiveRunner):
runner = runner._underlying_runner
# Make sure that sources without a user reference are still cached.
pi.watch_sources(user_pipeline)
# Make sure that all PCollections to be shown are watched. If a PCollection
# has not been watched, make up a variable name for that PCollection and watch
# it. No validation is needed here because the watch logic can handle
# arbitrary variables.
watched_pcollections = set()
for watching in ie.current_env().watching():
for _, val in watching:
if hasattr(val, '__class__') and isinstance(val, beam.pvalue.PCollection):
watched_pcollections.add(val)
for pcoll in pcolls:
if pcoll not in watched_pcollections:
watch({'anonymous_pcollection_{}'.format(id(pcoll)): pcoll})
if ie.current_env().is_in_ipython:
warnings.filterwarnings(
'ignore',
'options is deprecated since First stable release. References to '
'<pipeline>.options will not be supported',
category=DeprecationWarning)
# Attempt to run background caching job since we have the reference to the
# user-defined pipeline.
bcj.attempt_to_run_background_caching_job(
runner, user_pipeline, user_pipeline.options)
pcolls = set(pcolls)
computed_pcolls = set()
for pcoll in pcolls:
if pcoll in ie.current_env().computed_pcollections:
computed_pcolls.add(pcoll)
pcolls = pcolls.difference(computed_pcolls)
# If in notebook, static plotting computed pcolls as computation is done.
if ie.current_env().is_in_notebook:
for pcoll in computed_pcolls:
visualize(
pcoll,
include_window_info=include_window_info,
display_facets=visualize_data)
elif ie.current_env().is_in_ipython:
for pcoll in computed_pcolls:
visualize(pcoll, include_window_info=include_window_info)
if not pcolls:
return
# Build a pipeline fragment for the PCollections and run it.
result = pf.PipelineFragment(list(pcolls), user_pipeline.options).run()
ie.current_env().set_pipeline_result(user_pipeline, result)
# If in notebook, dynamic plotting as computation goes.
if ie.current_env().is_in_notebook:
for pcoll in pcolls:
visualize(
pcoll,
dynamic_plotting_interval=1,
include_window_info=include_window_info,
display_facets=visualize_data)
# Invoke wait_until_finish to ensure the blocking nature of this API without
# relying on the run to be blocking.
result.wait_until_finish()
# If just in ipython shell, plotting once when the computation is completed.
if ie.current_env().is_in_ipython and not ie.current_env().is_in_notebook:
for pcoll in pcolls:
visualize(pcoll, include_window_info=include_window_info)
# If the pipeline execution is successful at this stage, mark the computation
# completeness for the given PCollections so that when further `show`
# invocation occurs, Interactive Beam wouldn't need to re-compute them.
if result.state is beam.runners.runner.PipelineState.DONE:
ie.current_env().mark_pcollection_computed(pcolls)
def collect(pcoll, include_window_info=False):
"""Materializes all of the elements from a PCollection into a Dataframe.
For example::
p = beam.Pipeline(InteractiveRunner())
init = p | 'Init' >> beam.Create(range(10))
square = init | 'Square' >> beam.Map(lambda x: x * x)
# Run the pipeline and bring the PCollection into memory as a Dataframe.
in_memory_square = collect(square)
"""
return head(pcoll, n=-1, include_window_info=include_window_info)
@progress_indicated
def head(pcoll, n=5, include_window_info=False):
"""Materializes the first n elements from a PCollection into a Dataframe.
This reads each element from the cache files and loads only the amount it
needs into memory.
For example::
p = beam.Pipeline(InteractiveRunner())
init = p | 'Init' >> beam.Create(range(10))
square = init | 'Square' >> beam.Map(lambda x: x * x)
# Run the pipeline and bring the PCollection into memory as a Dataframe.
in_memory_square = head(square, n=5)
"""
assert isinstance(pcoll, beam.pvalue.PCollection), (
'{} is not an apache_beam.pvalue.PCollection.'.format(pcoll))
user_pipeline = pcoll.pipeline
runner = user_pipeline.runner
if isinstance(runner, ir.InteractiveRunner):
runner = runner._underlying_runner
# Make sure that sources without a user reference are still cached.
pi.watch_sources(user_pipeline)
# Make sure that all PCollections to be shown are watched. If a PCollection
# has not been watched, make up a variable name for that PCollection and watch
# it. No validation is needed here because the watch logic can handle
# arbitrary variables.
watched_pcollections = set()
for watching in ie.current_env().watching():
for _, val in watching:
if hasattr(val, '__class__') and isinstance(val, beam.pvalue.PCollection):
watched_pcollections.add(val)
if pcoll not in watched_pcollections:
watch({'anonymous_pcollection_{}'.format(id(pcoll)): pcoll})
warnings.filterwarnings('ignore', category=DeprecationWarning)
# Attempt to run background caching job since we have the reference to the
# user-defined pipeline.
bcj.attempt_to_run_background_caching_job(
runner, user_pipeline, user_pipeline.options)
if pcoll in ie.current_env().computed_pcollections:
# Read from pcoll cache, then convert to DF
pipeline_instrument = pi.PipelineInstrument(pcoll.pipeline)
key = pipeline_instrument.cache_key(pcoll)
cache_manager = ie.current_env().cache_manager()
coder = cache_manager.load_pcoder('full', key)
reader, _ = cache_manager.read('full', key)
elements = to_element_list(reader, coder, include_window_info=True)
else:
# Build a pipeline fragment for the PCollections and run it.
result = pf.PipelineFragment([pcoll], user_pipeline.options).run()
ie.current_env().set_pipeline_result(user_pipeline, result)
# Invoke wait_until_finish to ensure the blocking nature of this API without
# relying on the run to be blocking.
result.wait_until_finish()
# If the pipeline execution is successful at this stage, mark the
# computation completeness for the given PCollections so that when further
# `show` invocation occurs, Interactive Beam wouldn't need to re-compute.
if result.state is beam.runners.runner.PipelineState.DONE:
ie.current_env().mark_pcollection_computed([pcoll])
elements = result.read(pcoll, include_window_info=True)
results = []
for e in elements:
results.append(e)
if len(results) >= n > 0:
break
return elements_to_df(results, include_window_info=include_window_info)
@progress_indicated
def show_graph(pipeline):
"""Shows the current pipeline shape of a given Beam pipeline as a DAG.
"""
pipeline_graph.PipelineGraph(pipeline).display_graph()
def evict_captured_data():
"""Forcefully evicts all captured replayable data.
Once invoked, Interactive Beam will capture new data based on the guidance of
options the next time it evaluates/visualizes PCollections or runs pipelines.
"""
from apache_beam.runners.interactive.options import capture_control
capture_control.evict_captured_data()
|
|
#!/usr/bin/env python
#
# ROS node to interface with Naoqi speech recognition and text-to-speech modules
# Tested with NaoQI: 1.12
#
# Copyright (c) 2012, 2013, Miguel Sarabia
# Imperial College London
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# # Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# # Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# # Neither the name of the Imperial College London nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import rospy
import actionlib
from dynamic_reconfigure.server import Server as ReConfServer
import dynamic_reconfigure.client
from nao_driver.cfg import nao_speechConfig as NodeConfig
from nao_driver import NaoNode
from naoqi import (ALBroker, ALProxy, ALModule)
from std_msgs.msg import( String )
from std_srvs.srv import( Empty, EmptyResponse )
from nao_msgs.msg import(
WordRecognized,
SetSpeechVocabularyGoal,
SetSpeechVocabularyResult,
SetSpeechVocabularyAction,
SpeechWithFeedbackGoal,
SpeechWithFeedbackResult,
SpeechWithFeedbackFeedback,
SpeechWithFeedbackAction )
class Constants:
NODE_NAME = "nao_speech"
EVENT = "WordRecognized"
TEXT_STARTED_EVENT = "ALTextToSpeech/TextStarted"
TEXT_DONE_EVENT = "ALTextToSpeech/TextDone"
class Util:
@staticmethod
def parse_vocabulary( vocabulary ):
# Split string
vocabulary_list = vocabulary.split("/")
# Remove surrounding whitespace
vocabulary_list = [ entry.strip() for entry in vocabulary_list]
# Remove empty strings
return filter(None, vocabulary_list)
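# e.g. parse_vocabulary("yes / no / hello nao") -> ["yes", "no", "hello nao"]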
# Methods for name conversion
@staticmethod
def to_naoqi_name(name):
return "ros{}_{}".format(
name.replace("/", "_"),
rospy.Time.now().to_sec() )
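# e.g. to_naoqi_name("/nao_speech") -> "ros_nao_speech_1400000000.25"
# (the "/" is replaced and the current ROS time appended; values hypothetical)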
class DummyAudioDevice:
def getOutputVolume(self):
return 0
def setOutputVolume(self, vol):
pass
class NaoSpeech(ALModule, NaoNode):
def __init__( self, moduleName ):
# ROS Initialisation
NaoNode.__init__(self)
rospy.init_node( Constants.NODE_NAME )
# NAOQi Module initialization
self.moduleName = moduleName
# Causes ALBroker to fill in ip and find an unused port
self.ip = ""
self.port = 0
self.init_almodule()
# Used for speech with feedback mode only
self.speech_with_feedback_flag = False
# State variables
self.conf = None
# Get Audio proxies
# Speech-recognition wrapper will be lazily initialized
self.srw = None
# Subscription to the Proxy events
self.subscribe()
# Start reconfigure server
self.reconf_server = ReConfServer(NodeConfig, self.reconfigure)
# Client for receiving the new information
self.reconf_client = dynamic_reconfigure.client.Client(Constants.NODE_NAME)
#Subscribe to speech topic
self.sub = rospy.Subscriber("speech", String, self.say )
# Advertise word recognise topic
self.pub = rospy.Publisher("word_recognized", WordRecognized )
# Register ROS services
self.start_srv = rospy.Service(
"start_recognition",
Empty,
self.start )
self.stop_srv = rospy.Service(
"stop_recognition",
Empty,
self.stop )
# Actionlib server for altering the speech recognition vocabulary
self.setSpeechVocabularyServer = actionlib.SimpleActionServer("speech_vocabulary_action", SetSpeechVocabularyAction,
execute_cb=self.executeSpeechVocabularyAction,
auto_start=False)
# Actionlib server for having speech with feedback
self.speechWithFeedbackServer = actionlib.SimpleActionServer("speech_action", SpeechWithFeedbackAction,
execute_cb=self.executeSpeechWithFeedbackAction,
auto_start=False)
# Start both actionlib servers
self.setSpeechVocabularyServer.start()
self.speechWithFeedbackServer.start()
def init_almodule(self):
# before we can instantiate an ALModule, an ALBroker has to be created
rospy.loginfo("Connecting to NaoQi at %s:%d", self.pip, self.pport)
try:
self.broker = ALBroker("%sBroker" % self.moduleName, self.ip, self.port, self.pip, self.pport)
except RuntimeError as e:
print("Could not connect to NaoQi's main broker: %s" % e)
exit(1)
ALModule.__init__(self, self.moduleName)
self.memProxy = ALProxy("ALMemory",self.pip,self.pport)
# TODO: check self.memProxy.version() for > 1.6
if self.memProxy is None:
rospy.logerr("Could not get a proxy to ALMemory on %s:%d", self.pip, self.pport)
exit(1)
self.tts = self.getProxy("ALTextToSpeech")
# TODO: check self.memProxy.version() for > 1.6
if self.tts is None:
rospy.logerr("Could not get a proxy to ALTextToSpeech on %s:%d", self.pip, self.pport)
exit(1)
self.audio = self.getProxy("ALAudioDevice")
if self.audio is None:
# When using simulated naoqi, audio device is not available,
# Use a dummy instead
rospy.logwarn("Proxy to ALAudioDevice not available, using dummy device (normal in simulation; volume controls disabled)")
self.audio = DummyAudioDevice()
def subscribe(self):
# Subscription to the ALProxies events
self.memProxy.subscribeToEvent(Constants.TEXT_DONE_EVENT, self.moduleName, "onTextDone")
self.memProxy.subscribeToEvent(Constants.TEXT_STARTED_EVENT, self.moduleName, "onTextStarted")
def unsubscribe(self):
self.memProxy.unsubscribeToEvent(Constants.TEXT_DONE_EVENT, self.moduleName)
self.memProxy.unsubscribeToEvent(Constants.TEXT_STARTED_EVENT, self.moduleName)
def onTextStarted(self, strVarName, value, strMessage):
# Called when NAO begins or ends the speech. On begin the value = 1
# Must work only on speech with feedback mode
if value == 0 or self.speech_with_feedback_flag == False:
return
# Send feedback via the speech actionlib server
fb = SpeechWithFeedbackFeedback()
self.speechWithFeedbackServer.publish_feedback(fb)
def onTextDone(self, strVarName, value, strMessage):
# Called when NAO begins or ends the speech. On end the value = 1
# Must work only on speech with feedback mode
if value == 0 or self.speech_with_feedback_flag == False:
return
# Change the flag to inform the executeSpeechWithFeedbackAction function that
# the speaking process is over
self.speech_with_feedback_flag = False
def executeSpeechWithFeedbackAction(self, goal):
# Gets the goal and begins the speech
self.speech_with_feedback_flag = True
saystr = goal.say
self.internalSay(saystr)
# Wait till the onTextDone event is called or 2 mins are passed
counter = 0
while self.speech_with_feedback_flag == True and counter < 1200:
rospy.sleep(0.1)
counter += 1
# Send the success feedback
self.speechWithFeedbackServer.set_succeeded()
def executeSpeechVocabularyAction(self, goal):
#~ Called by action client
rospy.loginfo("SetSpeechVocabulary action executing");
words = goal.words
words_str = ""
#~ Empty word list. Send failure.
if len(words) == 0:
setVocabularyResult = SetSpeechVocabularyResult()
setVocabularyResult.success = False
self.setSpeechVocabularyServer.set_succeeded(setVocabularyResult)
return
#~ Create the vocabulary string
words_str = "/".join(str(word) for word in words)
#~ Update the dynamic reconfigure vocabulary parameter
params = { 'vocabulary' : words_str }
self.reconf_client.update_configuration(params)
#~ Send success
setVocabularyResult = SetSpeechVocabularyResult()
setVocabularyResult.success = True
self.setSpeechVocabularyServer.set_succeeded(setVocabularyResult)
# RECONFIGURE THIS PROGRAM
def reconfigure( self, request, level ):
newConf = {}
#Copy values
newConf["voice"] = request["voice"]
newConf["language"] = request["language"]
newConf["volume"] = request["volume"]
newConf["vocabulary"] = request["vocabulary"]
newConf["audio_expression"] = request["audio_expression"]
newConf["visual_expression"] = request["visual_expression"]
newConf["word_spotting"] = request["word_spotting"]
# Check and update values
if not newConf["voice"]:
newConf["voice"] = self.tts.getVoice()
elif newConf["voice"] not in self.tts.getAvailableVoices():
rospy.logwarn(
"Unknown voice '{}'. Using current voice instead".format(
newConf["voice"] ) )
rospy.loginfo("Voices available: {}".format(
self.tts.getAvailableVoices()))
newConf["voice"] = self.tts.getVoice()
if not newConf["language"]:
newConf["language"] = self.tts.getLanguage()
elif newConf["language"] not in self.tts.getAvailableLanguages():
newConf["language"] = self.tts.getLanguage()
rospy.logwarn(
"Unknown language '{}'. Using current language instead".format(
newConf["language"] ) )
rospy.loginfo("Languages available: {}".format(
self.tts.getAvailableLanguages()))
# If first time and parameter not explicitly set
if not self.conf and not rospy.has_param("~volume"):
newConf["volume"] = self.audio.getOutputVolume()
# if srw is running and the vocabulary request is invalid, ignore it
if self.srw and not Util.parse_vocabulary(newConf["vocabulary"]):
rospy.logwarn("Empty vocabulary. Using current vocabulary instead")
newConf["vocabulary"] = self.conf["vocabulary"]
# Check if we need to restart srw
if self.srw and self.conf and (
newConf["language"] != self.conf["language"] or
newConf["vocabulary"] != self.conf["language"] or
newConf["audio_expression"] != self.conf["audio_expression"] or
newConf["visual_expression"] != self.conf["visual_expression"] or
newConf["word_spotting"] != self.conf["word_spotting"] ):
need_to_restart_speech = True
else:
need_to_restart_speech = False
self.conf = newConf
#If we have enabled the speech recognition wrapper, reconfigure it
if need_to_restart_speech:
self.stop()
self.start()
return self.conf
# CALLBACK FOR SPEECH METHOD
def say( self, request ):
self.internalSay(request.data)
# Used for internal use. Called to say one sentence either from the speech
# action goal callback or message callback
def internalSay( self, sentence ):
#Get current voice parameters
current_voice = self.tts.getVoice()
current_language = self.tts.getLanguage()
current_volume = self.audio.getOutputVolume()
current_gain = self.tts.getVolume()
target_gain = 1.0
#Modify them if needed
if self.conf["voice"] != current_voice:
self.tts.setVoice( self.conf["voice"] )
if self.conf["language"] != current_language:
self.tts.setLanguage( self.conf["language"] )
if self.conf["volume"] != current_volume:
self.audio.setOutputVolume( self.conf["volume"] )
if target_gain != current_gain:
self.tts.setVolume(target_gain)
#Say whatever it is Nao needs to say
self.tts.say( sentence )
#And restore them
if self.conf["voice"] != current_voice:
self.tts.setVoice( current_voice )
if self.conf["language"] != current_language:
self.tts.setLanguage( current_language )
if self.conf["volume"] != current_volume:
self.audio.setOutputVolume( current_volume )
if target_gain != current_gain:
self.tts.setVolume(current_gain)
# SPEECH RECOGNITION SERVICES
def start( self, request = None ):
if self.srw:
rospy.logwarn("Speech recognition already started. Restarting.")
self.srw.stop()
# Start only if vocabulary is valid
if Util.parse_vocabulary( self.conf["vocabulary"] ):
self.srw = SpeechRecognitionWrapper(
self.pip,
self.pport,
self.pub,
self.conf )
else:
rospy.logwarn("Empty vocabulary. Ignoring request.")
return EmptyResponse()
def stop( self, request = None ):
if not self.srw:
rospy.logerr("Speech recognition was not started")
else:
self.srw.stop()
self.srw = None
return EmptyResponse()
def shutdown(self):
self.unsubscribe()
# Shutting down broker seems to be not necessary any more
# try:
# self.broker.shutdown()
# except RuntimeError,e:
# rospy.logwarn("Could not shut down Python Broker: %s", e)
#This class is meant to be used only by NaoSpeech
#The speech recognition wrapper is lazily initialised
class SpeechRecognitionWrapper(ALModule):
"""ROS wrapper for Naoqi speech recognition"""
def __init__(self, ip, port, publisher, config):
# Get a (unique) name for naoqi module which is based on the node name
# and is a valid Python identifier (will be useful later)
self.naoqi_name = Util.to_naoqi_name( rospy.get_name() )
#Start ALBroker (needed by ALModule)
self.broker = ALBroker(self.naoqi_name + "_broker",
"0.0.0.0", # listen to anyone
0, # find a free port and use it
ip, # parent broker IP
port ) # parent broker port
#Init superclass ALModule
ALModule.__init__( self, self.naoqi_name )
# Start naoqi proxies
self.memory = ALProxy("ALMemory")
self.proxy = ALProxy("ALSpeechRecognition")
#Keep publisher to send word recognized
self.pub = publisher
#Install global variables needed by Naoqi
self.install_naoqi_globals()
#Check no one else is subscribed to this event
subscribers = self.memory.getSubscribers(Constants.EVENT)
if subscribers:
rospy.logwarn("Speech recognition already in use by another node")
for module in subscribers:
self.stop(module)
# Configure this instance
self.reconfigure(config)
#And subscribe to the event raised by speech recognition
rospy.loginfo("Subscribing '{}' to NAO speech recognition".format(
self.naoqi_name) )
self.memory.subscribeToEvent(
Constants.EVENT,
self.naoqi_name,
self.on_word_recognised.func_name )
# Install global variables needed for Naoqi callbacks to work
def install_naoqi_globals(self):
globals()[self.naoqi_name] = self
globals()["memory"] = self.memory
def reconfigure(self, config):
self.proxy.setLanguage( config["language"] )
self.proxy.setAudioExpression( config["audio_expression"] )
self.proxy.setVisualExpression( config["visual_expression"] )
self.proxy.setVocabulary(
Util.parse_vocabulary( config["vocabulary"] ),
config["word_spotting"] )
def stop(self, module = None):
if module is None:
module = self.naoqi_name
rospy.loginfo("Unsubscribing '{}' from NAO speech recognition".format(
module))
try:
self.memory.unsubscribeToEvent( Constants.EVENT, module )
except RuntimeError:
rospy.logwarn("Could not unsubscribe from NAO speech recognition")
def on_word_recognised(self, key, value, subscriber_id ):
"""Publish the words recognized by NAO via ROS """
#Create dictionary, by grouping into tuples the list in value
temp_dict = dict( value[i:i+2] for i in range(0, len(value), 2) )
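#e.g. a NAOqi value such as ['yes', 0.67, 'no', 0.12] (word/confidence pairs)
#becomes {'yes': 0.67, 'no': 0.12}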
#Delete empty string from dictionary
if '' in temp_dict:
del(temp_dict[''])
self.pub.publish(WordRecognized( temp_dict.keys(), temp_dict.values() ))
if __name__ == '__main__':
ROSNaoSpeechModule = NaoSpeech("ROSNaoSpeechModule")
rospy.loginfo( "ROSNaoSpeechModule running..." )
rospy.spin()
rospy.loginfo("Stopping ROSNaoSpeechModule ...")
#If speech recognition was started make sure we stop it
if ROSNaoSpeechModule.srw:
ROSNaoSpeechModule.srw.stop()
# Shutdown the module
ROSNaoSpeechModule.shutdown()
rospy.loginfo("ROSNaoSpeechModule stopped.")
exit(0)
|
|
import sys
import os.path
import os
import subprocess
import datetime
import time
from itertools import product
from collections import defaultdict
USE_SBATCH = True
USE_QSUB = False
QSUB_WORK_QUEUE = 'normal'
MEASURE_PERFORMANCE = False
MAX_TRIES = 10
RUN_LOCAL = False
#Multicore settings
MAX_CORES = 4
MAX_MEMORY_MB = 16000
MIN_TIME = 480
MAX_JOBS_TO_SUBMIT = 100
TIME_FACTOR = 1
class Job(object):
block_count = 0
all_jobs = []
def __init__(self):
# Allow for recovery if driver script fails - use deterministic job names.
self.name = os.path.split(os.getcwd())[1] + '.' + self.__class__.__name__ + str(len(Job.all_jobs)) # + '_' + datetime.datetime.now().isoformat()
self.jobid = None
self.output = []
self.already_done = False
self.processors = 1
self.time = 60
self.memory = 1000
self.try_count = 0
Job.all_jobs.append(self)
def get_done(self):
if self.already_done:
return True
all_outputs = self.output if isinstance(self.output, (list, tuple)) else [self.output]
if all([os.path.exists(f) for f in all_outputs]):
self.already_done = True
return True
return False
def dependendencies_done(self):
for d in self.dependencies:
if not d.get_done():
return False
return True
def run(self):
# Make sure output directories exist
out = self.output
if isinstance(out, basestring):
out = [out]
for f in out:
if not os.path.isdir(os.path.dirname(f)):
os.mkdir(os.path.dirname(f))
if self.get_done():
return 0
if self.try_count >= MAX_TRIES:
return 0
print "RUN", self.name
print " ".join(self.command())
self.try_count += 1
if RUN_LOCAL:
subprocess.check_call(self.command())
elif USE_SBATCH:
command_list = ["sbatch",
"-J", self.name, # Job name
"-p", "serial_requeue", # Work queue (partition) = general / unrestricted / interactive / serial_requeue
#"-p", "general", # Work queue (partition) = general / unrestricted / interactive / serial_requeue
"--requeue",
#"--exclude=holy2b05105,hp1301,hp0403", # Exclude some bad nodes - holy2b05105 did not have scratch2 mapped.
"-n", str(self.processors), # Number of processors
"-t", str(self.time), # Time in munites 1440 = 24 hours
"--mem-per-cpu", str(self.memory), # Max memory in MB (strict - attempts to allocate more memory will fail)
"--open-mode=append", # Append to log files
"-o", "logs/out." + self.name, # Standard out file
"-e", "logs/error." + self.name] # Error out file
if len(self.dependencies) > 0:
#print command_list
#print self.dependency_strings()
command_list = command_list + self.dependency_strings()
print command_list
process = subprocess.Popen(command_list,
stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
if MEASURE_PERFORMANCE:
sbatch_out, sbatch_err = process.communicate("#!/bin/bash\nperf stat -o logs/perf.{0} {1}".format(self.name, " ".join(self.command())))
else:
sbatch_out, sbatch_err = process.communicate("#!/bin/bash\n{0}".format(" ".join(self.command())))
if len(sbatch_err) == 0:
self.jobid = sbatch_out.split()[3]
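# sbatch reads the job script from stdin and prints "Submitted batch job <id>";
# the job id is the fourth whitespace-separated token of that output.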
#print 'jobid={0}'.format(self.jobid)
else:
subprocess.check_call(["bsub",
"-Q", "all ~0",
"-r",
"-R", "rusage[mem=" + str(self.memory) + "]",
"-g", "/diced_connectome",
"-q", "normal_serial" ,
"-J", self.name,
"-o", "logs/out." + self.name,
"-e", "logs/error." + self.name,
"-w", self.dependency_strings()] +
self.command())
return 1
def dependency_strings(self):
if USE_SBATCH:
dependency_string = ":".join(d.jobid for d in self.dependencies if not d.get_done())
if len(dependency_string) > 0:
return ["-d", "afterok:" + dependency_string]
return []
else:
return " && ".join("done(%s)" % d.name for d in self.dependencies if not d.get_done())
@classmethod
def run_all(cls):
for j in cls.all_jobs:
j.run()
@classmethod
def run_job_blocks(cls, job_block_list, required_cores, required_memory, required_full_time):
block_name = 'JobBlock{0}.'.format(cls.block_count) + job_block_list[0][0].name
cls.block_count += 1
print "RUNNING JOB BLOCK: " + block_name
print "{0} blocks, {1} jobs, {2} cores, {3}MB memory, {4}m time.".format(
len(job_block_list), [len(jb) for jb in job_block_list], required_cores, required_memory, required_full_time)
full_command = "#!/bin/bash\n"
dependency_set = set()
# Find all dependencies for all jobs
for job_block in job_block_list:
for j in job_block:
for d in j.dependencies:
if not d.get_done() and d.jobid is not None:
if USE_SBATCH or USE_QSUB:
dependency_set.add(d.jobid)
# else:
# dependency_set.add(d.name)
if USE_SBATCH:
command_list = ["sbatch",
"-J", block_name, # Job name
"-p", "serial_requeue", # Work queue (partition) = general / unrestricted / interactive / serial_requeue
#"-p", "general", # Work queue (partition) = general / unrestricted / interactive / serial_requeue
"--requeue",
#"--exclude=holy2b05105,hp1301,hp0403", # Exclude some bad nodes - holy2b05105 did not have scratch2 mapped.
"-n", str(required_cores), # Number of processors
"-t", str(required_full_time), # Time in munites 1440 = 24 hours
"--mem-per-cpu", str(required_memory), # Max memory in MB (strict - attempts to allocate more memory will fail)
"--open-mode=append", # Append to log files
"-o", "logs/out." + block_name, # Standard out file
"-e", "logs/error." + block_name] # Error out file
elif USE_QSUB:
command_list = ["qsub"]#,
# "-N", block_name, # Job name
# "-A", 'hvd113', # XSEDE Allocation
# "-q", QSUB_WORK_QUEUE, # Work queue (partition) = general / unrestricted / interactive / serial_requeue
# "-l", 'nodes=1:ppn={0},walltime={1}:00'.format(str(required_cores), required_full_time), # Number of processors
# #"-l", 'walltime={0}:00'.format(self.time), # Time in munites 1440 = 24 hours
# #"-l", '-mppmem={0}'.format(self.memory), # Max memory per cpu in MB (strict - attempts to allocate more memory will fail)
# "-e", "logs/outerror." + block_name('_')[0], # Error out file
# "-j", "eo"] # Join standard out file to error file
# Better to use file input rather than command line inputs (according to XSEDE helpdesk)
# Request MAX_CORES so that memory requirement is also met
full_command += (
"#PBS -N {0}\n".format(block_name) +
"#PBS -A hvd113\n" +
"#PBS -q {0}\n".format(QSUB_WORK_QUEUE) +
"#PBS -l nodes=1:ppn={0}:native,walltime={1}:00\n".format(str(MAX_CORES), required_full_time) +
"#PBS -e logs/outerror.{0}\n".format(block_name.split('_')[0]) +
"#PBS -j eo\n")
if len(dependency_set) > 0:
if USE_SBATCH:
dependency_string = ":".join(d for d in dependency_set)
if len(dependency_string) > 0:
print "depends on jobs:" + dependency_string
command_list += ["-d", "afterok:" + dependency_string]
elif USE_QSUB:
dependency_string = ":".join(d for d in dependency_set)
if len(dependency_string) > 0:
print "depends on jobs:" + dependency_string
full_command += "#PBS -W depend=afterok:" + dependency_string + "\n"
else:
command_list += " && ".join("done(%s)" % d for d in dependency_set)
if USE_SBATCH:
full_command += "date\n"
elif USE_QSUB:
full_command += "cd $PBS_O_WORKDIR\ndate\n"
# Generate job block commands
for job_block in job_block_list:
block_commands = ''
for j in job_block:
block_commands += '{0} &\n'.format(' '.join(j.command()))
print j.name
full_command += '{0}wait\ndate\n'.format(block_commands)
# # Test job ids
# for job_block in job_block_list:
# for j in job_block:
# j.jobid = str(cls.block_count - 1)
# print command_list
# print full_command
# Submit job
process = subprocess.Popen(command_list, stdin=subprocess.PIPE, stdout=subprocess.PIPE, stderr=subprocess.PIPE)
submit_out, submit_err = process.communicate(full_command)
# Process output
if len(submit_err) == 0:
if USE_SBATCH:
new_jobid = submit_out.split()[3]
elif USE_QSUB:
new_jobid = submit_out.split('.')[0]
print 'jobid={0}'.format(new_jobid)
for job_block in job_block_list:
for j in job_block:
j.jobid = new_jobid
return block_name
@classmethod
def multicore_run_list(cls, runnable_jobs):
submit_count = 0
submitted_job_blocks = {}
job_block_list = [[]]
required_cores = 0
required_memory = 0
required_full_cores = 0
required_full_memory = 0
required_full_time = 0
required_block_time = 0
for j in runnable_jobs:
# Make sure output directories exist
out = j.output
if isinstance(out, basestring):
out = [out]
for f in out:
if not os.path.isdir(os.path.dirname(f)):
os.mkdir(os.path.dirname(f))
if j.get_done():
continue
# Check dependencies
contains_dependent = False
for d in j.dependencies:
if isinstance(d, JobSplit):
d = d.job
if d in job_block_list[-1]:
contains_dependent = True
break
# See if we can fit this job into the current multicore job
if (not contains_dependent and required_cores + j.processors <= MAX_CORES and
required_memory + j.memory <= MAX_MEMORY_MB):
# Add this job to the job list
required_cores += j.processors
required_memory += j.memory
required_block_time = max(required_block_time, j.time)
job_block_list[-1].append(j)
else:
#print (contains_dependent, required_cores, required_memory)
#print (j.processors, j.memory)
# This block is full - run it or add another
required_full_cores = max(required_full_cores, required_cores)
required_full_memory = max(required_full_memory, required_memory)
required_full_time += required_block_time / TIME_FACTOR
# See if we need more jobs to fill the time
if (not contains_dependent and required_full_time < MIN_TIME):
# Start a new block of jobs
job_block_list.append([j])
required_cores = j.processors
required_memory = j.memory
required_block_time = j.time
else:
# Run the current job block list
block_name = Job.run_job_blocks(job_block_list, required_full_cores, required_full_memory, required_full_time)
submitted_job_blocks[block_name] = job_block_list
# Reset for next block
job_block_list = [[j]]
required_cores = j.processors
required_memory = j.memory
required_full_cores = 0
required_full_memory = 0
required_full_time = 0
required_block_time = j.time
# Limit number of jobs submitted at once
submit_count += 1
if MAX_JOBS_TO_SUBMIT > 0 and submit_count >= MAX_JOBS_TO_SUBMIT:
break
# Run the final (possibly partial) job block list
if len(job_block_list[0]) > 0:
required_full_cores = max(required_full_cores, required_cores)
required_full_memory = max(required_full_memory, required_memory)
required_full_time += required_block_time
block_name = Job.run_job_blocks(job_block_list, required_full_cores, required_full_memory, required_full_time)
submitted_job_blocks[block_name] = job_block_list
submit_count += 1
return submitted_job_blocks
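# Worked example (hypothetical jobs): with MAX_CORES=4 and MAX_MEMORY_MB=16000,
# four 1-core/1000MB jobs fill one block; further blocks keep being appended
# until the summed per-block times reach MIN_TIME (480 minutes), and only then
# is a single sbatch/qsub submission made via run_job_blocks for the whole list.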
@classmethod
def multicore_run_all(cls):
cls.multicore_run_list(cls.all_jobs)
@classmethod
def multicore_keep_running(cls):
all_jobs_complete = False
cancelled_jobs = {}
cancelled_requeue_iters = 5
submitted_job_blocks = {}
while not all_jobs_complete:
# Find running job blocks
sacct_output = subprocess.check_output(['sacct', '-n', '-o', 'JobID,JobName%100,State%20'])
pending_running_complete_job_blocks = {}
pending = 0
running = 0
complete = 0
failed = 0
cancelled = 0
timeout = 0
other_status = 0
non_matching = 0
for job_line in sacct_output.split('\n'):
job_split = job_line.split()
if len(job_split) == 0:
continue
job_id = job_split[0]
job_name = job_split[1]
job_status = ' '.join(job_split[2:])
if job_name in submitted_job_blocks:
if job_status in ['PENDING', 'RUNNING', 'COMPLETED']:
if job_name in pending_running_complete_job_blocks:
print 'Found duplicate job: ' + job_name
dup_job_id, dup_job_status = pending_running_complete_job_blocks[job_name]
print job_id, job_status, dup_job_id, dup_job_status
job_to_kill = None
if job_status == 'PENDING':
job_to_kill = job_id
elif dup_job_status == 'PENDING':
job_to_kill = dup_job_id
pending_running_complete_job_blocks[job_name] = (job_id, job_status)
if job_to_kill is not None:
print 'Canceling job ' + job_to_kill
try:
scancel_output = subprocess.check_output(['scancel', '{0}'.format(job_to_kill)])
print scancel_output
except:
print "Error canceling job:", sys.exc_info()[0]
else:
pending_running_complete_job_blocks[job_name] = (job_id, job_status)
if job_status == 'PENDING':
pending += 1
elif job_status == 'RUNNING':
running += 1
elif job_status == 'COMPLETED':
complete += 1
elif job_status in ['FAILED', 'NODE_FAIL']:
failed += 1
elif job_status in ['CANCELLED', 'CANCELLED+'] or job_status.startswith('CANCELLED'):
cancelled += 1
# This job could requeued after preemption
# Wait cancelled_requeue_iters before requeueing
cancelled_iters = 0
if job_id in cancelled_jobs:
cancelled_iters = cancelled_jobs[job_id]
if cancelled_iters < cancelled_requeue_iters:
pending_running_complete_job_blocks[job_name] = (job_id, job_status)
cancelled_jobs[job_id] = cancelled_iters + 1
elif job_status in ['TIMEOUT']:
timeout += 1
else:
print "Unexpected status: {0}".format(job_status)
other_status += 1
elif job_name not in ['batch', 'true', 'prolog']:
non_matching += 1
#print 'Found {0} running job blocks.'.format(len(pending_running_complete_job_blocks))
# Find running jobs
pending_running_complete_jobs = {}
for job_block_name in pending_running_complete_job_blocks:
job_id, job_status = pending_running_complete_job_blocks[job_block_name]
job_block_list = submitted_job_blocks[job_block_name]
for job_list in job_block_list:
for job in job_list:
pending_running_complete_jobs[job.name] = (job_id, job_status)
#print '== {0} running jobs.'.format(len(pending_running_complete_jobs))
# Make a list of runnable jobs
run_count = 0
block_count = 0
runnable_jobs = []
for j in cls.all_jobs:
if j.name not in pending_running_complete_jobs and not j.get_done() and j.dependendencies_done():
runnable_jobs.append(j)
run_count += 1
new_job_blocks = Job.multicore_run_list(runnable_jobs)
block_count += len(new_job_blocks)
submitted_job_blocks.update(new_job_blocks)
print 'Found {0} pending, {1} running, {2} complete, {3} failed, {4} cancelled, {5} timeout, {6} unknown status and {7} non-matching job blocks.'.format(
pending, running, complete, failed, cancelled, timeout, other_status, non_matching)
print "Queued {0} job{1} in {2} block{3}.".format(
run_count, '' if run_count == 1 else 's',
block_count, '' if block_count == 1 else 's')
if pending > 0 or running > 0 or run_count > 0:
time.sleep(60)
else:
all_jobs_complete = True
@classmethod
def keep_running(cls):
all_jobs_complete = False
cancelled_jobs = {}
cancelled_requeue_iters = 3
while not all_jobs_complete:
all_job_names = {}
# Generate dictionary of jobs
for j in cls.all_jobs:
all_job_names[j.name] = True
# Find running jobs
sacct_output = subprocess.check_output(['sacct', '-n', '-o', 'JobID,JobName%100,State%20'])
pending_running_complete_jobs = {}
pending = 0
running = 0
complete = 0
failed = 0
cancelled = 0
timeout = 0
other_status = 0
non_matching = 0
for job_line in sacct_output.split('\n'):
job_split = job_line.split()
if len(job_split) == 0:
continue
job_id = job_split[0]
job_name = job_split[1]
job_status = ' '.join(job_split[2:])
if job_name in all_job_names:
if job_status in ['PENDING', 'RUNNING', 'COMPLETED']:
if job_name in pending_running_complete_jobs:
print 'Found duplicate job: ' + job_name
dup_job_id, dup_job_status = pending_running_complete_jobs[job_name]
print job_id, job_status, dup_job_id, dup_job_status
job_to_kill = None
if job_status == 'PENDING':
job_to_kill = job_id
elif dup_job_status == 'PENDING':
job_to_kill = dup_job_id
pending_running_complete_jobs[job_name] = (job_id, job_status)
if job_to_kill is not None:
print 'Canceling job ' + job_to_kill
try:
scancel_output = subprocess.check_output(['scancel', '{0}'.format(job_to_kill)])
print scancel_output
except:
print "Error canceling job:", sys.exc_info()[0]
else:
pending_running_complete_jobs[job_name] = (job_id, job_status)
if job_status == 'PENDING':
pending += 1
elif job_status == 'RUNNING':
running += 1
elif job_status == 'COMPLETED':
complete += 1
elif job_status in ['FAILED', 'NODE_FAIL']:
failed += 1
elif job_status in ['CANCELLED', 'CANCELLED+'] or job_status.startswith('CANCELLED'):
cancelled += 1
# This job could be requeued after preemption
# Wait cancelled_requeue_iters before requeueing
cancelled_iters = 0
if job_id in cancelled_jobs:
cancelled_iters = cancelled_jobs[job_id]
if cancelled_iters < cancelled_requeue_iters:
pending_running_complete_jobs[job_name] = (job_id, job_status)
cancelled_jobs[job_id] = cancelled_iters + 1
elif job_status in ['TIMEOUT']:
timeout += 1
else:
print "Unexpected status: {0}".format(job_status)
other_status += 1
elif job_name not in ['batch', 'true', 'prolog']:
non_matching += 1
run_count = 0
for j in cls.all_jobs:
if j.name not in pending_running_complete_jobs and j.dependendencies_done():
run_count += j.run()
print 'Found {0} pending, {1} running, {2} complete, {3} failed, {4} cancelled, {5} timeout, {6} unknown status and {7} non-matching jobs.'.format(
pending, running, complete, failed, cancelled, timeout, other_status, non_matching)
print "Queued {0} job{1}.".format(run_count, '' if run_count == 1 else 's')
if pending > 0 or running > 0 or run_count > 0:
time.sleep(60)
else:
all_jobs_complete = True
class JobSplit(object):
'''make a multi-output job object look like a single output job'''
def __init__(self, job, idx):
self.job = job
self.idx = idx
self.name = job.name
def get_done(self):
return self.job.get_done()
def set_done(self, val):
self.job.already_done = val
already_done = property(get_done, set_done)
@property
def output(self):
return self.job.output[self.idx]
@property
def indices(self):
return self.job.indices[self.idx]
@property
def jobid(self):
return self.job.jobid
@jobid.setter
def jobid(self, value):
self.job.jobid = value
class Reassemble(Job):
'''reassemble a diced job'''
def __init__(self, dataset, output_sizes, joblist, output):
Job.__init__(self)
self.output_sizes = output_sizes
self.dataset = dataset
self.dependencies = joblist
self.memory = 4000
self.output = output
self.already_done = False
def command(self):
return ['./reassemble.sh', self.dataset,
str(len(self.output_sizes))] + \
[str(s) for s in self.output_sizes] + \
[j.output for j in self.dependencies] + \
[self.output]
class Subimage_ProbabilityMap(Job):
def __init__(self, raw_image, idx, xlo, ylo, xhi, yhi, xlo_core, ylo_core, xhi_core, yhi_core):
Job.__init__(self)
self.already_done = False
self.raw_image = raw_image
self.dependencies = []
self.memory = 4000
self.coords = [str(c) for c in (xlo, ylo, xhi, yhi)]
self.core_coords = [str(c) for c in (xlo_core, ylo_core, xhi_core, yhi_core)]
self.output = os.path.join('subimage_probabilities',
'probs_%d_%s.hdf5' % (idx, '_'.join(self.coords)))
def command(self):
return ['python', 'compute_probabilities.py', self.raw_image, self.output] + \
self.coords + self.core_coords
class Subimage_SegmentedSlice(Job):
def __init__(self, idx, probability_map, raw_image, xlo, ylo, xhi, yhi, xlo_core, ylo_core, xhi_core, yhi_core):
Job.__init__(self)
self.already_done = False
self.probability_map = probability_map
self.raw_image = raw_image
self.dependencies = [self.probability_map]
self.memory = 4000
self.coords = [str(c) for c in (xlo, ylo, xhi, yhi)]
self.core_coords = [str(c) for c in (xlo_core, ylo_core, xhi_core, yhi_core)]
self.output = os.path.join('subimage_segmentations',
'segs_%d_%s.hdf5' % (idx, '_'.join(self.coords)))
def command(self):
return ['python', 'segment_image.py', self.raw_image, self.probability_map.output, self.output] + \
self.coords + self.core_coords
class ClassifySegment_Image(Job):
def __init__(self, idx, raw_image, classifier_file):
Job.__init__(self)
self.already_done = False
self.raw_image = raw_image
self.stump_image = raw_image.replace('input_images', 'stump_images')
self.classifier_file = classifier_file
self.dependencies = []
self.memory = 8000
self.time = 300
self.features_file = os.path.join('segmentations',
'features_%d.hdf5' % (idx))
self.prob_file = os.path.join('segmentations',
'probs_%d.hdf5' % (idx))
self.output = os.path.join('segmentations',
'segs_%d.hdf5' % (idx))
#self.already_done = os.path.exists(self.output)
def command(self):
return ['python',
os.path.join(os.environ['CONNECTOME'], 'Control', 'segment_image.py'),
self.raw_image, self.classifier_file, self.stump_image, self.prob_file, self.output]
class Block(Job):
def __init__(self, segmented_slices, indices, *args):
Job.__init__(self)
self.already_done = False
self.segmented_slices = segmented_slices
self.dependencies = segmented_slices
self.memory = 500
self.time = 30
self.output = os.path.join('bigdicedblocks', 'block_%d_%d_%d.hdf5' % indices)
self.args = [str(a) for a in args] + [self.output]
#self.already_done = os.path.exists(self.output)
def command(self):
return ['python', os.path.join(os.environ['CONNECTOME'], 'Control', 'dice_block.py')] + self.args + [s.output for s in self.segmented_slices]
class FusedBlock(Job):
def __init__(self, block, indices, global_block_number):
Job.__init__(self)
self.already_done = False
self.block = block
self.global_block_number = global_block_number
self.dependencies = [block]
self.processors = 4
self.memory = 16000
# memory is per proc, so we are requesting 64GB here (and sometimes use it)
#self.time = 360
self.time = 480
self.indices = indices
self.output = os.path.join('bigfusedblocks', 'fusedblock_%d_%d_%d.hdf5' % indices)
#self.already_done = os.path.exists(self.output)
def command(self):
return ['python',
os.path.join(os.environ['CONNECTOME'], 'WindowFusion', 'window_fusion_cpx.py'),
self.block.output,
str(self.global_block_number),
self.output]
class CleanBlock(Job):
def __init__(self, fusedblock):
Job.__init__(self)
self.already_done = False
self.indices = fusedblock.indices
self.block = fusedblock.block
self.global_block_number = fusedblock.global_block_number
self.dependencies = [fusedblock]
self.memory = 6000
#self.memory = 8000
self.time = 60
self.inputlabels = fusedblock.output
self.inputprobs = fusedblock.block.output
self.output = os.path.join('cleanedblocks', 'block_%d_%d_%d.hdf5' % self.indices)
#self.already_done = os.path.exists(self.output)
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'clean_block.sh'), self.inputlabels, self.inputprobs, self.output]
class PairwiseMatching(Job):
def __init__(self, fusedblock1, fusedblock2, direction, even_or_odd, halo_width):
Job.__init__(self)
self.direction = direction
self.already_done = False
self.even_or_odd = even_or_odd
self.halo_width = halo_width
self.indices = (fusedblock1.indices, fusedblock2.indices)
self.dependencies = [fusedblock1, fusedblock2]
#self.memory = 16000
self.memory = 8000
#self.memory = 4000
self.time = 60
outdir = 'pairwise_matches_%s_%s' % (['X', 'Y', 'Z',][direction], even_or_odd)
self.output = (os.path.join(outdir, os.path.basename(fusedblock1.output)),
os.path.join(outdir, os.path.basename(fusedblock2.output)))
#self.already_done = os.path.exists(self.output[0]) and os.path.exists(self.output[1])
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'pairwise_match_labels.sh')] + [d.output for d in self.dependencies] + \
[str(self.direction + 1), # +1 because the MATLAB code is 1-indexed
str(self.halo_width)] + list(self.output)
class JoinConcatenation(Job):
def __init__(self, outfilename, inputs):
Job.__init__(self)
self.already_done = False
self.dependencies = inputs
self.memory = 1000
self.time = 60
self.output = os.path.join('joins', outfilename)
#self.already_done = os.path.exists(self.output)
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'concatenate_joins.sh')] + \
[s.output for s in self.dependencies] + \
[self.output]
class GlobalRemap(Job):
def __init__(self, outfilename, joinjob):
Job.__init__(self)
self.already_done = False
self.dependencies = [joinjob]
self.memory = 1000
self.time = 60
self.joinfile = joinjob.output
self.output = os.path.join('joins', outfilename)
#self.already_done = os.path.exists(self.output)
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'create_global_map.sh'), self.joinfile, self.output]
class RemapBlock(Job):
def __init__(self, blockjob, build_remap_job, indices):
Job.__init__(self)
self.already_done = False
self.dependencies = [blockjob, build_remap_job]
#self.memory = 2000
self.memory = 4000
#self.memory = 8000
self.time = 60
self.inputfile = blockjob.output
self.mapfile = build_remap_job.output
self.indices = indices
self.output = os.path.join('relabeledblocks', 'block_%d_%d_%d.hdf5' % indices)
#self.already_done = os.path.exists(self.output)
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'remap_block.sh'), self.inputfile, self.mapfile, self.output]
class CopyImage(Job):
def __init__(self, input, idx):
Job.__init__(self)
self.already_done = False
self.dependencies = []
self.memory = 4000
self.inputfile = input
self.idx = idx
self.output = os.path.join('output_images', 'image_%05d.tif' % idx)
def command(self):
return ['/bin/cp', self.inputfile, self.output]
class ExtractLabelPlane(Job):
def __init__(self, zplane, xy_halo, remapped_blocks, zoffset, image_size, xy_block_size):
Job.__init__(self)
self.already_done = False
self.dependencies = remapped_blocks
self.memory = 1000
self.time = 60
self.zoffset = zoffset
self.xy_halo = xy_halo
self.image_size = image_size
self.xy_block_size = xy_block_size
self.output = os.path.join('output_labels', 'labels_%05d.tif' % zplane)
#self.already_done = os.path.exists(self.output)
def generate_args(self):
for block in self.dependencies:
# XY corner followed by filename
yield str(block.indices[0] * self.xy_block_size)
yield str(block.indices[1] * self.xy_block_size)
yield block.output
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'extract_label_plane.sh'), self.output, str(self.image_size), str(self.zoffset), str(self.xy_halo)] + \
list(self.generate_args())
class ExtractOverlayPlane(Job):
def __init__(self, zplane, xy_halo, remapped_blocks, zoffset, image_size, xy_block_size, input_image_path):
Job.__init__(self)
self.already_done = False
self.dependencies = remapped_blocks
self.memory = 4000
self.time = 60
self.zoffset = zoffset
self.xy_halo = xy_halo
self.image_size = image_size
self.xy_block_size = xy_block_size
self.input_image_path = input_image_path
self.output = os.path.join('output_overlay', 'overlay_%05d.png' % zplane)
#self.already_done = os.path.exists(self.output)
def generate_args(self):
for block in self.dependencies:
# XY corner followed by filename
yield str(block.indices[0] * self.xy_block_size)
yield str(block.indices[1] * self.xy_block_size)
yield block.output
def command(self):
return [os.path.join(os.environ['CONNECTOME'], 'Control', 'extract_overlay_plane.sh'), self.output, self.input_image_path, str(self.image_size), str(self.zoffset), str(self.xy_halo)] + \
list(self.generate_args())
###############################
# Helper functions
###############################
def dice_iter(full_size, core_size, halo_size):
# we produce two sets of bounds: halo+core+halo and core alone
for lo in range(0, full_size, core_size):
yield (max(0, lo - halo_size),
min(full_size - 1, lo + core_size + 2 * halo_size),
lo,
min(full_size - 1, lo + core_size))
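# Worked example (hand-computed for this sketch): dice_iter(10, 4, 1) yields
#   (0, 6, 0, 4), (3, 9, 4, 8), (7, 9, 8, 9)
# where each tuple is (halo_lo, halo_hi, core_lo, core_hi), clipped to the volume.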
def dice(job_builder, args, full_sizes, core_sizes, halo_sizes):
iters = [dice_iter(*sizes) for sizes in zip(full_sizes, core_sizes, halo_sizes)]
jobs = []
for coords in product(*iters):
# coords is a tuples of (lo, hi)
lovals, hivals, locore, hicore = zip(*coords)
jobs.append(job_builder(*(args + lovals + hivals + locore + hicore)))
return jobs
###############################
# Driver
###############################
if __name__ == '__main__':
assert 'CONNECTOME' in os.environ
#assert 'VIRTUAL_ENV' in os.environ
# Default settings
image_size = 2048
probability_subimage_size = 1024
probability_subimage_halo = 32
segmentation_subimage_size = 1024
segmentation_subimage_halo = 128
block_xy_halo = 64
block_xy_size = 512 - (2 * 64)
block_z_size = 52
block_z_halo = 6
classifier_file = os.path.join(os.environ['CONNECTOME'], 'DeepNets', 'deep_net_combo3_13November2013.h5')
settings_file = sys.argv[1]
os.environ['CONNECTOME_SETTINGS'] = settings_file
execfile(settings_file)
images = [f.rstrip() for f in open(sys.argv[2])]
segmentations = [ClassifySegment_Image(idx, im, classifier_file)
for idx, im in enumerate(images)]
#segmentations = [f.rstrip() for f in open(sys.argv[2])]
#print segmentations
# Dice full volume
blocks = {}
nblocks_x = (image_size - 2 * block_xy_halo) / block_xy_size
nblocks_y = (image_size - 2 * block_xy_halo) / block_xy_size
nblocks_z = (len(segmentations) - 2 * block_z_halo) / block_z_size
block_order = []
for block_idx_z in range(nblocks_z):
lo_slice = block_idx_z * block_z_size
hi_slice = lo_slice + block_z_size + 2 * block_z_halo
for block_idx_y in range(nblocks_y):
ylo = block_idx_y * block_xy_size
yhi = ylo + block_xy_size + 2 * block_xy_halo
for block_idx_x in range(nblocks_x):
xlo = block_idx_x * block_xy_size
xhi = xlo + block_xy_size + 2 * block_xy_halo
print "Making block {0}, slice {1}, crop {2}.".format(
(block_idx_x, block_idx_y, block_idx_z),
(lo_slice, hi_slice),
(xlo, ylo, xhi, yhi))
blocks[block_idx_x, block_idx_y, block_idx_z] = \
Block(segmentations[lo_slice:hi_slice],
(block_idx_x, block_idx_y, block_idx_z),
xlo, ylo, xhi, yhi)
block_order.append((block_idx_x, block_idx_y, block_idx_z))
# Window fuse all blocks
# Generate block id based on block index with z as most significant (allows additional slabs to be added later)
fused_blocks = dict((idxs, FusedBlock(blocks[idxs], idxs,
idxs[0] + idxs[1] * nblocks_x + idxs[2] * nblocks_x * nblocks_y)) for idxs in block_order)
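# For example (hypothetical sizes): with nblocks_x = nblocks_y = 4, block index
# (1, 2, 3) gets global id 1 + 2*4 + 3*4*4 = 57, so z is the most significant digit.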
# Cleanup all blocks (remove small or completely enclosed segments)
cleaned_blocks = dict((idxs, CleanBlock(fused_blocks[idxs])) for idxs in block_order)
#cleaned_blocks = fused_blocks
# Pairwise match all blocks.
#
# We overwrite each block in cleaned_blocks (the python dict, not the file)
# with the output of the pairwise matching, and work in non-overlapping
# sets (even-to-odd, then odd-to-even)
for direction in range(3): # X, Y, Z
for wpidx, which_pairs in enumerate(['even', 'odd']):
for idx in block_order:
if (idx[direction] % 2) == wpidx: # merge even-to-odd, then odd-to-even
neighbor_idx = list(idx)
neighbor_idx[direction] += 1 # neighbor in the +1 direction; existence checked below
neighbor_idx = tuple(neighbor_idx)
if neighbor_idx in cleaned_blocks:
pw = PairwiseMatching(cleaned_blocks[idx], cleaned_blocks[neighbor_idx],
direction, # 0-based here; shifted to 1-based (MATLAB) in command()
which_pairs,
block_xy_halo if direction < 2 else block_z_halo)
# we can safely overwrite (variables, not files)
# because of nonoverlapping even/odd sets
cleaned_blocks[idx] = JobSplit(pw, 0)
cleaned_blocks[neighbor_idx] = JobSplit(pw, 1)
# Concatenate the joins from all the blocks into a single file, for building
# the global remap. Work first on XY planes, to add some parallelism and
# limit number of command arguments.
plane_joins_lists = {}
for idxs in block_order:
plane_joins_lists[idxs[2]] = plane_joins_lists.get(idxs[2], []) + [cleaned_blocks[idxs]]
plane_join_jobs = [JoinConcatenation('concatenate_Z_%d' % idx, plane_joins_lists[idx])
for idx in plane_joins_lists]
full_join = JoinConcatenation('concatenate_full', plane_join_jobs)
# build the global remap
remap = GlobalRemap('globalmap', full_join)
# and apply it to every block
remapped_blocks = [RemapBlock(cleaned_blocks[idx], remap, idx) for idx in block_order]
remapped_blocks_by_plane = defaultdict(list)
for bl in remapped_blocks:
remapped_blocks_by_plane[bl.indices[2]] += [bl]
# finally, extract the images and output labels
# output_images = [CopyImage(i, idx) for idx, i in enumerate(images)]
max_zslab = max(remapped_blocks_by_plane.keys())
output_labels = [ExtractLabelPlane(idx, block_xy_halo,
remapped_blocks_by_plane[min(idx / block_z_size, max_zslab)],
idx - block_z_size * min(idx / block_z_size, max_zslab), # offset within block
image_size, block_xy_size)
for idx, _ in enumerate(segmentations)]
# optional, render overlay images
output_labels = [ExtractOverlayPlane(idx, block_xy_halo,
remapped_blocks_by_plane[min(idx / block_z_size, max_zslab)],
idx - block_z_size * min(idx / block_z_size, max_zslab), # offset within block
image_size, block_xy_size, im)
for idx, im in enumerate(images)]
# # Render fused blocks directly
# cleaned_blocks_by_plane = defaultdict(list)
# for idx, fb in cleaned_blocks.iteritems():
# cleaned_blocks_by_plane[fb.indices[2]] += [fb]
# max_zslab = max(cleaned_blocks_by_plane.keys())
# fused_output_labels = [ExtractLabelPlane(idx,
# cleaned_blocks_by_plane[min(idx / block_z_size, max_zslab)],
# idx - block_z_size * min(idx / block_z_size, max_zslab), # offset within block
# image_size, block_xy_size)
# for idx, _ in enumerate(segmentations)]
RUN_LOCAL = False # default when neither -l nor --local is given
if '-l' in sys.argv:
RUN_LOCAL = True
sys.argv.remove('-l')
if '--local' in sys.argv:
RUN_LOCAL = True
sys.argv.remove('--local')
if len(sys.argv) == 3:
Job.run_all()
elif '-k' in sys.argv or '--keeprunning' in sys.argv:
# Monitor job status and requeue as necessary
Job.keep_running()
elif '-m' in sys.argv or '--multicore' in sys.argv:
if RUN_LOCAL:
print "ERROR: --local cannot be used with --multicore (not yet implemented)."
else:
# Bundle jobs for multicore nodes
Job.multicore_run_all()
elif '-mk' in sys.argv or '--multicore-keeprunning' in sys.argv:
if RUN_LOCAL:
print "ERROR: --local cannot be used with --multicore-keeprunning (not yet implemented)."
else:
# Bundle jobs for multicore nodes
Job.multicore_keep_running()
else:
for j in Job.all_jobs:
if j.output == sys.argv[3] or sys.argv[3] in j.output or sys.argv[3] in j.output[0] or sys.argv[3] in j.name:
for k in j.dependencies:
if k.output != sys.argv[3] and sys.argv[3] not in k.output and sys.argv[3] not in k.output[0] and sys.argv[3] not in k.name:
k.already_done = True
j.run()
|
|
import xml.etree.ElementTree as ET
"""
elemToString
This takes in content, a node, and returns the inner text
"""
def elemToString(content):
return ET.tostring(content, encoding='utf8', method='text')
"""
cleanElemToString
This takes in content, a node, and returns the inner text with only one space between
words and no line breaks
"""
def cleanElemToString(content):
string = elemToString(content)
return filter(lambda x: x != "\n", string).replace(" ", "")
def stageDirElem(content):
string = ""
for children in content.findall("./*"):
if children.tag == "{http://www.tei-c.org/ns/1.0}lb":
string += "\n<br>\n"
else:
toString = ET.tostring(children, encoding='utf8', method='text')
string += filter(lambda x: x != "\n", toString).replace(" ", "")
return string
"""
stageDirInLine
This gets the stage directions in the middle of a line and writes them to our file.
This takes in content, a stage directions XML node, and a targetFile, the file object with write privileges.
"""
def stageDirInLine(content, targetFile):
xmlstr = stageDirElem(content)
targetFile.write("<i>%s</i>" % xmlstr)
"""
printSingleLine
This writes a string to file after removing extra spaces and all line breaks
This takes in line, a string, and targetFile, a file object with write privileges.
"""
def printSingleLine(line, targetFile):
targetFile.write(filter(lambda x: x != "\n", line).replace(" ", ""))
"""
speaker
This writes the speaker's name to file and returns it to use as the key for the dictionary.
This takes in content, a speaker node, and a targetFile, a file object with write privileges.
"""
def speaker(content, targetFile):
xmlstr = cleanElemToString(content)
targetFile.write('\n<br>\n<span class = "character">%s</span> '% xmlstr)
return xmlstr
def writeFormatting(className):
formatting.write("\n$('.%s').on('click', function(e){\n" % className)
formatting.write("e.preventDefault();\n")
formatting.write("$('.%s').toggleClass('strikethrough');\n});\n" % className)
"""
getLines
This will write all the lines that one character speaks and the in-line stage directions to a file.
It takes in content, a node with tag 'ab', and a targetFile, a file object with write privilege.
"""
def getLines(content, targetFile):
line = ""
numLines = 0
listOfSD = []
for words in content.findall("./*"):
# If the child is a milestone, it prints out the previous line, the next line number, and resets
if ((words.tag == "{http://www.tei-c.org/ns/1.0}milestone") and (words.get('unit') == "ftln")):
numLines += 1
printSingleLine(line, targetFile)
if numLines > 0:
targetFile.write("</span>")
targetFile.write('\n<br>\n<span class="lineNum">%s</span>' % words.get('n')[4:])
targetFile.write('<span class = "%s">' % words.get('n').replace(".", "-"))
writeFormatting(words.get('n').replace(".", "-"))
line = ""
numLines += 1
# If the child node is a q or seg, those are wrappers, so we need to go one level deeper
elif((words.tag == "{http://www.tei-c.org/ns/1.0}seg")):
getLines(words, targetFile)
# If the child is a stage, we should print the line and then print the stage direction
elif (words.tag == "{http://www.tei-c.org/ns/1.0}stage"):
printSingleLine(line, targetFile)
targetFile.write(" ")
line = ""
stageDirInLine(words, targetFile)
listOfSD = listOfSD + [words.get('n')]
# q elements are wrappers as well (see the comment above), so recurse into them
elif(words.tag == "{http://www.tei-c.org/ns/1.0}q"):
getLines(words, targetFile)
# Any other tag that is not fw is a word, space, or punctuation that should be added to the line
elif (words.tag != "{http://www.tei-c.org/ns/1.0}fw"):
line += ET.tostring(words, encoding='utf8', method='text')
# Because we never hit a final milestone after reading in the last line, we need to print it out
printSingleLine(line, targetFile)
targetFile.write("</span>")
targetFile.write("<br>")
return (numLines, listOfSD)
"""
writeOneScene
This will write a single scene as we want it formatted and update the character line dictionary.
It takes in a scene (div2) node, a file to write to, and a dictionary that holds each character's line count.
"""
def writeOneScene(scene, targetFile, dictionary):
curSpeaker = ""
lines = 0
listOfSD = []
# This goes through every node in the scene, hence the need for outerLvlStageDir and stageDirInLine
for content in scene.iter():
# If we get a stage direction at this level, it should be an outer level one
if (content.tag == "{http://www.tei-c.org/ns/1.0}stage"):
if content.get('n') not in listOfSD:
stageDirInLine(content, targetFile)
# If we get a speaker, we need to update the current speaker
elif (content.tag == "{http://www.tei-c.org/ns/1.0}speaker"):
curSpeaker = speaker(content, targetFile)
# If we get an 'ab' tag, this is the start of a line for curSpeaker
elif(content.tag == "{http://www.tei-c.org/ns/1.0}ab"):
numLinesAndSD = getLines(content, targetFile)
lines = numLinesAndSD[0]
listOfSD += numLinesAndSD[1]
# Writes the line to the targetFile and updates the character dictionary
if curSpeaker not in dictionary:
dictionary[curSpeaker] = lines
else:
dictionary[curSpeaker] += lines
"""
visitAct
This is a visitor parser to create a custom navigation bar for any play we use.
It requires an xmlTree that has acts noted by div1 and scenes noted by div2, like the Folger
XML versions of the plays. It also requires a file to write to. Hopefully, this is the file
that we're writing to all along.
This will go through and find all the acts and scenes based on those assumptions. It will
write out the proper HTML to make a navbar based on those assumptions.
"""
def oldVisitAct(xmlTree, targetFile):
acts = xmlTree.findall(".//{http://www.tei-c.org/ns/1.0}div1")
baseIndent = " " * 14
secondLvl = baseIndent + " "
thirdLvl = secondLvl + " "
actPattern = baseIndent + '<div class="col-sm-4">\n' + secondLvl+ '<ul class="multi-column-dropdown">\n'
for act in acts:
targetFile.write(actPattern)
targetFile.write(thirdLvl+'<li><a href="#%s">Act ' % act.get('n'))
targetFile.write('%s</a></li>\n' % act.get('n'))
targetFile.write(thirdLvl+'<li class="divider"></li>\n')
scenes = act.findall(".//{http://www.tei-c.org/ns/1.0}div2")
for scene in scenes:
idNumber = act.get('n') + "." + scene.get('n')
targetFile.write(thirdLvl + '<li><a href="#'+idNumber)
targetFile.write('">Scene %s</a></li>\n' % scene.get('n'))
targetFile.write(secondLvl+'</ul>\n'+baseIndent+'</div>\n')
# After the third act, close the row div so the remaining acts start a new row
if int(act.get('n')) == 3:
targetFile.write(secondLvl+"</div>")
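# Roughly, for an act with n="1" and two scenes this emits (indentation elided):
#   <div class="col-sm-4">
#   <ul class="multi-column-dropdown">
#   <li><a href="#1">Act 1</a></li>
#   <li class="divider"></li>
#   <li><a href="#1.1">Scene 1</a></li>
#   <li><a href="#1.2">Scene 2</a></li>
#   </ul>
#   </div>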
def visitAct(content, targetFile):
indent = " "*4
acts = content.findall(".//{http://www.tei-c.org/ns/1.0}div1")
for act in acts:
targetFile.write(indent)
targetFile.write('\n<li><a href="#%s" class="act">Act' % act.get('n'))
targetFile.write(' %s</a></li>' % act.get('n'))
targetFile.write('\n<li class="divider"></li>')
scenes = act.findall(".//{http://www.tei-c.org/ns/1.0}div2")
for scene in scenes:
idNumber = act.get('n') + "." + scene.get('n')
targetFile.write(indent)
targetFile.write('\n<li><a href="#%s" class="scene">Scene ' % idNumber)
targetFile.write('%s</a></li>' % scene.get('n'))
dictionary = {}
header = open("header.html", "r")
lines = header.readlines()
target = open("index.html.erb", "w")
tree = ET.parse("data.xml").getroot()
formatting = open("../../assets/javascripts/application.js", "w")
formatHeader = open("../../assets/javascripts/applicationheader.txt", "r")
# Write the header to index file first, using the visitor parser at the appropriate place
for line in lines:
target.write(line)
if '<a class="navbar-brand" href="#">' in line:
title = tree.find(".//{http://www.tei-c.org/ns/1.0}title")
target.write(elemToString(title))
elif '<div class="row">' in line:
oldVisitAct(tree, target)
elif '<ul class="scroll-menu scroll-menu-2x">' in line:
visitAct(tree, target)
jsLines = formatHeader.readlines()
for line in jsLines:
formatting.write(line)
# Start by finding all the acts, noted with div1's
acts = tree.findall(".//{http://www.tei-c.org/ns/1.0}div1")
for act in acts:
target.write('\n<h1 id = %s>\nAct '% act.get('n'))
target.write('%s\n</h1>' % act.get('n'))
# Find all the scenes in the act. Each has the tag div2
scenes = act.findall(".//{http://www.tei-c.org/ns/1.0}div2")
for scene in scenes:
# idNumber is the id attribute so the navigation works.
# It reflects the ActNumber.SceneNumber numbering of Shakespeare plays
idNumber = act.get('n') + "." + scene.get('n')
target.write("\n<h2 id ="+idNumber+">\nScene %s\n</h2>" % scene.get('n'))
writeOneScene(scene, target, dictionary)
target.write("</div>\n</body>\n</html>")
target.close()
formatting.write("\n})")
chars = open("characters.html.erb", "w")
chars.write("<DOCTYPE! HTML>\n<html>")
chars.write('<center>\n<table style="width:50%">\n')
chars.write("<tr><th><b>Character Name</b></th><th><b>Modified Number of Lines</b></th>")
chars.write("<th><b>Original Number of Lines</b></th></tr>")
# In a table we output the name of the character from the dictionary
# and the number of lines they spoke
for key in dictionary:
chars.write('<tr><td>%s</td>' % key)
chars.write('<td>%d</td>' % dictionary[key])
chars.write('<td>%d</td></tr>' % dictionary[key])
chars.write("</table></center>")
chars.close()
|
|
# Copyright 2021 Google LLC.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
from absl import flags
from absl import logging
from absl.testing import parameterized
import tensorflow as tf
import numpy as np
import pandas as pd
from tensorflow_decision_forests.component.inspector import inspector as insp
from tensorflow_decision_forests.component import py_tree
from yggdrasil_decision_forests.metric import metric_pb2
from yggdrasil_decision_forests.model.gradient_boosted_trees import gradient_boosted_trees_pb2
from tensorflow_decision_forests import keras
ColumnType = insp.ColumnType
SimpleColumnSpec = insp.SimpleColumnSpec
CATEGORICAL = insp.ColumnType.CATEGORICAL
NUMERICAL = insp.ColumnType.NUMERICAL
def data_root_path() -> str:
return ""
def test_data_path() -> str:
return os.path.join(data_root_path(),
"external/ydf/yggdrasil_decision_forests/test_data")
def tmp_path() -> str:
return flags.FLAGS.test_tmpdir
def test_model_directory() -> str:
return os.path.join(test_data_path(), "model")
def test_dataset_directory() -> str:
return os.path.join(test_data_path(), "model")
class InspectorTest(parameterized.TestCase, tf.test.TestCase):
def test_classification_random_forest(self):
model_path = os.path.join(test_model_directory(), "adult_binary_class_rf")
# dataset_path = os.path.join(test_dataset_directory(), "adult_test.csv")
inspector = insp.make_inspector(model_path)
self.assertEqual(inspector.model_type(), "RANDOM_FOREST")
self.assertEqual(inspector.task, insp.Task.CLASSIFICATION)
self.assertEqual(inspector.num_trees(), 100)
self.assertEqual(inspector.label(),
SimpleColumnSpec("income", CATEGORICAL, 14))
self.assertEqual(
inspector.objective(),
py_tree.objective.ClassificationObjective(
label="income", classes=["<=50K", ">50K"]))
self.assertEqual(inspector.features(), [
SimpleColumnSpec("age", NUMERICAL, 0),
SimpleColumnSpec("workclass", CATEGORICAL, 1),
SimpleColumnSpec("fnlwgt", NUMERICAL, 2),
SimpleColumnSpec("education", CATEGORICAL, 3),
SimpleColumnSpec("education_num", CATEGORICAL, 4),
SimpleColumnSpec("marital_status", CATEGORICAL, 5),
SimpleColumnSpec("occupation", CATEGORICAL, 6),
SimpleColumnSpec("relationship", CATEGORICAL, 7),
SimpleColumnSpec("race", CATEGORICAL, 8),
SimpleColumnSpec("sex", CATEGORICAL, 9),
SimpleColumnSpec("capital_gain", NUMERICAL, 10),
SimpleColumnSpec("capital_loss", NUMERICAL, 11),
SimpleColumnSpec("hours_per_week", NUMERICAL, 12),
SimpleColumnSpec("native_country", CATEGORICAL, 13),
])
self.assertEqual(inspector.evaluation().num_examples, 22792)
self.assertAlmostEqual(
inspector.evaluation().accuracy, 0.86512, delta=0.0001)
self.assertLen(inspector.training_logs(), 2)
self.assertAlmostEqual(
inspector.training_logs()[-1].evaluation.accuracy,
0.86512,
delta=0.0001)
self.assertEqual(inspector.training_logs()[-1].num_trees,
inspector.num_trees())
self.assertEqual(inspector.winner_take_all_inference(), False)
variable_importances = inspector.variable_importances()
self.assertEqual(
variable_importances, {
"NUM_AS_ROOT": [
(SimpleColumnSpec("relationship", CATEGORICAL, 7), 33.0),
(SimpleColumnSpec("marital_status", CATEGORICAL, 5), 28.0),
(SimpleColumnSpec("capital_gain", NUMERICAL, 10), 15.0),
(SimpleColumnSpec("education_num", CATEGORICAL, 4), 11.0),
(SimpleColumnSpec("age", NUMERICAL, 0), 6.0),
(SimpleColumnSpec("education", CATEGORICAL, 3), 4.0),
(SimpleColumnSpec("occupation", CATEGORICAL, 6), 3.0)
]
})
num_nodes = 0
for _ in inspector.iterate_on_nodes():
num_nodes += 1
self.assertEqual(num_nodes, 125578)
tree = inspector.extract_tree(tree_idx=1) # Second tree
logging.info("Tree:\n%s", tree)
# Checked with :show_model --full_definition
self.assertEqual(tree.root.condition.feature.name, "capital_gain")
all_trees = inspector.extract_all_trees()
self.assertLen(all_trees, inspector.num_trees())
self.assertEqual(all_trees[1].root.condition.feature.name, "capital_gain")
tensorboard_logs = os.path.join(tmp_path(), "tensorboard_logs")
inspector.export_to_tensorboard(tensorboard_logs)
logging.info("@@@ tensorboard_logs: %s", tensorboard_logs)
def test_regression_random_forest(self):
model_path = os.path.join(test_model_directory(), "abalone_regression_rf")
# dataset_path = os.path.join(test_dataset_directory(), "abalone.csv")
inspector = insp.make_inspector(model_path)
self.assertEqual(inspector.model_type(), "RANDOM_FOREST")
self.assertEqual(inspector.task, insp.Task.REGRESSION)
self.assertEqual(inspector.num_trees(), 100)
self.assertEqual(inspector.label(), SimpleColumnSpec("Rings", NUMERICAL, 8))
self.assertEqual(inspector.evaluation().num_examples, 2940)
self.assertAlmostEqual(inspector.evaluation().rmse, 2.13434, delta=0.0001)
self.assertEqual(inspector.objective(),
py_tree.objective.RegressionObjective(label="Rings"))
num_nodes = 0
for _ in inspector.iterate_on_nodes():
num_nodes += 1
self.assertEqual(num_nodes, 88494)
tree = inspector.extract_tree(tree_idx=10)
logging.info("Tree:\n%s", tree)
def test_classification_gradient_boosted_tree(self):
n = 1000
features = np.random.normal(size=[n, 3])
labels = features[:, 0] + features[:, 1] + np.random.normal(size=n) >= 0
# Early stopping will trigger before all the trees are trained.
model = keras.GradientBoostedTreesModel(num_trees=10000)
model.fit(x=features, y=labels)
inspector = model.make_inspector()
# Because of early stopping, the training logs contain evaluations for
# more trees than are kept in the final model.
self.assertGreater(inspector.training_logs()[-1].num_trees,
inspector.num_trees())
# It is very unlikely that the model contains less than 10 trees.
self.assertGreater(inspector.num_trees(), 10)
self.assertAlmostEqual(inspector.bias, -0.023836, delta=0.0001)
self.assertEqual(inspector.num_trees_per_iter, 1)
matching_log = [
log for log in inspector.training_logs()
if log.num_trees == inspector.num_trees()
]
self.assertLen(matching_log, 1)
self.assertEqual(matching_log[0].evaluation, inspector.evaluation())
@parameterized.parameters(
{
"model": "adult_binary_class_gbdt",
"dataset": "adult_test.csv",
"model_name": "GRADIENT_BOOSTED_TREES",
"task": insp.Task.CLASSIFICATION
},
{
"model": "adult_binary_class_oblique_rf",
"dataset": "adult_test.csv",
"model_name": "RANDOM_FOREST",
"task": insp.Task.CLASSIFICATION
},
{
"model": "adult_binary_class_rf_discret_numerical",
"dataset": "adult_test.csv",
"model_name": "RANDOM_FOREST",
"task": insp.Task.CLASSIFICATION
},
{
"model": "sst_binary_class_gbdt",
"dataset": "sst_binary_test.csv",
"model_name": "GRADIENT_BOOSTED_TREES",
"task": insp.Task.CLASSIFICATION
},
{
"model": "synthetic_ranking_gbdt",
"dataset": "synthetic_ranking_test.csv",
"model_name": "GRADIENT_BOOSTED_TREES",
"task": insp.Task.RANKING
},
)
def test_generic(self, model, dataset, model_name, task):
model_path = os.path.join(test_model_directory(), model)
inspector = insp.make_inspector(model_path)
self.assertEqual(inspector.model_type(), model_name)
self.assertEqual(inspector.task, task)
logging.info("Variable importances:\n%s", inspector.variable_importances())
logging.info("Evaluation:\n%s", inspector.evaluation())
logging.info("Training logs:\n%s", inspector.training_logs())
num_nodes = 0
for _ in inspector.iterate_on_nodes():
num_nodes += 1
if num_nodes > 1000:
break
tree = inspector.extract_tree(tree_idx=2)
logging.info("Tree:\n%s", tree)
tensorboard_logs = os.path.join(tmp_path(), "tensorboard_logs")
inspector.export_to_tensorboard(tensorboard_logs)
def test_proto_evaluation_to_evaluation(self):
evaluation = metric_pb2.EvaluationResults()
evaluation.count_predictions_no_weight = 10
evaluation.count_predictions = 10
confusion = evaluation.classification.confusion
confusion.nrow = 3
confusion.ncol = 3
confusion.counts[:] = [2, 1, 1, 1, 3, 1, 1, 1, 4]
confusion.sum = 15
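# The diagonal (2, 3, 4) of the 3x3 confusion matrix counts the correct
# predictions, so the expected accuracy asserted below is (2 + 3 + 4) / 15.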
roc_0 = evaluation.classification.rocs.add()
roc_0.auc = 0.6
roc_1 = evaluation.classification.rocs.add()
roc_1.auc = 0.8
roc_2 = evaluation.classification.rocs.add()
roc_2.auc = 0.9
self.assertEqual(
insp._proto_evaluation_to_evaluation(evaluation),
insp.Evaluation(
num_examples=10, accuracy=(2 + 3 + 4) / 15.0, aucs=[0.6, 0.8, 0.9]))
evaluation = metric_pb2.EvaluationResults()
evaluation.count_predictions_no_weight = 10
evaluation.count_predictions = 10
evaluation.loss_value = 5
evaluation.regression.sum_square_error = 10
self.assertEqual(
insp._proto_evaluation_to_evaluation(evaluation),
insp.Evaluation(num_examples=10, loss=5.0, rmse=1.0))
evaluation = metric_pb2.EvaluationResults()
evaluation.count_predictions_no_weight = 10
evaluation.count_predictions = 10
evaluation.ranking.ndcg.value = 10
self.assertEqual(
insp._proto_evaluation_to_evaluation(evaluation),
insp.Evaluation(num_examples=10, ndcg=10.0))
def test_gbt_log_entry_to_evaluation(self):
logs = gradient_boosted_trees_pb2.TrainingLogs()
logs.secondary_metric_names[:] = ["accuracy", "NDCG@5"]
logs.entries.add() # One empty entry.
entry = logs.entries.add()
entry.validation_loss = 0.1
entry.validation_secondary_metrics[:] = [0.2, 0.3]
self.assertAlmostEqual(
insp._gbt_log_entry_to_evaluation(logs, 1).loss, 0.1, delta=0.0001)
self.assertAlmostEqual(
insp._gbt_log_entry_to_evaluation(logs, 1).accuracy, 0.2, delta=0.0001)
self.assertAlmostEqual(
insp._gbt_log_entry_to_evaluation(logs, 1).ndcg, 0.3, delta=0.0001)
if __name__ == "__main__":
tf.test.main()
|
|
# Copyright 2016 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
from __future__ import print_function
import mock
import charms_openstack.test_utils as test_utils
import charm.openstack.barbican as barbican
class Helper(test_utils.PatchHelper):
def setUp(self):
super().setUp()
self.patch_release(barbican.BarbicanCharm.release)
class TestCustomProperties(Helper):
def test_validate_keystone_api_version(self):
config = mock.MagicMock()
for v in ['2', '3', 'none']:
config.keystone_api_version = v
barbican.validate_keystone_api_version(config)
# ensure that it fails
with self.assertRaises(ValueError):
config.keystone_api_version = 'fail-me'
barbican.validate_keystone_api_version(config)
def test_barbican_api_keystone_pipeline(self):
config = mock.MagicMock()
config.keystone_api_version = '2'
self.assertEqual(barbican.barbican_api_keystone_pipeline(config),
'cors keystone_authtoken context apiapp')
config.keystone_api_version = ''
self.assertEqual(barbican.barbican_api_keystone_pipeline(config),
'cors keystone_v3_authtoken context apiapp')
def test_barbican_api_pipeline(self):
config = mock.MagicMock()
config.keystone_api_version = '2'
self.assertEqual(barbican.barbican_api_pipeline(config),
'cors keystone_authtoken context apiapp')
config.keystone_api_version = '3'
self.assertEqual(barbican.barbican_api_pipeline(config),
'cors keystone_v3_authtoken context apiapp')
config.keystone_api_version = 'none'
self.assertEqual(barbican.barbican_api_pipeline(config),
'cors unauthenticated-context apiapp')
def test_barbican_api_keystone_audit_pipeline(self):
config = mock.MagicMock()
config.keystone_api_version = '2'
self.assertEqual(barbican.barbican_api_keystone_audit_pipeline(config),
'keystone_authtoken context audit apiapp')
config.keystone_api_version = ''
self.assertEqual(barbican.barbican_api_keystone_audit_pipeline(config),
'keystone_v3_authtoken context audit apiapp')
class TestHSMProperties(Helper):
def setUp(self):
super().setUp()
self.data_none = {}
self.data_set = {
'library_path': 'a-path',
'login': 'a-login',
'slot_id': 'a-slot_id',
}
def test_library_path(self):
hsm = mock.MagicMock()
hsm.relation.plugin_data = self.data_none
self.assertEqual(barbican.library_path(hsm), '')
hsm.relation.plugin_data = self.data_set
self.assertEqual(barbican.library_path(hsm), 'a-path')
def test_login(self):
hsm = mock.MagicMock()
hsm.relation.plugin_data = self.data_none
self.assertEqual(barbican.login(hsm), '')
hsm.relation.plugin_data = self.data_set
self.assertEqual(barbican.login(hsm), 'a-login')
def test_slot_id(self):
hsm = mock.MagicMock()
hsm.relation.plugin_data = self.data_none
self.assertEqual(barbican.slot_id(hsm), '')
hsm.relation.plugin_data = self.data_set
self.assertEqual(barbican.slot_id(hsm), 'a-slot_id')
class TestBarbicanCharm(Helper):
def test_action_generate_mkek(self):
hsm = mock.MagicMock()
hsm.plugin_data = {
'library_path': 'path1',
'login': '1234',
'slot_id': 'slot1'
}
self.patch_object(barbican.hookenv, 'config')
config = {
'mkek-key-length': 5,
'label-mkek': 'the-label'
}
def cf(key=None):
if key is not None:
return config[key]
return config
self.config.side_effect = cf
self.patch_object(barbican.subprocess, 'check_call')
self.patch_object(barbican.hookenv, 'log')
# try generating an MKEK with no failure
c = barbican.BarbicanCharm()
c.action_generate_mkek(hsm)
cmd = [
'barbican-manage', 'hsm', 'gen_mkek',
'--library-path', 'path1',
'--passphrase', '1234',
'--slot-id', 'slot1',
'--length', '5',
'--label', 'the-label',
]
self.check_call.assert_called_once_with(cmd)
self.log.assert_called_once_with(
"barbican-mangage hsm gen_mkek succeeded")
# and check that a problem is logged if it goes wrong
def side_effect():
raise barbican.subprocess.CalledProcessError
self.check_call.side_effect = side_effect
self.log.reset_mock()
with self.assertRaises(Exception):
c.action_generate_mkek(hsm)
self.log.assert_called_once_with(
"barbican-manage hsm gen_mkek failed.")
def test_action_generate_hmac(self):
hsm = mock.MagicMock()
hsm.plugin_data = {
'library_path': 'path1',
'login': '1234',
'slot_id': 'slot1'
}
self.patch_object(barbican.hookenv, 'config')
config = {
'hmac-key-length': 5,
'label-hmac': 'the-label'
}
def cf(key=None):
if key is not None:
return config[key]
return config
self.config.side_effect = cf
self.patch_object(barbican.subprocess, 'check_call')
self.patch_object(barbican.hookenv, 'log')
# try generating an HMAC with no failure
c = barbican.BarbicanCharm()
c.action_generate_hmac(hsm)
cmd = [
'barbican-manage', 'hsm', 'gen_hmac',
'--library-path', 'path1',
'--passphrase', '1234',
'--slot-id', 'slot1',
'--length', '5',
'--label', 'the-label',
]
self.check_call.assert_called_once_with(cmd)
self.log.assert_called_once_with(
"barbican-mangage hsm gen_hmac succeeded")
# and check that a problem is logged if it goes wrong
def side_effect():
raise barbican.subprocess.CalledProcessError
self.check_call.side_effect = side_effect
self.log.reset_mock()
with self.assertRaises(Exception):
c.action_generate_hmac(hsm)
self.log.assert_called_once_with(
"barbican-manage hsm gen_hmac failed.")
|
|
#!/usr/bin/env python
"""
This program generates a pkl file containing a list of dictionaries.
Each dictionary in the list represents a condensedlet.
The dictionaries have the structure:
{'core': array of ints of core points,
'condensed': array of ints of condensed points,
'plume': array of ints of plume points,
'u_condensed': ,
'v_condensed': ,
'w_condensed': ,
'u_plume': ,
'v_plume': ,
'w_plume': }
pkl files are saved in pkl/ subdirectory indexed by time
"""
from __future__ import print_function
from __future__ import absolute_import
import numpy
from .utility_functions import index_to_zyx, expand_indexes
#-------------------
def expand_cloudlet(cloudlet, indexes, MC):
"""Given an array of indexes composing a cloudlet and a boolean mask
array indicating if each model index may be expanded into (True) or
not (False), expand the cloudlet into the permissible indices
adjacent to the cloudlet.
Returns an array of the indices composing the expanded cloudlet, and
an array of the remaining indices that may be expanded into.
"""
# Expand the cloudlet indexes into their nearest neighbours
expanded_cloudlet = expand_indexes(cloudlet, MC)
# Find the mask values of the expanded indexes
mask = indexes[expanded_cloudlet]
# Select the expanded cloudlet indexes that may be expanded into
new_points = expanded_cloudlet[mask]
# Remove the indices that have been added to the cloudlet
indexes[new_points] = False
return new_points, indexes
#---------------------
def expand_current_cloudlets(key, cloudlets, mask, MC):
cloudlet_points = []
for cloudlet in cloudlets:
cloudlet_points.append( [cloudlet[key]] )
cloudlet_expand_indexes = range(len(cloudlet_points))
while cloudlet_expand_indexes:
next_loop_cloudlet_list = []
# Go through the current list of cloudlets
for n in cloudlet_expand_indexes:
expanded_points, mask = expand_cloudlet(cloudlet_points[n][-1],
mask,
MC)
if len(expanded_points) > 0:
cloudlet_points[n].append(expanded_points)
next_loop_cloudlet_list.append(n)
cloudlet_expand_indexes = next_loop_cloudlet_list
for n, cloudlet in enumerate(cloudlet_points):
cloudlets[n][key] = numpy.hstack(cloudlet)
return cloudlets, mask
#---------------------
def make_new_cloudlets(key, mask, MC):
indexes = numpy.arange(MC['nx']*MC['ny']*MC['nz'])[mask]
cloudlets = []
for n in indexes:
if mask[n]:
mask[n] = False
cloudlet_indexes = [numpy.array((n,))]
# add_new_cloudlet
done = False
while not done:
new_indexes, mask = expand_cloudlet(cloudlet_indexes[-1], mask, MC)
if len(new_indexes) > 0:
cloudlet_indexes.append( new_indexes )
else:
# If the number of points in the cloudlet has not changed, we are done
done = True
cloudlet = {}
cloudlet[key] = numpy.hstack(cloudlet_indexes)
cloudlets.append( cloudlet )
return cloudlets
#-----------------
def find_mean_cloudlet_velocity(cloudlets,
u, v, w,
MC):
dx, dy, dz, dt = MC['dx'], MC['dy'], MC['dz'], MC['dt']
ug, vg = MC['ug'], MC['vg']
for cloudlet in cloudlets:
if len(cloudlet['condensed']) > 0:
K, J, I = index_to_zyx( cloudlet['condensed'], MC )
# find the mean motion of the cloudlet
u_mean = u[K, J, I].mean()-ug
v_mean = v[K, J, I].mean()-vg
w_mean = w[K, J, I].mean()
cloudlet['u_condensed'] = round(u_mean*dt/dx)
cloudlet['v_condensed'] = round(v_mean*dt/dy)
cloudlet['w_condensed'] = round(w_mean*dt/dz)
else:
cloudlet['u_condensed'] = 0.
cloudlet['v_condensed'] = 0.
cloudlet['w_condensed'] = 0.
K, J, I = index_to_zyx( cloudlet['plume'], MC )
# find the mean motion of the cloudlet
u_mean = u[K, J, I].mean()-ug
v_mean = v[K, J, I].mean()-vg
w_mean = w[K, J, I].mean()
cloudlet['u_plume'] = round(u_mean*dt/dx)
cloudlet['v_plume'] = round(v_mean*dt/dy)
cloudlet['w_plume'] = round(w_mean*dt/dz)
return cloudlets
#----------------------------
def generate_cloudlets(core, condensed, plume, u, v, w, MC):
# find the indexes of all the core and plume points
core = core.flatten()
condensed = condensed.flatten()
plume = plume.flatten()
plume[condensed] = False
condensed[core] = False
# Create the list that will hold the cloudlets
cloudlets = make_new_cloudlets('core', core, MC)
for cloudlet in cloudlets:
cloudlet['condensed'] = cloudlet['core'][:]
ncore = len(cloudlets)
print("\t%d core cloudlets" % ncore)
cloudlets, condensed = expand_current_cloudlets('condensed',
cloudlets,
condensed,
MC)
# Add any remaining points that have not been added to cloudlets
# as new cloudlets.
condensed_cloudlets = make_new_cloudlets('condensed', condensed, MC)
for cloudlet in condensed_cloudlets:
cloudlet['core'] = numpy.array([], dtype=numpy.int)
cloudlets.append(cloudlet)
for cloudlet in cloudlets:
cloudlet['plume'] = cloudlet['condensed'][:]
ncondensed = len(cloudlets)
print("\t%d condensed cloudlets" % (ncondensed-ncore))
cloudlets, plume = expand_current_cloudlets('plume',
cloudlets,
plume,
MC)
# Add any remaining points that have not been added to cloudlets
# as new cloudlets.
plume_cloudlets = make_new_cloudlets('plume', plume, MC)
for cloudlet in plume_cloudlets:
cloudlet['core'] = numpy.array([], dtype=numpy.int)
cloudlet['condensed'] = numpy.array([], dtype=numpy.int)
cloudlets.append(cloudlet)
nplume = len(cloudlets)
print("\t%d plume cloudlets" % (nplume-ncondensed))
cloudlets = find_mean_cloudlet_velocity(cloudlets,
u, v, w,
MC)
return cloudlets
if __name__ == "__main__":
import doctest
doctest.testmod()
|
|
# Copyright 2013 IBM Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from oslo.config import cfg
from oslotest import mockpatch
import testtools
from tempest import config
from tempest import exceptions
from tempest import test
from tempest.tests import base
from tempest.tests import fake_config
class BaseDecoratorsTest(base.TestCase):
def setUp(self):
super(BaseDecoratorsTest, self).setUp()
self.config_fixture = self.useFixture(fake_config.ConfigFixture())
self.stubs.Set(config, 'TempestConfigPrivate', fake_config.FakePrivate)
class TestAttrDecorator(BaseDecoratorsTest):
def _test_attr_helper(self, expected_attrs, **decorator_args):
@test.attr(**decorator_args)
def foo():
pass
# The test.attr decorator sets the __testtools_attrs attribute only when the
# 'type' argument is given, so only check it in that case.
if 'type' in decorator_args:
# this is what testtools sets
self.assertEqual(getattr(foo, '__testtools_attrs'),
set(expected_attrs))
def test_attr_without_type(self):
self._test_attr_helper(expected_attrs='baz', bar='baz')
def test_attr_decorator_with_smoke_type(self):
# smoke passed as type, so smoke and gate must have been set.
self._test_attr_helper(expected_attrs=['smoke', 'gate'], type='smoke')
def test_attr_decorator_with_list_type(self):
# if 'smoke' is in the type list we get the original list of types plus 'gate'
self._test_attr_helper(expected_attrs=['smoke', 'foo', 'gate'],
type=['smoke', 'foo'])
def test_attr_decorator_with_unknown_type(self):
self._test_attr_helper(expected_attrs=['foo'], type='foo')
def test_attr_decorator_with_duplicated_type(self):
self._test_attr_helper(expected_attrs=['foo'], type=['foo', 'foo'])
class TestServicesDecorator(BaseDecoratorsTest):
def _test_services_helper(self, *decorator_args):
class TestFoo(test.BaseTestCase):
@test.services(*decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
self.assertEqual(set(decorator_args), getattr(t.test_bar,
'__testtools_attrs'))
self.assertEqual(t.test_bar(), 0)
def test_services_decorator_with_single_service(self):
self._test_services_helper('compute')
def test_services_decorator_with_multiple_services(self):
self._test_services_helper('compute', 'network')
def test_services_decorator_with_duplicated_service(self):
self._test_services_helper('compute', 'compute')
def test_services_decorator_with_invalid_service(self):
self.assertRaises(exceptions.InvalidServiceTag,
self._test_services_helper, 'compute',
'bad_service')
def test_services_decorator_with_service_valid_and_unavailable(self):
self.useFixture(mockpatch.PatchObject(test.CONF.service_available,
'cinder', False))
self.assertRaises(testtools.TestCase.skipException,
self._test_services_helper, 'compute',
'volume')
def test_services_list(self):
service_list = test.get_service_list()
for service in service_list:
try:
self._test_services_helper(service)
except exceptions.InvalidServiceTag:
self.fail('%s is not listed in the valid service tag list'
% service)
except KeyError:
# NOTE(mtreinish): This condition is to test for an entry in
# the outer decorator list but not in the service_list dict.
# However, because we're looping over the service_list dict
# it's unlikely we'll trigger this. So manual review is still
# needed for the list in the outer decorator.
self.fail('%s is in the list of valid service tags but there '
'is no corresponding entry in the dict returned from'
' get_service_list()' % service)
except testtools.TestCase.skipException:
# Test didn't raise an exception because of an incorrect list
# entry so move onto the next entry
continue
class TestStressDecorator(BaseDecoratorsTest):
def _test_stresstest_helper(self, expected_frequency='process',
expected_inheritance=False,
**decorator_args):
@test.stresstest(**decorator_args)
def foo():
pass
self.assertEqual(getattr(foo, 'st_class_setup_per'),
expected_frequency)
self.assertEqual(getattr(foo, 'st_allow_inheritance'),
expected_inheritance)
self.assertEqual(set(['stress']), getattr(foo, '__testtools_attrs'))
def test_stresstest_decorator_default(self):
self._test_stresstest_helper()
def test_stresstest_decorator_class_setup_frequency(self):
self._test_stresstest_helper('process', class_setup_per='process')
def test_stresstest_decorator_class_setup_frequency_non_default(self):
self._test_stresstest_helper(expected_frequency='application',
class_setup_per='application')
def test_stresstest_decorator_set_frequency_and_inheritance(self):
self._test_stresstest_helper(expected_frequency='application',
expected_inheritance=True,
class_setup_per='application',
allow_inheritance=True)
class TestSkipBecauseDecorator(BaseDecoratorsTest):
def _test_skip_because_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
_interface = 'json'
@test.skip_because(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
# assert that test_bar returned 0
self.assertEqual(TestFoo('test_bar').test_bar(), 0)
def test_skip_because_bug(self):
self._test_skip_because_helper(bug='12345')
def test_skip_because_bug_and_interface_match(self):
self._test_skip_because_helper(bug='12346', interface='json')
def test_skip_because_bug_interface_not_match(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12347', interface='xml')
def test_skip_because_bug_and_condition_true(self):
self._test_skip_because_helper(bug='12348', condition=True)
def test_skip_because_bug_and_condition_false(self):
self._test_skip_because_helper(expected_to_skip=False,
bug='12349', condition=False)
def test_skip_because_bug_condition_false_and_interface_match(self):
"""
Ensure that only the condition is evaluated when both parameters
are passed.
"""
self._test_skip_because_helper(expected_to_skip=False,
bug='12350', condition=False,
interface='json')
def test_skip_because_bug_condition_true_and_interface_not_match(self):
"""
Ensure that only the condition is evaluated when both parameters
are passed.
"""
self._test_skip_because_helper(bug='12351', condition=True,
interface='xml')
def test_skip_because_bug_without_bug_never_skips(self):
"""Never skip without a bug parameter."""
self._test_skip_because_helper(expected_to_skip=False,
condition=True)
self._test_skip_because_helper(expected_to_skip=False,
interface='json')
def test_skip_because_invalid_bug_number(self):
"""Raise ValueError if with an invalid bug number"""
self.assertRaises(ValueError, self._test_skip_because_helper,
bug='critical_bug')
class TestRequiresExtDecorator(BaseDecoratorsTest):
def setUp(self):
super(TestRequiresExtDecorator, self).setUp()
cfg.CONF.set_default('api_extensions', ['enabled_ext', 'another_ext'],
'compute-feature-enabled')
def _test_requires_ext_helper(self, expected_to_skip=True,
**decorator_args):
class TestFoo(test.BaseTestCase):
@test.requires_ext(**decorator_args)
def test_bar(self):
return 0
t = TestFoo('test_bar')
if expected_to_skip:
self.assertRaises(testtools.TestCase.skipException, t.test_bar)
else:
self.assertEqual(t.test_bar(), 0)
def test_requires_ext_decorator(self):
self._test_requires_ext_helper(expected_to_skip=False,
extension='enabled_ext',
service='compute')
def test_requires_ext_decorator_disabled_ext(self):
self._test_requires_ext_helper(extension='disabled_ext',
service='compute')
def test_requires_ext_decorator_with_all_ext_enabled(self):
# disable fixture so the default (all) is used.
self.config_fixture.cleanUp()
self._test_requires_ext_helper(expected_to_skip=False,
extension='random_ext',
service='compute')
def test_requires_ext_decorator_bad_service(self):
self.assertRaises(KeyError,
self._test_requires_ext_helper,
extension='enabled_ext',
service='bad_service')
class TestSimpleNegativeDecorator(BaseDecoratorsTest):
@test.SimpleNegativeAutoTest
class FakeNegativeJSONTest(test.NegativeAutoTest):
_schema = {}
def test_testfunc_exist(self):
self.assertIn("test_fake_negative", dir(self.FakeNegativeJSONTest))
@mock.patch('tempest.test.NegativeAutoTest.execute')
def test_testfunc_calls_execute(self, mock):
obj = self.FakeNegativeJSONTest("test_fake_negative")
self.assertIn("test_fake_negative", dir(obj))
obj.test_fake_negative()
mock.assert_called_once_with(self.FakeNegativeJSONTest._schema)
|
|
"""ISO 8601 date time string parsing
Basic usage:
>>> import iso8601
>>> iso8601.parse_date("2007-01-25T12:00:00Z")
datetime.datetime(2007, 1, 25, 12, 0, tzinfo=<iso8601.iso8601.Utc ...>)
>>>
Copyright (c) 2007 - 2013 Michael Twomey
Permission is hereby granted, free of charge, to any person obtaining a
copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be included
in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS
OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT.
IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY
CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT,
TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE
SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from datetime import (
datetime,
timedelta,
tzinfo
)
from decimal import Decimal
import logging
import sys
import re
__all__ = ["parse_date", "ParseError"]
LOG = logging.getLogger(__name__)
if sys.version_info >= (3, 0, 0):
_basestring = str
else:
_basestring = basestring
# Adapted from http://delete.me.uk/2005/03/iso8601.html
ISO8601_REGEX = re.compile(
r"""
(?P<year>[0-9]{4})
(-{0,1}(?P<month>[0-9]{1,2})){1}
(-{0,1}(?P<day>[0-9]{1,2})){1}
(
(?P<separator>[ T])
(?P<hour>[0-9]{2})
(:{0,1}(?P<minute>[0-9]{2})){0,1}
(
:{0,1}(?P<second>[0-9]{1,2})
(\.(?P<second_fraction>[0-9]+)){0,1}
){0,1}
(?P<timezone>
Z
|
(
(?P<tz_sign>[-+])
(?P<tz_hour>[0-9]{2})
:{0,1}
(?P<tz_minute>[0-9]{2}){0,1}
)
){0,1}
){0,1}
""",
re.VERBOSE
)
class ParseError(Exception):
"""Raised when there is a problem parsing a date string"""
# Yoinked from python docs
ZERO = timedelta(0)
class Utc(tzinfo):
"""UTC
"""
def utcoffset(self, dt):
return ZERO
def tzname(self, dt):
return "UTC"
def dst(self, dt):
return ZERO
UTC = Utc()
class FixedOffset(tzinfo):
"""Fixed offset in hours and minutes from UTC
"""
def __init__(self, offset_hours, offset_minutes, name):
self.__offset_hours = offset_hours # Keep for later __getinitargs__
self.__offset_minutes = offset_minutes # Keep for later __getinitargs__
self.__offset = timedelta(hours=offset_hours, minutes=offset_minutes)
self.__name = name
def __eq__(self, other):
if isinstance(other, FixedOffset):
return (
(other.__offset == self.__offset)
and
(other.__name == self.__name)
)
if isinstance(other, tzinfo):
return other == self
return False
def __getinitargs__(self):
return (self.__offset_hours, self.__offset_minutes, self.__name)
def utcoffset(self, dt):
return self.__offset
def tzname(self, dt):
return self.__name
def dst(self, dt):
return ZERO
def __repr__(self):
return "<FixedOffset %r %r>" % (self.__name, self.__offset)
def to_int(d, key, default_to_zero=False, default=None):
"""Pull a value from the dict and convert to int
:param default_to_zero: If the value is None or empty, treat it as zero
:param default: If the value is missing in the dict use this default
"""
value = d.get(key, default)
LOG.debug("Got %r for %r with default %r", value, key, default)
if (value in ["", None]) and default_to_zero:
return 0
if value is None:
raise ParseError("Unable to read %s from %s" % (key, d))
return int(value)
def parse_timezone(matches, default_timezone=UTC):
"""Parses ISO 8601 time zone specs into tzinfo offsets
"""
if matches["timezone"] == "Z":
return UTC
# This isn't strictly correct, but it's common to encounter dates without
# timezones so I'll assume the default (which defaults to UTC).
# Addresses issue 4.
if matches["timezone"] is None:
return default_timezone
sign = matches["tz_sign"]
hours = to_int(matches, "tz_hour")
minutes = to_int(matches, "tz_minute", default_to_zero=True)
description = "%s%02d:%02d" % (sign, hours, minutes)
if sign == "-":
hours = -hours
minutes = -minutes
return FixedOffset(hours, minutes, description)
def parse_date(datestring, default_timezone=UTC):
"""Parses ISO 8601 dates into datetime objects
The timezone is parsed from the date string. However it is quite common to
have dates without a timezone (not strictly correct). In this case the
default timezone specified in default_timezone is used. This is UTC by
default.
"""
if not isinstance(datestring, _basestring):
raise ParseError("Expecting a string %r" % datestring)
m = ISO8601_REGEX.match(datestring)
if not m:
raise ParseError("Unable to parse date string %r" % datestring)
groups = m.groupdict()
LOG.debug("Parsed %s into %s with default timezone %s", datestring, groups, default_timezone)
tz = parse_timezone(groups, default_timezone=default_timezone)
groups["second_fraction"] = int(Decimal("0.%s" % (groups["second_fraction"] or 0)) * Decimal("1000000.0"))
try:
return datetime(
year=to_int(groups, "year"),
month=to_int(groups, "month"),
day=to_int(groups, "day"),
hour=to_int(groups, "hour", default_to_zero=True),
minute=to_int(groups, "minute", default_to_zero=True),
second=to_int(groups, "second", default_to_zero=True),
microsecond=groups["second_fraction"],
tzinfo=tz,
)
except Exception as e:
raise ParseError(e)
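# Illustrative usage sketch (not part of the original module), exercising the
# default-timezone behaviour described in parse_date's docstring.
if __name__ == "__main__":  # pragma: no cover
    # An explicit offset in the string is honoured and becomes a FixedOffset.
    print(parse_date("2007-01-25T12:00:00+01:00"))
    # A naive string falls back to the default timezone (UTC unless overridden).
    print(parse_date("2007-01-25T12:00:00"))
    print(parse_date("2007-01-25T12:00:00",
                     default_timezone=FixedOffset(2, 0, "+02:00")))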
|
|
from __future__ import unicode_literals
from django.contrib import messages
from django.contrib.auth.decorators import permission_required
from django.contrib.auth.mixins import PermissionRequiredMixin
from django.db import transaction
from django.db.models import Count
from django.shortcuts import get_object_or_404, redirect, render
from django.urls import reverse
from django.views.generic import View
from extras.models import Graph, GRAPH_TYPE_PROVIDER
from utilities.forms import ConfirmationForm
from utilities.views import (
BulkDeleteView, BulkEditView, BulkImportView, ObjectDeleteView, ObjectEditView, ObjectListView,
)
from . import filters, forms, tables
from .models import Circuit, CircuitTermination, CircuitType, Provider, TERM_SIDE_A, TERM_SIDE_Z
#
# Providers
#
class ProviderListView(ObjectListView):
queryset = Provider.objects.annotate(count_circuits=Count('circuits'))
filter = filters.ProviderFilter
filter_form = forms.ProviderFilterForm
table = tables.ProviderDetailTable
template_name = 'circuits/provider_list.html'
class ProviderView(View):
def get(self, request, slug):
provider = get_object_or_404(Provider, slug=slug)
circuits = Circuit.objects.filter(provider=provider).select_related(
'type', 'tenant'
).prefetch_related(
'terminations__site'
)
show_graphs = Graph.objects.filter(type=GRAPH_TYPE_PROVIDER).exists()
return render(request, 'circuits/provider.html', {
'provider': provider,
'circuits': circuits,
'show_graphs': show_graphs,
})
class ProviderCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'circuits.add_provider'
model = Provider
form_class = forms.ProviderForm
template_name = 'circuits/provider_edit.html'
default_return_url = 'circuits:provider_list'
class ProviderEditView(ProviderCreateView):
permission_required = 'circuits.change_provider'
class ProviderDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'circuits.delete_provider'
model = Provider
default_return_url = 'circuits:provider_list'
class ProviderBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'circuits.add_provider'
model_form = forms.ProviderCSVForm
table = tables.ProviderTable
default_return_url = 'circuits:provider_list'
class ProviderBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'circuits.change_provider'
cls = Provider
filter = filters.ProviderFilter
table = tables.ProviderTable
form = forms.ProviderBulkEditForm
default_return_url = 'circuits:provider_list'
class ProviderBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'circuits.delete_provider'
cls = Provider
filter = filters.ProviderFilter
table = tables.ProviderTable
default_return_url = 'circuits:provider_list'
#
# Circuit Types
#
class CircuitTypeListView(ObjectListView):
queryset = CircuitType.objects.annotate(circuit_count=Count('circuits'))
table = tables.CircuitTypeTable
template_name = 'circuits/circuittype_list.html'
class CircuitTypeCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'circuits.add_circuittype'
model = CircuitType
form_class = forms.CircuitTypeForm
def get_return_url(self, request, obj):
return reverse('circuits:circuittype_list')
class CircuitTypeEditView(CircuitTypeCreateView):
permission_required = 'circuits.change_circuittype'
class CircuitTypeBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'circuits.delete_circuittype'
cls = CircuitType
queryset = CircuitType.objects.annotate(circuit_count=Count('circuits'))
table = tables.CircuitTypeTable
default_return_url = 'circuits:circuittype_list'
#
# Circuits
#
class CircuitListView(ObjectListView):
queryset = Circuit.objects.select_related('provider', 'type', 'tenant').prefetch_related('terminations__site')
filter = filters.CircuitFilter
filter_form = forms.CircuitFilterForm
table = tables.CircuitTable
template_name = 'circuits/circuit_list.html'
class CircuitView(View):
def get(self, request, pk):
circuit = get_object_or_404(Circuit.objects.select_related('provider', 'type', 'tenant__group'), pk=pk)
termination_a = CircuitTermination.objects.select_related(
'site__region', 'interface__device'
).filter(
circuit=circuit, term_side=TERM_SIDE_A
).first()
termination_z = CircuitTermination.objects.select_related(
'site__region', 'interface__device'
).filter(
circuit=circuit, term_side=TERM_SIDE_Z
).first()
return render(request, 'circuits/circuit.html', {
'circuit': circuit,
'termination_a': termination_a,
'termination_z': termination_z,
})
class CircuitCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'circuits.add_circuit'
model = Circuit
form_class = forms.CircuitForm
template_name = 'circuits/circuit_edit.html'
default_return_url = 'circuits:circuit_list'
class CircuitEditView(CircuitCreateView):
permission_required = 'circuits.change_circuit'
class CircuitDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'circuits.delete_circuit'
model = Circuit
default_return_url = 'circuits:circuit_list'
class CircuitBulkImportView(PermissionRequiredMixin, BulkImportView):
permission_required = 'circuits.add_circuit'
model_form = forms.CircuitCSVForm
table = tables.CircuitTable
default_return_url = 'circuits:circuit_list'
class CircuitBulkEditView(PermissionRequiredMixin, BulkEditView):
permission_required = 'circuits.change_circuit'
cls = Circuit
queryset = Circuit.objects.select_related('provider', 'type', 'tenant').prefetch_related('terminations__site')
filter = filters.CircuitFilter
table = tables.CircuitTable
form = forms.CircuitBulkEditForm
default_return_url = 'circuits:circuit_list'
class CircuitBulkDeleteView(PermissionRequiredMixin, BulkDeleteView):
permission_required = 'circuits.delete_circuit'
cls = Circuit
queryset = Circuit.objects.select_related('provider', 'type', 'tenant').prefetch_related('terminations__site')
filter = filters.CircuitFilter
table = tables.CircuitTable
default_return_url = 'circuits:circuit_list'
@permission_required('circuits.change_circuittermination')
def circuit_terminations_swap(request, pk):
circuit = get_object_or_404(Circuit, pk=pk)
termination_a = CircuitTermination.objects.filter(circuit=circuit, term_side=TERM_SIDE_A).first()
termination_z = CircuitTermination.objects.filter(circuit=circuit, term_side=TERM_SIDE_Z).first()
if not termination_a and not termination_z:
messages.error(request, "No terminations have been defined for circuit {}.".format(circuit))
return redirect('circuits:circuit', pk=circuit.pk)
if request.method == 'POST':
form = ConfirmationForm(request.POST)
if form.is_valid():
if termination_a and termination_z:
# Use a placeholder to avoid an IntegrityError on the (circuit, term_side) unique constraint
with transaction.atomic():
termination_a.term_side = '_'
termination_a.save()
termination_z.term_side = 'A'
termination_z.save()
termination_a.term_side = 'Z'
termination_a.save()
elif termination_a:
termination_a.term_side = 'Z'
termination_a.save()
else:
termination_z.term_side = 'A'
termination_z.save()
messages.success(request, "Swapped terminations for circuit {}.".format(circuit))
return redirect('circuits:circuit', pk=circuit.pk)
else:
form = ConfirmationForm()
return render(request, 'circuits/circuit_terminations_swap.html', {
'circuit': circuit,
'termination_a': termination_a,
'termination_z': termination_z,
'form': form,
'panel_class': 'default',
'button_class': 'primary',
'return_url': circuit.get_absolute_url(),
})
#
# Circuit terminations
#
class CircuitTerminationCreateView(PermissionRequiredMixin, ObjectEditView):
permission_required = 'circuits.add_circuittermination'
model = CircuitTermination
form_class = forms.CircuitTerminationForm
template_name = 'circuits/circuittermination_edit.html'
def alter_obj(self, obj, request, url_args, url_kwargs):
if 'circuit' in url_kwargs:
obj.circuit = get_object_or_404(Circuit, pk=url_kwargs['circuit'])
return obj
def get_return_url(self, request, obj):
return obj.circuit.get_absolute_url()
class CircuitTerminationEditView(CircuitTerminationCreateView):
permission_required = 'circuits.change_circuittermination'
class CircuitTerminationDeleteView(PermissionRequiredMixin, ObjectDeleteView):
permission_required = 'circuits.delete_circuittermination'
model = CircuitTermination
|
|
from __future__ import absolute_import
from itertools import imap
from django.conf import settings
from dimagi.ext.couchdbkit import *
import re
from dimagi.utils.couch.database import iter_docs
from dimagi.utils.decorators.memoized import memoized
from corehq.apps.cachehq.mixins import QuickCachedDocumentMixin
from corehq.apps.users.models import CouchUser, CommCareUser
from dimagi.utils.couch.undo import UndoableDocument, DeleteDocRecord, DELETED_SUFFIX
from datetime import datetime
from corehq.apps.groups.dbaccessors import (
get_group_ids_by_domain,
group_by_domain,
refresh_group_views,
stale_group_by_name,
)
from corehq.apps.locations.models import SQLLocation
from corehq.apps.groups.exceptions import CantSaveException
from corehq.util.quickcache import quickcache
dt_no_Z_re = re.compile('^\d\d\d\d-\d\d-\d\dT\d\d:\d\d:\d\d(\.\d\d\d\d\d\d)?$')
class Group(QuickCachedDocumentMixin, UndoableDocument):
"""
The main use case for these 'groups' of users is currently
so that we can break down reports by arbitrary regions.
(Things like who sees what reports are determined by permissions.)
"""
domain = StringProperty()
name = StringProperty()
# a list of user ids for users
users = ListProperty()
path = ListProperty()
case_sharing = BooleanProperty()
reporting = BooleanProperty(default=True)
last_modified = DateTimeProperty()
# custom data can live here
metadata = DictProperty()
@classmethod
def wrap(cls, data):
last_modified = data.get('last_modified')
# if it's missing a Z because of the Aug. 2014 migration
# that added this in iso_format() without Z, then add a Z
if last_modified and dt_no_Z_re.match(last_modified):
data['last_modified'] += 'Z'
return super(Group, cls).wrap(data)
def save(self, *args, **kwargs):
self.last_modified = datetime.utcnow()
super(Group, self).save(*args, **kwargs)
refresh_group_views()
@classmethod
def save_docs(cls, docs, use_uuids=True, all_or_nothing=False):
utcnow = datetime.utcnow()
for doc in docs:
doc['last_modified'] = utcnow
super(Group, cls).save_docs(docs, use_uuids, all_or_nothing)
refresh_group_views()
bulk_save = save_docs
def delete(self):
super(Group, self).delete()
refresh_group_views()
@classmethod
def delete_docs(cls, docs, **params):
super(Group, cls).delete_docs(docs, **params)
refresh_group_views()
bulk_delete = delete_docs
def clear_caches(self):
super(Group, self).clear_caches()
self.by_domain.clear(self.__class__, self.domain)
self.ids_by_domain.clear(self.__class__, self.domain)
def add_user(self, couch_user_id, save=True):
if not isinstance(couch_user_id, basestring):
couch_user_id = couch_user_id.user_id
if couch_user_id not in self.users:
self.users.append(couch_user_id)
if save:
self.save()
def remove_user(self, couch_user_id, save=True):
if not isinstance(couch_user_id, basestring):
couch_user_id = couch_user_id.user_id
if couch_user_id in self.users:
for i in range(0,len(self.users)):
if self.users[i] == couch_user_id:
del self.users[i]
if save:
self.save()
return
def add_group(self, group):
group.add_to_group(self)
def add_to_group(self, group):
"""
food = Food(path=[food_id])
fruit = Fruit(path=[fruit_id])
If fruit.add_to_group(food._id):
then update fruit.path to be [food_id, fruit_id]
"""
group_id = group._id
if group_id in self.path:
raise Exception("Group %s is already a member of %s" % (
self.get_id,
group_id,
))
new_path = [group_id]
new_path.extend(self.path)
self.path = new_path
self.save()
def remove_group(self, group):
group.remove_from_group(self)
def remove_from_group(self, group):
"""
food = Food(path=[food_id])
fruit = Fruit(path=[food_id, fruit_id])
If fruit.remove_from_group(food._id):
then update fruit.path to be [fruit_id]
"""
group_id = group._id
if group_id not in self.path:
raise Exception("Group %s is not a member of %s" % (
self.get_id,
group_id
))
index = 0
for i in range(0,len(self.path)):
if self.path[i] == group_id:
index = i
break
self.path = self.path[index:]
self.save()
def get_user_ids(self, is_active=True):
return [user.user_id for user in self.get_users(is_active=is_active)]
@memoized
def get_users(self, is_active=True, only_commcare=False):
def is_relevant_user(user):
if user.is_deleted():
return False
if only_commcare and user.__class__ != CommCareUser().__class__:
return False
if is_active and not user.is_active:
return False
return True
users = imap(CouchUser.wrap_correctly, iter_docs(self.get_db(), self.users))
return filter(is_relevant_user, users)
@memoized
def get_static_user_ids(self, is_active=True):
return [user.user_id for user in self.get_static_users(is_active)]
@memoized
def get_static_users(self, is_active=True):
return self.get_users(is_active)
@classmethod
@quickcache(['cls.__name__', 'domain'])
def by_domain(cls, domain):
return group_by_domain(domain)
@classmethod
def choices_by_domain(cls, domain):
group_ids = cls.ids_by_domain(domain)
group_choices = []
for group_doc in iter_docs(cls.get_db(), group_ids):
group_choices.append((group_doc['_id'], group_doc['name']))
return group_choices
@classmethod
@quickcache(['cls.__name__', 'domain'])
def ids_by_domain(cls, domain):
return get_group_ids_by_domain(domain)
@classmethod
def by_name(cls, domain, name, one=True):
result = stale_group_by_name(domain, name)
if one and result:
return result[0]
else:
return result
@classmethod
def by_user(cls, user_or_user_id, wrap=True, include_names=False):
try:
user_id = user_or_user_id.user_id
except AttributeError:
user_id = user_or_user_id
results = cls.view('groups/by_user', key=user_id, include_docs=wrap)
if wrap:
return results
if include_names:
return [dict(group_id=r['id'], name=r['value'][1]) for r in results]
else:
return [r['id'] for r in results]
@classmethod
def get_case_sharing_groups(cls, domain, wrap=True):
all_groups = cls.by_domain(domain)
if wrap:
groups = [group for group in all_groups if group.case_sharing]
groups.extend([
location.case_sharing_group_object() for location in
SQLLocation.objects.filter(domain=domain,
location_type__shares_cases=True)
])
return groups
else:
return [group._id for group in all_groups if group.case_sharing]
@classmethod
def get_reporting_groups(cls, domain):
key = ['^Reporting', domain]
return cls.view(
'groups/by_name',
startkey=key,
endkey=key + [{}],
include_docs=True,
stale=settings.COUCH_STALE_QUERY,
).all()
def create_delete_record(self, *args, **kwargs):
return DeleteGroupRecord(*args, **kwargs)
@property
def display_name(self):
if self.name:
return self.name
else:
return "[No Name]"
@classmethod
def user_in_group(cls, user_id, group_id):
if not user_id or not group_id:
return False
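        # The view query below is restricted to rows emitted by the group doc
        # itself (startkey_docid/endkey_docid), so the count is 0 or 1 unless
        # the group lists the same user twice.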
c = cls.get_db().view('groups/by_user',
key=user_id,
startkey_docid=group_id,
endkey_docid=group_id
).count()
if c == 0:
return False
elif c == 1:
return True
else:
raise Exception(
"This should just logically not be possible unless the group "
"has the user in there twice"
)
def is_member_of(self, domain):
return self.domain == domain
@property
def is_deleted(self):
return self.doc_type.endswith(DELETED_SUFFIX)
def __repr__(self):
return ("Group(domain={self.domain!r}, name={self.name!r}, "
"case_sharing={self.case_sharing!r})").format(self=self)
class UnsavableGroup(Group):
def save(self, *args, **kwargs):
raise CantSaveException("Instances of UnsavableGroup cannot be saved")
class DeleteGroupRecord(DeleteDocRecord):
def get_doc(self):
return Group.get(self.doc_id)
|
|
#!/usr/bin/env python
#
# University of California, Santa Barbara
# Computer Science
#
# CS16
# This script grabs a directory from the TURNIN dir
# and uncompresses all the work turned in by the students
# into another output directory, usually given as an argument.
#
# Victor Fragoso <[email protected]>
# 02/11/10
# Stripped single quotes from names (e.g. "O'Conner") to keep shell cmd
# calls from bombing.
# Jasen Hall <[email protected]>
# 11/5/13
#
# Last Mod: 11/5/13
# WARNING: Absolutely no WARRANTY for this script. Sorry for the inconvenience.
#
import sys
import re
import os
import glob
import subprocess
#Configuration File
CONF_FILE_NAME = 'conf.txt'
#PROPERTIES in ConfFile
CLASS_HOME='class.home'
TURNIN_DIR='turnin.dir'
WORK_DIR='work.dir'
WORK_POSTFIX='work.dir.postfix'
TURNIN_FEXT='turnin.fext'
#Simple Logger Property
LOGGER_STATUS='logger.debug'
#Arguments from Command Line
TA_ID='taid'
LAB_DIR='labdir'
#Info extracted from students
LAST_SUBMIT='last_submit'
STUDENTS_NAME='students_name'
#File Handling
INPUT_PATH='input'
OUTPUT_PATH='output'
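#Illustrative conf.txt layout (hypothetical values; the property names are the
#ones defined above and read by parseConfFile()):
#   class.home=/cs/class/cs16
#   turnin.dir=TURNIN
#   work.dir=GRADING
#   work.dir.postfix=nil
#   turnin.fext=.tar.Z
#   logger.debug=1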
############################################
def getStudentsName( args, uid ):
finger_cmd = [r'finger', '-lm', uid]
if args[LOGGER_STATUS]:
print "[DEBUG] Invoking Command: finger -lm", uid
out = subprocess.Popen(finger_cmd, stdout=subprocess.PIPE).communicate()[0]
rexp = r'Login: .*Name: (.*).*'
r = re.compile(rexp)
m = r.match( out )
stdname = str()
if m:
stdname = m.group(1).strip().replace(' ', '_')
stdname = stdname.replace("'", ""); # quote breaks shell cmd calls
if args[LOGGER_STATUS]:
print "[DEBUG] Student's Name: ", stdname
args[STUDENTS_NAME] = stdname.strip()
else:
print "[FATAL] Finger Error "
return stdname
def getLastTurnin(listOfFiles, args):
#Get the last TURNIN
students_data = dict()
for lab in listOfFiles:
lab = lab.replace(args[INPUT_PATH]+'/', '')
lab = lab.replace(args[TURNIN_FEXT],'')
uid = lab
pos = lab.rfind("-")
num = 0
if pos != -1:
uid = lab[:pos]
            num = int(lab[pos + 1:])  # as int, so max() compares submission numbers correctly
if uid not in students_data:
students_data[uid] = dict()
students_data[uid][LAST_SUBMIT] = num
students_data[uid][STUDENTS_NAME]= getStudentsName( args, uid )
students_data[uid][LAST_SUBMIT] = max(num, students_data[uid][LAST_SUBMIT])
#Dumping Students Info
if args[LOGGER_STATUS]: print "[DEBUG] Student's Data :: ", str(students_data)
return students_data
def extractList(args):
if args[WORK_DIR] == 'nil': args[WORK_DIR] = ''
#Building Paths
inputPath = os.path.join(args[CLASS_HOME], args[TURNIN_DIR])
inputPath = os.path.join(inputPath, args[LAB_DIR] )
outputPath = os.path.join(args[CLASS_HOME], args[WORK_DIR])
outputPath = os.path.join(outputPath, args[TA_ID])
if args[WORK_POSTFIX]:
outputPath = outputPath + args[WORK_POSTFIX]
if args[LOGGER_STATUS]:
print '[DEBUG] InputPath: ', inputPath
print '[DEBUG] OutputPath: ', outputPath
args[INPUT_PATH] = inputPath
args[OUTPUT_PATH]= outputPath
filterFileStr = '*' + args[TURNIN_FEXT]
listInputPath = glob.glob( os.path.join(inputPath, filterFileStr) )
students_data = getLastTurnin( list(listInputPath), args )
return students_data
def extractInformation(stdnt_dir, uid, students_data, args):
fileName = uid
if students_data[uid][LAST_SUBMIT] != 0:
        fileName += '-' + str(students_data[uid][LAST_SUBMIT])
fileName += args[TURNIN_FEXT]
if args[LOGGER_STATUS]:
print '[DEBUG] File Name to extract: ', fileName
fileName = os.path.join(args[INPUT_PATH], fileName)
cmd = 'cd ' + stdnt_dir
cmd += '; zcat ' + fileName + ' | tar xvBf - > /dev/null'
if args[LOGGER_STATUS]:
print '[DEBUG] Invoking : ', cmd
subprocess.Popen(cmd, shell=True).wait()
def uncompressLabs(args, students_data):
#Verifying that TA has its own directory for grading
if os.access(args[OUTPUT_PATH], os.F_OK):
if args[LOGGER_STATUS]: print "[DEBUG] Output Path Exists!"
else:
print "[INFO] Creating OutputPath ... ", args[OUTPUT_PATH]
os.mkdir(args[OUTPUT_PATH])
#Create directory for each HW
output_labdir = os.path.join(args[OUTPUT_PATH], args[LAB_DIR])
if os.access(output_labdir, os.F_OK):
print "[DEBUG] LabDir exist"
else:
print "[DEBUG] Creating Lab Directory ... ", output_labdir
os.mkdir(output_labdir)
#Uncompress
for uid in students_data:
if args[LOGGER_STATUS]:
print "[DEBUG] Uncompressing Lab for ", uid
stdnt_dir = students_data[uid][STUDENTS_NAME]
if stdnt_dir == '':
print "[WARNING] Student's Name is empty using UCSB Net Id: ", uid
stdnt_dir = uid
stdnt_dir = os.path.join(output_labdir, stdnt_dir)
#Creating Directory only if it doesn't exist
if not os.access(stdnt_dir, os.F_OK):
if args[LOGGER_STATUS]:
print "[DEBUG] Creating Directory ", stdnt_dir
os.mkdir(stdnt_dir)
#Uncompressing the Tarball
extractInformation(stdnt_dir, uid, students_data, args)
return
##
#Parse the Conf File
def parseConfFile():
confFile = open(CONF_FILE_NAME, 'r')
reg_exp = r'(.*)=(.*)'
r = re.compile(reg_exp)
##
#Dictionary containing info from conffile
conf = dict()
#Reading file
    for line in confFile:
        line = line.strip()
        if not line: continue #Blank lines
        if line.find('#') != -1: continue #Comments
        match = r.match( line )
        if not match:
            print "[ERROR] Invalid Configuration File"
            continue
if match.group(1) == CLASS_HOME:
conf[CLASS_HOME] = match.group(2)
elif match.group(1) == TURNIN_DIR:
conf[TURNIN_DIR] = match.group(2)
elif match.group(1) == WORK_DIR:
conf[WORK_DIR] = match.group(2)
elif match.group(1) == TURNIN_FEXT:
conf[TURNIN_FEXT]= match.group(2)
elif match.group(1) == WORK_POSTFIX:
if match.group(2) != 'nil':
conf[WORK_POSTFIX]= match.group(2)
else:
conf[WORK_POSTFIX]= ''
elif match.group(1) == LOGGER_STATUS:
if match.group(2) != '1':
conf[LOGGER_STATUS]= 0
else:
conf[LOGGER_STATUS]= 1
else:
print "[ERROR] Invalid Property in Conf File"
return
confFile.close()
if CLASS_HOME not in conf.keys():
print "[ERROR] Invalid configuration file"
elif TURNIN_DIR not in conf.keys():
print "[ERROR] Invalid configuration file"
elif WORK_DIR not in conf.keys():
print "[ERROR] Invalid configuration file"
elif TURNIN_FEXT not in conf.keys():
print "[ERROR] Invalid configuration file"
#Dump configuration properties
if conf[LOGGER_STATUS]:
print "[DEBUG] Configuration Properties : ", str(conf)
return conf
##
#Entry Point
def main(argv):
if len(sys.argv) != 3:
print 'UCSB :: GradeLab Script'
print '\tUsage: ./grade_labs.py <TAusername> <DirectoryToProcess>'
print '\tTAusername ::= TA\'s Identifier'
        print '\tDirectoryToProcess ::= The lab directory to uncompress\n\n'
        print 'NOTE: This script reads a CONF file that specifies the'
        print 'TURNIN directory. This conf.txt file should be in the same'
        print 'directory where this script resides'
return
##
#Reading CONF file
args = parseConfFile()
if not args: return
##
#Read TURNIN/<directoryToProcess> and keep with the most updated submissions
args[TA_ID] = sys.argv[1]
args[LAB_DIR]= sys.argv[2]
##
#Extract list of files
print "[INFO] Reading turned in labs ..."
students_data = extractList(args)
##
#Processing Directory
print "[INFO] Uncompressing last submissions ..."
uncompressLabs( args, students_data )
#
# Python entrypoint
if __name__=='__main__':
main(sys.argv)
|
|
import unittest, time, sys, random, logging
sys.path.extend(['.','..','py'])
import h2o, h2o_cmd, h2o_hosts, h2o_browse as h2b, h2o_import as h2i, h2o_glm
import h2o_exec as h2e, h2o_jobs
DO_IMPORT_CHECK = True
class Basic(unittest.TestCase):
def tearDown(self):
h2o.check_sandbox_for_errors()
@classmethod
def setUpClass(cls):
pass
@classmethod
def tearDownClass(cls):
# time.sleep(3600)
h2o.tear_down_cloud()
def test_benchmark_import(self):
# typical size of the michal files
avgMichalSizeUncompressed = 237270000
avgMichalSize = 116561140
avgSynSize = 4020000
covtype200xSize = 15033863400
synSize = 183
if 1==1:
# importFolderPath = '/home/0xdiag/datasets/more1_1200_link'
# importFolderPathFull = '/home/0xdiag/datasets/manyfiles-nflx-gz'
# importFolderPath = 'more1_1200_link'
importFolderPath = 'manyfiles-nflx-gz'
print "Using .gz'ed files in", importFolderPath
            # this pattern from browser correctly does 100 files, 1M rows
# source_key=*/home/0xdiag/datasets/manyfiles-nflx-gz/file_1[0-9][0-9].dat.gz
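            # Each tuple: (glob pattern to import, name used for the parsed .hex key,
            #              expected total bytes for the MB/sec report, parse timeout in secs)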
csvFilenameAll = [
("file_1.dat.gz", "file_1_A.dat.gz", 1 * avgMichalSize, 3600),
("file_[1-2].dat.gz", "file_2_A.dat.gz", 2 * avgMichalSize, 3600),
("file_[1-4].dat.gz", "file_4_A.dat.gz", 4 * avgMichalSize, 3600),
]
# csvFilenameList = random.sample(csvFilenameAll,1)
csvFilenameList = csvFilenameAll
# split out the pattern match and the filename used for the hex
trialMax = 1
# rebuild the cloud for each file
base_port = 54321
# can fire a parse off and go wait on the jobs queue (inspect afterwards is enough?)
DO_GLM = False
noPoll = False
# benchmarkLogging = ['cpu','disk', 'iostats', 'jstack']
# benchmarkLogging = None
benchmarkLogging = ['cpu','disk', 'network', 'iostats', 'jstack']
benchmarkLogging = ['cpu','disk', 'network', 'iostats']
# IOStatus can hang?
        benchmarkLogging = ['cpu', 'disk', 'network']
pollTimeoutSecs = 180
retryDelaySecs = 10
localhost = h2o.decide_if_localhost()
if localhost:
tryHeap = 4
h2o.build_cloud(2,java_heap_GB=tryHeap, base_port=base_port, enable_benchmark_log=True)
else:
tryHeap = 28
h2o_hosts.build_cloud_with_hosts(1, java_heap_GB=tryHeap, base_port=base_port,
enable_benchmark_log=True)
for i,(csvFilepattern, csvFilename, totalBytes, timeoutSecs) in enumerate(csvFilenameList):
# pop open a browser on the cloud
### h2b.browseTheCloud()
# to avoid sticky ports?
### base_port += 2
h2o.beta_features = True
for trial in range(trialMax):
# (importResult, importPattern) = h2i.import_only(path=importFolderPath+"/*")
if DO_IMPORT_CHECK:
for i in range(2):
csvPathname = importFolderPath + "/" + csvFilepattern
(importResult, importPattern) = h2i.import_only(bucket='home-0xdiag-datasets',
path=csvPathname, schema='local', timeoutSecs=timeoutSecs)
importFullList = importResult['files']
importFailList = importResult['fails']
print "\n Problem if this is not empty: importFailList:", h2o.dump_json(importFailList)
# creates csvFilename.hex from file in importFolder dir
h2o.cloudPerfH2O.change_logfile(csvFilename)
h2o.cloudPerfH2O.message("")
h2o.cloudPerfH2O.message("Parse " + csvFilename + " Start--------------------------------")
csvPathname = importFolderPath + "/" + csvFilepattern
start = time.time()
parseResult = h2i.import_parse(bucket='home-0xdiag-datasets', path=csvPathname, schema='local',
hex_key=csvFilename + ".hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
noPoll=noPoll,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse#", trial, parseResult['destination_key'], "took", elapsed, "seconds",\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
h2o_cmd.infoFromInspect(inspect, csvPathname)
if noPoll:
if (i+1) < len(csvFilenameList):
h2o.check_sandbox_for_errors()
(csvFilepattern, csvFilename, totalBytes2, timeoutSecs) = csvFilenameList[i+1]
# parseResult = h2i.import_parse(path=importFolderPath + "/" + csvFilepattern,
csvPathname = importFolderPathFull + "/" + csvFilepattern
start = time.time()
parseResult = h2i.import_parse(path=csvPathname,
hex_key=csvFilename + ".hex",
timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
noPoll=noPoll,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse#", trial, parseResult['destination_key'], "took", elapsed, "seconds",\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
h2o_cmd.infoFromInspect(inspect, csvPathname)
if (i+2) < len(csvFilenameList):
h2o.check_sandbox_for_errors()
(csvFilepattern, csvFilename, totalBytes3, timeoutSecs) = csvFilenameList[i+2]
csvPathname = importFolderPathFull + "/" + csvFilepattern
parseResult = h2i.import_parse(path=csvPathname,
hex_key=csvFilename + ".hex", timeoutSecs=timeoutSecs,
retryDelaySecs=retryDelaySecs,
pollTimeoutSecs=pollTimeoutSecs,
noPoll=noPoll,
benchmarkLogging=benchmarkLogging)
elapsed = time.time() - start
print "Parse#", trial, parseResult['destination_key'], "took", elapsed, "seconds",\
"%d pct. of timeout" % ((elapsed*100)/timeoutSecs)
inspect = h2o_cmd.runInspect(None, parseResult['destination_key'], timeoutSecs=360)
h2o_cmd.infoFromInspect(inspect, csvPathname)
# print stats on all three if noPoll
if noPoll:
# does it take a little while to show up in Jobs, from where we issued the parse?
time.sleep(2)
# FIX! use the last (biggest?) timeoutSecs? maybe should increase since parallel
h2o_jobs.pollWaitJobs(pattern=csvFilename,
timeoutSecs=timeoutSecs, benchmarkLogging=benchmarkLogging)
# for getting the MB/sec closer to 'right'
totalBytes += totalBytes2 + totalBytes3
elapsed = time.time() - start
h2o.check_sandbox_for_errors()
if totalBytes is not None:
fileMBS = (totalBytes/1e6)/elapsed
l = '{!s} jvms, {!s}GB heap, {:s} {:s} {:6.2f} MB/sec for {:.2f} secs'.format(
len(h2o.nodes), tryHeap, csvFilepattern, csvFilename, fileMBS, elapsed)
print l
h2o.cloudPerfH2O.message(l)
print "Parse result['destination_key']:", parseResult['destination_key']
# BUG here?
if not noPoll:
pass
# We should be able to see the parse result?
# h2o_cmd.check_enums_from_inspect(parseResult)
# the nflx data doesn't have a small enough # of classes in any col
# use exec to randomFilter out 200 rows for a quick RF. that should work for everyone?
origKey = parseResult['destination_key']
# execExpr = 'a = randomFilter('+origKey+',200,12345678)'
execExpr = 'a = slice('+origKey+',1,200)'
# h2e.exec_expr(h2o.nodes[0], execExpr, "a", timeoutSecs=30)
# runRF takes the parseResult directly
newParseKey = {'destination_key': 'a'}
print "\n" + csvFilepattern
# poker and the water.UDP.set3(UDP.java) fail issue..
# constrain depth to 25
print "Temporarily hacking to do nothing instead of RF on the parsed file"
### RFview = h2o_cmd.runRF(trees=1,depth=25,parseResult=newParseKey, timeoutSecs=timeoutSecs)
### h2b.browseJsonHistoryAsUrlLastMatch("RFView")
#**********************************************************************************
# Do GLM too
# Argument case error: Value 0.0 is not between 12.0 and 9987.0 (inclusive)
if DO_GLM:
# these are all the columns that are enums in the dataset...too many for GLM!
x = range(542) # don't include the output column
# remove the output too! (378)
for i in [3, 4, 5, 6, 7, 8, 9, 10, 11, 14, 16, 17, 18, 19, 20, 424, 425, 426, 540, 541, 378]:
x.remove(i)
x = ",".join(map(str,x))
GLMkwargs = {'x': x, 'y': 378, 'case': 15, 'case_mode': '>',
'max_iter': 10, 'n_folds': 1, 'alpha': 0.2, 'lambda': 1e-5}
start = time.time()
glm = h2o_cmd.runGLM(parseResult=parseResult, timeoutSecs=timeoutSecs, **GLMkwargs)
h2o_glm.simpleCheckGLM(self, glm, None, **GLMkwargs)
elapsed = time.time() - start
h2o.check_sandbox_for_errors()
l = '{:d} jvms, {:d}GB heap, {:s} {:s} GLM: {:6.2f} secs'.format(
len(h2o.nodes), tryHeap, csvFilepattern, csvFilename, elapsed)
print l
h2o.cloudPerfH2O.message(l)
#**********************************************************************************
# print "Waiting 30 secs"
# time.sleep(30)
h2o_cmd.checkKeyDistribution()
h2i.delete_keys_from_import_result(pattern=csvFilename, importResult=importResult)
h2o.nodes[0].remove_all_keys()
### time.sleep(3600)
### h2o.tear_down_cloud()
if not localhost:
print "Waiting 30 secs before building cloud again (sticky ports?)"
### time.sleep(30)
sys.stdout.write('.')
sys.stdout.flush()
if __name__ == '__main__':
h2o.unit_main()
|
|
import sys
import traceback
from browser import document as doc
from browser import window, alert, console
_credits = """ Thanks to CWI, CNRI, BeOpen.com, Zope Corporation and a cast of thousands
for supporting Python development. See www.python.org for more information."""
_copyright = """Copyright (c) 2012, Pierre Quentel [email protected]
All Rights Reserved.
Copyright (c) 2001-2013 Python Software Foundation.
All Rights Reserved.
Copyright (c) 2000 BeOpen.com.
All Rights Reserved.
Copyright (c) 1995-2001 Corporation for National Research Initiatives.
All Rights Reserved.
Copyright (c) 1991-1995 Stichting Mathematisch Centrum, Amsterdam.
All Rights Reserved."""
_license = """Copyright (c) 2012, Pierre Quentel [email protected]
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer. Redistributions in binary
form must reproduce the above copyright notice, this list of conditions and
the following disclaimer in the documentation and/or other materials provided
with the distribution.
Neither the name of the <ORGANIZATION> nor the names of its contributors may
be used to endorse or promote products derived from this software without
specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.
"""
def credits():
print(_credits)
credits.__repr__ = lambda:_credits
def copyright():
print(_copyright)
copyright.__repr__ = lambda:_copyright
def license():
print(_license)
license.__repr__ = lambda:_license
def write(data):
doc['code'].value += str(data)
sys.stdout.write = sys.stderr.write = write
history = []
current = 0
_status = "main" # or "block" if typing inside a block
# execution namespace
editor_ns = {'credits':credits,
'copyright':copyright,
'license':license,
'__name__':'__main__'}
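# Names defined at the console are stored in this dict (via the eval/exec calls
# below), so they persist from one input line to the next like a normal REPL.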
def cursorToEnd(*args):
pos = len(doc['code'].value)
doc['code'].setSelectionRange(pos, pos)
doc['code'].scrollTop = doc['code'].scrollHeight
def get_col(area):
# returns the column num of cursor
sel = doc['code'].selectionStart
lines = doc['code'].value.split('\n')
for line in lines[:-1]:
sel -= len(line) + 1
return sel
def myKeyPress(event):
global _status, current
if event.keyCode == 9: # tab key
event.preventDefault()
doc['code'].value += " "
elif event.keyCode == 13: # return
src = doc['code'].value
if _status == "main":
currentLine = src[src.rfind('>>>') + 4:]
elif _status == "3string":
currentLine = src[src.rfind('>>>') + 4:]
currentLine = currentLine.replace('\n... ', '\n')
else:
currentLine = src[src.rfind('...') + 4:]
if _status == 'main' and not currentLine.strip():
doc['code'].value += '\n>>> '
event.preventDefault()
return
doc['code'].value += '\n'
history.append(currentLine)
current += 1
if _status == "main" or _status == "3string":
try:
_ = editor_ns['_'] = eval(currentLine, editor_ns)
if _ is not None:
write(repr(_)+'\n')
doc['code'].value += '>>> '
_status = "main"
except IndentationError:
doc['code'].value += '... '
_status = "block"
except SyntaxError as msg:
if str(msg) == 'invalid syntax : triple string end not found' or \
str(msg).startswith('Unbalanced bracket'):
doc['code'].value += '... '
_status = "3string"
elif str(msg) == 'eval() argument must be an expression':
try:
exec(currentLine, editor_ns)
except:
traceback.print_exc()
doc['code'].value += '>>> '
_status = "main"
else:
traceback.print_exc()
doc['code'].value += '>>> '
_status = "main"
except:
traceback.print_exc()
doc['code'].value += '>>> '
_status = "main"
elif currentLine == "": # end of block
block = src[src.rfind('>>>') + 4:].splitlines()
block = [block[0]] + [b[4:] for b in block[1:]]
block_src = '\n'.join(block)
# status must be set before executing code in globals()
_status = "main"
try:
_ = exec(block_src, editor_ns)
if _ is not None:
print(repr(_))
except:
traceback.print_exc()
doc['code'].value += '>>> '
else:
doc['code'].value += '... '
cursorToEnd()
event.preventDefault()
def myKeyDown(event):
global _status, current
if event.keyCode == 37: # left arrow
sel = get_col(doc['code'])
if sel < 5:
event.preventDefault()
event.stopPropagation()
elif event.keyCode == 36: # line start
pos = doc['code'].selectionStart
col = get_col(doc['code'])
doc['code'].setSelectionRange(pos - col + 4, pos - col + 4)
event.preventDefault()
elif event.keyCode == 38: # up
if current > 0:
pos = doc['code'].selectionStart
col = get_col(doc['code'])
# remove current line
doc['code'].value = doc['code'].value[:pos - col + 4]
current -= 1
doc['code'].value += history[current]
event.preventDefault()
elif event.keyCode == 40: # down
if current < len(history) - 1:
pos = doc['code'].selectionStart
col = get_col(doc['code'])
# remove current line
doc['code'].value = doc['code'].value[:pos - col + 4]
current += 1
doc['code'].value += history[current]
event.preventDefault()
elif event.keyCode == 8: # backspace
src = doc['code'].value
lstart = src.rfind('\n')
if (lstart == -1 and len(src) < 5) or (len(src) - lstart < 6):
event.preventDefault()
event.stopPropagation()
doc['code'].bind('keypress', myKeyPress)
doc['code'].bind('keydown', myKeyDown)
doc['code'].bind('click', cursorToEnd)
v = sys.implementation.version
doc['code'].value = "Brython %s.%s.%s on %s %s\n>>> " % (
v[0], v[1], v[2], window.navigator.appName, window.navigator.appVersion)
#doc['code'].value += 'Type "copyright", "credits" or "license" for more information.'
doc['code'].focus()
cursorToEnd()
|
|
# Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""An estimator is a rule for calculating an estimate of a given quantity.
# Estimators
* **Estimators** are used to train and evaluate TensorFlow models.
They support regression and classification problems.
* **Classifiers** are functions that have discrete outcomes.
* **Regressors** are functions that predict continuous values.
## Choosing the correct estimator
* For **Regression** problems use one of the following:
* `LinearRegressor`: Uses linear model.
* `DNNRegressor`: Uses DNN.
* `DNNLinearCombinedRegressor`: Uses Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `Estimator`: Use when you need a custom model.
* For **Classification** problems use one of the following:
* `LinearClassifier`: Multiclass classifier using Linear model.
* `DNNClassifier`: Multiclass classifier using DNN.
* `DNNLinearCombinedClassifier`: Multiclass classifier using Wide & Deep.
* `TensorForestEstimator`: Uses RandomForest.
See tf.contrib.tensor_forest.client.random_forest.TensorForestEstimator.
* `SVM`: Binary classifier using linear SVMs.
  * `LogisticRegressor`: Use when you need a custom model for binary
     classification.
  * `Estimator`: Use when you need a custom model for N-class classification.
## Pre-canned Estimators
Pre-canned estimators are machine learning estimators premade for general
purpose problems. If you need more customization, you can always write your
own custom estimator as described in the section below.
Pre-canned estimators are tested and optimized for speed and quality.
### Define the feature columns
Here are some possible types of feature columns used as inputs to a pre-canned
estimator.
Feature columns may vary based on the estimator used. So you can see which
feature columns are fed to each estimator in the below section.
```python
sparse_feature_a = sparse_column_with_keys(
column_name="sparse_feature_a", keys=["AB", "CD", ...])
embedding_feature_a = embedding_column(
sparse_id_column=sparse_feature_a, dimension=3, combiner="sum")
sparse_feature_b = sparse_column_with_hash_bucket(
column_name="sparse_feature_b", hash_bucket_size=1000)
embedding_feature_b = embedding_column(
sparse_id_column=sparse_feature_b, dimension=16, combiner="sum")
crossed_feature_a_x_b = crossed_column(
columns=[sparse_feature_a, sparse_feature_b], hash_bucket_size=10000)
real_feature = real_valued_column("real_feature")
real_feature_buckets = bucketized_column(
source_column=real_feature,
boundaries=[18, 25, 30, 35, 40, 45, 50, 55, 60, 65])
```
### Create the pre-canned estimator
DNNClassifier, DNNRegressor, and DNNLinearCombinedClassifier are all pretty
similar to each other in how you use them. You can easily plug in an
optimizer and/or regularization to those estimators.
#### DNNClassifier
A classifier for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNClassifier(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNRegressor
A regressor for TensorFlow DNN models.
```python
my_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256])
# Or estimator using the ProximalAdagradOptimizer optimizer with
# regularization.
estimator = DNNRegressor(
feature_columns=my_features,
hidden_units=[1024, 512, 256],
optimizer=tf.train.ProximalAdagradOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### DNNLinearCombinedClassifier
A classifier for TensorFlow Linear and DNN joined training models.
* Wide and deep model
* Multi class (2 by default)
```python
my_linear_features = [crossed_feature_a_x_b]
my_deep_features = [embedding_feature_a, embedding_feature_b]
estimator = DNNLinearCombinedClassifier(
# Common settings
n_classes=n_classes,
weight_column_name=weight_column_name,
# Wide settings
linear_feature_columns=my_linear_features,
linear_optimizer=tf.train.FtrlOptimizer(...),
# Deep settings
dnn_feature_columns=my_deep_features,
dnn_hidden_units=[1000, 500, 100],
dnn_optimizer=tf.train.AdagradOptimizer(...))
```
#### LinearClassifier
Train a linear model to classify instances into one of multiple possible
classes. When number of possible classes is 2, this is binary classification.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearClassifier(
feature_columns=my_features,
optimizer=tf.train.FtrlOptimizer(
learning_rate=0.1,
l1_regularization_strength=0.001
))
```
#### LinearRegressor
Train a linear regression model to predict a label value given observation of
feature values.
```python
my_features = [sparse_feature_b, crossed_feature_a_x_b]
estimator = LinearRegressor(
feature_columns=my_features)
```
### LogisticRegressor
Logistic regression estimator for binary classification.
```python
# See tf.contrib.learn.Estimator(...) for details on model_fn structure
def my_model_fn(...):
pass
estimator = LogisticRegressor(model_fn=my_model_fn)
# Input builders
def input_fn_train():
pass
estimator.fit(input_fn=input_fn_train)
estimator.predict(x=x)
```
#### SVM - Support Vector Machine
Support Vector Machine (SVM) model for binary classification.
Currently only linear SVMs are supported.
```python
my_features = [real_feature, sparse_feature_a]
estimator = SVM(
example_id_column='example_id',
feature_columns=my_features,
l2_regularization=10.0)
```
#### DynamicRnnEstimator
An `Estimator` that uses a recurrent neural network with dynamic unrolling.
```python
problem_type = ProblemType.CLASSIFICATION # or REGRESSION
prediction_type = PredictionType.SINGLE_VALUE # or MULTIPLE_VALUE
estimator = DynamicRnnEstimator(problem_type,
prediction_type,
my_feature_columns)
```
### Use the estimator
There are two main functions for using estimators, one of which is for
training, and one of which is for evaluation.
You can specify different data sources for each one in order to use different
datasets for train and eval.
```python
# Input builders
def input_fn_train(): # returns x, Y
...
estimator.fit(input_fn=input_fn_train)
def input_fn_eval(): # returns x, Y
...
estimator.evaluate(input_fn=input_fn_eval)
estimator.predict(x=x)
```
## Creating Custom Estimator
To create a custom `Estimator`, provide a function to `Estimator`'s
constructor that builds your model (`model_fn`, below):
```python
estimator = tf.contrib.learn.Estimator(
model_fn=model_fn,
model_dir=model_dir) # Where the model's data (e.g., checkpoints)
# are saved.
```
Here is a skeleton of this function, with descriptions of its arguments and
return values in the accompanying tables:
```python
def model_fn(features, targets, mode, params):
# Logic to do the following:
# 1. Configure the model via TensorFlow operations
# 2. Define the loss function for training/evaluation
# 3. Define the training operation/optimizer
# 4. Generate predictions
return predictions, loss, train_op
```
You may use `mode` and check against
`tf.contrib.learn.ModeKeys.{TRAIN, EVAL, INFER}` to parameterize `model_fn`.
In the Further Reading section below, there is an end-to-end TensorFlow
tutorial for building a custom estimator.
## Additional Estimators
There are additional estimators under
`tensorflow.contrib.factorization.python.ops`:
* Gaussian mixture model (GMM) clustering
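A minimal usage sketch (hedged: the class name, module path within that
package, and constructor arguments are assumptions rather than a verified
signature):

```python
from tensorflow.contrib.factorization.python.ops.gmm import GMM

# Hypothetical arguments; `num_clusters` and `model_dir` are assumed.
gmm_estimator = GMM(num_clusters=3, model_dir=model_dir)
gmm_estimator.fit(input_fn=input_fn_train)
```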
## Further reading
For further reading, there are several tutorials with relevant topics,
including:
* [Overview of linear models](../../../tutorials/linear/overview.md)
* [Linear model tutorial](../../../tutorials/wide/index.md)
* [Wide and deep learning tutorial](../../../tutorials/wide_and_deep/index.md)
* [Custom estimator tutorial](../../../tutorials/estimators/index.md)
* [Building input functions](../../../tutorials/input_fn/index.md)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from tensorflow.contrib.learn.python.learn.estimators._sklearn import NotFittedError
from tensorflow.contrib.learn.python.learn.estimators.constants import ProblemType
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn import DNNRegressor
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedClassifier
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedEstimator
from tensorflow.contrib.learn.python.learn.estimators.dnn_linear_combined import DNNLinearCombinedRegressor
from tensorflow.contrib.learn.python.learn.estimators.dynamic_rnn_estimator import DynamicRnnEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import BaseEstimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import Estimator
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input
from tensorflow.contrib.learn.python.learn.estimators.estimator import infer_real_valued_columns_from_input_fn
from tensorflow.contrib.learn.python.learn.estimators.estimator import SKCompat
from tensorflow.contrib.learn.python.learn.estimators.head import binary_svm_head
from tensorflow.contrib.learn.python.learn.estimators.head import Head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_class_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_head
from tensorflow.contrib.learn.python.learn.estimators.head import multi_label_head
from tensorflow.contrib.learn.python.learn.estimators.head import no_op_train_fn
from tensorflow.contrib.learn.python.learn.estimators.head import poisson_regression_head
from tensorflow.contrib.learn.python.learn.estimators.head import regression_head
from tensorflow.contrib.learn.python.learn.estimators.kmeans import KMeansClustering
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearClassifier
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearEstimator
from tensorflow.contrib.learn.python.learn.estimators.linear import LinearRegressor
from tensorflow.contrib.learn.python.learn.estimators.logistic_regressor import LogisticRegressor
from tensorflow.contrib.learn.python.learn.estimators.metric_key import MetricKey
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModeKeys
from tensorflow.contrib.learn.python.learn.estimators.model_fn import ModelFnOps
from tensorflow.contrib.learn.python.learn.estimators.prediction_key import PredictionKey
from tensorflow.contrib.learn.python.learn.estimators.run_config import ClusterConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import Environment
from tensorflow.contrib.learn.python.learn.estimators.run_config import RunConfig
from tensorflow.contrib.learn.python.learn.estimators.run_config import TaskType
from tensorflow.contrib.learn.python.learn.estimators.svm import SVM
|
|
import sys,pdb
from Board import Board
from Tkinter import *
class Game:
# Constants
SQUARE_SIZE = 80
VER_SHIFT_MARGIN = 50
HOR_SHIFT_MARGIN = 150
COLOR_2 = '\033[91m'
COLOR_1 = '\033[93m'
COLOR_RC = '\033[96m'
COLOR_END = '\033[0m'
CELL_WIDTH = 15
class Player:
def __init__(self, flats, capstones):
self.flats = flats
self.capstones = capstones
def __init__(self, n, mode):
self.n = n
self.moves = 0
self.total_squares = n * n
if n == 5:
self.max_flats = 21
self.max_capstones = 1
elif n == 6:
self.max_flats = 30
self.max_capstones = 1
elif n == 7:
self.max_flats = 40
self.max_capstones = 1
else:
raise ValueError('Board size is either 5, 6 or 7.')
self.max_movable = n
self.players = []
self.players.append(Game.Player(self.max_flats, self.max_capstones))
self.players.append(Game.Player(self.max_flats, self.max_capstones))
self.board = []
for i in xrange(self.total_squares):
self.board.append([])
self.turn = 0
self.max_down = 1
self.max_up = n
self.max_left = 'a'
self.max_right = chr(ord('a') + n - 1)
self.winner = {}
self.mode = mode
def __str__(self):
'''Returns a string representation of the current
state of the game
'''
game_string = ''
game_string += 'Current turn: Player ' + str(self.turn + 1) + '\n'
game_string += 'Player 1 unplayed pieces: ' + str(self.players[0].flats) + \
'F, ' + str(self.players[0].capstones) + 'C\n'
game_string += 'Player 2 unplayed pieces: ' + str(self.players[1].flats) + \
'F, ' + str(self.players[1].capstones) + 'C\n\n'
for i in xrange(self.n-1, -1, -1):
game_string += Game.COLOR_RC + str(i+1) + Game.COLOR_END + ' '
for j in xrange(self.n):
idx = i * self.n + j
if len(self.board[idx]) == 0:
for ii in xrange(Game.CELL_WIDTH):
game_string += ' '
else:
spaces = (Game.CELL_WIDTH - len(self.board[idx])) / 2
game_string += ' ' * spaces
game_string += self.square_to_string(self.board[idx])
spaces = (Game.CELL_WIDTH - len(self.board[idx]) + 1) / 2
game_string += ' ' * spaces
game_string += '\n'
game_string += ' '
for i in xrange(self.n):
game_string += ' ' * (Game.CELL_WIDTH/2)
game_string += Game.COLOR_RC + chr(i+97) + Game.COLOR_END
game_string += ' ' * (Game.CELL_WIDTH/2)
return game_string
def square_to_string(self, square):
square_string = ''
for i in range(len(square)):
if square[i][0] == 0:
square_string += Game.COLOR_1
else:
square_string += Game.COLOR_2
square_string += square[i][1]
square_string += Game.COLOR_END
return square_string
def init_display(self):
self.display = Tk()
self.window_height = self.n * Board.SQUARE_SIZE + 2 * Board.VER_SHIFT_MARGIN
self.window_width = self.n * Board.SQUARE_SIZE + 2 * Board.HOR_SHIFT_MARGIN
self.canvas = Canvas(self.display, width = self.window_width, height = self.window_height, background = "#2c3e50")
self.canvas.pack()
self.render_board = Board(self.n, self.canvas, self.window_height, self.window_width)
def render(self):
print self.__str__()
print '\n\n'
def execute_move(self,move_string):
'''Returns
0 if move is invalid
1 if move is valid
2 if player 1 wins
3 if player 2 wins
4 if match drawn
'''
move_string = move_string.strip()
# pdb.set_trace()
if self.turn == 0:
self.moves += 1
if self.moves != 1:
current_piece = self.turn
else:
current_piece = 1 - self.turn
if len(move_string) <= 0:
return 0
if move_string[0].isalpha():
square = self.square_to_num(move_string[1:])
if square == -1:
return 0
if len(self.board[square]) != 0:
return 0
if move_string[0] == 'F' or move_string[0] == 'S':
if self.players[current_piece].flats == 0:
return 0
if self.moves == 1 and move_string[0] == 'S':
return 0
self.board[square].append((current_piece, move_string[0]))
self.players[current_piece].flats -= 1
elif move_string[0] == 'C':
if self.moves == 1:
return 0
if self.players[current_piece].capstones == 0:
return 0
self.board[square].append((current_piece, move_string[0]))
self.players[current_piece].capstones -= 1
else:
return 0
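# Stack moves use the notation <count><square><direction><drop counts...>,
# e.g. "3c3>12" picks up 3 pieces from c3, moves right ('>') and drops 1 piece
# on the first square and 2 on the next; the drop counts must sum to <count>.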
elif move_string[0].isdigit():
if self.moves <= 1:
return 0
count = int(move_string[0])
if count <= 0 or count > self.max_movable:
return 0
square = self.square_to_num(move_string[1:3])
if square == -1:
return 0
if len(self.board[square]) < count:
return 0
direction = move_string[3]
if direction == '+':
change = self.n
elif direction == '-':
change = -self.n
elif direction == '>':
change = 1
elif direction == '<':
change = -1
else:
return 0
prev_square = square
for i in xrange(4,len(move_string)):
if not move_string[i].isdigit():
return 0
next_count = int(move_string[i])
if next_count <= 0 or next_count > count:
return 0
next_square = prev_square + change
if (next_square % self.n == 0 and prev_square % self.n == self.n - 1):
return 0
if (next_square % self.n == self.n - 1 and prev_square % self.n == 0):
return 0
if next_square >= self.total_squares or next_square < 0:
return 0
if len(self.board[next_square]) != 0 and self.board[next_square][-1][1] == 'S':
if next_count != 1 or i != len(move_string) - 1:
return 0
if self.board[square][-1][1] != 'C':
return 0
if len(self.board[next_square]) != 0 and self.board[next_square][-1][1] == 'C':
return 0
count = count - next_count
prev_square = next_square
if count != 0:
return 0
count = int(move_string[0])
prev_square = square
for i in xrange(4, len(move_string)):
next_count = int(move_string[i])
next_square = prev_square + change
if (len(self.board[next_square]) > 0) and (self.board[next_square][-1][1] == 'S'):
self.board[next_square][-1] = (self.board[next_square][-1][0], 'F')
if next_count - count == 0:
self.board[next_square] += self.board[square][-count:]
else:
self.board[next_square] += self.board[square][-count:-count+next_count]
prev_square = next_square
count -= next_count
count = int(move_string[0])
self.board[square] = self.board[square][:-count]
else:
return 0
winner = -1
filled_board = all(len(sqr) > 0 for sqr in self.board)
if self.check_road_win(self.turn):
self.winner['player'] = self.turn
self.winner['type'] = 'road'
winner = 2 + self.turn
elif self.check_road_win(1 - self.turn):
self.winner['player'] = 1 - self.turn
self.winner['type'] = 'road'
winner = 3 - self.turn
elif self.players[0].flats == 0 or self.players[1].flats == 0 or filled_board:
winner = self.check_flat_win()
self.winner['player'] = winner - 2
self.winner['type'] = 'flat'
self.turn = 1 - self.turn
if self.mode == 'GUI':
self.render_board.render(self)
elif self.mode == 'CUI':
self.render()
if winner != -1:
return winner
return 1
def square_to_num(self,square_string):
''' Return -1 if square_string is invalid
'''
if len(square_string) != 2:
return -1
if not square_string[0].isalpha() or not square_string[0].islower() or not square_string[1].isdigit():
return -1
row = ord(square_string[0]) - 96
col = int(square_string[1])
if row < 1 or row > self.n or col < 1 or col > self.n:
return -1
return self.n * (col - 1) + (row - 1)
def check_road_win(self, player):
'''Checks for a road win for player
'''
def check_road_win(player, direction):
'''Direction can be 'ver' or 'hor'
'''
visited = set()
dfs_stack = []
final_positions = set()
if direction == 'ver':
for i in xrange(self.n):
if len(self.board[i]) > 0 and self.board[i][-1][0] == player and self.board[i][-1][1] != 'S':
visited.add(i)
dfs_stack.append(i)
final_positions.add(self.total_squares - 1 - i)
elif direction == 'hor':
for i in xrange(self.n):
if (len(self.board[i*self.n]) > 0) and (self.board[i*self.n][-1][0] == player) and (self.board[i*self.n][-1][1] != 'S'):
visited.add(i*self.n)
dfs_stack.append(i*self.n)
final_positions.add((i + 1) * self.n - 1)
while len(dfs_stack) > 0:
square = dfs_stack.pop()
if square in final_positions:
return True
nbrs = self.get_neighbours(square)
for nbr in nbrs:
if (nbr not in visited) and (len(self.board[nbr]) > 0) and (self.board[nbr][-1][0] == player) and (self.board[nbr][-1][1] != 'S'):
dfs_stack.append(nbr)
visited.add(nbr)
return False
return check_road_win(player, 'hor') or check_road_win(player, 'ver')
def get_neighbours(self,square):
'''Generate a list of neighbours for a given square
Returns empty if square is invalid
'''
if isinstance(square, str):
square = self.square_to_num(square)
if square < 0 or square >= self.total_squares:
return []
elif square == 0:
return [square+1, square+self.n]
elif square == self.n - 1:
return [square-1, square+self.n]
elif square == self.total_squares - self.n:
return [square+1, square-self.n]
elif square == self.total_squares - 1:
return [square-1, square-self.n]
elif square < self.n:
return [square-1, square+1, square+self.n]
elif square % self.n == 0:
return [square+1, square-self.n, square+self.n]
elif (square + 1) % self.n == 0:
return [square-1, square-self.n, square+self.n]
elif square >= self.total_squares - self.n:
return [square-1, square+1, square-self.n]
else:
return [square-1, square+1, square-self.n, square+self.n]
def check_flat_win(self):
'''Checks for a flat win
'''
count_1 = 0
count_2 = 0
for i in xrange(self.total_squares):
if len(self.board[i]) > 0 and self.board[i][-1][0] == 0 and self.board[i][-1][1] != 'S':
count_1 += 1
elif len(self.board[i]) > 0 and self.board[i][-1][0] == 1 and self.board[i][-1][1] != 'S':
count_2 += 1
if count_1 > count_2:
return 2
elif count_2 > count_1:
return 3
elif self.players[0].flats < self.players[1].flats:
return 3
elif self.players[0].flats > self.players[1].flats:
return 2
else:
return 4
def calculate_score(self, player):
'''Calculates the score of the player
'''
if 'player' not in self.winner:
raise ValueError('Nobody has won yet.')
count_1 = 0
count_2 = 0
for i in xrange(self.total_squares):
if len(self.board[i]) > 0 and self.board[i][-1][0] == 0 and self.board[i][-1][1] != 'S':
count_1 += 1
elif len(self.board[i]) > 0 and self.board[i][-1][0] == 1 and self.board[i][-1][1] != 'S':
count_2 += 1
if self.winner['type'] == 'road':
if self.winner['player'] == player:
return self.players[player].flats + self.total_squares
elif player == 0:
return count_1
else:
return count_2
elif self.winner['type'] == 'flat':
if (self.winner['player'] == player or self.winner['player'] == 2) and player == 0:
return self.players[player].flats + count_1
elif (self.winner['player'] == player or self.winner['player'] == 2) and player == 1:
return self.players[player].flats + count_2
elif player == 0:
return count_1
elif player == 1:
return count_2
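# --- Hedged usage sketch (not part of the original module) ---
# Assumes Board.py from the same project is importable. Console ('CUI') mode
# avoids the Tkinter display. execute_move() returns 0 for an invalid move,
# 1 for a valid move, 2/3 if player 1/2 wins and 4 for a draw.
if __name__ == '__main__':
    game = Game(5, 'CUI')
    # On each player's first move a flat stone is placed for the opponent.
    print 'result:', game.execute_move('Fa1')
    print 'result:', game.execute_move('Fe5')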
|
|
# Copyright 2015 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Takes a generator of values, and accumulates them for a frontend."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import threading
from tensorflow.python.platform import gfile
from tensorflow.python.platform import logging
from tensorflow.python.summary.impl import directory_watcher
from tensorflow.python.summary.impl import event_file_loader
from tensorflow.python.summary.impl import reservoir
namedtuple = collections.namedtuple
ScalarEvent = namedtuple('ScalarEvent',
['wall_time', 'step', 'value'])
CompressedHistogramEvent = namedtuple('CompressedHistogramEvent',
['wall_time', 'step',
'compressed_histogram_values'])
CompressedHistogramValue = namedtuple('CompressedHistogramValue',
['basis_point', 'value'])
HistogramEvent = namedtuple('HistogramEvent',
['wall_time', 'step', 'histogram_value'])
HistogramValue = namedtuple('HistogramValue',
['min', 'max', 'num', 'sum', 'sum_squares',
'bucket_limit', 'bucket'])
ImageEvent = namedtuple('ImageEvent',
['wall_time', 'step', 'encoded_image_string',
'width', 'height'])
## The tagTypes below are just arbitrary strings chosen to pass the type
## information of the tag from the backend to the frontend
COMPRESSED_HISTOGRAMS = 'compressedHistograms'
HISTOGRAMS = 'histograms'
IMAGES = 'images'
SCALARS = 'scalars'
GRAPH = 'graph'
## normal CDF for std_devs: (-Inf, -1.5, -1, -0.5, 0, 0.5, 1, 1.5, Inf)
## naturally gives bands around median of width 1 std dev, 2 std dev, 3 std dev,
## and then the long tail.
NORMAL_HISTOGRAM_BPS = (0, 668, 1587, 3085, 5000, 6915, 8413, 9332, 10000)
DEFAULT_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 500,
IMAGES: 4,
SCALARS: 10000,
HISTOGRAMS: 1,
}
STORE_EVERYTHING_SIZE_GUIDANCE = {
COMPRESSED_HISTOGRAMS: 0,
IMAGES: 0,
SCALARS: 0,
HISTOGRAMS: 0,
}
def IsTensorFlowEventsFile(path):
"""Check the path name to see if it is probably a TF Events file."""
return 'tfevents' in path
class EventAccumulator(object):
"""An `EventAccumulator` takes an event generator, and accumulates the values.
The `EventAccumulator` is intended to provide a convenient Python interface
for loading Event data written during a TensorFlow run. TensorFlow writes out
`Event` protobuf objects, which have a timestamp and step number, and often
contain a `Summary`. Summaries can have different kinds of data like an image,
a scalar value, or a histogram. The Summaries also have a tag, which we use to
organize logically related data. The `EventAccumulator` supports retrieving
the `Event` and `Summary` data by its tag.
Calling `Tags()` gets a map from `tagType` (e.g. `'images'`,
`'compressedHistograms'`, `'scalars'`, etc) to the associated tags for those
data types. Then, various functional endpoints (eg
`Accumulator.Scalars(tag)`) allow for the retrieval of all data
associated with that tag.
Before usage, the `EventAccumulator` must be activated via `Reload()` or
`AutoUpdate(interval)`.
If activated via `Reload()`, it loads synchronously, so calls to `Values` or
`Tags` will block until all outstanding events are processed. Afterwards,
`Reload()` may be called again to load any new data.
If activated via `AutoUpdate(interval)`, it loads asynchronously, so calls to
`Values` or `Tags` will immediately return a valid subset of the outstanding
event data. It reloads new data every `interval` seconds.
Histograms and images are very large, so storing all of them is not
recommended.
@@Reload
@@AutoUpdate
@@Tags
@@Scalars
@@Graph
@@Histograms
@@CompressedHistograms
@@Images
"""
def __init__(self, path, size_guidance=DEFAULT_SIZE_GUIDANCE,
compression_bps=NORMAL_HISTOGRAM_BPS):
"""Construct the `EventAccumulator`.
Args:
path: A file path to a directory containing tf events files, or a single
tf events file. The accumulator will load events from this path.
size_guidance: Information on how much data the EventAccumulator should
store in memory. The DEFAULT_SIZE_GUIDANCE tries not to store too much
so as to avoid OOMing the client. The size_guidance should be a map
from a `tagType` string to an integer representing the number of
items to keep per tag for items of that `tagType`. If the size is 0,
all events are stored.
compression_bps: Information on how the `EventAccumulator` should compress
histogram data for the `CompressedHistograms` tag (for details see
`ProcessCompressedHistogram`).
"""
sizes = {}
for key in DEFAULT_SIZE_GUIDANCE:
if key in size_guidance:
sizes[key] = size_guidance[key]
else:
sizes[key] = DEFAULT_SIZE_GUIDANCE[key]
self._scalars = reservoir.Reservoir(size=sizes[SCALARS])
self._graph = None
self._histograms = reservoir.Reservoir(size=sizes[HISTOGRAMS])
self._compressed_histograms = reservoir.Reservoir(
size=sizes[COMPRESSED_HISTOGRAMS])
self._images = reservoir.Reservoir(size=sizes[IMAGES])
self._generator_mutex = threading.Lock()
self._generator = _GeneratorFromPath(path)
self._is_autoupdating = False
self._activated = False
self._compression_bps = compression_bps
self.most_recent_step = -1
self.most_recent_wall_time = -1
def Reload(self):
"""Loads all events added since the last call to `Reload`.
If `Reload` was never called, loads all events in the file.
Calling `Reload` activates the `EventAccumulator`.
Returns:
The `EventAccumulator`.
"""
self._activated = True
with self._generator_mutex:
for event in self._generator.Load():
## Check if the event happened after a crash
if event.step < self.most_recent_step:
## Keep data in reservoirs that has a step less than event.step
_NotExpired = lambda x: x.step < event.step
num_expired_scalars = self._scalars.FilterItems(_NotExpired)
num_expired_histograms = self._histograms.FilterItems(_NotExpired)
num_expired_compressed_histograms = self._compressed_histograms.FilterItems(
_NotExpired)
num_expired_images = self._images.FilterItems(_NotExpired)
purge_msg = (
'Detected out of order event.step likely caused by a Tensorflow '
'restart. Purging expired events from Tensorboard display '
'between the previous step: {} (timestamp: {}) and current step:'
' {} (timestamp: {}). Removing {} scalars, {} histograms, {} '
'compressed histograms, and {} images.').format(
self.most_recent_step, self.most_recent_wall_time, event.step,
event.wall_time, num_expired_scalars, num_expired_histograms,
num_expired_compressed_histograms, num_expired_images)
logging.warn(purge_msg)
else:
self.most_recent_step = event.step
self.most_recent_wall_time = event.wall_time
## Process the event
if event.HasField('graph_def'):
if self._graph is not None:
logging.warn(('Found more than one graph event per run. '
'Overwriting the graph with the newest event'))
self._graph = event.graph_def
elif event.HasField('summary'):
for value in event.summary.value:
if value.HasField('simple_value'):
self._ProcessScalar(value.tag, event.wall_time, event.step,
value.simple_value)
elif value.HasField('histo'):
self._ProcessHistogram(value.tag, event.wall_time, event.step,
value.histo)
self._ProcessCompressedHistogram(value.tag, event.wall_time,
event.step, value.histo)
elif value.HasField('image'):
self._ProcessImage(value.tag, event.wall_time, event.step,
value.image)
return self
def AutoUpdate(self, interval=60):
"""Asynchronously load all events, and periodically reload.
Calling this function is not thread safe.
Calling this function activates the `EventAccumulator`.
Args:
interval: how many seconds after each successful reload to load new events
(default 60)
Returns:
The `EventAccumulator`.
"""
if self._is_autoupdating:
return
self._is_autoupdating = True
self._activated = True
def Update():
self.Reload()
logging.info('EventAccumulator update triggered')
t = threading.Timer(interval, Update)
t.daemon = True
t.start()
# Asynchronously start the update process, so that the accumulator can
# immediately serve data, even if there is a very large event file to parse
t = threading.Timer(0, Update)
t.daemon = True
t.start()
return self
def Tags(self):
"""Return all tags found in the value stream.
Raises:
RuntimeError: If the `EventAccumulator` has not been activated.
Returns:
A `{tagType: ['list', 'of', 'tags']}` dictionary.
"""
self._VerifyActivated()
return {IMAGES: self._images.Keys(),
HISTOGRAMS: self._histograms.Keys(),
SCALARS: self._scalars.Keys(),
COMPRESSED_HISTOGRAMS: self._compressed_histograms.Keys(),
GRAPH: self._graph is not None}
def Scalars(self, tag):
"""Given a summary tag, return all associated `ScalarEvent`s.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
RuntimeError: If the `EventAccumulator` has not been activated.
Returns:
An array of `ScalarEvent`s.
"""
self._VerifyActivated()
return self._scalars.Items(tag)
def Graph(self):
"""Return the graph definition, if there is one.
Raises:
ValueError: If there is no graph for this run.
RuntimeError: If the `EventAccumulator` has not been activated.
Returns:
The `graph_def` proto.
"""
self._VerifyActivated()
if self._graph is None:
raise ValueError('There is no graph in this EventAccumulator')
return self._graph
def Histograms(self, tag):
"""Given a summary tag, return all associated histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
RuntimeError: If the `EventAccumulator` has not been activated.
Returns:
An array of `HistogramEvent`s.
"""
self._VerifyActivated()
return self._histograms.Items(tag)
def CompressedHistograms(self, tag):
"""Given a summary tag, return all associated compressed histograms.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
RuntimeError: If the `EventAccumulator` has not been activated.
Returns:
An array of `CompressedHistogramEvent`s.
"""
self._VerifyActivated()
return self._compressed_histograms.Items(tag)
def Images(self, tag):
"""Given a summary tag, return all associated images.
Args:
tag: A string tag associated with the events.
Raises:
KeyError: If the tag is not found.
RuntimeError: If the `EventAccumulator` has not been activated.
Returns:
An array of `ImageEvent`s.
"""
self._VerifyActivated()
return self._images.Items(tag)
def _VerifyActivated(self):
if not self._activated:
raise RuntimeError('Accumulator must be activated before it may be used.')
def _ProcessScalar(self, tag, wall_time, step, scalar):
"""Processes a simple value by adding it to accumulated state."""
sv = ScalarEvent(wall_time=wall_time, step=step, value=scalar)
self._scalars.AddItem(tag, sv)
def _ProcessHistogram(self, tag, wall_time, step, histo):
"""Processes a histogram by adding it to accumulated state."""
histogram_value = HistogramValue(
min=histo.min,
max=histo.max,
num=histo.num,
sum=histo.sum,
sum_squares=histo.sum_squares,
# convert from proto repeated to list
bucket_limit=list(histo.bucket_limit),
bucket=list(histo.bucket),
)
histogram_event = HistogramEvent(
wall_time=wall_time,
step=step,
histogram_value=histogram_value,
)
self._histograms.AddItem(tag, histogram_event)
def _Remap(self, x, x0, x1, y0, y1):
"""Linearly map from [x0, x1] unto [y0, y1]."""
return y0 + (x - x0) * float(y1 - y0)/(x1 - x0)
def _Percentile(self, compression_bps, bucket_limit, cumsum_weights,
histo_min, histo_max, histo_num):
"""Linearly interpolates a histogram weight for a particular basis point.
Uses clamping methods on `histo_min` and `histo_max` to produce tight
linear estimates of the histogram weight at a particular basis point.
Args:
compression_bps: The desired basis point at which to estimate the weight
bucket_limit: An array of the RHS histogram bucket limits
cumsum_weights: A cumulative sum of the fraction of weights in each
histogram bucket, represented in basis points.
histo_min: The minimum weight observed in the weight histogram
histo_max: The maximum weight observed in the weight histogram
histo_num: The number of items in the weight histogram
Returns:
A linearly interpolated value of the histogram weight estimate.
"""
if histo_num == 0: return 0
for i, cumsum in enumerate(cumsum_weights):
if cumsum >= compression_bps:
cumsum_prev = cumsum_weights[i-1] if i > 0 else 0
# Prevent cumsum = 0, cumsum_prev = 0, lerp divide by zero.
if cumsum == cumsum_prev: continue
# Calculate the lower bound of interpolation
lhs = bucket_limit[i-1] if (i > 0 and cumsum_prev > 0) else histo_min
lhs = max(lhs, histo_min)
# Calculate the upper bound of interpolation
rhs = bucket_limit[i]
rhs = min(rhs, histo_max)
weight = self._Remap(compression_bps, cumsum_prev, cumsum, lhs, rhs)
return weight
## We have not exceeded cumsum, so return the max observed.
return histo_max
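# Worked example (for illustration, values assumed): with bucket_limit=[1.0, 2.0],
# cumsum_weights=[5000, 10000], histo_min=0.5, histo_max=1.8 and histo_num=10,
# compression_bps=7500 falls in the second bucket, so the weight is interpolated
# between lhs=1.0 and rhs=1.8 over the cumsum range [5000, 10000], giving 1.4.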
def _ProcessCompressedHistogram(self, tag, wall_time, step, histo):
"""Processes a histogram by adding a compression to accumulated state.
Adds a compressed histogram by linearly interpolating histogram buckets to
represent the histogram weight at multiple compression points. Uses
self._compression_bps (passed to EventAccumulator constructor) as the
compression points (represented in basis points, 1/100ths of a percent).
Args:
tag: A string name of the tag for which histograms are retrieved.
wall_time: Time in seconds since epoch
step: Number of steps that have passed
histo: proto2 histogram Object
"""
def _CumulativeSum(arr):
return [sum(arr[:i+1]) for i in range(len(arr))]
# Convert from proto repeated field into a Python list.
bucket = list(histo.bucket)
bucket_limit = list(histo.bucket_limit)
bucket_total = sum(bucket)
fraction_weights = [10000 * x / bucket_total for x in bucket]
cumsum_weights = _CumulativeSum(fraction_weights)
percentiles = [
self._Percentile(bps, bucket_limit, cumsum_weights, histo.min,
histo.max, histo.num) for bps in self._compression_bps
]
compressed_histogram_values = [CompressedHistogramValue(
basis_point=bps,
value=value) for bps, value in zip(self._compression_bps, percentiles)]
histogram_event = CompressedHistogramEvent(
wall_time=wall_time,
step=step,
compressed_histogram_values=compressed_histogram_values)
self._compressed_histograms.AddItem(tag, histogram_event)
def _ProcessImage(self, tag, wall_time, step, image):
"""Processes an image by adding it to accumulated state."""
event = ImageEvent(
wall_time=wall_time,
step=step,
encoded_image_string=image.encoded_image_string,
width=image.width,
height=image.height
)
self._images.AddItem(tag, event)
def _GeneratorFromPath(path):
"""Create an event generator for file or directory at given path string."""
loader_factory = event_file_loader.EventFileLoader
if gfile.IsDirectory(path):
return directory_watcher.DirectoryWatcher(path, loader_factory,
IsTensorFlowEventsFile)
else:
return loader_factory(path)
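# --- Hedged usage sketch (not part of the original module) ---
# './train_logs' is an assumed path; any directory containing tfevents files works.
if __name__ == '__main__':
    accumulator = EventAccumulator('./train_logs',
                                   size_guidance=STORE_EVERYTHING_SIZE_GUIDANCE)
    accumulator.Reload()  # activate and load synchronously
    print('scalar tags:', accumulator.Tags()[SCALARS])
    for tag in accumulator.Tags()[SCALARS]:
        for event in accumulator.Scalars(tag):
            print(tag, event.step, event.value)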
|
|
#!/usr/bin/env python2.6
# -*- cpy-indent-level: 4; indent-tabs-mode: nil -*-
# ex: set expandtab softtabstop=4 shiftwidth=4:
#
# Copyright (C) 2012,2013 Contributor
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Module for testing parameter support."""
import unittest
if __name__ == "__main__":
import utils
utils.import_depends()
from broker.brokertest import TestBrokerCommand
FEATURE = 'myfeature'
class TestParameterDefinitionFeature(TestBrokerCommand):
def test_00_add_feature(self):
cmd = ["add_feature", "--feature", FEATURE, "--type=host"]
self.noouttest(cmd)
def test_100_add(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testpath", "--value_type=string", "--description=blaah",
"--required", "--default=default"]
self.noouttest(cmd)
def test_110_add_existing(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testpath", "--value_type=string", "--description=blaah",
"--required", "--default=default"]
err = self.badrequesttest(cmd)
self.matchoutput(err,
"Parameter Definition testpath, parameter "
"definition holder myfeature already exists.",
cmd)
def test_130_add_default_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testdefault", "--description=blaah"]
self.noouttest(cmd)
def test_130_add_int_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testint", "--description=blaah",
"--value_type=int", "--default=60"]
self.noouttest(cmd)
def test_130_add_invalid_int_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testbadint", "--description=blaah",
"--value_type=int", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Expected an integer for default for path=testbadint", cmd)
def test_130_add_float_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testfloat", "--description=blaah",
"--value_type=float", "--default=100.100"]
self.noouttest(cmd)
def test_130_add_invalid_float_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testbadfloat", "--description=blaah",
"--value_type=float", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Expected an floating point number for default for path=testbadfloat", cmd)
def test_130_add_boolean_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testboolean", "--description=blaah",
"--value_type=boolean", "--default=yes"]
self.noouttest(cmd)
def test_130_add_invalid_boolean_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testbadboolean", "--description=blaah",
"--value_type=boolean", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Expected a boolean value for default for path=testbadboolean", cmd)
def test_130_add_list_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testlist", "--description=blaah",
"--value_type=list", "--default=val1,val2"]
self.noouttest(cmd)
def test_130_add_json_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testjson", "--description=blaah",
"--value_type=json", "--default=\"{'val1':'val2'}\""]
self.noouttest(cmd)
def test_130_add_invalid_json_value_type(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testbadjson", "--description=blaah",
"--value_type=json", "--default=foo"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "The json string specified for default for path=testbadjson is invalid", cmd)
def test_130_rebuild_required(self):
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=test_rebuild_required", "--value_type=string", "--rebuild_required"]
self.noouttest(cmd)
def test_140_verify_add(self):
cmd = ["search_parameter_definition", "--feature", FEATURE, "--type=host"]
out = self.commandtest(cmd)
self.searchoutput(out,
r'Parameter Definition: testpath \[required\]\s*'
r'Type: string\s*'
r'Default: default',
cmd)
self.searchoutput(out,
r'Parameter Definition: testdefault\s*'
r'Type: string',
cmd)
self.searchoutput(out,
r'Parameter Definition: testint\s*'
r'Type: int\s*'
r'Default: 60',
cmd)
self.searchoutput(out,
r'Parameter Definition: testjson\s*'
r'Type: json\s*'
r"Default: \"{'val1':'val2'}\"",
cmd)
self.searchoutput(out,
r'Parameter Definition: testlist\s*'
r'Type: list\s*'
r'Default: val1,val2',
cmd)
self.searchoutput(out,
r'Parameter Definition: test_rebuild_required\s*'
r'Type: string\s*'
r'Rebuild Required: True',
cmd)
def test_145_verify_add(self):
cmd = ["search_parameter_definition", "--feature", FEATURE, "--format=proto", "--type=host"]
out = self.commandtest(cmd)
p = self.parse_paramdefinition_msg(out, 8)
param_defs = p.param_definitions[:]
param_defs.sort(key=lambda x: x.path)
self.failUnlessEqual(param_defs[0].path, 'test_rebuild_required')
self.failUnlessEqual(param_defs[0].value_type, 'string')
self.failUnlessEqual(param_defs[0].rebuild_required, True)
self.failUnlessEqual(param_defs[1].path, 'testboolean')
self.failUnlessEqual(param_defs[1].value_type, 'boolean')
self.failUnlessEqual(param_defs[1].default, 'yes')
self.failUnlessEqual(param_defs[2].path, 'testdefault')
self.failUnlessEqual(param_defs[2].value_type, 'string')
self.failUnlessEqual(param_defs[2].default, '')
self.failUnlessEqual(param_defs[3].path, 'testfloat')
self.failUnlessEqual(param_defs[3].value_type, 'float')
self.failUnlessEqual(param_defs[3].default, '100.100')
self.failUnlessEqual(param_defs[4].path, 'testint')
self.failUnlessEqual(param_defs[4].value_type, 'int')
self.failUnlessEqual(param_defs[4].default, '60')
self.failUnlessEqual(param_defs[5].path, 'testjson')
self.failUnlessEqual(param_defs[5].value_type, 'json')
self.failUnlessEqual(param_defs[5].default, u'"{\'val1\':\'val2\'}"')
self.failUnlessEqual(param_defs[6].path, 'testlist')
self.failUnlessEqual(param_defs[6].value_type, 'list')
self.failUnlessEqual(param_defs[6].default, "val1,val2")
self.failUnlessEqual(param_defs[7].path, 'testpath')
self.failUnlessEqual(param_defs[7].value_type, 'string')
self.failUnlessEqual(param_defs[7].default, 'default')
self.failUnlessEqual(param_defs[7].is_required, True)
def test_146_update(self):
cmd = ["update_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testint", "--description=testint",
"--default=100", "--required",
"--rebuild_required"]
self.noouttest(cmd)
def test_147_verify_add(self):
cmd = ["search_parameter_definition", "--feature", FEATURE, "--type=host"]
out = self.commandtest(cmd)
self.searchoutput(out,
r'Parameter Definition: testint \[required\]\s*'
r'Type: int\s*'
r'Default: 100\s*'
r'Description: testint\s*'
r'Rebuild Required: True',
cmd)
def test_150_del_validation(self):
cmd = ["add_personality", "--archetype=aquilon", "--personality=paramtest", "--eon_id=2", "--host_environment=legacy"]
self.noouttest(cmd)
cmd = ["bind_feature", "--personality=paramtest", "--feature", FEATURE]
self.successtest(cmd)
cmd = ["add_parameter", "--personality=paramtest", "--feature", FEATURE,
"--path=testpath", "--value=hello"]
self.noouttest(cmd)
cmd = ["del_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=testpath"]
out = self.badrequesttest(cmd)
self.matchoutput(out, "Parameter with path testpath used by following and cannot be deleted", cmd)
cmd = ["del_parameter", "--personality=paramtest", "--feature", FEATURE, "--path=testpath"]
self.noouttest(cmd)
cmd = ["unbind_feature", "--personality=paramtest", "--feature", FEATURE]
self.successtest(cmd)
cmd = ["del_personality", "--archetype=aquilon", "--personality=paramtest"]
self.noouttest(cmd)
def test_200_del(self):
for path in ['testpath', 'testdefault', 'testint', 'testlist',
'testjson', 'testboolean', 'testfloat', 'test_rebuild_required']:
cmd = ["del_parameter_definition", "--feature", FEATURE,
"--type=host", "--path=%s" % path]
self.noouttest(cmd)
def test_200_verify_delete(self):
cmd = ["search_parameter_definition", "--feature", FEATURE, "--type=host" ]
err = self.notfoundtest(cmd)
self.matchoutput(err, "No parameter definitions found for host "
"feature myfeature", cmd)
def test_210_invalid_path_cleaned(self):
for path in ["/startslash", "endslash/"] :
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=%s" % path, "--value_type=string"]
self.noouttest(cmd)
cmd = ["search_parameter_definition", "--feature", FEATURE, "--type=host"]
out = self.commandtest(cmd)
self.searchoutput(out, r'Parameter Definition: startslash\s*', cmd)
self.searchoutput(out, r'Parameter Definition: endslash\s*', cmd)
def test_215_invalid_path1(self):
for path in ["!badchar", "@badchar", "#badchar", "$badchar", "%badchar", "^badchar",
"&badchar", "*badchar" ":badchar", ";badcharjk", "+badchar"] :
cmd = ["add_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=%s" % path, "--value_type=string"]
err = self.badrequesttest(cmd)
self.matchoutput(err, "Invalid path %s specified, path cannot start with special characters" % path,
cmd)
def test_220_valid_path(self):
for path in ["multi/part1/part2", "noslash", "valid/with_under", "valid/with.dot",
"valid/with-dash", "with_under", "with.dot", "with-dash"] :
cmd = ["add_parameter_definition", "--path=%s" % path,
"--feature", FEATURE, "--type=host", "--value_type=string"]
self.noouttest(cmd)
cmd = ["del_parameter_definition", "--feature", FEATURE, "--type=host",
"--path=%s" % path]
self.noouttest(cmd)
def test_300_del(self):
cmd = ["del_feature", "--feature", FEATURE, "--type=host" ]
self.noouttest(cmd)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestParameterDefinitionFeature)
unittest.TextTestRunner(verbosity=2).run(suite)
|
|
from npc.character.tags import GroupTag, Tag
class TestSavesDefaultValues:
def test_saves_list_values(self):
tag = GroupTag('group', 'guys', 'dolls')
assert 'guys' in tag
assert 'dolls' in tag
def test_saves_keyword_values(self):
tag = GroupTag('group', guys=Tag('rank'), dolls=Tag('rank'))
assert 'guys' in tag
assert 'dolls' in tag
class TestUpdate:
def test_inserts_blank_values(self):
tag = GroupTag('group')
tag.update(['humans', 'monkeys'])
assert 'humans' in tag
def test_inserts_from_group_tag(self):
tag = GroupTag('group')
tag2 = GroupTag('group')
tag2.append('funny')
tag2['funny'].append('giggles')
tag.update(tag2)
assert 'funny' in tag
assert 'giggles' in tag['funny']
def test_leaves_old_values(self):
tag = GroupTag('group')
tag.update(['werewolves'])
tag.update(['humans', 'monkeys'])
assert 'humans' in tag
assert 'werewolves' in tag
class TestFilled:
def test_filled_with_values_set(self):
tag = GroupTag('employer', 'fruitco')
assert tag.filled
assert tag
def test_not_filled_with_no_values(self):
tag = GroupTag('employer')
assert not tag.filled
assert not tag
class TestPresent:
def test_present_with_values_set(self):
tag = GroupTag('employer', 'fruitco')
assert tag.present
assert tag
def test_not_present_with_no_values(self):
tag = GroupTag('employer')
assert not tag.present
assert not tag
class TestValidation:
def test_required_with_no_values_fails(self):
tag = GroupTag('employer', required=True)
tag.validate()
assert not tag.valid
def test_required_with_values_passes(self):
tag = GroupTag('employer', 'fruitco', required=True)
tag.validate()
assert tag.valid
def test_limit_zero_and_required_dont_mix(self):
tag = GroupTag('employer', 'fruitco', required=True, limit=0)
tag.validate()
assert not tag.valid
assert "Tag 'employer' is required but limited to zero values" in tag.problems
def test_subtag_doesnt_match(self):
bad_subtag = Tag('rank')
tag = GroupTag('employer', subtag='job')
tag.data['bobsburgers'] = bad_subtag
tag.validate()
assert not tag.valid
assert "Tag 'employer' uses subtag 'job', but found 'rank' for 'bobsburgers'" in tag.problems
def test_hidden_values_must_exist(self):
tag = GroupTag('employer', 'value1')
tag.hide_value('value2')
tag.validate()
assert not tag.valid
assert "Value 'value2' for tag 'employer' cannot be hidden, because it does not exist" in tag.problems
class TestAppend:
def test_adds_value_to_keys(self):
tag = GroupTag('employer')
tag.append('bobsburgers')
assert 'bobsburgers' in tag
def test_populates_correct_subtag(self):
tag = GroupTag('employer', subtag='job')
tag.append('bobsburgers')
assert tag['bobsburgers'] == Tag('job')
class TestStrictValidation:
def test_limit_exceeded(self):
tag = GroupTag('employer', 'fruitco', 'bobsburgers', limit=1)
tag.validate(strict=True)
assert not tag.valid
def test_under_limit(self):
tag = GroupTag('employer', 'fruitco', 'bobsburgers', limit=5)
tag.validate(strict=True)
assert tag.valid
def test_negative_limit(self):
tag = GroupTag('employer', 'fruitco', 'bobsburgers', limit=-1)
tag.validate(strict=True)
assert tag.valid
class TestHeader:
def test_no_string_when_not_filled(self):
tag = GroupTag('group')
header = tag.to_header()
assert not header
def test_has_string_when_filled(self):
tag = GroupTag('group', 'value')
header = tag.to_header()
assert header == '@group value'
def test_no_hide_when_not_hidden(self):
tag = GroupTag('type', 'value', hidden=False)
header = tag.to_header()
assert header == '@type value'
def test_has_hide_when_hidden(self):
tag = GroupTag('type', 'value', hidden=True)
header = tag.to_header()
assert '@hide type' in header
def test_hides_values_when_marked(self):
tag = GroupTag('type', 'value1', 'value2', hidden=False)
tag.hide_value('value2')
header = tag.to_header()
assert '@hide type >> value2' in header
def test_hides_subvalues(self):
tag = GroupTag('type', 'value1', 'value2', hidden=False)
tag.subtag('value2').append('cart')
tag.subtag('value2').hidden = True
header = tag.to_header()
assert '@hide type >> value2 >> subtags' in header
def test_hides_single_subvalue(self):
tag = GroupTag('type', 'value1', 'value2', hidden=False)
tag.subtag('value2').append('cart')
tag.subtag('value2').append('blart')
tag.subtag('value2').hide_value('cart')
header = tag.to_header()
assert '@hide type >> value2 >> cart' in header
def test_has_one_line_per_value(self):
tag = GroupTag('type', 'value1', 'value2', 'value3')
header = tag.to_header()
assert '@type value1' in header
assert '@type value2' in header
assert '@type value3' in header
class TestTagSlice:
def test_preserves_attributes(self):
tag = GroupTag('employer', 'value1', 'value2', 'value3', required=True, hidden=True, limit=5)
tag2 = tag.tagslice(0, 1)
assert tag2.required == tag.required
assert tag2.hidden == tag.hidden
assert tag2.limit == tag.limit
def test_applies_slice_to_data(self):
tag = GroupTag('employer', 'value1', 'value2', 'value3')
tag2 = tag.tagslice(1, 2)
assert tag2.data == {'value2': Tag('rank')}
def test_first_gets_just_first_element():
tag = GroupTag('employer', 'value1', 'value2', 'value3')
tag2 = tag.first()
assert tag2.data == {'value1': Tag('rank')}
def test_remaining_gets_all_other_elements():
tag = GroupTag('employer', 'value1', 'value2', 'value3')
tag2 = tag.remaining()
assert tag2.data == {'value2': Tag('rank'), 'value3': Tag('rank')}
class TestContains:
def test_wildcard_with_any_values_is_true(self):
tag = GroupTag('group', 'value1', 'value2', 'value3')
assert tag.contains('*')
def test_matches_wrong_case(self):
tag = GroupTag('group', 'value1', 'value2', 'value3')
assert tag.contains('VALUE1')
def test_matches_partial_values(self):
tag = GroupTag('group', 'value1', 'value2', 'value3')
assert tag.contains('val')
def test_matches_subvalues(self):
tag = GroupTag('group', 'value1', 'value2', 'value3')
tag['value1'].append('subval1')
assert tag.contains('subval1')
class TestFirstValue:
def test_returns_string_if_present(self):
tag = GroupTag('type', 'value1', 'value2')
assert tag.first_value() == 'value1'
def test_returns_none_if_no_values(self):
tag = GroupTag('type')
assert tag.first_value() is None
def test_bool_truthy_when_present():
tag = GroupTag('type')
assert not tag
tag.append('human')
assert tag
class TestSanitize:
def test_when_hidden_removes_all_values(self):
tag = GroupTag('group', 'value1', 'value2', 'value3', hidden=True)
tag.sanitize()
assert not tag
def test_when_not_hidden_removes_nothing(self):
tag = GroupTag('group', 'value1', 'value2', 'value3', hidden=False)
tag.sanitize()
assert list(tag) == ['value1', 'value2', 'value3']
def test_hides_all_subtags(self):
tag = GroupTag('group', 'value1', 'value2', 'value3', hidden=False)
tag.subtag('value1').append('subvalue')
tag.subtag('value1').hidden = True
tag.sanitize()
assert not tag.subtag('value1')
def test_hide_present_value_removes_value(self):
tag = GroupTag('group', 'value1', 'value2', 'value3', hidden=False)
tag.hide_value('value2')
tag.sanitize()
assert list(tag) == ['value1', 'value3']
def test_hide_missing_value_does_nothing(self):
tag = GroupTag('group', 'value1', 'value2', 'value3', hidden=False)
tag.hide_value('valuex')
tag.sanitize()
assert list(tag) == ['value1', 'value2', 'value3']
def test_hide_partial_value_does_nothing(self):
tag = GroupTag('group', 'value1', 'value2', 'value3', hidden=False)
tag.hide_value('val')
tag.sanitize()
assert list(tag) == ['value1', 'value2', 'value3']
def test_touch_shows_error(capsys):
tag = GroupTag('group')
tag.touch()
_, err = capsys.readouterr()
assert "Calling touch() on non-flag class GroupTag object 'group'" in err
|
|
import pysam
import sys
import csv
import os
import argparse
from quicksect import IntervalNode
from random import randint, seed
def getOverlap(a, b):
return max(0, min(a[1], b[1]) - max(a[0], b[0]))
def is_rRNA(read,chr):
find_list_rRNA=find(read.reference_start, read.reference_end , tree_rRNA[chr])
if len(find_list_rRNA)>0:
return True
else:
return False
def is_junction(read):
for c in read.cigartuples:
if c[0]==3:
return True
return False
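# whichFeature assigns a uniquely mapped, non-junction read to one genomic
# category. A feature only counts if its overlap covers more than 75% of the
# read length (threshold below). Returns one of: 'CDS', 'UTR3', 'UTR5',
# 'UTR_' (both UTRs), 'MIXED' (CDS plus UTR), 'INTRON', 'INTERGENIC'
# (within 10 kb of a gene) or 'DEEP' (none of the above).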
def whichFeature(read,chr):
find_list_cds=find(read.reference_start, read.reference_end , tree_cds[chr])
find_list_utr3=find(read.reference_start, read.reference_end , tree_utr3[chr])
find_list_utr5=find(read.reference_start, read.reference_end , tree_utr5[chr])
find_list_intron=find(read.reference_start, read.reference_end , tree_geneCoordinates[chr])
find_list_intergenic=find(read.reference_start, read.reference_end , tree_intergenic[chr])
threshold=len(read.query_sequence)*0.75
tag_cds=0
tag_utr3=0
tag_utr5=0
max_cds=0
max_utr3=0
max_utr5=0
cds_overlap=[]
utr3_overlap=[]
utr5_overlap=[]
for i in find_list_cds:
overlap=getOverlap((read.reference_start,read.reference_end),i)
if overlap>threshold:
cds_overlap.append(overlap)
for i in find_list_utr3:
overlap=getOverlap((read.reference_start,read.reference_end),i)
if overlap>threshold:
utr3_overlap.append(overlap)
for i in find_list_utr5:
overlap=getOverlap((read.reference_start,read.reference_end),i)
if overlap>threshold:
utr5_overlap.append(overlap)
if len(cds_overlap)>0:
tag_cds=1
max_cds=max(cds_overlap)
if len(utr3_overlap)>0:
tag_utr3=1
max_utr3=max(utr3_overlap)
if len(utr5_overlap)>0:
tag_utr5=1
max_utr5=max(utr5_overlap)
if tag_cds==1 and tag_utr3+tag_utr5>=1:
print "-------"
print "MIXED"
print tag_cds,tag_utr3,tag_utr5
print max_cds,max_utr3,max_utr5
print find_list_cds
print find_list_utr3
print find_list_utr5
print read
print "-------"
return 'MIXED'
elif tag_utr3+tag_utr5>1:
return 'UTR_'
elif tag_cds==1:
return 'CDS'
elif tag_utr3==1:
return 'UTR3'
elif tag_utr5==1:
return 'UTR5'
elif tag_cds+tag_utr3+tag_utr5==0:
if len(find_list_intron)>0:
return 'INTRON'
elif len(find_list_intergenic)>0:
return 'INTERGENIC'
else:
return 'DEEP'
#------
def find(start, end, tree):
"Returns a list with the overlapping intervals"
out = []
tree.intersect( start, end, lambda x: out.append(x) )
return [ (x.start, x.end) for x in out ]
# Example: a tree holding the single interval [5, 20] has no overlap with [27, 28]:
#   tree = IntervalNode(5, 20)
#   find(27, 28, tree)  # -> []
ap = argparse.ArgumentParser()
ap.add_argument('bam', help='sorted bam file')
ap.add_argument('outDir', help='dir to save results, dir will be created')
ap.add_argument('statFile', help='file to save number of reads per genome category')
ap.add_argument('org', help='h - human, m - mouse')
#ap.add_argument('--testN', type=int,
# help='Run a test using only the first N features, and then '
# 'print out some example feature IDs and their attributes')
#ap.add_argument('--force', action='store_true',
# help='Overwrite an existing database')
#cmd https://gist.github.com/daler/ec481811a44b3aa469f3
args = ap.parse_args()
##https://www.biostars.org/p/99/
if not os.path.exists(args.outDir):
os.makedirs(args.outDir)
print os.path.dirname(os.path.realpath(__file__))
chr_list=[]
#human or mouse
if args.org=='m':
for i in range(1,20):
chr_list.append(str(i))
chr_list.append('X')
chr_list.append('Y')
elif args.org=='h':
for i in range(1,23):
chr_list.append(str(i))
chr_list.append('X')
chr_list.append('Y')
else:
print "ERROR"
sys.exit(1)
if args.org=='h':
utr3_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/human/bedPrepared/UTR3_GRCh37_prepared.bed'
utr5_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/human/bedPrepared/UTR5_GRCh37_prepared.bed'
cds_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/human/bedPrepared/CDS_GRCh37_prepared.bed'
geneCoordinates_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/human/bedPrepared/geneCoordinatesType_prepared.bed'
elif args.org=='m':
utr3_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/mus_musculus/bedPrepared/UTR3_NCBIM37_prepared.bed'
utr5_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/mus_musculus/bedPrepared/UTR5_NCBIM37_prepared.bed'
cds_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/mus_musculus/bedPrepared/CDS_NCBIM37_prepared.bed'
geneCoordinates_file=os.path.dirname(os.path.realpath(__file__))+'/annotations/mus_musculus/bedPrepared/geneCoordinatesType_prepared.bed'
base=os.path.basename(args.bam)
prefix=os.path.splitext(base)[0]
#DATA STRUCTURE
tree_utr3={}
tree_utr5={}
tree_cds={}
tree_geneCoordinates={}
tree_rRNA={}
tree_intergenic={} # +10,000
for chr in chr_list:
tree_utr3[chr]=IntervalNode(0,0)
tree_utr5[chr]=IntervalNode(0,0)
tree_cds[chr]=IntervalNode(0,0)
tree_geneCoordinates[chr]=IntervalNode(0,0)
tree_rRNA[chr]=IntervalNode(0,0)
tree_intergenic[chr]=IntervalNode(0,0)
print "Load gene annotations ..."
#UTR3
print "Load",utr3_file
with open(utr3_file,'r') as f:
reader=csv.reader(f)
for line in reader:
chr=line[0]
if chr in chr_list:
x=int(line[1])
y=int(line[2])
tree_utr3[chr]=tree_utr3[chr].insert( x, y )
#find_list=find(67208778, 67210057 , tree_utr3[chr])
#UTR5
print "Load",utr5_file
with open(utr5_file,'r') as f:
reader=csv.reader(f)
for line in reader:
chr=line[0]
if chr in chr_list:
x=int(line[1])
y=int(line[2])
tree_utr5[chr]=tree_utr5[chr].insert( x, y )
#CDS
print "Load",cds_file
with open(cds_file,'r') as f:
reader=csv.reader(f)
for line in reader:
chr=line[0]
if chr in chr_list:
x=int(line[1])
y=int(line[2])
tree_cds[chr]=tree_cds[chr].insert( x, y )
#gene coordinates
#1,non-rRNA,ENSG00000008128,1634169,1655766
nGenes_non_rRNA=0
nGenes_rRNA=0
print "Load",geneCoordinates_file
with open(geneCoordinates_file,'r') as f:
reader=csv.reader(f)
for line in reader:
chr=line[0]
if chr in chr_list:
x=int(line[3])
y=int(line[4])
if line[1]=='non-rRNA':
nGenes_non_rRNA+=1
tree_geneCoordinates[chr]=tree_geneCoordinates[chr].insert( x, y )
x_10K=x-10000
y_10K=y+10000
if x_10K<0:
x_10K=0
tree_intergenic[chr]=tree_intergenic[chr].insert( x_10K, y_10K )
elif line[1]=='rRNA':
tree_rRNA[chr]=tree_rRNA[chr].insert( x, y )
nGenes_rRNA+=1
print "Number of non-rRNA genes",nGenes_non_rRNA
print "Number of rRNA genes",nGenes_rRNA
#
#======================================================================
#BAM
outFile={}
for chr in chr_list:
f_file=args.outDir+"/"+prefix+"."+chr+".genomicFeature"
outFile[chr]=open(f_file, 'w')
#MT
f_file=args.outDir+"/"+prefix+"."+'MT'+".genomicFeature"
outFile['MT']=open(f_file, 'w')
print "Open bam file",args.bam
bamfile = pysam.Samfile(args.bam, "rb")
#list for read categories
multiMappedReads=[]
fusionReads=[]
#counts
nrRNA=0
nDeep=0
nIntergenic=0
nIntron=0
nCDS=0
nUTR3=0
nUTR5=0
nUTR_=0
nJunction=0
nMultiMapped=0
nMixed=0
nIntron=0
nMT=0
singleton=[]
for chr in chr_list:
print "Process chr",chr
for read in bamfile.fetch(chr):
readName=read.query_name
if read.mapq!=50:
multiMappedReads.append(readName)
elif is_rRNA(read,chr):
outFile[chr].write( readName+','+chr + ',' + 'rRNA' + '\n' )
nrRNA+=1
elif is_junction(read):
outFile[chr].write( readName+','+chr + ',' + 'junction' + '\n' )
nJunction+=1
else:
feature=whichFeature(read,chr)
outFile[chr].write( readName+','+chr + ',' + feature + '\n' )
if feature=='CDS':
nCDS+=1
elif feature=='INTRON':
nIntron+=1
elif feature=='UTR3':
nUTR3+=1
elif feature=='UTR5':
nUTR5+=1
elif feature=='UTR_':
nUTR_+=1
elif feature=='MIXED':
nMixed+=1
elif feature=='INTERGENIC':
nIntergenic+=1
elif feature=='DEEP':
nDeep+=1
for read in bamfile.fetch('MT'):
readName=read.query_name
if read.mapq!=50:
multiMappedReads.append(readName)
else:
outFile['MT'].write( readName+','+'MT' + ',' + 'MT' + '\n' )
nMT+=1
print "multiMappedReads",len(multiMappedReads)
multiMappedReads=set(multiMappedReads)
print "multiMappedReads",len(multiMappedReads)
#multiMappedReads
f_multiMappedReads=args.outDir+"/"+prefix+"."+'_multiMappedReads.reads'
outfile = open(f_multiMappedReads, 'w' )
for i in multiMappedReads:
outfile.write(i)
outfile.write("\n")
nMultiMapped=len(multiMappedReads)
nTotalMapped=nrRNA+nDeep+nIntergenic+nIntron+nCDS+nUTR3+nUTR5+nUTR_+nMultiMapped+nMixed+nJunction+nMT
header=[]
header.append('sampleName')
header.append('nTotalMapped')
header.append('nJunction')
header.append('nCDS')
header.append('nUTR3')
header.append('nUTR5')
header.append('nUTR_')
header.append('nMixed')
header.append('nIntron')
header.append('nIntergenic')
header.append('nDeep')
header.append('nrRNA')
header.append('nMT')
header.append('nMultiMapped')
gf=[]
gf.append(prefix)
gf.append(nTotalMapped)
gf.append(nJunction)
gf.append(nCDS)
gf.append(nUTR3)
gf.append(nUTR5)
gf.append(nUTR_)
gf.append(nMixed)
gf.append(nIntron)
gf.append(nIntergenic)
gf.append(nDeep)
gf.append(nrRNA)
gf.append(nMT)
gf.append(nMultiMapped)
c = csv.writer(open(args.statFile, "w"))
c.writerow(header)
c.writerow(gf)
|
|
"""
RobotX Docker Client
Docker containers as slaves for running automation tests.
Author: ybian <[email protected]>
"""
import sys
import logging
import docker
# from docker import Client
class Docker(object):
"""
Docker Client
"""
def __init__(self, docker_server=None):
self.log = logging.getLogger()
# set the docker_server as base_url
if docker_server is None:
self.docker_server = "unix://var/run/docker.sock"
else:
self.docker_server = "tcp://" + docker_server + ":2375"
# try to bind docker server
try:
self.docker_client = docker.Client(base_url=self.docker_server)
except DockerException, error:
print error
sys.exit(255)
def find_images(self, image):
"""
check if image exists in docker server
"""
image_name = image.split(":")[0]
image_tag = image.split(":")[1]
image_repo = []
try:
images = self.docker_client.images(image_name)
except DockerException, error:
print error
sys.exit(255)
# check if image_name in docker server
if not images:
print "Can't find docker image: %s" % image_name
sys.exit(255)
else:
image_repos = [i["RepoTags"] for i in images]
for i in image_repos:
for j in i:
image_repo.append(j)
# check if image_tag in docker server
if image not in image_repo:
print "Can't find docker image %s" % image
else:
print "Image %s exits in docker server" % image
def inspect_images(self, image):
"""
get image informations, like image ID
"""
try:
images = self.docker_client.inspect_image(image)
except DockerException, error:
print error
sys.exit(255)
return images["Id"]
def create_container(self, config):
"""
basic function for creating container
"""
try:
new_container = self.docker_client.create_container_from_config(\
config)
except DockerException, error:
print error
sys.exit(255)
return new_container
def create_storage_container(self, source, image):
"""
create container for storage automation code
"""
# set config for create storage: 'Volumes': source
bind_route = '/data/:%s:rw' % source
config = {
'Tty': True,
'OpenStdin': True,
'Image': image,
'Cmd': '/bin/bash',
'HostConfig': {'Binds': [bind_route]}
}
storage_container = self.create_container(config)
print "Create storage container", storage_container['Id']
return storage_container['Id']
def create_running_container(self, source_container, image):
"""
create container for running automation
"""
# set config for create running: 'Volumes_from': source_container
config = {
'Tty': True,
'OpenStdin': True,
'Image': image,
'Cmd': '/bin/bash',
'HostConfig': {'VolumesFrom': [source_container]}
}
running_container = self.create_container(config)
print "Create running container", running_container['Id']
return running_container['Id']
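# Note on the intended pattern (inferred from the two factories above): the
# storage container bind-mounts the host's /data/ directory at `source`, and
# the running container reuses those volumes via VolumesFrom, so automation
# code staged on the host becomes visible inside the container executing it.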
def remove_container(self, container, force=False):
"""
remove container using container_Id or container_name
"""
try:
self.docker_client.remove_container(container)
except DockerException, error:
print error
sys.exit(255)
print "Remove container: ", container
def start_container(self, container):
"""
start an existing docker container
"""
try:
self.docker_client.start(container)
except DockerException, error:
print error
sys.exit(255)
print "Start container: ", container
def stop_container(self, container):
"""
stop an existing docker container
"""
try:
self.docker_client.stop(container)
except DockerException, error:
print error
sys.exit(255)
print "Stop container: ", container
def restart_container(self, container):
"""
restart an existing docker container
"""
try:
self.docker_client.restart(container)
except DockerException, error:
print error
sys.exit(255)
print "Restart container: ", container
def inspect_container(self, container):
"""
inspect container to get more information about container
"""
try:
container_info = self.docker_client.inspect_container(container)
except DockerException, error:
print error
sys.exit(255)
return container_info
def get_container_IP(self, container):
"""
get container IP and other infomation
"""
container_info = self.inspect_container(container)
container_IP = container_info['NetworkSettings']['IPAddress']
container_Gateway = container_info['NetworkSettings']['Gateway']
return container_IP, container_Gateway
def exec_create(self, container, cmd):
"""
create an exec instance in a running container
"""
try:
exec_instance = self.docker_client.exec_create(container, cmd)
except DockerException, error:
print error
sys.exit(255)
print "Exec_instance created..."
return exec_instance['Id']
def exec_start(self, exec_id, stream=False):
"""
start a previously created exec instance
"""
try:
exec_starter = self.docker_client.exec_start(exec_id, stream)
except DockerException, error:
print error
sys.exit(255)
print "Exec_instance started..."
class DockerException(Exception):
"""
General docker exception
"""
def __init__(self, msg):
self.msg = msg
def __str__(self):
return repr(self.msg)
if __name__ == '__main__':
# # Add an easy demo to show how automation run in docker by using this
# # docker client lib
#
# # First build a docker client connect to docker server 10.66.136.205
# docker = Docker(docker_server="10.66.136.205")
#
# # set basic param
# storage_image = "storage:0.1.0"
# running_image = "run:0.1.0"
# source_route = "/data/"
#
# # check if images exists
# docker.find_images(storage_image)
# docker.find_images(running_image)
#
# # create storage_container and running_container
# storage_container = docker.create_storage_container(\
# source_route, storage_image)
# running_container = docker.create_running_container(\
# storage_container, running_image)
# #print storage_container, "\n", running_container
#
# # start storage_container and running_container
# docker.start_container(storage_container)
# docker.start_container(running_container)
#
# # create running_container exec_instance and start this instance
# # exec cmd "bash -c 'Xvfb :99 &'"
# cmd = "bash -c 'Xvfb :99 &'"
# exec_Xfvb = docker.exec_create(running_container, cmd)
# docker.exec_start(exec_Xfvb)
#
#
# # test run automation script
# automation_cmd = "bash -c 'pybot \
# tools-automation/tests/Maitai2/cases/01_BasicFunction/login.txt'"
# exec_automation = docker.exec_create(running_container, automation_cmd)
# run_result = docker.exec_start(exec_automation)
dockerC = Docker(docker_server="10.66.136.205")
cmd = "bash -c 'Xvfb :99'"
exec_Xfvb = dockerC.exec_create("run", cmd)
print exec_Xfvb
dockerC.exec_start(exec_Xfvb, stream=True)
automation_cmd = "bash -c 'pybot tools-automation/tests/Maitai2/cases/01_BasicFunction/login.txt'"
exec_automation = dockerC.exec_create("run", automation_cmd)
print exec_automation
test = dockerC.exec_start(exec_automation)
|
|
from __future__ import absolute_import, division, print_function
import math
import itertools
import operator
import pytest
from datetime import datetime, date
from cytoolz import pluck
import datashape
import blaze
from blaze.compute.python import (nunique, mean, rrowfunc, rowfunc,
reduce_by_funcs)
from blaze import dshape, discover
from blaze.compute.core import compute, compute_up
from blaze.expr import (Symbol, by, union, merge, join, count, Distinct,
Apply, sum, min, max, any, summary, std, head)
import numpy as np
from blaze import cos, sin
from blaze.compatibility import builtins
from blaze.utils import raises
t = Symbol('t', 'var * {name: string, amount: int, id: int}')
data = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
tbig = Symbol('tbig', 'var * {name: string, sex: string[1], amount: int, id: int}')
databig = [['Alice', 'F', 100, 1],
['Alice', 'F', 100, 3],
['Drew', 'F', 100, 4],
['Drew', 'M', 100, 5],
['Drew', 'M', 200, 5]]
def test_dispatched_rowfunc():
cw = t['amount'] + 100
assert rowfunc(t)(t) == t
assert rowfunc(cw)(('Alice', 100, 1)) == 200
def test_reduce_by_funcs():
e = summary(number=t.id.max(), sum=t.amount.sum())
b = by(t, e)
assert reduce_by_funcs(b)[2]([1,2,3], [4,5,6]) == (1, 7)
def test_symbol():
assert compute(t, data) == data
def test_projection():
assert list(compute(t['name'], data)) == [x[0] for x in data]
def test_eq():
assert list(compute(t['amount'] == 100, data)) == [x[1] == 100 for x in data]
def test_selection():
assert list(compute(t[t['amount'] == 0], data)) == \
[x for x in data if x[1] == 0]
assert list(compute(t[t['amount'] > 150], data)) == \
[x for x in data if x[1] > 150]
def test_arithmetic():
assert list(compute(t['amount'] + t['id'], data)) == \
[b + c for a, b, c, in data]
assert list(compute(t['amount'] * t['id'], data)) == \
[b * c for a, b, c, in data]
assert list(compute(t['amount'] % t['id'], data)) == \
[b % c for a, b, c, in data]
def test_unary_ops():
for op in ('cos', 'sin', 'exp', 'ceil', 'floor', 'trunc', 'isnan'):
f = getattr(blaze, op)
pyf = getattr(math, op)
result = list(compute(f(t['amount']), data))
assert result == [pyf(x[1]) for x in data]
def test_neg():
assert list(compute(-t['amount'], data)) == [-x[1] for x in data]
def test_reductions():
assert compute(sum(t['amount']), data) == 100 + 200 + 50
assert compute(min(t['amount']), data) == 50
assert compute(max(t['amount']), data) == 200
assert compute(nunique(t['amount']), data) == 3
assert compute(nunique(t['name']), data) == 2
assert compute(count(t['amount']), data) == 3
assert compute(any(t['amount'] > 150), data) is True
assert compute(any(t['amount'] > 250), data) is False
def test_1d_reductions_keepdims():
for r in [sum, min, max, nunique, count]:
assert compute(r(t.amount, keepdims=True), data) == \
(compute(r(t.amount), data),)
def test_count():
t = Symbol('t', '3 * int')
assert compute(t.count(), [1, None, 2]) == 2
def reduction_runner(funcs):
from blaze.compatibility import builtins as bts
exprs = sum, min, max
for blaze_expr, py_func in itertools.product(exprs, funcs):
f = getattr(operator, py_func)
reduc_f = getattr(bts, blaze_expr.__name__)
ground_truth = f(reduc_f([100, 200, 50]), 5)
assert compute(f(blaze_expr(t['amount']), 5), data) == ground_truth
def test_reduction_arithmetic():
funcs = 'add', 'mul'
reduction_runner(funcs)
def test_reduction_compare():
funcs = 'eq', 'ne', 'lt', 'gt', 'le', 'ge'
reduction_runner(funcs)
def test_mean():
assert compute(mean(t['amount']), data) == float(100 + 200 + 50) / 3
assert 50 < compute(std(t['amount']), data) < 100
def test_std():
amt = [row[1] for row in data]
assert np.allclose(compute(t.amount.std(), data), np.std(amt))
assert np.allclose(compute(t.amount.std(unbiased=True), data),
np.std(amt, ddof=1))
assert np.allclose(compute(t.amount.var(), data), np.var(amt))
assert np.allclose(compute(t.amount.var(unbiased=True), data),
np.var(amt, ddof=1))
def test_by_no_grouper():
names = t['name']
assert set(compute(by(names, names.count()), data)) == \
set([('Alice', 2), ('Bob', 1)])
def test_by_one():
print(compute(by(t['name'], t['amount'].sum()), data))
assert set(compute(by(t['name'], t['amount'].sum()), data)) == \
set([('Alice', 150), ('Bob', 200)])
def test_by_compound_apply():
print(compute(by(t['name'], (t['amount'] + 1).sum()), data))
assert set(compute(by(t['name'], (t['amount'] + 1).sum()), data)) == \
set([('Alice', 152), ('Bob', 201)])
def test_by_two():
result = compute(by(tbig[['name', 'sex']], tbig['amount'].sum()),
databig)
expected = [('Alice', 'F', 200),
('Drew', 'F', 100),
('Drew', 'M', 300)]
print(set(result))
assert set(result) == set(expected)
def test_by_three():
result = compute(by(tbig[['name', 'sex']],
(tbig['id'] + tbig['amount']).sum()),
databig)
expected = [('Alice', 'F', 204),
('Drew', 'F', 104),
('Drew', 'M', 310)]
print(result)
assert set(result) == set(expected)
def test_works_on_generators():
assert list(compute(t['amount'], iter(data))) == \
[x[1] for x in data]
assert list(compute(t['amount'], (i for i in data))) == \
[x[1] for x in data]
def test_join():
left = [['Alice', 100], ['Bob', 200]]
right = [['Alice', 1], ['Bob', 2]]
L = Symbol('L', 'var * {name: string, amount: int}')
R = Symbol('R', 'var * {name: string, id: int}')
joined = join(L, R, 'name')
assert dshape(joined.schema) == \
dshape('{name: string, amount: int, id: int}')
result = list(compute(joined, {L: left, R: right}))
expected = [('Alice', 100, 1), ('Bob', 200, 2)]
assert result == expected
def test_outer_join():
left = [(1, 'Alice', 100),
(2, 'Bob', 200),
(4, 'Dennis', 400)]
right = [('NYC', 1),
('Boston', 1),
('LA', 3),
('Moscow', 4)]
L = Symbol('L', 'var * {id: int, name: string, amount: real}')
R = Symbol('R', 'var * {city: string, id: int}')
assert set(compute(join(L, R), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='left'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='right'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
assert set(compute(join(L, R, how='outer'), {L: left, R: right})) == set(
[(1, 'Alice', 100, 'NYC'),
(1, 'Alice', 100, 'Boston'),
(2, 'Bob', 200, None),
(3, None, None, 'LA'),
(4, 'Dennis', 400, 'Moscow')])
def test_multi_column_join():
left = [(1, 2, 3),
(2, 3, 4),
(1, 3, 5)]
right = [(1, 2, 30),
(1, 3, 50),
(1, 3, 150)]
L = Symbol('L', 'var * {x: int, y: int, z: int}')
R = Symbol('R', 'var * {x: int, y: int, w: int}')
j = join(L, R, ['x', 'y'])
print(list(compute(j, {L: left, R: right})))
assert list(compute(j, {L: left, R: right})) == [(1, 2, 3, 30),
(1, 3, 5, 50),
(1, 3, 5, 150)]
@pytest.mark.xfail(reason="This doesn't necessarily make sense")
def test_column_of_column():
assert list(compute(t['name']['name'], data)) == \
list(compute(t['name'], data))
def test_Distinct():
assert set(compute(Distinct(t['name']), data)) == set(['Alice', 'Bob'])
assert set(compute(Distinct(t), data)) == set(map(tuple, data))
e = Distinct(t)
assert compute(e, []) == ()
def test_Distinct_count():
t2 = t['name'].distinct()
gby = by(t2, t2.count())
result = set(compute(gby, data))
assert result == set([('Alice', 1), ('Bob', 1)])
def test_sort():
assert list(compute(t.sort('amount'), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort('amount', ascending=True), data)) == \
sorted(data, key=lambda x: x[1], reverse=False)
assert list(compute(t.sort(['amount', 'id']), data)) == \
sorted(data, key=lambda x: (x[1], x[2]), reverse=False)
def test_fancy_sort():
assert list(compute(t.sort(t['amount']), data)) ==\
list(compute(t.sort('amount'), data))
assert list(compute(t.sort(t[['amount', 'id']]), data)) ==\
list(compute(t.sort(['amount', 'id']), data))
assert list(compute(t.sort(0-t['amount']), data)) ==\
list(compute(t.sort('amount'), data))[::-1]
def test_head():
assert list(compute(t.head(1), data)) == [data[0]]
e = head(t, 101)
p = list(range(1000))
assert len(list(compute(e, p))) == 101
def test_graph_double_join():
idx = [['A', 1],
['B', 2],
['C', 3],
['D', 4],
['E', 5],
['F', 6]]
arc = [[1, 3],
[2, 3],
[4, 3],
[5, 3],
[3, 1],
[2, 1],
[5, 1],
[1, 6],
[2, 6],
[4, 6]]
wanted = [['A'],
['F']]
t_idx = Symbol('t_idx', 'var * {name: string, b: int32}')
t_arc = Symbol('t_arc', 'var * {a: int32, b: int32}')
t_wanted = Symbol('t_wanted', 'var * {name: string}')
j = join(join(t_idx, t_arc, 'b'), t_wanted, 'name')[['name', 'b', 'a']]
result = compute(j, {t_idx: idx, t_arc: arc, t_wanted: wanted})
result = sorted(map(tuple, result))
expected = sorted([('A', 3, 1),
('A', 2, 1),
('A', 5, 1),
('F', 1, 6),
('F', 2, 6),
('F', 4, 6)])
assert result == expected
def test_label():
assert list(compute((t['amount'] * 1).label('foo'), data)) == \
list(compute((t['amount'] * 1), data))
def test_relabel_join():
names = Symbol('names', 'var * {first: string, last: string}')
siblings = join(names.relabel({'first': 'left'}),
names.relabel({'first': 'right'}),
'last')[['left', 'right']]
data = [('Alice', 'Smith'),
('Bob', 'Jones'),
('Charlie', 'Smith')]
print(set(compute(siblings, {names: data})))
assert ('Alice', 'Charlie') in set(compute(siblings, {names: data}))
assert ('Alice', 'Bob') not in set(compute(siblings, {names: data}))
def test_map_column():
inc = lambda x: x + 1
assert list(compute(t['amount'].map(inc, 'int'), data)) == [x[1] + 1 for x in data]
def test_map():
assert (list(compute(t.map(lambda _, amt, id: amt + id, 'int'), data)) ==
[x[1] + x[2] for x in data])
def test_apply_column():
result = compute(Apply(t['amount'], builtins.sum), data)
expected = compute(t['amount'].sum(), data)
assert result == expected
def test_apply():
data2 = tuple(map(tuple, data))
assert compute(Apply(t, hash), data2) == hash(data2)
def test_map_datetime():
from datetime import datetime
data = [['A', 0], ['B', 1]]
t = Symbol('t', 'var * {foo: string, datetime: int64}')
result = list(compute(t['datetime'].map(datetime.utcfromtimestamp,
'datetime'), data))
expected = [datetime(1970, 1, 1, 0, 0, 0), datetime(1970, 1, 1, 0, 0, 1)]
assert result == expected
def test_by_multi_column_grouper():
t = Symbol('t', 'var * {x: int, y: int, z: int}')
expr = by(t[['x', 'y']], t['z'].count())
data = [(1, 2, 0), (1, 2, 0), (1, 1, 0)]
print(set(compute(expr, data)))
assert set(compute(expr, data)) == set([(1, 2, 2), (1, 1, 1)])
def test_merge():
col = (t['amount'] * 2).label('new')
expr = merge(t['name'], col)
assert list(compute(expr, data)) == [(row[0], row[1] * 2) for row in data]
def test_map_columnwise():
colwise = t['amount'] * t['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data]
def test_map_columnwise_of_selection():
tsel = t[t['name'] == 'Alice']
colwise = tsel['amount'] * tsel['id']
expr = colwise.map(lambda x: x / 10, 'int64', name='mod')
assert list(compute(expr, data)) == [((row[1]*row[2]) / 10) for row in data[::2]]
def test_selection_out_of_order():
expr = t['name'][t['amount'] < 100]
assert list(compute(expr, data)) == ['Alice']
def test_recursive_rowfunc():
f = rrowfunc(t['name'], t)
assert [f(row) for row in data] == [row[0] for row in data]
f = rrowfunc(t['amount'] + t['id'], t)
assert [f(row) for row in data] == [row[1] + row[2] for row in data]
assert raises(Exception, lambda: rrowfunc(t[t['amount'] < 0]['name'], t))
def test_recursive_rowfunc_is_used():
expr = by(t['name'], (2 * (t['amount'] + t['id'])).sum())
expected = [('Alice', 2*(101 + 53)),
('Bob', 2*(202))]
assert set(compute(expr, data)) == set(expected)
class TestFunctionExpressions(object):
def test_compound(self):
s = t.amount.mean()
r = compute(s, data)
assert isinstance(r, float)
expr = cos(s) ** 2 + sin(s) ** 2
result = compute(expr, data)
expected = math.cos(r) ** 2 + math.sin(r) ** 2
assert result == expected
def test_user_defined_function(self):
s = t.amount.count()
r = compute(s, data)
assert isinstance(r, int)
def myfunc(x):
return (cos(x) + sin(x)) ** 2 / math.pi
result = compute(myfunc(s), data)
expected = (math.cos(r) + math.sin(r)) ** 2 / math.pi
assert result == expected
def test_user_defined_calls(self):
s = t.amount.count()
r = compute(s, data)
def myother(y):
return 2 + y ** 10
def myfunc(x):
return myother((cos(x) + sin(x)) ** 2 / math.pi)
result = compute(myfunc(s), data)
expected = myother((math.cos(r) + math.sin(r)) ** 2 / math.pi)
assert result == expected
def test_union():
L1 = [['Alice', 100, 1],
['Bob', 200, 2],
['Alice', 50, 3]]
L2 = [['Alice', 100, 4],
['Bob', 200, 5],
['Alice', 50, 6]]
L3 = [['Alice', 100, 7],
['Bob', 200, 8],
['Alice', 50, 9]]
t1 = Symbol('t1', 'var * {name: string, amount: int, id: int}')
t2 = Symbol('t2', 'var * {name: string, amount: int, id: int}')
t3 = Symbol('t3', 'var * {name: string, amount: int, id: int}')
expr = union(t1, t2, t3)
result = list(compute(expr, {t1: L1, t2: L2, t3: L3}))
assert result == L1 + L2 + L3
def test_by_groupby_deep():
data = [(1, 2, 'Alice'),
(1, 3, 'Bob'),
(2, 4, 'Alice'),
(2, 4, '')]
schema = '{x: int, y: int, name: string}'
t = Symbol('t', datashape.var * schema)
t2 = t[t['name'] != '']
t3 = merge(t2.x, t2.name)
expr = by(t3.name, t3.x.mean())
result = set(compute(expr, data))
assert result == set([('Alice', 1.5), ('Bob', 1.0)])
def test_by_then_sort_dict_items_sequence():
expr = by(tbig.name, tbig.amount.sum()).sort('name')
assert compute(expr, databig)
def test_summary():
expr = summary(count=t.id.count(), sum=t.amount.sum())
assert compute(expr, data) == (3, 350)
assert compute(expr, iter(data)) == (3, 350)
def test_summary_keepdims():
assert compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=True), data) == \
(compute(summary(count=t.id.count(), sum=t.amount.sum(),
keepdims=False), data),)
def test_summary_by():
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 150),
('Bob', 1, 200)])
expr = by(t.name, summary(count=t.id.count(), sum=(t.amount + 1).sum()))
assert set(compute(expr, data)) == set([('Alice', 2, 152),
('Bob', 1, 201)])
expr = by(t.name, summary(count=t.id.count(), sum=t.amount.sum() + 1))
assert set(compute(expr, data)) == set([('Alice', 2, 151),
('Bob', 1, 201)])
def test_reduction_arithmetic():
expr = t.amount.sum() + 1
assert compute(expr, data) == 351
def test_scalar_arithmetic():
x = Symbol('x', 'real')
y = Symbol('y', 'real')
assert compute(x + y, {x: 2, y: 3}) == 5
assert compute_up(x + y, 2, 3) == 5
assert compute_up(x * y, 2, 3) == 6
assert compute_up(x / y, 6, 3) == 2
assert compute_up(x % y, 4, 3) == 1
assert compute_up(x ** y, 4, 3) == 64
assert compute(x + 1, {x: 2}) == 3
assert compute(x * 2, {x: 2}) == 4
assert compute(1 + x, {x: 2}) == 3
assert compute(2 * x, {x: 2}) == 4
assert compute_up(-x, 1) == -1
assert compute_up(blaze.sin(x), 1) == math.sin(1)
def test_like():
t = Symbol('t', 'var * {name: string, city: string}')
data = [('Alice Smith', 'New York'),
('Bob Smith', 'Chicago'),
('Alice Walker', 'LA')]
assert list(compute(t.like(name='Alice*'), data)) == [data[0], data[2]]
assert list(compute(t.like(name='lice*'), data)) == []
assert list(compute(t.like(name='*Smith*'), data)) == [data[0], data[1]]
assert list(compute(t.like(name='*Smith*', city='New York'), data)) == [data[0]]
def test_datetime_comparison():
data = [['Alice', date(2000, 1, 1)],
['Bob', date(2000, 2, 2)],
['Alice', date(2000, 3, 3)]]
t = Symbol('t', 'var * {name: string, when: date}')
assert list(compute(t[t.when > '2000-01-01'], data)) == data[1:]
def test_datetime_access():
data = [['Alice', 100, 1, datetime(2000, 1, 1, 1, 1, 1)],
['Bob', 200, 2, datetime(2000, 1, 1, 1, 1, 1)],
['Alice', 50, 3, datetime(2000, 1, 1, 1, 1, 1)]]
t = Symbol('t',
'var * {amount: float64, id: int64, name: string, when: datetime}')
assert list(compute(t.when.year, data)) == [2000, 2000, 2000]
assert list(compute(t.when.second, data)) == [1, 1, 1]
assert list(compute(t.when.date, data)) == [date(2000, 1, 1)] * 3
payments = [{'name': 'Alice', 'payments': [
{'amount': 100, 'when': datetime(2000, 1, 1, 1, 1, 1)},
{'amount': 200, 'when': datetime(2000, 2, 2, 2, 2, 2)}
]},
{'name': 'Bob', 'payments': [
{'amount': 300, 'when': datetime(2000, 3, 3, 3, 3, 3)},
{'amount': -400, 'when': datetime(2000, 4, 4, 4, 4, 4)},
{'amount': 500, 'when': datetime(2000, 5, 5, 5, 5, 5)}
]},
]
payments_ordered = [('Alice', [( 100, datetime(2000, 1, 1, 1, 1, 1)),
( 200, datetime(2000, 2, 2, 2, 2, 2))]),
('Bob', [( 300, datetime(2000, 3, 3, 3, 3, 3)),
(-400, datetime(2000, 4, 4, 4, 4, 4)),
( 500, datetime(2000, 5, 5, 5, 5, 5))])]
payment_dshape = 'var * {name: string, payments: var * {amount: int32, when: datetime}}'
def test_nested():
t = Symbol('t', payment_dshape)
assert list(compute(t.name, payments_ordered)) == ['Alice', 'Bob']
assert list(compute(t.payments, payments_ordered)) == \
[p[1] for p in payments_ordered]
assert list(compute(t.payments.amount, payments_ordered)) == \
[(100, 200), (300, -400, 500)]
assert list(compute(t.payments.amount + 1, payments_ordered)) ==\
[(101, 201), (301, -399, 501)]
def test_scalar():
s = Symbol('s', '{name: string, id: int32, payments: var * {amount: int32, when: datetime}}')
data = ('Alice', 1, ((100, datetime(2000, 1, 1, 1, 1, 1)),
(200, datetime(2000, 2, 2, 2, 2, 2)),
(300, datetime(2000, 3, 3, 3, 3, 3))))
assert compute(s.name, data) == 'Alice'
assert compute(s.id + 1, data) == 2
assert tuple(compute(s.payments.amount, data)) == (100, 200, 300)
assert tuple(compute(s.payments.amount + 1, data)) == (101, 201, 301)
def test_slice():
assert compute(t[0], data) == data[0]
assert list(compute(t[:2], data)) == list(data[:2])
assert list(compute(t.name[:2], data)) == [data[0][0], data[1][0]]
|
|
# encoding: utf-8
from __future__ import unicode_literals
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'ProductImage._order'
db.add_column('shop_productimage', '_order', self.gf('django.db.models.fields.IntegerField')(null=True), keep_default=False)
if not db.dry_run:
try:
for product in orm.Product.objects.all():
for i, image in enumerate(product.images.all().order_by("id")):
image._order = i
image.save()
except:
pass
def backwards(self, orm):
# Deleting field 'ProductImage._order'
db.delete_column('shop_productimage', '_order')
models = {
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.assignedkeyword': {
'Meta': {'object_name': 'AssignedKeyword'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'keyword': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'assignments'", 'to': "orm['generic.Keyword']"}),
'object_pk': ('django.db.models.fields.IntegerField', [], {})
},
'generic.keyword': {
'Meta': {'object_name': 'Keyword'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'generic.rating': {
'Meta': {'object_name': 'Rating'},
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'object_pk': ('django.db.models.fields.IntegerField', [], {}),
'value': ('django.db.models.fields.IntegerField', [], {})
},
'pages.page': {
'Meta': {'ordering': "('titles',)", 'object_name': 'Page'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'content_model': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'in_footer': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'in_navigation': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
#'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'login_required': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'parent': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': "orm['pages.Page']"}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'titles': ('django.db.models.fields.CharField', [], {'max_length': '1000', 'null': 'True'})
},
'shop.cart': {
'Meta': {'object_name': 'Cart'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'auto_now': 'True', 'null': 'True', 'blank': 'True'})
},
'shop.cartitem': {
'Meta': {'object_name': 'CartItem'},
'cart': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Cart']"}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True'}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'url': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'shop.category': {
'Meta': {'ordering': "('_order',)", 'object_name': 'Category', '_ormbases': ['pages.Page']},
'combined': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'options': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'product_options'", 'blank': 'True', 'to': "orm['shop.ProductOption']"}),
'page_ptr': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['pages.Page']", 'unique': 'True', 'primary_key': 'True'}),
'price_max': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'price_min': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.Sale']", 'null': 'True', 'blank': 'True'})
},
'shop.discountcode': {
'Meta': {'object_name': 'DiscountCode'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'discountcode_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'code': ('cartridge.shop.fields.DiscountCodeField', [], {'unique': 'True', 'max_length': '20'}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'free_shipping': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'min_purchase': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'shop.order': {
'Meta': {'ordering': "('-id',)", 'object_name': 'Order'},
'additional_instructions': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'billing_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'billing_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'billing_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'billing_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'billing_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'discount_code': ('cartridge.shop.fields.DiscountCodeField', [], {'max_length': '20', 'blank': 'True'}),
'discount_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'item_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'shipping_detail_city': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_country': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_first_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_last_name': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_phone': ('django.db.models.fields.CharField', [], {'max_length': '20'}),
'shipping_detail_postcode': ('django.db.models.fields.CharField', [], {'max_length': '10'}),
'shipping_detail_state': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_detail_street': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'shipping_total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'shipping_type': ('django.db.models.fields.CharField', [], {'max_length': '50', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'time': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'total': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'user_id': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'})
},
'shop.orderitem': {
'Meta': {'object_name': 'OrderItem'},
'description': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'order': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'items'", 'to': "orm['shop.Order']"}),
'quantity': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'max_length': '20'}),
'total_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'default': "'0'", 'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.product': {
'Meta': {'object_name': 'Product'},
'available': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'products'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'content': ('mezzanine.core.fields.RichTextField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'null': 'True', 'blank': 'True'}),
'description': ('django.db.models.fields.TextField', [], {'blank': 'True'}),
'expiry_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
#'keywords': ('mezzanine.generic.fields.KeywordsField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.AssignedKeyword']"}),
'keywords_string': ('django.db.models.fields.CharField', [], {'max_length': '500', 'blank': 'True'}),
'publish_date': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
#'rating': ('mezzanine.generic.fields.RatingField', [], {'object_id_field': "'object_pk'", 'to': "orm['generic.Rating']"}),
'rating_average': ('django.db.models.fields.FloatField', [], {'default': '0'}),
'rating_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'related_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'short_url': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'site': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['sites.Site']"}),
'slug': ('django.db.models.fields.CharField', [], {'max_length': '100', 'null': 'True', 'blank': 'True'}),
'status': ('django.db.models.fields.IntegerField', [], {'default': '1'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'upsell_products': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'upsell_products_rel_+'", 'blank': 'True', 'to': "orm['shop.Product']"})
},
'shop.productaction': {
'Meta': {'unique_together': "(('product', 'timestamp'),)", 'object_name': 'ProductAction'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'actions'", 'to': "orm['shop.Product']"}),
'timestamp': ('django.db.models.fields.IntegerField', [], {}),
'total_cart': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'total_purchase': ('django.db.models.fields.IntegerField', [], {'default': '0'})
},
'shop.productimage': {
'Meta': {'ordering': "('_order',)", 'object_name': 'ProductImage'},
'_order': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'description': ('django.db.models.fields.CharField', [], {'max_length': '100', 'blank': 'True'}),
'file': ('django.db.models.fields.files.ImageField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'images'", 'to': "orm['shop.Product']"})
},
'shop.productoption': {
'Meta': {'object_name': 'ProductOption'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'type': ('django.db.models.fields.IntegerField', [], {})
},
'shop.productvariation': {
'Meta': {'ordering': "('-default',)", 'object_name': 'ProductVariation'},
'default': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'image': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['shop.ProductImage']", 'null': 'True', 'blank': 'True'}),
'num_in_stock': ('django.db.models.fields.IntegerField', [], {'null': 'True', 'blank': 'True'}),
'option1': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'option2': ('cartridge.shop.fields.OptionField', [], {'max_length': '50', 'null': 'True'}),
'product': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'variations'", 'to': "orm['shop.Product']"}),
'sale_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sale_id': ('django.db.models.fields.IntegerField', [], {'null': 'True'}),
'sale_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'sale_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'sku': ('cartridge.shop.fields.SKUField', [], {'unique': 'True', 'max_length': '20'}),
'unit_price': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'})
},
'shop.sale': {
'Meta': {'object_name': 'Sale'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'categories': ('django.db.models.fields.related.ManyToManyField', [], {'symmetrical': 'False', 'related_name': "'sale_related'", 'blank': 'True', 'to': "orm['shop.Category']"}),
'discount_deduct': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_exact': ('cartridge.shop.fields.MoneyField', [], {'null': 'True', 'max_digits': '10', 'decimal_places': '2', 'blank': 'True'}),
'discount_percent': ('django.db.models.fields.DecimalField', [], {'null': 'True', 'max_digits': '4', 'decimal_places': '2', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'products': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['shop.Product']", 'symmetrical': 'False', 'blank': 'True'}),
'title': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'valid_from': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'valid_to': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'})
},
'sites.site': {
'Meta': {'ordering': "('domain',)", 'object_name': 'Site', 'db_table': "'django_site'"},
'domain': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
}
}
complete_apps = ['shop']
|
|
# Python
import pytest
import mock
import json
import os
import six
from datetime import timedelta
# Django
from django.core.urlresolvers import resolve
from django.core.cache import cache
from django.utils.six.moves.urllib.parse import urlparse
from django.utils import timezone
from django.contrib.auth.models import User
from django.conf import settings
from django.core.serializers.json import DjangoJSONEncoder
from jsonbfield.fields import JSONField
# AWX
from awx.main.models.projects import Project
from awx.main.models.ha import Instance
from awx.main.models.fact import Fact
from rest_framework.test import (
APIRequestFactory,
force_authenticate,
)
from awx.main.models.credential import CredentialType, Credential
from awx.main.models.jobs import JobTemplate, SystemJobTemplate
from awx.main.models.inventory import (
Group,
Inventory,
InventoryUpdate,
InventorySource
)
from awx.main.models.organization import (
Organization,
Team,
)
from awx.main.models.rbac import Role
from awx.main.models.notifications import (
NotificationTemplate,
Notification
)
from awx.main.models.workflow import WorkflowJobTemplate
from awx.main.models.ad_hoc_commands import AdHocCommand
@pytest.fixture(autouse=True)
def clear_cache():
'''
Clear cache (local memory) for each test to prevent using cached settings.
'''
cache.clear()
@pytest.fixture(scope="session", autouse=True)
def celery_memory_broker():
'''
FIXME: Not sure how "far" just setting the BROKER_URL will get us.
We may need to influence CELERY's configuration like we do in the old unit tests (see base.py)
Allows django signal code to execute without the need for redis
'''
settings.BROKER_URL='memory://localhost/'
@pytest.fixture
def user():
def u(name, is_superuser=False):
try:
user = User.objects.get(username=name)
except User.DoesNotExist:
user = User(username=name, is_superuser=is_superuser, password=name)
user.save()
return user
return u
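# A minimal usage sketch (names are illustrative): inside a test, user('alice')
# returns a regular user and user('root', True) a superuser; if a user with that
# username already exists it is reused rather than recreated.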
@pytest.fixture
def check_jobtemplate(project, inventory, credential):
return \
JobTemplate.objects.create(
job_type='check',
project=project,
inventory=inventory,
credential=credential,
name='check-job-template'
)
@pytest.fixture
def deploy_jobtemplate(project, inventory, credential):
return \
JobTemplate.objects.create(
job_type='run',
project=project,
inventory=inventory,
credential=credential,
name='deploy-job-template'
)
@pytest.fixture
def team(organization):
return organization.teams.create(name='test-team')
@pytest.fixture
def team_member(user, team):
ret = user('team-member', False)
team.member_role.members.add(ret)
return ret
@pytest.fixture(scope="session", autouse=True)
def project_playbooks():
'''
Return playbook_files as playbooks for manual projects when testing.
'''
class PlaybooksMock(mock.PropertyMock):
def __get__(self, obj, obj_type):
return obj.playbook_files
mocked = mock.patch.object(Project, 'playbooks', new_callable=PlaybooksMock)
mocked.start()
@pytest.fixture
@mock.patch.object(Project, "update", lambda self, **kwargs: None)
def project(instance, organization):
prj = Project.objects.create(name="test-proj",
description="test-proj-desc",
organization=organization,
playbook_files=['helloworld.yml', 'alt-helloworld.yml'],
local_path='_92__test_proj',
scm_revision='1234567890123456789012345678901234567890',
scm_url='localhost',
scm_type='git'
)
return prj
@pytest.fixture
@mock.patch.object(Project, "update", lambda self, **kwargs: None)
def manual_project(instance, organization):
prj = Project.objects.create(name="test-manual-proj",
description="manual-proj-desc",
organization=organization,
playbook_files=['helloworld.yml', 'alt-helloworld.yml'],
local_path='_92__test_proj'
)
return prj
@pytest.fixture
def project_factory(organization):
def factory(name):
try:
prj = Project.objects.get(name=name)
except Project.DoesNotExist:
prj = Project.objects.create(name=name,
description="description for " + name,
organization=organization
)
return prj
return factory
@pytest.fixture
def job_factory(job_template, admin):
def factory(job_template=job_template, initial_state='new', created_by=admin):
return job_template.create_job(created_by=created_by, status=initial_state)
return factory
@pytest.fixture
def team_factory(organization):
def factory(name):
try:
t = Team.objects.get(name=name)
except Team.DoesNotExist:
t = Team.objects.create(name=name,
description="description for " + name,
organization=organization)
return t
return factory
@pytest.fixture
def user_project(user):
owner = user('owner')
return Project.objects.create(name="test-user-project", created_by=owner, description="test-user-project-desc")
@pytest.fixture
def insights_project():
return Project.objects.create(name="test-insights-project", scm_type="insights")
@pytest.fixture
def instance(settings):
return Instance.objects.create(uuid=settings.SYSTEM_UUID, hostname="instance.example.org", capacity=100)
@pytest.fixture
def organization(instance):
return Organization.objects.create(name="test-org", description="test-org-desc")
@pytest.fixture
def credentialtype_ssh():
ssh = CredentialType.defaults['ssh']()
ssh.save()
return ssh
@pytest.fixture
def credentialtype_aws():
aws = CredentialType.defaults['aws']()
aws.save()
return aws
@pytest.fixture
def credentialtype_net():
net = CredentialType.defaults['net']()
net.save()
return net
@pytest.fixture
def credentialtype_vault():
vault_type = CredentialType.defaults['vault']()
vault_type.save()
return vault_type
@pytest.fixture
def credentialtype_scm():
scm_type = CredentialType.defaults['scm']()
scm_type.save()
return scm_type
@pytest.fixture
def credentialtype_insights():
insights_type = CredentialType.defaults['insights']()
insights_type.save()
return insights_type
@pytest.fixture
def credential(credentialtype_aws):
return Credential.objects.create(credential_type=credentialtype_aws, name='test-cred',
inputs={'username': 'something', 'password': 'secret'})
@pytest.fixture
def net_credential(credentialtype_net):
return Credential.objects.create(credential_type=credentialtype_net, name='test-cred',
inputs={'username': 'something', 'password': 'secret'})
@pytest.fixture
def vault_credential(credentialtype_vault):
return Credential.objects.create(credential_type=credentialtype_vault, name='test-cred',
inputs={'vault_password': 'secret'})
@pytest.fixture
def machine_credential(credentialtype_ssh):
return Credential.objects.create(credential_type=credentialtype_ssh, name='machine-cred',
inputs={'username': 'test_user', 'password': 'pas4word'})
@pytest.fixture
def scm_credential(credentialtype_scm):
return Credential.objects.create(credential_type=credentialtype_scm, name='scm-cred',
inputs={'username': 'optimus', 'password': 'prime'})
@pytest.fixture
def insights_credential(credentialtype_insights):
return Credential.objects.create(credential_type=credentialtype_insights, name='insights-cred',
inputs={'username': 'morocco_mole', 'password': 'secret_squirrel'})
@pytest.fixture
def org_credential(organization, credentialtype_aws):
return Credential.objects.create(credential_type=credentialtype_aws, name='test-cred',
inputs={'username': 'something', 'password': 'secret'},
organization=organization)
@pytest.fixture
def inventory(organization):
return organization.inventories.create(name="test-inv")
@pytest.fixture
def insights_inventory(inventory):
inventory.scm_type = 'insights'
inventory.save()
return inventory
@pytest.fixture
def scm_inventory_source(inventory, project):
inv_src = InventorySource(
name="test-scm-inv",
source_project=project,
source='scm',
source_path='inventory_file',
update_on_project_update=True,
inventory=inventory,
scm_last_revision=project.scm_revision)
with mock.patch('awx.main.models.unified_jobs.UnifiedJobTemplate.update'):
inv_src.save()
return inv_src
@pytest.fixture
def inventory_factory(organization):
def factory(name, org=organization):
try:
inv = Inventory.objects.get(name=name, organization=org)
except Inventory.DoesNotExist:
inv = Inventory.objects.create(name=name, organization=org)
return inv
return factory
@pytest.fixture
def label(organization):
return organization.labels.create(name="test-label", description="test-label-desc")
@pytest.fixture
def notification_template(organization):
return NotificationTemplate.objects.create(name='test-notification_template',
organization=organization,
notification_type="webhook",
notification_configuration=dict(url="http://localhost",
headers={"Test": "Header"}))
@pytest.fixture
def notification_template_with_encrypt(organization):
return NotificationTemplate.objects.create(name='test-notification_template_with_encrypt',
organization=organization,
notification_type="slack",
notification_configuration=dict(channels=["Foo", "Bar"],
token="token"))
@pytest.fixture
def notification(notification_template):
return Notification.objects.create(notification_template=notification_template,
status='successful',
notifications_sent=1,
notification_type='email',
recipients='[email protected]',
subject='email subject')
@pytest.fixture
def job_template_with_survey_passwords(job_template_with_survey_passwords_factory):
return job_template_with_survey_passwords_factory(persisted=True)
@pytest.fixture
def admin(user):
return user('admin', True)
@pytest.fixture
def system_auditor(user):
u = user('system_auditor', False)
Role.singleton('system_auditor').members.add(u)
return u
@pytest.fixture
def alice(user):
return user('alice', False)
@pytest.fixture
def bob(user):
return user('bob', False)
@pytest.fixture
def rando(user):
"Rando, the random user that doesn't have access to anything"
return user('rando', False)
@pytest.fixture
def org_admin(user, organization):
ret = user('org-admin', False)
organization.admin_role.members.add(ret)
organization.member_role.members.add(ret)
return ret
@pytest.fixture
def org_auditor(user, organization):
ret = user('org-auditor', False)
organization.auditor_role.members.add(ret)
organization.member_role.members.add(ret)
return ret
@pytest.fixture
def org_member(user, organization):
ret = user('org-member', False)
organization.member_role.members.add(ret)
return ret
@pytest.fixture
def organizations(instance):
def rf(organization_count=1):
orgs = []
for i in range(0, organization_count):
o = Organization.objects.create(name="test-org-%d" % i, description="test-org-desc")
orgs.append(o)
return orgs
return rf
@pytest.fixture
def group_factory(inventory):
def g(name):
try:
return Group.objects.get(name=name, inventory=inventory)
except:
return Group.objects.create(inventory=inventory, name=name)
return g
@pytest.fixture
def hosts(group_factory):
group1 = group_factory('group-1')
def rf(host_count=1):
hosts = []
for i in range(0, host_count):
name = '%s-host-%s' % (group1.name, i)
(host, created) = group1.inventory.hosts.get_or_create(name=name)
if created:
group1.hosts.add(host)
hosts.append(host)
return hosts
return rf
@pytest.fixture
def group(inventory):
return inventory.groups.create(name='single-group')
@pytest.fixture
def inventory_source(inventory):
return InventorySource.objects.create(name='single-inv-src',
inventory=inventory, source='gce')
@pytest.fixture
def inventory_source_factory(inventory_factory):
def invsrc(name, source=None, inventory=None):
if inventory is None:
inventory = inventory_factory("inv-is-%s" % name)
if source is None:
source = 'file'
try:
return inventory.inventory_sources.get(name=name)
except:
return inventory.inventory_sources.create(name=name, source=source)
return invsrc
@pytest.fixture
def inventory_update(inventory_source):
return InventoryUpdate.objects.create(inventory_source=inventory_source)
@pytest.fixture
def host(group, inventory):
return group.hosts.create(name='single-host', inventory=inventory)
@pytest.fixture
def permissions():
return {
'admin':{'create':True, 'read':True, 'write':True,
'update':True, 'delete':True, 'scm_update':True, 'execute':True, 'use':True,},
'auditor':{'read':True, 'create':False, 'write':False,
'update':False, 'delete':False, 'scm_update':False, 'execute':False, 'use':False,},
'usage':{'read':False, 'create':False, 'write':False,
'update':False, 'delete':False, 'scm_update':False, 'execute':False, 'use':True,},
}
def _request(verb):
def rf(url, data_or_user=None, user=None, middleware=None, expect=None, **kwargs):
if type(data_or_user) is User and user is None:
user = data_or_user
elif 'data' not in kwargs:
kwargs['data'] = data_or_user
if 'format' not in kwargs:
kwargs['format'] = 'json'
view, view_args, view_kwargs = resolve(urlparse(url)[2])
request = getattr(APIRequestFactory(), verb)(url, **kwargs)
if middleware:
middleware.process_request(request)
if user:
force_authenticate(request, user=user)
response = view(request, *view_args, **view_kwargs)
if middleware:
middleware.process_response(request, response)
if expect:
if response.status_code != expect:
print(response.data)
assert response.status_code == expect
response.render()
return response
return rf
@pytest.fixture
def post():
return _request('post')
@pytest.fixture
def get():
return _request('get')
@pytest.fixture
def put():
return _request('put')
@pytest.fixture
def patch():
return _request('patch')
@pytest.fixture
def delete():
return _request('delete')
@pytest.fixture
def head():
return _request('head')
@pytest.fixture
def options():
return _request('options')
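# A minimal usage sketch for the request fixtures above (URL and payload are
# illustrative, not taken from this file):
#   response = post('/api/v2/hosts/', {'name': 'example'}, admin, expect=201)
# i.e. the URL first, then the payload, the acting user, and the HTTP status
# code the test expects the view to return.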
@pytest.fixture
def fact_scans(group_factory, fact_ansible_json, fact_packages_json, fact_services_json):
group1 = group_factory('group-1')
def rf(fact_scans=1, timestamp_epoch=timezone.now()):
facts_json = {}
facts = []
module_names = ['ansible', 'services', 'packages']
timestamp_current = timestamp_epoch
facts_json['ansible'] = fact_ansible_json
facts_json['packages'] = fact_packages_json
facts_json['services'] = fact_services_json
for i in range(0, fact_scans):
for host in group1.hosts.all():
for module_name in module_names:
facts.append(Fact.objects.create(host=host, timestamp=timestamp_current, module=module_name, facts=facts_json[module_name]))
timestamp_current += timedelta(days=1)
return facts
return rf
def _fact_json(module_name):
current_dir = os.path.dirname(os.path.realpath(__file__))
with open('%s/%s.json' % (current_dir, module_name)) as f:
return json.load(f)
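# For example (illustrative), _fact_json('ansible') loads ansible.json from the
# directory containing this file and returns the parsed JSON payload.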
@pytest.fixture
def fact_ansible_json():
return _fact_json('ansible')
@pytest.fixture
def fact_packages_json():
return _fact_json('packages')
@pytest.fixture
def fact_services_json():
return _fact_json('services')
@pytest.fixture
def ad_hoc_command_factory(inventory, machine_credential, admin):
def factory(inventory=inventory, credential=machine_credential, initial_state='new', created_by=admin):
adhoc = AdHocCommand(
name='test-adhoc', inventory=inventory, credential=credential,
status=initial_state, created_by=created_by
)
adhoc.save()
return adhoc
return factory
@pytest.fixture
def job_template(organization):
jt = JobTemplate(name='test-job_template')
jt.save()
return jt
@pytest.fixture
def job_template_labels(organization, job_template):
job_template.labels.create(name="label-1", organization=organization)
job_template.labels.create(name="label-2", organization=organization)
return job_template
@pytest.fixture
def workflow_job_template(organization):
wjt = WorkflowJobTemplate(name='test-workflow_job_template')
wjt.save()
return wjt
@pytest.fixture
def workflow_job_factory(workflow_job_template, admin):
def factory(workflow_job_template=workflow_job_template, initial_state='new', created_by=admin):
return workflow_job_template.create_unified_job(created_by=created_by, status=initial_state)
return factory
@pytest.fixture
def system_job_template():
sys_jt = SystemJobTemplate(name='test-system_job_template', job_type='cleanup_jobs')
sys_jt.save()
return sys_jt
@pytest.fixture
def system_job_factory(system_job_template, admin):
def factory(system_job_template=system_job_template, initial_state='new', created_by=admin):
return system_job_template.create_unified_job(created_by=created_by, status=initial_state)
return factory
def dumps(value):
return DjangoJSONEncoder().encode(value)
# Taken from https://github.com/django-extensions/django-extensions/blob/54fe88df801d289882a79824be92d823ab7be33e/django_extensions/db/fields/json.py
def get_db_prep_save(self, value, connection, **kwargs):
"""Convert our JSON object to a string before we save"""
if value is None and self.null:
return None
# default values come in as strings; only non-strings should be
# run through `dumps`
if not isinstance(value, six.string_types):
value = dumps(value)
return value
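# Roughly speaking (illustrative values): a dict such as {"a": 1} is serialized
# to the string '{"a": 1}' via DjangoJSONEncoder before saving, while plain
# strings pass through unchanged and None is kept for nullable fields.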
@pytest.fixture
def monkeypatch_jsonbfield_get_db_prep_save(mocker):
JSONField.get_db_prep_save = get_db_prep_save
|
|
""":py:mod:`postgres` is a high-value abstraction over `psycopg2`_.
Installation
------------
:py:mod:`postgres` is available on `GitHub`_ and on `PyPI`_::
$ pip install postgres
We `test <https://travis-ci.org/gittip/postgres.py>`_ against Python 2.6, 2.7,
3.2, and 3.3. We don't yet have a testing matrix for different versions of
:py:mod:`psycopg2` or PostgreSQL.
:py:mod:`postgres` is in the `public domain`_.
Tutorial
--------
Instantiate a :py:class:`Postgres` object when your application starts:
>>> from postgres import Postgres
>>> db = Postgres("postgres://jrandom@localhost/testdb")
Use :py:meth:`~postgres.Postgres.run` to run SQL statements:
>>> db.run("CREATE TABLE foo (bar text)")
>>> db.run("INSERT INTO foo VALUES ('baz')")
>>> db.run("INSERT INTO foo VALUES ('buz')")
Use :py:meth:`~postgres.Postgres.all` to run SQL and fetch all results:
>>> db.all("SELECT * FROM foo ORDER BY bar")
[{'bar': 'baz'}, {'bar': 'buz'}]
Use :py:meth:`~postgres.Postgres.one_or_zero` to run SQL and fetch one result
or :py:class:`None`:
>>> db.one_or_zero("SELECT * FROM foo WHERE bar='baz'")
{'bar': 'baz'}
>>> db.one_or_zero("SELECT * FROM foo WHERE bar='blam'")
Bind Parameters
+++++++++++++++
In case you're not familiar with bind parameters in `DB-API 2.0`_, the basic
idea is that you put ``%(foo)s`` in your SQL strings, and then pass in a second
argument, a :py:class:`dict`, containing parameters that :py:mod:`psycopg2` (as
an implementation of DB-API 2.0) will bind to the query in a way that is safe
against `SQL injection`_. (This is inspired by old-style Python string
formatting, but it is not the same.)
>>> db.one_or_zero("SELECT * FROM foo WHERE bar=%(bar)s", {"bar": "baz"})
{'bar': 'baz'}
Never build SQL strings out of user input!
Always pass user input as bind parameters!
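
For example, assuming a variable ``bar`` holds untrusted user input, only the
second form below is safe (a sketch, not a doctest)::

    db.all("SELECT * FROM foo WHERE bar='" + bar + "'")           # vulnerable
    db.all("SELECT * FROM foo WHERE bar=%(bar)s", {"bar": bar})   # safe
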
Context Managers
++++++++++++++++
Eighty percent of your database usage should be covered by the simple
:py:meth:`~postgres.Postgres.run`, :py:meth:`~postgres.Postgres.all`,
:py:meth:`~postgres.Postgres.one_or_zero` API introduced above. For the other
20%, :py:mod:`postgres` provides two context managers for working at
increasingly lower levels of abstraction. The lowest level of abstraction in
:py:mod:`postgres` is a :py:mod:`psycopg2` `connection pool
<http://initd.org/psycopg/docs/pool.html>`_ that we configure and manage for
you. Everything in :py:mod:`postgres`, both the simple API and the context
managers, uses this connection pool.
Use the :py:func:`~postgres.Postgres.get_transaction` context manager to work
directly with a :py:mod:`psycopg2` `cursor
<http://initd.org/psycopg/docs/cursor.html>`_ while still taking advantage of
connection pooling and automatic transaction management:
>>> with db.get_transaction() as txn:
... txn.execute("INSERT INTO foo VALUES ('blam')")
... txn.execute("SELECT * FROM foo ORDER BY bar")
... txn.fetchall()
...
[{'bar': 'baz'}, {'bar': 'blam'}, {'bar': 'buz'}]
Note that other calls won't see the changes made in your transaction until the end
of your code block, when the context manager commits the transaction for you::
>>> with db.get_transaction() as txn:
... txn.execute("INSERT INTO foo VALUES ('blam')")
... db.all("SELECT * FROM foo ORDER BY bar")
...
[{'bar': 'baz'}, {'bar': 'buz'}]
>>> db.all("SELECT * FROM foo ORDER BY bar")
[{'bar': 'baz'}, {'bar': 'blam'}, {'bar': 'buz'}]
The :py:func:`~postgres.Postgres.get_transaction` manager gives you a cursor
with :py:attr:`autocommit` turned off on its connection. If the block under
management raises an exception, the connection is rolled back. Otherwise it's
committed. Use this when you want a series of statements to be part of one
transaction, but you don't need fine-grained control over the transaction. For
fine-grained control, use :py:func:`~postgres.Postgres.get_connection` to get a
connection straight from the connection pool:
>>> with db.get_connection() as connection:
... cursor = connection.cursor()
... cursor.execute("SELECT * FROM foo ORDER BY bar")
... cursor.fetchall()
...
[{'bar': 'baz'}, {'bar': 'buz'}]
A connection gotten in this way will have :py:attr:`autocommit` turned off, and
it'll never be implicitly committed otherwise. It'll actually be rolled back
when you're done with it, so it's up to you to explicitly commit as needed.
This is the lowest-level abstraction that :py:mod:`postgres` provides,
basically just a pre-configured connection pool from :py:mod:`psycopg2`.
API
---
.. _psycopg2: http://initd.org/psycopg/
.. _GitHub: https://github.com/gittip/postgres
.. _PyPI: https://pypi.python.org/pypi/postgres
.. _this advice: http://initd.org/psycopg/docs/usage.html#unicode-handling
.. _public domain: http://creativecommons.org/publicdomain/zero/1.0/
.. _DB-API 2.0: http://www.python.org/dev/peps/pep-0249/
.. _SQL injection: http://en.wikipedia.org/wiki/SQL_injection
"""
from __future__ import unicode_literals
try: # Python 2
import urlparse
# "Note: In Python 2, if you want to uniformly receive all your database
# input in Unicode, you can register the related typecasters globally as
# soon as Psycopg is imported."
# -- http://initd.org/psycopg/docs/usage.html#unicode-handling
import psycopg2.extensions
psycopg2.extensions.register_type(psycopg2.extensions.UNICODE)
psycopg2.extensions.register_type(psycopg2.extensions.UNICODEARRAY)
except ImportError: # Python 3
import urllib.parse as urlparse
import psycopg2
from psycopg2.extras import RealDictCursor
from psycopg2.pool import ThreadedConnectionPool as ConnectionPool
__version__ = '2.0.0rc1'
# A Helper
# ========
# Heroku gives us a URL, psycopg2 wants a DSN. Convert!
if 'postgres' not in urlparse.uses_netloc:
# Teach urlparse about postgres:// URLs.
urlparse.uses_netloc.append('postgres')
def url_to_dsn(url):
parsed = urlparse.urlparse(url)
dbname = parsed.path[1:] # /foobar
user = parsed.username
password = parsed.password
host = parsed.hostname
port = parsed.port
if port is None:
port = '5432' # postgres default port
dsn = "dbname=%s user=%s password=%s host=%s port=%s"
dsn %= (dbname, user, password, host, port)
return dsn
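# For example (illustrative values only, not a real database):
#   url_to_dsn("postgres://jrandom:secret@localhost/test")
# returns:
#   "dbname=test user=jrandom password=secret host=localhost port=5432"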
# Exceptions
# ==========
class OutOfBounds(Exception):
def __init__(self, n, lo, hi):
self.n = n
self.lo = lo
self.hi = hi
def __str__(self):
msg = "Got {n} rows; expecting "
if self.lo == self.hi:
msg += "exactly {lo}."
elif self.hi - self.lo == 1:
msg += "{lo} or {hi}."
else:
msg += "between {lo} and {hi} (inclusive)."
return msg.format(**self.__dict__)
class TooFew(OutOfBounds): pass
class TooMany(OutOfBounds): pass
# The Main Event
# ==============
class Postgres(object):
"""Interact with a `PostgreSQL <http://www.postgresql.org/>`_ database.
:param unicode url: A ``postgres://`` URL or a `PostgreSQL connection string <http://www.postgresql.org/docs/current/static/libpq-connect.html>`_
:param int minconn: The minimum size of the connection pool
:param int maxconn: The maximum size of the connection pool
:param cursor_factory: Defaults to :py:class:`~psycopg2.extras.RealDictCursor`
This is the main object that :py:mod:`postgres` provides, and you should
have one instance per process for each PostgreSQL database your process
wants to talk to using this library.
>>> import postgres
>>> db = postgres.Postgres("postgres://jrandom@localhost/test")
(Note that importing :py:mod:`postgres` under Python 2 will cause the
registration of typecasters with :py:mod:`psycopg2` to ensure that you get
unicode instead of bytestrings for text data, according to `this advice`_.)
When instantiated, this object creates a `thread-safe connection pool
<http://initd.org/psycopg/docs/pool.html#psycopg2.pool.ThreadedConnectionPool>`_,
which opens :py:attr:`minconn` connections immediately, and up to
:py:attr:`maxconn` according to demand. The fundamental value of a
:py:class:`~postgres.Postgres` instance is that it runs everything through
its connection pool.
    Check the :py:mod:`psycopg2` `docs
    <http://initd.org/psycopg/docs/extras.html#connection-and-cursor-subclasses>`_
    for additional cursor factories you can pass as :py:attr:`cursor_factory`,
    such as :py:class:`NamedTupleCursor`.
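    For example (a minimal sketch), to get rows back as named tuples instead
    of dicts::

        from psycopg2.extras import NamedTupleCursor
        db = postgres.Postgres("postgres://jrandom@localhost/test",
                               cursor_factory=NamedTupleCursor)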
The names in our simple API, :py:meth:`~postgres.Postgres.run`,
:py:meth:`~postgres.Postgres.all`, and
:py:meth:`~postgres.Postgres.one_or_zero`, were chosen to be short and
memorable, and to not conflict with the DB-API 2.0 :py:meth:`execute`,
:py:meth:`fetchall`, and :py:meth:`fetchone` methods, which have slightly
different semantics (under DB-API 2.0 you call :py:meth:`execute` on a
cursor and then call one of the :py:meth:`fetch*` methods on the same
cursor to retrieve rows; with our simple API there is no second
:py:meth:`fetch` step). See `this ticket`_ for more of the rationale behind
these names. The context managers on this class are named starting with
:py:meth:`get_` to set them apart from the simple-case API. Note that when
working inside a block under one of the context managers, you're using
DB-API 2.0 (:py:meth:`execute` + :py:meth:`fetch*`), not our simple API
(:py:meth:`~postgres.Postgres.run`, :py:meth:`~postgres.Postgres.all`,
:py:meth:`~postgres.Postgres.one_or_zero`).
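    For example (a sketch; the query is illustrative), here is the same query
    through the simple API and through a context manager::

        db.all("SELECT * FROM foo")               # simple API: one call

        with db.get_transaction() as txn:         # DB-API 2.0: execute + fetch
            txn.execute("SELECT * FROM foo")
            rows = txn.fetchall()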
.. _this ticket: https://github.com/gittip/postgres.py/issues/16
"""
    def __init__(self, url, minconn=1, maxconn=10,
                 cursor_factory=RealDictCursor):
        if url.startswith("postgres://"):
            dsn = url_to_dsn(url)
        else:
            # Assume we were already given a libpq connection string.
            dsn = url
        Connection.cursor_factory = cursor_factory
        self.pool = ConnectionPool( minconn=minconn
                                  , maxconn=maxconn
                                  , dsn=dsn
                                  , connection_factory=Connection
                                   )
def run(self, sql, parameters=None):
"""Execute a query and discard any results.
:param unicode sql: the SQL statement to execute
:param parameters: the bind parameters for the SQL statement
:type parameters: dict or tuple
:returns: :py:const:`None`
>>> db.run("CREATE TABLE foo (bar text)")
>>> db.run("INSERT INTO foo VALUES ('baz')")
>>> db.run("INSERT INTO foo VALUES ('buz')")
"""
with self.get_transaction() as txn:
txn.execute(sql, parameters)
def all(self, sql, parameters=None):
"""Execute a query and return all results.
:param unicode sql: the SQL statement to execute
:param parameters: the bind parameters for the SQL statement
:type parameters: dict or tuple
:returns: :py:class:`list` of rows
>>> for row in db.all("SELECT bar FROM foo"):
... print(row["bar"])
...
baz
buz
"""
with self.get_transaction() as txn:
txn.execute(sql, parameters)
return txn.fetchall()
def one_or_zero(self, sql, parameters=None, zero=None):
"""Execute a query and return a single result or a default value.
        :param unicode sql: the SQL statement to execute
        :param parameters: the bind parameters for the SQL statement
        :type parameters: dict or tuple
        :param zero: the value to return if zero results are found
        :returns: a single row or the value of the :py:attr:`zero` argument
        :raises: :py:exc:`~postgres.TooFew` or :py:exc:`~postgres.TooMany`
Use this for the common case where there should only be one record, but
it may not exist yet.
>>> row = db.one_or_zero("SELECT * FROM foo WHERE bar='blam'")
>>> if row is None:
... print("No blam yet.")
...
No blam yet.
"""
out = self._some(sql, parameters, 0, 1)
if out is None:
out = zero
return out
def _some(self, sql, parameters=None, lo=0, hi=1):
# This is undocumented (and largely untested) because I think it's a
# rare case where this is wanted directly. It was added to make one and
# one_or_zero DRY when we had one. Help yourself to it now that you've
# found it. :^)
with self.get_transaction() as txn:
txn.execute(sql, parameters)
if txn.rowcount < lo:
raise TooFew(txn.rowcount, lo, hi)
elif txn.rowcount > hi:
raise TooMany(txn.rowcount, lo, hi)
return txn.fetchone()
def get_transaction(self, *a, **kw):
"""Return a :py:class:`~postgres.TransactionContextManager` that uses
our connection pool.
This gets you a cursor with :py:attr:`autocommit` turned off on its
connection. If your code block inside the :py:obj:`with` statement
raises an exception, the transaction will be rolled back. Otherwise,
it'll be committed. The context manager closes the cursor when the
block ends.
Use this when you want a series of statements to be part of one
transaction, but you don't need fine-grained control over the
transaction.
>>> with db.get_transaction() as txn:
... txn.execute("SELECT * FROM foo")
... txn.fetchall()
...
[{'bar': 'baz'}, {'bar': 'buz'}]
"""
return TransactionContextManager(self.pool, *a, **kw)
def get_connection(self):
"""Return a :py:class:`~postgres.ConnectionContextManager` that uses
our connection pool.
Use this when you want to take advantage of connection pooling, but
otherwise need full control, for example, to do complex things with
transactions.
>>> with db.get_connection() as connection:
... cursor = connection.cursor()
... cursor.execute("SELECT * FROM foo")
... cursor.fetchall()
...
[{'bar': 'baz'}, {'bar': 'buz'}]
"""
return ConnectionContextManager(self.pool)
class Connection(psycopg2.extensions.connection):
"""This is a subclass of :py:class:`psycopg2.extensions.connection`.
:py:class:`Postgres` uses this class as the :py:attr:`connection_factory`
for its connection pool. We use this subclass to support the
:py:attr:`cursor_factory` parameter to the :py:class:`Postgres`
constructor, and to ensure that the client encoding is ``UTF-8``.
"""
cursor_factory = None # set this before using this object
def __init__(self, *a, **kw):
psycopg2.extensions.connection.__init__(self, *a, **kw)
self.set_client_encoding('UTF-8')
def cursor(self, *a, **kw):
if 'cursor_factory' not in kw:
kw['cursor_factory'] = self.cursor_factory
return psycopg2.extensions.connection.cursor(self, *a, **kw)
# Context Managers
# ================
class TransactionContextManager(object):
"""Instantiated once per :py:func:`~postgres.Postgres.get_transaction`
call.
The return value of :py:func:`TransactionContextManager.__enter__` is a
:py:class:`psycopg2.extras.RealDictCursor`. Any positional and keyword
arguments to our constructor are passed through to the cursor constructor.
When the block starts, the :py:class:`~postgres.Connection` underlying the
cursor is checked out of the connection pool and :py:attr:`autocommit` is
set to :py:const:`False`. If the block raises an exception, the
:py:class:`~postgres.Connection` is rolled back. Otherwise, it's committed.
In either case, the cursor is closed, and the
:py:class:`~postgres.Connection` is put back in the pool.
"""
def __init__(self, pool, *a, **kw):
self.pool = pool
self.a = a
self.kw = kw
self.conn = None
def __enter__(self):
"""Get a connection from the pool.
"""
self.conn = self.pool.getconn()
self.conn.autocommit = False
self.cursor = self.conn.cursor(*self.a, **self.kw)
return self.cursor
def __exit__(self, *exc_info):
"""Put our connection back in the pool.
"""
if exc_info == (None, None, None):
self.conn.commit()
else:
self.conn.rollback()
self.cursor.close()
self.conn.autocommit = False
self.pool.putconn(self.conn)
class ConnectionContextManager(object):
"""Instantiated once per :py:func:`~postgres.Postgres.get_connection` call.
The return value of :py:func:`ConnectionContextManager.__enter__` is a
:py:class:`postgres.Connection`. When the block starts, a
:py:class:`~postgres.Connection` is checked out of the connection pool and
:py:attr:`autocommit` is set to :py:const:`False`. When the block ends, the
:py:class:`~postgres.Connection` is rolled back before being put back in
the pool.
"""
def __init__(self, pool):
self.pool = pool
self.conn = None
def __enter__(self):
"""Get a connection from the pool.
"""
self.conn = self.pool.getconn()
self.conn.autocommit = False
return self.conn
def __exit__(self, *exc_info):
"""Put our connection back in the pool.
"""
self.conn.rollback()
self.conn.autocommit = False
self.pool.putconn(self.conn)
|
|
# Copyright 2020 Datera
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from unittest import mock
import uuid
from cinder import context
from cinder import exception
from cinder.tests.unit import test
from cinder import version
from cinder.volume import configuration as conf
from cinder.volume import volume_types
sys.modules['dfs_sdk'] = mock.MagicMock()
from cinder.volume.drivers.datera import datera_iscsi as datera # noqa
datera.datc.DEFAULT_SI_SLEEP = 0
datera.datc.DEFAULT_SNAP_SLEEP = 0
OS_PREFIX = datera.datc.OS_PREFIX
UNMANAGE_PREFIX = datera.datc.UNMANAGE_PREFIX
DateraAPIException = datera.datc.DateraAPIException
class DateraVolumeTestCasev22(test.TestCase):
def setUp(self):
self.cfg = mock.Mock(spec=conf.Configuration)
self.cfg.san_ip = '127.0.0.1'
self.cfg.datera_api_port = '7717'
self.cfg.san_is_local = True
self.cfg.datera_num_replicas = 1
self.cfg.datera_503_timeout = 0.01
self.cfg.datera_503_interval = 0.001
self.cfg.datera_debug = False
self.cfg.san_login = 'user'
self.cfg.san_password = 'pass'
self.cfg.datera_tenant_id = '/root/test-tenant'
self.cfg.driver_client_cert = None
self.cfg.driver_client_cert_key = None
self.cfg.datera_disable_profiler = False
self.cfg.datera_ldap_server = ""
self.cfg.datera_volume_type_defaults = {}
self.cfg.datera_disable_template_override = False
self.cfg.datera_disable_extended_metadata = False
self.cfg.datera_enable_image_cache = False
self.cfg.datera_image_cache_volume_type_id = ""
self.cfg.filter_function = lambda: None
self.cfg.goodness_function = lambda: None
self.cfg.use_chap_auth = False
self.cfg.chap_username = ""
self.cfg.chap_password = ""
super(DateraVolumeTestCasev22, self).setUp()
mock_exec = mock.Mock()
mock_exec.return_value = ('', '')
self.driver = datera.DateraDriver(execute=mock_exec,
configuration=self.cfg)
self.driver.api = mock.MagicMock()
self.driver.apiv = "2.2"
self.driver.set_initialized()
# No-op config getter
self.driver.configuration.get = lambda *args, **kwargs: {}
# self.addCleanup(self.api_patcher.stop)
self.driver.datera_version = "3.3.3"
def test_volume_create_success(self):
testvol = _stub_volume()
self.assertIsNone(self.driver.create_volume(testvol))
def test_volume_create_fails(self):
testvol = _stub_volume()
self.driver.api.app_instances.create.side_effect = DateraAPIException
self.assertRaises(DateraAPIException,
self.driver.create_volume,
testvol)
@mock.patch.object(volume_types, 'get_volume_type')
def test_create_volume_with_extra_specs(self, mock_get_type):
mock_get_type.return_value = {
'name': u'The Best',
'qos_specs_id': None,
'deleted': False,
'created_at': '2015-08-14 04:18:11',
'updated_at': None,
'extra_specs': {
u'volume_backend_name': u'datera',
u'qos:max_iops_read': u'2000',
u'qos:max_iops_write': u'4000',
u'qos:max_iops_total': u'4000'
},
'is_public': True,
'deleted_at': None,
'id': u'dffb4a83-b8fb-4c19-9f8c-713bb75db3b1',
'description': None
}
mock_volume = _stub_volume(
volume_type_id='dffb4a83-b8fb-4c19-9f8c-713bb75db3b1'
)
self.assertIsNone(self.driver.create_volume(mock_volume))
self.assertTrue(mock_get_type.called)
def test_create_cloned_volume_success(self):
testvol = _stub_volume()
ref = _stub_volume(id=str(uuid.uuid4()))
self.assertIsNone(self.driver.create_cloned_volume(testvol, ref))
def test_create_cloned_volume_success_larger(self):
newsize = 2
testvol = _stub_volume(size=newsize)
ref = _stub_volume(id=str(uuid.uuid4()))
mock_extend = mock.MagicMock()
self.driver._extend_volume_2_2 = mock_extend
self.driver._extend_volume_2_1 = mock_extend
self.driver.create_cloned_volume(testvol, ref)
mock_extend.assert_called_once_with(testvol, newsize)
def test_create_cloned_volume_fails(self):
testvol = _stub_volume()
ref = _stub_volume(id=str(uuid.uuid4()))
self.driver.api.app_instances.create.side_effect = DateraAPIException
self.assertRaises(DateraAPIException,
self.driver.create_cloned_volume,
testvol,
ref)
def test_delete_volume_success(self):
testvol = _stub_volume()
self.driver.api.app_instances.delete.return_value = {}
self.assertIsNone(self.driver.delete_volume(testvol))
def test_delete_volume_not_found(self):
testvol = _stub_volume()
self.driver.api.app_instances.list.side_effect = exception.NotFound
self.assertIsNone(self.driver.delete_volume(testvol))
def test_delete_volume_fails(self):
testvol = _stub_volume()
self.driver.api.app_instances.list.side_effect = DateraAPIException
self.assertRaises(DateraAPIException,
self.driver.delete_volume, testvol)
def test_ensure_export_success(self):
testvol = _stub_volume()
ctxt = context.get_admin_context()
self.assertIsNone(self.driver.ensure_export(ctxt, testvol, None))
def test_ensure_export_fails(self):
# This can't fail because it's a no-op
testvol = _stub_volume()
ctxt = context.get_admin_context()
self.assertIsNone(self.driver.ensure_export(ctxt, testvol, None))
def test_create_export_target_does_not_exist_success(self):
testvol = _stub_volume()
aimock = mock.MagicMock()
simock = mock.MagicMock()
simock.reload.return_value = simock
aimock.storage_instances.list.return_value = [simock]
simock.op_state = "available"
self.driver.cvol_to_ai = mock.Mock()
self.driver.cvol_to_ai.return_value = aimock
self.assertIsNone(self.driver.create_export(None, testvol, None))
def test_create_export_fails(self):
testvol = _stub_volume()
aimock = mock.MagicMock()
simock = mock.MagicMock()
simock.reload.return_value = simock
aimock.storage_instances.list.side_effect = DateraAPIException
simock.op_state = "available"
self.driver.cvol_to_ai = mock.Mock()
self.driver.cvol_to_ai.return_value = aimock
self.assertRaises(DateraAPIException,
self.driver.create_export,
None,
testvol,
None)
def test_initialize_connection_success(self):
testvol = _stub_volume()
aimock = mock.MagicMock()
simock = mock.MagicMock()
simock.access = {"ips": ["test-ip"], "iqn": "test-iqn"}
simock.reload.return_value = simock
aimock.storage_instances.list.return_value = [simock]
self.driver.cvol_to_ai = mock.Mock()
self.driver.cvol_to_ai.return_value = aimock
self.assertEqual(self.driver.initialize_connection(testvol, {}),
{'data': {'discard': False,
'target_discovered': False,
'target_iqn': 'test-iqn',
'target_lun': 0,
'target_portal': 'test-ip:3260',
'volume_id': testvol['id']},
'driver_volume_type': 'iscsi'})
def test_initialize_connection_fails(self):
testvol = _stub_volume()
aimock = mock.MagicMock()
simock = mock.MagicMock()
simock.access = {"ips": ["test-ip"], "iqn": "test-iqn"}
simock.reload.return_value = simock
aimock.storage_instances.list.side_effect = DateraAPIException
self.driver.cvol_to_ai = mock.Mock()
self.driver.cvol_to_ai.return_value = aimock
self.assertRaises(DateraAPIException,
self.driver.initialize_connection,
testvol,
{})
def test_detach_volume_success(self):
testvol = _stub_volume()
self.driver.cvol_to_ai = mock.MagicMock()
aimock = mock.MagicMock()
aimock.set.return_value = {}
self.driver.cvol_to_ai.return_value = aimock
ctxt = context.get_admin_context()
self.assertIsNone(self.driver.detach_volume(ctxt, testvol))
def test_detach_volume_fails(self):
testvol = _stub_volume()
self.driver.cvol_to_ai = mock.MagicMock()
aimock = mock.MagicMock()
aimock.set.side_effect = DateraAPIException
self.driver.cvol_to_ai.return_value = aimock
ctxt = context.get_admin_context()
self.assertRaises(DateraAPIException,
self.driver.detach_volume,
ctxt, testvol)
def test_detach_volume_not_found(self):
testvol = _stub_volume()
self.driver.cvol_to_ai = mock.MagicMock()
aimock = mock.MagicMock()
aimock.set.side_effect = exception.NotFound
self.driver.cvol_to_ai.return_value = aimock
ctxt = context.get_admin_context()
self.assertIsNone(self.driver.detach_volume(ctxt, testvol))
def test_create_snapshot_success(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
volmock = mock.MagicMock()
snapmock = mock.MagicMock()
snapmock.reload.return_value = snapmock
snapmock.uuid = testsnap['id']
snapmock.op_state = "available"
volmock.snapshots.create.return_value = snapmock
self.driver.cvol_to_dvol = mock.MagicMock()
self.driver.cvol_to_dvol.return_value = volmock
self.assertIsNone(self.driver.create_snapshot(testsnap))
def test_create_snapshot_fails(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
self.driver.api.app_instances.list.side_effect = DateraAPIException
self.assertRaises(DateraAPIException,
self.driver.create_snapshot,
testsnap)
def test_delete_snapshot_success(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
self.assertIsNone(self.driver.delete_snapshot(testsnap))
def test_delete_snapshot_not_found(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
self.driver.cvol_to_dvol = mock.MagicMock()
aimock = mock.MagicMock()
aimock.snapshots.list.side_effect = exception.NotFound
self.driver.cvol_to_dvol.return_value = aimock
self.assertIsNone(self.driver.delete_snapshot(testsnap))
def test_delete_snapshot_fails(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
self.driver.cvol_to_dvol = mock.MagicMock()
aimock = mock.MagicMock()
aimock.snapshots.list.side_effect = DateraAPIException
self.driver.cvol_to_dvol.return_value = aimock
self.assertRaises(DateraAPIException,
self.driver.delete_snapshot,
testsnap)
def test_create_volume_from_snapshot_success(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
testvol = _stub_volume()
volmock = mock.MagicMock()
snapmock = mock.MagicMock()
snapmock.reload.return_value = snapmock
snapmock.uuid = testsnap['id']
snapmock.op_state = "available"
self.driver.cvol_to_dvol = mock.MagicMock()
self.driver.cvol_to_dvol.return_value = volmock
volmock.snapshots.list.return_value = [snapmock]
self.assertIsNone(self.driver.create_volume_from_snapshot(
testvol, testsnap))
def test_create_volume_from_snapshot_fails(self):
testsnap = _stub_snapshot(volume_id=str(uuid.uuid4()))
testvol = _stub_volume()
self.driver.cvol_to_dvol = mock.MagicMock()
aimock = mock.MagicMock()
aimock.snapshots.list.side_effect = DateraAPIException
self.driver.cvol_to_dvol.return_value = aimock
self.assertRaises(DateraAPIException,
self.driver.create_volume_from_snapshot,
testvol,
testsnap)
def test_extend_volume_success(self):
newsize = 2
testvol = _stub_volume()
mockvol = mock.MagicMock()
mockvol.size = newsize
self.driver.cvol_to_dvol = mock.MagicMock()
self.driver.cvol_to_dvol.return_value = mockvol
self.driver._offline_flip_2_2 = mock.MagicMock()
self.driver._offline_flip_2_1 = mock.MagicMock()
self.assertIsNone(self.driver.extend_volume(testvol, newsize))
def test_extend_volume_fails(self):
newsize = 2
testvol = _stub_volume()
mockvol = mock.MagicMock()
mockvol.size = newsize
mockvol.set.side_effect = DateraAPIException
self.driver.cvol_to_dvol = mock.MagicMock()
self.driver.cvol_to_dvol.return_value = mockvol
self.driver._offline_flip_2_2 = mock.MagicMock()
self.driver._offline_flip_2_1 = mock.MagicMock()
self.assertRaises(DateraAPIException,
self.driver.extend_volume,
testvol,
newsize)
def test_manage_existing(self):
existing_ref = {'source-name': "A:B:C:D"}
testvol = _stub_volume()
self.driver.cvol_to_ai = mock.MagicMock()
self.assertIsNone(self.driver.manage_existing(testvol, existing_ref))
def test_manage_existing_wrong_ref(self):
existing_ref = {'source-name': "ABCD"}
testvol = _stub_volume()
self.driver.cvol_to_ai = mock.MagicMock()
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing,
testvol,
existing_ref)
def test_manage_existing_get_size(self):
existing_ref = {'source-name': "A:B:C:D"}
testvol = _stub_volume()
volmock = mock.MagicMock()
volmock.size = testvol['size']
self.driver.cvol_to_dvol = mock.MagicMock()
self.driver.cvol_to_dvol.return_value = volmock
self.assertEqual(self.driver.manage_existing_get_size(
testvol, existing_ref), testvol['size'])
def test_manage_existing_get_size_wrong_ref(self):
existing_ref = {'source-name': "ABCD"}
testvol = _stub_volume()
self.driver.cvol_to_ai = mock.MagicMock()
self.assertRaises(exception.ManageExistingInvalidReference,
self.driver.manage_existing_get_size,
testvol,
existing_ref)
def test_get_manageable_volumes(self):
testvol = _stub_volume()
v1 = {'reference': {'source-name': 'some-ai:storage-1:volume-1'},
'size': 1,
'safe_to_manage': True,
'reason_not_safe': '',
'cinder_id': None,
'extra_info': {'snapshots': '[]'}}
v2 = {'reference': {'source-name': 'some-other-ai:storage-1:volume-1'},
'size': 2,
'safe_to_manage': True,
'reason_not_safe': '',
'cinder_id': None,
'extra_info': {'snapshots': '[]'}}
mock1 = mock.MagicMock()
mock1.__getitem__.side_effect = ['some-ai']
mock1.name = 'some-ai'
mocksi1 = mock.MagicMock()
mocksi1.name = "storage-1"
mocksi1.__getitem__.side_effect = [[mock.MagicMock()]]
mock1.storage_instances.list.return_value = [mocksi1]
mockvol1 = mock.MagicMock()
mockvol1.name = "volume-1"
mockvol1.size = v1['size']
mocksi1.volumes.list.return_value = [mockvol1]
mock2 = mock.MagicMock()
mock2.__getitem__.side_effect = ['some-other-ai']
mock2.name = 'some-other-ai'
mocksi2 = mock.MagicMock()
mocksi2.name = "storage-1"
mocksi2.__getitem__.side_effect = [[mock.MagicMock()]]
mock2.storage_instances.list.return_value = [mocksi2]
mockvol2 = mock.MagicMock()
mockvol2.name = "volume-1"
mockvol2.size = v2['size']
mocksi2.volumes.list.return_value = [mockvol2]
listmock = mock.MagicMock()
listmock.return_value = [mock1, mock2]
self.driver.api.app_instances.list = listmock
marker = mock.MagicMock()
limit = mock.MagicMock()
offset = mock.MagicMock()
sort_keys = mock.MagicMock()
sort_dirs = mock.MagicMock()
if (version.version_string() >= '15.0.0'):
with mock.patch(
'cinder.volume.volume_utils.paginate_entries_list') \
as mpage:
self.driver.get_manageable_volumes(
[testvol], marker, limit, offset, sort_keys, sort_dirs)
mpage.assert_called_once_with(
[v1, v2], marker, limit, offset, sort_keys, sort_dirs)
else:
with mock.patch(
'cinder.volume.utils.paginate_entries_list') as mpage:
self.driver.get_manageable_volumes(
[testvol], marker, limit, offset, sort_keys, sort_dirs)
mpage.assert_called_once_with(
[v1, v2], marker, limit, offset, sort_keys, sort_dirs)
def test_unmanage(self):
testvol = _stub_volume()
self.assertIsNone(self.driver.unmanage(testvol))
class DateraVolumeTestCasev21(DateraVolumeTestCasev22):
def setUp(self):
super(DateraVolumeTestCasev21, self).setUp()
self.driver.api = mock.MagicMock()
self.driver.apiv = '2.1'
def _stub_volume(*args, **kwargs):
uuid = 'c20aba21-6ef6-446b-b374-45733b4883ba'
name = 'volume-00000001'
size = 1
volume = {}
volume['id'] = kwargs.get('id', uuid)
volume['project_id'] = "test-project"
volume['display_name'] = kwargs.get('display_name', name)
volume['size'] = kwargs.get('size', size)
volume['provider_location'] = kwargs.get('provider_location', None)
volume['volume_type_id'] = kwargs.get('volume_type_id', None)
return volume
def _stub_snapshot(*args, **kwargs):
uuid = '0bb34f0c-fea4-48e0-bf96-591120ac7e3c'
name = 'snapshot-00000001'
volume_size = 1
snap = {}
snap['id'] = kwargs.get('id', uuid)
snap['project_id'] = "test-project"
snap['display_name'] = kwargs.get('display_name', name)
snap['volume_id'] = kwargs.get('volume_id', None)
snap['volume_size'] = kwargs.get('volume_size', volume_size)
return snap
|
|
from django.conf import settings
from django.core.exceptions import ImproperlyConfigured
from .. import Error, Tags, Warning, register
REFERRER_POLICY_VALUES = {
'no-referrer', 'no-referrer-when-downgrade', 'origin',
'origin-when-cross-origin', 'same-origin', 'strict-origin',
'strict-origin-when-cross-origin', 'unsafe-url',
}
SECRET_KEY_INSECURE_PREFIX = 'django-insecure-'
SECRET_KEY_MIN_LENGTH = 50
SECRET_KEY_MIN_UNIQUE_CHARACTERS = 5
W001 = Warning(
"You do not have 'django.middleware.security.SecurityMiddleware' "
"in your MIDDLEWARE so the SECURE_HSTS_SECONDS, "
"SECURE_CONTENT_TYPE_NOSNIFF, SECURE_BROWSER_XSS_FILTER, "
"SECURE_REFERRER_POLICY, and SECURE_SSL_REDIRECT settings will have no "
"effect.",
id='security.W001',
)
W002 = Warning(
"You do not have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE, so your pages will not be served with an "
"'x-frame-options' header. Unless there is a good reason for your "
"site to be served in a frame, you should consider enabling this "
"header to help prevent clickjacking attacks.",
id='security.W002',
)
W004 = Warning(
"You have not set a value for the SECURE_HSTS_SECONDS setting. "
"If your entire site is served only over SSL, you may want to consider "
"setting a value and enabling HTTP Strict Transport Security. "
"Be sure to read the documentation first; enabling HSTS carelessly "
"can cause serious, irreversible problems.",
id='security.W004',
)
W005 = Warning(
"You have not set the SECURE_HSTS_INCLUDE_SUBDOMAINS setting to True. "
"Without this, your site is potentially vulnerable to attack "
"via an insecure connection to a subdomain. Only set this to True if "
"you are certain that all subdomains of your domain should be served "
"exclusively via SSL.",
id='security.W005',
)
W006 = Warning(
"Your SECURE_CONTENT_TYPE_NOSNIFF setting is not set to True, "
"so your pages will not be served with an "
"'X-Content-Type-Options: nosniff' header. "
"You should consider enabling this header to prevent the "
"browser from identifying content types incorrectly.",
id='security.W006',
)
W008 = Warning(
"Your SECURE_SSL_REDIRECT setting is not set to True. "
"Unless your site should be available over both SSL and non-SSL "
"connections, you may want to either set this setting True "
"or configure a load balancer or reverse-proxy server "
"to redirect all connections to HTTPS.",
id='security.W008',
)
W009 = Warning(
"Your SECRET_KEY has less than %(min_length)s characters, less than "
"%(min_unique_chars)s unique characters, or it's prefixed with "
"'%(insecure_prefix)s' indicating that it was generated automatically by "
"Django. Please generate a long and random SECRET_KEY, otherwise many of "
"Django's security-critical features will be vulnerable to attack." % {
'min_length': SECRET_KEY_MIN_LENGTH,
'min_unique_chars': SECRET_KEY_MIN_UNIQUE_CHARACTERS,
'insecure_prefix': SECRET_KEY_INSECURE_PREFIX,
},
id='security.W009',
)
W018 = Warning(
"You should not have DEBUG set to True in deployment.",
id='security.W018',
)
W019 = Warning(
"You have "
"'django.middleware.clickjacking.XFrameOptionsMiddleware' in your "
"MIDDLEWARE, but X_FRAME_OPTIONS is not set to 'DENY'. "
"Unless there is a good reason for your site to serve other parts of "
"itself in a frame, you should change it to 'DENY'.",
id='security.W019',
)
W020 = Warning(
"ALLOWED_HOSTS must not be empty in deployment.",
id='security.W020',
)
W021 = Warning(
"You have not set the SECURE_HSTS_PRELOAD setting to True. Without this, "
"your site cannot be submitted to the browser preload list.",
id='security.W021',
)
W022 = Warning(
'You have not set the SECURE_REFERRER_POLICY setting. Without this, your '
'site will not send a Referrer-Policy header. You should consider '
'enabling this header to protect user privacy.',
id='security.W022',
)
E023 = Error(
'You have set the SECURE_REFERRER_POLICY setting to an invalid value.',
hint='Valid values are: {}.'.format(', '.join(sorted(REFERRER_POLICY_VALUES))),
id='security.E023',
)
E100 = Error(
"DEFAULT_HASHING_ALGORITHM must be 'sha1' or 'sha256'.",
id='security.E100',
)
def _security_middleware():
return 'django.middleware.security.SecurityMiddleware' in settings.MIDDLEWARE
def _xframe_middleware():
return 'django.middleware.clickjacking.XFrameOptionsMiddleware' in settings.MIDDLEWARE
@register(Tags.security, deploy=True)
def check_security_middleware(app_configs, **kwargs):
passed_check = _security_middleware()
return [] if passed_check else [W001]
@register(Tags.security, deploy=True)
def check_xframe_options_middleware(app_configs, **kwargs):
passed_check = _xframe_middleware()
return [] if passed_check else [W002]
@register(Tags.security, deploy=True)
def check_sts(app_configs, **kwargs):
passed_check = not _security_middleware() or settings.SECURE_HSTS_SECONDS
return [] if passed_check else [W004]
@register(Tags.security, deploy=True)
def check_sts_include_subdomains(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_INCLUDE_SUBDOMAINS is True
)
return [] if passed_check else [W005]
@register(Tags.security, deploy=True)
def check_sts_preload(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
not settings.SECURE_HSTS_SECONDS or
settings.SECURE_HSTS_PRELOAD is True
)
return [] if passed_check else [W021]
@register(Tags.security, deploy=True)
def check_content_type_nosniff(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_CONTENT_TYPE_NOSNIFF is True
)
return [] if passed_check else [W006]
@register(Tags.security, deploy=True)
def check_ssl_redirect(app_configs, **kwargs):
passed_check = (
not _security_middleware() or
settings.SECURE_SSL_REDIRECT is True
)
return [] if passed_check else [W008]
@register(Tags.security, deploy=True)
def check_secret_key(app_configs, **kwargs):
try:
secret_key = settings.SECRET_KEY
except (ImproperlyConfigured, AttributeError):
passed_check = False
else:
passed_check = (
len(set(secret_key)) >= SECRET_KEY_MIN_UNIQUE_CHARACTERS and
len(secret_key) >= SECRET_KEY_MIN_LENGTH and
not secret_key.startswith(SECRET_KEY_INSECURE_PREFIX)
)
return [] if passed_check else [W009]
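# Illustrative example (not part of Django itself):
#
#   SECRET_KEY = 'django-insecure-abc...'   # startproject default -> W009
#   SECRET_KEY = get_random_secret_key()    # 50 random characters -> passes
#
# where get_random_secret_key comes from django.core.management.utils.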
@register(Tags.security, deploy=True)
def check_debug(app_configs, **kwargs):
passed_check = not settings.DEBUG
return [] if passed_check else [W018]
@register(Tags.security, deploy=True)
def check_xframe_deny(app_configs, **kwargs):
passed_check = (
not _xframe_middleware() or
settings.X_FRAME_OPTIONS == 'DENY'
)
return [] if passed_check else [W019]
@register(Tags.security, deploy=True)
def check_allowed_hosts(app_configs, **kwargs):
return [] if settings.ALLOWED_HOSTS else [W020]
@register(Tags.security, deploy=True)
def check_referrer_policy(app_configs, **kwargs):
if _security_middleware():
if settings.SECURE_REFERRER_POLICY is None:
return [W022]
# Support a comma-separated string or iterable of values to allow fallback.
if isinstance(settings.SECURE_REFERRER_POLICY, str):
values = {v.strip() for v in settings.SECURE_REFERRER_POLICY.split(',')}
else:
values = set(settings.SECURE_REFERRER_POLICY)
if not values <= REFERRER_POLICY_VALUES:
return [E023]
return []
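# Illustrative example (not part of Django itself): with
#   SECURE_REFERRER_POLICY = "same-origin, strict-origin-when-cross-origin"
# the check above builds the set {'same-origin',
# 'strict-origin-when-cross-origin'}, which is a subset of
# REFERRER_POLICY_VALUES, so neither W022 nor E023 is returned.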
# RemovedInDjango40Warning
@register(Tags.security)
def check_default_hashing_algorithm(app_configs, **kwargs):
if settings.DEFAULT_HASHING_ALGORITHM not in {'sha1', 'sha256'}:
return [E100]
return []
|
|
import operator
import sys
import unittest
import numpy
import pytest
import chainer
from chainer.backends import cuda
from chainer import basic_math
from chainer import gradient_check
from chainer import testing
from chainer.testing import attr
from chainer.testing import backend
from chainer.utils import type_check
@testing.parameterize(*testing.product({
'shape': [
# x1, x2, y
((3, 2), (3, 2), (3, 2)),
((), (), ()),
((3, 2), (3, 1), (3, 2)),
((2,), (3, 2), (3, 2)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestBinaryOp(unittest.TestCase):
def setUp(self):
self.x1 = numpy.random.uniform(.5, 1, self.shape[0]).astype(self.dtype)
self.x2 = numpy.random.uniform(.5, 1, self.shape[1]).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, self.shape[2]).astype(self.dtype)
self.ggx1 = numpy.random.uniform(-1, 1, self.shape[0]).astype(
self.dtype)
self.ggx2 = numpy.random.uniform(-1, 1, self.shape[1]).astype(
self.dtype)
def check_forward(self, op, x1_data, x2_data):
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
y = op(x1, x2)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 1e-4, 'rtol': 1e-3}
testing.assert_allclose(op(self.x1, self.x2), y.data, **options)
def forward_cpu(self, op):
self.check_forward(op, self.x1, self.x2)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
def test_floordiv_forward_cpu(self):
self.forward_cpu(lambda x, y: x // y)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__radd__(x))
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rsub__(x))
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rmul__(x))
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rtruediv__(x))
def test_rfloordiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rfloordiv__(x))
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y.__rpow__(x))
def forward_gpu(self, op):
self.check_forward(op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2))
@attr.gpu
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
def test_floordiv_forward_gpu(self):
self.forward_gpu(lambda x, y: x // y)
@attr.gpu
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__radd__(x))
@attr.gpu
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rsub__(x))
@attr.gpu
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rmul__(x))
@attr.gpu
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rtruediv__(x))
@attr.gpu
def test_rfloordiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rfloordiv__(x))
@attr.gpu
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y.__rpow__(x))
@attr.gpu
def test_add_constant_allocation(self):
x = 0
y = chainer.Variable(cuda.cupy.ones((1,)))
z = y + x
self.assertEqual(1, z.data.get()[0])
def check_backward(self, op, x1_data, x2_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(op, (x1_data, x2_data), y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, op):
self.check_backward(op, self.x1, self.x2, self.gy)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
def backward_gpu(self, op):
self.check_backward(
op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy))
@attr.gpu
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
def check_double_backward(
self, op, x1_data, x2_data, y_grad, ggx1_data, ggx2_data, **args):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
options.update(args)
gradient_check.check_double_backward(
op, (x1_data, x2_data), y_grad, (ggx1_data, ggx2_data),
dtype=numpy.float64, **options)
def double_backward_cpu(self, op, **options):
self.check_double_backward(
op, self.x1, self.x2, self.gy, self.ggx1, self.ggx2,
**options)
def test_div_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x / y, atol=5e-2, rtol=5e-2)
def test_pow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x ** y)
def test_rpow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y.__rpow__(x))
def double_backward_gpu(self, op, **options):
self.check_double_backward(
op, cuda.to_gpu(self.x1), cuda.to_gpu(self.x2),
cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx1), cuda.to_gpu(self.ggx2), **options)
@attr.gpu
def test_div_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x / y, atol=5e-2, rtol=5e-2)
@attr.gpu
def test_pow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y.__rpow__(x))
@testing.parameterize(*testing.product({
'in_shapes': [
((3, 2),) * 3,
((),) * 3,
((1, 3), (), (2, 1, 2, 1)),
((), (2, 1, 2), (3, 1)),
((3, 1), (1, 1), (2,)),
],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
@backend.inject_backend_tests(
['test_forward', 'test_backward', 'test_double_backward'],
# CPU tests
testing.product({
'use_cuda': [False],
'use_ideep': ['never', 'always'],
})
# GPU tests
+ testing.product({
'use_cuda': [True],
'use_cudnn': ['never', 'always'],
}))
class TestMultipleAdd(unittest.TestCase):
def setUp(self):
x1_shape, x2_shape, x3_shape = self.in_shapes
self.x1 = numpy.random.uniform(.5, 1, x1_shape).astype(self.dtype)
self.x2 = numpy.random.uniform(.5, 1, x2_shape).astype(self.dtype)
self.x3 = numpy.random.uniform(.5, 1, x3_shape).astype(self.dtype)
y_shape = numpy.broadcast(self.x1, self.x2, self.x3).shape
self.gy = numpy.random.uniform(-1, 1, y_shape).astype(self.dtype)
self.ggx1 = numpy.random.uniform(-1, 1, x1_shape).astype(self.dtype)
self.ggx2 = numpy.random.uniform(-1, 1, x2_shape).astype(self.dtype)
self.ggx3 = numpy.random.uniform(-1, 1, x3_shape).astype(self.dtype)
def check_forward(self, func, x1_data, x2_data, x3_data, backend_config):
# convert to cupy.ndarray for GPU tests
if backend_config.use_cuda:
x1_data, x2_data, x3_data = cuda.to_gpu(
(x1_data, x2_data, x3_data))
x1 = chainer.Variable(x1_data)
x2 = chainer.Variable(x2_data)
x3 = chainer.Variable(x3_data)
with backend_config:
y = func(x1, x2, x3)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 1e-4, 'rtol': 1e-3}
testing.assert_allclose(
(self.x1 + self.x2 + self.x3), y.data, **options)
def forward_cpu(self, func, backend_config):
self.check_forward(func, self.x1, self.x2, self.x3, backend_config)
def test_forward(self, backend_config):
func = chainer.functions.add
self.forward_cpu(func, backend_config)
def check_backward(self, func, x1_data, x2_data, x3_data, y_grad,
backend_config):
# convert to cupy.ndarray for GPU tests
if backend_config.use_cuda:
x1_data, x2_data, x3_data, y_grad = cuda.to_gpu(
(x1_data, x2_data, x3_data, y_grad))
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
with backend_config:
gradient_check.check_backward(func, (x1_data, x2_data, x3_data),
y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, func, backend_config):
self.check_backward(
func, self.x1, self.x2, self.x3, self.gy, backend_config)
def test_backward(self, backend_config):
func = chainer.functions.add
self.backward_cpu(func, backend_config)
def check_double_backward(
self, func, backend_config, x1_data, x2_data, x3_data, y_grad,
ggx1_data, ggx2_data, ggx3_data, **args):
# convert to cupy.ndarray for GPU tests
if backend_config.use_cuda:
(x1_data, x2_data, x3_data, y_grad,
ggx1_data, ggx2_data, ggx3_data) = cuda.to_gpu(
(x1_data, x2_data, x3_data, y_grad,
ggx1_data, ggx2_data, ggx3_data))
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
options.update(args)
with backend_config:
gradient_check.check_double_backward(
func, (x1_data, x2_data, x3_data), y_grad,
(ggx1_data,
ggx2_data, ggx3_data),
dtype=numpy.float64, **options)
def double_backward_cpu(self, func, backend_config, **options):
self.check_double_backward(
func, backend_config, self.x1, self.x2, self.x3, self.gy,
self.ggx1, self.ggx2, self.ggx3,
**options)
def test_double_backward(self, backend_config):
func = chainer.functions.add
self.double_backward_cpu(func, backend_config, atol=5e-2, rtol=5e-2)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestBinaryOpConstant(unittest.TestCase):
def _test_constant_one(self, func, lhs, rhs, gpu=False):
if gpu:
lhs = cuda.to_gpu(lhs)
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, self.dtype)
y.backward()
self.assertEqual(x.grad.dtype, self.dtype)
def _test_constant(self, func):
x_data = numpy.array(1, self.dtype)
self._test_constant_one(func, x_data, 1)
self._test_constant_one(func, x_data, 1.0)
self._test_constant_one(func, x_data, numpy.int64(1))
self._test_constant_one(func, x_data, numpy.float64(1.0))
def _test_constant_gpu(self, func):
x_data = numpy.array(1, self.dtype)
self._test_constant_one(func, x_data, 1, True)
self._test_constant_one(func, x_data, 1.0, True)
self._test_constant_one(func, x_data, numpy.int64(1), True)
self._test_constant_one(func, x_data, numpy.float64(1), True)
def _test_constant_array_one(self, func, lhs, rhs):
x = chainer.Variable(lhs)
y = func(x, rhs)
self.assertEqual(y.data.dtype, self.dtype)
y.grad = numpy.ones_like(y.data, self.dtype)
y.backward()
self.assertEqual(x.grad.dtype, self.dtype)
def _test_constant_array(self, func):
x_data = numpy.array([1.0, 2.0], self.dtype)
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0], self.dtype))
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, [3.0, 4.0])
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, (3.0, 4.0))
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, [3.0, 4.0, 5.0])
with pytest.raises(TypeError):
self._test_constant_array_one(func, x_data, (3.0, 4.0, 5.0))
with pytest.raises(type_check.InvalidType):
self._test_constant_array_one(
func, x_data, numpy.array([3.0, 4.0, 5.0], self.dtype))
def _test_constant_array_gpu_one(self, func, lhs, rhs):
x = chainer.Variable(cuda.to_gpu(lhs))
y = func(x, rhs)
self.assertEqual(y.data.dtype, self.dtype)
y.grad = cuda.cupy.ones_like(y.data).astype(self.dtype)
y.backward()
self.assertEqual(x.grad.dtype, self.dtype)
def _test_constant_array_gpu(self, func, exception=TypeError):
x_data = numpy.array([1.0, 2.0], self.dtype)
self._test_constant_array_gpu_one(
func, x_data, cuda.to_gpu(numpy.array([3.0, 4.0], self.dtype)))
with pytest.raises(exception):
self._test_constant_array_one(
func, x_data, cuda.to_gpu(
numpy.array([3.0, 4.0, 5.0], self.dtype)))
def test_add_constant(self):
self._test_constant(lambda x, y: x + y)
@attr.gpu
def test_add_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x + y)
def test_add_constant_array(self):
self._test_constant_array(lambda x, y: x + y)
@attr.gpu
def test_add_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x + y)
def test_radd_constant(self):
self._test_constant(lambda x, y: y + x)
@attr.gpu
def test_radd_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y + x)
def test_radd_constant_array(self):
self._test_constant_array(lambda x, y: y + x)
@attr.gpu
def test_radd_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y + x)
def test_sub_constant(self):
self._test_constant(lambda x, y: x - y)
@attr.gpu
def test_sub_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x - y)
def test_sub_constant_array(self):
self._test_constant_array(lambda x, y: x - y)
@attr.gpu
def test_sub_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x - y)
def test_rsub_constant(self):
self._test_constant(lambda x, y: y - x)
@attr.gpu
def test_rsub_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y - x)
def test_rsub_constant_array(self):
self._test_constant_array(lambda x, y: y - x)
@attr.gpu
def test_rsub_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y - x)
def test_mul_constant(self):
self._test_constant(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x * y)
def test_mul_constant_array(self):
self._test_constant_array(lambda x, y: x * y)
@attr.gpu
def test_mul_constant_array_gpu(self):
        self._test_constant_array_gpu(lambda x, y: x * y)
def test_rmul_constant(self):
self._test_constant(lambda x, y: y * x)
@attr.gpu
def test_rmul_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y * x)
def test_rmul_constant_array(self):
self._test_constant_array(lambda x, y: y * x)
@attr.gpu
def test_rmul_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: y * x, exception=Exception)
def test_div_constant(self):
self._test_constant(lambda x, y: x / y)
@attr.gpu
def test_div_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x / y)
def test_div_constant_array(self):
self._test_constant_array(lambda x, y: x / y)
@attr.gpu
def test_div_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: x / y, exception=Exception)
def test_rdiv_constant(self):
self._test_constant(lambda x, y: y / x)
@attr.gpu
def test_rdiv_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y / x)
def test_rdiv_constant_array(self):
self._test_constant_array(lambda x, y: y / x)
@attr.gpu
def test_rdiv_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: y / x)
def test_pow_constant(self):
self._test_constant(lambda x, y: x ** y)
@attr.gpu
def test_pow_constant_gpu(self):
self._test_constant_gpu(lambda x, y: x ** y)
def test_pow_constant_array(self):
self._test_constant_array(lambda x, y: x ** y)
@attr.gpu
def test_pow_constant_array_gpu(self):
self._test_constant_array_gpu(lambda x, y: x ** y, exception=TypeError)
def test_rpow_constant(self):
self._test_constant(lambda x, y: y ** x)
@attr.gpu
def test_rpow_constant_gpu(self):
self._test_constant_gpu(lambda x, y: y ** x)
def test_rpow_constant_array(self):
self._test_constant_array(lambda x, y: y ** x)
@attr.gpu
def test_rpow_constant_array_gpu(self):
# _test_constant_array_one throws pycuda._pvt_struct.error
self._test_constant_array_gpu(lambda x, y: y ** x, exception=Exception)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestVariableConstantOp(unittest.TestCase):
def make_date(self):
raise NotImplementedError()
def setUp(self):
self.x = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.gy = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
self.value = 0.5
def check_forward(self, op, x_data):
x = chainer.Variable(x_data)
y = op(x, self.value)
if self.dtype == numpy.float16:
atol = 5e-4
rtol = 5e-4
else:
atol = 1e-7
rtol = 1e-7
testing.assert_allclose(
op(self.x, self.value), y.data, atol=atol, rtol=rtol)
def forward_cpu(self, op):
self.check_forward(op, self.x)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y + x)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y - x)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y * x)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y / x)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y ** x)
def forward_gpu(self, op):
self.check_forward(op, cuda.to_gpu(self.x))
@attr.gpu
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y ** x)
def check_backward(self, op, x_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(lambda x: op(x, self.value),
x_data, y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
def test_radd_backward_cpu(self):
self.backward_cpu(lambda x, y: y + x)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
def test_rsub_backward_cpu(self):
self.backward_cpu(lambda x, y: y - x)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
def test_rmul_backward_cpu(self):
self.backward_cpu(lambda x, y: y * x)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
def test_rdiv_backward_cpu(self):
self.backward_cpu(lambda x, y: y / x)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
def test_rpow_backward_cpu(self):
self.backward_cpu(lambda x, y: y ** x)
def backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_backward_gpu(self):
self.backward_gpu(lambda x, y: x - y)
@attr.gpu
def test_rsub_backward_gpu(self):
self.backward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x)
def check_double_backward(self, op, x_data, y_grad, x_grad_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
def _op(x):
return op(x, self.value)
gradient_check.check_double_backward(
_op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)
def double_backward_cpu(self, op):
self.check_double_backward(op, self.x, self.gy, self.ggx)
def test_pow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x ** y)
def test_rpow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y ** x)
def test_rdiv_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y / x)
def double_backward_gpu(self, op):
self.check_double_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
@attr.gpu
def test_pow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y ** x)
@attr.gpu
def test_rdiv_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y / x)
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestVariableConstantArrayOp(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(.5, 1, (3, 2)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, (3, 2)).astype(self.dtype)
self.value = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
def check_forward(self, op, x_data, gpu, positive):
value = self.value
if positive:
value = numpy.abs(value)
v = value
if gpu:
v = cuda.to_gpu(v)
x = chainer.Variable(x_data)
y = op(x, v)
if self.dtype == numpy.float16:
tol = 1e-3
else:
tol = 1e-6
testing.assert_allclose(
op(self.x, value), y.data, atol=tol, rtol=tol)
def forward_cpu(self, op, positive=False):
self.check_forward(op, self.x, False, positive)
def test_add_forward_cpu(self):
self.forward_cpu(lambda x, y: x + y)
def test_radd_forward_cpu(self):
self.forward_cpu(lambda x, y: y + x)
def test_sub_forward_cpu(self):
self.forward_cpu(lambda x, y: x - y)
def test_rsub_forward_cpu(self):
self.forward_cpu(lambda x, y: y - x)
def test_mul_forward_cpu(self):
self.forward_cpu(lambda x, y: x * y)
def test_rmul_forward_cpu(self):
self.forward_cpu(lambda x, y: y * x)
def test_div_forward_cpu(self):
self.forward_cpu(lambda x, y: x / y)
def test_rdiv_forward_cpu(self):
self.forward_cpu(lambda x, y: y / x)
def test_pow_forward_cpu(self):
self.forward_cpu(lambda x, y: x ** y)
def test_rpow_forward_cpu(self):
self.forward_cpu(lambda x, y: y ** x, positive=True)
def forward_gpu(self, op, positive=False):
self.check_forward(op, cuda.to_gpu(self.x), True, positive)
@attr.gpu
def test_add_forward_gpu(self):
self.forward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_forward_gpu(self):
self.forward_gpu(lambda x, y: y + x)
@attr.gpu
def test_sub_forward_gpu(self):
self.forward_gpu(lambda x, y: x - y)
@attr.gpu
def test_rsub_forward_gpu(self):
self.forward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_forward_gpu(self):
self.forward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_forward_gpu(self):
self.forward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_forward_gpu(self):
self.forward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_forward_gpu(self):
self.forward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_forward_gpu(self):
self.forward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_forward_gpu(self):
self.forward_gpu(lambda x, y: y ** x, positive=True)
def check_backward(self, op, x_data, y_grad, gpu, positive):
value = self.value
if positive:
value = numpy.abs(value)
if gpu:
value = cuda.to_gpu(value)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(lambda x: op(x, value), x_data, y_grad,
dtype=numpy.float64, **options)
def backward_cpu(self, op, positive=False):
self.check_backward(op, self.x, self.gy, False, positive)
def test_add_backward_cpu(self):
self.backward_cpu(lambda x, y: x + y)
def test_radd_backward_cpu(self):
self.backward_cpu(lambda x, y: y + x)
def test_sub_backward_cpu(self):
self.backward_cpu(lambda x, y: x - y)
def test_rsub_backward_cpu(self):
self.backward_cpu(lambda x, y: y - x)
def test_mul_backward_cpu(self):
self.backward_cpu(lambda x, y: x * y)
def test_rmul_backward_cpu(self):
self.backward_cpu(lambda x, y: y * x)
def test_div_backward_cpu(self):
self.backward_cpu(lambda x, y: x / y)
def test_rdiv_backward_cpu(self):
self.backward_cpu(lambda x, y: y / x)
def test_pow_backward_cpu(self):
self.backward_cpu(lambda x, y: x ** y)
def test_rpow_backward_cpu(self):
self.backward_cpu(lambda x, y: y ** x, positive=True)
def backward_gpu(self, op, positive=False):
self.check_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy), True, positive)
@attr.gpu
def test_add_backward_gpu(self):
self.backward_gpu(lambda x, y: x + y)
@attr.gpu
def test_radd_backward_gpu(self):
self.backward_gpu(lambda x, y: y + x)
    @attr.gpu
    def test_sub_backward_gpu(self):
        self.backward_gpu(lambda x, y: x - y)
    @attr.gpu
    def test_rsub_backward_gpu(self):
        self.backward_gpu(lambda x, y: y - x)
@attr.gpu
def test_mul_backward_gpu(self):
self.backward_gpu(lambda x, y: x * y)
@attr.gpu
def test_rmul_backward_gpu(self):
self.backward_gpu(lambda x, y: y * x)
@attr.gpu
def test_div_backward_gpu(self):
self.backward_gpu(lambda x, y: x / y)
@attr.gpu
def test_rdiv_backward_gpu(self):
self.backward_gpu(lambda x, y: y / x)
@attr.gpu
def test_pow_backward_gpu(self):
self.backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_backward_gpu(self):
self.backward_gpu(lambda x, y: y ** x, positive=True)
def check_double_backward(self, op, x_data, y_grad, x_grad_grad, gpu,
positive):
value = self.value
if positive:
value = numpy.abs(value)
if gpu:
value = cuda.to_gpu(value)
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
def _op(x):
return op(x, value)
gradient_check.check_double_backward(
_op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)
def double_backward_cpu(self, op, positive=False):
self.check_double_backward(
op, self.x, self.gy, self.ggx, False, positive)
def test_pow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: x ** y)
def test_rpow_double_backward_cpu(self):
self.double_backward_cpu(lambda x, y: y ** x, positive=True)
def double_backward_gpu(self, op, positive=False):
self.check_double_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx), True, positive)
@attr.gpu
def test_pow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: x ** y)
@attr.gpu
def test_rpow_double_backward_gpu(self):
self.double_backward_gpu(lambda x, y: y ** x, positive=True)
@testing.parameterize(*testing.product({
'shape': [(3, 2), ()],
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestUnaryFunctions(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
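        # Push entries near zero away from it: abs is not differentiable at 0
        # and tiny values make the numerical gradient check unreliable.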
for i in numpy.ndindex(self.shape):
if -0.1 < self.x[i] < 0.1:
self.x[i] = 0.5
self.gy = numpy.random.uniform(-1, 1, self.shape).astype(self.dtype)
self.ggx = numpy.random.uniform(.5, 1, self.shape).astype(self.dtype)
def check_forward(self, op, op_np, x_data):
x = chainer.Variable(x_data)
y = op(x)
testing.assert_allclose(
op_np(self.x), y.data, atol=1e-7, rtol=1e-7)
def forward_cpu(self, op, op_np):
self.check_forward(op, op_np, self.x)
def test_neg_forward_cpu(self):
self.forward_cpu(lambda x: -x, lambda x: -x)
def test_abs_forward_cpu(self):
self.forward_cpu(lambda x: abs(x), lambda x: abs(x))
def forward_gpu(self, op, op_np):
self.check_forward(op, op_np, cuda.to_gpu(self.x))
@attr.gpu
def test_neg_forward_gpu(self):
self.forward_gpu(lambda x: -x, lambda x: -x)
@attr.gpu
def test_abs_forward_gpu(self):
self.forward_gpu(lambda x: abs(x), lambda x: abs(x))
def check_backward(self, op, x_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(
op, x_data, y_grad, dtype=numpy.float64, **options)
def backward_cpu(self, op):
self.check_backward(op, self.x, self.gy)
def test_neg_backward_cpu(self):
self.backward_cpu(lambda x: -x)
def test_abs_backward_cpu(self):
self.backward_cpu(lambda x: abs(x))
def backward_gpu(self, op):
self.check_backward(op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
@attr.gpu
def test_neg_backward_gpu(self):
self.backward_gpu(lambda x: -x)
@attr.gpu
def test_abs_backward_gpu(self):
self.backward_gpu(lambda x: abs(x))
def check_double_backward(self, op, x_data, y_grad, x_grad_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_double_backward(
op, x_data, y_grad, x_grad_grad, dtype=numpy.float64, **options)
def double_backward_cpu(self, op):
self.check_double_backward(op, self.x, self.gy, self.ggx)
def test_neg_double_backward_cpu(self):
self.double_backward_cpu(lambda x: -x)
def test_abs_double_backward_cpu(self):
self.double_backward_cpu(lambda x: abs(x))
def double_backward_gpu(self, op):
self.check_double_backward(
op, cuda.to_gpu(self.x), cuda.to_gpu(self.gy),
cuda.to_gpu(self.ggx))
@attr.gpu
def test_neg_double_backward_gpu(self):
self.double_backward_gpu(lambda x: -x)
@attr.gpu
def test_abs_double_backward_gpu(self):
self.double_backward_gpu(lambda x: abs(x))
@testing.parameterize(*testing.product({
'dtype': [numpy.float16, numpy.float32, numpy.float64],
}))
class TestNegativePow(unittest.TestCase):
def setUp(self):
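        # x is drawn from [-1, 0) to check that x ** 2 and its gradients are
        # computed correctly for negative bases.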
self.x = numpy.random.uniform(-1, 0, (3, 2)).astype(self.dtype)
self.gy = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
self.ggx = numpy.random.uniform(-1, 1, (3, 2)).astype(self.dtype)
def check_backward(self, x_data, y_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_backward(
lambda x: x ** 2, x_data, y_grad, dtype=numpy.float64, **options)
def test_backward_cpu(self):
self.check_backward(self.x, self.gy)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(cuda.to_gpu(self.x), cuda.to_gpu(self.gy))
def check_double_backward(self, x_data, y_grad, x_grad_grad):
options = {}
if self.dtype == numpy.float16:
options = {'atol': 5e-3, 'rtol': 5e-2}
gradient_check.check_double_backward(
lambda x: x ** 2, x_data, y_grad, x_grad_grad, dtype=numpy.float64,
**options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.gy, self.ggx)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.gy), cuda.to_gpu(self.ggx))
@testing.parameterize(*testing.product_dict(
[
{'left_const': False, 'right_const': False},
{'left_const': True, 'right_const': False},
{'left_const': False, 'right_const': True},
], [
{'dtype': numpy.float16},
{'dtype': numpy.float32},
{'dtype': numpy.float64},
], [
{'x_shape': (3, 2), 'y_shape': (2, 4), 'z_shape': (3, 4)},
{'x_shape': (2, 3, 2), 'y_shape': (2, 2, 4), 'z_shape': (2, 3, 4)},
{'x_shape': (2, 1, 3, 4),
'y_shape': (2, 4, 2),
'z_shape': (2, 2, 3, 2)},
{'x_shape': (5, 3, 2), 'y_shape': (2,), 'z_shape': (5, 3)},
{'x_shape': (2,), 'y_shape': (5, 2, 4), 'z_shape': (5, 4)},
{'x_shape': (2, 3, 2), 'y_shape': (2, 4), 'z_shape': (2, 3, 4)},
{'x_shape': (3,), 'y_shape': (3,), 'z_shape': ()},
]
))
@unittest.skipUnless(sys.version_info >= (3, 5),
'Only for Python3.5 or higher')
class TestMatMul(unittest.TestCase):
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
self.gz = numpy.random.uniform(-1, 1, self.z_shape).astype(self.dtype)
self.ggx = numpy.random.uniform(
-1, 1, self.x_shape).astype(self.dtype)
self.ggy = numpy.random.uniform(
-1, 1, self.y_shape).astype(self.dtype)
def _get_forward_answer(self, x, y):
if x.ndim <= 2 or y.ndim == 1:
return numpy.dot(x, y)
elif hasattr(numpy, 'matmul'):
# Note: NumPy 1.14.0 has a bug in einsum (numpy/numpy#10343),
# so we use matmul if available to avoid it
return numpy.matmul(x, y)
else:
return numpy.einsum('...ij,...jk->...ik', x, y)
def check_forward(self, x_data, y_data):
if self.left_const:
x = x_data
else:
x = chainer.Variable(x_data)
if self.right_const:
y = y_data
else:
y = chainer.Variable(y_data)
z = operator.matmul(x, y)
if self.dtype == numpy.float16:
options = {'atol': 1e-3, 'rtol': 1e-3}
else:
options = {'atol': 1e-7, 'rtol': 1e-7}
testing.assert_allclose(
self._get_forward_answer(self.x, self.y), z.data, **options)
def test_forward_cpu(self):
self.check_forward(self.x, self.y)
@attr.gpu
def test_forward_gpu(self):
self.check_forward(cuda.to_gpu(self.x), cuda.to_gpu(self.y))
def check_backward(self, x_data, y_data, z_grad):
if self.right_const:
def op(x):
return operator.matmul(x, y_data)
data = x_data,
elif self.left_const:
def op(y):
return operator.matmul(x_data, y)
data = y_data,
else:
op = operator.matmul
data = x_data, y_data
if self.dtype == numpy.float16:
options = {'atol': 1e-3, 'rtol': 1e-2}
else:
options = {'atol': 1e-4, 'rtol': 1e-4}
gradient_check.check_backward(
op, data, z_grad, dtype=numpy.float64, **options)
def test_backward_cpu(self):
self.check_backward(self.x, self.y, self.gz)
@attr.gpu
def test_backward_gpu(self):
self.check_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.y), cuda.to_gpu(self.gz))
def check_double_backward(
self, x_data, y_data, z_grad, x_grad_grad, y_grad_grad):
if self.right_const:
def op(x):
return operator.matmul(x, y_data.astype(x.dtype))
data = x_data,
grad_grad = x_grad_grad,
elif self.left_const:
def op(y):
return operator.matmul(x_data.astype(y.dtype), y)
data = y_data,
grad_grad = y_grad_grad,
else:
op = operator.matmul
data = x_data, y_data
grad_grad = x_grad_grad, y_grad_grad
if self.dtype == numpy.float16:
options = {'atol': 1e-3, 'rtol': 1e-2}
else:
options = {'atol': 1e-4, 'rtol': 1e-4}
gradient_check.check_double_backward(
op, data, z_grad, grad_grad, dtype=numpy.float64, **options)
def test_double_backward_cpu(self):
self.check_double_backward(self.x, self.y, self.gz, self.ggx, self.ggy)
@attr.gpu
def test_double_backward_gpu(self):
self.check_double_backward(
cuda.to_gpu(self.x), cuda.to_gpu(self.y), cuda.to_gpu(self.gz),
cuda.to_gpu(self.ggx), cuda.to_gpu(self.ggy))
@testing.parameterize(
{'x_shape': (), 'y_shape': ()},
{'x_shape': (3, 2), 'y_shape': ()},
{'x_shape': (), 'y_shape': (2, 4)},
{'x_shape': (2, 3), 'y_shape': (2, 3)},
{'x_shape': (2,), 'y_shape': (1,)},
)
@unittest.skipUnless(sys.version_info >= (3, 5),
'Only for Python3.5 or higher')
class TestMatMulInvalidShape(unittest.TestCase):
dtype = numpy.float32
def setUp(self):
self.x = numpy.random.uniform(-1, 1, self.x_shape).astype(self.dtype)
self.y = numpy.random.uniform(-1, 1, self.y_shape).astype(self.dtype)
def test_invalid_type(self):
x = chainer.Variable(self.x)
y = chainer.Variable(self.y)
with pytest.raises(type_check.InvalidType):
operator.matmul(x, y)
class TestNotSupportOperation(unittest.TestCase):
def setUp(self):
self.x = chainer.Variable(numpy.zeros(10))
self.y = chainer.Variable(numpy.zeros(10))
def test_lt(self):
with pytest.raises(NotImplementedError):
self.x < self.y
def test_le(self):
with pytest.raises(NotImplementedError):
self.x <= self.y
def test_eq(self):
with pytest.raises(NotImplementedError):
self.x == self.y
def test_ne(self):
with pytest.raises(NotImplementedError):
self.x != self.y
def test_gt(self):
with pytest.raises(NotImplementedError):
self.x > self.y
def test_ge(self):
with pytest.raises(NotImplementedError):
self.x >= self.y
def test_nonzero(self):
with pytest.raises(NotImplementedError):
if self.x:
pass
class ConvertValueToStringTest(unittest.TestCase):
def _check_scalar(self, value, string):
self.assertEqual(basic_math._convert_value_to_string(value), string)
def test_integer_positive(self):
self._check_scalar(2, '2')
def test_integer_zero(self):
self._check_scalar(0, '0')
def test_integer_negative(self):
self._check_scalar(-2, '(-2)')
def test_float_positive(self):
self._check_scalar(2.0, '2.0')
def test_float_zero(self):
self._check_scalar(0.0, '0.0')
def test_float_negative(self):
self._check_scalar(-2.0, '(-2.0)')
def test_numpy_scalar(self):
self._check_scalar(numpy.float32(2), '2.0')
def _check_array(self, value, string):
self.assertEqual(basic_math._convert_value_to_string(value), string)
value = chainer.Variable(value)
self.assertEqual(basic_math._convert_value_to_string(value), string)
def test_array_cpu(self):
self._check_array(numpy.array([1, 2]), 'constant array')
@attr.gpu
def test_array_gpu(self):
self._check_array(cuda.ndarray([1, 2]), 'constant array')
class TestLabel(unittest.TestCase):
def test_neg(self):
self.assertEqual(basic_math.Neg().label, '__neg__')
def test_absolute(self):
self.assertEqual(basic_math.Absolute().label, '|_|')
def test_add(self):
self.assertEqual(basic_math.Add().label, '_ + _')
def test_add_constant(self):
self.assertEqual(basic_math.AddConstant(2.0).label, '_ + 2.0')
def test_sub(self):
self.assertEqual(basic_math.Sub().label, '_ - _')
def test_sub_from_constant(self):
self.assertEqual(basic_math.SubFromConstant(2.0).label, '2.0 - _')
def test_mul(self):
self.assertEqual(basic_math.Mul().label, '_ * _')
def test_mul_constant(self):
self.assertEqual(basic_math.MulConstant(2.0).label, '_ * 2.0')
def test_div(self):
self.assertEqual(basic_math.Div().label, '_ / _')
def test_div_from_constant(self):
self.assertEqual(basic_math.DivFromConstant(2.0).label, '2.0 / _')
def test_pow_var_var(self):
self.assertEqual(basic_math.PowVarVar().label, '_ ** _')
def test_pow_var_const(self):
self.assertEqual(basic_math.PowVarConst(2.0).label, '_ ** 2.0')
def test_pow_const_var(self):
self.assertEqual(basic_math.PowConstVar(2.0).label, '2.0 ** _')
testing.run_module(__name__, __file__)
|
|
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 12:27:37 2013
@author: Andrew
"""
import cv2, time, numpy as np, serial as ps
class ChaseBall(object):
def __init__(self):
self.imgScale = np.array([1.0, 1.0 , 1.0], np.float32)
self.ballFound = False
self.frontCarFound = False
self.backCarFound = False
self.ballMean = np.array([95.2919, 193.6647, 178.0390])
self.carBackMean = np.array([22.2108, 14.2900, 174.0439])
self.carFrontMean = np.array([177.9610, 101.8435, 9.8372])
self.ballCov = np.array([[1722.1, 1770.2, 1952.4],
[1770.2, 2520.8, 2538.9], [1952.4, 2538.9, 2679.1]])
self.carBackCov = np.array([[213.0, -27.1, 338.8],
[-27.1, 47.8, 72.2], [338.8, 72.2, 1099.3]])
self.carFrontCov = np.array([[1974.4, 1026.2, 105.1],
[1026.2, 750.5, 112.2], [105.1, 112.2, 38.2]])
self.colorThresh = 4
self.vc = cv2.VideoCapture(0)
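        # KInv approximates the inverse camera intrinsic matrix; H composes a
        # fixed tilt rotation (assumed 3*pi/8) with KInv and is used to map
        # image points onto ground-plane coordinates.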
self.KInv = np.array([[ 0.00201472, 0.0, -0.64312242],
[ 0.0, 0.00203603, -0.3760941 ],
[ 0.0, 0.0, 1.0 ]])
h = 3.0 * np.pi / 8.0
self.H = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(h), -np.sin(h)],
[0.0, np.sin(h), np.cos(h)]])
self.H = np.dot(self.H, self.KInv)
if not self.vc.isOpened():
print 'Video Capture is not working.'
cv2.namedWindow('ThresholdedImage', cv2.CV_WINDOW_AUTOSIZE)
self.ser = ps.Serial('/dev/tty.usbmodemfa131', 9600)
def onMouse(self, event, x, y, flags, param):
if event == cv2.EVENT_LBUTTONDOWN:
self.x1, self.y1 = np.int16([x,y])
if event == cv2.EVENT_LBUTTONUP:
self.x2, self.y2 = np.int16([x,y])
def setImgScale(self):
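        # Estimate per-channel gains from the brightest pixel of each channel
        # in one frame, so later frames can be roughly white-balanced.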
self.getImg()
bMax = np.amax(self.img[:,:,0])
gMax = np.amax(self.img[:,:,1])
rMax = np.amax(self.img[:,:,2])
self.imgScale = np.array([bMax, gMax, rMax], np.float32)
self.imgScale = 255.0 / self.imgScale
def getImg(self):
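        # Grab a frame, downsample it by 2 in each dimension, blur it and
        # apply the per-channel white-balance gains from setImgScale().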
rval, img = self.vc.read()
img = img[0::2, 0::2, :]
self.img = cv2.GaussianBlur(img, (5,5), 1)
self.img = self.img.astype(np.float32)
self.img[:,:,0] = self.img[:,:,0] * self.imgScale[0]
self.img[:,:,1] = self.img[:,:,1] * self.imgScale[1]
self.img[:,:,2] = self.img[:,:,2] * self.imgScale[2]
self.img = np.clip(self.img, 0, 255)
self.img = self.img.astype(np.uint8)
self.displayImg = self.img
def addColor(self, whichObject):
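        # Let the user drag a rectangle over the chosen object; the pixels
        # inside it define the color mean and covariance used for detection.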
self.getImg()
if whichObject == 0:
windowStr = 'Pick ball color'
elif whichObject == 1:
windowStr = 'Pick car front color'
else:
windowStr = 'Pick car back color'
cv2.namedWindow(windowStr, cv2.CV_WINDOW_AUTOSIZE)
cv2.imshow(windowStr, self.img)
cv2.setMouseCallback(windowStr,self.onMouse)
cv2.waitKey()
cv2.destroyWindow(windowStr)
if self.x1 < self.x2:
x1 = self.x1
x2 = self.x2
else:
x1 = self.x2
x2 = self.x1
if self.y1 < self.y2:
y1 = self.y1
y2 = self.y2
else:
y1 = self.y2
y2 = self.y1
temp = self.img[y1 : y2, x1 : x2, :]
numPts = np.shape(temp)[0] * np.shape(temp)[1]
newTemp = np.reshape(temp, (numPts, 3))
colorMean = np.mean(newTemp, axis = 0)
print colorMean
colorCov = np.cov(newTemp.T)
print colorCov
if whichObject == 0:
self.ballMean = colorMean
self.ballCov = colorCov
elif whichObject == 1:
self.carFrontMean = colorMean
self.carFrontCov = colorCov
elif whichObject == 2:
self.carBackMean = colorMean
self.carBackCov = colorCov
def findColorObjectMahalanobis(self, color, P):
# calculating Mahalanobis distance between pixel colors and target color
Pinv = np.linalg.inv(P)
img0 = self.img[:,:,0] - color[0]
img1 = self.img[:,:,1] - color[1]
img2 = self.img[:,:,2] - color[2]
temp1 = img0 * Pinv[0,0] + img1 * Pinv[0,1] + img2 * Pinv[0,2]
temp2 = img0 * Pinv[1,0] + img1 * Pinv[1,1] + img2 * Pinv[1,2]
temp3 = img0 * Pinv[2,0] + img1 * Pinv[2,1] + img2 * Pinv[2,2]
# creating threshold image, closing connected components
binImg = 255 - np.sqrt(img0 * temp1 + img1 * temp2 + img2 * temp3)
binImg = np.clip(binImg, 0, 255).astype(np.uint8)
binImg[binImg > (255 - self.colorThresh)] = 255
binImg[binImg < 255] = 0
binImg = cv2.morphologyEx(binImg, cv2.MORPH_CLOSE,
np.ones(5, dtype = np.uint8))
contourImg = binImg.copy()
contours, hierarchy = cv2.findContours(contourImg,
cv2.RETR_LIST, cv2.CHAIN_APPROX_SIMPLE)
max_area = 0
best_cnt = np.array([0])
# finding largest connected component
for cnt in contours:
area = cv2.contourArea(cnt)
if area > max_area:
max_area = area
best_cnt = cnt
if best_cnt.any():
M = cv2.moments(best_cnt)
cx,cy = M['m10']/M['m00'], M['m01']/M['m00']
else:
cx, cy = -1, -1
return cx, cy
def findBall(self):
self.ballFound = False
self.getImg()
cx, cy = self.findColorObjectMahalanobis(self.ballMean, self.ballCov)
if cx > -1:
self.ballFound = True
cv2.circle(self.displayImg, (int(cx), int(cy)), 5, (0, 0, 255), -1)
print 'ball'
print np.array([cx, cy])
cx, cy, cz = np.dot(self.H, np.array([cx, cy, 1.0]))
self.ballLoc = np.array([cx / cz, cy / cz])
def findCar(self):
self.frontCarFound = False
self.backCarFound = False
cx, cy = self.findColorObjectMahalanobis(self.carBackMean,
self.carBackCov)
if cx > -1:
self.backCarFound = True
cv2.circle(self.displayImg, (int(cx), int(cy)), 5, (255,0,0), -1)
print 'car back'
print np.array([cx, cy])
cx, cy, cz = np.dot(self.H, np.array([cx, cy, 1.0]))
self.backCarLoc = np.array([cx / cz, cy / cz])
cx, cy = self.findColorObjectMahalanobis(self.carFrontMean,
self.carFrontCov)
if cx > -1:
self.frontCarFound = True
cv2.circle(self.displayImg, (int(cx), int(cy)), 5, (255, 0, 255),
-1)
print 'car front'
print np.array([cx, cy])
cx, cy, cz = np.dot(self.H, np.array([cx, cy, 1.0]))
self.frontCarLoc = np.array([cx / cz, cy / cz])
def determineMotion(self):
self.findBall()
self.findCar()
# forward = 0 - stationary
# forward = 1 - forward
# forward = 2 - backward
# turn = 0 - no turn
# turn = 1 - left turn
# turn = 2 - right turn
forward = 0
turn = 0
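        # The car heading is the back->front marker vector and the ball
        # bearing is measured from the front marker; moveDir is the signed
        # turn angle (degrees) needed to face the ball, and distances are
        # compared as squared ground-plane lengths.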
if self.ballFound and self.backCarFound and self.frontCarFound:
carDiff = self.frontCarLoc - self.backCarLoc
carDir = np.arctan2(carDiff[1], carDiff[0])
carDist = carDiff[0] * carDiff[0] + carDiff[1] * carDiff[1]
ballDiff = self.ballLoc - self.frontCarLoc
ballDir = np.arctan2(ballDiff[1], ballDiff[0])
ballDist = ballDiff[0] * ballDiff[0] + ballDiff[1] * ballDiff[1]
moveDir = (ballDir - carDir) * 180 / np.pi
if moveDir > 180:
moveDir = moveDir - 360
if moveDir < -180:
moveDir = moveDir + 360
print moveDir
print ballDist / carDist
if np.abs(moveDir) <= 20:
if ballDist > 0.5 * carDist:
forward = 1
turn = 0
if moveDir > 20 and moveDir <= 60:
if ballDist > (4 * carDist):
forward = 1
turn = 1
else:
forward = 2
turn = 2
if moveDir < -20 and moveDir >= -60:
if ballDist > (4 * carDist):
forward = 1
turn = 2
else:
forward = 2
turn = 1
if moveDir > 60 and moveDir <= 120:
forward = 2
turn = 2
if moveDir < -60 and moveDir >= -120:
forward = 2
turn = 1
if moveDir > 120 and moveDir < 180:
if ballDist < (4 * carDist):
forward = 1
turn = 1
else:
forward = 2
turn = 2
if moveDir < -120 and moveDir >= -180:
if ballDist < (4 * carDist):
forward = 1
turn = 2
else:
forward = 2
turn = 1
print forward, turn
return forward, turn
def moveCar(self, forward, turn):
# a = forward
# b = backward
# c = forward left
# d = forward right
# e = backward right
# f = backward left
if forward > 0:
if forward == 1:
if turn == 0:
self.ser.write('a')
elif turn == 1:
self.ser.write('d')
else:
self.ser.write('c')
else:
if turn == 0:
self.ser.write('b')
elif turn == 1:
self.ser.write('f')
else:
self.ser.write('e')
def displayMoveDirection(self, forward, turn):
if forward > 0:
if forward < 2:
if turn == 0:
ang = 0
elif turn == 1:
ang = np.pi / 4
else:
ang = -np.pi / 4
else:
if turn == 0:
ang = np.pi
elif turn == 1:
ang = 3 * np.pi / 4
else:
ang = -3 * np.pi / 4
carDiff = self.frontCarLoc - self.backCarLoc
carDir = np.arctan2(carDiff[0], carDiff[1])
ang += carDir
pt1 = self.frontCarLoc
Rx = np.cos(ang)
Ry = np.sin(ang)
ptx = Rx * 0.0 + Ry * 50.0
pty = Rx * 50.0 - Ry * 0.0
pt2 = self.frontCarLoc + np.array([ptx, pty])
pt1 = (int(pt1[0]), int(pt1[1]))
pt2 = (int(pt2[0]), int(pt2[1]))
cv2.line(self.displayImg, pt1, pt2, (255, 255, 255), 2)
def estimateHomography(self):
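        # Estimate the camera tilt from two observations of the car markers:
        # drive forward between observations, then search for the rotation
        # angle at which both observed car lengths match on the ground plane
        # (the physical car length is constant).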
pts1Found = False
while not pts1Found:
self.getImg()
fx1, fy1 = self.findColorObjectMahalanobis(self.carFrontMean,
self.carFrontCov)
bx1, by1 = self.findColorObjectMahalanobis(self.carBackMean,
self.carBackCov)
cv2.imshow('ThresholdedImage', self.displayImg)
cv2.waitKey()
pts1Found = (fx1 > -1) and (bx1 > -1)
if not pts1Found:
forward, turn = 1, 0
self.moveCar(forward, turn)
time.sleep(0.75)
self.moveCar(forward, turn)
print 'Found 1st set'
pts2Found = False
while not pts2Found:
forward, turn = 1, 0
self.moveCar(forward, turn)
time.sleep(0.75)
self.moveCar(forward, turn)
self.getImg()
fx2, fy2 = self.findColorObjectMahalanobis(self.carFrontMean,
self.carFrontCov)
bx2, by2 = self.findColorObjectMahalanobis(self.carBackMean,
self.carBackCov)
pts2Found = (fx2 > -1) and (bx2 > -1)
fx1, fy1, fz1 = np.dot(self.KInv, np.array([fx1, fy1, 1.0]))
bx1, by1, bz1 = np.dot(self.KInv, np.array([bx1, by1, 1.0]))
fx2, fy2, fz2 = np.dot(self.KInv, np.array([fx2, fy2, 1.0]))
bx2, by2, bz2 = np.dot(self.KInv, np.array([bx2, by2, 1.0]))
pts = np.array([[fx1, bx1, fx2, bx2], [fy1, by1, fy2, by2],
[fz1, bz1, fz2, bz2]])
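        # Golden-section search over the tilt angle in [pi/8, 5*pi/8]; the
        # error of a candidate angle is the squared difference between the two
        # projected car lengths.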
scale = 3.0 / 2.0 - np.sqrt(5) / 2.0
a = np.pi / 8.0
b = 5.0 * np.pi / 8.0
angs = np.array([a, a + scale * (b - a), b - scale * (b - a), b])
errs = np.array([0.0, 0.0, 0.0, 0.0])
for ii in range(4):
R = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(angs[ii]), -np.sin(angs[ii])],
[0.0, np.sin(angs[ii]), np.cos(angs[ii])]])
newPts = np.dot(R, pts)
newPts = newPts[0:2, :] / newPts[2,:]
diffPts = np.array([newPts[:,1] - newPts[:, 0],
newPts[:, 3] - newPts[:, 2]])
errs[ii] = (np.linalg.norm(diffPts[0,:]) -
np.linalg.norm(diffPts[1,:]))**2
for ii in range(15):
if errs[1] < errs[2]:
angs[2:4] = angs[1:3]
errs[2:4] = errs[1:3]
newIdx = 1
else:
angs[0:2] = angs[1:3]
errs[0:2] = errs[1:3]
newIdx = 2
angs[3 - newIdx : 5 - newIdx] = angs[3 - newIdx : 5 - newIdx]
errs[3 - newIdx : 5 - newIdx] = errs[3 - newIdx : 5 - newIdx]
angs[newIdx] = angs[3 * newIdx - 3] + scale * (
angs[-3 * newIdx + 6] - angs[3 * newIdx - 3])
errs[newIdx] = self.rotationErr(pts, angs[newIdx])
Hangle = np.sum(angs[1:3]) / 2.0
print pts
print Hangle * 180 / np.pi
self.H = np.array([[1.0, 0.0, 0.0],
[0.0, np.cos(Hangle), -np.sin(Hangle)],
[0.0, np.sin(Hangle), np.cos(Hangle)]])
self.H = np.dot(self.H, self.KInv)
def rotationErr(self, pts, ang):
R = np.array([[1.0, 0.0, 0.0], [0.0, np.cos(ang), -np.sin(ang)],
[0.0, np.sin(ang), np.cos(ang)]])
newPts = np.dot(R, pts)
newPts = newPts[0:2, :] / newPts[2,:]
diffPts = np.array([newPts[:,1] - newPts[:, 0],
newPts[:, 3] - newPts[:, 2]])
errs = (np.linalg.norm(diffPts[0,:]) - np.linalg.norm(diffPts[1,:]))**2
return errs
def testMoveCar(self):
for ii in range(2):
for jj in range(3):
print ii, jj
                self.moveCar(ii + 1, jj)
time.sleep(1)
if __name__ == '__main__':
temp = ChaseBall()
doTest = 0
if doTest :
temp.testMoveCar()
else:
temp.setImgScale()
for ii in range(3):
temp.addColor(ii)
temp.estimateHomography()
#
while True:
# for ii in range(5):
forward, turn = temp.determineMotion()
#
# print forward, turn
temp.moveCar(forward, turn)
#
cv2.imshow('ThresholdedImage', temp.displayImg)
cv2.waitKey()
time.sleep(0.5)
# temp.addColor(0)
|
|
#!/usr/bin/python
#
# Copyright 2013 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Connections via pexpect to SSH and Telnet endpoints.
By deliberate side-effect, this module overwrites pexpect.spawn.__select
with an implementation based on poll(), to support use with higher file
descriptors than supported by select().
"""
import errno
import os
import re
import select
import socket
import time
import paramiko
import pexpect
import gflags
import logging
import sshclient
import push_exceptions as exceptions
FLAGS = gflags.FLAGS
TIMEOUT_DEFAULT = 20.0
class Error(Exception):
pass
class ConnectionError(Error):
"""The connection failed due to an error."""
class TimeoutError(Error):
"""The operation timed-out."""
class OperationFailedError(Error):
"""The sub-process had a non-zero exit status."""
class ScpError(Error):
"""An error occurred during an SCP operation."""
def _SelectViaPoll(_, rfds, wfds, efds, timeout):
"""poll() based replacement for pexpect.spawn.__select().
As mentioned in the module docstring, this is required since Python's select
is unable to wait for events on high-numbered file descriptors. The API is
as per select.select(), however if we are interrupted by a signal, we wait
again for the remaining time.
Args:
_: An object, self, unused.
rfds: A list, file descriptors to check for read.
wfds: A list, file descriptors to check for write.
efds: A list, file descriptors to check for exceptions.
timeout: A float, timeout (seconds).
Returns:
A tuple of three lists, being the descriptors in each of the incoming lists
which are ready for read, write or have an exception, respectively.
"""
if wfds or efds:
logging.fatal('Unexpected code change in pexpect: __select '
'called with wfds=%s efds=%s', wfds, efds)
p = select.poll()
for fd in rfds:
p.register(fd, select.POLLIN)
# See pexpect.spawn.__select for timeout handling logic; this is the same
# in select() and poll(), except that the timeout argument to poll() is
# in milliseconds. poll() raises the same exception on timeout as select().
if timeout is not None:
end_time = time.time() + timeout
while True:
try:
fdstate = p.poll(int(timeout * 1000) if timeout is not None else None)
# Build a list of descriptors which select() would return as 'available
# for read' (which includes EOF conditions which may be indicated as
# POLLIN, POLLHUP or POLLIN|POLLHUP, depending on the type of file
# descriptor).
rrfds = []
for fd, state in fdstate:
if state & select.POLLIN or state & select.POLLHUP:
rrfds.append(fd)
return (rrfds, [], [])
except select.error as e:
if e[0] == errno.EINTR:
if timeout is not None:
timeout = end_time - time.time()
if timeout < 0:
return ([], [], [])
else:
raise
# Override pexpect.spawn.__select as mentioned in module docstring.
pexpect.spawn._spawn__select = _SelectViaPoll
class Connection(object):
"""The base class for pexpect connections."""
def __init__(self, host, username, password=None, success=None,
connect_command=None, timeout=None, find_prompt=False,
enable_password=None, find_prompt_prefix=None):
"""Initializer.
Args:
host: A string, the hostname or IP address to connect to.
username: A string, the username to use on the connection.
password: A string, the password to use on the connection.
success: A string, the string to expect to trigger successful completion.
connect_command: A string, the command to connect (minus the host suffix).
timeout: A float, the number of seconds before a connection times out.
find_prompt: A bool, if true then success is a regexp and it's group(1)
should be used to build self._prompt.
enable_password: A string, the enable password to optionally use.
find_prompt_prefix: A string, the prefix to put before group(1) from the
success regexp to build self._prompt, if find_prompt is true.
"""
self._connect_timeout = timeout or TIMEOUT_DEFAULT
self._host = host
self._username = username
self._password = password
self._success = success
self._find_prompt = find_prompt
self._connect_command = connect_command
self._enable_password = enable_password
self._find_prompt_prefix = (
r'(?:^|\n)' if find_prompt_prefix is None else find_prompt_prefix)
self.child = None
def _MaybeFindPrompt(self):
if self._find_prompt:
try:
self._prompt = self._find_prompt_prefix + re.escape(
self.child.match.group(1))
self.re_prompt = re.compile(self._prompt)
logging.debug('%s: prompt set to %r', self._host, self._prompt)
except IndexError:
logging.debug('%s: find_prompt set but no capture group - skipping',
self._host)
class SocketSpawn(pexpect.spawn):
"""Wrapper around pexpect.spawn to use a supplied socket.
This class does not close the file; it assumes it is a Python socket
which will be held/destroyed by the caller.
"""
# pylint: disable=g-bad-name
def __init__(self, sock, *args, **kwargs):
pexpect.spawn.__init__(self, None, *args, **kwargs)
self.child_fd = sock.fileno()
self.closed = False
self.name = '<file descriptor %d>' % self.child_fd
def isalive(self):
if self.child_fd == -1:
return False
try:
os.fstat(self.child_fd)
return True
except OSError:
return False
def __del__(self):
return
def close(self):
return
def terminate(self, force=False):
_ = force
return
def kill(self, sig):
_ = sig
return
class SocketConnection(Connection):
"""IPv4 TCP socket connection class."""
def __init__(self, host, port, username, password=None, success=None,
timeout=None, initial_chat=None, find_prompt=False,
find_prompt_prefix=None):
"""Creates an IPv4 TCP socket connection.
Args:
host: As per parent.
port: An int, the port number to connect to.
username: As per parent.
password: As per parent.
success: As per parent.
timeout: As per parent.
initial_chat: A tuple of tuples, each tuple in this list is a string
to expect from the socket and a response; the chat must occur in the
exact order specified. Intended only for telnet option negotiation.
find_prompt: As per parent.
find_prompt_prefix: As per parent.
"""
super(SocketConnection, self).__init__(
host, username=username, password=password, success=success,
timeout=timeout, find_prompt=find_prompt,
find_prompt_prefix=find_prompt_prefix)
self._port = port
self._initial_chat = initial_chat
self._connect_timeout = timeout or TIMEOUT_DEFAULT
if success is None:
self._success = self._username+r'.*> '
def Connect(self):
"""Makes the connection."""
self._sock = socket.socket()
self._sock.settimeout(self._connect_timeout)
try:
self._sock.connect((self._host, self._port))
except socket.timeout:
raise TimeoutError(self._connect_timeout)
except socket.gaierror as e:
raise ConnectionError('Lookup failure for %r: %s' % (self._host, e[1]))
except socket.error as e:
raise ConnectionError('Connect failure for %r: %s' % (self._host, e[1]))
if self._initial_chat is not None:
try:
for expected_recv, to_send in self._initial_chat:
actual_recv = self._sock.recv(len(expected_recv))
if actual_recv == expected_recv:
self._sock.send(to_send)
else:
raise ConnectionError('Initial chat failure for %r: expected %r, '
'got %r' % (self._host, expected_recv,
actual_recv))
except socket.timeout:
logging.debug('Initial chat timeout for %r', self._host)
raise TimeoutError(self._connect_timeout)
self._sock.settimeout(None)
self.child = SocketSpawn(self._sock, maxread=8192)
self.child.timeout = self._connect_timeout
logging.debug('Socket connected to %r:%s', self._host, self._port)
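    # The order of these patterns matters: the index returned by expect_list()
    # selects the branch below (success, username prompt, password prompt,
    # authentication failure).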
responses = self.child.compile_pattern_list([
self._success,
r'[Ll]ogin|[Uu]ser[Nn]ame',
r'[Pp]assword:',
r'Permission denied|Authentication failed'])
self.exit_list = self.child.compile_pattern_list(pexpect.EOF)
while True:
try:
timeout = max(1, self._connect_timeout)
pattern = self.child.expect_list(responses, timeout=timeout)
logging.debug('Connect() matched responses[%d]', pattern)
if pattern == 0:
self._MaybeFindPrompt()
break
elif pattern == 1:
self.child.send(self._username+'\r')
elif pattern == 2:
self.child.send(self._password+'\r')
elif pattern == 3:
raise ConnectionError('Permission denied for %r' % self._host)
else:
raise ConnectionError('Unexpected pattern %d' % pattern)
except pexpect.TIMEOUT:
raise TimeoutError(timeout)
except pexpect.EOF as e:
raise ConnectionError(str(e))
return None
class SshSpawn(pexpect.spawn):
"""Wrapper around pexpect.spawn to use a Paramiko channel."""
# pylint: disable=g-bad-name
def __init__(self, channel, *args, **kwargs):
pexpect.spawn.__init__(self, None, *args, **kwargs)
self.channel = channel
self.child_fd = None
self.closed = False
self.name = '<ssh channel %s>' % channel.get_id()
def isalive(self):
try:
return self.channel.get_transport().is_active()
except AttributeError:
return False
def read_nonblocking(self, size=1, timeout=None):
"""See parent. This actually may or may not block based on timeout."""
if not self.isalive():
raise pexpect.EOF('End Of File (EOF) in read() - Not alive.')
if timeout == -1:
timeout = self.timeout
self.channel.settimeout(timeout)
try:
s = self.channel.recv(size)
except socket.timeout:
raise pexpect.TIMEOUT('Timeout (%s) exceeded in read().' % timeout)
except paramiko.SSHException as e:
raise pexpect.EOF('Paramiko exception: %s' % e)
except EOFError:
raise pexpect.EOF('Paramiko reported End Of File (EOF) in read()')
if not s:
self.flag_eof = 1
raise pexpect.EOF('End Of File (EOF) in read().')
return s
def send(self, s):
return self.channel.send(s)
def __del__(self):
return
def close(self):
return
def terminate(self, force=False):
_ = force
return
def kill(self, sig):
_ = sig
return
class HpSshSpawn(SshSpawn):
"""Wrapped pexpect.spawn to use a Paramiko channel and HP ANSI filters.
This also deals with the annoying pager which cannot be disabled.
"""
# ANSI character sequences to convert to a newline.
NEWLINE_RE = re.compile('\x1B(?:\\[0m|E)')
# All other ANSI character sequences (removed from the output).
# Matches all strings containing \x1B, unless they contain a truncated ANSI
# sequence at the end of the string.
ANSI_RE = re.compile('\x1B([^[]|\\[[^@-~]*[@-~])')
def __init__(self, channel, *args, **kwargs):
SshSpawn.__init__(self, channel, *args, **kwargs)
self._read_nonblocking_buf = ''
def _Filter(self, text):
text = re.sub(self.NEWLINE_RE, '\n', text)
text = re.sub(self.ANSI_RE, '', text)
logging.vlog(4, 'Filtered: %r', text)
return text
def read_nonblocking(self, size=1, timeout=None):
"""Read, handling terminal control input from an HP ProCurve.
This may or may not actually block, as per its parent.
Args:
size: An int, the minimum size block to return.
timeout: An optional float, wait only timeout seconds at most.
Returns:
A string, the filtered output.
"""
start = time.time()
if timeout == -1:
timeout = self.timeout
while True:
if timeout and time.time() > start + timeout:
return ''
in_data = SshSpawn.read_nonblocking(self, size=size, timeout=timeout)
logging.vlog(4, 'Unfiltered: %r', in_data)
if in_data and self._read_nonblocking_buf:
logging.debug('Prepending data: %r', self._read_nonblocking_buf)
in_data = self._read_nonblocking_buf + in_data
self._read_nonblocking_buf = ''
filtered = self._Filter(in_data)
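      # A trailing, incomplete ANSI escape sequence is held back and prepended
      # to the next read so it can be filtered once the rest arrives.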
escape_location = filtered.find('\x1B')
if escape_location != -1:
logging.debug('Partial ANSI tag in filtered data: %r', filtered)
self._read_nonblocking_buf = filtered[escape_location:]
filtered = filtered[:escape_location]
if filtered:
return filtered
class ParamikoSshConnection(Connection):
"""Base class for SSH connections using Paramiko."""
def __init__(self, host, username, password=None, success=None,
timeout=None, find_prompt=False, ssh_keys=None,
enable_password=None, ssh_client=None, find_prompt_prefix=None):
"""Initializer.
Args:
host: As per parent.
username: As per parent.
password: As per parent.
success: As per parent.
timeout: As per parent.
find_prompt: As per parent.
ssh_keys: A tuple of strings, SSH private keys (optional; may be None).
enable_password: As per parent.
      ssh_client: An instance of an object that implements an SSH client.
find_prompt_prefix: As per parent.
"""
super(ParamikoSshConnection, self).__init__(
host, username, password, success, None, timeout, find_prompt,
enable_password=enable_password, find_prompt_prefix=find_prompt_prefix)
if success is None:
self._success = self._username+r'.*> '
self.ssh_client = ssh_client
self._ssh_client = None
self._ssh_keys = ssh_keys or ()
self._spawn = SshSpawn
if self._spawn is None:
      raise NotImplementedError('Must supply a spawn= keyword argument.')
def Connect(self):
"""Makes the connection.
We can have an instance of this class without being connected to the
device, e.g. after a disconnect. Hence setting up the actual SSH connection
should happen in this method, not in the constructor.
"""
try:
if self.ssh_client:
# An SSH client was provided. Use it.
self._ssh_client = self.ssh_client.Connect(
hostname=self._host,
username=self._username,
password=self._password,
ssh_keys=self._ssh_keys,
timeout=self._connect_timeout)
else:
# The Connect() function from the sshclient module is a factory that
# returns a paramiko.SSHClient instance.
self._ssh_client = sshclient.Connect(
hostname=self._host,
username=self._username,
password=self._password,
ssh_keys=self._ssh_keys,
timeout=self._connect_timeout)
except (exceptions.ConnectError, exceptions.AuthenticationError) as e:
raise ConnectionError(str(e))
# We are connected. Now set up pexpect.
try:
ssh_channel = self._ssh_client.invoke_shell()
ssh_channel.set_combine_stderr(True)
self.child = self._spawn(ssh_channel, maxread=8192)
timeout = max(1, self._connect_timeout)
pattern = self.child.expect([self._success], timeout=timeout)
if pattern == 0:
self._MaybeFindPrompt()
except pexpect.TIMEOUT:
raise TimeoutError(timeout)
except pexpect.EOF as e:
raise ConnectionError(str(e))
except paramiko.SSHException as e:
msg = 'SSHException connecting to %r: %s' % (self._host, e)
raise ConnectionError(msg)
# Used by _Disconnect in ftos.py and ios.py.
self.exit_list = self.child.compile_pattern_list(pexpect.EOF)
return None
class HpSshFilterConnection(ParamikoSshConnection):
"""Creates an SSH connection to an HP Switch with terminal escape filtering.
This filters terminal escape sequences seen on the Hewlett-Packard ProCurve
ethernet switches.
"""
def __init__(self, host, username, password=None, success=None,
timeout=None, find_prompt=False, ssh_keys=None,
enable_password=None, ssh_client=None, find_prompt_prefix=None):
super(HpSshFilterConnection, self).__init__(
host, username, password, success, timeout, find_prompt,
ssh_keys=ssh_keys, enable_password=enable_password,
ssh_client=ssh_client, find_prompt_prefix=find_prompt_prefix)
self._spawn = HpSshSpawn
def _MaybeFindPrompt(self):
"""Perform real login and then enable if we have an enable password."""
# We always run this for HP, no matter the state of self._find_prompt.
self._prompt = r'(?:^|\n|\r)([A-Za-z0-9\._-]+)(?:>|#) '
# Shake out the prompt. We may be facing a Password prompt or
# a 'Press any key to continue' prompt.
self.child.send('\r')
# Only send the password once.
password_sent = False
try:
# Login.
while True:
logging.vlog(3, 'Expecting prompt %r', self._prompt)
compiled_regexes = self.child.compile_pattern_list(
[self._prompt, r'Press any key to continue',
'Password:', 'Invalid password',
'Unable to verify password'])
i = self.child.expect(compiled_regexes, timeout=10)
if i == 0:
re_str = (re.escape(self.child.match.group(1)) +
r'(?:>|#) ')
logging.vlog(3, 'Prompt set to %r', re_str)
self.re_prompt = re.compile(re_str)
break
elif i == 1:
logging.vlog(3, 'Pressing any key (space)')
self.child.send(' ')
elif i == 2 and not password_sent:
# Send the password only once.
try:
self.child.sendline(self._password)
logging.vlog(3, 'Sent user password (again) to %r', self._host)
password_sent = True
except (pexpect.TIMEOUT, pexpect.EOF) as e:
self._ssh_client = None
raise ConnectionError(str(e))
elif i <= 3 and i < 5:
logging.error('CONNECT_ERROR Incorrect user password on %r',
self._host)
# Sleep momentarily before expecting again to break buffer swap races.
time.sleep(0.05)
# Enable.
password_sent = False
logging.vlog(3, 'Enabling for HP on %r', self._host)
self.child.sendline('enable')
while True:
i = self.child.expect([self._prompt, 'Password:',
'Invalid password',
'Unable to verify password'], timeout=10)
if i == 0:
# Found the prompt, we're enabled.
break
elif i == 1 and not password_sent:
if self._enable_password is not None:
self.child.sendline(self._enable_password)
logging.vlog(3, 'Sent enable password to %r', self._host)
else:
self.child.sendline(self._password)
logging.vlog(3, 'Sent user password to %r', self._host)
password_sent = True
elif i <= 3 and i < 5:
logging.error('CONNECT_ERROR Incorrect user password on %r',
self._host)
# Sleep momentarily before expecting again to break buffer swap races.
time.sleep(0.05)
except (pexpect.TIMEOUT, pexpect.EOF) as e:
self._ssh_client = None
raise ConnectionError(str(e))
class ScpPutConnection(Connection):
"""Copies a file via SCP (RCP over SSH)."""
def __init__(self, host, username, password=None):
"""Initializer.
Args:
host: As per parent.
username: As per parent.
password: As per parent.
"""
super(ScpPutConnection, self).__init__(host, username, password)
self._ssh_client = sshclient.Connect(hostname=self._host,
username=self._username,
password=self._password)
self.transport = self._ssh_client.get_transport()
def Copy(self, source_data, destination_file):
"""Handles the SCP file copy.
Args:
source_data: The source data to copy as a string
destination_file: The file on the remote device
Raises:
ScpError: There was an error copying the file.
"""
try:
sshclient.ScpPut(self.transport, source_data, destination_file,
self._connect_timeout)
except sshclient.ScpError as e:
raise ScpError('SCP put failed: %s: %s' % (e.__class__.__name__, e))
|
|
#!/usr/bin/env python
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
""" Sample command-line program for listing Google Dataproc Clusters"""
import argparse
import os
from google.cloud import storage
import googleapiclient.discovery
# Currently only the "global" region is supported
REGION = 'global'
DEFAULT_FILENAME = 'pyspark_sort.py'
def get_default_pyspark_file():
"""Gets the PySpark file from this directory"""
current_dir = os.path.dirname(os.path.abspath(__file__))
f = open(os.path.join(current_dir, DEFAULT_FILENAME), 'r')
return f, DEFAULT_FILENAME
def get_pyspark_file(filename):
f = open(filename, 'r')
return f, os.path.basename(filename)
def upload_pyspark_file(project_id, bucket_name, filename, file):
"""Uploads the PySpark file in this directory to the configured
input bucket."""
print('Uploading pyspark file to GCS')
client = storage.Client(project=project_id)
bucket = client.get_bucket(bucket_name)
blob = bucket.blob(filename)
blob.upload_from_file(file)
def download_output(project_id, cluster_id, output_bucket, job_id):
"""Downloads the output file from Cloud Storage and returns it as a
string."""
print('Downloading output file')
client = storage.Client(project=project_id)
bucket = client.get_bucket(output_bucket)
output_blob = (
'google-cloud-dataproc-metainfo/{}/jobs/{}/driveroutput.000000000'
.format(cluster_id, job_id))
return bucket.blob(output_blob).download_as_string()
# [START create_cluster]
def create_cluster(dataproc, project, cluster_name, zone):
print('Creating cluster.')
zone_uri = \
'https://www.googleapis.com/compute/v1/projects/{}/zones/{}'.format(
project, zone)
cluster_data = {
'projectId': project,
'clusterName': cluster_name,
'config': {
'gceClusterConfig': {
'zoneUri': zone_uri
}
}
}
result = dataproc.projects().regions().clusters().create(
projectId=project,
region=REGION,
body=cluster_data).execute()
return result
# [END create_cluster]
def wait_for_cluster_creation(dataproc, project_id, cluster_name, zone):
print('Waiting for cluster creation')
while True:
result = dataproc.projects().regions().clusters().list(
projectId=project_id,
region=REGION).execute()
cluster_list = result['clusters']
cluster = [c
for c in cluster_list
if c['clusterName'] == cluster_name][0]
if cluster['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
if cluster['status']['state'] == 'RUNNING':
print("Cluster created.")
break
# [START list_clusters_with_detail]
def list_clusters_with_details(dataproc, project):
result = dataproc.projects().regions().clusters().list(
projectId=project,
region=REGION).execute()
cluster_list = result['clusters']
for cluster in cluster_list:
print("{} - {}"
.format(cluster['clusterName'], cluster['status']['state']))
return result
# [END list_clusters_with_detail]
def get_cluster_id_by_name(cluster_list, cluster_name):
"""Helper function to retrieve the ID and output bucket of a cluster by
name."""
cluster = [c for c in cluster_list if c['clusterName'] == cluster_name][0]
return cluster['clusterUuid'], cluster['config']['configBucket']
# [START submit_pyspark_job]
def submit_pyspark_job(dataproc, project, cluster_name, bucket_name, filename):
"""Submits the Pyspark job to the cluster, assuming `filename` has
already been uploaded to `bucket_name`"""
job_details = {
'projectId': project,
'job': {
'placement': {
'clusterName': cluster_name
},
'pysparkJob': {
'mainPythonFileUri': 'gs://{}/{}'.format(bucket_name, filename)
}
}
}
result = dataproc.projects().regions().jobs().submit(
projectId=project,
region=REGION,
body=job_details).execute()
job_id = result['reference']['jobId']
print('Submitted job ID {}'.format(job_id))
return job_id
# [END submit_pyspark_job]
# [START delete]
def delete_cluster(dataproc, project, cluster):
print('Tearing down cluster')
result = dataproc.projects().regions().clusters().delete(
projectId=project,
region=REGION,
clusterName=cluster).execute()
return result
# [END delete]
# [START wait]
def wait_for_job(dataproc, project, job_id):
print('Waiting for job to finish...')
while True:
result = dataproc.projects().regions().jobs().get(
projectId=project,
region=REGION,
jobId=job_id).execute()
# Handle exceptions
if result['status']['state'] == 'ERROR':
raise Exception(result['status']['details'])
elif result['status']['state'] == 'DONE':
print('Job finished')
return result
# [END wait]
# [START get_client]
def get_client():
"""Builds an http client authenticated with the service account
credentials."""
dataproc = googleapiclient.discovery.build('dataproc', 'v1')
return dataproc
# [END get_client]
def main(project_id, zone, cluster_name, bucket_name, pyspark_file=None):
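    # End-to-end flow: create a cluster, upload the PySpark job to GCS,
    # submit it, wait for it to finish, fetch the driver output, and always
    # tear the cluster down afterwards.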
dataproc = get_client()
try:
if pyspark_file:
spark_file, spark_filename = get_pyspark_file(pyspark_file)
else:
spark_file, spark_filename = get_default_pyspark_file()
create_cluster(dataproc, project_id, cluster_name, zone)
wait_for_cluster_creation(dataproc, project_id, cluster_name, zone)
upload_pyspark_file(project_id, bucket_name,
spark_filename, spark_file)
cluster_list = list_clusters_with_details(
dataproc, project_id)['clusters']
(cluster_id, output_bucket) = (
get_cluster_id_by_name(cluster_list, cluster_name))
# [START call_submit_pyspark_job]
job_id = submit_pyspark_job(
dataproc, project_id, cluster_name, bucket_name, spark_filename)
# [END call_submit_pyspark_job]
wait_for_job(dataproc, project_id, job_id)
output = download_output(project_id, cluster_id, output_bucket, job_id)
print('Received job output {}'.format(output))
return output
finally:
delete_cluster(dataproc, project_id, cluster_name)
spark_file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter
)
parser.add_argument(
        '--project_id', help='Project ID you want to access.', required=True)
parser.add_argument(
        '--zone', help='Zone to create clusters in', required=True)
parser.add_argument(
'--cluster_name', help='Name of the cluster to create', required=True)
parser.add_argument(
'--gcs_bucket', help='Bucket to upload Pyspark file to', required=True)
parser.add_argument(
'--pyspark_file', help='Pyspark filename. Defaults to pyspark_sort.py')
args = parser.parse_args()
main(
args.project_id, args.zone,
args.cluster_name, args.gcs_bucket, args.pyspark_file)
|
|
#!/usr/bin/env python
# encoding: ISO8859-1
"""
Copyright (c)2011, Hideyuki Tanaka
All rights reserved.
Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:
* Redistributions of source code must retain the above copyright
notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above
copyright notice, this list of conditions and the following
disclaimer in the documentation and/or other materials provided
with the distribution.
* Neither the name of Hideyuki Tanaka nor the names of other
contributors may be used to endorse or promote products derived
from this software without specific prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
"AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
(INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
import os, subprocess, sys
from waflib.TaskGen import before, after, feature
from waflib import Options, Task, Utils, Logs, Errors
C1 = '#XXX'.encode()
C2 = '#YYY'.encode()
UNPACK_DIR = '.unittest-gtest'
GTEST_DIR = 'gtest-1.7.0/fused-src'
def cleanup():
import shutil
try: shutil.rmtree(UNPACK_DIR)
except OSError: pass
def unpack_gtest(conf):
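    # The gtest tarball is appended to this very script between '#==>' and
    # '#<==' marker lines; read it back, undo the newline encoding and unpack
    # it into UNPACK_DIR.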
cwd = os.getcwd()
fname = __file__
if fname.endswith('.pyc'):
fname = fname[0:-1]
f = open(fname, 'rb')
while 1:
line = f.readline()
if not line:
Logs.error('not contain gtest archive')
sys.exit(1)
if line == '#==>\n'.encode():
txt = f.readline()
if not txt:
Logs.error('corrupt archive')
if f.readline() != '#<==\n'.encode():
Logs.error('corrupt archive')
break
txt = txt[1:-1].replace(C1, '\n'.encode()).replace(C2, '\r'.encode())
cleanup()
tmp = 't.tar.bz2'
os.makedirs(UNPACK_DIR)
os.chdir(UNPACK_DIR)
t = open(tmp, 'wb')
t.write(txt)
t.close()
def check_call(args):
if subprocess.call(args):
            raise Errors.WafError('command failed: %r' % (args,))
try:
check_call(['tar', 'xf', tmp])
check_call(['mkdir', GTEST_DIR + '/gtest/gtest'])
check_call(['cp', GTEST_DIR + '/gtest/gtest.h', GTEST_DIR + '/gtest/gtest/gtest.h'])
except:
os.chdir(cwd)
cleanup()
Logs.error('gtest cannot be unpacked.')
os.unlink(tmp)
conf.env.UNITTEST_GTEST_PATH = os.path.abspath(os.getcwd())
os.chdir(cwd)
def configure(conf):
try:
unpack_gtest(conf)
conf.msg('Unpacking gtest', 'yes')
except:
conf.msg('Unpacking gtest', 'no')
Logs.error(sys.exc_info()[1])
conf.check_cxx(lib = 'pthread', uselib_store = 'GTEST_PTHREAD')
def options(opt):
opt.add_option('--check', action = 'store_true', default = False,
help = 'Execute unit tests')
opt.add_option('--checkall', action = 'store_true', default = False,
help = 'Execute all unit tests')
opt.add_option('--checkone', action = 'store', default = False,
help = 'Execute specified unit test')
opt.add_option('--checkfilter', action = 'store', default = False,
                   help = 'Execute unit tests specified by pattern')
def match_filter(filt, targ):
if isinstance(filt, str):
(pat, _, _) = filt.partition('.')
if pat == '*':
return True
return pat == targ
return False
@feature('testt', 'gtest')
@before('process_rule')
def test_remover(self):
if not Options.options.check and not Options.options.checkall and self.target != Options.options.checkone and not match_filter(Options.options.checkfilter, self.target):
self.meths[:] = []
@feature('gtest')
@before('process_source')
def gtest_attach(self):
if not hasattr(self.bld, 'def_gtest_objects'):
self.bld.objects(
source = [UNPACK_DIR + '/' + GTEST_DIR + '/gtest/gtest-all.cc',
UNPACK_DIR + '/' + GTEST_DIR + '/gtest/gtest_main.cc'],
target = 'GTEST_OBJECTS'
)
self.bld.def_gtest_objects = True
DIR = self.env.UNITTEST_GTEST_PATH + '/' + GTEST_DIR
self.includes = self.to_list(getattr(self, 'includes', [])) + [DIR]
self.use = self.to_list(getattr(self, 'use', [])) + ['GTEST_PTHREAD', 'GTEST_OBJECTS']
@feature('testt', 'gtest')
@after('apply_link')
def make_test(self):
if not 'cprogram' in self.features and not 'cxxprogram' in self.features:
Logs.error('test cannot be executed %s'%self)
return
self.default_install_path = None
self.create_task('utest', self.link_task.outputs)
import threading
testlock = threading.Lock()
class utest(Task.Task):
"""
Execute a unit test
"""
color = 'PINK'
ext_in = ['.bin']
vars = []
def runnable_status(self):
stat = super(utest, self).runnable_status()
if stat != Task.SKIP_ME:
return stat
if Options.options.checkall:
return Task.RUN_ME
if Options.options.checkone == self.generator.name:
return Task.RUN_ME
if isinstance(Options.options.checkfilter, str):
if match_filter(Options.options.checkfilter, self.generator.name):
return Task.RUN_ME
return stat
def run(self):
"""
Execute the test. The execution is always successful, but the results
are stored on ``self.generator.bld.utest_results`` for postprocessing.
"""
status = 0
filename = self.inputs[0].abspath()
self.ut_exec = getattr(self, 'ut_exec', [filename])
if getattr(self.generator, 'ut_fun', None):
self.generator.ut_fun(self)
try:
fu = getattr(self.generator.bld, 'all_test_paths')
except AttributeError:
fu = os.environ.copy()
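        # Collect the output directories of every link task so that shared
        # libraries built in this project are on the test binary's search
        # path.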
lst = []
for g in self.generator.bld.groups:
for tg in g:
if getattr(tg, 'link_task', None):
lst.append(tg.link_task.outputs[0].parent.abspath())
def add_path(dct, path, var):
dct[var] = os.pathsep.join(Utils.to_list(path) + [os.environ.get(var, '')])
if sys.platform == 'win32':
add_path(fu, lst, 'PATH')
elif sys.platform == 'darwin':
add_path(fu, lst, 'DYLD_LIBRARY_PATH')
add_path(fu, lst, 'LD_LIBRARY_PATH')
else:
add_path(fu, lst, 'LD_LIBRARY_PATH')
self.generator.bld.all_test_paths = fu
if isinstance(Options.options.checkfilter, str):
(_, _, filt) = Options.options.checkfilter.partition('.')
if filt != "":
self.ut_exec += ['--gtest_filter=' + filt]
cwd = getattr(self.generator, 'ut_cwd', '') or self.inputs[0].parent.abspath()
proc = Utils.subprocess.Popen(self.ut_exec, cwd=cwd, env=fu, stderr=Utils.subprocess.PIPE, stdout=Utils.subprocess.PIPE)
(stdout, stderr) = proc.communicate()
tup = (filename, proc.returncode, stdout, stderr)
self.generator.utest_result = tup
testlock.acquire()
try:
bld = self.generator.bld
Logs.debug("ut: %r", tup)
try:
bld.utest_results.append(tup)
except AttributeError:
bld.utest_results = [tup]
a = getattr(self.generator.bld, 'added_post_fun', False)
if not a:
self.generator.bld.add_post_fun(summary)
self.generator.bld.added_post_fun = True
finally:
testlock.release()
def summary(bld):
lst = getattr(bld, 'utest_results', [])
if not lst: return
total = len(lst)
fail = len([x for x in lst if x[1]])
Logs.pprint('CYAN', 'test summary')
Logs.pprint('CYAN', ' tests that pass %d/%d' % (total-fail, total))
for (f, code, out, err) in lst:
if not code:
Logs.pprint('GREEN', ' %s' % f)
if isinstance(Options.options.checkfilter, str):
print(out)
if fail>0:
Logs.pprint('RED', ' tests that fail %d/%d' % (fail, total))
for (f, code, out, err) in lst:
if code:
Logs.pprint('RED', ' %s' % f)
print(out.decode('utf-8'))
raise Errors.WafError('test failed')
|
|
# Copyright (c) 2013-2014 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from oslo.config import cfg
import requests
from neutron.common import constants as n_const
from neutron.common import exceptions as n_exc
from neutron.common import utils
from neutron.extensions import portbindings
from neutron.openstack.common import excutils
from neutron.openstack.common import jsonutils
from neutron.openstack.common import log
from neutron.plugins.common import constants
from neutron.plugins.ml2 import driver_api as api
LOG = log.getLogger(__name__)
ODL_NETWORKS = 'networks'
ODL_SUBNETS = 'subnets'
ODL_PORTS = 'ports'
odl_opts = [
cfg.StrOpt('url',
help=_("HTTP URL of OpenDaylight REST interface.")),
cfg.StrOpt('username',
help=_("HTTP username for authentication")),
cfg.StrOpt('password', secret=True,
help=_("HTTP password for authentication")),
cfg.IntOpt('timeout', default=10,
help=_("HTTP timeout in seconds.")),
cfg.IntOpt('session_timeout', default=30,
help=_("Tomcat session timeout in minutes.")),
]
cfg.CONF.register_opts(odl_opts, "ml2_odl")
def try_del(d, keys):
"""Ignore key errors when deleting from a dictionary."""
for key in keys:
try:
del d[key]
except KeyError:
pass
class OpendaylightAuthError(n_exc.NeutronException):
message = '%(msg)s'
class OpenDaylightMechanismDriver(api.MechanismDriver):
"""Mechanism Driver for OpenDaylight.
This driver was a port from the Tail-F NCS MechanismDriver. The API
exposed by ODL is slightly different from the API exposed by NCS,
but the general concepts are the same.
"""
auth = None
out_of_sync = True
def initialize(self):
self.url = cfg.CONF.ml2_odl.url
self.timeout = cfg.CONF.ml2_odl.timeout
self.username = cfg.CONF.ml2_odl.username
self.password = cfg.CONF.ml2_odl.password
required_opts = ('url', 'username', 'password')
for opt in required_opts:
if not getattr(self, opt):
raise cfg.RequiredOptError(opt, 'ml2_odl')
self.auth = (self.username, self.password)
self.vif_type = portbindings.VIF_TYPE_OVS
self.vif_details = {portbindings.CAP_PORT_FILTER: True}
# Postcommit hooks are used to trigger synchronization.
def create_network_postcommit(self, context):
self.synchronize('create', ODL_NETWORKS, context)
def update_network_postcommit(self, context):
self.synchronize('update', ODL_NETWORKS, context)
def delete_network_postcommit(self, context):
self.synchronize('delete', ODL_NETWORKS, context)
def create_subnet_postcommit(self, context):
self.synchronize('create', ODL_SUBNETS, context)
def update_subnet_postcommit(self, context):
self.synchronize('update', ODL_SUBNETS, context)
def delete_subnet_postcommit(self, context):
self.synchronize('delete', ODL_SUBNETS, context)
def create_port_postcommit(self, context):
self.synchronize('create', ODL_PORTS, context)
def update_port_postcommit(self, context):
self.synchronize('update', ODL_PORTS, context)
def delete_port_postcommit(self, context):
self.synchronize('delete', ODL_PORTS, context)
def synchronize(self, operation, object_type, context):
"""Synchronize ODL with Neutron following a configuration change."""
if self.out_of_sync:
self.sync_full(context)
else:
self.sync_single_resource(operation, object_type, context)
@staticmethod
def filter_create_network_attributes(network, context):
"""Filter out network attributes not required for a create."""
try_del(network, ['status', 'subnets'])
@staticmethod
def filter_create_subnet_attributes(subnet, context):
"""Filter out subnet attributes not required for a create."""
pass
@classmethod
def filter_create_port_attributes(cls, port, context):
"""Filter out port attributes not required for a create."""
cls.add_security_groups(port, context)
# TODO(kmestery): Converting to uppercase due to ODL bug
# https://bugs.opendaylight.org/show_bug.cgi?id=477
port['mac_address'] = port['mac_address'].upper()
try_del(port, ['status'])
def sync_resources(self, collection_name, context):
"""Sync objects from Neutron over to OpenDaylight.
        This will handle syncing networks, subnets, and ports from Neutron to
        OpenDaylight. It also filters out attributes that are not valid for
        create API operations.
"""
to_be_synced = []
dbcontext = context._plugin_context
obj_getter = getattr(context._plugin, 'get_%s' % collection_name)
resources = obj_getter(dbcontext)
for resource in resources:
try:
urlpath = collection_name + '/' + resource['id']
self.sendjson('get', urlpath, None)
except requests.exceptions.HTTPError as e:
with excutils.save_and_reraise_exception() as ctx:
if e.response.status_code == requests.codes.not_found:
attr_filter = self.create_object_map[collection_name]
attr_filter(resource, context)
to_be_synced.append(resource)
ctx.reraise = False
key = collection_name[:-1] if len(to_be_synced) == 1 else (
collection_name)
self.sendjson('post', collection_name, {key: to_be_synced})
@utils.synchronized('odl-sync-full')
def sync_full(self, context):
"""Resync the entire database to ODL.
Transition to the in-sync state on success.
Note: we only allow a single thread in here at a time.
"""
if not self.out_of_sync:
return
for collection_name in [ODL_NETWORKS, ODL_SUBNETS, ODL_PORTS]:
self.sync_resources(collection_name, context)
self.out_of_sync = False
@staticmethod
def filter_update_network_attributes(network, context):
"""Filter out network attributes for an update operation."""
try_del(network, ['id', 'status', 'subnets', 'tenant_id'])
@staticmethod
def filter_update_subnet_attributes(subnet, context):
"""Filter out subnet attributes for an update operation."""
try_del(subnet, ['id', 'network_id', 'ip_version', 'cidr',
'allocation_pools', 'tenant_id'])
@classmethod
def filter_update_port_attributes(cls, port, context):
"""Filter out port attributes for an update operation."""
cls.add_security_groups(port, context)
try_del(port, ['network_id', 'id', 'status', 'mac_address',
'tenant_id', 'fixed_ips'])
def sync_single_resource(self, operation, object_type, context):
"""Sync over a single resource from Neutron to OpenDaylight.
        Handle syncing a single operation over to OpenDaylight, filtering out
        attributes that are not required for the operation (create or update)
        being handled.
"""
try:
obj_id = context.current['id']
if operation == 'delete':
self.sendjson('delete', object_type + '/' + obj_id, None)
else:
if operation == 'create':
urlpath = object_type
method = 'post'
attr_filter = self.create_object_map[object_type]
elif operation == 'update':
urlpath = object_type + '/' + obj_id
method = 'put'
attr_filter = self.update_object_map[object_type]
resource = context.current.copy()
attr_filter(resource, context)
self.sendjson(method, urlpath, {object_type[:-1]: resource})
except Exception:
with excutils.save_and_reraise_exception():
self.out_of_sync = True
@staticmethod
def add_security_groups(port, context):
"""Populate the 'security_groups' field with entire records."""
dbcontext = context._plugin_context
groups = [context._plugin.get_security_group(dbcontext, sg)
for sg in port['security_groups']]
port['security_groups'] = groups
def sendjson(self, method, urlpath, obj):
"""Send json to the OpenDaylight controller."""
headers = {'Content-Type': 'application/json'}
data = jsonutils.dumps(obj, indent=2) if obj else None
url = '/'.join([self.url, urlpath])
LOG.debug("Sending METHOD (%(method)s) URL (%(url)s) JSON (%(obj)s)",
{'method': method, 'url': url, 'obj': obj})
r = requests.request(method, url=url,
headers=headers, data=data,
auth=self.auth, timeout=self.timeout)
r.raise_for_status()
def bind_port(self, context):
LOG.debug("Attempting to bind port %(port)s on "
"network %(network)s",
{'port': context.current['id'],
'network': context.network.current['id']})
for segment in context.network.network_segments:
if self.check_segment(segment):
context.set_binding(segment[api.ID],
self.vif_type,
self.vif_details,
status=n_const.PORT_STATUS_ACTIVE)
LOG.debug("Bound using segment: %s", segment)
return
else:
LOG.debug("Refusing to bind port for segment ID %(id)s, "
"segment %(seg)s, phys net %(physnet)s, and "
"network type %(nettype)s",
{'id': segment[api.ID],
'seg': segment[api.SEGMENTATION_ID],
'physnet': segment[api.PHYSICAL_NETWORK],
'nettype': segment[api.NETWORK_TYPE]})
def check_segment(self, segment):
"""Verify a segment is valid for the OpenDaylight MechanismDriver.
Verify the requested segment is supported by ODL and return True or
False to indicate this to callers.
"""
network_type = segment[api.NETWORK_TYPE]
return network_type in [constants.TYPE_LOCAL, constants.TYPE_GRE,
constants.TYPE_VXLAN, constants.TYPE_VLAN]
OpenDaylightMechanismDriver.create_object_map = {
ODL_NETWORKS: OpenDaylightMechanismDriver.filter_create_network_attributes,
ODL_SUBNETS: OpenDaylightMechanismDriver.filter_create_subnet_attributes,
ODL_PORTS: OpenDaylightMechanismDriver.filter_create_port_attributes}
OpenDaylightMechanismDriver.update_object_map = {
ODL_NETWORKS: OpenDaylightMechanismDriver.filter_update_network_attributes,
ODL_SUBNETS: OpenDaylightMechanismDriver.filter_update_subnet_attributes,
ODL_PORTS: OpenDaylightMechanismDriver.filter_update_port_attributes}
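# Minimal, self-contained sketch (not part of the driver above) of how the
# create-side attribute filters trim a resource dict before it is sent to ODL.
# The sample network below is hypothetical and no controller is contacted.
def _filter_example():
    sample_network = {'id': 'uuid-1', 'name': 'net1', 'status': 'ACTIVE',
                      'subnets': [], 'tenant_id': 'tenant-1'}
    attr_filter = OpenDaylightMechanismDriver.create_object_map[ODL_NETWORKS]
    attr_filter(sample_network, context=None)
    return sample_network  # 'status' and 'subnets' have been removed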
|
|
# -*- coding: utf-8 -*-
"""
theflasktest.testsuite
    ~~~~~~~~~~~~~~~~~~~~~~
Tests Flask itself. The majority of Flask is already tested
as part of Werkzeug.
:copyright: (c) 2011 by Armin Ronacher.
:license: BSD, see LICENSE for more details.
"""
from __future__ import print_function
import os
import sys
import flask
import warnings
import unittest
from functools import update_wrapper
from contextlib import contextmanager
from werkzeug.utils import import_string, find_modules
from flask._compat import reraise, StringIO
def add_to_path(path):
"""Adds an entry to sys.path if it's not already there. This does
not append it but moves it to the front so that we can be sure it
is loaded.
"""
if not os.path.isdir(path):
raise RuntimeError('Tried to add nonexisting path')
def _samefile(x, y):
if x == y:
return True
try:
return os.path.samefile(x, y)
except (IOError, OSError, AttributeError):
# Windows has no samefile
return False
sys.path[:] = [x for x in sys.path if not _samefile(path, x)]
sys.path.insert(0, path)
def iter_suites():
"""Yields all testsuites."""
for module in find_modules(__name__):
mod = import_string(module)
if hasattr(mod, 'suite'):
yield mod.suite()
def find_all_tests(suite):
"""Yields all the tests and their names from a given suite."""
suites = [suite]
while suites:
s = suites.pop()
try:
suites.extend(s)
except TypeError:
yield s, '%s.%s.%s' % (
s.__class__.__module__,
s.__class__.__name__,
s._testMethodName
)
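# Small standalone sketch (illustrative only, not used by the suite): flatten a
# nested TestSuite into (test, dotted-name) pairs with find_all_tests.
def _find_all_tests_example():
    class _DummyCase(unittest.TestCase):
        def test_ok(self):
            pass
    nested = unittest.TestSuite([unittest.TestSuite([_DummyCase('test_ok')])])
    return list(find_all_tests(nested))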
@contextmanager
def catch_warnings():
"""Catch warnings in a with block in a list"""
# make sure deprecation warnings are active in tests
warnings.simplefilter('default', category=DeprecationWarning)
filters = warnings.filters
warnings.filters = filters[:]
old_showwarning = warnings.showwarning
log = []
def showwarning(message, category, filename, lineno, file=None, line=None):
log.append(locals())
try:
warnings.showwarning = showwarning
yield log
finally:
warnings.filters = filters
warnings.showwarning = old_showwarning
@contextmanager
def catch_stderr():
"""Catch stderr in a StringIO"""
old_stderr = sys.stderr
sys.stderr = rv = StringIO()
try:
yield rv
finally:
sys.stderr = old_stderr
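# Minimal usage sketch (illustrative only): anything written to stderr inside
# the block ends up in the captured StringIO.
def _catch_stderr_example():
    with catch_stderr() as captured:
        sys.stderr.write('boom\n')
    return captured.getvalue()  # 'boom\n'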
def emits_module_deprecation_warning(f):
def new_f(self, *args, **kwargs):
with catch_warnings() as log:
f(self, *args, **kwargs)
self.assert_true(log, 'expected deprecation warning')
for entry in log:
self.assert_in('Modules are deprecated', str(entry['message']))
return update_wrapper(new_f, f)
class FlaskTestCase(unittest.TestCase):
"""Baseclass for all the tests that Flask uses. Use these methods
for testing instead of the camelcased ones in the baseclass for
consistency.
"""
def ensure_clean_request_context(self):
# make sure we're not leaking a request context since we are
# testing theflasktest internally in debug mode in a few cases
leaks = []
while flask._request_ctx_stack.top is not None:
leaks.append(flask._request_ctx_stack.pop())
self.assert_equal(leaks, [])
def setup(self):
pass
def teardown(self):
pass
def setUp(self):
self.setup()
def tearDown(self):
unittest.TestCase.tearDown(self)
self.ensure_clean_request_context()
self.teardown()
def assert_equal(self, x, y):
return self.assertEqual(x, y)
def assert_raises(self, exc_type, callable=None, *args, **kwargs):
catcher = _ExceptionCatcher(self, exc_type)
if callable is None:
return catcher
with catcher:
callable(*args, **kwargs)
def assert_true(self, x, msg=None):
self.assertTrue(x, msg)
def assert_false(self, x, msg=None):
self.assertFalse(x, msg)
def assert_in(self, x, y):
self.assertIn(x, y)
def assert_not_in(self, x, y):
self.assertNotIn(x, y)
if sys.version_info[:2] == (2, 6):
def assertIn(self, x, y):
assert x in y, "%r unexpectedly not in %r" % (x, y)
def assertNotIn(self, x, y):
assert x not in y, "%r unexpectedly in %r" % (x, y)
class _ExceptionCatcher(object):
def __init__(self, test_case, exc_type):
self.test_case = test_case
self.exc_type = exc_type
def __enter__(self):
return self
def __exit__(self, exc_type, exc_value, tb):
exception_name = self.exc_type.__name__
if exc_type is None:
self.test_case.fail('Expected exception of type %r' %
exception_name)
elif not issubclass(exc_type, self.exc_type):
reraise(exc_type, exc_value, tb)
return True
class BetterLoader(unittest.TestLoader):
"""A nicer loader that solves two problems. First of all we are setting
up tests from different sources and we're doing this programmatically
which breaks the default loading logic so this is required anyways.
Secondly this loader has a nicer interpolation for test names than the
default one so you can just do ``run-tests.py ViewTestCase`` and it
will work.
"""
def getRootSuite(self):
return suite()
def loadTestsFromName(self, name, module=None):
root = self.getRootSuite()
if name == 'suite':
return root
all_tests = []
for testcase, testname in find_all_tests(root):
if testname == name or \
testname.endswith('.' + name) or \
('.' + name + '.') in testname or \
testname.startswith(name + '.'):
all_tests.append(testcase)
if not all_tests:
raise LookupError('could not find test case for "%s"' % name)
if len(all_tests) == 1:
return all_tests[0]
rv = unittest.TestSuite()
for test in all_tests:
rv.addTest(test)
return rv
def setup_path():
add_to_path(os.path.abspath(os.path.join(
os.path.dirname(__file__), 'test_apps')))
def suite():
"""A testsuite that has all the Flask tests. You can use this
function to integrate the Flask tests into your own testsuite
in case you want to test that monkeypatches to Flask do not
break it.
"""
setup_path()
suite = unittest.TestSuite()
for other_suite in iter_suites():
suite.addTest(other_suite)
return suite
def main():
"""Runs the testsuite as command line application."""
try:
unittest.main(testLoader=BetterLoader(), defaultTest='suite')
except Exception as e:
print('Error: %s' % e)
|
|
# Licensed under the Apache License: http://www.apache.org/licenses/LICENSE-2.0
# For details: https://bitbucket.org/ned/coveragepy/src/default/NOTICE.txt
"""Raw data collector for coverage.py."""
import os, sys
from coverage import env
from coverage.backward import iitems
from coverage.files import abs_file
from coverage.misc import CoverageException
from coverage.pytracer import PyTracer
try:
# Use the C extension code when we can, for speed.
from coverage.tracer import CTracer, CFileDisposition # pylint: disable=no-name-in-module
except ImportError:
# Couldn't import the C extension, maybe it isn't built.
if os.getenv('COVERAGE_TEST_TRACER') == 'c':
# During testing, we use the COVERAGE_TEST_TRACER environment variable
# to indicate that we've fiddled with the environment to test this
# fallback code. If we thought we had a C tracer, but couldn't import
# it, then exit quickly and clearly instead of dribbling confusing
# errors. I'm using sys.exit here instead of an exception because an
# exception here causes all sorts of other noise in unittest.
sys.stderr.write(
"*** COVERAGE_TEST_TRACER is 'c' but can't import CTracer!\n"
)
sys.exit(1)
CTracer = None
class FileDisposition(object):
"""A simple value type for recording what to do with a file."""
pass
class Collector(object):
"""Collects trace data.
Creates a Tracer object for each thread, since they track stack
information. Each Tracer points to the same shared data, contributing
traced data points.
When the Collector is started, it creates a Tracer for the current thread,
and installs a function to create Tracers for each new thread started.
When the Collector is stopped, all active Tracers are stopped.
Threads started while the Collector is stopped will never have Tracers
associated with them.
"""
# The stack of active Collectors. Collectors are added here when started,
# and popped when stopped. Collectors on the stack are paused when not
# the top, and resumed when they become the top again.
_collectors = []
def __init__(
self,
should_trace, check_include, timid, branch, warn, concurrency,
):
"""Create a collector.
`should_trace` is a function, taking a filename, and returning a
        `coverage.FileDisposition` object.
`check_include` is a function taking a filename and a frame. It returns
a boolean: True if the file should be traced, False if not.
        If `timid` is true, then a slower, simpler trace function will be
        used. This is important for some environments where manipulation of
        tracing functions makes the faster, more sophisticated trace function
        not operate properly.
If `branch` is true, then branches will be measured. This involves
collecting data on which statements followed each other (arcs). Use
`get_arc_data` to get the arc data.
`warn` is a warning function, taking a single string message argument,
to be used if a warning needs to be issued.
`concurrency` is a string indicating the concurrency library in use.
Valid values are "greenlet", "eventlet", "gevent", or "thread" (the
default).
"""
self.should_trace = should_trace
self.check_include = check_include
self.warn = warn
self.branch = branch
self.threading = None
self.concurrency = concurrency
self.concur_id_func = None
try:
if concurrency == "greenlet":
import greenlet # pylint: disable=import-error,useless-suppression
self.concur_id_func = greenlet.getcurrent
elif concurrency == "eventlet":
import eventlet.greenthread # pylint: disable=import-error,useless-suppression
self.concur_id_func = eventlet.greenthread.getcurrent
elif concurrency == "gevent":
import gevent # pylint: disable=import-error,useless-suppression
self.concur_id_func = gevent.getcurrent
elif concurrency == "thread" or not concurrency:
# It's important to import threading only if we need it. If
# it's imported early, and the program being measured uses
# gevent, then gevent's monkey-patching won't work properly.
import threading
self.threading = threading
else:
raise CoverageException(
"Don't understand concurrency=%s" % concurrency
)
except ImportError:
raise CoverageException(
"Couldn't trace with concurrency=%s, "
"the module isn't installed." % concurrency
)
self.reset()
if timid:
# Being timid: use the simple Python trace function.
self._trace_class = PyTracer
else:
# Being fast: use the C Tracer if it is available, else the Python
# trace function.
self._trace_class = CTracer or PyTracer
if self._trace_class is CTracer:
self.file_disposition_class = CFileDisposition
self.supports_plugins = True
else:
self.file_disposition_class = FileDisposition
self.supports_plugins = False
def __repr__(self):
return "<Collector at 0x%x>" % id(self)
def tracer_name(self):
"""Return the class name of the tracer we're using."""
return self._trace_class.__name__
def reset(self):
"""Clear collected data, and prepare to collect more."""
# A dictionary mapping filenames to dicts with line number keys (if not
# branch coverage), or mapping filenames to dicts with line number
# pairs as keys (if branch coverage).
self.data = {}
# A dictionary mapping filenames to file tracer plugin names that will
# handle them.
self.file_tracers = {}
# The .should_trace_cache attribute is a cache from filenames to
# coverage.FileDisposition objects, or None. When a file is first
# considered for tracing, a FileDisposition is obtained from
# Coverage.should_trace. Its .trace attribute indicates whether the
# file should be traced or not. If it should be, a plugin with dynamic
# filenames can decide not to trace it based on the dynamic filename
# being excluded by the inclusion rules, in which case the
# FileDisposition will be replaced by None in the cache.
if env.PYPY:
import __pypy__ # pylint: disable=import-error
# Alex Gaynor said:
# should_trace_cache is a strictly growing key: once a key is in
# it, it never changes. Further, the keys used to access it are
# generally constant, given sufficient context. That is to say, at
# any given point _trace() is called, pypy is able to know the key.
# This is because the key is determined by the physical source code
# line, and that's invariant with the call site.
#
# This property of a dict with immutable keys, combined with
# call-site-constant keys is a match for PyPy's module dict,
# which is optimized for such workloads.
#
# This gives a 20% benefit on the workload described at
# https://bitbucket.org/pypy/pypy/issue/1871/10x-slower-than-cpython-under-coverage
self.should_trace_cache = __pypy__.newdict("module")
else:
self.should_trace_cache = {}
# Our active Tracers.
self.tracers = []
def _start_tracer(self):
"""Start a new Tracer object, and store it in self.tracers."""
tracer = self._trace_class()
tracer.data = self.data
tracer.trace_arcs = self.branch
tracer.should_trace = self.should_trace
tracer.should_trace_cache = self.should_trace_cache
tracer.warn = self.warn
if hasattr(tracer, 'concur_id_func'):
tracer.concur_id_func = self.concur_id_func
elif self.concur_id_func:
raise CoverageException(
"Can't support concurrency=%s with %s, "
"only threads are supported" % (
self.concurrency, self.tracer_name(),
)
)
if hasattr(tracer, 'file_tracers'):
tracer.file_tracers = self.file_tracers
if hasattr(tracer, 'threading'):
tracer.threading = self.threading
if hasattr(tracer, 'check_include'):
tracer.check_include = self.check_include
fn = tracer.start()
self.tracers.append(tracer)
return fn
# The trace function has to be set individually on each thread before
# execution begins. Ironically, the only support the threading module has
# for running code before the thread main is the tracing function. So we
# install this as a trace function, and the first time it's called, it does
# the real trace installation.
def _installation_trace(self, frame_unused, event_unused, arg_unused):
"""Called on new threads, installs the real tracer."""
# Remove ourselves as the trace function
sys.settrace(None)
# Install the real tracer.
fn = self._start_tracer()
# Invoke the real trace function with the current event, to be sure
# not to lose an event.
if fn:
fn = fn(frame_unused, event_unused, arg_unused)
# Return the new trace function to continue tracing in this scope.
return fn
def start(self):
"""Start collecting trace information."""
if self._collectors:
self._collectors[-1].pause()
self._collectors.append(self)
# Check to see whether we had a fullcoverage tracer installed.
traces0 = []
fn0 = sys.gettrace()
if fn0:
tracer0 = getattr(fn0, '__self__', None)
if tracer0:
traces0 = getattr(tracer0, 'traces', [])
# Install the tracer on this thread.
fn = self._start_tracer()
# Replay all the events from fullcoverage into the new trace function.
for args in traces0:
(frame, event, arg), lineno = args
try:
fn(frame, event, arg, lineno=lineno)
except TypeError:
raise Exception(
"fullcoverage must be run with the C trace function."
)
# Install our installation tracer in threading, to jump start other
# threads.
if self.threading:
self.threading.settrace(self._installation_trace)
def stop(self):
"""Stop collecting trace information."""
assert self._collectors
assert self._collectors[-1] is self, (
"Expected current collector to be %r, but it's %r" % (
self, self._collectors[-1],
)
)
self.pause()
self.tracers = []
# Remove this Collector from the stack, and resume the one underneath
# (if any).
self._collectors.pop()
if self._collectors:
self._collectors[-1].resume()
def pause(self):
"""Pause tracing, but be prepared to `resume`."""
for tracer in self.tracers:
tracer.stop()
stats = tracer.get_stats()
if stats:
print("\nCoverage.py tracer stats:")
for k in sorted(stats.keys()):
print("%16s: %s" % (k, stats[k]))
if self.threading:
self.threading.settrace(None)
def resume(self):
"""Resume tracing after a `pause`."""
for tracer in self.tracers:
tracer.start()
if self.threading:
self.threading.settrace(self._installation_trace)
else:
self._start_tracer()
def save_data(self, covdata):
"""Save the collected data to a `CoverageData`.
Also resets the collector.
"""
def abs_file_dict(d):
"""Return a dict like d, but with keys modified by `abs_file`."""
return dict((abs_file(k), v) for k, v in iitems(d))
if self.branch:
covdata.set_arcs(abs_file_dict(self.data))
else:
covdata.set_lines(abs_file_dict(self.data))
covdata.set_file_tracers(abs_file_dict(self.file_tracers))
self.reset()
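# Standalone sketch (not part of coverage.py) of the bootstrap pattern described
# above _installation_trace: threading.settrace() is the only hook that runs in a
# new thread before its target, so the first trace callback installs the real
# tracer and replays the event it received.
def _bootstrap_trace_example():
    # Import locally, mirroring the lazy threading import used by Collector.
    import threading

    events = []

    def real_tracer(frame, event, arg):
        events.append(event)
        return real_tracer

    def installer(frame, event, arg):
        sys.settrace(real_tracer)               # replace ourselves
        return real_tracer(frame, event, arg)   # do not lose the first event

    threading.settrace(installer)
    worker = threading.Thread(target=lambda: sum(range(10)))
    worker.start()
    worker.join()
    threading.settrace(None)
    return events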
|
|
#!/usr/bin/env python
# Copyright (c) 2014-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the BSD-style license found in the
# LICENSE file in the root directory of this source tree. An additional grant
# of patent rights can be found in the PATENTS file in the same directory.
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import argparse
import ast
import json
import logging
import os
import sys
import uuid
import subprocess
from gentable import *
from utils import platform
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
sys.path.append(SCRIPT_DIR + "/../tests")
# the log format for the logging module
LOG_FORMAT = "%(levelname)s [Line %(lineno)d]: %(message)s"
CANONICAL_PLATFORMS = {
"specs": "All Platforms",
"darwin": "Darwin (Apple OS X)",
"linux": "Ubuntu, CentOS",
"centos": "CentOS",
"ubuntu": "Ubuntu",
"utility": "Utility",
}
TEMPLATE_API_DEFINITION = """
{
"tables": %s,
"events": [
]
}
"""
class NoIndent(object):
"""Special instance checked object for removing json newlines."""
def __init__(self, value):
self.value = value
if 'type' in self.value and isinstance(self.value['type'], DataType):
self.value['type'] = str(self.value['type'])
class Encoder(json.JSONEncoder):
"""
Newlines are such a pain in json-generated output.
    Use this custom encoder to produce pretty-printed json while keeping
    NoIndent-wrapped sub-objects compact on a single line.
"""
def __init__(self, *args, **kwargs):
super(Encoder, self).__init__(*args, **kwargs)
self.kwargs = dict(kwargs)
del self.kwargs['indent']
self._replacement_map = {}
def default(self, o):
if isinstance(o, NoIndent):
key = uuid.uuid4().hex
self._replacement_map[key] = json.dumps(o.value, **self.kwargs)
return "@@%s@@" % (key,)
else:
return super(Encoder, self).default(o)
def encode(self, o):
result = super(Encoder, self).encode(o)
for k, v in self._replacement_map.iteritems():
result = result.replace('"@@%s@@"' % (k,), v)
return result
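# Small illustrative sketch (not used by the generator): pretty-print a document
# while keeping a NoIndent-wrapped column spec on one line. The table and column
# names below are hypothetical.
def _encoder_example():
    doc = {"name": "users",
           "columns": [NoIndent({"name": "uid", "type": "BIGINT"})]}
    return json.dumps(doc, cls=Encoder, sort_keys=True, indent=1,
                      separators=(',', ': '))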
def gen_api_json(api):
"""Apply the api literal object to the template."""
api = json.dumps(
api, cls=Encoder, sort_keys=True, indent=1, separators=(',', ': ')
)
return TEMPLATE_API_DEFINITION % (api)
def gen_spec(tree):
"""Given a table tree, produce a literal of the table representation."""
exec(compile(tree, "<string>", "exec"))
columns = [NoIndent({
"name": column.name,
"type": column.type,
"description": column.description,
"options": column.options,
}) for column in table.columns()]
foreign_keys = [NoIndent({"column": key.column, "table": key.table})
for key in table.foreign_keys()]
return {
"name": table.table_name,
"columns": columns,
"foreign_keys": foreign_keys,
"function": table.function,
"description": table.description,
"attributes": table.attributes,
"examples": table.examples,
}
def gen_diff(api_old_path, api_new_path):
"""Quick and dirty way to view table API changes."""
with open(api_old_path, 'r') as fh:
api_old = json.loads(fh.read())
with open(api_new_path, 'r') as fh:
api_new = json.loads(fh.read())
# Prune table lists into maps
old_tables = {}
new_tables = {}
for category in api_new["tables"]:
for table in category["tables"]:
new_tables["%s:%s" % (category["name"], table["name"])] = table
for category in api_old["tables"]:
for table in category["tables"]:
old_tables["%s:%s" % (category["name"], table["name"])] = table
    # Compare new against old (additions), then old against new (removals).
tables_added = []
tables_removed = []
columns_added = []
columns_removed = []
for name, table in new_tables.iteritems():
if name not in old_tables:
tables_added.append(name)
continue
for column in table["columns"]:
old_columns = [c["name"] for c in old_tables[name]["columns"]]
if column["name"] not in old_columns:
columns_added.append("%s:%s:%s:%s" % (category["name"],
table["name"], column["name"], column["type"]))
for name, table in old_tables.iteritems():
if name not in new_tables:
tables_removed.append(name)
continue
for column in table["columns"]:
new_columns = [c["name"] for c in new_tables[name]["columns"]]
if column["name"] not in new_columns:
columns_removed.append("%s:%s:%s:%s" % (category["name"],
table["name"], column["name"], column["type"]))
# Sort then pretty print (md) the changes.
tables_added.sort()
for name in tables_added:
print("Added table `%s` to %s" % tuple(name.split(":")[::-1]))
columns_added.sort()
for name in columns_added:
column = name.split(":")
print("Added column `%s` (`%s`) to table `%s`" % (column[2], column[3],
column[1]))
tables_removed.sort()
for name in tables_removed:
print("Removed table `%s` from %s" % tuple(name.split(":")[::-1]))
columns_removed.sort()
for name in columns_removed:
column = name.split(":")
print("Removed column `%s` (`%s`) from table `%s`" % (column[2],
column[3], column[1]))
def gen_api(tables_path, profile={}):
blacklist = None
blacklist_path = os.path.join(tables_path, "blacklist")
if os.path.exists(blacklist_path):
with open(blacklist_path, "r") as fh:
blacklist = fh.read()
categories = {}
for base, _, files in os.walk(tables_path):
for spec_file in files:
if spec_file[0] == '.' or spec_file.find("example") == 0:
continue
            # Exclude the blacklist file itself
if spec_file == 'blacklist':
continue
platform = os.path.basename(base)
platform_name = CANONICAL_PLATFORMS[platform]
name = spec_file.split(".table", 1)[0]
if platform not in categories.keys():
categories[platform] = {"name": platform_name, "tables": []}
with open(os.path.join(base, spec_file), "rU") as fh:
tree = ast.parse(fh.read())
table_spec = gen_spec(tree)
table_profile = profile.get("%s.%s" % (platform, name), {})
table_spec["profile"] = NoIndent(table_profile)
table_spec["blacklisted"] = is_blacklisted(table_spec["name"],
blacklist=blacklist)
categories[platform]["tables"].append(table_spec)
categories = [{"key": k, "name": v["name"], "tables": v["tables"]}
for k, v in categories.iteritems()]
return categories
def main(argc, argv):
parser = argparse.ArgumentParser("Generate API documentation.")
parser.add_argument(
"--debug", default=False, action="store_true",
help="Output debug messages (when developing)"
)
parser.add_argument(
"--tables", default="specs",
help="Path to osquery table specs"
)
parser.add_argument(
"--profile", default=None,
help="Add the results of a profile summary to the API."
)
parser.add_argument(
"--diff", default=False, action="store_true",
help="Compare API changes API_PREVIOUS API_CURRENT"
)
parser.add_argument(
"--output", default=False, action="store_true",
help="Create output file as the version tagged."
)
parser.add_argument(
"--directory", default=".",
help="Directory to use for the output file."
)
parser.add_argument("vars", nargs="*")
args = parser.parse_args()
if args.debug:
logging.basicConfig(format=LOG_FORMAT, level=logging.DEBUG)
else:
logging.basicConfig(format=LOG_FORMAT, level=logging.INFO)
if args.diff:
if len(args.vars) < 2:
logging.error("If using --diff you must supply API_OLD API_NEW")
exit(1)
gen_diff(args.vars[0], args.vars[1])
exit(0)
if not os.path.exists(args.tables):
logging.error("Cannot find path: %s" % (args.tables))
exit(1)
profile = {}
if args.profile is not None:
if not os.path.exists(args.profile):
logging.error("Cannot find path: %s" % (args.profile))
exit(1)
with open(args.profile, "r") as fh:
try:
profile = json.loads(fh.read())
except Exception as e:
logging.error("Cannot parse profile data: %s" % (str(e)))
exit(2)
# Read in the optional list of blacklisted tables, then generate
# categories.
api = gen_api(args.tables, profile)
# Output file will be the version with json extension, otherwise stdout
if args.output:
print('[+] creating tables json')
cmd = ['git', 'describe', '--tags', 'HEAD']
proc = subprocess.Popen(
cmd, stdout=subprocess.PIPE, stderr=subprocess.PIPE
)
out, err = proc.communicate()
output_file = out.split("\n")[0] + ".json"
if args.directory[-1:] == '/':
output_path = args.directory + output_file
else:
output_path = args.directory + '/' + output_file
with open(output_path, 'w') as f:
print(gen_api_json(api), file=f)
print('[+] tables json file created at %s' % (output_path))
else:
print(gen_api_json(api))
if __name__ == "__main__":
SCRIPT_DIR = os.path.dirname(os.path.realpath(__file__))
main(len(sys.argv), sys.argv)
|
|
# toontown.minigame.Purchase
from panda3d.core import CollisionNode, CollisionPolygon, Lens, Point3, Texture, Vec3, Vec4, lookAt
from direct.directnotify import DirectNotifyGlobal
from direct.gui import DirectGuiGlobals as DGG
from direct.showbase.PythonUtil import Functor
from direct.task.Task import Task
import MinigameGlobals
from PurchaseBase import *
from toontown.distributed import DelayDelete
from otp.nametag.NametagFloat2d import *
from otp.nametag import NametagGlobals
from toontown.toon import ToonHead
from toontown.toonbase import ToontownGlobals
from toontown.toonbase import ToontownTimer
COUNT_UP_RATE = 0.15
COUNT_UP_DURATION = 0.5
DELAY_BEFORE_COUNT_UP = 1.0
DELAY_AFTER_COUNT_UP = 1.0
COUNT_DOWN_RATE = 0.075
COUNT_DOWN_DURATION = 0.5
DELAY_AFTER_COUNT_DOWN = 0.0
DELAY_AFTER_CELEBRATE = 2.6
COUNT_SFX_MIN_DELAY = 0.034
COUNT_SFX_START_T = 0.079
OVERMAX_SFX_MIN_DELAY = 0.067
OVERMAX_SFX_START_T = 0.021
class Purchase(PurchaseBase):
notify = DirectNotifyGlobal.directNotify.newCategory('Purchase')
def __init__(self, toon, pointsArray, playerMoney, ids, states, remain, doneEvent):
PurchaseBase.__init__(self, toon, doneEvent)
self.ids = ids
self.pointsArray = pointsArray
self.playerMoney = playerMoney
self.states = states
self.remain = remain
self.tutorialMode = 0
self.fsm.addState(State.State('reward', self.enterReward, self.exitReward, ['purchase']))
doneState = self.fsm.getStateNamed('done')
doneState.addTransition('reward')
self.unexpectedEventNames = []
self.unexpectedExits = []
self.setupUnexpectedExitHooks()
def load(self):
purchaseModels = loader.loadModel('phase_4/models/gui/purchase_gui')
PurchaseBase.load(self, purchaseModels)
interiorPhase = 3.5
self.bg = loader.loadModel('phase_%s/models/modules/toon_interior' % interiorPhase)
self.bg.setPos(0.0, 5.0, -1.0)
self.wt = self.bg.find('**/random_tc1_TI_wallpaper')
wallTex = loader.loadTexture('phase_%s/maps/wall_paper_a5.jpg' % interiorPhase)
self.wt.setTexture(wallTex, 100)
self.wt.setColorScale(0.8, 0.67, 0.549, 1.0)
self.bt = self.bg.find('**/random_tc1_TI_wallpaper_border')
wallTex = loader.loadTexture('phase_%s/maps/wall_paper_a5.jpg' % interiorPhase)
self.bt.setTexture(wallTex, 100)
self.bt.setColorScale(0.8, 0.67, 0.549, 1.0)
self.wb = self.bg.find('**/random_tc1_TI_wainscotting')
wainTex = loader.loadTexture('phase_%s/maps/wall_paper_b4.jpg' % interiorPhase)
self.wb.setTexture(wainTex, 100)
self.wb.setColorScale(0.473, 0.675, 0.488, 1.0)
self.playAgain = DirectButton(parent=self.frame, relief=None, scale=1.04, pos=(0.72, 0, -0.24), image=(purchaseModels.find('**/PurchScrn_BTN_UP'),
purchaseModels.find('**/PurchScrn_BTN_DN'),
purchaseModels.find('**/PurchScrn_BTN_RLVR'),
purchaseModels.find('**/PurchScrn_BTN_UP')), text=TTLocalizer.GagShopPlayAgain, text_fg=(0, 0.1, 0.7, 1), text_scale=0.05, text_pos=(0, 0.015, 0), image3_color=Vec4(0.6, 0.6, 0.6, 1), text3_fg=Vec4(0, 0, 0.4, 1), command=self.__handlePlayAgain)
self.backToPlayground = DirectButton(parent=self.frame, relief=None, scale=1.04, pos=(0.72, 0, -0.045), image=(purchaseModels.find('**/PurchScrn_BTN_UP'),
purchaseModels.find('**/PurchScrn_BTN_DN'),
purchaseModels.find('**/PurchScrn_BTN_RLVR'),
purchaseModels.find('**/PurchScrn_BTN_UP')), text=TTLocalizer.GagShopBackToPlayground, text_fg=(0, 0.1, 0.7, 1), text_scale=0.05, text_pos=(0, 0.015, 0), image3_color=Vec4(0.6, 0.6, 0.6, 1), text3_fg=Vec4(0, 0, 0.4, 1), command=self.__handleBackToPlayground)
self.timer = ToontownTimer.ToontownTimer()
self.timer.hide()
self.timer.posInTopRightCorner()
numAvs = 0
count = 0
localToonIndex = 0
for index in xrange(len(self.ids)):
avId = self.ids[index]
if avId == base.localAvatar.doId:
localToonIndex = index
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE:
numAvs = numAvs + 1
layoutList = (None,
(0,),
(0, 2),
(0, 1, 3),
(0, 1, 2, 3))
layout = layoutList[numAvs]
headFramePosList = (Vec3(0.105, 0, -0.384),
Vec3(0.105, 0, -0.776),
Vec3(0.85, 0, -0.555),
Vec3(-0.654, 0, -0.555))
AVID_INDEX = 0
LAYOUT_INDEX = 1
TOON_INDEX = 2
self.avInfoArray = [(base.localAvatar.doId, headFramePosList[0], localToonIndex)]
pos = 1
for index in xrange(len(self.ids)):
avId = self.ids[index]
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE:
if avId != base.localAvatar.doId:
if avId in base.cr.doId2do:
self.avInfoArray.append((avId, headFramePosList[layout[pos]], index))
pos = pos + 1
self.headFrames = []
for avInfo in self.avInfoArray:
av = base.cr.doId2do.get(avInfo[AVID_INDEX])
if av:
headFrame = PurchaseHeadFrame(av, purchaseModels)
headFrame.setAvatarState(self.states[avInfo[TOON_INDEX]])
headFrame.setPos(avInfo[LAYOUT_INDEX])
self.headFrames.append((avInfo[AVID_INDEX], headFrame))
purchaseModels.removeNode()
self.foreground = loader.loadModel('phase_3.5/models/modules/TT_A1')
self.foreground.setPos(12.5, -20, -5.5)
self.foreground.setHpr(180, 0, 0)
self.backgroundL = self.foreground.copyTo(hidden)
self.backgroundL.setPos(-14.5, -25, -5)
self.backgroundL.setHpr(180, 0, 0)
self.backgroundR = self.backgroundL.copyTo(hidden)
self.backgroundR.setPos(30, -25, -5)
self.backgroundR.setHpr(180, 0, 0)
streets = loader.loadModel('phase_3.5/models/modules/street_modules')
sidewalk = streets.find('**/street_sidewalk_40x40')
self.sidewalk = sidewalk.copyTo(hidden)
self.sidewalkR = sidewalk.copyTo(hidden)
self.sidewalkL = sidewalk.copyTo(hidden)
self.sidewalk.setPos(-20, -25, -5.5)
self.sidewalk.setColor(0.9, 0.6, 0.4)
self.sidewalkL.setPos(-40, -25, -5.5)
self.sidewalkL.setColor(0.9, 0.6, 0.4)
self.sidewalkR.setPos(0, -25, -5.5)
self.sidewalkR.setColor(0.9, 0.6, 0.4)
streets.removeNode()
doors = loader.loadModel('phase_4/models/modules/doors')
door = doors.find('**/door_single_square_ur_door')
self.door = door.copyTo(hidden)
self.door.setH(180)
self.door.setPos(0, -16.75, -5.5)
self.door.setScale(1.5, 1.5, 2.0)
self.door.setColor(1.0, 0.8, 0, 1)
doors.removeNode()
self.rewardDoubledJellybeanLabel = DirectLabel(text=TTLocalizer.PartyRewardDoubledJellybean, text_fg=(1.0, 0.125, 0.125, 1.0), text_shadow=(0, 0, 0, 1), relief=None, pos=(0.0, 0, -0.67), scale=0.08)
self.rewardDoubledJellybeanLabel.hide()
self.countSound = loader.loadSfx('phase_3.5/audio/sfx/tick_counter.ogg')
self.overMaxSound = loader.loadSfx('phase_3.5/audio/sfx/AV_collision.ogg')
self.celebrateSound = loader.loadSfx('phase_4/audio/sfx/MG_win.ogg')
return
def unload(self):
PurchaseBase.unload(self)
self.cleanupUnexpectedExitHooks()
self.bg.removeNode()
del self.bg
self.notify.debug('destroying head frames')
for headFrame in self.headFrames:
if not headFrame[1].isEmpty():
headFrame[1].reparentTo(hidden)
headFrame[1].destroy()
del self.headFrames
self.playAgain.destroy()
del self.playAgain
self.backToPlayground.destroy()
del self.backToPlayground
self.timer.stop()
self.timer.destroy()
del self.timer
for counter in self.counters:
counter.destroy()
del counter
del self.counters
for total in self.totalCounters:
total.destroy()
del total
del self.totalCounters
taskMgr.remove('countUpTask')
taskMgr.remove('countDownTask')
taskMgr.remove('celebrate')
taskMgr.remove('purchase-trans')
taskMgr.remove('delayAdd')
taskMgr.remove('delaySubtract')
self.foreground.removeNode()
del self.foreground
self.backgroundL.removeNode()
del self.backgroundL
self.backgroundR.removeNode()
del self.backgroundR
self.sidewalk.removeNode()
self.sidewalkL.removeNode()
self.sidewalkR.removeNode()
del self.sidewalk
del self.sidewalkL
del self.sidewalkR
self.door.removeNode()
del self.door
self.collisionFloor.removeNode()
del self.collisionFloor
del self.countSound
del self.celebrateSound
self.rewardDoubledJellybeanLabel.removeNode()
del self.rewardDoubledJellybeanLabel
def showStatusText(self, text):
self.statusLabel['text'] = text
taskMgr.remove('resetStatusText')
taskMgr.doMethodLater(2.0, self.resetStatusText, 'resetStatusText')
def resetStatusText(self, task):
self.statusLabel['text'] = ''
return Task.done
def __handlePlayAgain(self):
for headFrame in self.headFrames:
headFrame[1].wrtReparentTo(aspect2d)
self.toon.inventory.reparentTo(hidden)
self.toon.inventory.hide()
taskMgr.remove('resetStatusText')
taskMgr.remove('showBrokeMsgTask')
self.statusLabel['text'] = TTLocalizer.WaitingForOtherToons
messenger.send('purchasePlayAgain')
def handleDone(self, playAgain):
base.localAvatar.b_setParent(ToontownGlobals.SPHidden)
if playAgain:
self.doneStatus = {'loader': 'minigame',
'where': 'minigame'}
else:
self.doneStatus = {'loader': 'safeZoneLoader',
'where': 'playground'}
messenger.send(self.doneEvent)
def __handleBackToPlayground(self):
self.toon.inventory.reparentTo(hidden)
self.toon.inventory.hide()
messenger.send('purchaseBackToToontown')
def __timerExpired(self):
messenger.send('purchaseTimeout')
def findHeadFrame(self, id):
for headFrame in self.headFrames:
if headFrame[0] == id:
return headFrame[1]
return None
def __handleStateChange(self, playerStates):
self.states = playerStates
for avInfo in self.avInfoArray:
index = avInfo[2]
headFrame = self.findHeadFrame(avInfo[0])
state = self.states[index]
headFrame.setAvatarState(state)
def enter(self):
base.playMusic(self.music, looping=1, volume=0.8)
self.fsm.request('reward')
def enterReward(self):
numToons = 0
toonLayouts = ((2,),
(1, 3),
(0, 2, 4),
(0, 1, 3, 4))
toonPositions = (5.0, 1.75, -0.25, -1.75, -5.0)
self.toons = []
self.toonsKeep = []
self.counters = []
self.totalCounters = []
camera.reparentTo(render)
camera.setPos(0, 16.0, 2.0)
camera.lookAt(0, 0, 0.75)
base.transitions.irisIn(0.4)
base.camLens.setMinFov(60 / (4.0 / 3.0))
self.title.reparentTo(aspect2d)
self.foreground.reparentTo(render)
self.backgroundL.reparentTo(render)
self.backgroundR.reparentTo(render)
self.sidewalk.reparentTo(render)
self.sidewalkL.reparentTo(render)
self.sidewalkR.reparentTo(render)
self.door.reparentTo(render)
size = 20
z = -2.5
floor = CollisionPolygon(Point3(-size, -size, z), Point3(size, -size, z), Point3(size, size, z), Point3(-size, size, z))
floor.setTangible(1)
floorNode = CollisionNode('collision_floor')
floorNode.addSolid(floor)
self.collisionFloor = render.attachNewNode(floorNode)
NametagGlobals.setOnscreenChatForced(1)
for index in xrange(len(self.ids)):
avId = self.ids[index]
if self.states[index] != PURCHASE_NO_CLIENT_STATE and self.states[index] != PURCHASE_DISCONNECTED_STATE and avId in base.cr.doId2do:
numToons += 1
toon = base.cr.doId2do[avId]
toon.stopSmooth()
self.toons.append(toon)
self.toonsKeep.append(DelayDelete.DelayDelete(toon, 'Purchase.enterReward'))
counter = DirectLabel(parent=hidden, relief=None, pos=(0.0, 0.0, 0.0), text=str(0), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_pos=(0, -0.1, 0), text_font=ToontownGlobals.getSignFont())
counter['image'] = DGG.getDefaultDialogGeom()
counter['image_scale'] = (0.33, 1, 0.33)
counter.setScale(0.5)
counter.count = 0
counter.max = self.pointsArray[index]
self.counters.append(counter)
money = self.playerMoney[index]
totalCounter = DirectLabel(parent=hidden, relief=None, pos=(0.0, 0.0, 0.0), text=str(money), text_scale=0.2, text_fg=(0.95, 0.95, 0, 1), text_pos=(0, -0.1, 0), text_font=ToontownGlobals.getSignFont(), image=self.jarImage)
totalCounter.setScale(0.5)
totalCounter.count = money
totalCounter.max = toon.getMaxMoney()
self.totalCounters.append(totalCounter)
self.accept('clientCleanup', self._handleClientCleanup)
pos = 0
toonLayout = toonLayouts[numToons - 1]
for toon in self.toons:
thisPos = toonPositions[toonLayout[pos]]
toon.setPos(Vec3(thisPos, 1.0, -2.5))
toon.setHpr(Vec3(0, 0, 0))
toon.setAnimState('neutral', 1)
toon.setShadowHeight(0)
if not toon.isDisabled():
toon.reparentTo(render)
self.counters[pos].setPos(thisPos * -0.17, 0, toon.getHeight() / 10 + 0.25)
self.counters[pos].reparentTo(aspect2d)
self.totalCounters[pos].setPos(thisPos * -0.17, 0, -0.825)
self.totalCounters[pos].reparentTo(aspect2d)
pos += 1
self.maxPoints = max(self.pointsArray)
def reqCountUp(state):
self.countUp()
return Task.done
countUpDelay = DELAY_BEFORE_COUNT_UP
taskMgr.doMethodLater(countUpDelay, reqCountUp, 'countUpTask')
def reqCountDown(state):
self.countDown()
return Task.done
countDownDelay = countUpDelay + COUNT_UP_DURATION + DELAY_AFTER_COUNT_UP
taskMgr.doMethodLater(countDownDelay, reqCountDown, 'countDownTask')
def celebrate(task):
for counter in task.counters:
counter.hide()
winningPoints = max(task.pointsArray)
for i in xrange(len(task.ids)):
if task.pointsArray[i] == winningPoints:
avId = task.ids[i]
if avId in base.cr.doId2do:
toon = base.cr.doId2do[avId]
toon.setAnimState('jump', 1.0)
base.playSfx(task.celebrateSound)
return Task.done
celebrateDelay = countDownDelay + COUNT_DOWN_DURATION + DELAY_AFTER_COUNT_DOWN
celebrateTask = taskMgr.doMethodLater(celebrateDelay, celebrate, 'celebrate')
celebrateTask.counters = self.counters
celebrateTask.pointsArray = self.pointsArray
celebrateTask.ids = self.ids
celebrateTask.celebrateSound = self.celebrateSound
def reqPurchase(state):
self.fsm.request('purchase')
return Task.done
purchaseDelay = celebrateDelay + DELAY_AFTER_CELEBRATE
taskMgr.doMethodLater(purchaseDelay, reqPurchase, 'purchase-trans')
if base.skipMinigameReward:
self.fsm.request('purchase')
return
def _changeCounterUp(self, task, counter, newCount, toonId):
counter.count = newCount
counter['text'] = str(counter.count)
if toonId == base.localAvatar.doId:
now = globalClock.getRealTime()
if task.lastSfxT + COUNT_SFX_MIN_DELAY < now:
base.playSfx(task.countSound, time=COUNT_SFX_START_T)
task.lastSfxT = now
def _countUpTask(self, task):
now = globalClock.getRealTime()
startT = task.getStartTime()
if now >= startT + task.duration:
for counter, toonId in zip(self.counters, self.ids):
if counter.count != counter.max:
self._changeCounterUp(task, counter, counter.max, toonId)
return Task.done
t = (now - startT) / task.duration
for counter, toonId in zip(self.counters, self.ids):
curCount = int(t * counter.max)
if curCount != counter.count:
self._changeCounterUp(task, counter, curCount, toonId)
return Task.cont
def countUp(self):
totalDelay = 0
if base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY) or base.cr.newsManager.isHolidayRunning(ToontownGlobals.JELLYBEAN_TROLLEY_HOLIDAY_MONTH):
self.rewardDoubledJellybeanLabel.show()
countUpTask = taskMgr.add(self._countUpTask, 'countUp')
countUpTask.duration = COUNT_UP_DURATION
countUpTask.countSound = self.countSound
countUpTask.lastSfxT = 0
def _changeCounterDown(self, task, counter, newCount, total, toonId):
counter.count = newCount
counter['text'] = str(counter.count)
total.count = total.startAmount + (counter.max - newCount)
if total.count > total.max:
total.count = total.max
total['text'] = str(total.count)
if total.count == total.max:
total['text_fg'] = (1, 0, 0, 1)
if toonId == base.localAvatar.doId:
now = globalClock.getRealTime()
if total.count < total.max:
minDelay = COUNT_SFX_MIN_DELAY
snd = task.countSound
startT = COUNT_SFX_START_T
else:
minDelay = OVERMAX_SFX_MIN_DELAY
snd = task.overMaxSound
startT = OVERMAX_SFX_START_T
if task.lastSfxT + minDelay < now:
task.lastSfxT = now
base.playSfx(snd, time=startT)
def _countDownTask(self, task):
now = globalClock.getRealTime()
startT = task.getStartTime()
if now >= startT + task.duration:
for counter, total, toonId in zip(self.counters, self.totalCounters, self.ids):
if counter.count != 0:
self._changeCounterDown(task, counter, 0, total, toonId)
return Task.done
t = (now - startT) / task.duration
for counter, total, toonId in zip(self.counters, self.totalCounters, self.ids):
curCount = int(counter.max * (1 - t))
if curCount != counter.count:
self._changeCounterDown(task, counter, curCount, total, toonId)
return Task.cont
def countDown(self):
totalDelay = 0
for total in self.totalCounters:
total.startAmount = total.count
countDownTask = taskMgr.add(self._countDownTask, 'countDown')
countDownTask.duration = COUNT_DOWN_DURATION
countDownTask.countSound = self.countSound
countDownTask.overMaxSound = self.overMaxSound
countDownTask.lastSfxT = 0
def exitReward(self):
self.ignore('clientCleanup')
taskMgr.remove('countUpTask')
taskMgr.remove('countDownTask')
taskMgr.remove('celebrate')
taskMgr.remove('purchase-trans')
taskMgr.remove('delayAdd')
taskMgr.remove('delaySubtract')
for toon in self.toons:
toon.detachNode()
del self.toons
if hasattr(self, 'toonsKeep'):
for delayDelete in self.toonsKeep:
delayDelete.destroy()
del self.toonsKeep
for counter in self.counters:
counter.reparentTo(hidden)
for total in self.totalCounters:
total.reparentTo(hidden)
self.foreground.reparentTo(hidden)
self.backgroundL.reparentTo(hidden)
self.backgroundR.reparentTo(hidden)
self.sidewalk.reparentTo(hidden)
self.sidewalkL.reparentTo(hidden)
self.sidewalkR.reparentTo(hidden)
self.door.reparentTo(hidden)
self.title.reparentTo(self.frame)
self.rewardDoubledJellybeanLabel.hide()
base.camLens.setMinFov(settings['fov'] / (4.0 / 3.0))
NametagGlobals.setOnscreenChatForced(0)
def _handleClientCleanup(self):
if hasattr(self, 'toonsKeep'):
for delayDelete in self.toonsKeep:
delayDelete.destroy()
del self.toonsKeep
self.ignore('clientCleanup')
def enterPurchase(self):
PurchaseBase.enterPurchase(self)
self.rewardDoubledJellybeanLabel.hide()
self.bg.reparentTo(render)
self.accept('purchaseStateChange', self.__handleStateChange)
self.playAgain.reparentTo(self.toon.inventory.purchaseFrame)
self.backToPlayground.reparentTo(self.toon.inventory.purchaseFrame)
self.pointDisplay.reparentTo(self.toon.inventory.purchaseFrame)
self.statusLabel.reparentTo(self.toon.inventory.purchaseFrame)
for headFrame in self.headFrames:
headFrame[1].show()
headFrame[1].reparentTo(self.toon.inventory.purchaseFrame)
if not self.tutorialMode:
if not config.GetBool('disable-purchase-timer', 0):
self.timer.show()
self.timer.countdown(self.remain, self.__timerExpired)
else:
self.timer.hide()
self.disablePlayAgain()
self.accept('disableGagPanel', Functor(self.toon.inventory.setActivateMode, 'gagTutDisabled', gagTutMode=1))
self.accept('disableBackToPlayground', self.disableBackToPlayground)
self.accept('enableGagPanel', self.handleEnableGagPanel)
self.accept('enableBackToPlayground', self.enableBackToPlayground)
for avId, headFrame in self.headFrames:
if avId != self.newbieId:
headFrame.hide()
messenger.send('gagScreenIsUp')
if base.autoPlayAgain:
base.transitions.fadeOut(0)
self.__handlePlayAgain()
def exitPurchase(self):
PurchaseBase.exitPurchase(self)
self.ignore('disableGagPanel')
self.ignore('disableBackToPlayground')
self.ignore('enableGagPanel')
self.ignore('enableBackToPlayground')
self.bg.reparentTo(hidden)
self.playAgain.reparentTo(self.frame)
self.backToPlayground.reparentTo(self.frame)
self.pointDisplay.reparentTo(self.frame)
self.statusLabel.reparentTo(self.frame)
self.ignore('purchaseStateChange')
if base.autoPlayAgain:
base.transitions.fadeIn()
def disableBackToPlayground(self):
self.backToPlayground['state'] = DGG.DISABLED
def enableBackToPlayground(self):
self.backToPlayground['state'] = DGG.NORMAL
def disablePlayAgain(self):
self.playAgain['state'] = DGG.DISABLED
def enablePlayAgain(self):
self.playAgain['state'] = DGG.NORMAL
def enterTutorialMode(self, newbieId):
self.tutorialMode = 1
self.newbieId = newbieId
def handleEnableGagPanel(self):
self.toon.inventory.setActivateMode('purchase', gagTutMode=1)
self.checkForBroke()
def handleGagTutorialDone(self):
self.enableBackToPlayground()
def setupUnexpectedExitHooks(self):
for avId in self.ids:
if avId in base.cr.doId2do:
toon = base.cr.doId2do[avId]
eventName = toon.uniqueName('disable')
self.accept(eventName, self.__handleUnexpectedExit, extraArgs=[avId])
self.unexpectedEventNames.append(eventName)
def cleanupUnexpectedExitHooks(self):
for eventName in self.unexpectedEventNames:
self.ignore(eventName)
def __handleUnexpectedExit(self, avId):
self.unexpectedExits.append(avId)
class PurchaseHeadFrame(DirectFrame):
notify = DirectNotifyGlobal.directNotify.newCategory('Purchase')
def __init__(self, av, purchaseModels):
DirectFrame.__init__(self, relief=None, image=purchaseModels.find('**/Char_Pnl'))
self.initialiseoptions(PurchaseHeadFrame)
self.statusLabel = DirectLabel(parent=self, relief=None, text='', text_scale=TTLocalizer.PstatusLabel, text_wordwrap=7.5, text_fg=(0.05, 0.14, 0.4, 1), text_pos=(0.1, 0, 0))
self.av = av
self.avKeep = DelayDelete.DelayDelete(av, 'PurchaseHeadFrame.av')
self.accept('clientCleanup', self._handleClientCleanup)
self.head = self.stateNodePath[0].attachNewNode('head', 20)
self.head.setPosHprScale(-0.22, 10.0, -0.1, 180.0, 0.0, 0.0, 0.1, 0.1, 0.1)
self.headModel = ToonHead.ToonHead()
self.headModel.setupHead(self.av.style, forGui=1)
self.headModel.reparentTo(self.head)
self.tag2Node = NametagFloat2d()
self.tag2Node.setContents(Nametag.CName)
self.av.nametag.addNametag(self.tag2Node)
self.tag2 = self.attachNewNode(self.tag2Node)
self.tag2.setPosHprScale(-0.22, 10.0, 0.12, 0, 0, 0, 0.046, 0.046, 0.046)
self.tag1Node = NametagFloat2d()
self.tag1Node.setContents(Nametag.CSpeech | Nametag.CThought)
self.av.nametag.addNametag(self.tag1Node)
self.tag1 = self.attachNewNode(self.tag1Node)
self.tag1.setPosHprScale(-0.15, 0, -0.1, 0, 0, 0, 0.046, 0.046, 0.046)
self.hide()
return
def destroy(self):
DirectFrame.destroy(self)
del self.statusLabel
self.headModel.delete()
del self.headModel
self.head.removeNode()
del self.head
self.av.nametag.removeNametag(self.tag1Node)
self.av.nametag.removeNametag(self.tag2Node)
self.tag1.removeNode()
self.tag2.removeNode()
del self.tag1
del self.tag2
del self.tag1Node
del self.tag2Node
del self.av
self.removeAvKeep()
def setAvatarState(self, state):
if state == PURCHASE_DISCONNECTED_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerDisconnected % self.av.getName()
self.statusLabel['text_pos'] = (0.015, 0.072, 0)
self.head.hide()
self.tag1.hide()
self.tag2.hide()
elif state == PURCHASE_EXIT_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerExited % self.av.getName()
self.statusLabel['text_pos'] = (0.015, 0.072, 0)
self.head.hide()
self.tag1.hide()
self.tag2.hide()
elif state == PURCHASE_PLAYAGAIN_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerPlayAgain
self.statusLabel['text_pos'] = (0.1, -0.12, 0)
elif state == PURCHASE_WAITING_STATE:
self.statusLabel['text'] = TTLocalizer.GagShopPlayerBuying
self.statusLabel['text_pos'] = (0.1, -0.12, 0)
elif state == PURCHASE_NO_CLIENT_STATE:
Purchase.notify.warning("setAvatarState('no client state'); OK for gag purchase tutorial")
else:
Purchase.notify.warning('unknown avatar state: %s' % state)
def _handleClientCleanup(self):
self.destroy()
def removeAvKeep(self):
if hasattr(self, 'avKeep'):
self.notify.debug('destroying avKeep %s' % self.avKeep)
self.avKeep.destroy()
del self.avKeep
self.ignore('clientCleanup')
|
|
# coding=utf-8
# Copyright 2018 HuggingFace Inc..
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import argparse
import json
import logging
import os
import sys
import unittest
from unittest.mock import patch
import torch
from transformers import ViTMAEForPreTraining, Wav2Vec2ForPreTraining
from transformers.file_utils import is_apex_available
from transformers.testing_utils import CaptureLogger, TestCasePlus, get_gpu_count, slow, torch_device
SRC_DIRS = [
os.path.join(os.path.dirname(__file__), dirname)
for dirname in [
"text-generation",
"text-classification",
"token-classification",
"language-modeling",
"multiple-choice",
"question-answering",
"summarization",
"translation",
"image-classification",
"speech-recognition",
"audio-classification",
"speech-pretraining",
"image-pretraining",
]
]
sys.path.extend(SRC_DIRS)
if SRC_DIRS is not None:
import run_audio_classification
import run_clm
import run_generation
import run_glue
import run_image_classification
import run_mae
import run_mlm
import run_ner
import run_qa as run_squad
import run_seq2seq_qa as run_squad_seq2seq
import run_speech_recognition_ctc
import run_speech_recognition_seq2seq
import run_summarization
import run_swag
import run_translation
import run_wav2vec2_pretraining_no_trainer
logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger()
def get_setup_file():
parser = argparse.ArgumentParser()
parser.add_argument("-f")
args = parser.parse_args()
return args.f
def get_results(output_dir):
results = {}
path = os.path.join(output_dir, "all_results.json")
if os.path.exists(path):
with open(path, "r") as f:
results = json.load(f)
else:
raise ValueError(f"can't find {path}")
return results
def is_cuda_and_apex_available():
is_using_cuda = torch.cuda.is_available() and torch_device == "cuda"
return is_using_cuda and is_apex_available()
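# Each test below follows the same pattern: build a CLI-style argument list for one of
# the example scripts, patch sys.argv with it, call the script's main(), then read the
# metrics the run wrote to <output_dir>/all_results.json via get_results().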
class ExamplesTests(TestCasePlus):
def test_run_glue(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_glue.py
--model_name_or_path distilbert-base-uncased
--output_dir {tmp_dir}
--overwrite_output_dir
--train_file ./tests/fixtures/tests_samples/MRPC/train.csv
--validation_file ./tests/fixtures/tests_samples/MRPC/dev.csv
--do_train
--do_eval
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--learning_rate=1e-4
--max_steps=10
--warmup_steps=2
--seed=42
--max_seq_length=128
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_glue.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
def test_run_clm(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_clm.py
--model_name_or_path distilgpt2
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--do_train
--do_eval
--block_size 128
--per_device_train_batch_size 5
--per_device_eval_batch_size 5
--num_train_epochs 2
--output_dir {tmp_dir}
--overwrite_output_dir
""".split()
if torch.cuda.device_count() > 1:
# Skipping because there are not enough batches to train the model + would need a drop_last to work.
return
if torch_device != "cuda":
testargs.append("--no_cuda")
with patch.object(sys, "argv", testargs):
run_clm.main()
result = get_results(tmp_dir)
self.assertLess(result["perplexity"], 100)
def test_run_clm_config_overrides(self):
# test that config_overrides works, despite the misleading dumps of default un-updated
# config via tokenizer
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_clm.py
--model_type gpt2
--tokenizer_name gpt2
--train_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--config_overrides n_embd=10,n_head=2
""".split()
if torch_device != "cuda":
testargs.append("--no_cuda")
logger = run_clm.logger
with patch.object(sys, "argv", testargs):
with CaptureLogger(logger) as cl:
run_clm.main()
self.assertIn('"n_embd": 10', cl.out)
self.assertIn('"n_head": 2', cl.out)
def test_run_mlm(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_mlm.py
--model_name_or_path distilroberta-base
--train_file ./tests/fixtures/sample_text.txt
--validation_file ./tests/fixtures/sample_text.txt
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--prediction_loss_only
--num_train_epochs=1
""".split()
if torch_device != "cuda":
testargs.append("--no_cuda")
with patch.object(sys, "argv", testargs):
run_mlm.main()
result = get_results(tmp_dir)
self.assertLess(result["perplexity"], 42)
def test_run_ner(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
# with so little data distributed training needs more epochs to get the score on par with 0/1 gpu
epochs = 7 if get_gpu_count() > 1 else 2
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_ner.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/conll/sample.json
--validation_file tests/fixtures/tests_samples/conll/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--do_train
--do_eval
--warmup_steps=2
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=2
--num_train_epochs={epochs}
--seed 7
""".split()
if torch_device != "cuda":
testargs.append("--no_cuda")
with patch.object(sys, "argv", testargs):
run_ner.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"], 0.75)
self.assertLess(result["eval_loss"], 0.5)
def test_run_squad(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_qa.py
--model_name_or_path bert-base-uncased
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--max_steps=10
--warmup_steps=2
--do_train
--do_eval
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(sys, "argv", testargs):
run_squad.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_f1"], 30)
self.assertGreaterEqual(result["eval_exact"], 30)
def test_run_squad_seq2seq(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_seq2seq_qa.py
--model_name_or_path t5-small
--context_column context
--question_column question
--answer_column answers
--version_2_with_negative
--train_file tests/fixtures/tests_samples/SQUAD/sample.json
--validation_file tests/fixtures/tests_samples/SQUAD/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--max_steps=10
--warmup_steps=2
--do_train
--do_eval
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(sys, "argv", testargs):
run_squad_seq2seq.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_f1"], 30)
self.assertGreaterEqual(result["eval_exact"], 30)
def test_run_swag(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_swag.py
--model_name_or_path bert-base-uncased
--train_file tests/fixtures/tests_samples/swag/sample.json
--validation_file tests/fixtures/tests_samples/swag/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--max_steps=20
--warmup_steps=2
--do_train
--do_eval
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
""".split()
with patch.object(sys, "argv", testargs):
run_swag.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"], 0.8)
def test_generation(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
testargs = ["run_generation.py", "--prompt=Hello", "--length=10", "--seed=42"]
if is_cuda_and_apex_available():
testargs.append("--fp16")
model_type, model_name = (
"--model_type=gpt2",
"--model_name_or_path=sshleifer/tiny-gpt2",
)
with patch.object(sys, "argv", testargs + [model_type, model_name]):
result = run_generation.main()
self.assertGreaterEqual(len(result[0]), 10)
@slow
def test_run_summarization(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_summarization.py
--model_name_or_path t5-small
--train_file tests/fixtures/tests_samples/xsum/sample.json
--validation_file tests/fixtures/tests_samples/xsum/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--max_steps=50
--warmup_steps=8
--do_train
--do_eval
--learning_rate=2e-4
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
""".split()
with patch.object(sys, "argv", testargs):
run_summarization.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_rouge1"], 10)
self.assertGreaterEqual(result["eval_rouge2"], 2)
self.assertGreaterEqual(result["eval_rougeL"], 7)
self.assertGreaterEqual(result["eval_rougeLsum"], 7)
@slow
def test_run_translation(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_translation.py
--model_name_or_path sshleifer/student_marian_en_ro_6_1
--source_lang en
--target_lang ro
--train_file tests/fixtures/tests_samples/wmt16/sample.json
--validation_file tests/fixtures/tests_samples/wmt16/sample.json
--output_dir {tmp_dir}
--overwrite_output_dir
--max_steps=50
--warmup_steps=8
--do_train
--do_eval
--learning_rate=3e-3
--per_device_train_batch_size=2
--per_device_eval_batch_size=1
--predict_with_generate
--source_lang en_XX
--target_lang ro_RO
""".split()
with patch.object(sys, "argv", testargs):
run_translation.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_bleu"], 30)
@unittest.skip("This is currently broken.")
def test_run_image_classification(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_image_classification.py
--output_dir {tmp_dir}
--model_name_or_path google/vit-base-patch16-224-in21k
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--do_train
--do_eval
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--remove_unused_columns False
--overwrite_output_dir True
--dataloader_num_workers 16
--metric_for_best_model accuracy
--max_steps 10
--train_val_split 0.1
--seed 42
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_image_classification.main()
result = get_results(tmp_dir)
self.assertGreaterEqual(result["eval_accuracy"], 0.8)
def test_run_speech_recognition_ctc(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_speech_recognition_ctc.py
--output_dir {tmp_dir}
--model_name_or_path hf-internal-testing/tiny-random-wav2vec2
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--eval_split_name validation
--do_train
--do_eval
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--remove_unused_columns False
--overwrite_output_dir True
--preprocessing_num_workers 16
--max_steps 10
--seed 42
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_speech_recognition_ctc.main()
result = get_results(tmp_dir)
self.assertLess(result["eval_loss"], result["train_loss"])
def test_run_speech_recognition_seq2seq(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_speech_recognition_seq2seq.py
--output_dir {tmp_dir}
--model_name_or_path hf-internal-testing/tiny-random-speech-encoder-decoder
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_name clean
--train_split_name validation
--eval_split_name validation
--do_train
--do_eval
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 4
--remove_unused_columns False
--overwrite_output_dir True
--preprocessing_num_workers 16
--max_steps 10
--seed 42
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_speech_recognition_seq2seq.main()
result = get_results(tmp_dir)
self.assertLess(result["eval_loss"], result["train_loss"])
def test_run_audio_classification(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_audio_classification.py
--output_dir {tmp_dir}
--model_name_or_path hf-internal-testing/tiny-random-wav2vec2
--dataset_name anton-l/superb_demo
--dataset_config_name ks
--train_split_name test
--eval_split_name test
--audio_column_name audio
--label_column_name label
--do_train
--do_eval
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--remove_unused_columns False
--overwrite_output_dir True
--num_train_epochs 10
--max_steps 50
--seed 42
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_audio_classification.main()
result = get_results(tmp_dir)
self.assertLess(result["eval_loss"], result["train_loss"])
def test_run_wav2vec2_pretraining(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_wav2vec2_pretraining_no_trainer.py
--output_dir {tmp_dir}
--model_name_or_path hf-internal-testing/tiny-random-wav2vec2
--dataset_name hf-internal-testing/librispeech_asr_dummy
--dataset_config_names clean
--dataset_split_names validation
--learning_rate 1e-4
--per_device_train_batch_size 4
--per_device_eval_batch_size 4
--preprocessing_num_workers 16
--max_train_steps 2
--validation_split_percentage 5
--seed 42
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_wav2vec2_pretraining_no_trainer.main()
model = Wav2Vec2ForPreTraining.from_pretrained(tmp_dir)
self.assertIsNotNone(model)
@unittest.skip("This is currently broken.")
def test_run_vit_mae_pretraining(self):
stream_handler = logging.StreamHandler(sys.stdout)
logger.addHandler(stream_handler)
tmp_dir = self.get_auto_remove_tmp_dir()
testargs = f"""
run_mae.py
--output_dir {tmp_dir}
--dataset_name hf-internal-testing/cats_vs_dogs_sample
--do_train
--do_eval
--learning_rate 1e-4
--per_device_train_batch_size 2
--per_device_eval_batch_size 1
--remove_unused_columns False
--overwrite_output_dir True
--dataloader_num_workers 16
--metric_for_best_model accuracy
--max_steps 10
--train_val_split 0.1
--seed 42
""".split()
if is_cuda_and_apex_available():
testargs.append("--fp16")
with patch.object(sys, "argv", testargs):
run_mae.main()
model = ViTMAEForPreTraining.from_pretrained(tmp_dir)
self.assertIsNotNone(model)
|
|
import tkinter
import theory.bellmanFord as bellmanFord
################
## Controller ##
################
class SimulationController():
def __init__(self, mode, parent):
self._model = SimulationModel()
self._view = SimulationView(self, parent)
self._mode = mode
self._model.enterInactiveState()
self._view.enterInactiveState()
################
## Commands
def endSimulation(self):
self._view.enterInactiveState()
self._model.enterInactiveState()
self._mode.draw()
def startSimulation(self):
algebra = self._mode.algebraController.getComputationAlgebra()
withPaths = self._mode.algebraController.getWithPaths()
graph = self._mode.graphController.getGraph()
self._model.enterSimulationState(algebra, withPaths, graph)
self._simulationTimeChanged()
def moveToStart(self):
self._model.moveToStart()
self._simulationTimeChanged()
def moveBack(self):
self._model.moveBack()
self._simulationTimeChanged()
def moveForwards(self):
self._model.moveForwards()
self._simulationTimeChanged()
def moveToEnd(self):
self._model.moveToEnd()
self._simulationTimeChanged()
############
## Getters
def getCurrentTime(self):
return self._model.currentTime
def getCurrentState(self):
return self._model.getCurrentState()
def isSimulating(self):
return self._model.isSimulating
def hasConverged(self):
return self._model.hasConverged()
def getConvergenceTime(self):
return self._model.getConvergenceTime()
#############
## Internal
def _simulationTimeChanged(self):
self._view.enterSimulationState(
self._model.canMoveToStart(),
self._model.canMoveBack(),
self._model.canMoveForward(),
self._model.canMoveToEnd()
)
self._mode.draw()
###########
## Model ##
###########
class SimulationModel():
def __init__(self):
pass
############
## Actions
def enterInactiveState(self):
self.isSimulating = False
self.computation = None
self.currentTime = None
def enterSimulationState(self, algebra, withPaths, graph):
self.isSimulating = True
self.algebra = algebra
self.identityMatrix = bellmanFord.createIdentityMatrix(algebra, len(graph))
self.adjacencyMatrix = bellmanFord.createAdjacencyMatrix(algebra, graph)
self.graph = graph
state1 = self.identityMatrix
state2 = bellmanFord.iterate(self.algebra, state1, self.identityMatrix, self.adjacencyMatrix)
self.computation = [state1, state2]
self.currentTime = 0
self.simulate(len(graph)**2)
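    # Run at most `steps` further Bellman-Ford iterations, appending each new state
    # matrix to self.computation and stopping early once a fixed point is reached
    # (see hasConverged below).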
def simulate(self, steps):
i = 0
while not self.hasConverged() and i < steps:
currentState = self.computation[-1]
newState = bellmanFord.iterate(self.algebra, currentState, self.identityMatrix, self.adjacencyMatrix)
self.computation.append(newState)
i += 1
def moveToStart(self):
self.currentTime = 0
def moveBack(self):
self.currentTime -= 1
def moveForwards(self):
if self.currentTime > len(self.computation) - 2:
self.simulate(1)
self.currentTime += 1
def moveToEnd(self):
self.currentTime = len(self.computation)-2
############
## Getters
def hasConverged(self):
return len(self.computation) >= 2 and self.computation[-1] == self.computation[-2]
def canMoveToStart(self):
return self.currentTime > 0
def canMoveBack(self):
return self.currentTime > 0
def canMoveForward(self):
return (not self.hasConverged()) or (self.currentTime < len(self.computation) - 2)
def canMoveToEnd(self):
return self.hasConverged() and self.currentTime < len(self.computation) - 2
def getCurrentState(self):
state = self.computation[self.currentTime]
source = self.graph.sourceNode
return {n:state[n][source] for n in range(len(self.graph))}
def getConvergenceTime(self):
if self.hasConverged():
return len(self.computation)-2
else:
return None
##########
## View ##
##########
class SimulationView(tkinter.Frame):
def __init__(self, controller, parent, *args, **kwargs):
tkinter.Frame.__init__(self, parent, *args, **kwargs)
self.controller = controller
self.startB = tkinter.Button(self, text="<<",command=controller.moveToStart)
self.backB = tkinter.Button(self, text="<", command=controller.moveBack)
self.commandB = tkinter.Button(self, width=5)
self.forwardsB = tkinter.Button(self, text=">", command=controller.moveForwards)
self.endB = tkinter.Button(self, text=">>",command=controller.moveToEnd)
self.startB.grid(row=0,column=1)
self.backB.grid(row=0,column=2)
self.commandB.grid(row=0,column=3)
self.forwardsB.grid(row=0,column=4)
self.endB.grid(row=0,column=5)
self.grid_columnconfigure(0, weight=1)
self.grid_columnconfigure(6, weight=1)
#######################
## Configure buttons ##
#######################
def enterInactiveState(self):
self.startB.configure(state=tkinter.DISABLED)
self.backB.configure(state=tkinter.DISABLED)
self.forwardsB.configure(state=tkinter.DISABLED)
self.endB.configure(state=tkinter.DISABLED)
self.commandB.configure(text="Start", command=self.controller.startSimulation)
def enterSimulationState(self, start, back, forward, end):
self.startB.configure(state=tkinter.ACTIVE if start else tkinter.DISABLED)
self.backB.configure(state=tkinter.ACTIVE if back else tkinter.DISABLED)
self.forwardsB.configure(state=tkinter.ACTIVE if forward else tkinter.DISABLED)
self.endB.configure(state=tkinter.ACTIVE if end else tkinter.DISABLED)
self.commandB.configure(text="Stop", command=self.controller.endSimulation)
|
|
# Copyright (c) 2011 OpenStack, LLC
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Tests For HostManager
"""
from oslo.config import cfg
from cinder import db
from cinder import exception
from cinder.openstack.common.scheduler import filters
from cinder.openstack.common import timeutils
from cinder.scheduler import host_manager
from cinder import test
from cinder.tests.scheduler import fakes
CONF = cfg.CONF
class FakeFilterClass1(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class FakeFilterClass2(filters.BaseHostFilter):
def host_passes(self, host_state, filter_properties):
pass
class HostManagerTestCase(test.TestCase):
"""Test case for HostManager class"""
def setUp(self):
super(HostManagerTestCase, self).setUp()
self.host_manager = host_manager.HostManager()
self.fake_hosts = [host_manager.HostState('fake_host%s' % x)
for x in xrange(1, 5)]
def test_choose_host_filters_not_found(self):
self.flags(scheduler_default_filters='FakeFilterClass3')
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
self.assertRaises(exception.SchedulerHostFilterNotFound,
self.host_manager._choose_host_filters, None)
def test_choose_host_filters(self):
self.flags(scheduler_default_filters=['FakeFilterClass2'])
self.host_manager.filter_classes = [FakeFilterClass1,
FakeFilterClass2]
# Test 'volume' returns 1 correct function
filter_classes = self.host_manager._choose_host_filters(None)
self.assertEqual(len(filter_classes), 1)
self.assertEqual(filter_classes[0].__name__, 'FakeFilterClass2')
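    # The helpers below stub out _choose_host_filters with mox so only FakeFilterClass1
    # is used, and record every host object and filter_properties dict the fake filter
    # sees; _verify_result then checks them against the expected values.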
def _mock_get_filtered_hosts(self, info, specified_filters=None):
self.mox.StubOutWithMock(self.host_manager, '_choose_host_filters')
info['got_objs'] = []
info['got_fprops'] = []
def fake_filter_one(_self, obj, filter_props):
info['got_objs'].append(obj)
info['got_fprops'].append(filter_props)
return True
self.stubs.Set(FakeFilterClass1, '_filter_one', fake_filter_one)
self.host_manager._choose_host_filters(specified_filters).AndReturn(
[FakeFilterClass1])
def _verify_result(self, info, result):
for x in info['got_fprops']:
self.assertEqual(x, info['expected_fprops'])
self.assertEqual(set(info['expected_objs']), set(info['got_objs']))
self.assertEqual(set(result), set(info['got_objs']))
def test_get_filtered_hosts(self):
fake_properties = {'moo': 1, 'cow': 2}
info = {'expected_objs': self.fake_hosts,
'expected_fprops': fake_properties}
self._mock_get_filtered_hosts(info)
self.mox.ReplayAll()
result = self.host_manager.get_filtered_hosts(self.fake_hosts,
fake_properties)
self._verify_result(info, result)
def test_update_service_capabilities(self):
service_states = self.host_manager.service_states
self.assertDictMatch(service_states, {})
self.mox.StubOutWithMock(timeutils, 'utcnow')
timeutils.utcnow().AndReturn(31337)
timeutils.utcnow().AndReturn(31338)
timeutils.utcnow().AndReturn(31339)
host1_volume_capabs = dict(free_capacity_gb=4321, timestamp=1)
host2_volume_capabs = dict(free_capacity_gb=5432, timestamp=1)
host3_volume_capabs = dict(free_capacity_gb=6543, timestamp=1)
self.mox.ReplayAll()
service_name = 'volume'
self.host_manager.update_service_capabilities(service_name, 'host1',
host1_volume_capabs)
self.host_manager.update_service_capabilities(service_name, 'host2',
host2_volume_capabs)
self.host_manager.update_service_capabilities(service_name, 'host3',
host3_volume_capabs)
# Make sure dictionary isn't re-assigned
self.assertEqual(self.host_manager.service_states, service_states)
# Make sure original dictionary wasn't copied
self.assertEqual(host1_volume_capabs['timestamp'], 1)
host1_volume_capabs['timestamp'] = 31337
host2_volume_capabs['timestamp'] = 31338
host3_volume_capabs['timestamp'] = 31339
expected = {'host1': host1_volume_capabs,
'host2': host2_volume_capabs,
'host3': host3_volume_capabs}
self.assertDictMatch(service_states, expected)
def test_get_all_host_states(self):
context = 'fake_context'
topic = CONF.volume_topic
self.mox.StubOutWithMock(db, 'service_get_all_by_topic')
self.mox.StubOutWithMock(host_manager.LOG, 'warn')
self.mox.StubOutWithMock(host_manager.utils, 'service_is_up')
ret_services = fakes.VOLUME_SERVICES
db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
host_manager.utils.service_is_up(ret_services[0]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[1]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[2]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[3]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[4]).AndReturn(True)
# Disabled service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host5)")
db.service_get_all_by_topic(context, topic).AndReturn(ret_services)
host_manager.utils.service_is_up(ret_services[0]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[1]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[2]).AndReturn(True)
host_manager.utils.service_is_up(ret_services[3]).AndReturn(False)
# Stopped service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host4)")
host_manager.utils.service_is_up(ret_services[4]).AndReturn(True)
# Disabled service
host_manager.LOG.warn("volume service is down or disabled. "
"(host: host5)")
self.mox.ReplayAll()
self.host_manager.get_all_host_states(context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(len(host_state_map), 4)
# Check that service is up
for i in xrange(4):
volume_node = fakes.VOLUME_SERVICES[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
self.host_manager.get_all_host_states(context)
host_state_map = self.host_manager.host_state_map
self.assertEqual(len(host_state_map), 3)
for i in xrange(3):
volume_node = fakes.VOLUME_SERVICES[i]
host = volume_node['host']
self.assertEqual(host_state_map[host].service,
volume_node)
class HostStateTestCase(test.TestCase):
"""Test case for HostState class"""
def test_update_from_volume_capability(self):
fake_host = host_manager.HostState('host1')
self.assertEqual(fake_host.free_capacity_gb, None)
volume_capability = {'total_capacity_gb': 1024,
'free_capacity_gb': 512,
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(fake_host.free_capacity_gb, 512)
def test_update_from_volume_infinite_capability(self):
fake_host = host_manager.HostState('host1')
self.assertEqual(fake_host.free_capacity_gb, None)
volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'infinite',
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
self.assertEqual(fake_host.free_capacity_gb, 'infinite')
def test_update_from_volume_unknown_capability(self):
fake_host = host_manager.HostState('host1')
self.assertEqual(fake_host.free_capacity_gb, None)
volume_capability = {'total_capacity_gb': 'infinite',
'free_capacity_gb': 'unknown',
'reserved_percentage': 0,
'timestamp': None}
fake_host.update_from_volume_capability(volume_capability)
self.assertEqual(fake_host.total_capacity_gb, 'infinite')
self.assertEqual(fake_host.free_capacity_gb, 'unknown')
|
|
#!/usr/bin/env python
# ***** BEGIN LICENSE BLOCK *****
# Version: MPL 1.1/GPL 2.0/LGPL 2.1
#
# The contents of this file are subject to the Mozilla Public License
# Version 1.1 (the "License"); you may not use this file except in
# compliance with the License. You may obtain a copy of the License at
# http://www.mozilla.org/MPL/
#
# Software distributed under the License is distributed on an "AS IS"
# basis, WITHOUT WARRANTY OF ANY KIND, either express or implied. See the
# License for the specific language governing rights and limitations
# under the License.
#
# The Original Code is Komodo code.
#
# The Initial Developer of the Original Code is ActiveState Software Inc.
# Portions created by ActiveState Software Inc are Copyright (C) 2000-2007
# ActiveState Software Inc. All Rights Reserved.
#
# Contributor(s):
# ActiveState Software Inc
#
# Alternatively, the contents of this file may be used under the terms of
# either the GNU General Public License Version 2 or later (the "GPL"), or
# the GNU Lesser General Public License Version 2.1 or later (the "LGPL"),
# in which case the provisions of the GPL or the LGPL are applicable instead
# of those above. If you wish to allow use of your version of this file only
# under the terms of either the GPL or the LGPL, and not to allow others to
# use your version of this file under the terms of the MPL, indicate your
# decision by deleting the provisions above and replace them with the notice
# and other provisions required by the GPL or the LGPL. If you do not delete
# the provisions above, a recipient may use your version of this file under
# the terms of any one of the MPL, the GPL or the LGPL.
#
# ***** END LICENSE BLOCK *****
#
# Contributors:
# Eric Promislow ([email protected])
"""
perlcile - a Code Intelligence Language Engine for the Perl language
Module Usage:
from perlcile import scan_purelang
content = open("foo.pl", "r").read()
scan_purelang(content, "foo.pl")
Command-line Usage:
perlcile.py [<options>...] [<Perl file>]
Options:
-h, --help dump this help and exit
-V, --version dump this script's version and exit
-v, --verbose verbose output, use twice for more verbose output
-f, --filename <path> specify the filename of the file content
passed in on stdin, this is used for the "path"
attribute of the emitted <file> tag.
--md5=<string> md5 hash for the input
--mtime=<secs> modification time for output info, in #secs since
1/1/70.
-L, --language <name>
the language of the file being scanned
-c, --clock print timing info for scans (CIX is not printed)
One or more Perl files can be specified as arguments or content can be
passed in on stdin. A directory can also be specified, in which case
all .pl and .pm files in that directory are scanned.
This is a Language Engine for the Code Intelligence (codeintel) system.
Code Intelligence XML format. See:
http://specs.activestate.com/Komodo_3.0/func/code_intelligence.html
http://specs.tl.activestate.com/kd/kd-0100.html
The command-line interface will return non-zero iff the scan failed.
"""
import os
import os.path
import sys
import getopt
from hashlib import md5
import re
import logging
import glob
import time
import stat
from ciElementTree import Element, SubElement, tostring
from SilverCity import ScintillaConstants
from codeintel2 import perl_lexer, perl_parser, util
from codeintel2.tree import pretty_tree_from_tree
from codeintel2.common import CILEError
from codeintel2 import parser_cix
#---- global data
_version_ = (0, 1, 0)
log = logging.getLogger("perlcile")
# log.setLevel(logging.DEBUG)
_gClockIt = 0 # if true then we are gathering timing data
_gClock = None # if gathering timing data this is set to time retrieval fn
_gStartTime = None # start time of current file being scanned
gProvideFullDocs = False
#---- internal support
# This code has intimate knowledge of the code objects defined in
# perl_parser.py
def scan_purelang(buf):
content = buf.accessor.text.expandtabs(8)
tokenizer = perl_lexer.PerlLexer(content, gProvideFullDocs)
parser = perl_parser.Parser(tokenizer, provide_full_docs=gProvideFullDocs)
parser.moduleName = buf.path
parse_tree = parser.parse()
tree = parser.produce_CIX()
return tree
def scan_multilang(tokens, module_elem):
"""Build the Perl module CIX element tree.
"tokens" is a generator of UDL tokens for this UDL-based
multi-lang document.
"module_elem" is the <module> element of a CIX element tree on
which the Perl module should be built.
This should return a list of the CSL tokens in the token stream.
"""
tokenizer = perl_lexer.PerlMultiLangLexer(tokens)
# "PerlHTML" is about all we need for whichever Perl-based
# template language is being used. This could just as easily be a
# boolean that indicates whether we're processing a pure language
# or a multi-lang one.
parser = perl_parser.Parser(
tokenizer, lang="PerlHTML", provide_full_docs=gProvideFullDocs)
parser.moduleName = "" # Unknown
parser.parse()
parse_tree = parser.produce_CIX_NoHeader(module_elem)
csl_tokens = tokenizer.get_csl_tokens()
return csl_tokens, tokenizer.has_perl_code()
#---- mainline
def main(argv):
logging.basicConfig()
# Parse options.
try:
opts, args = getopt.getopt(argv[1:], "Vvhf:cL:",
["version", "verbose", "help", "filename=", "md5=", "mtime=",
"clock", "language="])
except getopt.GetoptError, ex:
log.error(str(ex))
log.error("Try `perlcile --help'.")
return 1
numVerboses = 0
stdinFilename = None
md5sum = None
mtime = None
lang = "Perl"
global _gClockIt
for opt, optarg in opts:
if opt in ("-h", "--help"):
sys.stdout.write(__doc__)
return
elif opt in ("-V", "--version"):
ver = '.'.join([str(part) for part in _version_])
print "perlcile %s" % ver
return
elif opt in ("-v", "--verbose"):
numVerboses += 1
if numVerboses == 1:
log.setLevel(logging.INFO)
else:
log.setLevel(logging.DEBUG)
elif opt in ("-f", "--filename"):
stdinFilename = optarg
elif opt in ("-L", "--language"):
lang = optarg
elif opt in ("--md5",):
md5sum = optarg
elif opt in ("--mtime",):
mtime = optarg
elif opt in ("-c", "--clock"):
_gClockIt = 1
global _gClock
if sys.platform.startswith("win"):
_gClock = time.clock
else:
_gClock = time.time
if len(args) == 0:
contentOnStdin = 1
filenames = [stdinFilename or "<stdin>"]
else:
contentOnStdin = 0
paths = []
for arg in args:
paths += glob.glob(arg)
filenames = []
for path in paths:
if os.path.isfile(path):
filenames.append(path)
elif os.path.isdir(path):
perlfiles = [os.path.join(path, n) for n in os.listdir(path)
if os.path.splitext(n)[1] in (".pl", ".pm")]
perlfiles = [f for f in perlfiles if os.path.isfile(f)]
filenames += perlfiles
if 1:
for filename in filenames:
if contentOnStdin:
log.debug("reading content from stdin")
content = sys.stdin.read()
log.debug("finished reading content from stdin")
if mtime is None:
mtime = int(time.time())
else:
if mtime is None:
mtime = int(os.stat(filename)[stat.ST_MTIME])
content = open(filename, 'r').read()
if _gClockIt:
sys.stdout.write("scanning '%s'..." % filename)
global _gStartTime
_gStartTime = _gClock()
data = scan(
content, filename, md5sum=md5sum, mtime=mtime, lang=lang)
if _gClockIt:
sys.stdout.write(" %.3fs\n" % (_gClock()-_gStartTime))
elif data:
sys.stdout.write(data)
try:
pass
except KeyboardInterrupt:
log.debug("user abort")
return 1
if __name__ == "__main__":
sys.exit(main(sys.argv))
|
|
"""
Creates the 'objects' table from the FITS files arranged into the default GALAH file structure and from the 'observations' table.
Authors: Janez Kos ([email protected]), Sanjib Sharma, Ghayandi de Silva, Sarah Martell, GALAH collaboration
barycentric and heliocentric calculation by Andy Casey and astropy
Copyright (C) 2015 Janez Kos
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program; if not, write to the Free Software
Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
HOW TO RUN THIS CODE:
1. Make sure you have the following modules installed:
- os
- pyfits
- ephem
- sys
- numpy
- psycopg2
- multiprocessing
- argparse
2. run observations.py
3. Run with the only argument being the path to the folder containing the usual GALAH data folder structure.
4. Run with -h to see other options.
The code will produce an SQL database, its dump and a CSV table.
"""
from os import walk, system, path
import psycopg2 as mdb
import pyfits
import numpy as np
import ephem
import sys
from astropy.time import Time
import astropy.constants as constants
import astropy.coordinates as coord
import astropy.units as u
import multiprocessing
import argparse
def baryvel(dje):
"""
Calculates the heliocentric and barycentric velocity components of Earth.
Parameters
----------
dje : `~astropy.time.Time` or float
The Julian ephemeris date.
Returns
-------
dvelh : `~astropy.units.Quantity`
The heliocentric velocity components in (X, Y, Z) coordinates.
dvelb : `~astropy.units.Quantity`
The barycentric velocity components in (X, Y, Z) coordinates.
"""
if isinstance(dje, Time):
dje = dje.jd
# Prepare for the pain.
dcto = 2415020.
dcjul = 36525. # Days in Julian year
dc1 = 1.
# Constants dcfel(i,k) of fast changing elements.
dcfel = np.array([
1.7400353e00, 6.2833195099091e02, 5.2796e-6,
6.2565836e00, 6.2830194572674e02, -2.6180e-6,
4.7199666e00, 8.3997091449254e03, -1.9780e-5,
1.9636505e-1, 8.4334662911720e03, -5.6044e-5,
4.1547339e00, 5.2993466764997e01, 5.8845e-6,
4.6524223e00, 2.1354275911213e01, 5.6797e-6,
4.2620486e00, 7.5025342197656e00, 5.5317e-6,
1.4740694e00, 3.8377331909193e00, 5.6093e-6]).reshape(8, 3)
# Constants dceps and ccsel(i,k) of slowly changing elements.
dceps = np.array([4.093198e-1, -2.271110e-4, -2.860401e-8])
ccsel = np.array([
1.675104e-2, -4.179579e-5, -1.260516e-7,
2.220221e-1, 2.809917e-2, 1.852532e-5,
1.589963e00, 3.418075e-2, 1.430200e-5,
2.994089e00, 2.590824e-2, 4.155840e-6,
8.155457e-1, 2.486352e-2, 6.836840e-6,
1.735614e00, 1.763719e-2, 6.370440e-6,
1.968564e00, 1.524020e-2, -2.517152e-6,
1.282417e00, 8.703393e-3, 2.289292e-5,
2.280820e00, 1.918010e-2, 4.484520e-6,
4.833473e-2, 1.641773e-4, -4.654200e-7,
5.589232e-2, -3.455092e-4, -7.388560e-7,
4.634443e-2, -2.658234e-5, 7.757000e-8,
8.997041e-3, 6.329728e-6, -1.939256e-9,
2.284178e-2, -9.941590e-5, 6.787400e-8,
4.350267e-2, -6.839749e-5, -2.714956e-7,
1.348204e-2, 1.091504e-5, 6.903760e-7,
3.106570e-2, -1.665665e-4, -1.590188e-7]).reshape(17, 3)
# Constants of the arguments of the short-period perturbations.
dcargs = np.array([
5.0974222e0, -7.8604195454652e2,
3.9584962e0, -5.7533848094674e2,
1.6338070e0, -1.1506769618935e3,
2.5487111e0, -3.9302097727326e2,
4.9255514e0, -5.8849265665348e2,
1.3363463e0, -5.5076098609303e2,
1.6072053e0, -5.2237501616674e2,
1.3629480e0, -1.1790629318198e3,
5.5657014e0, -1.0977134971135e3,
5.0708205e0, -1.5774000881978e2,
3.9318944e0, 5.2963464780000e1,
4.8989497e0, 3.9809289073258e1,
1.3097446e0, 7.7540959633708e1,
3.5147141e0, 7.9618578146517e1,
3.5413158e0, -5.4868336758022e2]).reshape(15, 2)
# Amplitudes ccamps(n,k) of the short-period perturbations.
ccamps = np.array([
-2.279594e-5, 1.407414e-5, 8.273188e-6, 1.340565e-5, -2.490817e-7,
-3.494537e-5, 2.860401e-7, 1.289448e-7, 1.627237e-5, -1.823138e-7,
6.593466e-7, 1.322572e-5, 9.258695e-6, -4.674248e-7, -3.646275e-7,
1.140767e-5, -2.049792e-5, -4.747930e-6, -2.638763e-6, -1.245408e-7,
9.516893e-6, -2.748894e-6, -1.319381e-6, -4.549908e-6, -1.864821e-7,
7.310990e-6, -1.924710e-6, -8.772849e-7, -3.334143e-6, -1.745256e-7,
-2.603449e-6, 7.359472e-6, 3.168357e-6, 1.119056e-6, -1.655307e-7,
3.228859e-6, 1.308997e-7, 1.013137e-7, 2.403899e-6, -3.736225e-7,
3.442177e-7, 2.671323e-6, 1.832858e-6, -2.394688e-7, -3.478444e-7,
8.702406e-6, -8.421214e-6, -1.372341e-6, -1.455234e-6, -4.998479e-8,
-1.488378e-6, -1.251789e-5, 5.226868e-7, -2.049301e-7, 0,
-8.043059e-6, -2.991300e-6, 1.473654e-7, -3.154542e-7, 0,
3.699128e-6, -3.316126e-6, 2.901257e-7, 3.407826e-7, 0,
2.550120e-6, -1.241123e-6, 9.901116e-8, 2.210482e-7, 0,
-6.351059e-7, 2.341650e-6, 1.061492e-6, 2.878231e-7, 0]).reshape(15, 5)
# Constants csec3 and ccsec(n,k) of the secular perturbations in longitude.
ccsec3 = -7.757020e-8
ccsec = np.array([
1.289600e-6, 5.550147e-1, 2.076942e00,
3.102810e-5, 4.035027e00, 3.525565e-1,
9.124190e-6, 9.990265e-1, 2.622706e00,
9.793240e-7, 5.508259e00, 1.559103e01]).reshape(4, 3)
# Sidereal rates.
dcsld = 1.990987e-7 #sidereal rate in longitude
ccsgd = 1.990969e-7 #sidereal rate in mean anomaly
# Constants used in the calculation of the lunar contribution.
cckm = 3.122140e-5
ccmld = 2.661699e-6
ccfdi = 2.399485e-7
# Constants dcargm(i,k) of the arguments of the perturbations of the motion
# of the moon.
dcargm = np.array([5.1679830e0, 8.3286911095275e3, 5.4913150e0,
-7.2140632838100e3, 5.9598530e0, 1.5542754389685e4]).reshape(3, 2)
# Amplitudes ccampm(n,k) of the perturbations of the moon.
ccampm = np.array([
1.097594e-1, 2.896773e-7, 5.450474e-2, 1.438491e-7,
-2.223581e-2, 5.083103e-8, 1.002548e-2, -2.291823e-8,
1.148966e-2, 5.658888e-8, 8.249439e-3, 4.063015e-8]).reshape(3, 4)
# ccpamv(k) = a*m*dl,dt (planets), dc1mme = 1-mass(earth+moon)
ccpamv = np.array([8.326827e-11, 1.843484e-11, 1.988712e-12, 1.881276e-12])
dc1mme = 0.99999696e0
# Time arguments.
dt = (dje - dcto) / dcjul
tvec = np.array([1e0, dt, dt * dt])
# Values of all elements for the instant(aneous?) dje.
temp = np.dot(tvec.T, dcfel.T).T % (2 * np.pi)
dml = temp[0]
forbel = temp[1:8]
g = forbel[0]
deps = (tvec * dceps).sum() % (2 * np.pi)
sorbel = np.dot(tvec.T, ccsel.T).T % (2 * np.pi)
e = sorbel[0]
# Secular perturbations in longitude.
sn = np.sin(np.dot(tvec[0:2].T, ccsec[:, 1:3].T).T % (2 * np.pi))
# Periodic perturbations of the Earth-Moon barycenter.
pertl = (ccsec[:,0] * sn).sum() + dt * ccsec3 * sn[2]
pertld, pertr, pertrd = 0, 0, 0
for k in range(0, 15):
        a = (dcargs[k,0] + dt * dcargs[k,1]) % (2 * np.pi)
cosa, sina = np.cos(a), np.sin(a)
pertl += ccamps[k,0] * cosa + ccamps[k,1] * sina
pertr += ccamps[k,2] * cosa + ccamps[k,3] * sina
if k < 11:
pertld += (ccamps[k,1] * cosa - ccamps[k,0] * sina) * ccamps[k,4]
pertrd += (ccamps[k,3] * cosa - ccamps[k,2] * sina) * ccamps[k,4]
# Elliptic part of the motion of the Earth-Moon barycenter.
phi = (e * e / 4e0) * (((8e0 / e) - e) * np.sin(g) + 5 * np.sin(2 * g) \
+ (13 / 3.) * e * np.sin(3 * g))
f = g + phi
sinf, cosf = np.sin(f), np.cos(f)
dpsi = (dc1 - e * e) / (dc1 + e * cosf)
phid = 2 * e * ccsgd * ((1 + 1.5 * e**2) * cosf + e * (1.25 - 0.5 * sinf**2))
psid = ccsgd * e * sinf / np.sqrt(dc1 - e * e)
# Perturbed heliocentric motion of the Earth-Moon barycenter.
d1pdro = dc1 + pertr
drd = d1pdro * (psid + dpsi * pertrd)
drld = d1pdro * dpsi * (dcsld + phid + pertld)
dtl = (dml + phi + pertl) % (2 * np.pi)
dsinls = np.sin(dtl)
dcosls = np.cos(dtl)
dxhd = drd * dcosls - drld * dsinls
dyhd = drd * dsinls + drld * dcosls
# Influence of eccentricity, evection and variation on the geocentric
# motion of the moon.
pertl, pertld, pertp, pertpd = 0, 0, 0, 0
for k in range(0, 3):
a = (dcargm[k,0] + dt * dcargm[k,1]) % (2 * np.pi)
sina = np.sin(a)
cosa = np.cos(a)
pertl += ccampm[k,0] * sina
pertld += ccampm[k,1] * cosa
pertp += ccampm[k,2] * cosa
pertpd -= ccampm[k,3] * sina
# Heliocentric motion of the Earth.
tl = forbel[1] + pertl
sinlm = np.sin(tl)
coslm = np.cos(tl)
sigma = cckm / (1.0 + pertp)
a = sigma * (ccmld + pertld)
b = sigma * pertpd
dxhd = dxhd + a * sinlm + b * coslm
dyhd = dyhd - a * coslm + b * sinlm
dzhd = -sigma * ccfdi * np.cos(forbel[2])
# Barycentric motion of the Earth.
dxbd = dxhd * dc1mme
dybd = dyhd * dc1mme
dzbd = dzhd * dc1mme
for k in range(0, 4):
plon = forbel[k + 3]
pomg = sorbel[k + 1]
pecc = sorbel[k + 9]
tl = (plon + 2.0 * pecc * np.sin(plon - pomg)) % (2 * np.pi)
dxbd += ccpamv[k] * (np.sin(tl) + pecc * np.sin(pomg))
dybd -= ccpamv[k] * (np.cos(tl) + pecc * np.cos(pomg))
dzbd -= ccpamv[k] * sorbel[k + 13] * np.cos(plon - sorbel[k + 5])
# Transition to mean equator of date.
dcosep = np.cos(deps)
dsinep = np.sin(deps)
dyahd = dcosep * dyhd - dsinep * dzhd
dzahd = dsinep * dyhd + dcosep * dzhd
dyabd = dcosep * dybd - dsinep * dzbd
dzabd = dsinep * dybd + dcosep * dzbd
dvelh = constants.au * (np.array([dxhd, dyahd, dzahd])) / u.second
dvelb = constants.au * (np.array([dxbd, dyabd, dzabd])) / u.second
return (dvelh, dvelb)
# NOTE:
# We may want to change the syntax input for corrections so that it accepts a single
# sky coordinate instead of ra/dec.
# Similarly lon/lat/alt/jd could be replaced with a single astropy.units.Time
# class.
def corrections(lon, lat, alt, ra, dec, mjd):
"""
Calculate the heliocentric radial velocity corrections for an astronomical
source.
Parameters
----------
lon : `~astropy.coordinates.Longitude` or float
Earth longitude of the observatory (western direction is positive). Can
be anything that initialises an `~astropy.coordinates.Angle` object
(if float, in degrees).
lat : `~astropy.coordinates.Latitude` or float
Earth latitude of observatory. Can be anything that initialises an
`~astropy.coordinates.Latitude` object (if float, in degrees).
alt : `~astropy.units.Quantity` or float
Altitude of the observatory (if float, in meters).
ra : `~astropy.coordinates.Angle` or float
Right ascension of the object for epoch J2000 (if float, in degrees).
dec : `~astropy.coordinates.Angle` or float
Declination of the object for epoch J2000 (if float, in degrees).
mjd : float
The modified Julian date for the middle of exposure.
Returns
-------
barycorr : `~astropy.units.Quantity`
The barycentric velocity correction.
helcorr : `~astropy.units.Quantity`
The heliocentric velocity correction.
"""
if not isinstance(lon, coord.Longitude):
lon = coord.Longitude(lon * u.deg)
if not isinstance(lat, coord.Latitude):
lat = coord.Latitude(lat * u.deg)
if not isinstance(alt, u.Quantity):
alt *= u.m
if not isinstance(ra, u.Quantity):
ra *= u.deg
if not isinstance(dec, u.Quantity):
dec *= u.deg
# Here we specify the location so that we can easily calculate the mean
    # local sidereal time later on
time = Time(2.4e6 + mjd, format="jd", location=(lon, lat, alt))
epoch = time.datetime.year + time.datetime.month/12. \
+ time.datetime.day/365.
# Precess the coordinates to the current epoch
coordinate = coord.SkyCoord(ra, dec, frame="fk5").transform_to(coord.FK5(equinox="J%s" % (epoch)))
# Convert geodetic latitude into geocentric latitude to correct for rotation
# of the Earth
dlat = ((-11. * 60. + 32.743) * np.sin(2 * lat) + 1.1633 * np.sin(4 * lat) \
- 0.0026 * np.sin(6 * lat)) * u.degree
geocentric_lat = lat + dlat / 3600.
# Calculate distance of observer from Earth center
r = alt + 6378160.0 * u.m * (0.998327073 \
+ 0.001676438 * np.cos(2 * geocentric_lat) \
- 0.000003510 * np.cos(4 * geocentric_lat) \
+ 0.000000008 * np.cos(6 * geocentric_lat))
# Calculate rotational velocity perpendicular to the radius vector
    # Note: 23.934469591229 is the sidereal day in hours for 1986
v = 2 * np.pi * r / (23.934469591229 * 3600 * u.second)
# Calculate vdiurnal velocity
    time.delta_ut1_utc = 0  # we get an error otherwise; no big deal for this application
vdiurnal = v * np.cos(lat) * np.cos(coordinate.dec) \
* np.sin(coordinate.ra - time.sidereal_time("mean"))
# Calculate baricentric and heliocentric velocities
vh, vb = baryvel(time)
# Project along the line of sight
projection = np.array([
np.cos(coordinate.dec) * np.cos(coordinate.ra),
np.cos(coordinate.dec) * np.sin(coordinate.ra),
np.sin(coordinate.dec)])
vbar = (vb * projection).sum()
vhel = (vh * projection).sum()
# Using baricentric velocity for correction
vbar_correction = vdiurnal + vbar
vhel_correction = vdiurnal + vhel
# [TODO] it may be useful to return other components of velocity or extra
# information about the transforms (e.g., gmst, ut, lmst, dlat, lat, vbar,
# vhel, etc)
return (vbar_correction, vhel_correction)
def sol_corrections(ra,de,header):
alt_obs = 1164
lat_obs = -31.2769
long_obs = 149.0658
ut_start = Time("{0}T{1}".format(header["UTDATE"].replace(":", "-"),
header["UTSTART"]), format="isot", scale="utc")
ut_end = Time("{0}T{1}".format(header["UTDATE"].replace(":", "-"),
header["UTEND"]), format="isot", scale="utc")
# Get the MJD of the mid-point of the observation.
mjd = (ut_end - ut_start).jd/2 + ut_start.mjd
# Calculate the correction.
return corrections(long_obs, lat_obs, alt_obs, ra, de, mjd)
def find_file(dirname,fitsfile, ccd, motherfolder):
"""
will find a path to the file given the default galah folder structure
"""
folders=["commissioning/comm1/", "commissioning/comm2/", "commissioning/comm3/", "pilot/", "survey/"]
for i in folders:
composed_path=motherfolder+i+str(dirname)+"/data/ccd_"+str(ccd)+"/"+str(fitsfile)
ifpath=path.isfile(composed_path)
if ifpath: return composed_path
return None
def find_galah_id(name):
"""
returns galah_id if it appears in the name
"""
name=name.split("_")
if len(name)>1 and name[0]=='galahic': return int(name[1])
else: return 'NULL'
def zenith_distance(utdate, utstart, ra, de):
obs = ephem.Observer()
obs.lon = '149.0658'
obs.lat = '-31.2769'
obs.elevation = 1164
obs.date = "/".join(utdate.split(":"))+" "+utstart
star = ephem.FixedBody()
star._ra = np.radians(ra)
star._dec = np.radians(de)
star.compute(obs)
#print ra,de,star.ra,star.dec,np.degrees(star.alt),ephem.degrees(star.alt)
return 90.0-np.degrees(star.alt)
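# For one observation (one dirname/run/ccd), open the matching FITS frame, read its
# fibre table and build one row per fibre (400 of them) holding coordinates, airmass,
# barycentric/heliocentric corrections, the GALAH id and a proposed output name.
# The rows are returned to main(), which bulk-inserts them into the objects table.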
def do_one_file(dirname,fitsfile,run,ccd,plate,ndfclass_updated, cob_id, runccd_id,run_id, motherfolder,n,N):
print n,"/",N,
print " + Creating table for file", str(dirname)+"/"+fitsfile
out_storage=[]
i=(dirname,fitsfile, run,ccd,plate,ndfclass_updated, cob_id, runccd_id,run_id)
path=find_file(i[0],i[1], i[3], motherfolder)
if path==None:
print " - No file found for %s/%s" % (i[0], i[1])
hdulist = pyfits.open(path)
fdata=hdulist["STRUCT.MORE.FIBRES"].data
name= fdata["NAME"]
ra= fdata["RA"]
de= fdata["DEC"]
x= fdata["X"]
y= fdata["Y"]
xe= fdata["XERR"]
ye= fdata["YERR"]
tp= fdata["TYPE"]
pivot= fdata["PIVOT"]
mag= fdata["MAGNITUDE"]
comment= fdata["COMMENT"]
name= fdata["NAME"]
mag=fdata["MAGNITUDE"]
theta=fdata["THETA"]
pmra=fdata["PMRA"]
pmdec=fdata["PMDEC"]
pid=fdata["PID"]
retractor=fdata["RETRACTOR"]
wlen=fdata["WLEN"]
hdulist.close()
h0=pyfits.getheader(path,0)
utdate=h0["UTDATE"]
utstart=h0["UTSTART"]
zenithh=h0["ZDSTART"]
for j in range(1,401):
#change coordinates into degrees
ra_ins=np.degrees(ra[j-1])
de_ins=np.degrees(de[j-1])
#fix observations type
tp_ins=tp[j-1]
if tp_ins=='.': tp_ins='P'
#calculate airmass
zenith=zenith_distance(utdate,utstart,ra_ins,de_ins)
airmass=1.0/np.cos(np.radians(float(zenith)))
#calculate barycentric velocity
barycentric, heliocentric=sol_corrections(ra_ins, de_ins, h0)
barycentric=barycentric.to("km/s").value
heliocentric=heliocentric.to("km/s").value
galahic=find_galah_id(name[j-1])
outname=((i[0]*10000+i[2])*100000+j)*10+i[3]#[date][run][algo][pivot][ccd]
#print i[0],i[1],i[2],i[3],i[4],i[5],i[6],pivot[j-1],j,tp_ins,ra_ins,de_ins,x[j-1],y[j-1],xe[j-1],ye[j-1],name[j-1],comment[j-1],mag[j-1],galahic, outname, airmass, barycentric, heliocentric
out_storage.append((i[7],i[8],pivot[j-1],j,tp_ins,ra_ins,de_ins,x[j-1],y[j-1],xe[j-1],ye[j-1],theta[j-1],name[j-1],comment[j-1],mag[j-1],pmra[j-1],pmdec[j-1],round(pid[j-1],12),retractor[j-1],round(wlen[j-1],12),galahic, outname, airmass, barycentric, heliocentric))
#cur.execute("insert into objects values (%s, '%s', %s, %s, %s, '%s', %s, %s, %s, '%s', %s, %s, %s, %s, %s, %s, '%s', '%s', %s, %s, %s, %s, %s, %s)" % (i[0],i[1],i[2],i[3],i[4],i[5],i[6],pivot[j-1],j,tp_ins,ra_ins,de_ins,x[j-1],y[j-1],xe[j-1],ye[j-1],name[j-1],comment[j-1],mag[j-1],galahic, outname, airmass, barycentric, heliocentric))
#con.commit()
return out_storage
def iterFlatten(root):
if isinstance(root, (list)):
for element in root:
for e in iterFlatten(element):
yield e
else:
yield root
def multi_run_wrapper(args):
return do_one_file(*args)
def main(dbname, user, motherfolder, add, night, ncpu):
con=mdb.connect("dbname=%s user=%s" % (dbname, user))
cur = con.cursor()
#check if table exists
try:
cur.execute("select * from objects limit 3")
if len(cur.fetchall())>0: ifexists=True
else: ifexists=False
except:
con.close()
con=mdb.connect("dbname=%s user=%s" % (dbname, user))
cur = con.cursor()
ifexists=False
#create a table
if (add==False and night==None) or ifexists==False:
cur.execute("DROP TABLE IF EXISTS objects")# be careful! will delete the previous table
cur.execute("CREATE TABLE objects(runccd_id bigint)")
cur.execute("ALTER TABLE objects ADD run_id bigint")
cur.execute("ALTER TABLE objects ADD pivot smallint")
cur.execute("ALTER TABLE objects ADD fibre smallint")
cur.execute("ALTER TABLE objects ADD type varchar(1)")
cur.execute("ALTER TABLE objects ADD ra float8")
cur.execute("ALTER TABLE objects ADD dec float8")
cur.execute("ALTER TABLE objects ADD x float8")
cur.execute("ALTER TABLE objects ADD y float8")
cur.execute("ALTER TABLE objects ADD xerr float8")
cur.execute("ALTER TABLE objects ADD yerr float8")
cur.execute("ALTER TABLE objects ADD theta float8")
cur.execute("ALTER TABLE objects ADD name varchar(256)")
cur.execute("ALTER TABLE objects ADD comment varchar(256)")
cur.execute("ALTER TABLE objects ADD mag float8")
cur.execute("ALTER TABLE objects ADD pmra float8")
cur.execute("ALTER TABLE objects ADD pmdec float8")
cur.execute("ALTER TABLE objects ADD pid int")
cur.execute("ALTER TABLE objects ADD retractor int")
cur.execute("ALTER TABLE objects ADD wlen float8")
cur.execute("ALTER TABLE objects ADD galah_id bigint")#if exsists
cur.execute("ALTER TABLE objects ADD out_name bigint")#proposed output name of individual spectra. Reduction pipeline can decide for which spectra to create the actual output. Arcs, parked fibres and so on can be omitted.
cur.execute("ALTER TABLE objects ADD airmass float8")
cur.execute("ALTER TABLE objects ADD barycentric float8")
cur.execute("ALTER TABLE objects ADD heliocentric float8")
elif add==False and night!=None:
cur.execute("delete from objects where run_id/1000=%s" % (night))
else:
pass
#a folder in which all the observations are. Let us call it the motherfolder
if motherfolder[-1]!="/": motherfolder=motherfolder+"/"
#find the files to process
if night==None and add==False:#default
cur.execute("select dirname, fitsfile, run, ccd, plate, ndfclass_updated, cob_id, runccd_id, run_id from observations where qflag & 64 = 0 and obstatus>0 order by dirname,run,ccd")
files=cur.fetchall()
elif add==True:#add missing
cur.execute("select dirname, fitsfile, run, ccd, plate, ndfclass_updated, cob_id, runccd_id, run_id from observations where qflag & 64 = 0 and obstatus>0 and (dirname,fitsfile) not in (select distinct(run_id/1000),fitsfile from objects) order by dirname,run,ccd")
files=cur.fetchall()
else:#for one night only
cur.execute("select dirname, fitsfile, run, ccd, plate, ndfclass_updated, cob_id,runccd_id, run_id from observations where qflag & 64 = 0 and obstatus>0 and dirname=%s order by dirname,run,ccd" % (night))
files=cur.fetchall()
print files
    # invoke multiprocessing
N_CPU=int(ncpu)
pool=multiprocessing.Pool(N_CPU)
arg=[]
n=0
N=len(files)
for i in files:
n+=1
arg.append(i+(motherfolder,n, N))
result=pool.imap(multi_run_wrapper, arg)
result= [item for sublist in result for item in sublist]
for i in result:
aa=[]
for nn,j in enumerate(i):
if j<0.000000000000000000000000000000000000001: aa.append(0.0)# cheat if number is too small for double precision
else: aa.append(j)
cur.execute("insert into objects values (%s, %s, %s, %s, '%s', %s, %s, %s, %s, %s, %s, %s, '%s', '%s', %s, %s, %s, %s, %s, %s, %s, %s, %s, %s, %s)" % tuple(aa))
con.commit()
if __name__=="__main__":
#computer/user specific:
dbname='hermes_master'#name of the postgresql database. Must be created beforehand
user='jkos'#username for the database.
#parse the arguments
parser = argparse.ArgumentParser()
parser.add_argument("motherfolder", help="path to the folder where galah data is")
parser.add_argument("--add", help="add to database rather than rewrite the whole database", action="store_true")
parser.add_argument("--one_night", help="add to database only one night of data. Use yymmdd to specify the night")
parser.add_argument("--n_cpu", help="specify how many cpus you want to use")
args = parser.parse_args()
main(dbname, user, args.motherfolder,args.add,args.one_night,args.n_cpu)
system("pg_dump -O %s -t objects > objects.sql" % (dbname))
system("psql %s -c \"COPY (SELECT * from objects order by runccd_id,fibre) TO stdout DELIMITER ',' CSV HEADER\" > objects.csv" % (dbname))
|
|
# -*- coding: utf-8 -*-
from plivo.utils import to_param_dict
from plivo.utils.validators import *
from ..base import ListResponseObject, PlivoResource, PlivoResourceInterface
class Identity(PlivoResource):
_name = 'Identity'
_identifier_string = 'id'
def update(self,
country_iso=None,
salutation=None,
first_name=None,
last_name=None,
birth_place=None,
birth_date=None,
nationality=None,
id_nationality=None,
id_issue_date=None,
id_type=None,
id_number=None,
address_line1=None,
address_line2=None,
city=None,
region=None,
postal_code=None,
alias=None,
business_name=None,
fiscal_identification_code=None,
street_code=None,
municipal_code=None,
subaccount=None,
file_to_upload=None,
auto_correct_address=None,
callback_url=None):
return self.client.identities.update(
self.id, country_iso, salutation, first_name, last_name,
birth_place, birth_date, nationality, id_nationality,
id_issue_date, id_type, id_number, alias, business_name,
subaccount, file_to_upload, callback_url)
def delete(self):
return self.client.identities.delete(self.id)
class Identities(PlivoResourceInterface):
_resource_type = Identity
@validate_args(
country_iso=[of_type(six.text_type)],
salutation=[all_of(of_type(six.text_type), is_in(('Mr', 'Ms')))],
first_name=[of_type(six.text_type)],
last_name=[of_type(six.text_type)],
birth_place=[of_type(six.text_type)],
birth_date=[of_type(six.text_type)],
nationality=[of_type(six.text_type)],
id_nationality=[of_type(six.text_type)],
id_issue_date=[of_type(six.text_type)],
id_type=[of_type(six.text_type)],
id_number=[of_type(six.text_type)],
alias=[optional(of_type(six.text_type))],
business_name=[optional(of_type(six.text_type))],
subaccount=[optional(is_subaccount())],
file_to_upload=[optional(of_type(six.text_type))])
def create(self,
country_iso,
salutation,
first_name,
last_name,
birth_place,
birth_date,
nationality,
id_nationality,
id_issue_date,
id_type,
id_number,
address_line1,
address_line2,
city,
region,
postal_code,
alias=None,
business_name=None,
fiscal_identification_code=None,
street_code=None,
municipal_code=None,
subaccount=None,
file_to_upload=None,
auto_correct_address=None,
callback_url=None):
if file_to_upload:
file_extension = file_to_upload.strip().split('.')[-1].lower()
if file_extension not in ['jpg', 'jpeg', 'png', 'pdf']:
raise ValidationError(
'File format of the file to be uploaded should be one of JPG, JPEG, PNG or PDF'
)
content_types = {
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'png': 'image/png',
'pdf': 'application/pdf',
}
import os
files = {
'file': (file_to_upload.split(os.sep)[-1], open(
file_to_upload, 'rb'), content_types[file_extension])
}
else:
files = {'file': ''}
data_to_send = to_param_dict(self.create, locals())
return self.client.request(
'POST', ('Verification', 'Identity'), data_to_send, files=files)
@validate_args(
identity_id=[of_type(six.text_type)],
country_iso=[of_type(six.text_type)],
salutation=[all_of(of_type(six.text_type), is_in(('Mr', 'Ms')))],
first_name=[of_type(six.text_type)],
last_name=[of_type(six.text_type)],
birth_place=[of_type(six.text_type)],
birth_date=[of_type(six.text_type)],
nationality=[of_type(six.text_type)],
id_nationality=[of_type(six.text_type)],
id_issue_date=[of_type(six.text_type)],
id_type=[of_type(six.text_type)],
id_number=[of_type(six.text_type)],
alias=[optional(of_type(six.text_type))],
business_name=[optional(of_type(six.text_type))],
subaccount=[optional(is_subaccount())],
file_to_upload=[optional(of_type(six.text_type))])
def update(self,
identity_id,
country_iso=None,
salutation=None,
first_name=None,
last_name=None,
birth_place=None,
birth_date=None,
nationality=None,
id_nationality=None,
id_issue_date=None,
id_type=None,
id_number=None,
address_line1=None,
address_line2=None,
city=None,
region=None,
postal_code=None,
alias=None,
business_name=None,
fiscal_identification_code=None,
street_code=None,
municipal_code=None,
subaccount=None,
file_to_upload=None,
auto_correct_address=None,
callback_url=None):
if file_to_upload:
file_extension = file_to_upload.strip().split('.')[-1].lower()
if file_extension not in ['jpg', 'jpeg', 'png', 'pdf']:
raise ValidationError(
'File format of the file to be uploaded should be one of JPG, JPEG, PNG or PDF'
)
content_types = {
'jpeg': 'image/jpeg',
'jpg': 'image/jpeg',
'png': 'image/png',
'pdf': 'application/pdf',
}
import os
files = {
'file': (file_to_upload.split(os.sep)[-1], open(
file_to_upload, 'rb'), content_types[file_extension])
}
else:
files = {'file': ''}
data_to_send = to_param_dict(self.create, locals())
return self.client.request(
'POST', ('Verification', 'Identity', identity_id),
data_to_send,
files=files)
@validate_args(identity_id=[of_type(six.text_type)])
def get(self, identity_id):
return self.client.request(
'GET', ('Verification', 'Identity', identity_id),
response_type=Identity)
@validate_args(
country_iso=[optional(of_type(six.text_type))],
customer_name=[optional(of_type(six.text_type))],
alias=[optional(of_type(six.text_type))],
verification_status=[
optional(
all_of(
of_type(six.text_type),
is_in(('pending', 'accepted', 'rejected', ))))
],
limit=[
optional(
all_of(
of_type(*six.integer_types),
check(lambda limit: 0 < limit <= 20, '0 < limit <= 20')))
],
offset=[
optional(
all_of(
of_type(*six.integer_types),
check(lambda offset: 0 <= offset, '0 <= offset')))
])
def list(self,
country_iso=None,
customer_name=None,
alias=None,
verification_status=None,
limit=20,
offset=0):
return self.client.request(
'GET', ('Verification', 'Identity', ),
to_param_dict(self.list, locals()),
response_type=ListResponseObject,
objects_type=Identity)
@validate_args(identity_id=[of_type(six.text_type)])
def delete(self, identity_id):
return self.client.request('DELETE', ('Verification', 'Identity',
identity_id))
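# Hypothetical usage sketch of the interface above. It assumes the RestClient
# exposes this resource as `client.identities` (an assumption, check your
# plivo SDK version); every field value below is a placeholder, not real data.
def _example_create_identity(client):
    return client.identities.create(
        country_iso='ES', salutation='Mr', first_name='Bruce',
        last_name='Wayne', birth_place='Gotham', birth_date='1970-01-01',
        nationality='ES', id_nationality='ES', id_issue_date='2010-01-01',
        id_type='passport', id_number='A1234567',
        address_line1='1 Wayne Manor', address_line2='Apt 1', city='Gotham',
        region='Gotham', postal_code='08001',
        file_to_upload='/path/to/passport.jpg')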
|
|
# -*- coding: utf-8 -*-
"""This file contains a appfirewall.log (Mac OS X Firewall) parser."""
import datetime
import logging
import pyparsing
from plaso.events import time_events
from plaso.lib import eventdata
from plaso.lib import timelib
from plaso.parsers import manager
from plaso.parsers import text_parser
__author__ = 'Joaquin Moreno Garijo ([email protected])'
class MacAppFirewallLogEvent(time_events.TimestampEvent):
"""Convenience class for a Mac Wifi log line event."""
DATA_TYPE = u'mac:asl:appfirewall:line'
def __init__(self, timestamp, structure, process_name, action):
"""Initializes the event object.
Args:
timestamp: The timestamp time value, epoch.
structure: structure with the parse fields.
computer_name: string with the name of the computer.
agent: string with the agent that save the log.
status: string with the saved status action.
process_name: string name of the entity that tried do the action.
action: string with the action
"""
super(MacAppFirewallLogEvent, self).__init__(
timestamp, eventdata.EventTimestamp.ADDED_TIME)
self.timestamp = timestamp
self.computer_name = structure.computer_name
self.agent = structure.agent
self.status = structure.status
self.process_name = process_name
self.action = action
class MacAppFirewallParser(text_parser.PyparsingSingleLineTextParser):
"""Parse text based on appfirewall.log file."""
NAME = u'mac_appfirewall_log'
DESCRIPTION = u'Parser for appfirewall.log files.'
ENCODING = u'utf-8'
# Regular expressions for known actions.
  # Define what a log line should look like.
# Example: 'Nov 2 04:07:35 DarkTemplar-2.local socketfilterfw[112] '
# '<Info>: Dropbox: Allow (in:0 out:2)'
# INFO: process_name is going to have a white space at the beginning.
FIREWALL_LINE = (
text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
text_parser.PyparsingConstants.TIME.setResultsName(u'time') +
pyparsing.Word(pyparsing.printables).setResultsName(u'computer_name') +
pyparsing.Word(pyparsing.printables).setResultsName(u'agent') +
pyparsing.Literal(u'<').suppress() +
pyparsing.CharsNotIn(u'>').setResultsName(u'status') +
pyparsing.Literal(u'>:').suppress() +
pyparsing.CharsNotIn(u':').setResultsName(u'process_name') +
pyparsing.Literal(u':') +
pyparsing.SkipTo(pyparsing.lineEnd).setResultsName(u'action'))
# Repeated line.
# Example: Nov 29 22:18:29 --- last message repeated 1 time ---
REPEATED_LINE = (
text_parser.PyparsingConstants.MONTH.setResultsName(u'month') +
text_parser.PyparsingConstants.ONE_OR_TWO_DIGITS.setResultsName(u'day') +
text_parser.PyparsingConstants.TIME.setResultsName(u'time') +
pyparsing.Literal(u'---').suppress() +
pyparsing.CharsNotIn(u'---').setResultsName(u'process_name') +
pyparsing.Literal(u'---').suppress())
# Define the available log line structures.
LINE_STRUCTURES = [
(u'logline', FIREWALL_LINE),
(u'repeated', REPEATED_LINE)]
def __init__(self):
"""Initializes a parser object."""
super(MacAppFirewallParser, self).__init__()
self._year_use = 0
self._last_month = None
self.previous_structure = None
def VerifyStructure(self, parser_mediator, line):
"""Verify that this file is a Mac AppFirewall log file.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
line: A single line from the text file.
Returns:
True if this is the correct parser, False otherwise.
"""
try:
line = self.FIREWALL_LINE.parseString(line)
except pyparsing.ParseException:
logging.debug(u'Not a Mac AppFirewall log file')
return False
if (line.action != u'creating /var/log/appfirewall.log' or
line.status != u'Error'):
return False
return True
def ParseRecord(self, parser_mediator, key, structure):
"""Parses each record structure and return an event object if applicable.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
key: An identification string indicating the name of the parsed
structure.
structure: A pyparsing.ParseResults object from a line in the
log file.
Returns:
An event object (instance of EventObject) or None.
"""
if key in [u'logline', u'repeated']:
return self._ParseLogLine(parser_mediator, structure, key)
else:
logging.warning(
u'Unable to parse record, unknown structure: {0:s}'.format(key))
def _ParseLogLine(self, parser_mediator, structure, key):
"""Parse a logline and store appropriate attributes.
Args:
parser_mediator: A parser mediator object (instance of ParserMediator).
structure: log line of structure.
key: type of line log (normal or repeated).
Returns:
Return an object MacAppFirewallLogEvent.
"""
# TODO: improve this to get a valid year.
if not self._year_use:
self._year_use = parser_mediator.year
if not self._year_use:
# Get from the creation time of the file.
self._year_use = self._GetYear(
self.file_entry.GetStat(), parser_mediator.timezone)
# If fail, get from the current time.
if not self._year_use:
self._year_use = timelib.GetCurrentYear()
# Gap detected between years.
month = timelib.MONTH_DICT.get(structure.month.lower())
if not self._last_month:
self._last_month = month
if month < self._last_month:
self._year_use += 1
timestamp = self._GetTimestamp(
structure.day,
month,
self._year_use,
structure.time)
if not timestamp:
logging.debug(u'Invalid timestamp {0:s}'.format(structure.timestamp))
return
self._last_month = month
    # If the current entry is a repeated entry, we take the basic information
    # from the previous entry, but use the timestamp from the current entry.
if key == u'logline':
self.previous_structure = structure
else:
structure = self.previous_structure
# Pyparsing reads in RAW, but the text is in UTF8.
try:
action = structure.action.decode(u'utf-8')
except UnicodeDecodeError:
logging.warning(
u'Decode UTF8 failed, the message string may be cut short.')
action = structure.action.decode(u'utf-8', u'ignore')
# Due to the use of CharsNotIn pyparsing structure contains whitespaces
# that need to be removed.
process_name = structure.process_name.strip()
event_object = MacAppFirewallLogEvent(
timestamp, structure, process_name, action)
return event_object
def _GetTimestamp(self, day, month, year, time):
"""Gets a timestamp from a pyparsing ParseResults timestamp.
This is a timestamp_string as returned by using
text_parser.PyparsingConstants structures:
08, Nov, [20, 36, 37]
Args:
timestamp_string: The pyparsing ParseResults object
Returns:
day: An integer representing the day.
month: An integer representing the month.
year: An integer representing the year.
timestamp: A plaso timelib timestamp event or 0.
"""
try:
hour, minute, second = time
timestamp = timelib.Timestamp.FromTimeParts(
year, month, day, hour, minute, second)
except ValueError:
timestamp = 0
return timestamp
def _GetYear(self, stat, timezone):
"""Retrieves the year either from the input file or from the settings."""
time = getattr(stat, u'crtime', 0)
if not time:
time = getattr(stat, u'ctime', 0)
if not time:
logging.error(
u'Unable to determine correct year of log file, defaulting to '
u'current year.')
return timelib.GetCurrentYear()
try:
timestamp = datetime.datetime.fromtimestamp(time, timezone)
except ValueError as exception:
logging.error((
u'Unable to determine correct year of log file with error: {0:s}, '
u'defaulting to current year.').format(exception))
return timelib.GetCurrentYear()
return timestamp.year
manager.ParsersManager.RegisterParser(MacAppFirewallParser)
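# Hypothetical sketch: running the FIREWALL_LINE grammar over the sample line
# quoted in the class-level comment above (this is the documented example, not
# real log data).
def _example_parse_firewall_line():
  sample = (
      u'Nov  2 04:07:35 DarkTemplar-2.local socketfilterfw[112] '
      u'<Info>: Dropbox: Allow (in:0 out:2)')
  tokens = MacAppFirewallParser.FIREWALL_LINE.parseString(sample)
  # Per the INFO note above, process_name keeps its leading whitespace and is
  # stripped later by _ParseLogLine; status is the text between the brackets.
  return tokens.status, tokens.process_name.strip(), tokens.action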
|
|
import copy
import warnings
import numpy
import six
from chainer import backend
from chainer.backends import cuda
from chainer import configuration
from chainer.dataset import convert
from chainer.dataset import iterator as iterator_module
from chainer import function
from chainer import iterators
from chainer import link
from chainer import reporter as reporter_module
from chainer.training import extension
class Evaluator(extension.Extension):
"""Trainer extension to evaluate models on a validation set.
This extension evaluates the current models by a given evaluation function.
It creates a :class:`~chainer.Reporter` object to store values observed in
the evaluation function on each iteration. The report for all iterations
are aggregated to :class:`~chainer.DictSummary`. The collected mean values
are further reported to the reporter object of the trainer, where the name
of each observation is prefixed by the evaluator name. See
:class:`~chainer.Reporter` for details in naming rules of the reports.
    Evaluator has a customizable structure similar to that of
:class:`~chainer.training.updaters.StandardUpdater`.
The main differences are:
- There are no optimizers in an evaluator. Instead, it holds links
to evaluate.
- An evaluation loop function is used instead of an update function.
- Preparation routine can be customized, which is called before each
evaluation. It can be used, e.g., to initialize the state of stateful
recurrent networks.
There are two ways to modify the evaluation behavior besides setting a
custom evaluation function. One is by setting a custom evaluation loop via
the ``eval_func`` argument. The other is by inheriting this class and
    overriding the :meth:`evaluate` method. In the latter case, users have to
    create and handle a reporter object manually. Users also have to copy the
    iterators before using them, so that they can be reused for the next
    evaluation. In both cases, the functions are called in testing mode
(i.e., ``chainer.config.train`` is set to ``False``).
This extension is called at the end of each epoch by default.
Args:
iterator: Dataset iterator for the validation dataset. It can also be
a dictionary of iterators. If this is just an iterator, the
iterator is registered by the name ``'main'``.
target: Link object or a dictionary of links to evaluate. If this is
just a link object, the link is registered by the name ``'main'``.
converter: Converter function to build input arrays.
:func:`~chainer.dataset.concat_examples` is used by default.
device: Device to which the validation data is sent. Negative value
indicates the host memory (CPU).
eval_hook: Function to prepare for each evaluation process. It is
called at the beginning of the evaluation. The evaluator extension
object is passed at each call.
eval_func: Evaluation function called at each iteration. The target
link to evaluate as a callable is used by default.
Attributes:
converter: Converter function.
device: Device to which the validation data is sent.
eval_hook: Function to prepare for each evaluation process.
eval_func: Evaluation function called at each iteration.
"""
trigger = 1, 'epoch'
default_name = 'validation'
priority = extension.PRIORITY_WRITER
name = None
def __init__(self, iterator, target, converter=convert.concat_examples,
device=None, eval_hook=None, eval_func=None):
if device is not None:
device = backend._get_device_compat(device)
if isinstance(iterator, iterator_module.Iterator):
iterator = {'main': iterator}
self._iterators = iterator
if isinstance(target, link.Link):
target = {'main': target}
self._targets = target
self.converter = converter
self.device = device
self.eval_hook = eval_hook
self.eval_func = eval_func
for key, iter in six.iteritems(iterator):
if (isinstance(iter, (iterators.SerialIterator,
iterators.MultiprocessIterator,
iterators.MultithreadIterator)) and
getattr(iter, 'repeat', False)):
                msg = ('The `repeat` property of the iterator {} '
                       'is set to `True`. Typically, the evaluator sweeps '
                       'over iterators until they stop, '
                       'but since the property is `True`, this iterator '
                       'might not stop and evaluation could go into '
                       'an infinite loop. '
                       'We recommend checking the configuration '
                       'of iterators.'.format(key))
                warnings.warn(msg)
def get_iterator(self, name):
"""Returns the iterator of the given name."""
return self._iterators[name]
def get_all_iterators(self):
"""Returns a dictionary of all iterators."""
return dict(self._iterators)
def get_target(self, name):
"""Returns the target link of the given name."""
return self._targets[name]
def get_all_targets(self):
"""Returns a dictionary of all target links."""
return dict(self._targets)
def __call__(self, trainer=None):
"""Executes the evaluator extension.
Unlike usual extensions, this extension can be executed without passing
a trainer object. This extension reports the performance on validation
dataset using the :func:`~chainer.report` function. Thus, users can use
this extension independently from any trainer by manually configuring
a :class:`~chainer.Reporter` object.
Args:
trainer (~chainer.training.Trainer): Trainer object that invokes
this extension. It can be omitted in case of calling this
extension manually.
Returns:
dict: Result dictionary that contains mean statistics of values
reported by the evaluation function.
"""
# set up a reporter
reporter = reporter_module.Reporter()
if self.name is not None:
prefix = self.name + '/'
else:
prefix = ''
for name, target in six.iteritems(self._targets):
reporter.add_observer(prefix + name, target)
reporter.add_observers(prefix + name,
target.namedlinks(skipself=True))
with reporter:
with configuration.using_config('train', False):
result = self.evaluate()
reporter_module.report(result)
return result
def evaluate(self):
"""Evaluates the model and returns a result dictionary.
This method runs the evaluation loop over the validation dataset. It
accumulates the reported values to :class:`~chainer.DictSummary` and
returns a dictionary whose values are means computed by the summary.
Note that this function assumes that the main iterator raises
``StopIteration`` or code in the evaluation loop raises an exception.
So, if this assumption is not held, the function could be caught in
an infinite loop.
Users can override this method to customize the evaluation routine.
.. note::
This method encloses :attr:`eval_func` calls with
:func:`function.no_backprop_mode` context, so all calculations
using :class:`~chainer.FunctionNode`\\s inside
:attr:`eval_func` do not make computational graphs. It is for
reducing the memory consumption.
Returns:
dict: Result dictionary. This dictionary is further reported via
:func:`~chainer.report` without specifying any observer.
"""
iterator = self._iterators['main']
eval_func = self.eval_func or self._targets['main']
if self.eval_hook:
self.eval_hook(self)
if hasattr(iterator, 'reset'):
iterator.reset()
it = iterator
else:
it = copy.copy(iterator)
summary = reporter_module.DictSummary()
for batch in it:
observation = {}
with reporter_module.report_scope(observation):
in_arrays = self._call_converter(batch, self.device)
with function.no_backprop_mode():
if isinstance(in_arrays, tuple):
eval_func(*in_arrays)
elif isinstance(in_arrays, dict):
eval_func(**in_arrays)
else:
eval_func(in_arrays)
summary.add(observation)
return summary.compute_mean()
def _call_converter(self, batch, device):
# TODO(niboshi): This is a temporary workaround to keep backward
# compatibility about user-defined custom converters. Existing
# converters expect int values as the `device` argument, so they
# can't handle ChainerX devices. We should either break backward
        # compatibility at some time or introduce a separate API.
converter = self.converter
if converter is convert.concat_examples:
return converter(batch, device)
else:
if device is None:
return converter(batch, None)
if device.xp is numpy:
return converter(batch, -1)
if device.xp is cuda.cupy:
return converter(batch, device.device.id)
raise NotImplementedError(
'Currently only `concat_examples` supports ChainerX.')
def finalize(self):
"""Finalizes the evaluator object.
This method calls the `finalize` method of each iterator that
this evaluator has.
It is called at the end of training loops.
"""
for iterator in six.itervalues(self._iterators):
iterator.finalize()
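# Minimal standalone-usage sketch of the extension described above: no Trainer
# is involved, the evaluator builds its own Reporter in __call__. The `model`
# (a Link computing a loss) and `test_data` names are hypothetical placeholders.
def _example_standalone_evaluation(model, test_data):
    test_iter = iterators.SerialIterator(
        test_data, batch_size=32, repeat=False, shuffle=False)
    evaluator = Evaluator(test_iter, model)
    result = evaluator()
    # result is a dict of mean reported values, e.g. result.get('main/loss').
    return result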
|
|
from collections import deque
import os
import pickle
import time
import numpy as np
import cv2
import tensorflow as tf
class LanderState(object):
def __init__(self, l_pos, l_v, base_pos, fuel):
self.l_pos = l_pos
self.l_v = l_v
self.base_pos = base_pos
self.fuel = fuel
def _to_pix(pos, size):
return np.round(np.asarray(pos) * size).astype(np.int)
def state_to_im(state, im):
size = im.shape[::-1]
im[:] = 0
lander_size = [0.08, 0.18]
base_size = [0.28, 0.08]
pix_pos = _to_pix(state.l_pos, size)
pix_size = _to_pix(lander_size, size)
cv2.rectangle(im, tuple(pix_pos - (pix_size[0] / 2, 0)),
tuple(pix_pos - (pix_size[0] / 2, 0) + pix_size), 1., -1)
pix_ground_lt = _to_pix((0., state.base_pos[1]), size)
pix_ground_rb = _to_pix((1., 0.), size)
cv2.rectangle(im, tuple(pix_ground_lt),
tuple(pix_ground_rb), 0.3, -1)
pix_pos = _to_pix(state.base_pos, size)
pix_size = _to_pix(base_size, size)
cv2.rectangle(im, tuple(pix_pos - pix_size / 2),
tuple(pix_pos - pix_size / 2 + pix_size), 0.7, -1)
return im
def propagate(state, action):
'''
action is 0 (thrust to left), 1 (thrust to right), 2 (thrust up), 3 (do nothing)
'''
dt = 0.1
g = np.array([0., -0.5])
a = {0: np.array([-1., 0.]), 1: np.array([1., 0.]), 2: np.array([0., 1.]), 3: np.array([0., 0.])}[action]
return LanderState(l_pos=state.l_pos + dt * state.l_v + 0.5 * (a + g) * dt * dt,
l_v=np.clip(state.l_v + dt * (a + g), -5, 5),
base_pos=state.base_pos,
fuel=state.fuel - (action != 3))
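# A minimal usage sketch of the dynamics above (hypothetical start state; the
# numbers are placeholders, not taken from the rest of this script). Action 2
# fires the upward thruster, which together with gravity g=(0, -0.5) gives a
# net vertical acceleration of +0.5 and burns one unit of fuel.
def _example_propagate_step():
    state = LanderState(l_pos=np.array([0.5, 1.0]),
                        l_v=np.array([0.0, 0.0]),
                        base_pos=np.array([0.5, 0.1]),
                        fuel=200)
    next_state = propagate(state, 2)
    # next_state.l_v is approximately (0.0, 0.05) and next_state.fuel == 199.
    return next_state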
def get_action(state):
'''
Returns action as 0 (thrust to left), 1 (thrust to right), 2 (thrust up), 3 (do nothing)
'''
v_goal = (state.base_pos - state.l_pos) * (1.25, 0.8)
dv = v_goal - state.l_v
if np.argmax(np.abs(dv)) == 0:
a = int((np.sign(dv[0]) + 1) / 2)
elif dv[1] > 0:
a = 2
else:
a = 3
return a
def state_to_reward(state):
if (state.l_pos[1] > 1.5) or (state.l_pos[0] > 1.25) or (state.l_pos[0] < -0.25): # out of bounds
return -10.
if (state.l_pos[1] < state.base_pos[1]): # crash against ground
return -10. * np.abs(state.l_v[1])
if state.fuel <= 0: # out of fuel
return -1.
if np.abs(state.l_pos[1] - state.base_pos[1]) < 0.02 and \
np.abs(state.l_pos[0] - state.base_pos[0]) < 0.14 and \
np.abs(state.l_v[0]) < 0.5 and -0.1 < state.l_v[1] <= 0: # win
return 20.
return 0.
def init_state():
l_pos = np.random.random(2) * [1., 0.2] + [0., 0.8]
l_v = (np.random.random(2) - 0.5) * [0.02, 0.1]
base_pos = np.random.random(2) * [0.9, 0.3]
fuel = 200
return LanderState(l_pos, l_v, base_pos, fuel)
def play(im_size, n_steps, display=False, player=get_action, state=None):
'''
Returns 3 n_steps-length np arrays, and the final state. The three
arrays are: one with frames, one with actions, one with rewards.
actions[i] is the action taken when presented with frames[i], and
rewards[i] is the reward obtained AFTER executing action actions[i],
i.e., the reward for the state that corresponds to frames[i+1]
'''
movie = np.zeros((n_steps, ) + im_size[::-1], dtype=np.float32)
actions = np.zeros((n_steps, 4), dtype=np.float32)
if state is None:
state = init_state()
reward = np.zeros(n_steps, dtype=np.float32)
for i in xrange(n_steps):
state_to_im(state, movie[i])
a = player(state)
actions[i, a] = 1.
state = propagate(state, a)
r = state_to_reward(state)
if r < 0.:
if display:
print "lose!"
reward[i] = r
state = init_state()
elif r > 0.:
if display:
print "win!"
reward[i] = r
state = init_state()
if display:
cv2.imshow("Lander", cv2.resize(np.flipud(movie[i]), None, None,
160/movie[i].shape[1], 160/movie[i].shape[0], cv2.INTER_NEAREST))
cv2.waitKey(1)
return movie, actions, reward, state
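# Hypothetical sketch of generating a short dataset with the scripted player
# above; the frame size and step count are placeholders. actions[i] is taken
# on frames[i], and reward[i] is the reward observed after that action.
def _example_play_short_episode():
    movie, actions, reward, final_state = play((32, 32), 100, display=False)
    # movie has shape (100, 32, 32), actions (100, 4) one-hot, reward (100,).
    return movie, actions, reward, final_state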
def conv_relu(input, kernel_shape, stride):
weights = tf.get_variable("weights", kernel_shape, initializer=tf.truncated_normal_initializer(
mean=0., stddev=0.01 / np.sqrt(np.prod(kernel_shape[:3]))))
biases = tf.get_variable("biases", kernel_shape[-1:], initializer=tf.constant_initializer(0.0))
conv = tf.nn.conv2d(input, weights, [1, stride, stride, 1], padding='SAME')
# conv_max = tf.nn.avg_pool(conv, [1, stride, stride, 1], [1, stride, stride, 1], padding='SAME')
return tf.nn.relu(conv + biases)
def model(data, prev_outputs, image_size, n_channels, n_actions, n_prev_actions):
kernel_defs = [(8, 16, 4), (2, 32, 1)] # each conv layer, (patch_side, n_kernels, stride)
fc_sizes = [256]
n_input_kernels = n_channels
for i, k in enumerate(kernel_defs):
with tf.variable_scope("conv_%i" % i):
kernel_shape = (k[0], k[0], n_input_kernels, k[1])
data = conv_relu(data, kernel_shape, k[2])
n_input_kernels = k[1]
for i, n in enumerate(fc_sizes):
with tf.variable_scope("fc_%i" % i):
if i == 0:
previous_n = kernel_defs[-1][1] * np.prod(image_size) / np.prod([k[2] for k in kernel_defs])**2
data = tf.reshape(data, [-1, previous_n])
reshape_prev_outputs = tf.reshape(prev_outputs, [-1, n_actions * n_prev_actions])
prev_outputs_weights = tf.get_variable("prev_outputs_weights", [n_actions * n_prev_actions, n],
initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01/np.sqrt(n_prev_actions * n_actions)))
else:
previous_n = fc_sizes[i-1]
weights = tf.get_variable("weights", [previous_n, n],
initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01 / np.sqrt(previous_n)))
biases = tf.get_variable("biases", [n], initializer=tf.constant_initializer(0.0))
relu_input = tf.matmul(data, weights) + biases
if i == 0:
relu_input += 0.1 * (previous_n / n_actions / n_prev_actions) * tf.matmul(reshape_prev_outputs, prev_outputs_weights)
data = tf.nn.relu(relu_input)
with tf.variable_scope("flat_out"):
weights = tf.get_variable("weights", [fc_sizes[-1], n_actions],
initializer=tf.truncated_normal_initializer(mean=0., stddev=0.01 / np.sqrt(fc_sizes[-1])))
biases = tf.get_variable("biases", [n_actions], initializer=tf.constant_initializer(0.0))
return tf.matmul(data, weights) + biases
def make_learner(image_size, n_channels, n_actions, n_prev_actions):
things = {}
things['graph'] = tf.Graph()
with things['graph'].as_default():
# Input and teacher place holders
things['input'] = tf.placeholder(tf.float32)
things['output'] = tf.placeholder(tf.float32)
things['prev_outputs_input'] = tf.placeholder(tf.float32)
things['logits'] = model(things['input'], things['prev_outputs_input'], image_size, n_channels, n_actions, n_prev_actions)
things['loss'] = tf.reduce_mean(
tf.nn.softmax_cross_entropy_with_logits(things['logits'], things['output']))
things['q_loss'] = tf.reduce_mean(
tf.square(things['logits'] - things['output']))
things['learner'] = tf.train.AdamOptimizer(0.001).minimize(things['loss'])
things['q_learner'] = tf.train.RMSPropOptimizer(1e-9, decay=0.99, epsilon=1e-12).minimize(things['q_loss'])
# Predictors for the training, validation, and test data.
things['prediction'] = tf.nn.softmax(things['logits'])
things['saver'] = tf.train.Saver()
return things
def make_datasets(movie, actions, reward, n_validation, n_test, frames_per_sequence):
'''
Generates training, validation and test datasets from the given movie, actions and reward, returning
    them as three 4-tuples: one with training data, one with validation data, one with test data.
Each 4-tuple is (images, prev_outputs, labels, reward). However, there is a difference between the
training 4-tuple and the validation and test 4-tuples. The validation and test data is already
"sequencified", meaning that images[k] is already the set of frames_per_sequence frames to
feed to the convnet, and prev_outputs[k] is already the set of (frames_per_sequence - 1) outputs
    to feed to the convnet. labels[k] is the action corresponding to the last frame in images[k], and
reward[k] is the reward received after performing that action. images, prev_outputs, labels and
reward have the same length.
Meanwhile, in the training 4-tuple, images and prev_outputs are not sequencified. This means that
images[k] is just one frame, so images[k:k+frames_per_sequence] is the sequence of frames at
whose last frame the player performed the action labels[k] and received the reward reward[k],
while the actions preceding that action are prev_outputs[k:k+frames_per_sequence - 1]. If N
is the length of labels and reward, the length of images is N + frames_per_sequence - 1, and
the length of prev_outputs is N + frames_per_sequence - 2.
'''
assert len(movie) >= n_validation + n_test + frames_per_sequence
n_frames = len(movie) - frames_per_sequence + 1
n_training = n_frames - n_validation - n_test
n_actions = actions.shape[1]
df = frames_per_sequence - 1
validation_set, validation_prev_outputs, validation_labels, validation_reward = movie[:n_validation + df],\
actions[:n_validation + df - 1], actions[df:n_validation + df], reward[df:n_validation + df]
train_set, train_prev_outputs, train_labels, train_reward = movie[n_validation:n_training + n_validation + df],\
actions[n_validation:n_training + n_validation + df - 1], actions[n_validation + df:n_training + n_validation + df],\
reward[n_validation + df:n_training + n_validation + df]
test_set, test_prev_outputs, test_labels, test_reward = movie[n_training + n_validation:],\
actions[n_validation + n_training:-1], actions[n_training + n_validation + df:], reward[n_training + n_validation + df:]
assert test_labels.shape[0] == n_test
assert test_reward.shape[0] == n_test
assert test_set.shape[0] == n_test + frames_per_sequence - 1
sequenced_validation = sequencify(validation_set, frames_per_sequence)
sequenced_validation_prev_outputs = sequencify(validation_prev_outputs, frames_per_sequence - 1)
sequenced_test = sequencify(test_set, frames_per_sequence)
sequenced_test_prev_outputs = sequencify(test_prev_outputs, frames_per_sequence - 1)
assert sequenced_validation.shape[0] == n_validation
assert sequenced_validation_prev_outputs.shape == (n_validation, n_actions, frames_per_sequence - 1)
assert sequenced_test.shape[0] == n_test
assert sequenced_test_prev_outputs.shape == (n_test, n_actions, frames_per_sequence - 1)
return (train_set, train_prev_outputs, train_labels, train_reward),\
(sequenced_validation, sequenced_validation_prev_outputs, validation_labels, validation_reward),\
(sequenced_test, sequenced_test_prev_outputs, test_labels, test_reward)
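# A small shape check for the splitting logic described in the docstring above
# (synthetic arrays; the sizes are placeholders chosen only to satisfy the
# length assertion at the top of make_datasets).
def _example_make_datasets_shapes():
    fps = 3
    movie = np.zeros((20, 8, 8), dtype=np.float32)
    actions = np.zeros((20, 4), dtype=np.float32)
    reward = np.zeros(20, dtype=np.float32)
    train, validation, test = make_datasets(movie, actions, reward,
                                            n_validation=4, n_test=4,
                                            frames_per_sequence=fps)
    # Training images are not sequencified: len(train[0]) == n_training + fps - 1.
    # Validation/test images are sequencified: validation[0].shape == (4, 8, 8, fps).
    return train, validation, test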
def accuracy(predictions, labels):
return (100.0 * np.sum(np.argmax(predictions, 1) == np.argmax(labels, 1))
/ predictions.shape[0])
def sequencify(frames, frames_per_sequence, idx=None, offset=0):
if idx is None:
idx = xrange(len(frames) - frames_per_sequence + 1)
return np.array([np.rollaxis(np.array([frames[j] for j in range(i + offset, i + offset + frames_per_sequence)]),
0, frames[0].ndim + 1) for i in idx])
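# Shape sketch: sequencify stacks frames_per_sequence consecutive items and
# rolls the sequence axis to the end (channels-last). For 2-D frames of shape
# (H, W) it returns (len(frames) - fps + 1, H, W, fps); for one-hot action
# vectors of shape (n_actions,) it returns (N, n_actions, fps).
def _example_sequencify_shapes():
    frames = np.zeros((10, 8, 8), dtype=np.float32)
    seqs = sequencify(frames, 4)
    # seqs.shape == (7, 8, 8, 4)
    return seqs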
def display_activators(seqs, predictions):
winners = np.argmax(predictions, axis=1)
for i in np.unique(winners):
activations = predictions[winners==i][:, i]
weights = activations - np.amin(activations)
weights /= np.amax(weights)
weights = weights ** 2
avg_seq = np.sum(weights * np.rollaxis(seqs[winners==i], 0, seqs.ndim), axis=-1) / np.sum(weights)
full = np.vstack(np.rollaxis(avg_seq, 2, 0))
cv2.imshow("%i activator" % i, np.flipud(cv2.resize((full-np.amin(full)) / (np.amax(full) - np.amin(full)),
None, None, 3., 3.)))
cv2.waitKey(100)
class Player(object):
def __init__(self, frames_per_sequence, im_size, mean_im, n_actions):
self._frame_mem = deque([], maxlen=frames_per_sequence)
self._action_mem = deque([], maxlen=frames_per_sequence)
self._im = np.zeros(im_size[::-1], dtype=np.float32)
self._mean_im = mean_im
self._n_actions = n_actions
def play(self, state, things, session, explore_epsilon=0.):
self._im = state_to_im(state, self._im)
self._im -= self._mean_im
self._frame_mem.append(self._im.copy())
if np.random.random() < explore_epsilon:
action = np.random.randint(self._n_actions)
elif len(self._frame_mem) == self._frame_mem.maxlen:
current_sequence = sequencify(np.asarray(self._frame_mem), len(self._frame_mem))
current_prev_outputs = sequencify(np.asarray(self._action_mem)[-(len(self._frame_mem) - 1):],
len(self._frame_mem) - 1)
feed_dict = {things['input']: current_sequence, things['prev_outputs_input']: current_prev_outputs}
prediction = session.run(things['prediction'], feed_dict=feed_dict)
assert prediction.shape == (1, self._n_actions)
action = np.argmax(prediction[0])
else:
action = np.random.randint(self._n_actions)
self._action_mem.append(np.eye(self._n_actions, dtype=np.float32)[action])
return action
def load_model(folder, instance_filename):
with open(os.path.join(folder, 'model.pickle'), 'r') as f:
model_dict = pickle.load(f)
things = make_learner(model_dict['frame_size'],
model_dict['frames_per_sequence'],
model_dict['n_actions'],
model_dict['frames_per_sequence'] - 1)
return things, model_dict
def learn_supervised(save=True):
frame_size = (32, 32)
frames_per_sequence = 6
n_training = 50000
n_validation = 5000
n_test = 5000
n_passes = 4
batch_size = 64
n_frames = n_training + n_validation + n_test
movie, actions, reward, _ = play(frame_size, n_frames + frames_per_sequence - 1)
print "Dataset win: %i, lose: %i" % (np.sum(reward > 0), np.sum(reward < 0))
mean_im = np.mean(movie, axis=0)
movie -= mean_im
n_actions = actions.shape[1]
things = make_learner(frame_size, frames_per_sequence, n_actions, frames_per_sequence - 1)
n_steps = n_training * n_passes / batch_size
t0_s=time.strftime("%c")
folder = '/home/ubuntu/lander/' + t0_s
os.mkdir(folder)
model_dict = dict(frame_size=frame_size, frames_per_sequence=frames_per_sequence,
n_actions=n_actions, mean_im=mean_im)
with open(os.path.join(folder, 'model.pickle'), 'w') as f:
pickle.dump(model_dict, f)
with tf.Session(graph=things['graph']) as session:
tf.initialize_all_variables().run()
print('Initialized')
train, validation, test = make_datasets(movie, actions, reward, n_validation, n_test, frames_per_sequence)
train_idx = np.random.permutation(n_training)
for step in range(n_steps):
offset = (step * batch_size) % (n_training - batch_size)
idx = train_idx[offset:(offset + batch_size)]
batch_data = sequencify(train[0], frames_per_sequence, idx)
batch_prev_outputs = sequencify(train[1], frames_per_sequence - 1, idx)
batch_labels = train[2][idx]
feed_dict = {things['input']: batch_data, things['prev_outputs_input']: batch_prev_outputs, things['output']: batch_labels}
session.run(things['learner'], feed_dict=feed_dict)
if (step % (n_steps / 10) == 0):
l, predictions = session.run(
[things['loss'], things['prediction']], feed_dict=feed_dict)
print('Minibatch loss at step %d/%d: %.3f, accuracy %.1f%%' % (step, n_steps, l, accuracy(predictions, batch_labels)))
feed_dict = {things['input']: validation[0], things['prev_outputs_input']: validation[1], things['output']: validation[2]}
l, predictions, = session.run([things['loss'], things['prediction']], feed_dict=feed_dict)
print('Validation loss %.3f, accuracy: %.1f%%' % (l, accuracy(predictions, validation[2])))
display_activators(validation[0], predictions)
if save:
things['saver'].save(session, os.path.join(folder, 'instance_%.5i' % step))
feed_dict = {things['input']: test[0], things['prev_outputs_input']: test[1]}
predictions, = session.run([things['prediction']], feed_dict=feed_dict)
print('Test accuracy: %.1f%%' % accuracy(predictions, test[2]))
player = Player(frames_per_sequence, frame_size, mean_im, n_actions)
print "Let's play"
_, _, reward, _ = play(frame_size, 2000, display=True, player=lambda state: player.play(state, things, session))
print "Win: %i, lose: %i" % (np.sum(reward > 0), np.sum(reward < 0))
def load_and_play(folder, instance_filename):
things, model_dict = load_model(folder, instance_filename)
with tf.Session(graph=things['graph']) as session:
things['saver'].restore(session, os.path.join(folder, instance_filename))
player = Player(model_dict['frames_per_sequence'], model_dict['frame_size'], model_dict['mean_im'], model_dict['n_actions'])
print "Let's play"
_, _, reward, _ = play(model_dict['frame_size'], 2000, display=True, player=lambda state: player.play(state, things, session))
print "Win: %i, lose: %i" % (np.sum(reward > 0), np.sum(reward < 0))
def load_and_reinforce(folder, instance_filename):
things, model_dict = load_model(folder, instance_filename)
frames_per_sequence = model_dict['frames_per_sequence']
frame_size = model_dict['frame_size']
n_actions = model_dict['n_actions']
mean_im = model_dict['mean_im']
mem_size = 100000
frame_mem = deque([], mem_size)
action_mem = deque([], mem_size)
reward_mem = deque([], mem_size)
n_validation = 500
replay_size = 31
gamma = 0.95
explore_epsilon = 0.1
n_training = 500000
n_training_per_epoch = 1000
state = None
player = Player(frames_per_sequence, frame_size, mean_im, n_actions)
with tf.Session(graph=things['graph']) as session:
things['saver'].restore(session, os.path.join(folder, instance_filename))
tf.get_variable_scope().reuse_variables()
w = tf.get_variable("flat_out/weights")
b = tf.get_variable("flat_out/biases")
w.assign(w / 1000.).op.run()
b.assign(b / 1000.).op.run()
print('Initialized')
movie, actions, reward, _ = play(frame_size, 10 + n_validation + 10 + frames_per_sequence - 1, player=lambda state:player.play(state, things, session))
        _, validation, test = make_datasets(movie, actions, reward, n_validation, 10, frames_per_sequence)  # keep the test split, it is used after the training loop
for iteration in range(n_training):
movie, actions, reward, state = play(frame_size, 1,
player=lambda state:player.play(state, things, session, explore_epsilon), state=state, display=True)
frame_mem.append(movie[0])
action_mem.append(actions[0])
reward_mem.append(reward[0])
if len(frame_mem) < 500 * replay_size + frames_per_sequence + 1:
continue
idx = np.random.randint(0, len(frame_mem) - frames_per_sequence - 1, replay_size)
# idx[-1] = len(frame_mem) - frames_per_sequence - 1
batch_data = sequencify(frame_mem, frames_per_sequence, idx)
batch_prev_outputs = sequencify(action_mem, frames_per_sequence - 1, idx)
batch_next_data = sequencify(frame_mem, frames_per_sequence, idx, offset=1)
batch_next_prev_outputs = sequencify(action_mem, frames_per_sequence - 1, idx, offset=1)
batch_actions = np.array([action_mem[i] for i in idx + frames_per_sequence - 1])
batch_rewards = np.array([reward_mem[i] for i in idx + frames_per_sequence - 1])
feed_dict = {things['input']: batch_data, things['prev_outputs_input']: batch_prev_outputs}
current_q = session.run(things['logits'], feed_dict=feed_dict) # get the current Q values
feed_dict_next = {things['input']: batch_next_data, things['prev_outputs_input']: batch_next_prev_outputs}
next_q = session.run(things['logits'], feed_dict=feed_dict_next) # get the next Q values
next_values = (batch_rewards == 0) * gamma * np.amax(next_q, axis=1) + batch_rewards # only non-terminal (reward=0) get Q values as part of the value
current_q[np.arange(current_q.shape[0]), np.argmax(batch_actions, axis=1)] = next_values
feed_dict = {things['input']: batch_data, things['prev_outputs_input']: batch_prev_outputs, things['output']: current_q}
session.run(things['q_learner'], feed_dict=feed_dict)
if iteration % n_training_per_epoch == 0:
epoch = iteration / n_training_per_epoch
l, modified_q = session.run(
[things['q_loss'], things['logits']], feed_dict=feed_dict)
print('Epoch %i q_loss, q_avg: %.4f, %.4f' % (epoch, l, np.mean(np.amax(modified_q, axis=1))))
feed_dict = {things['input']: validation[0], things['prev_outputs_input']: validation[1]}
q = session.run(things['logits'], feed_dict=feed_dict)
print('Validation q_avg: %.6f' % np.mean(np.amax(q, axis=1)))
epoch_reward = np.array([reward_mem[i] for i in range(len(reward_mem) - n_training_per_epoch, len(reward_mem))])
print "Epoch win: %i, lose: %i" % (np.sum(epoch_reward > 0), np.sum(epoch_reward < 0))
# if epoch % 100 == 0:
# things['saver'].save(session, os.path.join(folder, instance_filename, '_reinforce_epoch_%.2i' % epoch))
feed_dict = {things['input']: test[0], things['prev_outputs_input']: test[1]}
q = session.run(things['logits'], feed_dict=feed_dict)
print('Test q_avg: %.2f' % np.mean(np.amax(q, axis=1)))
print "Let's play"
_, _, reward, _ = play(frame_size, 2000, display=True, player=lambda state: player.play(state, things, session))
print "Win: %i, lose: %i" % (np.sum(reward > 0), np.sum(reward < 0))
def gsum(values, shifts):
total = []
real_lengths = [len(v) + s for v, s in zip(values, shifts)]
max_length = max(real_lengths)
carry = 0
i = 0
while carry != 0 or i < max_length:
for j in range(len(values)):
if i >= shifts[j] and i < real_lengths[j]:
carry += values[j][i - shifts[j]]
total.append(carry % 10)
carry /= 10
i += 1
return total
def gprod(l1, l2):
n1 = len(l1)
n2 = len(l2)
if n1 == 0 or n2 == 0:
return []
    if n1 == 1 and n2 == 1:
m = l1[0] * l2[0]
if m >= 10:
return [m % 10, m / 10]
else:
return [m]
else:
a, b = l1[n1 / 2:], l1[:n1 / 2]
c, d = l2[n2 / 2:], l2[:n2 / 2]
return gsum([gprod(a, c), gprod(b, c), gprod(a, d), gprod(b, d)], [n1 / 2 + n2 / 2, n2 / 2, n1 / 2, 0])
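# Digit lists are little-endian (least significant digit first), so [3, 5] is
# 53 and [2, 4] is 42. gprod splits each number in half, multiplies the four
# cross terms recursively and recombines them with gsum, which adds digit
# lists after shifting each one by the given number of decimal places. Like
# the rest of this script, the integer divisions assume Python 2.
def _example_gprod():
    assert gprod([3, 5], [2, 4]) == [6, 2, 2, 2]  # 53 * 42 == 2226
    return gprod([3, 5], [2, 4])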
if __name__ == "__main__":
# learn_supervised()
# load_and_play('/home/ubuntu/lander/Thu Mar 31 10:49:03 2016', 'instance_12500')
load_and_reinforce('/home/ubuntu/lander/Thu Mar 31 11:51:34 2016', 'instance_03120')
# play((80, 80), 100000, display=False)
# print gprod([3,5], [2,4])
|
|
# -*- coding: utf-8 -*-
from ethereum.slogging import getLogger
from ethereum.utils import big_endian_to_int
from raiden.encoding import messages, signing
from raiden.encoding.format import buffer_for
from raiden.encoding.signing import recover_publickey
from raiden.utils import publickey_to_address, sha3, ishash, pex
from raiden.transfer.state import BalanceProofState
__all__ = (
'Ack',
'Ping',
'SecretRequest',
'Secret',
'DirectTransfer',
'Lock',
'LockedTransfer',
'MediatedTransfer',
'RefundTransfer',
)
log = getLogger(__name__) # pylint: disable=invalid-name
EMPTY_MERKLE_ROOT = b'\x00' * 32
def assert_envelope_values(nonce, channel, transferred_amount, locksroot):
if nonce <= 0:
raise ValueError('nonce cannot be zero or negative')
if nonce >= 2 ** 64:
raise ValueError('nonce is too large')
if len(channel) != 20:
raise ValueError('channel is an invalid address')
if transferred_amount < 0:
raise ValueError('transferred_amount cannot be negative')
if transferred_amount >= 2 ** 256:
raise ValueError('transferred_amount is too large')
if len(locksroot) != 32:
raise ValueError('locksroot must have length 32')
def assert_transfer_values(identifier, token, recipient):
if identifier < 0:
raise ValueError('identifier cannot be negative')
if identifier >= 2 ** 64:
raise ValueError('identifier is too large')
if len(token) != 20:
raise ValueError('token is an invalid address')
if len(recipient) != 20:
raise ValueError('recipient is an invalid address')
def decode(data):
klass = CMDID_TO_CLASS[data[0]]
return klass.decode(data)
class MessageHashable(object):
pass
class Message(MessageHashable):
# pylint: disable=no-member
@property
def hash(self):
packed = self.packed()
return sha3(packed.data)
def __eq__(self, other):
return isinstance(other, self.__class__) and self.hash == other.hash
def __hash__(self):
return big_endian_to_int(self.hash)
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
packed = self.packed()
return '<{klass} [msghash={msghash}]>'.format(
klass=self.__class__.__name__,
msghash=pex(sha3(packed.data)),
)
@classmethod
def decode(cls, packed):
packed = messages.wrap(packed)
return cls.unpack(packed)
def encode(self):
packed = self.packed()
return bytes(packed.data)
def packed(self):
klass = messages.CMDID_MESSAGE[self.cmdid]
data = buffer_for(klass)
data[0] = self.cmdid
packed = klass(data)
self.pack(packed)
return packed
class SignedMessage(Message):
    # Signing is a bit problematic: we need to pack the data to sign, but the
    # current API assumes that signing is called first. This can be improved
    # by changing the order to pack first, then sign.
def __init__(self):
super(SignedMessage, self).__init__()
self.signature = b''
self.sender = b''
def sign(self, private_key, node_address):
""" Sign message using `private_key`. """
packed = self.packed()
field = type(packed).fields_spec[-1]
assert field.name == 'signature', 'signature is not the last field'
# this slice must be from the end of the buffer
message_data = packed.data[:-field.size_bytes]
signature = signing.sign(message_data, private_key)
packed.signature = signature
self.sender = node_address
self.signature = signature
@classmethod
def decode(cls, data):
packed = messages.wrap(data)
if packed is None:
return
# signature must be at the end
message_type = type(packed)
signature = message_type.fields_spec[-1]
assert signature.name == 'signature', 'signature is not the last field'
data_that_was_signed = data[:-signature.size_bytes]
message_signature = data[-signature.size_bytes:]
try:
publickey = recover_publickey(data_that_was_signed, message_signature)
except ValueError:
# raised if the signature has the wrong length
log.error('invalid signature')
return
except TypeError as e:
# raised if the PublicKey instantiation failed
log.error('invalid key data: {}'.format(e.message))
return
except Exception as e: # pylint: disable=broad-except
# secp256k1 is using bare Exception classes: raised if the recovery failed
log.error('error while recovering pubkey: {}'.format(e.message))
return
message = cls.unpack(packed) # pylint: disable=no-member
message.sender = publickey_to_address(publickey)
return message
class EnvelopeMessage(SignedMessage):
def __init__(self):
super(EnvelopeMessage, self).__init__()
self.nonce = 0
self.transferred_amount = 0
self.locksroot = EMPTY_MERKLE_ROOT
self.channel = b''
@property
def message_hash(self):
packed = self.packed()
klass = type(packed)
field = klass.fields_spec[-1]
assert field.name == 'signature', 'signature is not the last field'
data = packed.data
message_data = data[:-field.size_bytes]
message_hash = sha3(message_data)
return message_hash
def sign(self, private_key, node_address):
packed = self.packed()
klass = type(packed)
field = klass.fields_spec[-1]
assert field.name == 'signature', 'signature is not the last field'
data = packed.data
nonce = klass.get_bytes_from(data, 'nonce')
transferred_amount = klass.get_bytes_from(data, 'transferred_amount')
locksroot = klass.get_bytes_from(data, 'locksroot')
channel_address = klass.get_bytes_from(data, 'channel')
message_hash = self.message_hash
data_to_sign = nonce + transferred_amount + locksroot + channel_address + message_hash
signature = signing.sign(data_to_sign, private_key)
packed.signature = signature
self.sender = node_address
self.signature = signature
@classmethod
def decode(cls, data):
packed = messages.wrap(data)
if packed is None:
return
# signature must be at the end
message_type = type(packed)
signature = message_type.fields_spec[-1]
assert signature.name == 'signature', 'signature is not the last field'
message_data = data[:-signature.size_bytes]
message_signature = data[-signature.size_bytes:]
message_hash = sha3(message_data)
nonce = message_type.get_bytes_from(data, 'nonce')
transferred_amount = message_type.get_bytes_from(data, 'transferred_amount')
locksroot = message_type.get_bytes_from(data, 'locksroot')
channel_address = message_type.get_bytes_from(data, 'channel')
data_that_was_signed = (
nonce + transferred_amount + locksroot + channel_address + message_hash
)
try:
publickey = recover_publickey(data_that_was_signed, message_signature)
except ValueError:
# raised if the signature has the wrong length
log.error('invalid signature')
return
except TypeError as e:
# raised if the PublicKey instantiation failed
log.error('invalid key data: {}'.format(e.message))
return
except Exception as e: # pylint: disable=broad-except
# secp256k1 is using bare Exception classes: raised if the recovery failed
log.error('error while recovering pubkey: {}'.format(e.message))
return
message = cls.unpack(packed) # pylint: disable=no-member
message.sender = publickey_to_address(publickey)
return message
def to_balanceproof(self):
return BalanceProofState(
self.nonce,
self.transferred_amount,
self.locksroot,
self.channel,
self.message_hash,
self.signature,
)
class Ack(Message):
""" All accepted messages should be confirmed by an `Ack` which echoes the
    original message's hash.
    We don't sign Acks because the attack vector can be mitigated and to
    speed things up.
"""
cmdid = messages.ACK
def __init__(self, sender, echo):
super(Ack, self).__init__()
self.sender = sender
self.echo = echo
@staticmethod
def unpack(packed):
return Ack(
packed.sender,
packed.echo,
)
def pack(self, packed):
packed.echo = self.echo
packed.sender = self.sender
def __repr__(self):
return '<{} [echohash:{}]>'.format(
self.__class__.__name__,
pex(self.echo),
)
class Ping(SignedMessage):
""" Ping, should be responded by an Ack message. """
cmdid = messages.PING
def __init__(self, nonce):
super(Ping, self).__init__()
self.nonce = nonce
@staticmethod
def unpack(packed):
ping = Ping(packed.nonce)
ping.signature = packed.signature
return ping
def pack(self, packed):
packed.nonce = self.nonce
packed.signature = self.signature
class SecretRequest(SignedMessage):
""" Requests the secret which unlocks a hashlock. """
cmdid = messages.SECRETREQUEST
def __init__(self, identifier, hashlock, amount):
super(SecretRequest, self).__init__()
self.identifier = identifier
self.hashlock = hashlock
self.amount = amount
def __repr__(self):
return '<{} [hashlock:{} amount:{} hash:{}]>'.format(
self.__class__.__name__,
pex(self.hashlock),
self.amount,
pex(self.hash),
)
@staticmethod
def unpack(packed):
secret_request = SecretRequest(packed.identifier, packed.hashlock, packed.amount)
secret_request.signature = packed.signature
return secret_request
def pack(self, packed):
packed.identifier = self.identifier
packed.hashlock = self.hashlock
packed.amount = self.amount
packed.signature = self.signature
class Secret(EnvelopeMessage):
""" Message used to do state changes on a partner Raiden Channel.
Locksroot changes need to be synchronized among both participants, the
protocol is for only the side unlocking to send the Secret message allowing
the other party to withdraw.
"""
cmdid = messages.SECRET
def __init__(
self,
identifier,
nonce,
channel,
transferred_amount,
locksroot,
secret):
super(Secret, self).__init__()
assert_envelope_values(
nonce,
channel,
transferred_amount,
locksroot,
)
if identifier < 0:
raise ValueError('identifier cannot be negative')
if identifier >= 2 ** 64:
raise ValueError('identifier is too large')
if len(secret) != 32:
raise ValueError('secret must have 32 bytes')
self.identifier = identifier
self.secret = secret
self.nonce = nonce
self.channel = channel
self.transferred_amount = transferred_amount
self.locksroot = locksroot
self._hashlock = None
def __repr__(self):
return (
'<{} [channel:{} nonce:{} transferred_amount:{} locksroot:{} '
'hash:{} hashlock:{}]>'
).format(
self.__class__.__name__,
pex(self.channel),
self.nonce,
self.transferred_amount,
pex(self.locksroot),
pex(self.hash),
pex(self.hashlock),
)
@property
def hashlock(self):
if self._hashlock is None:
self._hashlock = sha3(self.secret)
return self._hashlock
@staticmethod
def unpack(packed):
secret = Secret(
packed.identifier,
packed.nonce,
packed.channel,
packed.transferred_amount,
packed.locksroot,
packed.secret,
)
secret.signature = packed.signature
return secret
def pack(self, packed):
packed.identifier = self.identifier
packed.nonce = self.nonce
packed.channel = self.channel
packed.transferred_amount = self.transferred_amount
packed.locksroot = self.locksroot
packed.secret = self.secret
packed.signature = self.signature
class RevealSecret(SignedMessage):
"""Message used to reveal a secret to party known to have interest in it.
This message is not sufficient for state changes in the raiden Channel, the
reason is that a node participating in split transfer or in both mediated
transfer for an exchange might can reveal the secret to it's partners, but
that must not update the internal channel state.
"""
cmdid = messages.REVEALSECRET
def __init__(self, secret):
super(RevealSecret, self).__init__()
self.secret = secret
self._hashlock = None
def __repr__(self):
return '<{} [hashlock:{} hash:{}]>'.format(
self.__class__.__name__,
pex(self.hashlock),
pex(self.hash),
)
@property
def hashlock(self):
if self._hashlock is None:
self._hashlock = sha3(self.secret)
return self._hashlock
@staticmethod
def unpack(packed):
reveal_secret = RevealSecret(packed.secret)
reveal_secret.signature = packed.signature
return reveal_secret
def pack(self, packed):
packed.secret = self.secret
packed.signature = self.signature
class DirectTransfer(EnvelopeMessage):
""" A direct token exchange, used when both participants have a previously
opened channel.
Signs the unidirectional settled `balance` of `token` to `recipient` plus
locked transfers.
Settled refers to the inclusion of formerly locked amounts.
Locked amounts are not included in the balance yet, but represented
by the `locksroot`.
Args:
        nonce: A sequential nonce, used to protect against replay attacks and
to give a total order for the messages. This nonce is per
participant, not shared.
token: The address of the token being exchanged in the channel.
transferred_amount: The total amount of token that was transferred to
the channel partner. This value is monotonically increasing and can
            be larger than a channel's deposit, since the channels are
            bidirectional.
recipient: The address of the raiden node participating in the channel.
locksroot: The root of a merkle tree which records the current
outstanding locks.
"""
cmdid = messages.DIRECTTRANSFER
def __init__(
self,
identifier,
nonce,
token,
channel,
transferred_amount,
recipient,
locksroot):
assert_envelope_values(
nonce,
channel,
transferred_amount,
locksroot,
)
assert_transfer_values(identifier, token, recipient)
super(DirectTransfer, self).__init__()
self.identifier = identifier
self.nonce = nonce
self.token = token
self.channel = channel
self.transferred_amount = transferred_amount #: total amount of token sent to partner
self.recipient = recipient #: partner's address
self.locksroot = locksroot #: the merkle root that represent all pending locked transfers
@staticmethod
def unpack(packed):
transfer = DirectTransfer(
packed.identifier,
packed.nonce,
packed.token,
packed.channel,
packed.transferred_amount,
packed.recipient,
packed.locksroot,
)
transfer.signature = packed.signature
return transfer
def pack(self, packed):
packed.identifier = self.identifier
packed.nonce = self.nonce
packed.token = self.token
packed.channel = self.channel
packed.transferred_amount = self.transferred_amount
packed.recipient = self.recipient
packed.locksroot = self.locksroot
packed.signature = self.signature
def __repr__(self):
representation = (
'<{} [channel:{} nonce:{} transferred_amount:{} locksroot:{} '
'hash:{} id:{}]>'
).format(
self.__class__.__name__,
pex(self.channel),
self.nonce,
self.transferred_amount,
pex(self.locksroot),
pex(self.hash),
self.identifier,
)
return representation
class Lock(MessageHashable):
""" Describes a locked `amount`.
Args:
amount: Amount of the token being transferred.
expiration: Highest block_number until which the transfer can be settled
hashlock: Hashed secret `sha3(secret)` used to register the transfer,
the real `secret` is necessary to release the locked amount.
"""
# Lock extends MessageHashable but it is not a message, it is a
# serializable structure that is reused in some messages
def __init__(self, amount, expiration, hashlock):
# guarantee that `amount` can be serialized using the available bytes
# in the fixed length format
if amount < 0:
raise ValueError('amount {} needs to be positive'.format(amount))
if amount >= 2 ** 256:
raise ValueError('amount {} is too large'.format(amount))
if expiration < 0:
            raise ValueError('expiration {} needs to be positive'.format(expiration))
        if expiration >= 2 ** 256:
            raise ValueError('expiration {} is too large'.format(expiration))
assert ishash(hashlock)
self.amount = amount
self.expiration = expiration
self.hashlock = hashlock
self._asbytes = None
@property
def as_bytes(self):
if self._asbytes is None:
packed = messages.Lock(buffer_for(messages.Lock))
packed.amount = self.amount
packed.expiration = self.expiration
packed.hashlock = self.hashlock
self._asbytes = packed.data
# convert bytearray to bytes
return bytes(self._asbytes)
@classmethod
def from_bytes(cls, serialized):
packed = messages.Lock(serialized)
return cls(
packed.amount,
packed.expiration,
packed.hashlock,
)
def __eq__(self, other):
if isinstance(other, Lock):
return self.as_bytes == other.as_bytes
return False
def __ne__(self, other):
return not self.__eq__(other)
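# Illustrative sketch (not part of the original module): a Lock round-trips
# through its packed byte form without loss. This assumes `sha3` is the
# module-level hashing helper used by the messages above and that it returns
# a 32-byte digest accepted by `ishash`.
def _example_lock_roundtrip():  # pragma: no cover - documentation sketch only
    hashlock = sha3(b'secret')
    lock = Lock(amount=100, expiration=50, hashlock=hashlock)
    assert Lock.from_bytes(lock.as_bytes) == lock
    return lock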
class LockedTransfer(EnvelopeMessage):
""" A transfer which signs that the partner can claim `locked_amount` if
she knows the secret to `hashlock`.
The token amount is implicitly represented in the `locksroot` and won't be
reflected in the `transferred_amount` until the secret is revealed.
    This message signs, to Carol, that she can claim locked_amount from Bob if
    she knows the secret to hashlock.
    If the secret to hashlock becomes public, but Bob fails to sign Carol a
    netted balance with an updated locksroot which reflects the deletion of the
    lock, then Carol can request settlement on chain by providing:
        any signed [nonce, token, balance, recipient, locksroot, ...]
        along with a merkle proof from locksroot to the not yet netted formerly
        locked amount
"""
def __init__(
self,
identifier,
nonce,
token,
channel,
transferred_amount,
recipient,
locksroot,
lock):
super(LockedTransfer, self).__init__()
assert_envelope_values(
nonce,
channel,
transferred_amount,
locksroot,
)
assert_transfer_values(identifier, token, recipient)
self.identifier = identifier
self.nonce = nonce
self.token = token
self.channel = channel
self.transferred_amount = transferred_amount
self.recipient = recipient
self.locksroot = locksroot
self.lock = lock
def to_mediatedtransfer(self, target, initiator='', fee=0):
return MediatedTransfer(
self.identifier,
self.nonce,
self.token,
self.channel,
self.transferred_amount,
self.recipient,
self.locksroot,
self.lock,
target,
initiator,
fee,
)
def to_refundtransfer(self, target, initiator='', fee=0):
return RefundTransfer(
self.identifier,
self.nonce,
self.token,
self.channel,
self.transferred_amount,
self.recipient,
self.locksroot,
self.lock,
target,
initiator,
fee,
)
@staticmethod
def unpack(packed):
lock = Lock(
packed.amount,
packed.expiration,
packed.hashlock,
)
locked_transfer = LockedTransfer(
packed.identifier,
packed.nonce,
packed.token,
packed.channel,
packed.transferred_amount,
packed.recipient,
packed.locksroot,
lock,
)
locked_transfer.signature = packed.signature
return locked_transfer
def pack(self, packed):
packed.identifier = self.identifier
packed.nonce = self.nonce
packed.token = self.token
packed.channel = self.channel
packed.transferred_amount = self.transferred_amount
packed.recipient = self.recipient
packed.locksroot = self.locksroot
lock = self.lock
packed.amount = lock.amount
packed.expiration = lock.expiration
packed.hashlock = lock.hashlock
packed.signature = self.signature
class MediatedTransfer(LockedTransfer):
"""
A MediatedTransfer has a `target` address to which a chain of transfers shall
    be established. Here the `hashlock` is mandatory.
    `fee` is the remaining fee a recipient shall use to complete the mediated transfer.
    The recipient can deduct its own fee from the amount and lower `fee` to the remaining fee.
    Just as the recipient can fail to forward at all, or forward a different amount,
    it can also deduct too high a fee, but doing so makes completion of the transfer unlikely.
    The initiator of a mediated transfer will calculate fees based on the likely fees along the
    path. Note that it cannot determine the path, as it does not know which nodes are available.
Initial `amount` should be expected received amount + fees.
Fees are always payable by the initiator.
`initiator` is the party that knows the secret to the `hashlock`
"""
cmdid = messages.MEDIATEDTRANSFER
def __init__(
self,
identifier,
nonce,
token,
channel,
transferred_amount,
recipient,
locksroot,
lock,
target,
initiator,
fee=0):
if len(target) != 20:
raise ValueError('target is an invalid address')
if len(initiator) != 20:
raise ValueError('initiator is an invalid address')
if fee >= 2 ** 256:
raise ValueError('fee is too large')
super(MediatedTransfer, self).__init__(
identifier,
nonce,
token,
channel,
transferred_amount,
recipient,
locksroot,
lock,
)
self.target = target
self.fee = fee
self.initiator = initiator
def __repr__(self):
representation = (
'<{} [channel:{} nonce:{} transferred_amount:{} locksroot:{} '
'hash:{} id:{} hashlock:{} expiration:{} amount:{}]>'
).format(
self.__class__.__name__,
pex(self.channel),
self.nonce,
self.transferred_amount,
pex(self.locksroot),
pex(self.hash),
self.identifier,
pex(self.lock.hashlock),
self.lock.expiration,
self.lock.amount,
)
return representation
@staticmethod
def unpack(packed):
lock = Lock(
packed.amount,
packed.expiration,
packed.hashlock,
)
mediated_transfer = MediatedTransfer(
packed.identifier,
packed.nonce,
packed.token,
packed.channel,
packed.transferred_amount,
packed.recipient,
packed.locksroot,
lock,
packed.target,
packed.initiator,
packed.fee,
)
mediated_transfer.signature = packed.signature
return mediated_transfer
def pack(self, packed):
packed.identifier = self.identifier
packed.nonce = self.nonce
packed.token = self.token
packed.channel = self.channel
packed.transferred_amount = self.transferred_amount
packed.recipient = self.recipient
packed.locksroot = self.locksroot
packed.target = self.target
packed.initiator = self.initiator
packed.fee = self.fee
lock = self.lock
packed.amount = lock.amount
packed.expiration = lock.expiration
packed.hashlock = lock.hashlock
packed.signature = self.signature
class RefundTransfer(MediatedTransfer):
""" A special MediatedTransfer sent from a payee to a payer indicating that
no route is available, this transfer will effectively refund the payer the
transfer amount allowing him to try a new path to complete the transfer.
"""
cmdid = messages.REFUNDTRANSFER
@staticmethod
def unpack(packed):
lock = Lock(
packed.amount,
packed.expiration,
packed.hashlock,
)
locked_transfer = RefundTransfer(
packed.identifier,
packed.nonce,
packed.token,
packed.channel,
packed.transferred_amount,
packed.recipient,
packed.locksroot,
lock,
packed.target,
packed.initiator,
packed.fee,
)
locked_transfer.signature = packed.signature
return locked_transfer
CMDID_TO_CLASS = {
messages.ACK: Ack,
messages.PING: Ping,
messages.SECRETREQUEST: SecretRequest,
messages.SECRET: Secret,
messages.REVEALSECRET: RevealSecret,
messages.DIRECTTRANSFER: DirectTransfer,
messages.MEDIATEDTRANSFER: MediatedTransfer,
messages.REFUNDTRANSFER: RefundTransfer,
}
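# Illustrative sketch (not part of the original module): CMDID_TO_CLASS allows
# dispatching a packed message to the right ``unpack`` implementation based on
# its command id. The `packed` argument is assumed to be one of the packed wire
# structures used by the ``unpack`` methods above, exposing a `cmdid` field.
def _example_dispatch_unpack(packed):  # pragma: no cover - documentation sketch only
    klass = CMDID_TO_CLASS[packed.cmdid]
    return klass.unpack(packed)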
|
|
# coding=utf-8
r"""
This code was generated by
\ / _ _ _| _ _
| (_)\/(_)(_|\/| |(/_ v1.0.0
/ /
"""
from twilio.base import deserialize
from twilio.base import values
from twilio.base.instance_context import InstanceContext
from twilio.base.instance_resource import InstanceResource
from twilio.base.list_resource import ListResource
from twilio.base.page import Page
class TranscriptionList(ListResource):
def __init__(self, version, account_sid):
"""
Initialize the TranscriptionList
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionList
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionList
"""
super(TranscriptionList, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, }
self._uri = '/Accounts/{account_sid}/Transcriptions.json'.format(**self._solution)
def stream(self, limit=None, page_size=None):
"""
Streams TranscriptionInstance records from the API as a generator stream.
This operation lazily loads records as efficiently as possible until the limit
is reached.
The results are returned as a generator, so this operation is memory efficient.
:param int limit: Upper limit for the number of records to return. stream()
guarantees to never return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, stream() will attempt to read the
limit with the most efficient page size, i.e. min(limit, 1000)
:returns: Generator that will yield up to limit results
:rtype: list[twilio.rest.api.v2010.account.transcription.TranscriptionInstance]
"""
limits = self._version.read_limits(limit, page_size)
page = self.page(page_size=limits['page_size'], )
return self._version.stream(page, limits['limit'])
def list(self, limit=None, page_size=None):
"""
Lists TranscriptionInstance records from the API as a list.
Unlike stream(), this operation is eager and will load `limit` records into
memory before returning.
:param int limit: Upper limit for the number of records to return. list() guarantees
never to return more than limit. Default is no limit
:param int page_size: Number of records to fetch per request, when not set will use
the default value of 50 records. If no page_size is defined
but a limit is defined, list() will attempt to read the limit
with the most efficient page size, i.e. min(limit, 1000)
        :returns: list that will contain up to limit results
:rtype: list[twilio.rest.api.v2010.account.transcription.TranscriptionInstance]
"""
return list(self.stream(limit=limit, page_size=page_size, ))
def page(self, page_token=values.unset, page_number=values.unset,
page_size=values.unset):
"""
Retrieve a single page of TranscriptionInstance records from the API.
Request is executed immediately
:param str page_token: PageToken provided by the API
:param int page_number: Page Number, this value is simply for client state
:param int page_size: Number of records to return, defaults to 50
:returns: Page of TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionPage
"""
data = values.of({'PageToken': page_token, 'Page': page_number, 'PageSize': page_size, })
response = self._version.page(method='GET', uri=self._uri, params=data, )
return TranscriptionPage(self._version, response, self._solution)
def get_page(self, target_url):
"""
Retrieve a specific page of TranscriptionInstance records from the API.
Request is executed immediately
:param str target_url: API-generated URL for the requested results page
:returns: Page of TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionPage
"""
response = self._version.domain.twilio.request(
'GET',
target_url,
)
return TranscriptionPage(self._version, response, self._solution)
def get(self, sid):
"""
Constructs a TranscriptionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
return TranscriptionContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def __call__(self, sid):
"""
Constructs a TranscriptionContext
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
return TranscriptionContext(self._version, account_sid=self._solution['account_sid'], sid=sid, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TranscriptionList>'
class TranscriptionPage(Page):
def __init__(self, version, response, solution):
"""
Initialize the TranscriptionPage
:param Version version: Version that contains the resource
:param Response response: Response from the API
:param account_sid: The SID of the Account that created the resource
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionPage
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionPage
"""
super(TranscriptionPage, self).__init__(version, response)
# Path Solution
self._solution = solution
def get_instance(self, payload):
"""
Build an instance of TranscriptionInstance
:param dict payload: Payload response from the API
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
return TranscriptionInstance(self._version, payload, account_sid=self._solution['account_sid'], )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
return '<Twilio.Api.V2010.TranscriptionPage>'
class TranscriptionContext(InstanceContext):
def __init__(self, version, account_sid, sid):
"""
Initialize the TranscriptionContext
:param Version version: Version that contains the resource
:param account_sid: The SID of the Account that created the resource to fetch
:param sid: The unique string that identifies the resource
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionContext
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
super(TranscriptionContext, self).__init__(version)
# Path Solution
self._solution = {'account_sid': account_sid, 'sid': sid, }
self._uri = '/Accounts/{account_sid}/Transcriptions/{sid}.json'.format(**self._solution)
def fetch(self):
"""
Fetch the TranscriptionInstance
:returns: The fetched TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
payload = self._version.fetch(method='GET', uri=self._uri, )
return TranscriptionInstance(
self._version,
payload,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
def delete(self):
"""
Deletes the TranscriptionInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._version.delete(method='DELETE', uri=self._uri, )
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.TranscriptionContext {}>'.format(context)
class TranscriptionInstance(InstanceResource):
class Status(object):
IN_PROGRESS = "in-progress"
COMPLETED = "completed"
FAILED = "failed"
def __init__(self, version, payload, account_sid, sid=None):
"""
Initialize the TranscriptionInstance
:returns: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
super(TranscriptionInstance, self).__init__(version)
# Marshaled Properties
self._properties = {
'account_sid': payload.get('account_sid'),
'api_version': payload.get('api_version'),
'date_created': deserialize.rfc2822_datetime(payload.get('date_created')),
'date_updated': deserialize.rfc2822_datetime(payload.get('date_updated')),
'duration': payload.get('duration'),
'price': deserialize.decimal(payload.get('price')),
'price_unit': payload.get('price_unit'),
'recording_sid': payload.get('recording_sid'),
'sid': payload.get('sid'),
'status': payload.get('status'),
'transcription_text': payload.get('transcription_text'),
'type': payload.get('type'),
'uri': payload.get('uri'),
}
# Context
self._context = None
self._solution = {'account_sid': account_sid, 'sid': sid or self._properties['sid'], }
@property
def _proxy(self):
"""
Generate an instance context for the instance, the context is capable of
performing various actions. All instance actions are proxied to the context
:returns: TranscriptionContext for this TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionContext
"""
if self._context is None:
self._context = TranscriptionContext(
self._version,
account_sid=self._solution['account_sid'],
sid=self._solution['sid'],
)
return self._context
@property
def account_sid(self):
"""
:returns: The SID of the Account that created the resource
:rtype: unicode
"""
return self._properties['account_sid']
@property
def api_version(self):
"""
:returns: The API version used to create the transcription
:rtype: unicode
"""
return self._properties['api_version']
@property
def date_created(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was created
:rtype: datetime
"""
return self._properties['date_created']
@property
def date_updated(self):
"""
:returns: The RFC 2822 date and time in GMT that the resource was last updated
:rtype: datetime
"""
return self._properties['date_updated']
@property
def duration(self):
"""
:returns: The duration of the transcribed audio in seconds.
:rtype: unicode
"""
return self._properties['duration']
@property
def price(self):
"""
:returns: The charge for the transcription
:rtype: unicode
"""
return self._properties['price']
@property
def price_unit(self):
"""
:returns: The currency in which price is measured
:rtype: unicode
"""
return self._properties['price_unit']
@property
def recording_sid(self):
"""
:returns: The SID that identifies the transcription's recording
:rtype: unicode
"""
return self._properties['recording_sid']
@property
def sid(self):
"""
:returns: The unique string that identifies the resource
:rtype: unicode
"""
return self._properties['sid']
@property
def status(self):
"""
:returns: The status of the transcription
:rtype: TranscriptionInstance.Status
"""
return self._properties['status']
@property
def transcription_text(self):
"""
:returns: The text content of the transcription.
:rtype: unicode
"""
return self._properties['transcription_text']
@property
def type(self):
"""
:returns: The transcription type
:rtype: unicode
"""
return self._properties['type']
@property
def uri(self):
"""
:returns: The URI of the resource, relative to `https://api.twilio.com`
:rtype: unicode
"""
return self._properties['uri']
def fetch(self):
"""
Fetch the TranscriptionInstance
:returns: The fetched TranscriptionInstance
:rtype: twilio.rest.api.v2010.account.transcription.TranscriptionInstance
"""
return self._proxy.fetch()
def delete(self):
"""
Deletes the TranscriptionInstance
:returns: True if delete succeeds, False otherwise
:rtype: bool
"""
return self._proxy.delete()
def __repr__(self):
"""
Provide a friendly representation
:returns: Machine friendly representation
:rtype: str
"""
context = ' '.join('{}={}'.format(k, v) for k, v in self._solution.items())
return '<Twilio.Api.V2010.TranscriptionInstance {}>'.format(context)
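# Illustrative usage sketch (not part of the generated file). Assuming the
# standard twilio-python `Client` wiring, the list resource above is normally
# reached through the client rather than instantiated directly; the SID below
# is a placeholder:
#
#     from twilio.rest import Client
#
#     client = Client(account_sid, auth_token)
#     for transcription in client.transcriptions.list(limit=20):
#         print(transcription.sid, transcription.status)
#     detail = client.transcriptions('TRXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX').fetch()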
|
|
import sys, inspect
from fixture.command.generate import (
DataHandler, register_handler, FixtureSet, NoData, UnsupportedHandler)
from fixture import SQLAlchemyFixture
try:
import sqlalchemy
except ImportError:
sqlalchemy = False
class TableEnv(object):
"""a shared environment of sqlalchemy Table instances.
can be initialized with python paths to objects or objects themselves
"""
def __init__(self, *objects):
self.objects = objects
self.tablemap = {}
for obj in self.objects:
module = None
if isinstance(obj, basestring):
modpath = obj
if modpath not in sys.modules:
# i.e. modpath from command-line option...
try:
if "." in modpath:
cut = modpath.rfind(".")
names = [modpath[cut+1:]]
parent = __import__(
modpath[0:cut], globals(), locals(), names)
module = getattr(parent, names[0])
else:
module = __import__(modpath)
except:
etype, val, tb = sys.exc_info()
                        raise ImportError("%s: %s (while importing %s)" % (
                            etype, val, modpath)), None, tb
else:
module = sys.modules[modpath]
obj = module
if module is None:
module = inspect.getmodule(obj)
self._find_objects(obj, module)
def __contains__(self, key):
return key in self.tablemap
def __getitem__(self, table):
try:
return self.tablemap[table]
except KeyError:
etype, val, tb = sys.exc_info()
raise LookupError, (
"Could not locate original declaration of Table %s "
"(looked in: %s) You might need to add "
"--env='path.to.module'?" % (
table, ", ".join([repr(p) for p in self.objects]))), tb
def _find_objects(self, obj, module):
from sqlalchemy.schema import Table
from sqlalchemy.orm.mapper import (
has_mapper, class_mapper, object_mapper,
mapper_registry)
# get dict key/vals or dir() through object ...
if not hasattr(obj, 'items'):
def getitems():
for name in dir(obj):
yield name, getattr(obj, name)
else:
getitems = obj.items
for name, o in getitems():
if isinstance(o, Table):
self.tablemap.setdefault(o, {})
self.tablemap[o]['name'] = name
self.tablemap[o]['module'] = module
def get_real_table(self, table):
return getattr(self[table]['module'], self[table]['name'])
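# Illustrative sketch (not part of the original module): TableEnv can be seeded
# with dotted module paths or with already-imported modules and then used to
# look up where a Table was originally declared. The module path below is
# hypothetical:
#
#     env = TableEnv('myapp.model')
#     if some_table in env:
#         name = env[some_table]['name']
#         table = env.get_real_table(some_table)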
class SQLAlchemyHandler(DataHandler):
"""handles genration of fixture code from a sqlalchemy data source."""
loadable_fxt_class = SQLAlchemyFixture
class RecordSetAdapter(object):
"""adapts a sqlalchemy record set object for use in a
SQLAlchemyFixtureSet."""
columns = None
def __init__(self, obj):
raise NotImplementedError("not a concrete implementation")
def primary_key_from_instance(self, data):
raise NotImplementedError
def __init__(self, object_path, options, connection=None, **kw):
from sqlalchemy import BoundMetaData, create_engine
from sqlalchemy.ext.sessioncontext import SessionContext
self.connection = connection
super(SQLAlchemyHandler, self).__init__(object_path, options, **kw)
if not self.connection:
if self.options.dsn:
self.meta = BoundMetaData(self.options.dsn)
else:
raise MisconfiguredHandler(
"--dsn option is required by %s" % self.__class__)
self.connection = self.meta.engine.connect()
self.session_context = SessionContext(
lambda: sqlalchemy.create_session(bind_to=self.connection))
self.env = TableEnv(*[self.obj.__module__] + self.options.env)
def add_fixture_set(self, fset):
t = self.env[fset.obj.table]
self.template.add_import("from %s import %s" % (
t['module'].__name__, t['name']))
def begin(self, *a,**kw):
DataHandler.begin(self, *a,**kw)
self.transaction = self.session_context.current.create_transaction()
self.transaction.add(self.connection)
def commit(self):
self.transaction.commit()
def rollback(self):
self.transaction.rollback()
def find(self, idval):
self.rs = [self.obj.get(idval)]
# session = self.session_context.current
# self.rs = [session.query(self.obj).get(idval)]
return self.rs
def findall(self, query=None):
"""gets record set for query."""
session = self.session_context.current
if query:
self.rs = session.query(self.obj).select_whereclause(query)
else:
self.rs = session.query(self.obj).select()
if not len(self.rs):
raise NoData("no data for query \"%s\" on %s" % (query, self.obj))
return self.rs
@staticmethod
def recognizes(object_path, obj=None):
"""returns True if obj is not None.
this method is just a starting point for sqlalchemy handlers.
"""
if not sqlalchemy:
raise UnsupportedHandler("sqlalchemy module not found")
if obj is None:
return False
return True
def sets(self):
"""yields FixtureSet for each row in SQLObject."""
for row in self.rs:
yield SQLAlchemyFixtureSet(row, self.obj, self.connection, self.env,
adapter=self.RecordSetAdapter)
## NOTE: the order that handlers are registered in is important for discovering
## sqlalchemy types...
class SQLAlchemyAssignedMapperHandler(SQLAlchemyHandler):
class RecordSetAdapter(SQLAlchemyHandler.RecordSetAdapter):
def __init__(self, obj):
self.mapped_class = obj
if self.mapped_class.mapper.local_table:
self.table = self.mapped_class.mapper.local_table
elif self.mapped_class.mapper.select_table:
self.table = self.mapped_class.mapper.select_table
else:
raise LookupError(
"not sure how to get a table from mapped class %s" %
self.mapped_class)
self.columns = self.mapped_class.mapper.columns
self.id_attr = self.table.primary_key
def primary_key_from_instance(self, data):
return self.mapped_class.mapper.primary_key_from_instance(data)
@staticmethod
def recognizes(object_path, obj=None):
if not SQLAlchemyHandler.recognizes(object_path, obj=obj):
return False
def isa_mapper(mapper):
from sqlalchemy.orm.mapper import Mapper
if type(mapper)==Mapper:
return True
if hasattr(obj, 'mapper'):
# i.e. assign_mapper ...
if isa_mapper(obj.mapper):
return True
if hasattr(obj, '_mapper'):
# i.e. sqlsoup ??
if isa_mapper(obj._mapper):
return True
return False
register_handler(SQLAlchemyAssignedMapperHandler)
class SQLAlchemyTableHandler(SQLAlchemyHandler):
class RecordSetAdapter(SQLAlchemyHandler.RecordSetAdapter):
def __init__(self, obj):
self.table = obj
self.columns = self.table.columns
keys = [k for k in self.table.primary_key]
if len(keys) != 1:
raise ValueError("unsupported primary key type %s" % keys)
self.id_attr = keys[0].key
def primary_key_from_instance(self, data):
key_str = []
for k in self.table.primary_key:
key_str.append(str(getattr(data, k.key)))
return "_".join(key_str)
@staticmethod
def recognizes(object_path, obj=None):
if not SQLAlchemyHandler.recognizes(object_path, obj=obj):
return False
from sqlalchemy.schema import Table
if isinstance(obj, Table):
raise NotImplementedError(
"Generating data with a table object is not implemented. "
"Please use a mapped class or mapper object instead. Or, "
"consider submitting a patch to support this.")
return True
return False
register_handler(SQLAlchemyTableHandler)
class SQLAlchemyMappedClassHandler(SQLAlchemyHandler):
class RecordSetAdapter(SQLAlchemyHandler.RecordSetAdapter):
def __init__(self, obj):
self.columns = obj.c
self.id_attr = obj.id.key
from sqlalchemy.orm.mapper import object_mapper
# is this safe?
self.mapper = object_mapper(obj())
if self.mapper.local_table:
self.table = self.mapper.local_table
elif self.mapper.select_table:
self.table = self.mapper.select_table
else:
raise LookupError(
"not sure how to get a table from mapper %s" %
self.mapper)
def primary_key_from_instance(self, data):
return self.mapper.primary_key_from_instance(data)
@staticmethod
def recognizes(object_path, obj=None):
if not SQLAlchemyHandler.recognizes(object_path, obj=obj):
return False
if hasattr(obj, 'c'):
if hasattr(obj.c, '__module__') and \
obj.c.__module__.startswith('sqlalchemy'):
# eeesh
return True
return False
register_handler(SQLAlchemyMappedClassHandler)
class SQLAlchemyFixtureSet(FixtureSet):
"""a fixture set for a sqlalchemy record set."""
def __init__(self, data, obj, connection, env, adapter=None):
# print data, model
FixtureSet.__init__(self, data)
self.env = env
self.connection = connection
if adapter:
self.obj = adapter(obj)
else:
self.obj = obj
self.primary_key = None
self.data_dict = {}
for col in self.obj.columns:
sendkw = {}
if col.foreign_key:
sendkw['foreign_key'] = col.foreign_key
val = self.get_col_value(col.name, **sendkw)
self.data_dict[col.name] = val
def attr_to_db_col(self, col):
return col.name
def get_col_value(self, colname, foreign_key=None):
"""transform column name into a value or a
new set if it's a foreign key (recursion).
"""
value = getattr(self.data, colname)
if value is None:
# this means that we are in a NULL column or foreign key
# which could be perfectly legal.
return None
if foreign_key:
from sqlalchemy.ext.assignmapper import assign_mapper
from sqlalchemy.ext.sqlsoup import class_for_table
table = foreign_key.column.table
stmt = table.select(getattr(table.c, foreign_key.column.key)==value)
rs = self.connection.execute(stmt)
# adapter is always table adapter here, since that's
# how we obtain foreign keys...
subset = SQLAlchemyFixtureSet(
rs.fetchone(), table, self.connection, self.env,
adapter=SQLAlchemyTableHandler.RecordSetAdapter)
return subset
return value
def get_id_attr(self):
return self.obj.id_attr
def obj_id(self):
return self.env[self.obj.table]['name']
def set_id(self):
"""returns id of this set (the primary key value)."""
compid = self.obj.primary_key_from_instance(self.data)
return "_".join([str(i) for i in compid])
|
|
"""Support for Radio Thermostat wifi-enabled home thermostats."""
import logging
import radiotherm
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_COOL,
CURRENT_HVAC_HEAT,
CURRENT_HVAC_IDLE,
FAN_OFF,
FAN_ON,
HVAC_MODE_AUTO,
HVAC_MODE_COOL,
HVAC_MODE_HEAT,
HVAC_MODE_OFF,
PRESET_AWAY,
PRESET_HOME,
SUPPORT_FAN_MODE,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
PRECISION_HALVES,
STATE_ON,
TEMP_FAHRENHEIT,
)
import homeassistant.helpers.config_validation as cv
from homeassistant.util import dt as dt_util
_LOGGER = logging.getLogger(__name__)
ATTR_FAN_ACTION = "fan_action"
CONF_HOLD_TEMP = "hold_temp"
PRESET_HOLIDAY = "holiday"
PRESET_ALTERNATE = "alternate"
STATE_CIRCULATE = "circulate"
PRESET_MODES = [PRESET_HOME, PRESET_ALTERNATE, PRESET_AWAY, PRESET_HOLIDAY]
OPERATION_LIST = [HVAC_MODE_AUTO, HVAC_MODE_COOL, HVAC_MODE_HEAT, HVAC_MODE_OFF]
CT30_FAN_OPERATION_LIST = [STATE_ON, HVAC_MODE_AUTO]
CT80_FAN_OPERATION_LIST = [STATE_ON, STATE_CIRCULATE, HVAC_MODE_AUTO]
# Mappings between the radiotherm JSON data codes and Home Assistant state
# flags. CODE is the thermostat integer code.
# Programmed temperature mode of the thermostat.
CODE_TO_TEMP_MODE = {
0: HVAC_MODE_OFF,
1: HVAC_MODE_HEAT,
2: HVAC_MODE_COOL,
3: HVAC_MODE_AUTO,
}
TEMP_MODE_TO_CODE = {v: k for k, v in CODE_TO_TEMP_MODE.items()}
# Programmed fan mode (circulate is supported by CT80 models)
CODE_TO_FAN_MODE = {0: HVAC_MODE_AUTO, 1: STATE_CIRCULATE, 2: STATE_ON}
FAN_MODE_TO_CODE = {v: k for k, v in CODE_TO_FAN_MODE.items()}
# Active thermostat state (is it heating or cooling?). In the future
# this should probably be made into heat and cool binary sensors.
CODE_TO_TEMP_STATE = {0: CURRENT_HVAC_IDLE, 1: CURRENT_HVAC_HEAT, 2: CURRENT_HVAC_COOL}
# Active fan state. This indicates whether the fan is actually on or not. In the
# future this should probably be made into a binary sensor for the fan.
CODE_TO_FAN_STATE = {0: FAN_OFF, 1: FAN_ON}
PRESET_MODE_TO_CODE = {"home": 0, "alternate": 1, "away": 2, "holiday": 3}
CODE_TO_PRESET_MODE = {0: "home", 1: "alternate", 2: "away", 3: "holiday"}
def round_temp(temperature):
"""Round a temperature to the resolution of the thermostat.
RadioThermostats can handle 0.5 degree temps so the input
temperature is rounded to that value and returned.
"""
return round(temperature * 2.0) / 2.0
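# Illustrative examples (not in the original source): round_temp(72.26)
# returns 72.5 and round_temp(70.2) returns 70.0.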
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HOST): vol.All(cv.ensure_list, [cv.string]),
vol.Optional(CONF_HOLD_TEMP, default=False): cv.boolean,
}
)
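# Example configuration.yaml entry (illustrative; the host address is a
# placeholder):
#
#     climate:
#       - platform: radiotherm
#         host:
#           - 192.168.1.100
#         hold_temp: true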
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE | SUPPORT_PRESET_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Radio Thermostat."""
hosts = []
if CONF_HOST in config:
hosts = config[CONF_HOST]
else:
hosts.append(radiotherm.discover.discover_address())
    if not hosts or hosts[0] is None:
_LOGGER.error("No Radiotherm Thermostats detected")
return False
hold_temp = config.get(CONF_HOLD_TEMP)
tstats = []
for host in hosts:
try:
tstat = radiotherm.get_thermostat(host)
tstats.append(RadioThermostat(tstat, hold_temp))
except OSError:
_LOGGER.exception("Unable to connect to Radio Thermostat: %s", host)
add_entities(tstats, True)
class RadioThermostat(ClimateDevice):
"""Representation of a Radio Thermostat."""
def __init__(self, device, hold_temp):
"""Initialize the thermostat."""
self.device = device
self._target_temperature = None
self._current_temperature = None
self._current_humidity = None
self._current_operation = HVAC_MODE_OFF
self._name = None
self._fmode = None
self._fstate = None
self._tmode = None
self._tstate = None
self._hold_temp = hold_temp
self._hold_set = False
self._prev_temp = None
self._preset_mode = None
self._program_mode = None
self._is_away = False
# Fan circulate mode is only supported by the CT80 models.
self._is_model_ct80 = isinstance(self.device, radiotherm.thermostat.CT80)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
async def async_added_to_hass(self):
"""Register callbacks."""
# Set the time on the device. This shouldn't be in the
# constructor because it's a network call. We can't put it in
# update() because calling it will clear any temporary mode or
# temperature in the thermostat. So add it as a future job
# for the event loop to run.
self.hass.async_add_job(self.set_time)
@property
def name(self):
"""Return the name of the Radio Thermostat."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_FAHRENHEIT
@property
def precision(self):
"""Return the precision of the system."""
return PRECISION_HALVES
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
return {ATTR_FAN_ACTION: self._fstate}
@property
def fan_modes(self):
"""List of available fan modes."""
if self._is_model_ct80:
return CT80_FAN_OPERATION_LIST
return CT30_FAN_OPERATION_LIST
@property
def fan_mode(self):
"""Return whether the fan is on."""
return self._fmode
def set_fan_mode(self, fan_mode):
"""Turn fan on/off."""
code = FAN_MODE_TO_CODE.get(fan_mode, None)
if code is not None:
self.device.fmode = code
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def current_humidity(self):
"""Return the current temperature."""
return self._current_humidity
@property
def hvac_mode(self):
"""Return the current operation. head, cool idle."""
return self._current_operation
@property
def hvac_modes(self):
"""Return the operation modes list."""
return OPERATION_LIST
@property
def hvac_action(self):
"""Return the current running hvac operation if supported."""
if self.hvac_mode == HVAC_MODE_OFF:
return None
return self._tstate
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def preset_mode(self):
"""Return the current preset mode, e.g., home, away, temp."""
if self._program_mode == 0:
return PRESET_HOME
if self._program_mode == 1:
return PRESET_ALTERNATE
if self._program_mode == 2:
return PRESET_AWAY
if self._program_mode == 3:
return PRESET_HOLIDAY
@property
def preset_modes(self):
"""Return a list of available preset modes."""
return PRESET_MODES
def update(self):
"""Update and validate the data from the thermostat."""
# Radio thermostats are very slow, and sometimes don't respond
# very quickly. So we need to keep the number of calls to them
# to a bare minimum or we'll hit the Home Assistant 10 sec warning. We
# have to make one call to /tstat to get temps but we'll try and
# keep the other calls to a minimum. Even with this, these
# thermostats tend to time out sometimes when they're actively
# heating or cooling.
# First time - get the name from the thermostat. This is
# normally set in the radio thermostat web app.
if self._name is None:
self._name = self.device.name["raw"]
# Request the current state from the thermostat.
try:
data = self.device.tstat["raw"]
except radiotherm.validate.RadiothermTstatError:
_LOGGER.warning(
"%s (%s) was busy (invalid value returned)",
self._name,
self.device.host,
)
return
current_temp = data["temp"]
if self._is_model_ct80:
try:
humiditydata = self.device.humidity["raw"]
except radiotherm.validate.RadiothermTstatError:
_LOGGER.warning(
"%s (%s) was busy (invalid value returned)",
self._name,
self.device.host,
)
return
self._current_humidity = humiditydata
# Map thermostat values into various STATE_ flags.
self._current_temperature = current_temp
self._fmode = CODE_TO_FAN_MODE[data["fmode"]]
self._fstate = CODE_TO_FAN_STATE[data["fstate"]]
self._tmode = CODE_TO_TEMP_MODE[data["tmode"]]
self._tstate = CODE_TO_TEMP_STATE[data["tstate"]]
self._program_mode = data["program_mode"]
self._preset_mode = CODE_TO_PRESET_MODE[data["program_mode"]]
self._current_operation = self._tmode
if self._tmode == HVAC_MODE_COOL:
self._target_temperature = data["t_cool"]
elif self._tmode == HVAC_MODE_HEAT:
self._target_temperature = data["t_heat"]
elif self._tmode == HVAC_MODE_AUTO:
# This doesn't really work - tstate is only set if the HVAC is
# active. If it's idle, we don't know what to do with the target
# temperature.
if self._tstate == CURRENT_HVAC_COOL:
self._target_temperature = data["t_cool"]
elif self._tstate == CURRENT_HVAC_HEAT:
self._target_temperature = data["t_heat"]
else:
self._current_operation = HVAC_MODE_OFF
def set_temperature(self, **kwargs):
"""Set new target temperature."""
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is None:
return
temperature = round_temp(temperature)
if self._current_operation == HVAC_MODE_COOL:
self.device.t_cool = temperature
elif self._current_operation == HVAC_MODE_HEAT:
self.device.t_heat = temperature
elif self._current_operation == HVAC_MODE_AUTO:
if self._tstate == CURRENT_HVAC_COOL:
self.device.t_cool = temperature
elif self._tstate == CURRENT_HVAC_HEAT:
self.device.t_heat = temperature
# Only change the hold if requested or if hold mode was turned
# on and we haven't set it yet.
if kwargs.get("hold_changed", False) or not self._hold_set:
if self._hold_temp:
self.device.hold = 1
self._hold_set = True
else:
self.device.hold = 0
def set_time(self):
"""Set device time."""
# Calling this clears any local temperature override and
# reverts to the scheduled temperature.
now = dt_util.now()
self.device.time = {
"day": now.weekday(),
"hour": now.hour,
"minute": now.minute,
}
def set_hvac_mode(self, hvac_mode):
"""Set operation mode (auto, cool, heat, off)."""
if hvac_mode in (HVAC_MODE_OFF, HVAC_MODE_AUTO):
self.device.tmode = TEMP_MODE_TO_CODE[hvac_mode]
# Setting t_cool or t_heat automatically changes tmode.
elif hvac_mode == HVAC_MODE_COOL:
self.device.t_cool = self._target_temperature
elif hvac_mode == HVAC_MODE_HEAT:
self.device.t_heat = self._target_temperature
def set_preset_mode(self, preset_mode):
"""Set Preset mode (Home, Alternate, Away, Holiday)."""
if preset_mode in (PRESET_MODES):
self.device.program_mode = PRESET_MODE_TO_CODE[preset_mode]
else:
_LOGGER.error(
"preset_mode %s not in PRESET_MODES", preset_mode,
)
|
|
"""Generic output formatting.
Formatter objects transform an abstract flow of formatting events into
specific output events on writer objects. Formatters manage several stack
structures to allow various properties of a writer object to be changed and
restored; writers need not be able to handle relative changes nor any sort
of ``change back'' operation. Specific writer properties which may be
controlled via formatter objects are horizontal alignment, font, and left
margin indentations. A mechanism is provided which supports providing
arbitrary, non-exclusive style settings to a writer as well. Additional
interfaces facilitate formatting events which are not reversible, such as
paragraph separation.
Writer objects encapsulate device interfaces. Abstract devices, such as
file formats, are supported as well as physical devices. The provided
implementations all work with abstract devices. The interface makes
available mechanisms for setting the properties which formatter objects
manage and inserting data into the output.
"""
import string
import sys
from types import StringType
AS_IS = None
class NullFormatter:
def __init__(self, writer=None):
if not writer:
writer = NullWriter()
self.writer = writer
def end_paragraph(self, blankline): pass
def add_line_break(self): pass
def add_hor_rule(self, *args, **kw): pass
def add_label_data(self, format, counter, blankline=None): pass
def add_flowing_data(self, data): pass
def add_literal_data(self, data): pass
def flush_softspace(self): pass
def push_alignment(self, align): pass
def pop_alignment(self): pass
def push_font(self, x): pass
def pop_font(self): pass
def push_margin(self, margin): pass
def pop_margin(self): pass
def set_spacing(self, spacing): pass
def push_style(self, *styles): pass
def pop_style(self, n=1): pass
def assert_line_data(self, flag=1): pass
class AbstractFormatter:
# Space handling policy: blank spaces at the boundary between elements
# are handled by the outermost context. "Literal" data is not checked
# to determine context, so spaces in literal data are handled directly
# in all circumstances.
def __init__(self, writer):
self.writer = writer # Output device
self.align = None # Current alignment
self.align_stack = [] # Alignment stack
self.font_stack = [] # Font state
self.margin_stack = [] # Margin state
self.spacing = None # Vertical spacing state
self.style_stack = [] # Other state, e.g. color
self.nospace = 1 # Should leading space be suppressed
self.softspace = 0 # Should a space be inserted
self.para_end = 1 # Just ended a paragraph
self.parskip = 0 # Skipped space between paragraphs?
self.hard_break = 1 # Have a hard break
self.have_label = 0
def end_paragraph(self, blankline):
if not self.hard_break:
self.writer.send_line_break()
self.have_label = 0
if self.parskip < blankline and not self.have_label:
self.writer.send_paragraph(blankline - self.parskip)
self.parskip = blankline
self.have_label = 0
self.hard_break = self.nospace = self.para_end = 1
self.softspace = 0
def add_line_break(self):
if not (self.hard_break or self.para_end):
self.writer.send_line_break()
self.have_label = self.parskip = 0
self.hard_break = self.nospace = 1
self.softspace = 0
def add_hor_rule(self, *args, **kw):
if not self.hard_break:
self.writer.send_line_break()
apply(self.writer.send_hor_rule, args, kw)
self.hard_break = self.nospace = 1
self.have_label = self.para_end = self.softspace = self.parskip = 0
def add_label_data(self, format, counter, blankline = None):
if self.have_label or not self.hard_break:
self.writer.send_line_break()
if not self.para_end:
self.writer.send_paragraph((blankline and 1) or 0)
if type(format) is StringType:
self.writer.send_label_data(self.format_counter(format, counter))
else:
self.writer.send_label_data(format)
self.nospace = self.have_label = self.hard_break = self.para_end = 1
self.softspace = self.parskip = 0
def format_counter(self, format, counter):
label = ''
for c in format:
try:
if c == '1':
label = label + ('%d' % counter)
elif c in 'aA':
if counter > 0:
label = label + self.format_letter(c, counter)
elif c in 'iI':
if counter > 0:
label = label + self.format_roman(c, counter)
else:
label = label + c
except:
label = label + c
return label
def format_letter(self, case, counter):
label = ''
while counter > 0:
counter, x = divmod(counter-1, 26)
s = chr(ord(case) + x)
label = s + label
return label
def format_roman(self, case, counter):
ones = ['i', 'x', 'c', 'm']
fives = ['v', 'l', 'd']
label, index = '', 0
# This will die of IndexError when counter is too big
while counter > 0:
counter, x = divmod(counter, 10)
if x == 9:
label = ones[index] + ones[index+1] + label
elif x == 4:
label = ones[index] + fives[index] + label
else:
if x >= 5:
s = fives[index]
x = x-5
else:
s = ''
s = s + ones[index]*x
label = s + label
index = index + 1
if case == 'I':
return label.upper()
return label
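    # Illustrative examples (not in the original source): format_roman('i', 1994)
    # returns 'mcmxciv', format_roman('I', 4) returns 'IV', and
    # format_letter('a', 28) returns 'ab'.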
def add_flowing_data(self, data,
# These are only here to load them into locals:
whitespace = string.whitespace,
join = string.join, split = string.split):
if not data: return
# The following looks a bit convoluted but is a great improvement over
# data = regsub.gsub('[' + string.whitespace + ']+', ' ', data)
prespace = data[:1] in whitespace
postspace = data[-1:] in whitespace
data = join(split(data))
if self.nospace and not data:
return
elif prespace or self.softspace:
if not data:
if not self.nospace:
self.softspace = 1
self.parskip = 0
return
if not self.nospace:
data = ' ' + data
self.hard_break = self.nospace = self.para_end = \
self.parskip = self.have_label = 0
self.softspace = postspace
self.writer.send_flowing_data(data)
def add_literal_data(self, data):
if not data: return
if self.softspace:
self.writer.send_flowing_data(" ")
self.hard_break = data[-1:] == '\n'
self.nospace = self.para_end = self.softspace = \
self.parskip = self.have_label = 0
self.writer.send_literal_data(data)
def flush_softspace(self):
if self.softspace:
self.hard_break = self.para_end = self.parskip = \
self.have_label = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
def push_alignment(self, align):
if align and align != self.align:
self.writer.new_alignment(align)
self.align = align
self.align_stack.append(align)
else:
self.align_stack.append(self.align)
def pop_alignment(self):
if self.align_stack:
del self.align_stack[-1]
if self.align_stack:
self.align = align = self.align_stack[-1]
self.writer.new_alignment(align)
else:
self.align = None
self.writer.new_alignment(None)
def push_font(self, (size, i, b, tt)):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
if self.font_stack:
csize, ci, cb, ctt = self.font_stack[-1]
if size is AS_IS: size = csize
if i is AS_IS: i = ci
if b is AS_IS: b = cb
if tt is AS_IS: tt = ctt
font = (size, i, b, tt)
self.font_stack.append(font)
self.writer.new_font(font)
def pop_font(self):
if self.font_stack:
del self.font_stack[-1]
if self.font_stack:
font = self.font_stack[-1]
else:
font = None
self.writer.new_font(font)
def push_margin(self, margin):
self.margin_stack.append(margin)
fstack = filter(None, self.margin_stack)
if not margin and fstack:
margin = fstack[-1]
self.writer.new_margin(margin, len(fstack))
def pop_margin(self):
if self.margin_stack:
del self.margin_stack[-1]
fstack = filter(None, self.margin_stack)
if fstack:
margin = fstack[-1]
else:
margin = None
self.writer.new_margin(margin, len(fstack))
def set_spacing(self, spacing):
self.spacing = spacing
self.writer.new_spacing(spacing)
def push_style(self, *styles):
if self.softspace:
self.hard_break = self.para_end = self.softspace = 0
self.nospace = 1
self.writer.send_flowing_data(' ')
for style in styles:
self.style_stack.append(style)
self.writer.new_styles(tuple(self.style_stack))
def pop_style(self, n=1):
del self.style_stack[-n:]
self.writer.new_styles(tuple(self.style_stack))
def assert_line_data(self, flag=1):
self.nospace = self.hard_break = not flag
self.para_end = self.parskip = self.have_label = 0
class NullWriter:
"""Minimal writer interface to use in testing & inheritance."""
def __init__(self): pass
def flush(self): pass
def new_alignment(self, align): pass
def new_font(self, font): pass
def new_margin(self, margin, level): pass
def new_spacing(self, spacing): pass
def new_styles(self, styles): pass
def send_paragraph(self, blankline): pass
def send_line_break(self): pass
def send_hor_rule(self, *args, **kw): pass
def send_label_data(self, data): pass
def send_flowing_data(self, data): pass
def send_literal_data(self, data): pass
class AbstractWriter(NullWriter):
def __init__(self):
pass
def new_alignment(self, align):
print "new_alignment(%s)" % `align`
def new_font(self, font):
print "new_font(%s)" % `font`
def new_margin(self, margin, level):
print "new_margin(%s, %d)" % (`margin`, level)
def new_spacing(self, spacing):
print "new_spacing(%s)" % `spacing`
def new_styles(self, styles):
print "new_styles(%s)" % `styles`
def send_paragraph(self, blankline):
print "send_paragraph(%s)" % `blankline`
def send_line_break(self):
print "send_line_break()"
def send_hor_rule(self, *args, **kw):
print "send_hor_rule()"
def send_label_data(self, data):
print "send_label_data(%s)" % `data`
def send_flowing_data(self, data):
print "send_flowing_data(%s)" % `data`
def send_literal_data(self, data):
print "send_literal_data(%s)" % `data`
class DumbWriter(NullWriter):
def __init__(self, file=None, maxcol=72):
self.file = file or sys.stdout
self.maxcol = maxcol
NullWriter.__init__(self)
self.reset()
def reset(self):
self.col = 0
self.atbreak = 0
def send_paragraph(self, blankline):
self.file.write('\n'*blankline)
self.col = 0
self.atbreak = 0
def send_line_break(self):
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_hor_rule(self, *args, **kw):
self.file.write('\n')
self.file.write('-'*self.maxcol)
self.file.write('\n')
self.col = 0
self.atbreak = 0
def send_literal_data(self, data):
self.file.write(data)
i = data.rfind('\n')
if i >= 0:
self.col = 0
data = data[i+1:]
data = data.expandtabs()
self.col = self.col + len(data)
self.atbreak = 0
def send_flowing_data(self, data):
if not data: return
atbreak = self.atbreak or data[0] in string.whitespace
col = self.col
maxcol = self.maxcol
write = self.file.write
for word in data.split():
if atbreak:
if col + len(word) >= maxcol:
write('\n')
col = 0
else:
write(' ')
col = col + 1
write(word)
col = col + len(word)
atbreak = 1
self.col = col
self.atbreak = data[-1] in string.whitespace
def test(file = None):
w = DumbWriter()
f = AbstractFormatter(w)
if file:
fp = open(file)
elif sys.argv[1:]:
fp = open(sys.argv[1])
else:
fp = sys.stdin
while 1:
line = fp.readline()
if not line:
break
if line == '\n':
f.end_paragraph(1)
else:
f.add_flowing_data(line)
f.end_paragraph(0)
if __name__ == '__main__':
test()
|
|
from __future__ import print_function
from test_orm import OrloDbTest
import arrow
import unittest
__author__ = 'alforbes'
class TestStats(OrloDbTest):
ENDPOINT = '/stats'
def setUp(self):
super(OrloDbTest, self).setUp()
for r in range(0, 3):
self._create_finished_release()
def test_endpoint_200(self):
"""
Test self.ENDPOINT returns 200
"""
response = self.client.get(self.ENDPOINT)
self.assert200(response)
def test_endpoint_returns_dict(self):
"""
Test self.ENDPOINT returns a dictionary
"""
response = self.client.get(self.ENDPOINT)
self.assertIsInstance(response.json, dict)
def test_result_includes_normals(self):
"""
Test the result includes a 'normal' field
"""
response = self.client.get(self.ENDPOINT)
for k, v in response.json.items():
self.assertIn('successful', v['releases']['normal'])
self.assertIn('failed', v['releases']['normal'])
def test_result_includes_rollbacks(self):
"""
Test the result includes a 'rollback' field
"""
response = self.client.get(self.ENDPOINT)
for k, v in response.json.items():
self.assertIn('successful', v['releases']['rollback'])
self.assertIn('failed', v['releases']['rollback'])
def test_result_includes_totals(self):
"""
Test the result includes a 'successful' field
"""
response = self.client.get(self.ENDPOINT)
for k, v in response.json.items():
self.assertIn('successful', v['releases']['total'])
self.assertIn('failed', v['releases']['total'])
def test_with_invalid_stime(self):
"""
Test that an invalid start time is handled gracefully
"""
response = self.client.get(self.ENDPOINT + '?stime=foo')
self.assert400(response)
self.assertIn('message', response.json)
class TestUserStats(TestStats):
ENDPOINT = '/stats/user'
def test_stats_user_200_with_user(self):
"""
Test that /stats/user/username returns 200
"""
response = self.client.get(self.ENDPOINT + '/testuser')
self.assert200(response)
def test_stats_user_returns_dict_with_user(self):
"""
Test that /stats/user/username returns a dictionary
"""
response = self.client.get(self.ENDPOINT + '/testuser')
self.assertIsInstance(response.json, dict)
class TestTeamStats(TestStats):
ENDPOINT = '/stats/team'
def test_stats_team_200_with_team(self):
"""
Test that /stats/team/team_name returns 200
"""
response = self.client.get(self.ENDPOINT + '/test%20team')
self.assert200(response)
def test_stats_team_returns_dict_with_team(self):
"""
Test that /stats/team/team_name returns a dictionary
"""
response = self.client.get(self.ENDPOINT + '/test%20team')
self.assertIsInstance(response.json, dict)
class TestPlatformStats(TestStats):
ENDPOINT = '/stats/platform'
def test_stats_platform_200_with_platform(self):
"""
Test that /stats/platform/platform_name returns 200
"""
response = self.client.get(self.ENDPOINT + '/test_platform')
self.assert200(response)
class TestPackageStats(TestStats):
ENDPOINT = '/stats/package'
def test_stats_package_200_with_package(self):
"""
Test that /stats/package/package_name returns 200
"""
response = self.client.get(self.ENDPOINT + '/test-package')
self.assert200(response)
def test_stats_package_returns_dict_with_package(self):
"""
Test that /stats/package/package_name returns a dictionary
"""
response = self.client.get(self.ENDPOINT + '/test-package')
self.assertIsInstance(response.json, dict)
class TestStatsByDateRelease(TestStats):
"""
Testing the "by_date" urls
"""
ENDPOINT = '/stats/by_date/release'
def test_result_includes_normals(self):
        self.skipTest("Not suitable test for this endpoint")
def test_result_includes_rollbacks(self):
        self.skipTest("Not suitable test for this endpoint")
def test_result_includes_totals(self):
        self.skipTest("Not suitable test for this endpoint")
def test_with_invalid_stime(self):
# TODO the stats endpoints should be made consistent with the others by calling
# apply_filters on the query parameters
        self.skipTest("Not suitable test for this endpoint")
def test_stats_by_date_with_year(self):
"""
Test /stats/by_date/year
"""
year = arrow.utcnow().year
response = self.client.get(self.ENDPOINT + '?stime_gt={}-01-01'.format(year))
self.assert200(response)
def test_stats_by_date_with_year_month(self):
"""
Test /stats/by_date/year
"""
year = arrow.utcnow().year
month = arrow.utcnow().month
response = self.client.get(self.ENDPOINT + '?stime_gt={}-{}-01'.format(year, month))
self.assert200(response)
def test_stats_by_date_with_unit_day(self):
"""
Test /stats/by_date/year
"""
response = self.client.get(self.ENDPOINT + '?unit=day')
self.assert200(response)
def test_stats_by_date_with_summarize_by_unit_day(self):
"""
Test /stats/by_date/year
"""
response = self.client.get(self.ENDPOINT + '?unit=day&summarize_by_unit=1')
self.assert200(response)
def test_stats_by_date_with_platform_filter(self):
"""
Test /stats/by_date with a platform filter
"""
year = str(arrow.utcnow().year)
response = self.client.get(self.ENDPOINT + '?platform=test_platform')
self.assert200(response)
self.assertIn(year, response.json)
def test_stats_by_date_with_platform_filter_negative(self):
"""
Test /stats/by_date with a bad platform filter returns nothing
"""
response = self.client.get(self.ENDPOINT + '?platform=bad_platform_foo')
self.assert200(response)
self.assertEqual({}, response.json)
class TestStatsByDatePackage(OrloDbTest):
"""
Testing the "by_date" urls
"""
ENDPOINT = '/stats/by_date/package'
def setUp(self):
super(OrloDbTest, self).setUp()
for r in range(0, 3):
self._create_finished_release()
def test_endpoint_200(self):
"""
Test self.ENDPOINT returns 200
"""
response = self.client.get(self.ENDPOINT)
self.assert200(response)
def test_endpoint_returns_dict(self):
"""
Test self.ENDPOINT returns a dictionary
"""
response = self.client.get(self.ENDPOINT)
self.assertIsInstance(response.json, dict)
def test_package_name_in_dict(self):
"""
Test the package name is in the returned json
"""
response = self.client.get(self.ENDPOINT)
year = str(arrow.utcnow().year)
month = str(arrow.utcnow().month)
self.assertIn('test-package', response.json[year][month])
|
|
# Standard Python packages
import math
import numbers
# Special dependencies
import numpy
class InfiniteType:
def __init__(self, multiplier=1.):
if multiplier == 0.: raise ZeroDivisionError, "Cannot multiply infinity and zero."
self._multiplier = multiplier
def __repr__(self):
if self is Infinity:
return "Infinity"
elif self is MinusInfinity:
return "-Infinity"
elif self._multiplier > 0.:
return "Infinity*%g" % self._multiplier
else:
return "-Infinity*%g" % abs(self._multiplier)
def __neg__(self):
if self is Infinity:
return MinusInfinity
elif self is MinusInfinity:
return Infinity
else:
return self * -1.
def __mul__(self, number):
if number == 0.: raise ZeroDivisionError, "Cannot multiply infinity and zero."
return InfiniteType(self._multiplier * number)
def __div__(self, number):
if isinstance(number, InfiniteType): raise ZeroDivisionError, "Cannot divide infinity and infinity."
if number == 0: raise ZeroDivisionError, "Cannot divide infinity and zero."
return InfiniteType(self._multiplier / number)
def __truediv__(self, number):
return self.__div__(number)
#: Symbol representing infinity; can be multiplied by any scalar.
Infinity = InfiniteType()
MinusInfinity = InfiniteType(-1.)
#: A small number (1e-5), used to avoid numerical round-off issues in
#: comparisons.
#:
#: The following can be used to set epsilon (without any
#: multiple-reference issues)::
#:
#: import cassius
#: cassius.epsilon = 1e-10
epsilon = 1e-5
######################################################### Utility functions
def _roundlevel_nsigfigs(num, n):
if num == 0.: return 1
return n - int(math.ceil(math.log10(abs(num))))
def str_round(num, n):
"""Round a number to n digits and return the result as a string."""
num = round(num, n)
format = "%."+str(max(n, 0))+"f"
return format % num
def round_sigfigs(num, n):
"Round a number to n significant figures."
return round(num, _roundlevel_nsigfigs(num, n))
def str_sigfigs(num, n):
"""Round a number to n significant figures and return the result as
a string."""
level = _roundlevel_nsigfigs(num, n)
num = round(num, level)
format = "%."+str(max(level, 0))+"f"
return format % num
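# Illustrative examples (not from the original source) of the rounding helpers
# above, assuming standard IEEE float behaviour:
#     round_sigfigs(0.012345, 2)  ->  0.012
#     str_sigfigs(1234.567, 3)    ->  "1230"   (negative round level, so no decimals)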
def round_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in
the uncertainty (default is two)."""
level = _roundlevel_nsigfigs(err, n)
return round(num, level), round(err, level)
def str_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in the
uncertainty (default is two) and return the result as a string."""
level = _roundlevel_nsigfigs(err, n)
num = round(num, level)
err = round(err, level)
format = "%."+str(max(level, 0))+"f"
return format % num, format % err
def unicode_errpair(num, err, n=2):
"""Round a number and its uncertainty to n significant figures in the
uncertainty (default is two) and return the result joined by a unicode
plus-minus sign."""
return u"\u00b1".join(str_errpair(num, err, n))
def mean(*values, **kwds):
"""Compute the mean of N values (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
decimals = kwds.get("decimals", None)
sigfigs = kwds.get("sigfigs", None)
string = kwds.get("string", False)
if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
values = values[0]
sum_1 = 0.
sum_y = 0.
for y in values:
if not isinstance(y, (numbers.Number, numpy.number)):
raise ValueError, "mean() requires a list of numbers"
sum_1 += 1.
sum_y += y
if sum_1 != 0.:
output = sum_y / sum_1
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the mean without any values"
def wmean(values, weights, decimals=None, sigfigs=None, string=False):
"""Compute the weighted mean of N values with N weights (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
sum_1 = 0.
sum_y = 0.
for y, weight in itertools.izip(values, weights):
if not isinstance(y, (numbers.Number, numpy.number)) or not isinstance(weight, (numbers.Number, numpy.number)):
raise ValueError, "wmean() requires lists of numbers"
sum_1 += weight
sum_y += weight * y
if sum_1 != 0.:
outputval, outputerr = sum_y / sum_1, math.sqrt(1. / sum_1)
if decimals is not None:
if string:
return str_round(outputval, decimals), str_round(outputerr, decimals)
else:
return round(outputval, decimals), round(outputerr, decimals)
elif sigfigs is not None:
if string:
return str_errpair(outputval, outputerr, sigfigs)
else:
return round_errpair(outputval, outputerr, sigfigs)
else:
if string:
return str(outputval), str(outputerr)
else:
return outputval, outputerr
else:
raise ValueError, "Cannot take the weighted mean without any values"
def linearfit(xvalues, yvalues, weights=None, decimals=None, sigfigs=None, string=False):
"""Compute a linear fit of N x-y pairs with weights (N > 0).
Keyword arguments:
weights (list of numbers or `None`): if `None`, weight all
points equally.
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
if weights is None:
weights = numpy.ones(min(len(xvalues), len(yvalues)), dtype=numpy.float)
sum_1 = 0.
sum_x = 0.
sum_xx = 0.
sum_y = 0.
sum_xy = 0.
for x, y, weight in itertools.izip(xvalues, yvalues, weights):
if not isinstance(x, (numbers.Number, numpy.number)) or not isinstance(y, (numbers.Number, numpy.number)) or not isinstance(weight, (numbers.Number, numpy.number)):
raise ValueError, "linearfit() requires lists of numbers"
sum_1 += weight
sum_x += weight * x
sum_xx += weight * x**2
sum_y += weight * y
sum_xy += weight * x * y
delta = (sum_1 * sum_xx) - (sum_x * sum_x)
if delta != 0.:
intercept = ((sum_xx * sum_y) - (sum_x * sum_xy)) / delta
intercept_err = math.sqrt(sum_xx / delta)
slope = ((sum_1 * sum_xy) - (sum_x * sum_y)) / delta
slope_err = math.sqrt(sum_1 / delta)
if decimals is not None:
if string:
intercept, intercept_err = str_round(intercept, decimals), str_round(intercept_err, decimals)
slope, slope_err = str_round(slope, decimals), str_round(slope_err, decimals)
else:
intercept, intercept_err = round(intercept, decimals), round(intercept_err, decimals)
slope, slope_err = round(slope, decimals), round(slope_err, decimals)
elif sigfigs is not None:
if string:
intercept, intercept_err = str_errpair(intercept, intercept_err, sigfigs)
slope, slope_err = str_errpair(slope, slope_err, sigfigs)
else:
intercept, intercept_err = round_errpair(intercept, intercept_err, sigfigs)
slope, slope_err = round_errpair(slope, slope_err, sigfigs)
elif string:
intercept, intercept_err = str(intercept), str(intercept_err)
slope, slope_err = str(slope), str(slope_err)
return intercept, intercept_err, slope, slope_err
else:
raise ValueError, "Cannot take a linear fit without any values"
def rms(*values, **kwds):
"""Compute the root-mean-square of N values (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
decimals = kwds.get("decimals", None)
sigfigs = kwds.get("sigfigs", None)
string = kwds.get("string", False)
if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
values = values[0]
sum_1 = 0.
sum_yy = 0.
for y in values:
if not isinstance(y, (numbers.Number, numpy.number)):
raise ValueError, "rms() requires a list of numbers"
sum_1 += 1.
sum_yy += y**2
if sum_1 != 0.:
output = math.sqrt(sum_yy / sum_1)
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the RMS with fewer than one unique value"
def stdev(*values, **kwds):
"""Compute the standard deviation of N values (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
decimals = kwds.get("decimals", None)
sigfigs = kwds.get("sigfigs", None)
string = kwds.get("string", False)
if len(values) == 1 and not isinstance(values[0], (numbers.Number, numpy.number)):
values = values[0]
sum_1 = 0.
sum_y = 0.
sum_yy = 0.
for y in values:
if not isinstance(y, (numbers.Number, numpy.number)):
raise ValueError, "stdev() requires a list of numbers"
sum_1 += 1.
sum_y += y
sum_yy += y**2
if sum_1 != 0. and (sum_yy / sum_1) > (sum_y / sum_1)**2:
output = math.sqrt((sum_yy / sum_1) - (sum_y / sum_1)**2)
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the stdev with fewer than one unique value"
def covariance(xvalues, yvalues, decimals=None, sigfigs=None, string=False):
"""Compute the covariance of N x-y pairs (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
xmean = mean(*xvalues)
ymean = mean(*yvalues)
sum_1 = 0.
sum_xy = 0.
for x, y in itertools.izip(xvalues, yvalues):
sum_1 += 1.
sum_xy += (x - xmean) * (y - ymean)
output = sum_xy / sum_1
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
def correlation(xvalues, yvalues, decimals=None, sigfigs=None, string=False):
"""Compute the correlation of N x-y pairs (N > 0).
Keyword arguments:
decimals (int or `None`): number of digits after the decimal
point to round the result, if not `None`
sigfigs (int or `None`): number of significant digits to round
the result, if not `None`
string (bool): return output as a string (forces number of digits)
"""
xmean = mean(xvalues)
ymean = mean(yvalues)
sum_xx = 0.
sum_yy = 0.
sum_xy = 0.
for x, y in itertools.izip(xvalues, yvalues):
sum_xx += (x - xmean)**2
sum_yy += (y - ymean)**2
sum_xy += (x - xmean) * (y - ymean)
if sum_xx != 0. and sum_yy != 0.:
output = sum_xy / math.sqrt(sum_xx * sum_yy)
if decimals is not None:
if string:
return str_round(output, decimals)
else:
return round(output, decimals)
elif sigfigs is not None:
if string:
return str_sigfigs(output, sigfigs)
else:
return round_sigfigs(output, sigfigs)
else:
if string:
return str(output)
else:
return output
else:
raise ValueError, "Cannot take the correlation without any values"
def ubiquitous(array):
"""Return the most ubiquitous (most frequent) member of a list."""
if isinstance(array, numpy.ndarray):
keys = numpy.unique(array)
maximal = None
for k in keys:
this = len(array[array == k])
if maximal is None or this > maximal:
maximal_key = k
maximal = this
if maximal is not None:
return maximal_key
else:
return None
else:
keys = set(array)
maximal = None
for k in keys:
this = array.count(k)
if maximal is None or this > maximal:
maximal_key = k
maximal = this
if maximal is not None:
return maximal_key
else:
return None
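# Illustrative example (not from the original source):
#     ubiquitous([3, 1, 3, 2, 3])               ->  3
#     ubiquitous(numpy.array([3, 1, 3, 2, 3]))  ->  3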
def erf(x):
"""Return the error function of x.
(For complex erf, get SciPy and load scipy.special)
"""
# http://stackoverflow.com/questions/457408/is-there-an-easily-available-implementation-of-erf-for-python
sign = 1
if x < 0:
sign = -1
x = abs(x)
a1 = 0.254829592
a2 = -0.284496736
a3 = 1.421413741
a4 = -1.453152027
a5 = 1.061405429
p = 0.3275911
# http://www.amazon.com/dp/0486612724/?tag=stackoverfl08-20 formula 7.1.26
t = 1.0/(1.0 + p*x)
y = 1.0 - (((((a5*t + a4)*t) + a3)*t + a2)*t + a1)*t*math.exp(-x*x)
return sign*y # erf(-x) = -erf(x)
def erfc(x):
"""Return 1 minus the error function of x.
(For complex erfc, get SciPy and load scipy.special)
"""
return 1. - erf(x)
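# Illustrative check (not from the original source): the Abramowitz & Stegun
# 7.1.26 approximation used above is accurate to roughly 1.5e-7, e.g.
#     erf(1.0)   ->  ~0.842701   (exact value 0.8427008...)
#     erfc(1.0)  ->  ~0.157299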
def gaussian_likelihood(f, x, y, ey):
"""Gaussian likelihood function usable in Curve.objective and Curve.fit.
Expression:
(f - y)**2 / ey**2 or 0 if ey == 0
where f is the value of the curve at x, y is the data, and ey
is the uncertainty in the data (one Gaussian sigma).
"""
return ((f - y)**2/ey**2 if ey != 0. else 0.)
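# Illustrative example (not from the original source): with a one-sigma
# uncertainty of 0.1, a curve value 0.2 above the data point contributes
#     gaussian_likelihood(1.2, x=None, y=1.0, ey=0.1)  ->  4.0
# i.e. (0.2/0.1)**2, the usual chi^2 term for a two-sigma deviation.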
def poisson_likelihood(f, x, y):
"""Poisson likelihood function usable in Curve.objective and Curve.fit.
Expression:
-2 * (y * log(f) - f - log(y!))
where f is the value of the curve at x and y is the data
(usually an integer, like a histogram bin value).
Considerations:
Note the factor of 2! Not all texts include this factor. With
the factor of 2, this Poisson likelihood can be used
interchangeably with a Gaussian likelihood (i.e. chi^2):
uncertainty in a best fit value is the distance you need to
walk to raise this objective function by 1.0, just like the
Gaussian likelihood (not 0.5!).
"""
# try:
# return -2.*(y*math.log(f) - f - math.log(math.factorial(y)))
# except ValueError:
# return -2.*(y*math.log(1e-10) - 1e-10 - math.log(math.factorial(y)))
### much better:
try:
return -2.*(y*math.log(f) - f - sum(map(math.log, xrange(1, y+1))))
except ValueError:
# note: if f == 0., then any non-zero y is impossible
# is it right to give it a small value? something to think about...
return -2.*(y*math.log(1e-10) - 1e-10 - sum(map(math.log, xrange(1, y+1))))
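# Illustrative example (not from the original source): for an expected curve
# value f = 2.0 and an observed bin count y = 3,
#     poisson_likelihood(2.0, x=None, y=3)  ->  ~3.42
# which is -2*(3*ln(2) - 2 - ln(3!)).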
|
|
import numpy
import types, time
from numpy.ma import *
from numpy.core import fromnumeric
from numpy.core.numerictypes import float32
from numpy.ma.core import umath
from numpy.testing import *
pi = numpy.pi
def eq(v,w, msg=''):
result = allclose(v,w)
if not result:
print """Not eq:%s
%s
----
%s"""% (msg, str(v), str(w))
return result
class TestMa(TestCase):
def setUp (self):
x=numpy.array([1.,1.,1.,-2., pi/2.0, 4., 5., -10., 10., 1., 2., 3.])
y=numpy.array([5.,0.,3., 2., -1., -4., 0., -10., 10., 1., 0., 3.])
a10 = 10.
m1 = [1, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0]
m2 = [0, 0, 1, 0, 0, 1, 1, 0, 0, 0 ,0, 1]
xm = array(x, mask=m1)
ym = array(y, mask=m2)
z = numpy.array([-.5, 0., .5, .8])
zm = array(z, mask=[0,1,0,0])
xf = numpy.where(m1, 1.e+20, x)
s = x.shape
xm.set_fill_value(1.e+20)
self.d = (x, y, a10, m1, m2, xm, ym, z, zm, xf, s)
def test_testBasic1d(self):
"Test of basic array creation and properties in 1 dimension."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.failIf(isMaskedArray(x))
self.failUnless(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual(xm.dtype, x.dtype)
self.assertEqual( xm.size , reduce(lambda x,y:x*y, s))
self.assertEqual(count(xm) , len(m1) - reduce(lambda x,y:x+y, m1))
self.failUnless(eq(xm, xf))
self.failUnless(eq(filled(xm, 1.e20), xf))
self.failUnless(eq(x, xm))
def test_testBasic2d(self):
"Test of basic array creation and properties in 2 dimensions."
for s in [(4,3), (6,2)]:
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
x.shape = s
y.shape = s
xm.shape = s
ym.shape = s
xf.shape = s
self.failIf(isMaskedArray(x))
self.failUnless(isMaskedArray(xm))
self.assertEqual(shape(xm), s)
self.assertEqual(xm.shape, s)
self.assertEqual( xm.size , reduce(lambda x,y:x*y, s))
self.assertEqual( count(xm) , len(m1) - reduce(lambda x,y:x+y, m1))
self.failUnless(eq(xm, xf))
self.failUnless(eq(filled(xm, 1.e20), xf))
self.failUnless(eq(x, xm))
self.setUp()
def test_testArithmetic (self):
"Test of basic arithmetic."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
a2d = array([[1,2],[0,4]])
a2dm = masked_array(a2d, [[0,0],[1,0]])
self.failUnless(eq (a2d * a2d, a2d * a2dm))
self.failUnless(eq (a2d + a2d, a2d + a2dm))
self.failUnless(eq (a2d - a2d, a2d - a2dm))
for s in [(12,), (4,3), (2,6)]:
x = x.reshape(s)
y = y.reshape(s)
xm = xm.reshape(s)
ym = ym.reshape(s)
xf = xf.reshape(s)
self.failUnless(eq(-x, -xm))
self.failUnless(eq(x + y, xm + ym))
self.failUnless(eq(x - y, xm - ym))
self.failUnless(eq(x * y, xm * ym))
olderr = numpy.seterr(divide='ignore', invalid='ignore')
self.failUnless(eq(x / y, xm / ym))
numpy.seterr(**olderr)
self.failUnless(eq(a10 + y, a10 + ym))
self.failUnless(eq(a10 - y, a10 - ym))
self.failUnless(eq(a10 * y, a10 * ym))
olderr = numpy.seterr(divide='ignore', invalid='ignore')
self.failUnless(eq(a10 / y, a10 / ym))
numpy.seterr(**olderr)
self.failUnless(eq(x + a10, xm + a10))
self.failUnless(eq(x - a10, xm - a10))
self.failUnless(eq(x * a10, xm * a10))
self.failUnless(eq(x / a10, xm / a10))
self.failUnless(eq(x**2, xm**2))
self.failUnless(eq(abs(x)**2.5, abs(xm) **2.5))
self.failUnless(eq(x**y, xm**ym))
self.failUnless(eq(numpy.add(x,y), add(xm, ym)))
self.failUnless(eq(numpy.subtract(x,y), subtract(xm, ym)))
self.failUnless(eq(numpy.multiply(x,y), multiply(xm, ym)))
olderr = numpy.seterr(divide='ignore', invalid='ignore')
self.failUnless(eq(numpy.divide(x,y), divide(xm, ym)))
numpy.seterr(**olderr)
def test_testMixedArithmetic(self):
na = numpy.array([1])
ma = array([1])
self.failUnless(isinstance(na + ma, MaskedArray))
self.failUnless(isinstance(ma + na, MaskedArray))
def test_testUfuncs1 (self):
"Test various functions such as sin, cos."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.failUnless (eq(numpy.cos(x), cos(xm)))
self.failUnless (eq(numpy.cosh(x), cosh(xm)))
self.failUnless (eq(numpy.sin(x), sin(xm)))
self.failUnless (eq(numpy.sinh(x), sinh(xm)))
self.failUnless (eq(numpy.tan(x), tan(xm)))
self.failUnless (eq(numpy.tanh(x), tanh(xm)))
olderr = numpy.seterr(divide='ignore', invalid='ignore')
self.failUnless (eq(numpy.sqrt(abs(x)), sqrt(xm)))
self.failUnless (eq(numpy.log(abs(x)), log(xm)))
self.failUnless (eq(numpy.log10(abs(x)), log10(xm)))
numpy.seterr(**olderr)
self.failUnless (eq(numpy.exp(x), exp(xm)))
self.failUnless (eq(numpy.arcsin(z), arcsin(zm)))
self.failUnless (eq(numpy.arccos(z), arccos(zm)))
self.failUnless (eq(numpy.arctan(z), arctan(zm)))
self.failUnless (eq(numpy.arctan2(x, y), arctan2(xm, ym)))
self.failUnless (eq(numpy.absolute(x), absolute(xm)))
self.failUnless (eq(numpy.equal(x,y), equal(xm, ym)))
self.failUnless (eq(numpy.not_equal(x,y), not_equal(xm, ym)))
self.failUnless (eq(numpy.less(x,y), less(xm, ym)))
self.failUnless (eq(numpy.greater(x,y), greater(xm, ym)))
self.failUnless (eq(numpy.less_equal(x,y), less_equal(xm, ym)))
self.failUnless (eq(numpy.greater_equal(x,y), greater_equal(xm, ym)))
self.failUnless (eq(numpy.conjugate(x), conjugate(xm)))
self.failUnless (eq(numpy.concatenate((x,y)), concatenate((xm,ym))))
self.failUnless (eq(numpy.concatenate((x,y)), concatenate((x,y))))
self.failUnless (eq(numpy.concatenate((x,y)), concatenate((xm,y))))
self.failUnless (eq(numpy.concatenate((x,y,x)), concatenate((x,ym,x))))
def test_xtestCount (self):
"Test count"
ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
self.failUnless( isinstance(count(ott), types.IntType))
self.assertEqual(3, count(ott))
self.assertEqual(1, count(1))
self.failUnless (eq(0, array(1,mask=[1])))
ott=ott.reshape((2,2))
assert isinstance(count(ott,0),numpy.ndarray)
assert isinstance(count(ott), types.IntType)
self.failUnless (eq(3, count(ott)))
assert getmask(count(ott,0)) is nomask
self.failUnless (eq([1,2],count(ott,0)))
def test_testMinMax (self):
"Test minimum and maximum."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
xr = numpy.ravel(x) #max doesn't work if shaped
xmr = ravel(xm)
#true because of careful selection of data
self.failUnless(eq(max(xr), maximum(xmr)))
#true because of careful selection of data
self.failUnless(eq(min(xr), minimum(xmr)))
def test_testAddSumProd (self):
"Test add, sum, product."
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
self.failUnless (eq(numpy.add.reduce(x), add.reduce(x)))
self.failUnless (eq(numpy.add.accumulate(x), add.accumulate(x)))
self.failUnless (eq(4, sum(array(4),axis=0)))
self.failUnless (eq(4, sum(array(4), axis=0)))
self.failUnless (eq(numpy.sum(x,axis=0), sum(x,axis=0)))
self.failUnless (eq(numpy.sum(filled(xm,0),axis=0), sum(xm,axis=0)))
self.failUnless (eq(numpy.sum(x,0), sum(x,0)))
self.failUnless (eq(numpy.product(x,axis=0), product(x,axis=0)))
self.failUnless (eq(numpy.product(x,0), product(x,0)))
self.failUnless (eq(numpy.product(filled(xm,1),axis=0),
product(xm,axis=0)))
if len(s) > 1:
self.failUnless (eq(numpy.concatenate((x,y),1),
concatenate((xm,ym),1)))
self.failUnless (eq(numpy.add.reduce(x,1), add.reduce(x,1)))
self.failUnless (eq(numpy.sum(x,1), sum(x,1)))
self.failUnless (eq(numpy.product(x,1), product(x,1)))
def test_testCI(self):
"Test of conversions and indexing"
x1 = numpy.array([1,2,4,3])
x2 = array(x1, mask = [1,0,0,0])
x3 = array(x1, mask = [0,1,0,1])
x4 = array(x1)
# test conversion to strings
junk, garbage = str(x2), repr(x2)
assert eq(numpy.sort(x1),sort(x2, fill_value=0))
# tests of indexing
assert type(x2[1]) is type(x1[1])
assert x1[1] == x2[1]
assert x2[0] is masked
assert eq(x1[2],x2[2])
assert eq(x1[2:5],x2[2:5])
assert eq(x1[:],x2[:])
assert eq(x1[1:], x3[1:])
x1[2]=9
x2[2]=9
assert eq(x1,x2)
x1[1:3] = 99
x2[1:3] = 99
assert eq(x1,x2)
x2[1] = masked
assert eq(x1,x2)
x2[1:3]=masked
assert eq(x1,x2)
x2[:] = x1
x2[1] = masked
assert allequal(getmask(x2),array([0,1,0,0]))
x3[:] = masked_array([1,2,3,4],[0,1,1,0])
assert allequal(getmask(x3), array([0,1,1,0]))
x4[:] = masked_array([1,2,3,4],[0,1,1,0])
assert allequal(getmask(x4), array([0,1,1,0]))
assert allequal(x4, array([1,2,3,4]))
x1 = numpy.arange(5)*1.0
x2 = masked_values(x1, 3.0)
assert eq(x1,x2)
assert allequal(array([0,0,0,1,0],MaskType), x2.mask)
assert eq(3.0, x2.fill_value)
x1 = array([1,'hello',2,3],object)
x2 = numpy.array([1,'hello',2,3],object)
s1 = x1[1]
s2 = x2[1]
self.assertEqual(type(s2), str)
self.assertEqual(type(s1), str)
self.assertEqual(s1, s2)
assert x1[1:1].shape == (0,)
def test_testCopySize(self):
"Tests of some subtle points of copying and sizing."
n = [0,0,1,0,0]
m = make_mask(n)
m2 = make_mask(m)
self.failUnless(m is m2)
m3 = make_mask(m, copy=1)
self.failUnless(m is not m3)
x1 = numpy.arange(5)
y1 = array(x1, mask=m)
self.failUnless( y1._data is not x1)
self.failUnless( allequal(x1,y1._data))
self.failUnless( y1.mask is m)
y1a = array(y1, copy=0)
self.failUnless( y1a.mask is y1.mask)
y2 = array(x1, mask=m, copy=0)
self.failUnless( y2.mask is m)
self.failUnless( y2[2] is masked)
y2[2]=9
self.failUnless( y2[2] is not masked)
self.failUnless( y2.mask is not m)
self.failUnless( allequal(y2.mask, 0))
y3 = array(x1*1.0, mask=m)
self.failUnless(filled(y3).dtype is (x1*1.0).dtype)
x4 = arange(4)
x4[2] = masked
y4 = resize(x4, (8,))
self.failUnless( eq(concatenate([x4,x4]), y4))
self.failUnless( eq(getmask(y4),[0,0,1,0,0,0,1,0]))
y5 = repeat(x4, (2,2,2,2), axis=0)
self.failUnless( eq(y5, [0,0,1,1,2,2,3,3]))
y6 = repeat(x4, 2, axis=0)
self.failUnless( eq(y5, y6))
def test_testPut(self):
"Test of put"
d = arange(5)
n = [0,0,0,1,1]
m = make_mask(n)
x = array(d, mask = m)
self.failUnless( x[3] is masked)
self.failUnless( x[4] is masked)
x[[1,4]] = [10,40]
self.failUnless( x.mask is not m)
self.failUnless( x[3] is masked)
self.failUnless( x[4] is not masked)
self.failUnless( eq(x, [0,10,2,-1,40]))
x = array(d, mask = m)
x.put([0,1,2],[-1,100,200])
self.failUnless( eq(x, [-1,100,200,0,0]))
self.failUnless( x[3] is masked)
self.failUnless( x[4] is masked)
def test_testMaPut(self):
(x, y, a10, m1, m2, xm, ym, z, zm, xf, s) = self.d
m = [1, 0, 0, 0, 0, 0, 1, 0, 0, 1, 0, 1]
i = numpy.nonzero(m)[0]
put(ym, i, zm)
assert all(take(ym, i, axis=0) == zm)
def test_testOddFeatures(self):
"Test of other odd features"
x = arange(20); x=x.reshape(4,5)
x.flat[5] = 12
assert x[1,0] == 12
z = x + 10j * x
assert eq(z.real, x)
assert eq(z.imag, 10*x)
assert eq((z*conjugate(z)).real, 101*x*x)
z.imag[...] = 0.0
x = arange(10)
x[3] = masked
assert str(x[3]) == str(masked)
c = x >= 8
assert count(where(c,masked,masked)) == 0
assert shape(where(c,masked,masked)) == c.shape
z = where(c , x, masked)
assert z.dtype is x.dtype
assert z[3] is masked
assert z[4] is masked
assert z[7] is masked
assert z[8] is not masked
assert z[9] is not masked
assert eq(x,z)
z = where(c , masked, x)
assert z.dtype is x.dtype
assert z[3] is masked
assert z[4] is not masked
assert z[7] is not masked
assert z[8] is masked
assert z[9] is masked
z = masked_where(c, x)
assert z.dtype is x.dtype
assert z[3] is masked
assert z[4] is not masked
assert z[7] is not masked
assert z[8] is masked
assert z[9] is masked
assert eq(x,z)
x = array([1.,2.,3.,4.,5.])
c = array([1,1,1,0,0])
x[2] = masked
z = where(c, x, -x)
assert eq(z, [1.,2.,0., -4., -5])
c[0] = masked
z = where(c, x, -x)
assert eq(z, [1.,2.,0., -4., -5])
assert z[0] is masked
assert z[1] is not masked
assert z[2] is masked
assert eq(masked_where(greater(x, 2), x), masked_greater(x,2))
assert eq(masked_where(greater_equal(x, 2), x),
masked_greater_equal(x,2))
assert eq(masked_where(less(x, 2), x), masked_less(x,2))
assert eq(masked_where(less_equal(x, 2), x), masked_less_equal(x,2))
assert eq(masked_where(not_equal(x, 2), x), masked_not_equal(x,2))
assert eq(masked_where(equal(x, 2), x), masked_equal(x,2))
assert eq(masked_where(not_equal(x,2), x), masked_not_equal(x,2))
assert eq(masked_inside(range(5), 1, 3), [0, 199, 199, 199, 4])
assert eq(masked_outside(range(5), 1, 3),[199,1,2,3,199])
assert eq(masked_inside(array(range(5), mask=[1,0,0,0,0]), 1, 3).mask,
[1,1,1,1,0])
assert eq(masked_outside(array(range(5), mask=[0,1,0,0,0]), 1, 3).mask,
[1,1,0,0,1])
assert eq(masked_equal(array(range(5), mask=[1,0,0,0,0]), 2).mask,
[1,0,1,0,0])
assert eq(masked_not_equal(array([2,2,1,2,1], mask=[1,0,0,0,0]), 2).mask,
[1,0,1,0,1])
assert eq(masked_where([1,1,0,0,0], [1,2,3,4,5]), [99,99,3,4,5])
atest = ones((10,10,10), dtype=float32)
btest = zeros(atest.shape, MaskType)
ctest = masked_where(btest,atest)
assert eq(atest,ctest)
z = choose(c, (-x, x))
assert eq(z, [1.,2.,0., -4., -5])
assert z[0] is masked
assert z[1] is not masked
assert z[2] is masked
x = arange(6)
x[5] = masked
y = arange(6)*10
y[2]= masked
c = array([1,1,1,0,0,0], mask=[1,0,0,0,0,0])
cm = c.filled(1)
z = where(c,x,y)
zm = where(cm,x,y)
assert eq(z, zm)
assert getmask(zm) is nomask
assert eq(zm, [0,1,2,30,40,50])
z = where(c, masked, 1)
assert eq(z, [99,99,99,1,1,1])
z = where(c, 1, masked)
assert eq(z, [99, 1, 1, 99, 99, 99])
def test_testMinMax(self):
"Test of minumum, maximum."
assert eq(minimum([1,2,3],[4,0,9]), [1,0,3])
assert eq(maximum([1,2,3],[4,0,9]), [4,2,9])
x = arange(5)
y = arange(5) - 2
x[3] = masked
y[0] = masked
assert eq(minimum(x,y), where(less(x,y), x, y))
assert eq(maximum(x,y), where(greater(x,y), x, y))
assert minimum(x) == 0
assert maximum(x) == 4
def test_testTakeTransposeInnerOuter(self):
"Test of take, transpose, inner, outer products"
x = arange(24)
y = numpy.arange(24)
x[5:6] = masked
x=x.reshape(2,3,4)
y=y.reshape(2,3,4)
assert eq(numpy.transpose(y,(2,0,1)), transpose(x,(2,0,1)))
assert eq(numpy.take(y, (2,0,1), 1), take(x, (2,0,1), 1))
assert eq(numpy.inner(filled(x,0),filled(y,0)),
inner(x, y))
assert eq(numpy.outer(filled(x,0),filled(y,0)),
outer(x, y))
y = array(['abc', 1, 'def', 2, 3], object)
y[2] = masked
t = take(y,[0,3,4])
assert t[0] == 'abc'
assert t[1] == 2
assert t[2] == 3
def test_testInplace(self):
"""Test of inplace operations and rich comparisons"""
y = arange(10)
x = arange(10)
xm = arange(10)
xm[2] = masked
x += 1
assert eq(x, y+1)
xm += 1
assert eq(x, y+1)
x = arange(10)
xm = arange(10)
xm[2] = masked
x -= 1
assert eq(x, y-1)
xm -= 1
assert eq(xm, y-1)
x = arange(10)*1.0
xm = arange(10)*1.0
xm[2] = masked
x *= 2.0
assert eq(x, y*2)
xm *= 2.0
assert eq(xm, y*2)
x = arange(10)*2
xm = arange(10)
xm[2] = masked
x /= 2
assert eq(x, y)
xm /= 2
assert eq(x, y)
x = arange(10)*1.0
xm = arange(10)*1.0
xm[2] = masked
x /= 2.0
assert eq(x, y/2.0)
xm /= arange(10)
assert eq(xm, ones((10,)))
x = arange(10).astype(float32)
xm = arange(10)
xm[2] = masked
x += 1.
assert eq(x, y+1.)
def test_testPickle(self):
"Test of pickling"
import pickle
x = arange(12)
x[4:10:2] = masked
x = x.reshape(4,3)
s = pickle.dumps(x)
y = pickle.loads(s)
assert eq(x,y)
def test_testMasked(self):
"Test of masked element"
xx=arange(6)
xx[1] = masked
self.failUnless(str(masked) == '--')
self.failUnless(xx[1] is masked)
self.failUnlessEqual(filled(xx[1], 0), 0)
# don't know why these should raise an exception...
#self.failUnlessRaises(Exception, lambda x,y: x+y, masked, masked)
#self.failUnlessRaises(Exception, lambda x,y: x+y, masked, 2)
#self.failUnlessRaises(Exception, lambda x,y: x+y, masked, xx)
#self.failUnlessRaises(Exception, lambda x,y: x+y, xx, masked)
def test_testAverage1(self):
"Test of average."
ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
self.failUnless(eq(2.0, average(ott,axis=0)))
self.failUnless(eq(2.0, average(ott, weights=[1., 1., 2., 1.])))
result, wts = average(ott, weights=[1.,1.,2.,1.], returned=1)
self.failUnless(eq(2.0, result))
self.failUnless(wts == 4.0)
ott[:] = masked
self.failUnless(average(ott,axis=0) is masked)
ott = array([0.,1.,2.,3.], mask=[1,0,0,0])
ott=ott.reshape(2,2)
ott[:,1] = masked
self.failUnless(eq(average(ott,axis=0), [2.0, 0.0]))
self.failUnless(average(ott,axis=1)[0] is masked)
self.failUnless(eq([2.,0.], average(ott, axis=0)))
result, wts = average(ott, axis=0, returned=1)
self.failUnless(eq(wts, [1., 0.]))
def test_testAverage2(self):
"More tests of average."
w1 = [0,1,1,1,1,0]
w2 = [[0,1,1,1,1,0],[1,0,0,0,0,1]]
x=arange(6)
self.failUnless(allclose(average(x, axis=0), 2.5))
self.failUnless(allclose(average(x, axis=0, weights=w1), 2.5))
y=array([arange(6), 2.0*arange(6)])
self.failUnless(allclose(average(y, None),
numpy.add.reduce(numpy.arange(6))*3./12.))
self.failUnless(allclose(average(y, axis=0), numpy.arange(6) * 3./2.))
self.failUnless(allclose(average(y, axis=1),
[average(x,axis=0), average(x,axis=0) * 2.0]))
self.failUnless(allclose(average(y, None, weights=w2), 20./6.))
self.failUnless(allclose(average(y, axis=0, weights=w2),
[0.,1.,2.,3.,4.,10.]))
self.failUnless(allclose(average(y, axis=1),
[average(x,axis=0), average(x,axis=0) * 2.0]))
m1 = zeros(6)
m2 = [0,0,1,1,0,0]
m3 = [[0,0,1,1,0,0],[0,1,1,1,1,0]]
m4 = ones(6)
m5 = [0, 1, 1, 1, 1, 1]
self.failUnless(allclose(average(masked_array(x, m1),axis=0), 2.5))
self.failUnless(allclose(average(masked_array(x, m2),axis=0), 2.5))
self.failUnless(average(masked_array(x, m4),axis=0) is masked)
self.assertEqual(average(masked_array(x, m5),axis=0), 0.0)
self.assertEqual(count(average(masked_array(x, m4),axis=0)), 0)
z = masked_array(y, m3)
self.failUnless(allclose(average(z, None), 20./6.))
self.failUnless(allclose(average(z, axis=0), [0.,1.,99.,99.,4.0, 7.5]))
self.failUnless(allclose(average(z, axis=1), [2.5, 5.0]))
self.failUnless(allclose( average(z,axis=0, weights=w2),
[0.,1., 99., 99., 4.0, 10.0]))
a = arange(6)
b = arange(6) * 3
r1, w1 = average([[a,b],[b,a]], axis=1, returned=1)
self.assertEqual(shape(r1) , shape(w1))
self.assertEqual(r1.shape , w1.shape)
r2, w2 = average(ones((2,2,3)), axis=0, weights=[3,1], returned=1)
self.assertEqual(shape(w2) , shape(r2))
r2, w2 = average(ones((2,2,3)), returned=1)
self.assertEqual(shape(w2) , shape(r2))
r2, w2 = average(ones((2,2,3)), weights=ones((2,2,3)), returned=1)
self.failUnless(shape(w2) == shape(r2))
a2d = array([[1,2],[0,4]], float)
a2dm = masked_array(a2d, [[0,0],[1,0]])
a2da = average(a2d, axis=0)
self.failUnless(eq (a2da, [0.5, 3.0]))
a2dma = average(a2dm, axis=0)
self.failUnless(eq( a2dma, [1.0, 3.0]))
a2dma = average(a2dm, axis=None)
self.failUnless(eq(a2dma, 7./3.))
a2dma = average(a2dm, axis=1)
self.failUnless(eq(a2dma, [1.5, 4.0]))
def test_testToPython(self):
self.assertEqual(1, int(array(1)))
self.assertEqual(1.0, float(array(1)))
self.assertEqual(1, int(array([[[1]]])))
self.assertEqual(1.0, float(array([[1]])))
self.failUnlessRaises(TypeError, float, array([1,1]))
self.failUnlessRaises(ValueError, bool, array([0,1]))
self.failUnlessRaises(ValueError, bool, array([0,0],mask=[0,1]))
def test_testScalarArithmetic(self):
xm = array(0, mask=1)
self.failUnless((1/array(0)).mask)
self.failUnless((1 + xm).mask)
self.failUnless((-xm).mask)
self.failUnless((-xm).mask)
self.failUnless(maximum(xm, xm).mask)
self.failUnless(minimum(xm, xm).mask)
self.failUnless(xm.filled().dtype is xm._data.dtype)
x = array(0, mask=0)
self.failUnless(x.filled() == x._data)
self.failUnlessEqual(str(xm), str(masked_print_option))
def test_testArrayMethods(self):
a = array([1,3,2])
b = array([1,3,2], mask=[1,0,1])
self.failUnless(eq(a.any(), a._data.any()))
self.failUnless(eq(a.all(), a._data.all()))
self.failUnless(eq(a.argmax(), a._data.argmax()))
self.failUnless(eq(a.argmin(), a._data.argmin()))
self.failUnless(eq(a.choose(0,1,2,3,4), a._data.choose(0,1,2,3,4)))
self.failUnless(eq(a.compress([1,0,1]), a._data.compress([1,0,1])))
self.failUnless(eq(a.conj(), a._data.conj()))
self.failUnless(eq(a.conjugate(), a._data.conjugate()))
m = array([[1,2],[3,4]])
self.failUnless(eq(m.diagonal(), m._data.diagonal()))
self.failUnless(eq(a.sum(), a._data.sum()))
self.failUnless(eq(a.take([1,2]), a._data.take([1,2])))
self.failUnless(eq(m.transpose(), m._data.transpose()))
def test_testArrayAttributes(self):
a = array([1,3,2])
b = array([1,3,2], mask=[1,0,1])
self.failUnlessEqual(a.ndim, 1)
def test_testAPI(self):
self.failIf([m for m in dir(numpy.ndarray)
if m not in dir(MaskedArray) and not m.startswith('_')])
def test_testSingleElementSubscript(self):
a = array([1,3,2])
b = array([1,3,2], mask=[1,0,1])
self.failUnlessEqual(a[0].shape, ())
self.failUnlessEqual(b[0].shape, ())
self.failUnlessEqual(b[1].shape, ())
class TestUfuncs(TestCase):
def setUp(self):
self.d = (array([1.0, 0, -1, pi/2]*2, mask=[0,1]+[0]*6),
array([1.0, 0, -1, pi/2]*2, mask=[1,0]+[0]*6),)
def test_testUfuncRegression(self):
for f in ['sqrt', 'log', 'log10', 'exp', 'conjugate',
'sin', 'cos', 'tan',
'arcsin', 'arccos', 'arctan',
'sinh', 'cosh', 'tanh',
'arcsinh',
'arccosh',
'arctanh',
'absolute', 'fabs', 'negative',
# 'nonzero', 'around',
'floor', 'ceil',
# 'sometrue', 'alltrue',
'logical_not',
'add', 'subtract', 'multiply',
'divide', 'true_divide', 'floor_divide',
'remainder', 'fmod', 'hypot', 'arctan2',
'equal', 'not_equal', 'less_equal', 'greater_equal',
'less', 'greater',
'logical_and', 'logical_or', 'logical_xor',
]:
try:
uf = getattr(umath, f)
except AttributeError:
uf = getattr(fromnumeric, f)
mf = getattr(numpy.ma, f)
args = self.d[:uf.nin]
olderr = numpy.geterr()
f_invalid_ignore = ['sqrt', 'arctanh', 'arcsin', 'arccos',
'arccosh', 'arctanh', 'log', 'log10','divide',
'true_divide', 'floor_divide', 'remainder',
'fmod']
if f in f_invalid_ignore:
numpy.seterr(invalid='ignore')
if f in ['arctanh', 'log', 'log10']:
numpy.seterr(divide='ignore')
ur = uf(*args)
mr = mf(*args)
numpy.seterr(**olderr)
self.failUnless(eq(ur.filled(0), mr.filled(0), f))
self.failUnless(eqmask(ur.mask, mr.mask))
def test_reduce(self):
a = self.d[0]
self.failIf(alltrue(a,axis=0))
self.failUnless(sometrue(a,axis=0))
self.failUnlessEqual(sum(a[:3],axis=0), 0)
self.failUnlessEqual(product(a,axis=0), 0)
def test_minmax(self):
a = arange(1,13).reshape(3,4)
amask = masked_where(a < 5,a)
self.failUnlessEqual(amask.max(), a.max())
self.failUnlessEqual(amask.min(), 5)
self.failUnless((amask.max(0) == a.max(0)).all())
self.failUnless((amask.min(0) == [5,6,7,8]).all())
self.failUnless(amask.max(1)[0].mask)
self.failUnless(amask.min(1)[0].mask)
def test_nonzero(self):
for t in "?bhilqpBHILQPfdgFDGO":
x = array([1,0,2,0], mask=[0,0,1,1])
self.failUnless(eq(nonzero(x), [0]))
class TestArrayMethods(TestCase):
def setUp(self):
x = numpy.array([ 8.375, 7.545, 8.828, 8.5 , 1.757, 5.928,
8.43 , 7.78 , 9.865, 5.878, 8.979, 4.732,
3.012, 6.022, 5.095, 3.116, 5.238, 3.957,
6.04 , 9.63 , 7.712, 3.382, 4.489, 6.479,
7.189, 9.645, 5.395, 4.961, 9.894, 2.893,
7.357, 9.828, 6.272, 3.758, 6.693, 0.993])
X = x.reshape(6,6)
XX = x.reshape(3,2,2,3)
m = numpy.array([0, 1, 0, 1, 0, 0,
1, 0, 1, 1, 0, 1,
0, 0, 0, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 0, 0,
0, 0, 1, 0, 1, 0])
mx = array(data=x,mask=m)
mX = array(data=X,mask=m.reshape(X.shape))
mXX = array(data=XX,mask=m.reshape(XX.shape))
m2 = numpy.array([1, 1, 0, 1, 0, 0,
1, 1, 1, 1, 0, 1,
0, 0, 1, 1, 0, 1,
0, 0, 0, 1, 1, 1,
1, 0, 0, 1, 1, 0,
0, 0, 1, 0, 1, 1])
m2x = array(data=x,mask=m2)
m2X = array(data=X,mask=m2.reshape(X.shape))
m2XX = array(data=XX,mask=m2.reshape(XX.shape))
self.d = (x,X,XX,m,mx,mX,mXX)
#------------------------------------------------------
def test_trace(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
mXdiag = mX.diagonal()
self.assertEqual(mX.trace(), mX.diagonal().compressed().sum())
self.failUnless(eq(mX.trace(),
X.trace() - sum(mXdiag.mask*X.diagonal(),axis=0)))
def test_clip(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
clipped = mx.clip(2,8)
self.failUnless(eq(clipped.mask,mx.mask))
self.failUnless(eq(clipped._data,x.clip(2,8)))
self.failUnless(eq(clipped._data,mx._data.clip(2,8)))
def test_ptp(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
(n,m) = X.shape
self.assertEqual(mx.ptp(),mx.compressed().ptp())
rows = numpy.zeros(n,numpy.float_)
cols = numpy.zeros(m,numpy.float_)
for k in range(m):
cols[k] = mX[:,k].compressed().ptp()
for k in range(n):
rows[k] = mX[k].compressed().ptp()
self.failUnless(eq(mX.ptp(0),cols))
self.failUnless(eq(mX.ptp(1),rows))
def test_swapaxes(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
mXswapped = mX.swapaxes(0,1)
self.failUnless(eq(mXswapped[-1],mX[:,-1]))
mXXswapped = mXX.swapaxes(0,2)
self.assertEqual(mXXswapped.shape,(2,2,3,3))
def test_cumprod(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
mXcp = mX.cumprod(0)
self.failUnless(eq(mXcp._data,mX.filled(1).cumprod(0)))
mXcp = mX.cumprod(1)
self.failUnless(eq(mXcp._data,mX.filled(1).cumprod(1)))
def test_cumsum(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
mXcp = mX.cumsum(0)
self.failUnless(eq(mXcp._data,mX.filled(0).cumsum(0)))
mXcp = mX.cumsum(1)
self.failUnless(eq(mXcp._data,mX.filled(0).cumsum(1)))
def test_varstd(self):
(x,X,XX,m,mx,mX,mXX,) = self.d
self.failUnless(eq(mX.var(axis=None),mX.compressed().var()))
self.failUnless(eq(mX.std(axis=None),mX.compressed().std()))
self.failUnless(eq(mXX.var(axis=3).shape,XX.var(axis=3).shape))
self.failUnless(eq(mX.var().shape,X.var().shape))
(mXvar0,mXvar1) = (mX.var(axis=0), mX.var(axis=1))
for k in range(6):
self.failUnless(eq(mXvar1[k],mX[k].compressed().var()))
self.failUnless(eq(mXvar0[k],mX[:,k].compressed().var()))
self.failUnless(eq(numpy.sqrt(mXvar0[k]),
mX[:,k].compressed().std()))
def eqmask(m1, m2):
if m1 is nomask:
return m2 is nomask
if m2 is nomask:
return m1 is nomask
return (m1 == m2).all()
#def timingTest():
# for f in [testf, testinplace]:
# for n in [1000,10000,50000]:
# t = testta(n, f)
# t1 = testtb(n, f)
# t2 = testtc(n, f)
# print f.test_name
# print """\
#n = %7d
#numpy time (ms) %6.1f
#MA maskless ratio %6.1f
#MA masked ratio %6.1f
#""" % (n, t*1000.0, t1/t, t2/t)
#def testta(n, f):
# x=numpy.arange(n) + 1.0
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
#def testtb(n, f):
# x=arange(n) + 1.0
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
#def testtc(n, f):
# x=arange(n) + 1.0
# x[0] = masked
# tn0 = time.time()
# z = f(x)
# return time.time() - tn0
#def testf(x):
# for i in range(25):
# y = x **2 + 2.0 * x - 1.0
# w = x **2 + 1.0
# z = (y / w) ** 2
# return z
#testf.test_name = 'Simple arithmetic'
#def testinplace(x):
# for i in range(25):
# y = x**2
# y += 2.0*x
# y -= 1.0
# y /= x
# return y
#testinplace.test_name = 'Inplace operations'
if __name__ == "__main__":
run_module_suite()
|
|
"""
exec_command
Implements exec_command function that is (almost) equivalent to
commands.getstatusoutput function but on NT, DOS systems the
returned status is actually correct (though, the returned status
values may be different by a factor). In addition, exec_command
takes keyword arguments for (re-)defining environment variables.
Provides functions:
exec_command --- execute command in a specified directory and
in the modified environment.
find_executable --- locate a command using info from environment
variable PATH. Equivalent to posix `which`
command.
Author: Pearu Peterson <[email protected]>
Created: 11 January 2003
Requires: Python 2.x
Successfully tested on:
======== ============ =================================================
os.name sys.platform comments
======== ============ =================================================
posix linux2 Debian (sid) Linux, Python 2.1.3+, 2.2.3+, 2.3.3
PyCrust 0.9.3, Idle 1.0.2
posix linux2 Red Hat 9 Linux, Python 2.1.3, 2.2.2, 2.3.2
posix sunos5 SunOS 5.9, Python 2.2, 2.3.2
posix darwin Darwin 7.2.0, Python 2.3
nt win32 Windows Me
Python 2.3(EE), Idle 1.0, PyCrust 0.7.2
Python 2.1.1 Idle 0.8
nt win32 Windows 98, Python 2.1.1. Idle 0.8
nt win32 Cygwin 98-4.10, Python 2.1.1(MSC) - echo tests
fail i.e. redefining environment variables may
not work. FIXED: don't use cygwin echo!
Comment: also `cmd /c echo` will not work
but redefining environment variables do work.
posix cygwin Cygwin 98-4.10, Python 2.3.3(cygming special)
nt win32 Windows XP, Python 2.3.3
======== ============ =================================================
Known bugs:
* Tests, that send messages to stderr, fail when executed from MSYS prompt
because the messages are lost at some point.
"""
from __future__ import division, absolute_import, print_function
__all__ = ['exec_command', 'find_executable']
import os
import sys
import subprocess
import locale
import warnings
from numpy.distutils.misc_util import is_sequence, make_temp_file
from numpy.distutils import log
def filepath_from_subprocess_output(output):
"""
Convert `bytes` in the encoding used by a subprocess into a filesystem-appropriate `str`.
Inherited from `exec_command`, and possibly incorrect.
"""
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
output = output.decode(mylocale, errors='replace')
output = output.replace('\r\n', '\n')
# Another historical oddity
if output[-1:] == '\n':
output = output[:-1]
# stdio uses bytes in python 2, so to avoid issues, we simply
# remove all non-ascii characters
if sys.version_info < (3, 0):
output = output.encode('ascii', errors='replace')
return output
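# Illustrative example (not from the original source), assuming a UTF-8 locale:
#     filepath_from_subprocess_output(b"/usr/bin/gcc\n")  ->  "/usr/bin/gcc"
# (trailing newline stripped, CRLF normalised, undecodable bytes replaced)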
def forward_bytes_to_stdout(val):
"""
Forward bytes from a subprocess call to the console, without attempting to
decode them.
The assumption is that the subprocess call already returned bytes in
a suitable encoding.
"""
if sys.version_info.major < 3:
# python 2 has binary output anyway
sys.stdout.write(val)
elif hasattr(sys.stdout, 'buffer'):
# use the underlying binary output if there is one
sys.stdout.buffer.write(val)
elif hasattr(sys.stdout, 'encoding'):
# round-trip the encoding if necessary
sys.stdout.write(val.decode(sys.stdout.encoding))
else:
# make a best-guess at the encoding
sys.stdout.write(val.decode('utf8', errors='replace'))
def temp_file_name():
# 2019-01-30, 1.17
warnings.warn('temp_file_name is deprecated since NumPy v1.17, use '
'tempfile.mkstemp instead', DeprecationWarning, stacklevel=1)
fo, name = make_temp_file()
fo.close()
return name
def get_pythonexe():
pythonexe = sys.executable
if os.name in ['nt', 'dos']:
fdir, fn = os.path.split(pythonexe)
fn = fn.upper().replace('PYTHONW', 'PYTHON')
pythonexe = os.path.join(fdir, fn)
assert os.path.isfile(pythonexe), '%r is not a file' % (pythonexe,)
return pythonexe
def find_executable(exe, path=None, _cache={}):
"""Return full path of a executable or None.
Symbolic links are not followed.
"""
key = exe, path
try:
return _cache[key]
except KeyError:
pass
log.debug('find_executable(%r)' % exe)
orig_exe = exe
if path is None:
path = os.environ.get('PATH', os.defpath)
if os.name=='posix':
realpath = os.path.realpath
else:
realpath = lambda a:a
if exe.startswith('"'):
exe = exe[1:-1]
suffixes = ['']
if os.name in ['nt', 'dos', 'os2']:
fn, ext = os.path.splitext(exe)
extra_suffixes = ['.exe', '.com', '.bat']
if ext.lower() not in extra_suffixes:
suffixes = extra_suffixes
if os.path.isabs(exe):
paths = ['']
else:
paths = [ os.path.abspath(p) for p in path.split(os.pathsep) ]
for path in paths:
fn = os.path.join(path, exe)
for s in suffixes:
f_ext = fn+s
if not os.path.islink(f_ext):
f_ext = realpath(f_ext)
if os.path.isfile(f_ext) and os.access(f_ext, os.X_OK):
log.info('Found executable %s' % f_ext)
_cache[key] = f_ext
return f_ext
log.warn('Could not locate executable %s' % orig_exe)
return None
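# Illustrative example (not from the original source); the exact result depends
# on the local PATH:
#     find_executable('gcc')  ->  e.g. '/usr/bin/gcc' on a typical POSIX system,
#                                 or None if no matching executable is found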
############################################################
def _preserve_environment( names ):
log.debug('_preserve_environment(%r)' % (names))
env = {name: os.environ.get(name) for name in names}
return env
def _update_environment( **env ):
log.debug('_update_environment(...)')
for name, value in env.items():
os.environ[name] = value or ''
def exec_command(command, execute_in='', use_shell=None, use_tee=None,
_with_python = 1, **env ):
"""
Return (status,output) of executed command.
.. deprecated:: 1.17
Use subprocess.Popen instead
Parameters
----------
command : str
A concatenated string of executable and arguments.
execute_in : str
Before running command ``cd execute_in`` and after ``cd -``.
use_shell : {bool, None}, optional
If True, execute ``sh -c command``. Default None (True)
use_tee : {bool, None}, optional
If True use tee. Default None (True)
Returns
-------
res : str
Both stdout and stderr messages.
Notes
-----
On NT, DOS systems the returned status is correct for external commands.
Wild cards will not work for non-posix systems or when use_shell=0.
"""
# 2019-01-30, 1.17
warnings.warn('exec_command is deprecated since NumPy v1.17, use '
'subprocess.Popen instead', DeprecationWarning, stacklevel=1)
log.debug('exec_command(%r,%s)' % (command,\
','.join(['%s=%r'%kv for kv in env.items()])))
if use_tee is None:
use_tee = os.name=='posix'
if use_shell is None:
use_shell = os.name=='posix'
execute_in = os.path.abspath(execute_in)
oldcwd = os.path.abspath(os.getcwd())
if __name__[-12:] == 'exec_command':
exec_dir = os.path.dirname(os.path.abspath(__file__))
elif os.path.isfile('exec_command.py'):
exec_dir = os.path.abspath('.')
else:
exec_dir = os.path.abspath(sys.argv[0])
if os.path.isfile(exec_dir):
exec_dir = os.path.dirname(exec_dir)
if oldcwd!=execute_in:
os.chdir(execute_in)
log.debug('New cwd: %s' % execute_in)
else:
log.debug('Retaining cwd: %s' % oldcwd)
oldenv = _preserve_environment( list(env.keys()) )
_update_environment( **env )
try:
st = _exec_command(command,
use_shell=use_shell,
use_tee=use_tee,
**env)
finally:
if oldcwd!=execute_in:
os.chdir(oldcwd)
log.debug('Restored cwd to %s' % oldcwd)
_update_environment(**oldenv)
return st
def _exec_command(command, use_shell=None, use_tee = None, **env):
"""
Internal workhorse for exec_command().
"""
if use_shell is None:
use_shell = os.name=='posix'
if use_tee is None:
use_tee = os.name=='posix'
if os.name == 'posix' and use_shell:
# On POSIX, subprocess always uses /bin/sh, override
sh = os.environ.get('SHELL', '/bin/sh')
if is_sequence(command):
command = [sh, '-c', ' '.join(command)]
else:
command = [sh, '-c', command]
use_shell = False
elif os.name == 'nt' and is_sequence(command):
# On Windows, join the string for CreateProcess() ourselves as
# subprocess does it a bit differently
command = ' '.join(_quote_arg(arg) for arg in command)
# Inherit environment by default
env = env or None
try:
# universal_newlines is set to False so that communicate()
# will return bytes. We need to decode the output ourselves
# so that Python will not raise a UnicodeDecodeError when
# it encounters an invalid character; rather, we simply replace it
proc = subprocess.Popen(command, shell=use_shell, env=env,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
universal_newlines=False)
except EnvironmentError:
# Return 127, as os.spawn*() and /bin/sh do
return 127, ''
text, err = proc.communicate()
mylocale = locale.getpreferredencoding(False)
if mylocale is None:
mylocale = 'ascii'
text = text.decode(mylocale, errors='replace')
text = text.replace('\r\n', '\n')
# Another historical oddity
if text[-1:] == '\n':
text = text[:-1]
# stdio uses bytes in python 2, so to avoid issues, we simply
# remove all non-ascii characters
if sys.version_info < (3, 0):
text = text.encode('ascii', errors='replace')
if use_tee and text:
print(text)
return proc.returncode, text
def _quote_arg(arg):
"""
Quote the argument for safe use in a shell command line.
"""
# If there is a quote in the string, assume relevant parts of the
# string are already quoted (e.g. '-I"C:\\Program Files\\..."')
if '"' not in arg and ' ' in arg:
return '"%s"' % arg
return arg
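# Illustrative examples (not from the original source):
#     _quote_arg('C:\\Program Files\\foo')    ->  '"C:\\Program Files\\foo"'  (space, no quote)
#     _quote_arg('-I"C:\\Program Files\\x"')  ->  unchanged (already contains a quote)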
############################################################
|
|
#!/usr/bin/env python
"""
Reads original Excel spreadsheets from various authors and builds
one new master Excel file.
"""
import sys
import openpyxl
import unicodecsv
columns = [
"collection",
"filename",
"page numbers",
"pages in document",
"main title",
"sub title",
"alt title",
"descriptive title",
"author",
"publication",
"volume",
"issue/number",
"date (month.day/season)",
"year",
"publisher",
"publisher location",
"subjects"
]
def write_master():
csv = unicodecsv.writer(sys.stdout)
csv.writerow(columns)
write_spreadsheet(csv, "spreadsheets/British Spreadsheet--Jennie.xlsx", british_jennie)
write_spreadsheet(csv, "spreadsheets/Nick British scans.xlsx", british_nick)
write_spreadsheet(csv, "spreadsheets/Irish Drama Database.xlsx", irish_drama)
write_spreadsheet(csv, "spreadsheets/Master Conrad Spreadsheet-Final 5-16(PM).xlsx", conrad)
write_spreadsheet(csv, "spreadsheets/Master RAI Database.FINAL.xlsx", rai)
def write_spreadsheet(csv, filename, row_reader):
old_sheet = openpyxl.load_workbook(filename).worksheets[0]
seen_header = False
last_subject = None
for row in old_sheet.rows:
if not seen_header:
seen_header = True
continue
new_row = row_reader(row)
# if there isn't a filename we don't put it in the spreadsheet
if new_row[1] is None:
continue
if new_row[16] in (None, "--", "-"):
new_row[16] = last_subject
csv.writerow(new_row)
last_subject = new_row[16]
"""
British Spreadsheet--Jennie.xlsx (many columns after primary authors blank)
1 unique id
2 file name
3 page numbers
4 pages in document
5 main title
6 sub title
7 alt title
8 descriptive title
9 author
10 placement in source
11 publication
12 volume
13 issue/number
14 part
15 date (month.day/season)
16 year
17 publisher
18 publisher location
19 document type (empty)
20 primary author
21 source
22 date acquired
23 location acquired
24 medium acquired
25 call number
26 supplemental to
27 supplementary documents
28 british or american
"""
def british_jennie(row):
return [
"british", # collection
row[1].value, # filename
row[2].value, # page numbers
row[3].value, # pages in document
row[4].value, # main title
row[5].value, # sub title
row[6].value, # alt title
row[7].value, # descriptive title
row[8].value, # author
row[10].value, # publication
row[11].value, # volume
row[12].value, # issue/number
row[14].value, # date (month.day/season)
row[15].value, # year
row[16].value, # publisher
row[17].value, # publisher location
row[19].value # subject(s)
]
"""
Nick British scans.xlsx
1 file name
2 page numbers
3 pages in document
4 main title
5 sub title
6 alt title
7 descriptive title
8 author
9 placement in source
10 publication
11 volume
12 issue/number
13 date (month.day)
14 year
15 document type (empty)
16 source database
17 date acquired
18 medium acquired
19 picture
20 picture description
21 supplemental to
22 supplementary documents
23 plain text
24 rescan
1 Unique Identifier,
2 File Name,
3 Page #(s),
4 Pages in Document
5 Main Title
6 Sub Title
7 Alt Title
8 Descriptive Title
9 Author
10 Placement in Source
11 Publication
12 Volume
13 Issue/Number
14 Date (Month.Day/Season)
15 Year
16 Publisher
17 Publisher Location
18 Document Type/Genre
19 Source (Database, archive, etc.)
20 Date Acquired
21 Location Acquired
22 Medium Acquired
23 Call Number
24 Picture (Y/N)
25 Picture Description
26 Supplemental to:
27 Supplementary Documents
28 Plain text OCR file name
29 Plain PDF OCR file name
30 PDF/A underlaid text OCR file name
31 British or American (A or B)
32 Spellings of authors' names
33 Authors principally at issue
34 Secondary authors at issue
35 Sentiment analysis, part 1
36 Sentiment analysis, part 2
37 Works mentioned? (Y/N)
38 National identification (Y/N)
39 Style as issue (Y/N)
40 Author's biography (Y/N)
41 Apparent gender of article writer (M/F/U)
42 Foreign place names (Y/N)
43 Gender as issue (Y/N)
44 Race as issue (Y/N)
45 Socioeconomic class as issue (Y/N)
46 Military as issue (Y/N)
47 America invoked: similarity (Y/N)
48 Notes
49 Rescan?
"""
def british_nick(row):
subjects = row[32].value
if subjects and row[33].value:
subjects += " ; " + row[33].value
return [
"british", # collection
row[1].value, # filename
row[2].value, # page numbers
row[3].value, # pages in document
row[4].value, # main title
row[5].value, # sub title
row[6].value, # alt title
row[7].value, # descriptive title
row[8].value, # author
row[10].value, # publication
row[11].value, # volume
row[12].value, # issue/number
row[13].value, # date (month.day/season)
row[14].value, # year
row[15].value, # publisher
row[16].value, # publisher location
subjects # subject(s)
]
"""
Irish Drama Database.xlsx
1 unique id
2 file name
3 page numbers
4 pages in document
5 main title
6 sub title
7 alt title
8 descriptive title
9 author
10 placement in source
11 publication
12 volume
13 issue/number
14 date (month.day)
15 year
16 publisher
17 publisher location
18 document type
19 primary author(s) semicolon delimited
20 source database
21 date acquired
22 location acquired
23 medium acquired
24 call number
"""
def irish_drama(row):
return [
"irish-drama", # collection
row[1].value, # filename
row[2].value, # page numbers
row[3].value, # pages in document
row[4].value, # main title
row[5].value, # sub title
row[6].value, # alt title
row[7].value, # descriptive title
row[8].value, # author
row[10].value, # publication
row[11].value, # volume
row[12].value, # issue/number
row[13].value, # date (month.day/season)
row[14].value, # year
row[15].value, # publisher
row[16].value, # publisher location
row[18].value # subject(s)
]
"""
Master Conrad Spreadsheet-Final 5-16(PM).xlsx
1 unique id
2 file name
3 page numbers
4 pages in document
5 main title
6 sub title
7 alt title
8 descriptive title
9 author
10 placement in source
11 publication
12 volume
13 issue
14 date (month.day/season)
15 year
16 publisher
17 publisher location
18 type (review, essay, etc)
19 picture
20 picture description
21 british (a) or american (b)
22 author at issue
23 secondary authors at issue
24 notes
25 primary descriptor
"""
def conrad(row):
subjects = []
if row[21].value:
subjects.append(row[21].value)
if row[22].value:
secondary = row[22].value
# can have multiple names, separated by commas
if ',' in secondary:
subjects += [s.strip() for s in secondary.split(',')]
else:
subjects.append(secondary)
subjects = ' ; '.join(subjects)
return [
"conrad", # collection
row[1].value, # filename
row[2].value, # page numbers
row[3].value, # pages in document
row[4].value, # main title
row[5].value, # sub title
row[6].value, # alt title
row[7].value, # descriptive title
row[8].value, # author
row[10].value, # publication
row[11].value, # volume
row[12].value, # issue/number
row[13].value, # date (month.day/season)
row[14].value, # year
row[15].value, # publisher
row[16].value, # publisher location
subjects # subjects
]
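# Illustrative example (hypothetical row values, not from the original data):
# if column 22 is "Conrad" and column 23 is "Hardy, Kipling", the comma-split
# above yields subjects == "Conrad ; Hardy ; Kipling" in the master sheet.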
"""
Master RAI Database.FINAL.xlsx
1 unique id
2 file name
3 page numbers
4 pages in document
5 main title
6 sub title
7 alt title
8 descriptive title
9 author
10 placement in source
11 publication
12 volume
13 issue number
14 part
15 date (month.day)
16 year
17 type
18 primary author(s) semicolon delimited
19 source database/archive
20 date acquired
21 location acquired
22 medium acquired
23 call number
24 british or american
"""
def rai(row):
return [
"russian", # collection
row[1].value, # filename
row[2].value, # page numbers
row[3].value, # pages in document
row[4].value, # main title
row[5].value, # sub title
row[6].value, # alt title
row[7].value, # descriptive title
row[8].value, # author
row[10].value, # publication
row[11].value, # volume
row[12].value, # issue/number
row[14].value, # date (month.day/season)
row[15].value, # year
row[16].value, # publisher
row[17].value, # publisher location
row[19].value # subject(s)
]
write_master()
|
|
"""Support for MQTT humidifiers."""
import functools
import logging
import voluptuous as vol
from homeassistant.components import humidifier
from homeassistant.components.humidifier import (
ATTR_HUMIDITY,
ATTR_MODE,
DEFAULT_MAX_HUMIDITY,
DEFAULT_MIN_HUMIDITY,
DEVICE_CLASS_DEHUMIDIFIER,
DEVICE_CLASS_HUMIDIFIER,
SUPPORT_MODES,
HumidifierEntity,
)
from homeassistant.const import (
CONF_NAME,
CONF_OPTIMISTIC,
CONF_PAYLOAD_OFF,
CONF_PAYLOAD_ON,
CONF_STATE,
)
from homeassistant.core import HomeAssistant, callback
import homeassistant.helpers.config_validation as cv
from homeassistant.helpers.reload import async_setup_reload_service
from homeassistant.helpers.typing import ConfigType
from . import (
CONF_COMMAND_TOPIC,
CONF_QOS,
CONF_RETAIN,
CONF_STATE_TOPIC,
DOMAIN,
PLATFORMS,
subscription,
)
from .. import mqtt
from .debug_info import log_messages
from .mixins import MQTT_ENTITY_COMMON_SCHEMA, MqttEntity, async_setup_entry_helper
CONF_AVAILABLE_MODES_LIST = "modes"
CONF_COMMAND_TEMPLATE = "command_template"
CONF_DEVICE_CLASS = "device_class"
CONF_MODE_COMMAND_TEMPLATE = "mode_command_template"
CONF_MODE_COMMAND_TOPIC = "mode_command_topic"
CONF_MODE_STATE_TOPIC = "mode_state_topic"
CONF_MODE_STATE_TEMPLATE = "mode_state_template"
CONF_PAYLOAD_RESET_MODE = "payload_reset_mode"
CONF_PAYLOAD_RESET_HUMIDITY = "payload_reset_humidity"
CONF_STATE_VALUE_TEMPLATE = "state_value_template"
CONF_TARGET_HUMIDITY_COMMAND_TEMPLATE = "target_humidity_command_template"
CONF_TARGET_HUMIDITY_COMMAND_TOPIC = "target_humidity_command_topic"
CONF_TARGET_HUMIDITY_MIN = "min_humidity"
CONF_TARGET_HUMIDITY_MAX = "max_humidity"
CONF_TARGET_HUMIDITY_STATE_TEMPLATE = "target_humidity_state_template"
CONF_TARGET_HUMIDITY_STATE_TOPIC = "target_humidity_state_topic"
DEFAULT_NAME = "MQTT Humidifier"
DEFAULT_OPTIMISTIC = False
DEFAULT_PAYLOAD_ON = "ON"
DEFAULT_PAYLOAD_OFF = "OFF"
DEFAULT_PAYLOAD_RESET = "None"
MQTT_HUMIDIFIER_ATTRIBUTES_BLOCKED = frozenset(
{
humidifier.ATTR_HUMIDITY,
humidifier.ATTR_MAX_HUMIDITY,
humidifier.ATTR_MIN_HUMIDITY,
humidifier.ATTR_MODE,
humidifier.ATTR_AVAILABLE_MODES,
}
)
_LOGGER = logging.getLogger(__name__)
def valid_mode_configuration(config):
"""Validate that the mode reset payload is not one of the available modes."""
if config.get(CONF_PAYLOAD_RESET_MODE) in config.get(CONF_AVAILABLE_MODES_LIST):
raise ValueError("modes must not contain payload_reset_mode")
return config
def valid_humidity_range_configuration(config):
"""Validate that the target_humidity range configuration is valid, throws if it isn't."""
if config.get(CONF_TARGET_HUMIDITY_MIN) >= config.get(CONF_TARGET_HUMIDITY_MAX):
raise ValueError("target_humidity_max must be > target_humidity_min")
if config.get(CONF_TARGET_HUMIDITY_MAX) > 100:
raise ValueError("max_humidity must be <= 100")
return config
PLATFORM_SCHEMA = vol.All(
mqtt.MQTT_RW_PLATFORM_SCHEMA.extend(
{
            # CONF_AVAILABLE_MODES_LIST and CONF_MODE_COMMAND_TOPIC must be used together
vol.Inclusive(
CONF_AVAILABLE_MODES_LIST, "available_modes", default=[]
): cv.ensure_list,
vol.Inclusive(
CONF_MODE_COMMAND_TOPIC, "available_modes"
): mqtt.valid_publish_topic,
vol.Optional(CONF_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_DEVICE_CLASS, default=DEVICE_CLASS_HUMIDIFIER): vol.In(
[DEVICE_CLASS_HUMIDIFIER, DEVICE_CLASS_DEHUMIDIFIER]
),
vol.Optional(CONF_MODE_COMMAND_TEMPLATE): cv.template,
vol.Optional(CONF_MODE_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(CONF_MODE_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Optional(CONF_OPTIMISTIC, default=DEFAULT_OPTIMISTIC): cv.boolean,
vol.Optional(CONF_PAYLOAD_OFF, default=DEFAULT_PAYLOAD_OFF): cv.string,
vol.Optional(CONF_PAYLOAD_ON, default=DEFAULT_PAYLOAD_ON): cv.string,
vol.Optional(CONF_STATE_VALUE_TEMPLATE): cv.template,
vol.Required(CONF_TARGET_HUMIDITY_COMMAND_TOPIC): mqtt.valid_publish_topic,
vol.Optional(CONF_TARGET_HUMIDITY_COMMAND_TEMPLATE): cv.template,
vol.Optional(
CONF_TARGET_HUMIDITY_MAX, default=DEFAULT_MAX_HUMIDITY
): cv.positive_int,
vol.Optional(
CONF_TARGET_HUMIDITY_MIN, default=DEFAULT_MIN_HUMIDITY
): cv.positive_int,
vol.Optional(CONF_TARGET_HUMIDITY_STATE_TEMPLATE): cv.template,
vol.Optional(CONF_TARGET_HUMIDITY_STATE_TOPIC): mqtt.valid_subscribe_topic,
vol.Optional(
CONF_PAYLOAD_RESET_HUMIDITY, default=DEFAULT_PAYLOAD_RESET
): cv.string,
vol.Optional(
CONF_PAYLOAD_RESET_MODE, default=DEFAULT_PAYLOAD_RESET
): cv.string,
}
).extend(MQTT_ENTITY_COMMON_SCHEMA.schema),
valid_humidity_range_configuration,
valid_mode_configuration,
)
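# For reference, a minimal illustrative configuration.yaml entry accepted by the
# schema above (the entity name and topic names are placeholders, not part of
# this module):
#
#   humidifier:
#     - platform: mqtt
#       name: "Bedroom humidifier"
#       command_topic: "bedroom/humidifier/power/set"
#       target_humidity_command_topic: "bedroom/humidifier/target/set"
#       modes:
#         - "normal"
#         - "away"
#       mode_command_topic: "bedroom/humidifier/mode/set"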
async def async_setup_platform(
hass: HomeAssistant, config: ConfigType, async_add_entities, discovery_info=None
):
"""Set up MQTT humidifier through configuration.yaml."""
await async_setup_reload_service(hass, DOMAIN, PLATFORMS)
await _async_setup_entity(hass, async_add_entities, config)
async def async_setup_entry(hass, config_entry, async_add_entities):
"""Set up MQTT humidifier dynamically through MQTT discovery."""
setup = functools.partial(
_async_setup_entity, hass, async_add_entities, config_entry=config_entry
)
await async_setup_entry_helper(hass, humidifier.DOMAIN, setup, PLATFORM_SCHEMA)
async def _async_setup_entity(
hass, async_add_entities, config, config_entry=None, discovery_data=None
):
"""Set up the MQTT humidifier."""
async_add_entities([MqttHumidifier(hass, config, config_entry, discovery_data)])
class MqttHumidifier(MqttEntity, HumidifierEntity):
"""A MQTT humidifier component."""
_attributes_extra_blocked = MQTT_HUMIDIFIER_ATTRIBUTES_BLOCKED
def __init__(self, hass, config, config_entry, discovery_data):
"""Initialize the MQTT humidifier."""
self._state = False
self._target_humidity = None
self._mode = None
self._supported_features = 0
self._topic = None
self._payload = None
self._value_templates = None
self._command_templates = None
self._optimistic = None
self._optimistic_target_humidity = None
self._optimistic_mode = None
MqttEntity.__init__(self, hass, config, config_entry, discovery_data)
@staticmethod
def config_schema():
"""Return the config schema."""
return PLATFORM_SCHEMA
def _setup_from_config(self, config):
"""(Re)Setup the entity."""
self._attr_device_class = config.get(CONF_DEVICE_CLASS)
self._attr_min_humidity = config.get(CONF_TARGET_HUMIDITY_MIN)
self._attr_max_humidity = config.get(CONF_TARGET_HUMIDITY_MAX)
self._topic = {
key: config.get(key)
for key in (
CONF_STATE_TOPIC,
CONF_COMMAND_TOPIC,
CONF_TARGET_HUMIDITY_STATE_TOPIC,
CONF_TARGET_HUMIDITY_COMMAND_TOPIC,
CONF_MODE_STATE_TOPIC,
CONF_MODE_COMMAND_TOPIC,
)
}
self._value_templates = {
CONF_STATE: config.get(CONF_STATE_VALUE_TEMPLATE),
ATTR_HUMIDITY: config.get(CONF_TARGET_HUMIDITY_STATE_TEMPLATE),
ATTR_MODE: config.get(CONF_MODE_STATE_TEMPLATE),
}
self._command_templates = {
CONF_STATE: config.get(CONF_COMMAND_TEMPLATE),
ATTR_HUMIDITY: config.get(CONF_TARGET_HUMIDITY_COMMAND_TEMPLATE),
ATTR_MODE: config.get(CONF_MODE_COMMAND_TEMPLATE),
}
self._payload = {
"STATE_ON": config[CONF_PAYLOAD_ON],
"STATE_OFF": config[CONF_PAYLOAD_OFF],
"HUMIDITY_RESET": config[CONF_PAYLOAD_RESET_HUMIDITY],
"MODE_RESET": config[CONF_PAYLOAD_RESET_MODE],
}
if CONF_MODE_COMMAND_TOPIC in config and CONF_AVAILABLE_MODES_LIST in config:
self._available_modes = config[CONF_AVAILABLE_MODES_LIST]
else:
self._available_modes = []
if self._available_modes:
self._attr_supported_features = SUPPORT_MODES
else:
self._attr_supported_features = 0
optimistic = config[CONF_OPTIMISTIC]
self._optimistic = optimistic or self._topic[CONF_STATE_TOPIC] is None
self._optimistic_target_humidity = (
optimistic or self._topic[CONF_TARGET_HUMIDITY_STATE_TOPIC] is None
)
self._optimistic_mode = optimistic or self._topic[CONF_MODE_STATE_TOPIC] is None
for tpl_dict in (self._command_templates, self._value_templates):
for key, tpl in tpl_dict.items():
if tpl is None:
tpl_dict[key] = lambda value: value
else:
tpl.hass = self.hass
tpl_dict[key] = tpl.async_render_with_possible_json_value
async def _subscribe_topics(self):
"""(Re)Subscribe to topics."""
topics = {}
@callback
@log_messages(self.hass, self.entity_id)
def state_received(msg):
"""Handle new received MQTT message."""
payload = self._value_templates[CONF_STATE](msg.payload)
if not payload:
_LOGGER.debug("Ignoring empty state from '%s'", msg.topic)
return
if payload == self._payload["STATE_ON"]:
self._state = True
elif payload == self._payload["STATE_OFF"]:
self._state = False
self.async_write_ha_state()
if self._topic[CONF_STATE_TOPIC] is not None:
topics[CONF_STATE_TOPIC] = {
"topic": self._topic[CONF_STATE_TOPIC],
"msg_callback": state_received,
"qos": self._config[CONF_QOS],
}
@callback
@log_messages(self.hass, self.entity_id)
def target_humidity_received(msg):
"""Handle new received MQTT message for the target humidity."""
rendered_target_humidity_payload = self._value_templates[ATTR_HUMIDITY](
msg.payload
)
if not rendered_target_humidity_payload:
_LOGGER.debug("Ignoring empty target humidity from '%s'", msg.topic)
return
if rendered_target_humidity_payload == self._payload["HUMIDITY_RESET"]:
self._target_humidity = None
self.async_write_ha_state()
return
try:
target_humidity = round(float(rendered_target_humidity_payload))
except ValueError:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid target humidity",
msg.payload,
msg.topic,
rendered_target_humidity_payload,
)
return
if (
target_humidity < self._attr_min_humidity
or target_humidity > self._attr_max_humidity
):
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid target humidity",
msg.payload,
msg.topic,
rendered_target_humidity_payload,
)
return
self._target_humidity = target_humidity
self.async_write_ha_state()
if self._topic[CONF_TARGET_HUMIDITY_STATE_TOPIC] is not None:
topics[CONF_TARGET_HUMIDITY_STATE_TOPIC] = {
"topic": self._topic[CONF_TARGET_HUMIDITY_STATE_TOPIC],
"msg_callback": target_humidity_received,
"qos": self._config[CONF_QOS],
}
self._target_humidity = None
@callback
@log_messages(self.hass, self.entity_id)
def mode_received(msg):
"""Handle new received MQTT message for mode."""
mode = self._value_templates[ATTR_MODE](msg.payload)
if mode == self._payload["MODE_RESET"]:
self._mode = None
self.async_write_ha_state()
return
if not mode:
_LOGGER.debug("Ignoring empty mode from '%s'", msg.topic)
return
if mode not in self.available_modes:
_LOGGER.warning(
"'%s' received on topic %s. '%s' is not a valid mode",
msg.payload,
msg.topic,
mode,
)
return
self._mode = mode
self.async_write_ha_state()
if self._topic[CONF_MODE_STATE_TOPIC] is not None:
topics[CONF_MODE_STATE_TOPIC] = {
"topic": self._topic[CONF_MODE_STATE_TOPIC],
"msg_callback": mode_received,
"qos": self._config[CONF_QOS],
}
self._mode = None
self._sub_state = await subscription.async_subscribe_topics(
self.hass, self._sub_state, topics
)
@property
def assumed_state(self):
"""Return true if we do optimistic updates."""
return self._optimistic
@property
def available_modes(self) -> list:
"""Get the list of available modes."""
return self._available_modes
@property
def is_on(self):
"""Return true if device is on."""
return self._state
@property
def target_humidity(self):
"""Return the current target humidity."""
return self._target_humidity
@property
def mode(self):
"""Return the current mode."""
return self._mode
async def async_turn_on(
self,
**kwargs,
) -> None:
"""Turn on the entity.
This method is a coroutine.
"""
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_ON"])
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self._state = True
self.async_write_ha_state()
async def async_turn_off(self, **kwargs) -> None:
"""Turn off the entity.
This method is a coroutine.
"""
mqtt_payload = self._command_templates[CONF_STATE](self._payload["STATE_OFF"])
mqtt.async_publish(
self.hass,
self._topic[CONF_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic:
self._state = False
self.async_write_ha_state()
async def async_set_humidity(self, humidity: int) -> None:
"""Set the target humidity of the humidifier.
This method is a coroutine.
"""
mqtt_payload = self._command_templates[ATTR_HUMIDITY](humidity)
mqtt.async_publish(
self.hass,
self._topic[CONF_TARGET_HUMIDITY_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic_target_humidity:
self._target_humidity = humidity
self.async_write_ha_state()
async def async_set_mode(self, mode: str) -> None:
"""Set the mode of the fan.
This method is a coroutine.
"""
if mode not in self.available_modes:
_LOGGER.warning("'%s'is not a valid mode", mode)
return
mqtt_payload = self._command_templates[ATTR_MODE](mode)
mqtt.async_publish(
self.hass,
self._topic[CONF_MODE_COMMAND_TOPIC],
mqtt_payload,
self._config[CONF_QOS],
self._config[CONF_RETAIN],
)
if self._optimistic_mode:
self._mode = mode
self.async_write_ha_state()
|
|
#
#
# Copyright (C) 2010, 2011, 2012 Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
# TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""Module implementing the parameter types code."""
import re
import operator
import ipaddr
from ganeti import compat
from ganeti import utils
from ganeti import constants
from ganeti import objects
from ganeti.serializer import Private
_PAREN_RE = re.compile("^[a-zA-Z0-9_-]+$")
def Parens(text):
"""Enclose text in parens if necessary.
@param text: Text
"""
text = str(text)
if _PAREN_RE.match(text):
return text
else:
return "(%s)" % text
class _WrapperBase(object):
__slots__ = [
"_fn",
"_text",
]
def __init__(self, text, fn):
"""Initializes this class.
@param text: Description
@param fn: Wrapped function
"""
assert text.strip()
self._text = text
self._fn = fn
def __call__(self, *args):
return self._fn(*args)
class _DescWrapper(_WrapperBase):
"""Wrapper class for description text.
"""
def __str__(self):
return self._text
def __repr__(self):
return "<%s %r>" % (self._text, self._fn)
class _CommentWrapper(_WrapperBase):
"""Wrapper class for comment.
"""
def __str__(self):
return "%s [%s]" % (self._fn, self._text)
def WithDesc(text):
"""Builds wrapper class with description text.
@type text: string
@param text: Description text
@return: Callable class
"""
assert text[0] == text[0].upper()
return compat.partial(_DescWrapper, text)
def Comment(text):
"""Builds wrapper for adding comment to description text.
@type text: string
@param text: Comment text
@return: Callable class
"""
assert not frozenset(text).intersection("[]")
return compat.partial(_CommentWrapper, text)
def CombinationDesc(op, args, fn):
"""Build description for combinating operator.
@type op: string
@param op: Operator as text (e.g. "and")
@type args: list
@param args: Operator arguments
@type fn: callable
@param fn: Wrapped function
"""
# Some type descriptions are rather long. If "None" is listed at the
# end or somewhere in between it is easily missed. Therefore it should
# be at the beginning, e.g. "None or (long description)".
if __debug__ and TNone in args and args.index(TNone) > 0:
raise Exception("TNone must be listed first")
if len(args) == 1:
descr = str(args[0])
else:
descr = (" %s " % op).join(Parens(i) for i in args)
return WithDesc(descr)(fn)
# Modifiable default values; need to define these here before the
# actual LUs
@WithDesc(str([]))
def EmptyList():
"""Returns an empty list.
"""
return []
@WithDesc(str({}))
def EmptyDict():
"""Returns an empty dict.
"""
return {}
#: The without-default default value
NoDefault = object()
# Some basic types
@WithDesc("Anything")
def TAny(_):
"""Accepts any value.
"""
return True
@WithDesc("NotNone")
def TNotNone(val):
"""Checks if the given value is not None.
"""
return val is not None
@WithDesc("None")
def TNone(val):
"""Checks if the given value is None.
"""
return val is None
@WithDesc("ValueNone")
def TValueNone(val):
"""Checks if the given value is L{constants.VALUE_NONE}.
"""
return val == constants.VALUE_NONE
@WithDesc("Boolean")
def TBool(val):
"""Checks if the given value is a boolean.
"""
return isinstance(val, bool)
@WithDesc("Integer")
def TInt(val):
"""Checks if the given value is an integer.
"""
# For backwards compatibility with older Python versions, boolean values are
# also integers and should be excluded in this test.
#
# >>> (isinstance(False, int), isinstance(True, int))
# (True, True)
return isinstance(val, (int, long)) and not isinstance(val, bool)
@WithDesc("Float")
def TFloat(val):
"""Checks if the given value is a float.
"""
return isinstance(val, float)
@WithDesc("String")
def TString(val):
"""Checks if the given value is a string.
"""
return isinstance(val, basestring)
@WithDesc("EvalToTrue")
def TTrue(val):
"""Checks if a given value evaluates to a boolean True value.
"""
return bool(val)
def TElemOf(target_list):
"""Builds a function that checks if a given value is a member of a list.
"""
def fn(val):
return val in target_list
return WithDesc("OneOf %s" % (utils.CommaJoin(target_list), ))(fn)
# Container types
@WithDesc("List")
def TList(val):
"""Checks if the given value is a list.
"""
return isinstance(val, list)
@WithDesc("Tuple")
def TTuple(val):
"""Checks if the given value is a tuple.
"""
return isinstance(val, tuple)
@WithDesc("Dictionary")
def TDict(val):
"""Checks if the given value is a dictionary.
Note that L{PrivateDict}s subclass dict and pass this check.
"""
return isinstance(val, dict)
def TIsLength(size):
"""Check is the given container is of the given size.
"""
def fn(container):
return len(container) == size
return WithDesc("Length %s" % (size, ))(fn)
# Combinator types
def TAnd(*args):
"""Combine multiple functions using an AND operation.
"""
def fn(val):
return compat.all(t(val) for t in args)
return CombinationDesc("and", args, fn)
def TOr(*args):
"""Combine multiple functions using an OR operation.
"""
def fn(val):
return compat.any(t(val) for t in args)
return CombinationDesc("or", args, fn)
def TMap(fn, test):
"""Checks that a modified version of the argument passes the given test.
"""
return WithDesc("Result of %s must be %s" %
(Parens(fn), Parens(test)))(lambda val: test(fn(val)))
def TRegex(pobj):
"""Checks whether a string matches a specific regular expression.
@param pobj: Compiled regular expression as returned by C{re.compile}
"""
desc = WithDesc("String matching regex \"%s\"" %
pobj.pattern.encode("string_escape"))
return desc(TAnd(TString, pobj.match))
def TMaybe(test):
"""Wrap a test in a TOr(TNone, test).
This makes it easier to define TMaybe* types.
"""
return TOr(TNone, test)
def TMaybeValueNone(test):
"""Used for unsetting values.
"""
return TMaybe(TOr(TValueNone, test))
# Type aliases
#: a non-empty string
TNonEmptyString = WithDesc("NonEmptyString")(TAnd(TString, TTrue))
#: a maybe non-empty string
TMaybeString = TMaybe(TNonEmptyString)
#: a maybe boolean (bool or none)
TMaybeBool = TMaybe(TBool)
#: Maybe a dictionary (dict or None)
TMaybeDict = TMaybe(TDict)
#: Maybe a list (list or None)
TMaybeList = TMaybe(TList)
#: a non-negative number (value >= 0)
# val_type should be TInt, TDouble (== TFloat), or TNumber
def TNonNegative(val_type):
return WithDesc("EqualOrGreaterThanZero")(TAnd(val_type, lambda v: v >= 0))
#: a positive number (value > 0)
# val_type should be TInt, TDouble (== TFloat), or TNumber
def TPositive(val_type):
return WithDesc("GreaterThanZero")(TAnd(val_type, lambda v: v > 0))
#: a non-negative integer (value >= 0)
TNonNegativeInt = TNonNegative(TInt)
#: a positive integer (value > 0)
TPositiveInt = TPositive(TInt)
#: a maybe positive integer (positive integer or None)
TMaybePositiveInt = TMaybe(TPositiveInt)
#: a negative integer (value < 0)
TNegativeInt = \
TAnd(TInt, WithDesc("LessThanZero")(compat.partial(operator.gt, 0)))
#: a non-negative float
TNonNegativeFloat = \
TAnd(TFloat, WithDesc("EqualOrGreaterThanZero")(lambda v: v >= 0.0))
#: Job ID
TJobId = WithDesc("JobId")(TOr(TNonNegativeInt,
TRegex(re.compile("^%s$" %
constants.JOB_ID_TEMPLATE))))
#: Double (== Float)
TDouble = TFloat
#: Number
TNumber = TOr(TInt, TFloat)
#: Relative job ID
TRelativeJobId = WithDesc("RelativeJobId")(TNegativeInt)
def TInstanceOf(cls):
"""Checks if a given value is an instance of C{cls}.
@type cls: class
@param cls: Class object
"""
name = "%s.%s" % (cls.__module__, cls.__name__)
desc = WithDesc("Instance of %s" % (Parens(name), ))
return desc(lambda val: isinstance(val, cls))
def TPrivate(val_type):
"""Checks if a given value is an instance of Private.
"""
def fn(val):
return isinstance(val, Private) and val_type(val.Get())
desc = WithDesc("Private %s" % Parens(val_type))
return desc(fn)
def TSecret(val_type):
"""Checks if a given value is an instance of Private.
However, the type is named Secret in the Haskell equivalent.
"""
def fn(val):
return isinstance(val, Private) and val_type(val.Get())
desc = WithDesc("Private %s" % Parens(val_type))
return desc(fn)
def TListOf(my_type):
"""Checks if a given value is a list with all elements of the same type.
"""
desc = WithDesc("List of %s" % (Parens(my_type), ))
return desc(TAnd(TList, lambda lst: compat.all(my_type(v) for v in lst)))
TMaybeListOf = lambda item_type: TMaybe(TListOf(item_type))
def TTupleOf(*val_types):
"""Checks if a given value is a list with the proper size and its
elements match the given types.
"""
desc = WithDesc("Tuple of %s" % Parens(', '.join(str(v) for v in val_types)))
return desc(TAnd(TOr(TTuple, TList), TIsLength(len(val_types)),
TItems(val_types)))
def TSetOf(val_type):
"""Checks if a given value is a list with all elements of the same
type and eliminates duplicated elements.
"""
desc = WithDesc("Set of %s" % (Parens(val_type), ))
return desc(lambda st: TListOf(val_type)(list(set(st))))
def TDictOf(key_type, val_type):
"""Checks a dict type for the type of its key/values.
"""
desc = WithDesc("Dictionary with keys of %s and values of %s" %
(Parens(key_type), Parens(val_type)))
def fn(container):
return (compat.all(key_type(v) for v in container.keys()) and
compat.all(val_type(v) for v in container.values()))
return desc(TAnd(TDict, fn))
def _TStrictDictCheck(require_all, exclusive, items, val):
"""Helper function for L{TStrictDict}.
"""
notfound_fn = lambda _: not exclusive
if require_all and not frozenset(val.keys()).issuperset(items.keys()):
# Requires items not found in value
return False
return compat.all(items.get(key, notfound_fn)(value)
for (key, value) in val.items())
def TStrictDict(require_all, exclusive, items):
"""Strict dictionary check with specific keys.
@type require_all: boolean
@param require_all: Whether all keys in L{items} are required
@type exclusive: boolean
@param exclusive: Whether only keys listed in L{items} should be accepted
@type items: dictionary
@param items: Mapping from key (string) to verification function
"""
descparts = ["Dictionary containing"]
if exclusive:
descparts.append(" none but the")
if require_all:
descparts.append(" required")
if len(items) == 1:
descparts.append(" key ")
else:
descparts.append(" keys ")
descparts.append(utils.CommaJoin("\"%s\" (value %s)" % (key, value)
for (key, value) in items.items()))
desc = WithDesc("".join(descparts))
return desc(TAnd(TDict,
compat.partial(_TStrictDictCheck, require_all, exclusive,
items)))
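# An illustrative use of TStrictDict; the key name is made up for the example:
#
#   check = TStrictDict(True, True, {"name": TNonEmptyString})
#   check({"name": "node1"})              # -> True
#   check({})                             # -> False, required key missing
#   check({"name": "node1", "extra": 1})  # -> False, exclusive rejects unknown keys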
def TItems(items):
"""Checks individual items of a container.
If the verified value and the list of expected items differ in length, this
check considers only as many items as are contained in the shorter list. Use
L{TIsLength} to enforce a certain length.
@type items: list
@param items: List of checks
"""
assert items, "Need items"
text = ["Item", "item"]
desc = WithDesc(utils.CommaJoin("%s %s is %s" %
(text[int(idx > 0)], idx, Parens(check))
for (idx, check) in enumerate(items)))
return desc(lambda value: compat.all(check(i)
for (check, i) in zip(items, value)))
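# An illustrative use of TItems, showing the truncation behaviour described
# above; combine with TIsLength to also enforce the length:
#
#   pair = TItems([TInt, TNonEmptyString])
#   pair([1, "a"])          # -> True
#   pair([1, "a", "junk"])  # -> True, the extra item is not checked
#   TAnd(TIsLength(2), pair)([1, "a", "junk"])  # -> False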
TMaxValue = lambda max: WithDesc('Less than %s' % max)(lambda val: val < max)
TAllocPolicy = TElemOf(constants.VALID_ALLOC_POLICIES)
TCVErrorCode = TElemOf(constants.CV_ALL_ECODES_STRINGS)
TQueryResultCode = TElemOf(constants.RS_ALL)
TExportTarget = TOr(TNonEmptyString, TList)
TExportMode = TElemOf(constants.EXPORT_MODES)
TDiskIndex = TAnd(TNonNegativeInt, TMaxValue(constants.MAX_DISKS))
TReplaceDisksMode = TElemOf(constants.REPLACE_MODES)
TDiskTemplate = TElemOf(constants.DISK_TEMPLATES)
TEvacMode = TElemOf(constants.NODE_EVAC_MODES)
TIAllocatorTestDir = TElemOf(constants.VALID_IALLOCATOR_DIRECTIONS)
TIAllocatorMode = TElemOf(constants.VALID_IALLOCATOR_MODES)
TImportExportCompression = TElemOf(constants.IEC_ALL)
TAdminStateSource = TElemOf(constants.ADMIN_STATE_SOURCES)
def TSetParamsMods(fn):
"""Generates a check for modification lists.
"""
# Old format
# TODO: Remove in version 2.11 including support in LUInstanceSetParams
old_mod_item_fn = \
TAnd(TIsLength(2),
TItems([TOr(TElemOf(constants.DDMS_VALUES), TNonNegativeInt), fn]))
# New format, supporting adding/removing disks/NICs at arbitrary indices
mod_item_fn = \
TAnd(TIsLength(3), TItems([
TElemOf(constants.DDMS_VALUES_WITH_MODIFY),
Comment("Device index, can be negative, e.g. -1 for last disk")
(TOr(TInt, TString)),
fn,
]))
return TOr(Comment("Recommended")(TListOf(mod_item_fn)),
Comment("Deprecated")(TListOf(old_mod_item_fn)))
TINicParams = \
Comment("NIC parameters")(TDictOf(TElemOf(constants.INIC_PARAMS),
TMaybe(TString)))
TIDiskParams = \
Comment("Disk parameters")(TDictOf(TNonEmptyString,
TOr(TNonEmptyString, TInt)))
THypervisor = TElemOf(constants.HYPER_TYPES)
TMigrationMode = TElemOf(constants.HT_MIGRATION_MODES)
TNICMode = TElemOf(constants.NIC_VALID_MODES)
TInstCreateMode = TElemOf(constants.INSTANCE_CREATE_MODES)
TRebootType = TElemOf(constants.REBOOT_TYPES)
TFileDriver = TElemOf(constants.FILE_DRIVER)
TOobCommand = TElemOf(constants.OOB_COMMANDS)
# FIXME: adjust this after all queries are in haskell
TQueryTypeOp = TElemOf(set(constants.QR_VIA_OP)
.union(set(constants.QR_VIA_LUXI)))
TDiskParams = \
Comment("Disk parameters")(TDictOf(TNonEmptyString,
TOr(TNonEmptyString, TInt)))
TDiskChanges = \
TAnd(TIsLength(2),
TItems([Comment("Disk index")(TNonNegativeInt),
Comment("Parameters")(TDiskParams)]))
TRecreateDisksInfo = TOr(TListOf(TNonNegativeInt), TListOf(TDiskChanges))
def TStorageType(val):
"""Builds a function that checks if a given value is a valid storage
type.
"""
return (val in constants.STORAGE_TYPES)
TTagKind = TElemOf(constants.VALID_TAG_TYPES)
TDdmSimple = TElemOf(constants.DDMS_VALUES)
TVerifyOptionalChecks = TElemOf(constants.VERIFY_OPTIONAL_CHECKS)
TSshKeyType = TElemOf(constants.SSHK_ALL)
@WithDesc("IPv4 network")
def _CheckCIDRNetNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv4Network(value)
except ipaddr.AddressValueError:
return False
return True
@WithDesc("IPv4 address")
def _CheckCIDRAddrNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv4Address(value)
except ipaddr.AddressValueError:
return False
return True
@WithDesc("IPv6 address")
def _CheckCIDR6AddrNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv6Address(value)
except ipaddr.AddressValueError:
return False
return True
@WithDesc("IPv6 network")
def _CheckCIDR6NetNotation(value):
"""Ensure a given CIDR notation type is valid.
"""
try:
ipaddr.IPv6Network(value)
except ipaddr.AddressValueError:
return False
return True
TIPv4Address = TAnd(TString, _CheckCIDRAddrNotation)
TIPv6Address = TAnd(TString, _CheckCIDR6AddrNotation)
TIPv4Network = TAnd(TString, _CheckCIDRNetNotation)
TIPv6Network = TAnd(TString, _CheckCIDR6NetNotation)
def TObject(val_type):
return TDictOf(TAny, val_type)
def TObjectCheck(obj, fields_types):
"""Helper to generate type checks for objects.
@param obj: The object to generate type checks
@param fields_types: The fields and their types as a dict
@return: A ht type check function
"""
assert set(obj.GetAllSlots()) == set(fields_types.keys()), \
"%s != %s" % (set(obj.GetAllSlots()), set(fields_types.keys()))
return TStrictDict(True, True, fields_types)
TQueryFieldDef = \
TObjectCheck(objects.QueryFieldDefinition, {
"name": TNonEmptyString,
"title": TNonEmptyString,
"kind": TElemOf(constants.QFT_ALL),
"doc": TNonEmptyString
})
TQueryRow = \
TListOf(TAnd(TIsLength(2),
TItems([TElemOf(constants.RS_ALL), TAny])))
TQueryResult = TListOf(TQueryRow)
TQueryResponse = \
TObjectCheck(objects.QueryResponse, {
"fields": TListOf(TQueryFieldDef),
"data": TQueryResult
})
TQueryFieldsResponse = \
TObjectCheck(objects.QueryFieldsResponse, {
"fields": TListOf(TQueryFieldDef)
})
TJobIdListItem = \
TAnd(TIsLength(2),
TItems([Comment("success")(TBool),
Comment("Job ID if successful, error message"
" otherwise")(TOr(TString, TJobId))]))
TJobIdList = TListOf(TJobIdListItem)
TJobIdListOnly = TStrictDict(True, True, {
constants.JOB_IDS_KEY: Comment("List of submitted jobs")(TJobIdList)
})
TInstanceMultiAllocResponse = \
TStrictDict(True, True, {
constants.JOB_IDS_KEY: Comment("List of submitted jobs")(TJobIdList),
constants.ALLOCATABLE_KEY: TListOf(TNonEmptyString),
constants.FAILED_KEY: TListOf(TNonEmptyString)
})
|
|
from deciderType import deciderType
class deciderTypeAirplane(deciderType):
"""Austin's Airplane Finder"""
def startup(self):
self.setDeciderName("DeciderTypeAirplane")
self.planeThreshold = 4 # number of tests that must pass to count as a plane
# load correlation data from file
self.correlationThreshold = 22900
self.correlationFile = 'correlationData.txt'
self.getCorrelationData()
def isNoise(self, theListener): # returns true if there is a noise
return self.isPlane(theListener)
    def isPlane(self, theListener): # returns true if there is a plane (after sanity checks)
# 'hearPlaneNow' is true if the Listener hears a plane right now
hearPlaneNow = self.hearPlaneNow(theListener)
if not self.clockRunning and hearPlaneNow:
self.startTimer()
if self.clockRunning and not hearPlaneNow:
self.stopTimer()
self.addListenerHistory(hearPlaneNow)
if not self.sanityCheckConsequtive():
self.addDecisionHistory(False)
return False
if not self.sanityCheckTimer():
self.addDecisionHistory(False)
return False
self.addDecisionHistory(True)
return True
def hearPlaneNow(self, theListener): # return True if it sounds like a plane right now
# run tests
if self.runPlaneTests(theListener) >= self.planeThreshold: # sounds like a plane right now
return True
else:
return False
def runPlaneTests(self, theListener):
plane = 0
        # TODO: reconcile the use of theListener.fftData here with theListener.fftDataABS in testCorrelation()
if sum(theListener.fftData[1:101])/100 > 400:
plane += 1
if sum(theListener.fftData[1:201])/200 > 300:
plane += 1
if sum(theListener.fftData[1:301])/300 > 250:
plane += 1
# check if avg(100:200) > 200
if sum(theListener.fftData[101:200])/100 > 200:
plane += 1
if sum(theListener.fftData[201:250])/50 > 125:
plane += 1
plane += self.testCorrelation(theListener)
return plane
def testCorrelation(self, theListener, length=0):
# return 1 if correlated to plane, 0 if not
if length == 0:
testLength = theListener.fftNumUsefulBins
else:
if length <= theListener.fftNumUsefulBins:
testLength = length
else:
self.myLog.add("ERROR in testCorrelation(): length is longer than fftNumUsefulBins")
testLength = theListener.fftNumUsefulBins
if theListener.fftNumUsefulBins!=len(self.correlationData):
print('ERROR: fftNumUsefulBins different size than correlationData ' + str(theListener.fftNumUsefulBins) + ' vs ' + str(len(self.correlationData)))
correlSum = 0.0
for n in range(testLength):
#print(str(n) + '\tdata: ' + str(theListener.fftData[n]) + '\tcorrel: ' + str(self.correlationData[n]) + '\tsum: ' + str(float(theListener.fftData[n])*float(self.correlationData[n])) + '\tcumm: ' + str(correlSum))
correlSum += float(theListener.fftDataABS[n])*float(self.correlationData[n])
self.statsCorrelation = round(correlSum,1)
if correlSum > self.correlationThreshold:
return 1
else:
return 0
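    # Illustrative sketch of the score computed above (the numbers are made up):
    # with correlationData = [0.8, -0.2] and fftDataABS = [30000.0, 5000.0],
    # correlSum = 30000*0.8 + 5000*(-0.2) = 23000.0, which exceeds the default
    # correlationThreshold of 22900 and so counts as one passed plane test.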
def calculateCorrelationData(self, newTeachPath):
# calculates the Pearson correlation for each frequency bin
import os
from scipy.stats import pearsonr
import listener
tempListener = listener.Listener()
groundTruth = []
numSamples = 0
correlData = [] # correlation data matrix
finalData = []
# get list of files in recordings directory
filelist = os.listdir(newTeachPath)
# take a filename and see if it is a .fft file
for file in filelist:
fftfile = newTeachPath + file
# check if 'file' is a file (it could be a directory)
if os.path.isfile(fftfile) and file[-4:]=='.fft':
# set the groundTruth if the file is for a plane or not
if file[0]=='p': # it's a plane
groundTruth.append(float(1))
else:
groundTruth.append(float(0))
# get the FFT data from file and add it to the list
correlData.append(tempListener.getFFTData(fftfile))
numSamples += 1
if len(correlData[numSamples-1]) != tempListener.fftNumUsefulBins:
print('Error: FFT in file (' + fftfile + ') not equal to expected value')
print('Expected: ' + str(tempListener.fftNumUsefulBins) + '\tActual: ' + str(len(correlData[numSamples-1])))
# print(groundTruth)
file = open('rawCorrelData.csv','w') # open the file in write mode
for b in range(len(groundTruth)):
file.write(str(groundTruth[b]) + ', ')
file.write('\n')
for freq in range(int(tempListener.fftNumUsefulBins)):
freqSet = []
for sample in range(numSamples):
freqSet.append(correlData[sample][freq])
file.write(str(correlData[sample][freq]) + ', ')
file.write('\n')
# find the correlation co-efficient for this frequency
# use scipy's pearsonr(x,y) to calculate the correlation
result = pearsonr(freqSet,groundTruth)
finalData.append(result[0])
# TODO: sanity check the correlation data against existing FFT files?
file.close() # be nice and close out the file
# save the correlation data to a file for future use
self.saveCorrelationData(self.correlationFile, finalData)
def teach(self, plane, theListener):
# plane = True if there is a plane
theListener.audioCapture() # grab an audio sample
from datetime import datetime
now = datetime.now()
timestamp = now.strftime("%Y%m%d_%H%M_%S")
if plane == True:
newFileName = theListener.teachPath + 'plane_' + str(timestamp) + '.wav'
else:
newFileName = theListener.teachPath + 'not_' + str(timestamp) + '.wav'
import os
if not os.path.isfile(theListener.fileName):
self.myLog.add("ERROR: teach() the Listener filename doesn't exist")
print("ERROR: teach() the Listerer filename doesn't exist")
exit()
# move the file and change the name
os.rename(theListener.fileName, newFileName)
def learn(self, theListener): # learn from collected wave files
import os
learnCount = 0
# get list of files in recordings directory
filelist = os.listdir(theListener.teachPath)
# take a filename and see if it has an associated .fft file
for file in filelist:
wavefile = theListener.teachPath + file
fftfile = wavefile + '.fft'
# check if 'file' is a file (it could be a directory)
if os.path.isfile(wavefile) and wavefile[-4:]=='.wav':
# got a valid file name, so check if the FFT file exists
if not os.path.isfile(fftfile): # fft doesn't exist so make it
print(wavefile + ' FFT does not exist. Creating...')
theListener.getAudioData(wavefile)
theListener.doFFT()
theListener.saveFFT(fftfile, theListener.fftData)
learnCount += 1
if learnCount > 0:
print('\n\nLearned from ' + str(learnCount) + ' new WAVE files.\n\n')
else:
print('\n\nThere was nothing to learn. You need to teach the pauser some WAVE files.\n\n')
print('Calculating correlation...could take a while...')
self.calculateCorrelationData(theListener.teachPath)
|
|
##########################################################################
#
# Copyright (c) 2013, Image Engine Design Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above
# copyright notice, this list of conditions and the following
# disclaimer.
#
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided with
# the distribution.
#
# * Neither the name of John Haddon nor the names of
# any other contributors to this software may be used to endorse or
# promote products derived from this software without specific prior
# written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
# IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
# THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
# PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
# CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
# EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
# PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR
# PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF
# LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING
# NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
#
##########################################################################
import os
import re
import string
import fnmatch
import IECore
import Gaffer
import GafferUI
import GafferScene
##########################################################################
# Metadata
##########################################################################
Gaffer.Metadata.registerNode(
GafferScene.Shader,
"description",
"""
The base type for all nodes which create shaders. Use the
ShaderAssignment node to assign them to objects in the scene.
""",
"nodeGadget:minWidth", 0.0,
plugs = {
"name" : [
"description",
"""
The name of the shader being represented. This should
be considered read-only. Use the Shader.loadShader()
method to load a shader.
""",
"layout:section", "",
"nodule:type", "",
],
"type" : [
"description",
"""
The type of the shader being represented. This should
be considered read-only. Use the Shader.loadShader()
method to load a shader.
""",
"layout:section", "",
"nodule:type", "",
],
"parameters" : [
"description",
"""
Where the parameters for the shader are represented.
""",
"nodeGadget:nodulePosition", "left",
"nodule:type", "GafferUI::CompoundNodule",
"compoundNodule:orientation", "y",
"compoundNodule:spacing", 0.2,
],
"parameters.*" : [
# Although the parameters plug is positioned
# as we want above, we must also register
# appropriate values for each individual parameter,
# for the case where they get promoted to a box
# individually.
"nodeGadget:nodulePosition", "left",
],
"out" : [
"description",
"""
The output from the shader.
""",
"nodeGadget:nodulePosition", "right",
],
}
)
##########################################################################
# PlugValueWidgets
##########################################################################
class __ShaderNamePlugValueWidget( GafferUI.PlugValueWidget ) :
def __init__( self, plug, **kw ) :
row = GafferUI.ListContainer( GafferUI.ListContainer.Orientation.Horizontal, spacing = 4 )
GafferUI.PlugValueWidget.__init__( self, row, plug, **kw )
with row :
self.__label = GafferUI.Label( "" )
GafferUI.Spacer( IECore.V2i( 1 ), parenting = { "expand" : True } )
self.__button = GafferUI.Button( "Reload" )
self.__buttonClickedConnection = self.__button.clickedSignal().connect( Gaffer.WeakMethod( self.__buttonClicked ) )
self._updateFromPlug()
def hasLabel( self ) :
return True
def _updateFromPlug( self ) :
with self.getContext() :
shaderName = self.getPlug().getValue()
self.__label.setText( "<h3>Shader : " + shaderName + "</h3>" )
## \todo Disable the type check once we've got all the shader types implementing reloading properly.
nodeType = self.getPlug().node().typeName()
self.__button.setEnabled( bool( shaderName ) and ( "RenderMan" in nodeType or "OSL" in nodeType ) )
def __buttonClicked( self, button ) :
node = self.getPlug().node()
node.shaderLoader().clear()
with Gaffer.UndoContext( node.ancestor( Gaffer.ScriptNode ) ) :
node.loadShader( node["name"].getValue(), keepExistingValues = True )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader, "name", __ShaderNamePlugValueWidget )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader, "parameters", GafferUI.LayoutPlugValueWidget )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader, "out", None )
GafferUI.PlugValueWidget.registerCreator( GafferScene.Shader, "type", None )
##########################################################################
# NodeFinderDialogue mode
##########################################################################
def __shaderNameExtractor( node ) :
if isinstance( node, GafferScene.Shader ) :
return node["name"].getValue()
else :
return ""
GafferUI.NodeFinderDialogue.registerMode( "Shader Names", __shaderNameExtractor )
##########################################################################
# Shader menu
##########################################################################
## Appends menu items for the creation of all shaders found on some searchpaths.
def appendShaders( menuDefinition, prefix, searchPaths, extensions, nodeCreator, matchExpression = "*" ) :
menuDefinition.append( prefix, { "subMenu" : IECore.curry( __shaderSubMenu, searchPaths, extensions, nodeCreator, matchExpression ) } )
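# A minimal illustrative call; the search path, extension and creator below are
# placeholders, not defined in this file:
#
#	def __createShader( nodeName, shaderName ) :
#		node = MyShaderNode( nodeName )
#		node.loadShader( shaderName )
#		return node
#
#	appendShaders( menuDefinition, "/My Shaders", [ "/path/to/shaders" ], [ "osl" ], __createShader )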
def __nodeName( shaderName ) :
nodeName = os.path.split( shaderName )[-1]
nodeName = nodeName.translate( string.maketrans( ".-", "__" ) )
return nodeName
def __loadFromFile( menu, extensions, nodeCreator ) :
path = Gaffer.FileSystemPath( os.getcwd() )
path.setFilter( Gaffer.FileSystemPath.createStandardFilter( extensions ) )
dialogue = GafferUI.PathChooserDialogue( path, title="Load Shader", confirmLabel = "Load", valid=True, leaf=True )
path = dialogue.waitForPath( parentWindow = menu.ancestor( GafferUI.ScriptWindow ) )
if not path :
return None
shaderName = os.path.splitext( str( path ) )[0]
return nodeCreator( __nodeName( shaderName ), shaderName )
def __shaderSubMenu( searchPaths, extensions, nodeCreator, matchExpression ) :
if isinstance( matchExpression, str ) :
matchExpression = re.compile( fnmatch.translate( matchExpression ) )
shaders = set()
pathsVisited = set()
for path in searchPaths :
if path in pathsVisited :
continue
for root, dirs, files in os.walk( path ) :
for file in files :
if os.path.splitext( file )[1][1:] in extensions :
shaderPath = os.path.join( root, file ).partition( path )[-1].lstrip( "/" )
if shaderPath not in shaders and matchExpression.match( shaderPath ) :
shaders.add( os.path.splitext( shaderPath )[0] )
pathsVisited.add( path )
shaders = sorted( list( shaders ) )
categorisedShaders = [ x for x in shaders if "/" in x ]
uncategorisedShaders = [ x for x in shaders if "/" not in x ]
shadersAndMenuPaths = []
for shader in categorisedShaders :
shadersAndMenuPaths.append( ( shader, "/" + shader ) )
for shader in uncategorisedShaders :
if not categorisedShaders :
shadersAndMenuPaths.append( ( shader, "/" + shader ) )
else :
shadersAndMenuPaths.append( ( shader, "/Other/" + shader ) )
result = IECore.MenuDefinition()
for shader, menuPath in shadersAndMenuPaths :
menuPath = "/".join( [ IECore.CamelCase.toSpaced( x ) for x in menuPath.split( "/" ) ] )
result.append(
menuPath,
{
"command" : GafferUI.NodeMenu.nodeCreatorWrapper( IECore.curry( nodeCreator, __nodeName( shader ), shader ) ),
"searchText" : menuPath.rpartition( "/" )[-1].replace( " ", "" ),
},
)
result.append( "/LoadDivider", { "divider" : True } )
result.append( "/Load...", { "command" : GafferUI.NodeMenu.nodeCreatorWrapper( lambda menu : __loadFromFile( menu, extensions, nodeCreator ) ) } )
return result
|
|
import sys
import time
from datetime import date, datetime
from decimal import Decimal
try:
basestring
except NameError:
basestring = str
class OrientRecord(object):
"""
    Object that represents an OrientDB Document / Record
"""
oRecordData = property(lambda self: self.__o_storage)
def __str__(self):
rep = ""
if self.__o_storage:
rep = str( self.__o_storage )
if self.__o_class is not None:
rep = "'@" + str(self.__o_class) + "':" + rep + ""
if self.__version is not None:
rep = rep + ",'version':" + str(self.__version)
if self.__rid is not None:
rep = rep + ",'rid':'" + str(self.__rid) + "'"
return '{' + rep + '}'
@staticmethod
def addslashes(string):
l = [ "\\", '"', "'", "\0", ]
for i in l:
if i in string:
string = string.replace( i, '\\' + i )
return string
def __init__(self, content=None):
self.__rid = None
self.__version = None
self.__o_class = None
self.__o_storage = {}
if not content:
content = {}
for key in content.keys():
if key == '__rid': # Ex: select @rid, field from v_class
self.__rid = content[ key ]
# self.__rid = OrientRecordLink( content[ key ][ 1: ] )
elif key == '__version': # Ex: select @rid, @version from v_class
self.__version = content[key]
elif key == '__o_class':
self.__o_class = content[ key ]
elif key[0:1] == '@':
# special case dict
# { '@my_class': { 'accommodation': 'hotel' } }
self.__o_class = key[1:]
for _key, _value in content[key].items():
if isinstance(_value, basestring):
self.__o_storage[_key] = self.addslashes( _value )
else:
self.__o_storage[_key] = _value
elif key == '__o_storage':
self.__o_storage = content[key]
else:
self.__o_storage[key] = content[key]
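    # An illustrative construction, following the key handling above:
    #   OrientRecord({'@Person': {'name': 'Ann'}, '__rid': '#9:1', '__version': 2})
    # stores {'name': 'Ann'} in oRecordData, with _class == 'Person',
    # _rid == '#9:1' and _version == 2.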
def _set_keys(self, content=dict):
for key in content.keys():
self._set_keys( content[key] )
@property
def _in(self):
try:
return self.__o_storage['in']
except KeyError:
return None
@property
def _out(self):
try:
return self.__o_storage['out']
except KeyError:
return None
@property
def _rid(self):
return self.__rid
@property
def _version(self):
return self.__version
@property
def _class(self):
return self.__o_class
def update(self, **kwargs):
self.__rid = kwargs.get('__rid', None)
self.__version = kwargs.get('__version', None)
if self.__o_class is None:
self.__o_class = kwargs.get('__o_class', None)
""" This method is for backward compatibility when someone
use 'getattr(record, a_key)' """
def __getattr__(self, item):
"""
:param item: string
:return: mixed
:raise: AttributeError
"""
try:
return self.__o_storage[item]
except KeyError:
raise AttributeError( "'OrientRecord' object has no attribute "
"'" + item + "'" )
class OrientRecordLink(object):
def __init__(self, recordlink):
cid, rpos = recordlink.split(":")
self.__link = recordlink
self.clusterID = cid
self.recordPosition = rpos
def __str__(self):
return self.get_hash()
def get(self):
return self.__link
def get_hash(self):
return "#%s" % self.__link
class OrientBinaryObject(object):
"""
This will be a RidBag
"""
def __init__(self, stri):
self.b64 = stri
def get_hash(self):
return "_" + self.b64 + "_"
def getBin(self):
import base64
return base64.b64decode(self.b64)
class OrientCluster(object):
def __init__(self, name, cluster_id, cluster_type=None, segment=None):
"""
Information regarding a Cluster on the Orient Server
:param name: str name of the cluster
        :param cluster_id: int id of the cluster
        :param cluster_type: cluster type (only for version <24 of the protocol)
:param segment: cluster segment (only for version <24 of the protocol)
"""
#: str name of the cluster
self.name = name
        #: int id of the cluster
self.id = cluster_id
self.type = cluster_type
self.segment = segment
def __str__(self):
return "%s: %d" % (self.name, self.id)
def __eq__(self, other):
return self.name == other.name and self.id == other.id
def __ne__(self, other):
return self.name != other.name or self.id != other.id
class OrientVersion(object):
def __init__(self, release):
"""
Object representing Orient db release Version
:param release: String release
"""
#: string full OrientDB release
self.release = release
#: Major version
self.major = None
#: Minor version
self.minor = None
#: build number
self.build = None
#: string build version
self.subversion = None
self._parse_version(release)
def _parse_version( self, string_release ):
import re
if not isinstance(string_release, str):
string_release = string_release.decode()
try:
version_info = string_release.split( "." )
self.major = version_info[0]
self.minor = version_info[1]
self.build = version_info[2]
except IndexError:
pass
regx = re.match('.*([0-9]+).*', self.major )
self.major = regx.group(1)
try:
_temp = self.minor.split( "-" )
self.minor = _temp[0]
self.subversion = _temp[1]
except IndexError:
pass
try:
regx = re.match( '([0-9]+)[\.\- ]*(.*)', self.build )
self.build = regx.group(1)
self.subversion = regx.group(2)
except TypeError:
pass
self.major = int( self.major )
self.minor = int( self.minor )
self.build = 0 if self.build is None else int( self.build )
self.subversion = '' if self.subversion is None else str( self.subversion )
def __str__(self):
return self.release
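# Illustrative parses, following the logic in _parse_version() above:
# OrientVersion("2.2.17-rc1") yields major=2, minor=2, build=17, subversion='rc1';
# OrientVersion("2.0") yields major=2, minor=0, build=0, subversion=''.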
class OrientNode(object):
def __init__(self, node_dict=None):
"""
        Represents a server node in a multi-clustered configuration
        TODO: extend this object with additional listeners if the driver is ever going to support an abstraction of the HTTP protocol; for now we are not interested in that
        :param node_dict: dict with starting configs (usually from a db_open, db_reload record response)
"""
#: node name
self.name = None
        #: node id
self.id = None
#: datetime object the node was started
self.started_on = None
#: binary listener host
self.host = None
        #: binary listener port
self.port = None
if node_dict is not None:
self._parse_dict(node_dict)
def _parse_dict(self, node_dict):
self.id = node_dict['id']
self.name = node_dict['name']
self.started_on = node_dict['startedOn']
listener = None
for l in node_dict['listeners']:
if l['protocol'] == 'ONetworkProtocolBinary':
listener = l
break
if listener:
listen = listener['listen'].split(':')
self.host = listen[0]
self.port = listen[1]
def __str__(self):
return self.name
|
|
# -*- coding: utf-8 -*-
from rdflib import py3compat
__doc__ = py3compat.format_doctest_out("""
The :class:`~rdflib.resource.Resource` class wraps a
:class:`~rdflib.graph.Graph`
and a resource reference (i.e. a :class:`rdflib.term.URIRef` or
:class:`rdflib.term.BNode`) to support a resource-oriented way of
working with a graph.
It contains methods directly corresponding to those methods of the Graph
interface that relate to reading and writing data. The difference is that a
Resource also binds a resource identifier, making it possible to work without
tracking both the graph and a current subject. This makes for a "resource
oriented" style, as compared to the triple orientation of the Graph API.
Resulting generators are also wrapped so that any resource reference values
(:class:`rdflib.term.URIRef`s and :class:`rdflib.term.BNode`s) are in turn
wrapped as Resources. (Note that this behaviour differs from the corresponding
methods in :class:`~rdflib.graph.Graph`, where no such conversion takes place.)
Basic Usage Scenario
--------------------
Start by importing things we need and define some namespaces::
>>> from rdflib import *
>>> FOAF = Namespace("http://xmlns.com/foaf/0.1/")
>>> CV = Namespace("http://purl.org/captsolo/resume-rdf/0.2/cv#")
Load some RDF data::
>>> graph = Graph().parse(format='n3', data='''
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
... @prefix xsd: <http://www.w3.org/2001/XMLSchema#>.
... @prefix foaf: <http://xmlns.com/foaf/0.1/> .
... @prefix cv: <http://purl.org/captsolo/resume-rdf/0.2/cv#> .
...
... @base <http://example.org/> .
...
... </person/some1#self> a foaf:Person;
... rdfs:comment "Just a Python & RDF hacker."@en;
... foaf:depiction </images/person/some1.jpg>;
... foaf:homepage <http://example.net/>;
... foaf:name "Some Body" .
...
... </images/person/some1.jpg> a foaf:Image;
... rdfs:label "some 1"@en;
... rdfs:comment "Just an image"@en;
... foaf:thumbnail </images/person/some1-thumb.jpg> .
...
... </images/person/some1-thumb.jpg> a foaf:Image .
...
... [] a cv:CV;
... cv:aboutPerson </person/some1#self>;
... cv:hasWorkHistory [ cv:employedIn </#company>;
... cv:startDate "2009-09-04"^^xsd:date ] .
... ''')
Create a Resource::
>>> person = Resource(
... graph, URIRef("http://example.org/person/some1#self"))
Retrieve some basic facts::
>>> person.identifier
rdflib.term.URIRef(%(u)s'http://example.org/person/some1#self')
>>> person.value(FOAF.name)
rdflib.term.Literal(%(u)s'Some Body')
>>> person.value(RDFS.comment)
rdflib.term.Literal(%(u)s'Just a Python & RDF hacker.', lang=%(u)s'en')
Resources can be sliced (like graphs, but the subject is fixed)::
>>> for name in person[FOAF.name]:
... print(name)
Some Body
>>> person[FOAF.name : Literal("Some Body")]
True
Resources as unicode are represented by their identifiers as unicode::
>>> %(unicode)s(person) #doctest: +SKIP
    %(u)s'http://example.org/person/some1#self'
Resource references are also Resources, so you can easily get e.g. a qname
for the type of a resource, like::
>>> person.value(RDF.type).qname()
%(u)s'foaf:Person'
Or for the predicates of a resource::
>>> sorted(
... p.qname() for p in person.predicates()
... ) #doctest: +NORMALIZE_WHITESPACE +SKIP
[%(u)s'foaf:depiction', %(u)s'foaf:homepage',
%(u)s'foaf:name', %(u)s'rdf:type', %(u)s'rdfs:comment']
Follow relations and get more data from their Resources as well::
>>> for pic in person.objects(FOAF.depiction):
... print(pic.identifier)
... print(pic.value(RDF.type).qname())
... print(pic.label())
... print(pic.comment())
... print(pic.value(FOAF.thumbnail).identifier)
http://example.org/images/person/some1.jpg
foaf:Image
some 1
Just an image
http://example.org/images/person/some1-thumb.jpg
>>> for cv in person.subjects(CV.aboutPerson):
... work = list(cv.objects(CV.hasWorkHistory))[0]
... print(work.value(CV.employedIn).identifier)
... print(work.value(CV.startDate))
http://example.org/#company
2009-09-04
It's just as easy to work with the predicates of a resource::
>>> for s, p in person.subject_predicates():
... print(s.value(RDF.type).qname())
... print(p.qname())
... for s, o in p.subject_objects():
... print(s.value(RDF.type).qname())
... print(o.value(RDF.type).qname())
cv:CV
cv:aboutPerson
cv:CV
foaf:Person
This is useful for e.g. inspection::
>>> thumb_ref = URIRef("http://example.org/images/person/some1-thumb.jpg")
>>> thumb = Resource(graph, thumb_ref)
>>> for p, o in thumb.predicate_objects():
... print(p.qname())
... print(o.qname())
rdf:type
foaf:Image
Similarly, adding, setting and removing data is easy::
>>> thumb.add(RDFS.label, Literal("thumb"))
>>> print(thumb.label())
thumb
>>> thumb.set(RDFS.label, Literal("thumbnail"))
>>> print(thumb.label())
thumbnail
>>> thumb.remove(RDFS.label)
>>> list(thumb.objects(RDFS.label))
[]
Schema Example
--------------
With this artificial schema data::
>>> graph = Graph().parse(format='n3', data='''
... @prefix rdf: <http://www.w3.org/1999/02/22-rdf-syntax-ns#> .
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
... @prefix owl: <http://www.w3.org/2002/07/owl#> .
... @prefix v: <http://example.org/def/v#> .
...
... v:Artifact a owl:Class .
...
... v:Document a owl:Class;
... rdfs:subClassOf v:Artifact .
...
... v:Paper a owl:Class;
... rdfs:subClassOf v:Document .
...
... v:Choice owl:oneOf (v:One v:Other) .
...
... v:Stuff a rdf:Seq; rdf:_1 v:One; rdf:_2 v:Other .
...
... ''')
From this class::
>>> artifact = Resource(graph, URIRef("http://example.org/def/v#Artifact"))
we can get at subclasses::
>>> subclasses = list(artifact.transitive_subjects(RDFS.subClassOf))
>>> [c.qname() for c in subclasses]
[%(u)s'v:Artifact', %(u)s'v:Document', %(u)s'v:Paper']
and superclasses from the last subclass::
>>> [c.qname() for c in subclasses[-1].transitive_objects(RDFS.subClassOf)]
[%(u)s'v:Paper', %(u)s'v:Document', %(u)s'v:Artifact']
Get items from the Choice::
>>> choice = Resource(graph, URIRef("http://example.org/def/v#Choice"))
>>> [it.qname() for it in choice.value(OWL.oneOf).items()]
[%(u)s'v:One', %(u)s'v:Other']
And the sequence of Stuff::
>>> stuff = Resource(graph, URIRef("http://example.org/def/v#Stuff"))
>>> [it.qname() for it in stuff.seq()]
[%(u)s'v:One', %(u)s'v:Other']
On add, other resources are auto-unboxed:
>>> paper = Resource(graph, URIRef("http://example.org/def/v#Paper"))
>>> paper.add(RDFS.subClassOf, artifact)
>>> artifact in paper.objects(RDFS.subClassOf) # checks Resource instance
True
>>> (paper._identifier, RDFS.subClassOf, artifact._identifier) in graph
True
Technical Details
-----------------
Comparison is based on graph and identifier::
>>> g1 = Graph()
>>> t1 = Resource(g1, URIRef("http://example.org/thing"))
>>> t2 = Resource(g1, URIRef("http://example.org/thing"))
>>> t3 = Resource(g1, URIRef("http://example.org/other"))
>>> t4 = Resource(Graph(), URIRef("http://example.org/other"))
>>> t1 is t2
False
>>> t1 == t2
True
>>> t1 != t2
False
>>> t1 == t3
False
>>> t1 != t3
True
>>> t3 != t4
True
>>> t3 < t1 and t1 > t3
True
>>> t1 >= t1 and t1 >= t3
True
>>> t1 <= t1 and t3 <= t1
True
>>> t1 < t1 or t1 < t3 or t3 > t1 or t3 > t3
False
Hash is computed from graph and identifier::
>>> g1 = Graph()
>>> t1 = Resource(g1, URIRef("http://example.org/thing"))
>>> hash(t1) == hash(Resource(g1, URIRef("http://example.org/thing")))
True
>>> hash(t1) == hash(Resource(Graph(), t1.identifier))
False
>>> hash(t1) == hash(Resource(Graph(), URIRef("http://example.org/thing")))
False
The Resource class is suitable as a base class for mapper toolkits. For
example, consider this utility for accessing RDF properties via qname-like
attributes::
>>> class Item(Resource):
...
... def __getattr__(self, p):
... return list(self.objects(self._to_ref(*p.split('_', 1))))
...
... def _to_ref(self, pfx, name):
... return URIRef(self._graph.store.namespace(pfx) + name)
It works as follows::
>>> graph = Graph().parse(format='n3', data='''
... @prefix rdfs: <http://www.w3.org/2000/01/rdf-schema#> .
... @prefix foaf: <http://xmlns.com/foaf/0.1/> .
...
... @base <http://example.org/> .
... </person/some1#self>
... foaf:name "Some Body";
... foaf:depiction </images/person/some1.jpg> .
... </images/person/some1.jpg> rdfs:comment "Just an image"@en .
... ''')
>>> person = Item(graph, URIRef("http://example.org/person/some1#self"))
>>> print(person.foaf_name[0])
Some Body
The mechanism for wrapping references as resources cooperates with subclasses.
Therefore, accessing referenced resources automatically creates new ``Item``
objects::
>>> isinstance(person.foaf_depiction[0], Item)
True
>>> print(person.foaf_depiction[0].rdfs_comment[0])
Just an image
""")
from rdflib import py3compat  # needed for the PY3 check in the class body below
from rdflib.term import Node, BNode, URIRef
from rdflib.namespace import RDF
from rdflib.paths import Path
__all__ = ['Resource']
class Resource(object):
def __init__(self, graph, subject):
self._graph = graph
self._identifier = subject
graph = property(lambda self: self._graph)
identifier = property(lambda self: self._identifier)
def __hash__(self):
return hash(Resource) ^ hash(self._graph) ^ hash(self._identifier)
def __eq__(self, other):
return (isinstance(other, Resource) and
self._graph == other._graph and
self._identifier == other._identifier)
__ne__ = lambda self, other: not self == other
def __lt__(self, other):
if isinstance(other, Resource):
return self._identifier < other._identifier
else:
return False
__gt__ = lambda self, other: not (self < other or self == other)
__le__ = lambda self, other: self < other or self == other
__ge__ = lambda self, other: not self < other
def __unicode__(self):
return str(self._identifier)
if py3compat.PY3:
__str__ = __unicode__
def add(self, p, o):
if isinstance(o, Resource):
o = o._identifier
self._graph.add((self._identifier, p, o))
def remove(self, p, o=None):
if isinstance(o, Resource):
o = o._identifier
self._graph.remove((self._identifier, p, o))
def set(self, p, o):
if isinstance(o, Resource):
o = o._identifier
self._graph.set((self._identifier, p, o))
def subjects(self, predicate=None): # rev
return self._resources(
self._graph.subjects(predicate, self._identifier))
def predicates(self, o=None):
if isinstance(o, Resource):
o = o._identifier
return self._resources(
self._graph.predicates(self._identifier, o))
def objects(self, predicate=None):
return self._resources(
self._graph.objects(self._identifier, predicate))
def subject_predicates(self):
return self._resource_pairs(
self._graph.subject_predicates(self._identifier))
def subject_objects(self):
return self._resource_pairs(
self._graph.subject_objects(self._identifier))
def predicate_objects(self):
return self._resource_pairs(
self._graph.predicate_objects(self._identifier))
def value(self, p=RDF.value, o=None, default=None, any=True):
if isinstance(o, Resource):
o = o._identifier
return self._cast(
self._graph.value(self._identifier, p, o, default, any))
def label(self):
return self._graph.label(self._identifier)
def comment(self):
return self._graph.comment(self._identifier)
def items(self):
return self._resources(self._graph.items(self._identifier))
def transitive_objects(self, predicate, remember=None):
return self._resources(self._graph.transitive_objects(
self._identifier, predicate, remember))
def transitive_subjects(self, predicate, remember=None):
return self._resources(self._graph.transitive_subjects(
predicate, self._identifier, remember))
def seq(self):
return self._resources(self._graph.seq(self._identifier))
def qname(self):
return self._graph.qname(self._identifier)
def _resource_pairs(self, pairs):
for s1, s2 in pairs:
yield self._cast(s1), self._cast(s2)
def _resource_triples(self, triples):
for s,p,o in triples:
yield self._cast(s), self._cast(p), self._cast(o)
def _resources(self, nodes):
for node in nodes:
yield self._cast(node)
def _cast(self, node):
if isinstance(node, (BNode, URIRef)):
return self._new(node)
else:
return node
def __iter__(self):
return self._resource_triples(self._graph.triples((self.identifier, None, None)))
def __getitem__(self, item):
if isinstance(item, slice):
if item.step:
raise TypeError("Resources fix the subject for slicing, and can only be sliced by predicate/object. ")
p,o=item.start,item.stop
if p is None and o is None:
return self.predicate_objects()
elif p is None:
return self.predicates(o)
elif o is None:
return self.objects(p)
else:
return (self.identifier, p, o) in self._graph
elif isinstance(item, (Node, Path)):
return self.objects(item)
else:
raise TypeError("You can only index a resource by a single rdflib term, a slice of rdflib terms, not %s (%s)"%(item, type(item)))
def __setitem__(self, item, value):
self.set(item, value)
def _new(self, subject):
return type(self)(self._graph, subject)
def __str__(self):
return 'Resource(%s)' % self._identifier
def __repr__(self):
return 'Resource(%s,%s)' % (self._graph, self._identifier)
|
|
import asyncio
import uuid
from waterbutler.core.streams import BaseStream
from waterbutler.core.streams import MultiStream
from waterbutler.core.streams import StringStream
class FormDataStream(MultiStream):
"""A child of MultiSteam used to create stream friendly multipart form data requests.
Usage:
>>> stream = FormDataStream(key1='value1', file=FileStream(...))
Or:
>>> stream = FormDataStream()
>>> stream.add_field('key1', 'value1')
>>> stream.add_file('file', FileStream(...), mime='text/plain')
Additional options for files can be passed as a tuple ordered as:
>>> FormDataStream(fieldName=(FileStream(...), 'fileName', 'Mime', 'encoding'))
    Auto-generates boundaries and properly concatenates them.
    Use FormDataStream.headers to get the proper headers to be included with requests,
    namely Content-Length and Content-Type.
"""
@classmethod
def make_boundary(cls):
"""Creates a random-ish boundary for
form data seperator
"""
return uuid.uuid4().hex
@classmethod
def make_header(cls, name, disposition='form-data', additional_headers=None, **extra):
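        """Build a single multipart section header (CRLF line endings).

        For example, make_header('file', filename='a.txt',
        additional_headers={'Content-Type': 'text/plain'}) yields::

            Content-Disposition: form-data; name="file"; filename="a.txt"
            Content-Type: text/plain
            <blank line>
        """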
additional_headers = additional_headers or {}
header = 'Content-Disposition: {}; name="{}"'.format(disposition, name)
header += ''.join([
'; {}="{}"'.format(key, value)
for key, value
in extra.items()
if value is not None
])
additional = '\r\n'.join([
'{}: {}'.format(key, value)
for key, value in additional_headers.items()
if value is not None
])
header += '\r\n'
if additional:
header += additional
header += '\r\n'
return header + '\r\n'
def __init__(self, **fields):
""":param dict fields: A dict of fieldname: value to create the body of the stream"""
self.can_add_more = True
self.boundary = self.make_boundary()
super().__init__()
for key, value in fields.items():
if isinstance(value, tuple):
self.add_file(key, *value)
elif isinstance(value, asyncio.StreamReader):
self.add_file(key, value)
else:
self.add_field(key, value)
@property
def end_boundary(self):
return StringStream('--{}--\r\n'.format(self.boundary))
@property
def headers(self):
"""The headers required to make a proper multipart form request
Implicitly calls finalize as accessing headers will often indicate sending of the request
Meaning nothing else will be added to the stream"""
self.finalize()
return {
'Content-Length': str(self.size),
'Content-Type': 'multipart/form-data; boundary={}'.format(self.boundary)
}
@asyncio.coroutine
def read(self, n=-1):
if self.can_add_more:
self.finalize()
return (yield from super().read(n=n))
def finalize(self):
assert self.stream, 'Must add at least one stream to finalize'
if self.can_add_more:
self.can_add_more = False
self.add_streams(self.end_boundary)
def add_fields(self, **fields):
for key, value in fields.items():
self.add_field(key, value)
def add_field(self, key, value):
assert self.can_add_more, 'Cannot add more fields after calling finalize or read'
self.add_streams(
self._make_boundary_stream(),
StringStream(self.make_header(key) + value + '\r\n')
)
def add_file(self, field_name, file_stream, file_name=None, mime='application/octet-stream', disposition='file', transcoding='binary'):
assert self.can_add_more, 'Cannot add more fields after calling finalize or read'
header = self.make_header(
field_name,
disposition=disposition,
filename=file_name,
additional_headers={
'Content-Type': mime,
'Content-Transfer-Encoding': transcoding
}
)
self.add_streams(
self._make_boundary_stream(),
StringStream(header),
file_stream,
StringStream('\r\n')
)
def _make_boundary_stream(self):
return StringStream('--{}\r\n'.format(self.boundary))
class ResponseStreamReader(BaseStream):
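    """Stream wrapper around an HTTP response object (one exposing
    ``headers``, ``status`` and ``content``, e.g. an aiohttp response),
    sizing itself from the Content-Length header when available.
    """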
def __init__(self, response, size=None, name=None, unsizable=False):
super().__init__()
if 'Content-Length' in response.headers:
self._size = int(response.headers['Content-Length'])
elif not unsizable:
self._size = int(size)
else:
self._size = None
self._name = name
self.response = response
@property
def partial(self):
return self.response.status == 206
@property
def content_type(self):
return self.response.headers.get('Content-Type', 'application/octet-stream')
@property
def content_range(self):
return self.response.headers['Content-Range']
@property
def name(self):
return self._name
@property
def size(self):
return self._size
@asyncio.coroutine
def _read(self, size):
chunk = (yield from self.response.content.read(size))
if not chunk:
self.feed_eof()
return chunk
class RequestStreamReader(BaseStream):
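    """Stream wrapper around an incoming request body (``inner``, e.g. an
    asyncio.StreamReader), delegating reads and EOF checks to it.
    """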
def __init__(self, request, inner):
super().__init__()
self.inner = inner
self.request = request
@property
def size(self):
return int(self.request.headers.get('Content-Length'))
def at_eof(self):
return self.inner.at_eof()
@asyncio.coroutine
def _read(self, size):
if self.inner.at_eof():
return b''
if size < 0:
return (yield from self.inner.read(size))
try:
return (yield from self.inner.readexactly(size))
except asyncio.IncompleteReadError as e:
return e.partial
|
|
#!/usr/bin/env python
from strands_executive_msgs.msg import Task
import rospy
import json
import requests
from calendar import timegm
from dateutil import parser
from dateutil import tz
from datetime import datetime
from datetime import timedelta
from strands_executive_msgs.srv import CreateTask
from pprint import pprint
from threading import Thread
PKG = 'gcal_routine'
def rostime_str(rt):
return str(datetime.fromtimestamp(rt.secs)) + ' ' + str(rt.secs)
class GCal:
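    """Watches a Google Calendar feed (or a local JSON file) and converts its
    events into strands_executive Task messages, reporting additions and
    removals through the optional add_cb/remove_cb callbacks.
    """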
def __init__(self, calendar, key, add_cb=None,
remove_cb=None, update_wait=60, minTimeDelta=None,
maxTimeDelta=None, file_name=None, time_critical=False):
self.tz_utc = tz.gettz('UTC')
if file_name is not None:
self.uri = file_name
else:
self.uri = self._get_url(calendar, key)
self.time_offset = rospy.Duration.from_sec(0)
rospy.loginfo('using uri %s', self.uri)
self.events = {}
self.gcal = {}
self.previous_events = {}
self.update_wait = update_wait
self.add_cb = add_cb
self.remove_cb = remove_cb
self.minTimeDelta = minTimeDelta
self.maxTimeDelta = maxTimeDelta
self.time_critical = time_critical
self.update_worker = Thread(target=self._update_run)
def start_worker(self):
self.update_worker.start()
def _get_url(self, calendar, key, max_results=2500):
return 'https://www.googleapis.com/calendar/v3/calendars/' \
'%s/events?key=%s&singleEvents=true&' \
'orderBy=startTime&maxResults=%d' % (calendar,
key, max_results)
def _update_run(self):
# make sure we can be killed here
while not rospy.is_shutdown():
added = []
removed = []
self.update(added, removed)
# sleep until next check
target = rospy.get_rostime()
target.secs = target.secs + self.update_wait
while rospy.get_rostime() < target and not rospy.is_shutdown():
rospy.sleep(1)
def shift_to_now(self):
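        """Shift all known events so that the earliest one starts now,
        keeping the offset so later updates are shifted consistently.
        """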
times = [s.start_after for s in self.events.values()]
if len(times) < 1:
return
self.time_offset = min(times) - rospy.get_rostime()
rospy.logdebug('now is %s', rostime_str(rospy.get_rostime()))
for s in self.events.values():
s.start_after = s.start_after - self.time_offset
s.end_before = s.end_before - self.time_offset
rospy.logdebug('new event times for %s: %s -> %s',
s.action,
rostime_str(s.start_after),
rostime_str(s.end_before))
def update(self, added, removed):
self.previous_events = self.events.copy()
if self.uri.lower().startswith('http'):
try:
uri = self.uri
now = datetime.now()
if self.minTimeDelta is not None:
mt = now - timedelta(days=self.minTimeDelta)
uri = "%s&timeMin=%sZ" % (uri, mt.isoformat())
if self.maxTimeDelta is not None:
mt = now + timedelta(days=self.maxTimeDelta)
uri = "%s&timeMax=%sZ" % (uri, mt.isoformat())
rospy.loginfo('updating from google calendar %s', uri)
response = requests.get(uri)
self.gcal = json.loads(response.text)
            except Exception as e:
rospy.logerr('failed to get response from %s: %s',
self.uri, str(e))
return
else:
g = open(self.uri, 'rb')
self.gcal = json.loads(g.read())
g.close()
self._to_task_list()
if self._find_changes(added, removed):
rospy.loginfo('changes in the calendar to process +%d -%d',
len(added), len(removed))
for a in added:
rospy.loginfo('instantiate %s' % a)
self.events[a] = self.task_from_gcal(self.events[a])
if self.add_cb is not None:
for a in added:
self.add_cb(a, self.events[a])
if self.remove_cb is not None:
for r in removed:
self.remove_cb(r, self.previous_events[r])
return True
else:
rospy.logdebug('no changes, keep watching')
return False
def get_task_list(self):
return self.events
    def _find_changes(self, added=None, removed=None):
        """
        Identifies the change set, extending `added` and `removed` in place.
        Returns True when a change has been found.
        """
        added = [] if added is None else added
        removed = [] if removed is None else removed
new_ids = set(self.events.keys())
prev_ids = set(self.previous_events.keys())
additions = new_ids.difference(prev_ids)
deletions = prev_ids.difference(new_ids)
if len(additions) > 0 or len(deletions) > 0:
added.extend(additions)
removed.extend(deletions)
return True
else:
return False
def task_from_gcal(self, gcal_event):
start = parser.parse(gcal_event['start']['dateTime'])
start_utc = start.astimezone(self.tz_utc)
end = parser.parse(gcal_event['end']['dateTime'])
end_utc = end.astimezone(self.tz_utc)
action_name = gcal_event['summary']
factory_name = '/' + action_name + "_create"
try:
factory = rospy.ServiceProxy(factory_name, CreateTask)
# if 'description' in gcal_event:
# t = factory.call(gcal_event['description']).task
# else:
start_after = rospy.Time.from_sec(timegm(start_utc.timetuple())) \
- self.time_offset
end_before = rospy.Time.from_sec(timegm(end_utc.timetuple())) \
- self.time_offset
sa = "start_after: {secs: %d, nsecs: %d}" % \
(start_after.secs, start_after.nsecs)
eb = "end_before: {secs: %d, nsecs: %d}" % \
(end_before.secs, end_before.nsecs)
sn = "start_node_id: '%s'" % gcal_event['location']
en = "end_node_id: '%s'" % gcal_event['location']
            if 'description' in gcal_event:
ds = "description: '%s'" % gcal_event['description']
else:
ds = "description: "
yaml = "{%s, %s, %s, %s, %s}" % (sa, eb, sn, en, ds)
rospy.loginfo("calling with pre-populated yaml: %s" % yaml)
t = factory.call(yaml).task
rospy.loginfo("got the task back: %s" % str(t))
except Exception as e:
rospy.logwarn("Couldn't instantiate task from factory %s."
"error: %s."
"Using default constructor." %
(factory_name, str(e)))
t = Task()
t.action = gcal_event['summary']
t.start_after = rospy.Time.from_sec(
timegm(start_utc.timetuple())) \
- self.time_offset
t.end_before = rospy.Time.from_sec(timegm(end_utc.timetuple())) \
- self.time_offset
if 'location' in gcal_event:
t.start_node_id = gcal_event['location']
if len(t.end_node_id) == 0:
t.end_node_id = gcal_event['location']
if t.max_duration.secs == 0:
t.max_duration = (t.end_before - t.start_after) / 2
        # If it's a time-critical task, then the new
        # scheduler requires the task to have the same end
        # time as start time, to indicate time criticality.
        # Opportunistically, in this case we assume the
        # max duration to be half the calendar event's length.
if self.time_critical:
t.max_duration = t.end_before - t.start_after
t.max_duration.secs = t.max_duration.secs / 2
t.end_before = t.start_after
return t
def _to_task_list(self):
self.events = {}
for gcal_event in self.gcal['items']:
try:
k = gcal_event['id'] + gcal_event['updated']
self.events[k] = gcal_event
except Exception as e:
rospy.logerr('failed to convert event from iCal to task: %s',
str(e))
if __name__ == '__main__':
t = Task()
pprint(t)
|
|
import pytest
import numpy as np
from devito import (Grid, TimeFunction, SparseTimeFunction, Function, Operator, Eq,
SubDimension, SubDomain, configuration, solve)
from devito.finite_differences import Derivative
from devito.finite_differences.differentiable import diff2sympy
from devito.exceptions import InvalidOperator
from devito.ir import FindSymbols, retrieve_iteration_tree
from devito.passes.equations.linearity import collect_derivatives
from devito.tools import timed_region
class TestCollectDerivatives(object):
"""
Test collect_derivatives and all mechanisms used by collect_derivatives
indirectly.
"""
def test_nocollection_if_diff_dims(self):
"""
Test that expressions with different time dimensions are not collected.
"""
grid = Grid((10,))
f = TimeFunction(name="f", grid=grid, save=10)
f2 = TimeFunction(name="f2", grid=grid, save=10)
g = TimeFunction(name="g", grid=grid)
g2 = TimeFunction(name="g2", grid=grid)
w = Function(name="w", grid=grid)
with timed_region('x'):
eq = Eq(w, f.dt*g + f2.dt*g2)
        # Since all Functions are time dependent, there should be no collection,
        # and the result should be the same as with the pre-evaluated expression
expr = Operator._lower_exprs([eq])[0]
expr2 = Operator._lower_exprs([eq.evaluate])[0]
assert expr == expr2
def test_numeric_constant(self):
grid = Grid(shape=(10, 10))
u = TimeFunction(name="u", grid=grid, space_order=4, time_order=2)
eq = Eq(u.forward, u.dx.dx + 0.3*u.dy.dx)
leq = collect_derivatives.func([eq])[0]
assert len(leq.find(Derivative)) == 3
def test_symbolic_constant(self):
grid = Grid(shape=(10, 10))
dt = grid.time_dim.spacing
u = TimeFunction(name="u", grid=grid, space_order=4, time_order=2)
eq = Eq(u.forward, u.dx.dx + dt**0.2*u.dy.dx)
leq = collect_derivatives.func([eq])[0]
assert len(leq.find(Derivative)) == 3
def test_symbolic_constant_times_add(self):
grid = Grid(shape=(10, 10))
dt = grid.time_dim.spacing
u = TimeFunction(name="u", grid=grid, space_order=4, time_order=2)
f = Function(name='f', grid=grid)
eq = Eq(u.forward, u.laplace + dt**0.2*u.biharmonic(1/f))
leq = collect_derivatives.func([eq])[0]
assert len(eq.rhs.args) == 3
assert len(leq.rhs.args) == 2
assert all(isinstance(i, Derivative) for i in leq.rhs.args)
def test_solve(self):
"""
By remaining unevaluated until after Operator's collect_derivatives,
the Derivatives after a solve() should be collected.
"""
grid = Grid(shape=(10, 10))
u = TimeFunction(name="u", grid=grid, space_order=4, time_order=2)
pde = u.dt2 - (u.dx.dx + u.dy.dy) - u.dx.dy
eq = Eq(u.forward, solve(pde, u.forward))
leq = collect_derivatives.func([eq])[0]
assert len(eq.rhs.find(Derivative)) == 5
assert len(leq.rhs.find(Derivative)) == 4
assert len(leq.rhs.args[3].find(Derivative)) == 3 # Check factorization
def test_nocollection_if_unworthy(self):
grid = Grid(shape=(10, 10))
dt = grid.time_dim.spacing
u = TimeFunction(name="u", grid=grid)
eq = Eq(u.forward, (0.4 + dt)*(u.dx + u.dy))
leq = collect_derivatives.func([eq])[0]
assert eq == leq
def test_pull_and_collect(self):
grid = Grid(shape=(10, 10))
dt = grid.time_dim.spacing
hx, _ = grid.spacing_symbols
u = TimeFunction(name="u", grid=grid)
v = TimeFunction(name="v", grid=grid)
eq = Eq(u.forward, ((0.4 + dt)*u.dx + 0.3)*hx + v.dx)
leq = collect_derivatives.func([eq])[0]
assert eq != leq
args = leq.rhs.args
assert len(args) == 2
assert diff2sympy(args[0]) == 0.3*hx
assert args[1] == (hx*(dt + 0.4)*u + v).dx
def test_pull_and_collect_nested(self):
grid = Grid(shape=(10, 10))
dt = grid.time_dim.spacing
hx, hy = grid.spacing_symbols
u = TimeFunction(name="u", grid=grid, space_order=2)
v = TimeFunction(name="v", grid=grid, space_order=2)
eq = Eq(u.forward, (((0.4 + dt)*u.dx + 0.3)*hx + v.dx).dy + (0.2 + hy)*v.dy)
leq = collect_derivatives.func([eq])[0]
assert eq != leq
assert leq.rhs == ((v + hx*(0.4 + dt)*u).dx + 0.3*hx + (0.2 + hy)*v).dy
def test_pull_and_collect_nested_v2(self):
grid = Grid(shape=(10, 10))
dt = grid.time_dim.spacing
hx, hy = grid.spacing_symbols
u = TimeFunction(name="u", grid=grid, space_order=2)
v = TimeFunction(name="v", grid=grid, space_order=2)
eq = Eq(u.forward, ((0.4 + dt*(hy + 1. + hx*hy))*u.dx + 0.3)*hx + v.dx)
leq = collect_derivatives.func([eq])[0]
assert eq != leq
assert leq.rhs == 0.3*hx + (hx*(0.4 + dt*(hy + 1. + hx*hy))*u + v).dx
def test_nocollection_subdims(self):
grid = Grid(shape=(10, 10))
xi, yi = grid.interior.dimensions
u = TimeFunction(name="u", grid=grid)
v = TimeFunction(name="v", grid=grid)
f = Function(name='f', grid=grid)
eq = Eq(u.forward, u.dx + 0.2*f[xi, yi]*v.dx)
leq = collect_derivatives.func([eq])[0]
assert eq == leq
def test_nocollection_staggered(self):
grid = Grid(shape=(10, 10))
x, y = grid.dimensions
u = TimeFunction(name="u", grid=grid)
v = TimeFunction(name="v", grid=grid, staggered=x)
eq = Eq(u.forward, u.dx + v.dx)
leq = collect_derivatives.func([eq])[0]
assert eq == leq
class TestBuffering(object):
def test_basic(self):
nt = 10
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, save=nt)
u1 = TimeFunction(name='u', grid=grid, save=nt)
eqn = Eq(u.forward, u + 1)
op0 = Operator(eqn, opt='noop')
op1 = Operator(eqn, opt='buffering')
# Check generated code
assert len(retrieve_iteration_tree(op1)) == 2
buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(buffers) == 1
assert buffers.pop().symbolic_shape[0] == 2
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1)
assert np.all(u.data == u1.data)
@pytest.mark.parametrize('async_degree', [2, 4])
def test_async_degree(self, async_degree):
nt = 10
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, save=nt)
u1 = TimeFunction(name='u', grid=grid, save=nt)
eqn = Eq(u.forward, u + 1)
op0 = Operator(eqn, opt='noop')
op1 = Operator(eqn, opt=('buffering', {'buf-async-degree': async_degree}))
# Check generated code
assert len(retrieve_iteration_tree(op1)) == 2
buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(buffers) == 1
assert buffers.pop().symbolic_shape[0] == async_degree
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1)
assert np.all(u.data == u1.data)
def test_two_heterogeneous_buffers(self):
nt = 10
grid = Grid(shape=(4, 4))
u = TimeFunction(name='u', grid=grid, save=nt)
u1 = TimeFunction(name='u', grid=grid, save=nt)
v = TimeFunction(name='v', grid=grid, save=nt)
v1 = TimeFunction(name='v', grid=grid, save=nt)
eqns = [Eq(u.forward, u + v + 1),
Eq(v.forward, u + v + v.backward)]
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt='buffering')
# Check generated code
assert len(retrieve_iteration_tree(op1)) == 3
buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(buffers) == 2
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1, v=v1)
assert np.all(u.data == u1.data)
assert np.all(v.data == v1.data)
def test_unread_buffered_function(self):
nt = 10
grid = Grid(shape=(4, 4))
time = grid.time_dim
u = TimeFunction(name='u', grid=grid, save=nt)
u1 = TimeFunction(name='u', grid=grid, save=nt)
v = TimeFunction(name='v', grid=grid)
v1 = TimeFunction(name='v', grid=grid)
eqns = [Eq(v.forward, v + 1, implicit_dims=time),
Eq(u, v)]
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt='buffering')
# Check generated code
assert len(retrieve_iteration_tree(op1)) == 1
buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(buffers) == 1
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1, v=v1)
assert np.all(u.data == u1.data)
assert np.all(v.data == v1.data)
def test_over_injection(self):
nt = 10
grid = Grid(shape=(4, 4))
src = SparseTimeFunction(name='src', grid=grid, npoint=1, nt=nt)
rec = SparseTimeFunction(name='rec', grid=grid, npoint=1, nt=nt)
u = TimeFunction(name="u", grid=grid, time_order=2, space_order=2, save=nt)
u1 = TimeFunction(name="u", grid=grid, time_order=2, space_order=2, save=nt)
src.data[:] = 1.
eqns = ([Eq(u.forward, u + 1)] +
src.inject(field=u.forward, expr=src) +
rec.interpolate(expr=u.forward))
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt='buffering')
# Check generated code
assert len(retrieve_iteration_tree(op1)) ==\
5 + bool(configuration['language'] != 'C')
buffers = [i for i in FindSymbols().visit(op1) if i.is_Array]
assert len(buffers) == 1
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1)
assert np.all(u.data == u1.data)
def test_over_one_subdomain(self):
class sd0(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 3, 3), y: ('middle', 3, 3)}
s_d0 = sd0()
nt = 10
grid = Grid(shape=(10, 10), subdomains=(s_d0,))
u = TimeFunction(name="u", grid=grid, save=nt)
u1 = TimeFunction(name="u", grid=grid, save=nt)
v = TimeFunction(name='v', grid=grid)
v1 = TimeFunction(name='v', grid=grid)
eqns = [Eq(v.forward, v + 1, subdomain=s_d0),
Eq(u, v, subdomain=s_d0)]
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt='buffering')
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1, v=v1)
assert np.all(u.data == u1.data)
assert np.all(v.data == v1.data)
def test_over_two_subdomains_illegal(self):
"""
Cannot use buffering when:
* an Eq writes to `f` using one set of SubDimensions
* another Eq reads from `f` through a different set of SubDimensions
as the second Eq may want to read unwritten memory (i.e., zero-valued)
in the buffered Function, while with buffering it might end up reading values
written in a previous iteration, thus breaking a storage-related RAW dependence.
"""
class sd0(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 3, 3), y: ('middle', 3, 3)}
class sd1(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 2, 2), y: ('middle', 2, 2)}
s_d0 = sd0()
s_d1 = sd1()
nt = 10
grid = Grid(shape=(10, 10), subdomains=(s_d0, s_d1))
u = TimeFunction(name="u", grid=grid, save=nt)
eqns = [Eq(u.forward, u + 1, subdomain=s_d0),
Eq(u.forward, u.forward + 1, subdomain=s_d1)]
        # Buffering across two different sets of SubDimensions must be rejected
        with pytest.raises(InvalidOperator):
            Operator(eqns, opt='buffering')
@pytest.mark.xfail(reason="Cannot deal with non-overlapping SubDimensions yet")
def test_over_two_subdomains(self):
class sd0(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('left', 2), y: ('left', 2)}
class sd1(SubDomain):
name = 'd0'
def define(self, dimensions):
x, y = dimensions
return {x: ('middle', 2, 2), y: ('middle', 2, 2)}
s_d0 = sd0()
s_d1 = sd1()
nt = 10
grid = Grid(shape=(10, 10), subdomains=(s_d0, s_d1))
u = TimeFunction(name="u", grid=grid, save=nt)
u1 = TimeFunction(name="u", grid=grid, save=nt)
eqns = [Eq(u.forward, u + 1, subdomain=s_d0),
Eq(u.forward, u.forward + u + 1, subdomain=s_d1)]
op0 = Operator(eqns, opt='noop')
op1 = Operator(eqns, opt='buffering')
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1)
assert np.all(u.data == u1.data)
def test_subdimensions(self):
nt = 10
grid = Grid(shape=(10, 10, 10))
x, y, z = grid.dimensions
xi = SubDimension.middle(name='xi', parent=x, thickness_left=2, thickness_right=2)
yi = SubDimension.middle(name='yi', parent=y, thickness_left=2, thickness_right=2)
zi = SubDimension.middle(name='zi', parent=z, thickness_left=2, thickness_right=2)
u = TimeFunction(name='u', grid=grid, save=nt)
u1 = TimeFunction(name='u', grid=grid, save=nt)
eqn = Eq(u.forward, u + 1).xreplace({x: xi, y: yi, z: zi})
op0 = Operator(eqn, opt='noop')
op1 = Operator(eqn, opt='buffering')
# Check generated code
assert len(retrieve_iteration_tree(op1)) == 2
assert len([i for i in FindSymbols().visit(op1) if i.is_Array]) == 1
op0.apply(time_M=nt-2)
op1.apply(time_M=nt-2, u=u1)
assert np.all(u.data == u1.data)
|
|
# Copyright 2009-present MongoDB, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Bits and pieces used by the driver that don't really fit elsewhere."""
import sys
import traceback
from bson.py3compat import abc, iteritems, itervalues, string_type
from bson.son import SON
from pymongo import ASCENDING
from pymongo.errors import (CursorNotFound,
DuplicateKeyError,
ExecutionTimeout,
NotPrimaryError,
OperationFailure,
WriteError,
WriteConcernError,
WTimeoutError)
from pymongo.hello_compat import HelloCompat
# From the SDAM spec, the "node is shutting down" codes.
_SHUTDOWN_CODES = frozenset([
11600, # InterruptedAtShutdown
91, # ShutdownInProgress
])
# From the SDAM spec, the "not primary" error codes are combined with the
# "node is recovering" error codes (of which the "node is shutting down"
# errors are a subset).
_NOT_MASTER_CODES = frozenset([
10058, # LegacyNotPrimary <=3.2 "not primary" error code
10107, # NotWritablePrimary
13435, # NotPrimaryNoSecondaryOk
11602, # InterruptedDueToReplStateChange
13436, # NotPrimaryOrSecondary
189, # PrimarySteppedDown
]) | _SHUTDOWN_CODES
# From the retryable writes spec.
_RETRYABLE_ERROR_CODES = _NOT_MASTER_CODES | frozenset([
7, # HostNotFound
6, # HostUnreachable
89, # NetworkTimeout
9001, # SocketException
262, # ExceededTimeLimit
])
_UUNDER = u"_"
def _gen_index_name(keys):
"""Generate an index name from the set of fields it is over."""
return _UUNDER.join(["%s_%s" % item for item in keys])
def _index_list(key_or_list, direction=None):
"""Helper to generate a list of (key, direction) pairs.
Takes such a list, or a single key, or a single key and direction.
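    For example, _index_list("a") returns [("a", ASCENDING)], and
    _index_list("a", DESCENDING) returns [("a", DESCENDING)].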
"""
if direction is not None:
return [(key_or_list, direction)]
else:
if isinstance(key_or_list, string_type):
return [(key_or_list, ASCENDING)]
elif not isinstance(key_or_list, (list, tuple)):
raise TypeError("if no direction is specified, "
"key_or_list must be an instance of list")
return key_or_list
def _index_document(index_list):
"""Helper to generate an index specifying document.
Takes a list of (key, direction) pairs.
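    For example, [("a", 1), ("b", -1)] becomes SON([("a", 1), ("b", -1)]).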
"""
if isinstance(index_list, abc.Mapping):
raise TypeError("passing a dict to sort/create_index/hint is not "
"allowed - use a list of tuples instead. did you "
"mean %r?" % list(iteritems(index_list)))
elif not isinstance(index_list, (list, tuple)):
raise TypeError("must use a list of (key, direction) pairs, "
"not: " + repr(index_list))
if not len(index_list):
raise ValueError("key_or_list must not be the empty list")
index = SON()
for (key, value) in index_list:
if not isinstance(key, string_type):
raise TypeError("first item in each key pair must be a string")
if not isinstance(value, (string_type, int, abc.Mapping)):
raise TypeError("second item in each key pair must be 1, -1, "
"'2d', or another valid MongoDB index specifier.")
index[key] = value
return index
def _check_command_response(response, max_wire_version,
allowable_errors=None,
parse_write_concern_error=False):
"""Check the response to a command for errors.
"""
if "ok" not in response:
# Server didn't recognize our message as a command.
raise OperationFailure(response.get("$err"),
response.get("code"),
response,
max_wire_version)
if parse_write_concern_error and 'writeConcernError' in response:
_error = response["writeConcernError"]
_labels = response.get("errorLabels")
if _labels:
_error.update({'errorLabels': _labels})
_raise_write_concern_error(_error)
if response["ok"]:
return
details = response
# Mongos returns the error details in a 'raw' object
# for some errors.
if "raw" in response:
for shard in itervalues(response["raw"]):
# Grab the first non-empty raw error from a shard.
if shard.get("errmsg") and not shard.get("ok"):
details = shard
break
errmsg = details["errmsg"]
code = details.get("code")
# For allowable errors, only check for error messages when the code is not
# included.
if allowable_errors:
if code is not None:
if code in allowable_errors:
return
elif errmsg in allowable_errors:
return
# Server is "not primary" or "recovering"
if code is not None:
if code in _NOT_MASTER_CODES:
raise NotPrimaryError(errmsg, response)
elif HelloCompat.LEGACY_ERROR in errmsg or "node is recovering" in errmsg:
raise NotPrimaryError(errmsg, response)
# Other errors
# findAndModify with upsert can raise duplicate key error
if code in (11000, 11001, 12582):
raise DuplicateKeyError(errmsg, code, response, max_wire_version)
elif code == 50:
raise ExecutionTimeout(errmsg, code, response, max_wire_version)
elif code == 43:
raise CursorNotFound(errmsg, code, response, max_wire_version)
raise OperationFailure(errmsg, code, response, max_wire_version)
def _check_gle_response(result, max_wire_version):
"""Return getlasterror response as a dict, or raise OperationFailure."""
# Did getlasterror itself fail?
_check_command_response(result, max_wire_version)
if result.get("wtimeout", False):
# MongoDB versions before 1.8.0 return the error message in an "errmsg"
# field. If "errmsg" exists "err" will also exist set to None, so we
# have to check for "errmsg" first.
raise WTimeoutError(result.get("errmsg", result.get("err")),
result.get("code"),
result)
error_msg = result.get("err", "")
if error_msg is None:
return result
if error_msg.startswith(HelloCompat.LEGACY_ERROR):
raise NotPrimaryError(error_msg, result)
details = result
# mongos returns the error code in an error object for some errors.
if "errObjects" in result:
for errobj in result["errObjects"]:
if errobj.get("err") == error_msg:
details = errobj
break
code = details.get("code")
if code in (11000, 11001, 12582):
raise DuplicateKeyError(details["err"], code, result)
raise OperationFailure(details["err"], code, result)
def _raise_last_write_error(write_errors):
# If the last batch had multiple errors only report
# the last error to emulate continue_on_error.
error = write_errors[-1]
if error.get("code") == 11000:
raise DuplicateKeyError(error.get("errmsg"), 11000, error)
raise WriteError(error.get("errmsg"), error.get("code"), error)
def _raise_write_concern_error(error):
if "errInfo" in error and error["errInfo"].get('wtimeout'):
# Make sure we raise WTimeoutError
raise WTimeoutError(
error.get("errmsg"), error.get("code"), error)
raise WriteConcernError(
error.get("errmsg"), error.get("code"), error)
def _check_write_command_response(result):
"""Backward compatibility helper for write command error handling.
"""
# Prefer write errors over write concern errors
write_errors = result.get("writeErrors")
if write_errors:
_raise_last_write_error(write_errors)
error = result.get("writeConcernError")
if error:
error_labels = result.get("errorLabels")
if error_labels:
error.update({'errorLabels': error_labels})
_raise_write_concern_error(error)
def _raise_last_error(bulk_write_result):
"""Backward compatibility helper for insert error handling.
"""
# Prefer write errors over write concern errors
write_errors = bulk_write_result.get("writeErrors")
if write_errors:
_raise_last_write_error(write_errors)
_raise_write_concern_error(bulk_write_result["writeConcernErrors"][-1])
def _fields_list_to_dict(fields, option_name):
"""Takes a sequence of field names and returns a matching dictionary.
["a", "b"] becomes {"a": 1, "b": 1}
and
["a.b.c", "d", "a.c"] becomes {"a.b.c": 1, "d": 1, "a.c": 1}
"""
if isinstance(fields, abc.Mapping):
return fields
if isinstance(fields, (abc.Sequence, abc.Set)):
if not all(isinstance(field, string_type) for field in fields):
raise TypeError("%s must be a list of key names, each an "
"instance of %s" % (option_name,
string_type.__name__))
return dict.fromkeys(fields, 1)
raise TypeError("%s must be a mapping or "
"list of key names" % (option_name,))
def _handle_exception():
"""Print exceptions raised by subscribers to stderr."""
# Heavily influenced by logging.Handler.handleError.
# See note here:
# https://docs.python.org/3.4/library/sys.html#sys.__stderr__
if sys.stderr:
einfo = sys.exc_info()
try:
traceback.print_exception(einfo[0], einfo[1], einfo[2],
None, sys.stderr)
except IOError:
pass
finally:
del einfo
|
|
from workalendar.core import WesternCalendar, ChristianMixin
from workalendar.core import MON, TUE, FRI, SAT, SUN
from datetime import date, timedelta
class Australia(WesternCalendar, ChristianMixin):
"Australia"
include_good_friday = True
include_easter_monday = True
include_queens_birthday = False
include_labour_day_october = False
include_boxing_day = True
# Shall we shift Anzac Day?
shift_anzac_day = True
ANZAC_SHIFT_DAYS = (SAT, SUN)
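    # If Anzac Day falls on one of the ANZAC_SHIFT_DAYS above, it is moved to
    # the following working day (see get_anzac_day).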
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(1, 26, "Australia Day"),
)
def get_canberra_day(self, year):
return (
Australia.get_nth_weekday_in_month(year, 3, MON, 2),
"Canberra Day"
)
def get_queens_birthday(self, year):
return (
Australia.get_nth_weekday_in_month(year, 6, MON, 2),
"Queen's Birthday"
)
def get_labour_day_october(self, year):
return (
Australia.get_nth_weekday_in_month(year, 10, MON),
'Labour Day'
)
def get_anzac_day(self, year):
anzac_day = date(year, 4, 25)
if not self.shift_anzac_day:
return (anzac_day, "Anzac Day")
if anzac_day.weekday() in self.ANZAC_SHIFT_DAYS:
anzac_day = self.find_following_working_day(anzac_day)
return (anzac_day, "Anzac Day")
def get_variable_days(self, year):
# usual variable days
days = super(Australia, self).get_variable_days(year)
january_first = date(year, 1, 1)
if january_first.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(january_first),
"New Year's Day shift")
)
australia_day = date(year, 1, 26)
if australia_day.weekday() in self.get_weekend_days():
days.append((
self.find_following_working_day(australia_day),
"Australia Day shift")
)
# was fixed, but might be shifted
days.append(self.get_anzac_day(year))
if self.include_queens_birthday:
days.append(self.get_queens_birthday(year))
if self.include_labour_day_october:
days.append(self.get_labour_day_october(year))
christmas = date(year, 12, 25)
boxing_day = date(year, 12, 26)
if christmas.weekday() in self.get_weekend_days():
shift = self.find_following_working_day(christmas)
days.append((shift, "Christmas Shift"))
days.append((shift + timedelta(days=1), "Boxing Day Shift"))
elif boxing_day.weekday() in self.get_weekend_days():
shift = self.find_following_working_day(boxing_day)
days.append((shift, "Boxing Day Shift"))
return days
class AustralianCapitalTerritory(Australia):
"Australian Capital Territory"
include_easter_saturday = True
include_queens_birthday = True
include_labour_day_october = True
include_boxing_day = True
def get_family_community_day(self, year):
        # Since this day is picked using the school year calendar, there is no
        # reliable mathematical way to compute it yet.
        # Family & Community Day was celebrated on the first Tuesday of
        # November in 2007, 2008 and 2009.
        # Per the Holidays (Reconciliation Day) Amendment Bill 2017, 2017 is the
        # last year that the ACT will celebrate Family and Community Day. It is
        # being replaced by Reconciliation Day.
if year in (2007, 2008, 2009):
day = AustralianCapitalTerritory.get_nth_weekday_in_month(
year, 11, TUE)
elif year == 2010:
day = date(2010, 9, 27)
elif year == 2011:
day = date(2011, 10, 10)
elif year == 2012:
day = date(2012, 10, 8)
elif year == 2013:
day = date(2013, 9, 30)
elif year == 2014:
day = date(2014, 9, 29)
elif year == 2015:
day = date(2015, 9, 28)
elif year == 2016:
day = date(2016, 9, 26)
elif year == 2017:
day = date(2017, 9, 25)
else:
return None
return (day, "Family & Community Day")
def get_reconciliation_day(self, year):
if year >= 2018:
reconciliation_day = date(year, 5, 27)
if reconciliation_day.weekday() == MON:
return (reconciliation_day, "Reconciliation Day")
else:
shift = AustralianCapitalTerritory.get_first_weekday_after(
reconciliation_day, MON)
return shift, "Reconciliation Day Shift"
def get_variable_days(self, year):
days = super(AustralianCapitalTerritory, self).get_variable_days(year)
days.append(self.get_canberra_day(year))
family_community_day = self.get_family_community_day(year)
if family_community_day is not None:
days.append(family_community_day)
reconciliation_day = self.get_reconciliation_day(year)
if reconciliation_day is not None:
days.append(reconciliation_day)
return days
class NewSouthWales(Australia):
"New South Wales"
include_queens_birthday = True
include_easter_saturday = True
include_easter_sunday = True
include_labour_day_october = True
include_boxing_day = True
ANZAC_SHIFT_DAYS = (SUN,)
class NorthernTerritory(Australia):
"Northern Territory"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
ANZAC_SHIFT_DAYS = (SUN,)
def get_may_day(self, year):
return (
NorthernTerritory.get_nth_weekday_in_month(year, 5, MON),
"May Day"
)
def get_picnic_day(self, year):
return (
NorthernTerritory.get_nth_weekday_in_month(year, 8, MON),
"Picnic Day"
)
def get_variable_days(self, year):
days = super(NorthernTerritory, self).get_variable_days(year)
days.extend([
self.get_may_day(year),
self.get_picnic_day(year),
])
return days
class Queensland(Australia):
"Queensland"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
ANZAC_SHIFT_DAYS = (SUN,)
def get_labour_day_may(self, year):
return (
Queensland.get_nth_weekday_in_month(year, 5, MON),
"Labour Day"
)
def get_variable_days(self, year):
days = super(Queensland, self).get_variable_days(year)
days.append(self.get_labour_day_may(year))
return days
class SouthAustralia(Australia):
"South Australia"
include_easter_saturday = True
include_queens_birthday = True
include_labour_day_october = True
ANZAC_SHIFT_DAYS = (SUN,)
def get_adelaides_cup(self, year):
return (
SouthAustralia.get_nth_weekday_in_month(year, 3, MON, 2),
"Adelaide's cup"
)
def get_proclamation_day(self, year):
return (date(year, 12, 26), "Proclamation Day")
def get_variable_days(self, year):
days = super(SouthAustralia, self).get_variable_days(year)
days.extend([
self.get_adelaides_cup(year),
self.get_proclamation_day(year),
])
return days
class Tasmania(Australia):
"Tasmania"
include_queens_birthday = True
include_boxing_day = True
shift_anzac_day = False
@property
def has_recreation_day(self):
return True
def get_eight_hours_day(self, year):
return (
Tasmania.get_nth_weekday_in_month(year, 3, MON, 2),
"Eight hours Day"
)
def get_recreation_day(self, year):
return (
Tasmania.get_nth_weekday_in_month(year, 11, MON),
"Recreation Day"
)
def get_variable_days(self, year):
days = super(Tasmania, self).get_variable_days(year)
days.append(self.get_eight_hours_day(year))
if self.has_recreation_day:
days.append(self.get_recreation_day(year))
return days
class Hobart(Tasmania):
"Hobart"
@property
def has_recreation_day(self):
return False
def get_hobart(self, year):
return (
Hobart.get_nth_weekday_in_month(year, 2, MON, 2),
"Royal Hobart Regatta"
)
def get_variable_days(self, year):
days = super(Hobart, self).get_variable_days(year)
days.append(self.get_hobart(year))
return days
class Victoria(Australia):
"Victoria"
include_easter_saturday = True
include_queens_birthday = True
include_boxing_day = True
shift_anzac_day = False
def get_labours_day_in_march(self, year):
return (
Victoria.get_nth_weekday_in_month(year, 3, MON, 2),
"Labour Day"
)
def get_melbourne_cup(self, year):
return (
Victoria.get_nth_weekday_in_month(year, 11, TUE),
"Melbourne Cup"
)
def get_variable_days(self, year):
days = super(Victoria, self).get_variable_days(year)
days.append(self.get_labours_day_in_march(year))
days.append(self.get_melbourne_cup(year))
return days
class WesternAustralia(Australia):
"Western Australia"
include_boxing_day = True
def get_labours_day_in_march(self, year):
return (
WesternAustralia.get_nth_weekday_in_month(year, 3, MON),
"Labour Day"
)
def get_western_australia_day(self, year):
return (
WesternAustralia.get_nth_weekday_in_month(year, 6, MON),
"Western Australia Day"
)
def get_variable_days(self, year):
        # It is not possible to reliably compute the Queen's Birthday holiday
        # in Western Australia, since it is based on the Governor's decision
        # (it is typically the last Monday of September or the first
        # Monday of October).
days = super(WesternAustralia, self).get_variable_days(year)
days.append(self.get_labours_day_in_march(year))
days.append(self.get_western_australia_day(year))
return days
class MarshallIslands(WesternCalendar, ChristianMixin):
"Marshall Islands"
FIXED_HOLIDAYS = WesternCalendar.FIXED_HOLIDAYS + (
(3, 3, "Remembrance Day"),
(5, 1, "Constitution Day"),
(11, 17, "Presidents' Day"),
(12, 31, "New Year's Eve"),
)
include_good_friday = True
def get_variable_days(self, year):
days = super(MarshallIslands, self).get_variable_days(year)
days.append((
MarshallIslands.get_nth_weekday_in_month(year, 7, FRI),
"Fishermen's Holiday"
))
days.append((
MarshallIslands.get_nth_weekday_in_month(year, 9, FRI),
"Labour Day"
))
days.append((
MarshallIslands.get_last_weekday_in_month(year, 9, FRI),
"Manit Day"
))
days.append((
MarshallIslands.get_nth_weekday_in_month(year, 12, FRI),
"Gospel Day"
))
return days
|
|
from importlib import import_module
import inspect
import os
import re
from django import template
from django.apps import apps
from django.conf import settings
from django.contrib import admin
from django.contrib.admin.views.decorators import staff_member_required
from django.db import models
from django.core.exceptions import ViewDoesNotExist
from django.http import Http404
from django.core import urlresolvers
from django.contrib.admindocs import utils
from django.utils.decorators import method_decorator
from django.utils._os import upath
from django.utils import six
from django.utils.translation import ugettext as _
from django.views.generic import TemplateView
# Exclude methods starting with these strings from documentation
MODEL_METHODS_EXCLUDE = ('_', 'add_', 'delete', 'save', 'set_')
class BaseAdminDocsView(TemplateView):
"""
Base view for admindocs views.
"""
@method_decorator(staff_member_required)
def dispatch(self, *args, **kwargs):
if not utils.docutils_is_available:
# Display an error message for people without docutils
self.template_name = 'admin_doc/missing_docutils.html'
return self.render_to_response(admin.site.each_context())
return super(BaseAdminDocsView, self).dispatch(*args, **kwargs)
def get_context_data(self, **kwargs):
kwargs.update({'root_path': urlresolvers.reverse('admin:index')})
kwargs.update(admin.site.each_context())
return super(BaseAdminDocsView, self).get_context_data(**kwargs)
class BookmarkletsView(BaseAdminDocsView):
template_name = 'admin_doc/bookmarklets.html'
def get_context_data(self, **kwargs):
context = super(BookmarkletsView, self).get_context_data(**kwargs)
context.update({
'admin_url': "%s://%s%s" % (
self.request.scheme, self.request.get_host(), context['root_path'])
})
return context
class TemplateTagIndexView(BaseAdminDocsView):
template_name = 'admin_doc/template_tag_index.html'
def get_context_data(self, **kwargs):
load_all_installed_template_libraries()
tags = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for tag_name, tag_func in library.tags.items():
title, body, metadata = utils.parse_docstring(tag_func.__doc__)
if title:
title = utils.parse_rst(title, 'tag', _('tag:') + tag_name)
if body:
body = utils.parse_rst(body, 'tag', _('tag:') + tag_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'tag', _('tag:') + tag_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
tags.append({
'name': tag_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
kwargs.update({'tags': tags})
return super(TemplateTagIndexView, self).get_context_data(**kwargs)
class TemplateFilterIndexView(BaseAdminDocsView):
template_name = 'admin_doc/template_filter_index.html'
def get_context_data(self, **kwargs):
load_all_installed_template_libraries()
filters = []
app_libs = list(six.iteritems(template.libraries))
builtin_libs = [(None, lib) for lib in template.builtins]
for module_name, library in builtin_libs + app_libs:
for filter_name, filter_func in library.filters.items():
title, body, metadata = utils.parse_docstring(filter_func.__doc__)
if title:
title = utils.parse_rst(title, 'filter', _('filter:') + filter_name)
if body:
body = utils.parse_rst(body, 'filter', _('filter:') + filter_name)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'filter', _('filter:') + filter_name)
if library in template.builtins:
tag_library = ''
else:
tag_library = module_name.split('.')[-1]
filters.append({
'name': filter_name,
'title': title,
'body': body,
'meta': metadata,
'library': tag_library,
})
kwargs.update({'filters': filters})
return super(TemplateFilterIndexView, self).get_context_data(**kwargs)
class ViewIndexView(BaseAdminDocsView):
template_name = 'admin_doc/view_index.html'
def get_context_data(self, **kwargs):
views = []
urlconf = import_module(settings.ROOT_URLCONF)
view_functions = extract_views_from_urlpatterns(urlconf.urlpatterns)
for (func, regex, namespace, name) in view_functions:
views.append({
'full_name': '%s.%s' % (func.__module__, getattr(func, '__name__', func.__class__.__name__)),
'url': simplify_regex(regex),
'url_name': ':'.join((namespace or []) + (name and [name] or [])),
'namespace': ':'.join((namespace or [])),
'name': name,
})
kwargs.update({'views': views})
return super(ViewIndexView, self).get_context_data(**kwargs)
class ViewDetailView(BaseAdminDocsView):
template_name = 'admin_doc/view_detail.html'
def get_context_data(self, **kwargs):
view = self.kwargs['view']
mod, func = urlresolvers.get_mod_func(view)
try:
view_func = getattr(import_module(mod), func)
except (ImportError, AttributeError):
raise Http404
title, body, metadata = utils.parse_docstring(view_func.__doc__)
if title:
title = utils.parse_rst(title, 'view', _('view:') + view)
if body:
body = utils.parse_rst(body, 'view', _('view:') + view)
for key in metadata:
metadata[key] = utils.parse_rst(metadata[key], 'model', _('view:') + view)
kwargs.update({
'name': view,
'summary': title,
'body': body,
'meta': metadata,
})
return super(ViewDetailView, self).get_context_data(**kwargs)
class ModelIndexView(BaseAdminDocsView):
template_name = 'admin_doc/model_index.html'
def get_context_data(self, **kwargs):
m_list = [m._meta for m in apps.get_models()]
kwargs.update({'models': m_list})
return super(ModelIndexView, self).get_context_data(**kwargs)
class ModelDetailView(BaseAdminDocsView):
template_name = 'admin_doc/model_detail.html'
def get_context_data(self, **kwargs):
# Get the model class.
try:
app_config = apps.get_app_config(self.kwargs['app_label'])
except LookupError:
raise Http404(_("App %(app_label)r not found") % self.kwargs)
try:
model = app_config.get_model(self.kwargs['model_name'])
except LookupError:
raise Http404(_("Model %(model_name)r not found in app %(app_label)r") % self.kwargs)
opts = model._meta
# Gather fields/field descriptions.
fields = []
for field in opts.fields:
# ForeignKey is a special case since the field will actually be a
# descriptor that returns the other object
if isinstance(field, models.ForeignKey):
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = utils.parse_rst(
(_("the related `%(app_label)s.%(data_type)s` object") % {
'app_label': app_label, 'data_type': data_type,
}),
'model',
_('model:') + data_type,
)
else:
data_type = get_readable_field_data_type(field)
verbose = field.verbose_name
fields.append({
'name': field.name,
'data_type': data_type,
'verbose': verbose,
'help_text': field.help_text,
})
# Gather many-to-many fields.
for field in opts.many_to_many:
data_type = field.rel.to.__name__
app_label = field.rel.to._meta.app_label
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
'app_label': app_label,
'object_name': data_type,
}
fields.append({
'name': "%s.all" % field.name,
"data_type": 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % field.name,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
# Gather model methods.
for func_name, func in model.__dict__.items():
if (inspect.isfunction(func) and len(inspect.getargspec(func)[0]) == 1):
try:
for exclude in MODEL_METHODS_EXCLUDE:
if func_name.startswith(exclude):
raise StopIteration
except StopIteration:
continue
verbose = func.__doc__
if verbose:
verbose = utils.parse_rst(utils.trim_docstring(verbose), 'model', _('model:') + opts.model_name)
fields.append({
'name': func_name,
'data_type': get_return_data_type(func_name),
'verbose': verbose,
})
# Gather related objects
for rel in opts.get_all_related_objects() + opts.get_all_related_many_to_many_objects():
verbose = _("related `%(app_label)s.%(object_name)s` objects") % {
'app_label': rel.opts.app_label,
'object_name': rel.opts.object_name,
}
accessor = rel.get_accessor_name()
fields.append({
'name': "%s.all" % accessor,
'data_type': 'List',
'verbose': utils.parse_rst(_("all %s") % verbose, 'model', _('model:') + opts.model_name),
})
fields.append({
'name': "%s.count" % accessor,
'data_type': 'Integer',
'verbose': utils.parse_rst(_("number of %s") % verbose, 'model', _('model:') + opts.model_name),
})
kwargs.update({
'name': '%s.%s' % (opts.app_label, opts.object_name),
# Translators: %s is an object type name
'summary': _("Attributes on %s objects") % opts.object_name,
'description': model.__doc__,
'fields': fields,
})
return super(ModelDetailView, self).get_context_data(**kwargs)
class TemplateDetailView(BaseAdminDocsView):
template_name = 'admin_doc/template_detail.html'
def get_context_data(self, **kwargs):
template = self.kwargs['template']
templates = []
for dir in settings.TEMPLATE_DIRS:
template_file = os.path.join(dir, template)
templates.append({
'file': template_file,
'exists': os.path.exists(template_file),
'contents': lambda: open(template_file).read() if os.path.exists(template_file) else '',
'order': list(settings.TEMPLATE_DIRS).index(dir),
})
kwargs.update({
'name': template,
'templates': templates,
})
return super(TemplateDetailView, self).get_context_data(**kwargs)
####################
# Helper functions #
####################
def load_all_installed_template_libraries():
# Load/register all template tag libraries from installed apps.
for module_name in template.get_templatetags_modules():
mod = import_module(module_name)
try:
libraries = [
os.path.splitext(p)[0]
for p in os.listdir(os.path.dirname(upath(mod.__file__)))
if p.endswith('.py') and p[0].isalpha()
]
except OSError:
libraries = []
for library_name in libraries:
try:
template.get_library(library_name)
except template.InvalidTemplateLibrary:
pass
def get_return_data_type(func_name):
"""Return a somewhat-helpful data type given a function name"""
if func_name.startswith('get_'):
if func_name.endswith('_list'):
return 'List'
elif func_name.endswith('_count'):
return 'Integer'
return ''
def get_readable_field_data_type(field):
"""Returns the description for a given field type, if it exists,
Fields' descriptions can contain format strings, which will be interpolated
against the values of field.__dict__ before being output."""
return field.description % field.__dict__
def extract_views_from_urlpatterns(urlpatterns, base='', namespace=None):
"""
Return a list of views from a list of urlpatterns.
    Each object in the returned list is a four-tuple:
    (view_func, regex, namespace, name)
"""
views = []
for p in urlpatterns:
if hasattr(p, 'url_patterns'):
try:
patterns = p.url_patterns
except ImportError:
continue
views.extend(extract_views_from_urlpatterns(
patterns,
base + p.regex.pattern,
(namespace or []) + (p.namespace and [p.namespace] or [])
))
elif hasattr(p, 'callback'):
try:
views.append((p.callback, base + p.regex.pattern,
namespace, p.name))
except ViewDoesNotExist:
continue
else:
raise TypeError(_("%s does not appear to be a urlpattern object") % p)
return views
named_group_matcher = re.compile(r'\(\?P(<\w+>).+?\)')
non_named_group_matcher = re.compile(r'\(.*?\)')
def simplify_regex(pattern):
"""
Clean up urlpattern regexes into something somewhat readable by Mere Humans:
turns something like "^(?P<sport_slug>\w+)/athletes/(?P<athlete_slug>\w+)/$"
into "<sport_slug>/athletes/<athlete_slug>/"
"""
# handle named groups first
pattern = named_group_matcher.sub(lambda m: m.group(1), pattern)
# handle non-named groups
pattern = non_named_group_matcher.sub("<var>", pattern)
# clean up any outstanding regex-y characters.
pattern = pattern.replace('^', '').replace('$', '').replace('?', '').replace('//', '/').replace('\\', '')
if not pattern.startswith('/'):
pattern = '/' + pattern
return pattern
|