repo_name (stringlengths 5-100) | path (stringlengths 4-375) | copies (stringclasses, 991 values) | size (stringlengths 4-7) | content (stringlengths 666-1M) | license (stringclasses, 15 values)
---|---|---|---|---|---|
hub-cap/lady-rainicorn | rainicorn/openstack/common/exception.py | 1 | 3454 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2011 OpenStack Foundation.
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Exceptions common to OpenStack projects
"""
import logging
from rainicorn.openstack.common.gettextutils import _
_FATAL_EXCEPTION_FORMAT_ERRORS = False
class Error(Exception):
def __init__(self, message=None):
super(Error, self).__init__(message)
class ApiError(Error):
def __init__(self, message='Unknown', code='Unknown'):
self.message = message
self.code = code
super(ApiError, self).__init__('%s: %s' % (code, message))
class NotFound(Error):
pass
class UnknownScheme(Error):
msg = "Unknown scheme '%s' found in URI"
def __init__(self, scheme):
msg = self.__class__.msg % scheme
super(UnknownScheme, self).__init__(msg)
class BadStoreUri(Error):
msg = "The Store URI %s was malformed. Reason: %s"
def __init__(self, uri, reason):
msg = self.__class__.msg % (uri, reason)
super(BadStoreUri, self).__init__(msg)
class Duplicate(Error):
pass
class NotAuthorized(Error):
pass
class NotEmpty(Error):
pass
class Invalid(Error):
pass
class BadInputError(Exception):
"""Error resulting from a client sending bad input to a server"""
pass
class MissingArgumentError(Error):
pass
class DatabaseMigrationError(Error):
pass
class ClientConnectionError(Exception):
"""Error resulting from a client connecting to a server"""
pass
def wrap_exception(f):
def _wrap(*args, **kw):
try:
return f(*args, **kw)
except Exception as e:
if not isinstance(e, Error):
#exc_type, exc_value, exc_traceback = sys.exc_info()
logging.exception(_('Uncaught exception'))
#logging.error(traceback.extract_stack(exc_traceback))
raise Error(str(e))
raise
_wrap.func_name = f.func_name
return _wrap
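# Illustrative only (hypothetical usage, not part of the original module):
# decorating a function with wrap_exception re-raises any non-Error exception
# as Error, so callers only need to handle one exception type:
#
#     @wrap_exception
#     def _risky():
#         raise ValueError('boom')   # logged, then re-raised as Error('boom')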
class OpenstackException(Exception):
"""
Base Exception
To correctly use this class, inherit from it and define
a 'message' property. That message will get printf'd
with the keyword arguments provided to the constructor.
"""
message = "An unknown exception occurred"
def __init__(self, **kwargs):
try:
self._error_string = self.message % kwargs
except Exception as e:
if _FATAL_EXCEPTION_FORMAT_ERRORS:
raise e
else:
# at least get the core message out if something happened
self._error_string = self.message
def __str__(self):
return self._error_string
class MalformedRequestBody(OpenstackException):
message = "Malformed message body: %(reason)s"
class InvalidContentType(OpenstackException):
message = "Invalid content type %(content_type)s"
| apache-2.0 |
mizzao/ggplot | ggplot/stats/stat_function.py | 12 | 4439 | from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import pandas as pd
from ggplot.utils import make_iterable_ntimes
from ggplot.utils.exceptions import GgplotError
from .stat import stat
class stat_function(stat):
"""
Superimpose a function onto a plot
    Uses a 'path' geom by default to draw the function over the
    range of the data.
Parameters
----------
x : list, 1darray
x values of data
fun : function
Function to draw.
n : int
Number of points to interpolate over. Must be greater than zero.
Defaults to 101.
color : str
Color to draw function with.
args : list, dict, object
        List or dict of additional arguments to pass to the function. If it is
        neither a list nor a dict, the object is passed as the second argument.
Examples
--------
Sin vs cos.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
gg = ggplot(pd.DataFrame({'x':np.arange(10)}),aes(x='x'))
gg = gg + stat_function(fun=np.sin,color="red")
gg = gg + stat_function(fun=np.cos,color="blue")
print(gg)
Compare random sample density to normal distribution.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.normal(size=100)
# normal distribution function
def dnorm(n):
return (1.0 / np.sqrt(2 * np.pi)) * (np.e ** (-0.5 * (n ** 2)))
data = pd.DataFrame({'x':x})
gg = ggplot(aes(x='x'),data=data) + geom_density()
gg = gg + stat_function(fun=dnorm,n=150)
print(gg)
Passing additional arguments to function as list.
.. plot::
:include-source:
import numpy as np
import pandas as pd
from ggplot import *
x = np.random.randn(100)
to_the_power_of = lambda n, p: n ** p
y = x ** 3
y += np.random.randn(100) # add noise
data = pd.DataFrame({'x':x,'y':y})
gg = ggplot(aes(x='x',y='y'),data=data) + geom_point()
gg = gg + stat_function(fun=to_the_power_of,args=[3])
print(gg)
Passing additional arguments to function as dict.
.. plot::
:include-source:
import scipy
import numpy as np
import pandas as pd
from ggplot import *
def dnorm(x, mean, var):
return scipy.stats.norm(mean,var).pdf(x)
data = pd.DataFrame({'x':np.arange(-5,6)})
gg = ggplot(aes(x='x'),data=data)
gg = gg + stat_function(fun=dnorm,color="blue",args={'mean':0.0,'var':0.2})
gg = gg + stat_function(fun=dnorm,color="red",args={'mean':0.0,'var':1.0})
gg = gg + stat_function(fun=dnorm,color="yellow",args={'mean':0.0,'var':5.0})
gg = gg + stat_function(fun=dnorm,color="green",args={'mean':-2.0,'var':0.5})
print(gg)
"""
# TODO: Should not have a required aesthetic, use the scale information
    # maybe that is where the "scale training" helps
REQUIRED_AES = {'x'}
DEFAULT_PARAMS = {'geom': 'path', 'position': 'identity', 'fun': None,
'n': 101, 'args': None}
_aes_renames = {'size': 'linewidth', 'linetype': 'linestyle'}
CREATES = {'y'}
def _calculate(self, data):
x = data.pop('x')
fun = self.params['fun']
n = self.params['n']
args = self.params['args']
if not hasattr(fun, '__call__'):
raise GgplotError("stat_function requires parameter 'fun' to be " +
"a function or any other callable object")
old_fun = fun
if isinstance(args,list):
fun = lambda x: old_fun(x, *args)
elif isinstance(args,dict):
fun = lambda x: old_fun(x, **args)
elif args is not None:
fun = lambda x: old_fun(x, args)
else:
fun = lambda x: old_fun(x)
x = np.linspace(x.min(), x.max(),n)
y = list(map(fun, x))
new_data = pd.DataFrame({'x': x, 'y': y})
# Copy the other aesthetics into the new dataframe
        # Don't copy any previous 'y' assignments
try:
del data['y']
except KeyError:
pass
n = len(x)
for ae in data:
new_data[ae] = make_iterable_ntimes(data[ae].iloc[0], n)
return new_data
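# Illustrative only (the helper name is hypothetical, not part of ggplot): the
# `args` handling in _calculate above is this adapter pattern in isolation --
# whatever form `args` takes, the wrapped function can always be called as fun(x).
def _wrap_args_example(fun, args):
    if isinstance(args, list):
        return lambda x: fun(x, *args)
    elif isinstance(args, dict):
        return lambda x: fun(x, **args)
    elif args is not None:
        return lambda x: fun(x, args)
    return fun
# _wrap_args_example(pow, [3])(2) == 8, i.e. pow(2, 3)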
| bsd-2-clause |
BhavySinghal/qassam-le2 | tools/perf/scripts/python/net_dropmonitor.py | 2669 | 1738 | # Monitor the system for dropped packets and produce a report of drop locations and counts
import os
import sys
sys.path.append(os.environ['PERF_EXEC_PATH'] + \
'/scripts/python/Perf-Trace-Util/lib/Perf/Trace')
from perf_trace_context import *
from Core import *
from Util import *
drop_log = {}
kallsyms = []
def get_kallsyms_table():
global kallsyms
try:
f = open("/proc/kallsyms", "r")
except:
return
for line in f:
loc = int(line.split()[0], 16)
name = line.split()[2]
kallsyms.append((loc, name))
kallsyms.sort()
def get_sym(sloc):
loc = int(sloc)
# Invariant: kallsyms[i][0] <= loc for all 0 <= i <= start
# kallsyms[i][0] > loc for all end <= i < len(kallsyms)
start, end = -1, len(kallsyms)
while end != start + 1:
pivot = (start + end) // 2
if loc < kallsyms[pivot][0]:
end = pivot
else:
start = pivot
# Now (start == -1 or kallsyms[start][0] <= loc)
# and (start == len(kallsyms) - 1 or loc < kallsyms[start + 1][0])
if start >= 0:
symloc, name = kallsyms[start]
return (name, loc - symloc)
else:
return (None, 0)
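# Illustrative only: with kallsyms == [(4096, 'a'), (8192, 'b')], a lookup such
# as get_sym('6144') bisects to start == 0 and returns ('a', 2048) -- the
# nearest preceding symbol plus the offset into it. Addresses below the first
# entry leave start == -1 and return (None, 0).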
def print_drop_table():
print "%25s %25s %25s" % ("LOCATION", "OFFSET", "COUNT")
for i in drop_log.keys():
(sym, off) = get_sym(i)
if sym == None:
sym = i
print "%25s %25s %25s" % (sym, off, drop_log[i])
def trace_begin():
print "Starting trace (Ctrl-C to dump results)"
def trace_end():
print "Gathering kallsyms data"
get_kallsyms_table()
print_drop_table()
# called from perf, when it finds a corresponding event
def skb__kfree_skb(name, context, cpu, sec, nsec, pid, comm,
skbaddr, location, protocol):
slocation = str(location)
try:
drop_log[slocation] = drop_log[slocation] + 1
except:
drop_log[slocation] = 1
| gpl-2.0 |
CiscoSystems/jujucharm-n1k | charms/precise/swift-storage/hooks/misc_utils.py | 1 | 2174 | from charmhelpers.contrib.storage.linux.utils import (
is_block_device,
zap_disk,
)
from charmhelpers.contrib.storage.linux.loopback import (
ensure_loopback_device,
)
from charmhelpers.contrib.storage.linux.lvm import (
deactivate_lvm_volume_group,
is_lvm_physical_volume,
remove_lvm_physical_volume,
)
from charmhelpers.core.host import (
mounts,
umount,
)
from charmhelpers.core.hookenv import (
log,
INFO,
ERROR,
)
DEFAULT_LOOPBACK_SIZE = '5G'
def ensure_block_device(block_device):
'''
Confirm block_device, create as loopback if necessary.
:param block_device: str: Full path of block device to ensure.
:returns: str: Full path of ensured block device.
'''
_none = ['None', 'none', None]
    if block_device in _none:
        msg = ('prepare_storage(): Missing required input: '
               'block_device=%s.' % block_device)
        log(msg, level=ERROR)
        # a bare `raise` has no active exception here; raise an explicit error
        raise ValueError(msg)
if block_device.startswith('/dev/'):
bdev = block_device
elif block_device.startswith('/'):
_bd = block_device.split('|')
if len(_bd) == 2:
bdev, size = _bd
else:
bdev = block_device
size = DEFAULT_LOOPBACK_SIZE
bdev = ensure_loopback_device(bdev, size)
else:
bdev = '/dev/%s' % block_device
    if not is_block_device(bdev):
        msg = 'Failed to locate valid block device at %s' % bdev
        log(msg, level=ERROR)
        # a bare `raise` has no active exception here; raise an explicit error
        raise IOError(msg)
return bdev
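# Illustrative usage (not part of the charm) of the three accepted input forms:
#   ensure_block_device('/dev/sdb')        -> '/dev/sdb', used as-is
#   ensure_block_device('/srv/loop0|10G')  -> a 10G loopback device backed by that file
#   ensure_block_device('sdb')             -> '/dev/sdb'
# A plain non-/dev/ path without a '|size' suffix falls back to DEFAULT_LOOPBACK_SIZE.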
def clean_storage(block_device):
'''
Ensures a block device is clean. That is:
- unmounted
- any lvm volume groups are deactivated
- any lvm physical device signatures removed
- partition table wiped
:param block_device: str: Full path to block device to clean.
'''
for mp, d in mounts():
if d == block_device:
log('clean_storage(): Found %s mounted @ %s, unmounting.' %
(d, mp), level=INFO)
umount(mp, persist=True)
if is_lvm_physical_volume(block_device):
deactivate_lvm_volume_group(block_device)
remove_lvm_physical_volume(block_device)
else:
zap_disk(block_device)
| apache-2.0 |
timgraham/django-cms | cms/management/commands/subcommands/tree.py | 4 | 5434 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, print_function, unicode_literals
from collections import OrderedDict
from cms.models import Page, CMSPlugin
from .base import SubcommandsCommand
def get_descendant_ids(root_id):
"""
    Returns a generator of primary keys which represent
descendants of the given page ID (root_id)
"""
# Note this is done because get_descendants() can't be trusted
# as the tree can be corrupt.
children = Page.objects.filter(parent=root_id).values_list('pk', flat=True)
for child_id in children.iterator():
yield child_id
for descendant_id in get_descendant_ids(child_id):
yield descendant_id
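# Illustrative only: for pages 1 -> (2, 3) and 2 -> (4), list(get_descendant_ids(1))
# walks the tree depth-first and yields the primary keys [2, 4, 3].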
class FixTreeCommand(SubcommandsCommand):
help_string = 'Repairing Materialized Path Tree for Pages'
command_name = 'fix-tree'
def handle(self, *args, **options):
"""
Repairs the tree
"""
self.stdout.write('fixing page tree')
Page.fix_tree()
root_draft_pages = Page.objects.filter(
publisher_is_draft=True,
parent__isnull=True,
)
last = None
try:
first = root_draft_pages.order_by('path')[0]
except IndexError:
first = None
for page in root_draft_pages.order_by('site__pk', 'path'):
if last:
last = last.reload()
page = page.reload()
page.move(target=last, pos='right')
elif first and first.pk != page.pk:
page.move(target=first, pos='left')
last = page.reload()
root_public_pages = Page.objects.filter(
publisher_is_draft=False,
parent__isnull=True,
).order_by('publisher_public__path')
# Filter out any root public pages whose draft page
# has a parent.
# This avoids a tree corruption where the public root page
# is added as a child of the draft page's draft parent
# instead of the draft page's public parent
root_public_pages = root_public_pages.filter(
publisher_public__parent__isnull=True
)
for page in root_public_pages:
page = page.reload()
public = page.publisher_public
page.move(target=public, pos='right')
for root in root_draft_pages.order_by('site__pk', 'path'):
self._update_descendants_tree(root)
self.stdout.write('fixing plugin tree')
CMSPlugin.fix_tree()
self.stdout.write('all done')
def _update_descendants_tree(self, root):
descendants_ids = get_descendant_ids(root.pk)
public_root_sibling = root.publisher_public
draft_descendants = (
Page
.objects
.filter(pk__in=descendants_ids)
.select_related('parent', 'publisher_public')
.order_by('depth', 'path')
)
descendants_by_parent = OrderedDict()
for descendant in draft_descendants.iterator():
parent = descendant.parent_id
descendants_by_parent.setdefault(parent, []).append(descendant)
for tree in descendants_by_parent.values():
last_draft = None
last_public = None
draft_parent = tree[0].parent
public_parent = draft_parent.publisher_public
for draft_page in tree:
draft_page.refresh_from_db()
if last_draft:
                    # This is not the first time through the loop, so this is
                    # not the first draft child. Set this page as a sibling of
                    # the last processed draft page.
draft_page.move(target=last_draft.reload(), pos='right')
else:
# This is the first time through the loop so this is the first
# draft child for this parent.
draft_page.move(target=draft_parent.reload(), pos='first-child')
last_draft = draft_page
if not draft_page.publisher_public_id:
continue
public_page = draft_page.publisher_public
if last_public:
public_target = last_public
public_position = 'right'
last_public = public_page
elif public_parent:
# always insert the first public child node found
# as the first child of the public parent
public_target = public_parent
public_position = 'first-child'
last_public = public_page
else:
# No public parent has been found
# Insert the node as a sibling to the last root sibling
# Its very unlikely but possible for the root to not have
# a public page. When this happens, use the root draft page
# as sibling.
public_target = public_root_sibling or root
public_position = 'right'
# This page now becomes the last root sibling
public_root_sibling = public_page
public_page.refresh_from_db()
public_page.move(
target=public_target.reload(),
pos=public_position,
)
| bsd-3-clause |
tammoippen/nest-simulator | pynest/nest/tests/test_split_simulation.py | 13 | 2259 | # -*- coding: utf-8 -*-
#
# test_split_simulation.py
#
# This file is part of NEST.
#
# Copyright (C) 2004 The NEST Initiative
#
# NEST is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
#
# NEST is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with NEST. If not, see <http://www.gnu.org/licenses/>.
import unittest
import nest
class TestSplit(unittest.TestCase):
steps = 100
time = 100
def __init__(self, *args, **kwargs):
super(TestSplit, self).__init__(*args, **kwargs)
self.spike = None
def setup(self):
nest.ResetKernel()
nest.SetDefaults('spike_detector', {'withtime': True})
n1 = nest.Create("iaf_psc_alpha")
nest.SetStatus(n1, {"I_e": 376.0})
self.spike = spike = nest.Create('spike_detector')
nest.Connect(n1, spike)
def runner(self, time, f):
spike = self.spike
nest.SetStatus(spike, [{'n_events': 0}])
f(time)
spikes = nest.GetStatus(spike, 'events')[0]
senders, times = spikes['senders'], spikes['times']
return zip(senders, times)
def runs(self):
self.setup()
steps, time = self.steps, self.time
with nest.RunManager():
return [
(s, t)
for _ in range(steps)
for s, t in self.runner(time, nest.Run)
]
def simulate(self):
self.setup()
steps, time = self.steps, self.time
return [
(s, t)
for s, t in self.runner(time * steps, nest.Simulate)
]
def test_split_match(self):
r0 = self.runs()
r1 = self.simulate()
self.assertEqual(r0, r1)
if __name__ == '__main__':
suite = unittest.TestLoader().loadTestsFromTestCase(TestSplit)
unittest.TextTestRunner(verbosity=2).run(suite)
| gpl-2.0 |
Kryz/sentry | src/sentry/migrations/0129_auto__chg_field_release_id__chg_field_pendingteammember_id__chg_field_.py | 36 | 26026 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
"""
This migration skips all bigint upgrades as they're not generally useful
for organizations, and they're incredibly expensive to apply
"""
def backwards(self, orm):
pass
models = {
'sentry.accessgroup': {
'Meta': {'unique_together': "(('team', 'name'),)", 'object_name': 'AccessGroup'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.User']", 'symmetrical': 'False'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'projects': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['sentry.Project']", 'symmetrical': 'False'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.activity': {
'Meta': {'object_name': 'Activity'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Event']", 'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'ident': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'type': ('django.db.models.fields.PositiveIntegerField', [], {}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'})
},
'sentry.alert': {
'Meta': {'object_name': 'Alert'},
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'related_groups': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'related_alerts'", 'symmetrical': 'False', 'through': "orm['sentry.AlertRelatedGroup']", 'to': "orm['sentry.Group']"}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'})
},
'sentry.alertrelatedgroup': {
'Meta': {'unique_together': "(('group', 'alert'),)", 'object_name': 'AlertRelatedGroup'},
'alert': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Alert']"}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'})
},
'sentry.broadcast': {
'Meta': {'object_name': 'Broadcast'},
'badge': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'blank': 'True'}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'db_index': 'True'}),
'link': ('django.db.models.fields.URLField', [], {'max_length': '200', 'null': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '256'})
},
'sentry.event': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'Event', 'db_table': "'sentry_message'", 'index_together': "(('group', 'datetime'),)"},
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'datetime': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True', 'db_column': "'message_id'"}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'blank': 'True', 'related_name': "'event_set'", 'null': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'time_spent': ('django.db.models.fields.IntegerField', [], {'null': 'True'})
},
'sentry.eventmapping': {
'Meta': {'unique_together': "(('project', 'event_id'),)", 'object_name': 'EventMapping'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'event_id': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.group': {
'Meta': {'unique_together': "(('project', 'checksum'),)", 'object_name': 'Group', 'db_table': "'sentry_groupedmessage'"},
'active_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'checksum': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'culprit': ('django.db.models.fields.CharField', [], {'max_length': '200', 'null': 'True', 'db_column': "'view'", 'blank': 'True'}),
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'is_public': ('django.db.models.fields.NullBooleanField', [], {'default': 'False', 'null': 'True', 'blank': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'db_index': 'True'}),
'level': ('django.db.models.fields.PositiveIntegerField', [], {'default': '40', 'db_index': 'True', 'blank': 'True'}),
'logger': ('django.db.models.fields.CharField', [], {'default': "'root'", 'max_length': '64', 'db_index': 'True', 'blank': 'True'}),
'message': ('django.db.models.fields.TextField', [], {}),
'num_comments': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'null': 'True'}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'resolved_at': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'db_index': 'True'}),
'score': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'time_spent_count': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'time_spent_total': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '1', 'db_index': 'True'})
},
'sentry.groupassignee': {
'Meta': {'object_name': 'GroupAssignee', 'db_table': "'sentry_groupasignee'"},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'unique': 'True', 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'assignee_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_assignee_set'", 'to': "orm['sentry.User']"})
},
'sentry.groupbookmark': {
'Meta': {'unique_together': "(('project', 'user', 'group'),)", 'object_name': 'GroupBookmark'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'bookmark_set'", 'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_bookmark_set'", 'to': "orm['sentry.User']"})
},
'sentry.grouphash': {
'Meta': {'unique_together': "(('project', 'hash'),)", 'object_name': 'GroupHash'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']", 'null': 'True'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'})
},
'sentry.groupmeta': {
'Meta': {'unique_together': "(('group', 'key'),)", 'object_name': 'GroupMeta'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'value': ('django.db.models.fields.TextField', [], {})
},
'sentry.grouprulestatus': {
'Meta': {'unique_together': "(('rule', 'group'),)", 'object_name': 'GroupRuleStatus'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'rule': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Rule']"}),
'status': ('django.db.models.fields.PositiveSmallIntegerField', [], {'default': '0'})
},
'sentry.groupseen': {
'Meta': {'unique_together': "(('user', 'group'),)", 'object_name': 'GroupSeen'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'db_index': 'False'})
},
'sentry.grouptagkey': {
'Meta': {'unique_together': "(('project', 'group', 'key'),)", 'object_name': 'GroupTagKey'},
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.grouptagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value', 'group'),)", 'object_name': 'GroupTagValue', 'db_table': "'sentry_messagefiltervalue'"},
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'group': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'to': "orm['sentry.Group']"}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'grouptag'", 'null': 'True', 'to': "orm['sentry.Project']"}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.lostpasswordhash': {
'Meta': {'object_name': 'LostPasswordHash'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'hash': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'unique': 'True'})
},
'sentry.option': {
'Meta': {'object_name': 'Option'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '64'}),
'last_updated': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.pendingteammember': {
'Meta': {'unique_together': "(('team', 'email'),)", 'object_name': 'PendingTeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'pending_member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'})
},
'sentry.project': {
'Meta': {'unique_together': "(('team', 'slug'),)", 'object_name': 'Project'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '200'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_owned_project_set'", 'null': 'True', 'to': "orm['sentry.User']"}),
'platform': ('django.db.models.fields.CharField', [], {'max_length': '32', 'null': 'True'}),
'public': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '50', 'null': 'True'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0', 'db_index': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Team']", 'null': 'True'})
},
'sentry.projectkey': {
'Meta': {'object_name': 'ProjectKey'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True', 'blank': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'key_set'", 'to': "orm['sentry.Project']"}),
'public_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'roles': ('django.db.models.fields.BigIntegerField', [], {'default': '1'}),
'secret_key': ('django.db.models.fields.CharField', [], {'max_length': '32', 'unique': 'True', 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']", 'null': 'True'}),
'user_added': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'keys_added_set'", 'null': 'True', 'to': "orm['sentry.User']"})
},
'sentry.projectoption': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'ProjectOption', 'db_table': "'sentry_projectoptions'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
},
'sentry.release': {
'Meta': {'unique_together': "(('project', 'version'),)", 'object_name': 'Release'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'version': ('django.db.models.fields.CharField', [], {'max_length': '64'})
},
'sentry.rule': {
'Meta': {'object_name': 'Rule'},
'data': ('sentry.db.models.fields.gzippeddict.GzippedDictField', [], {}),
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"})
},
'sentry.tagkey': {
'Meta': {'unique_together': "(('project', 'key'),)", 'object_name': 'TagKey', 'db_table': "'sentry_filterkey'"},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'label': ('django.db.models.fields.CharField', [], {'max_length': '64', 'null': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']"}),
'values_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.tagvalue': {
'Meta': {'unique_together': "(('project', 'key', 'value'),)", 'object_name': 'TagValue', 'db_table': "'sentry_filtervalue'"},
'data': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'first_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '32'}),
'last_seen': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True', 'db_index': 'True'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'times_seen': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'}),
'value': ('django.db.models.fields.CharField', [], {'max_length': '200'})
},
'sentry.team': {
'Meta': {'object_name': 'Team'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now', 'null': 'True'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'members': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'team_memberships'", 'symmetrical': 'False', 'through': "orm['sentry.TeamMember']", 'to': "orm['sentry.User']"}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'owner': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'slug': ('django.db.models.fields.SlugField', [], {'unique': 'True', 'max_length': '50'}),
'status': ('django.db.models.fields.PositiveIntegerField', [], {'default': '0'})
},
'sentry.teammember': {
'Meta': {'unique_together': "(('team', 'user'),)", 'object_name': 'TeamMember'},
'date_added': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'team': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'member_set'", 'to': "orm['sentry.Team']"}),
'type': ('django.db.models.fields.IntegerField', [], {'default': '50'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'related_name': "'sentry_teammember_set'", 'to': "orm['sentry.User']"})
},
'sentry.user': {
'Meta': {'object_name': 'User', 'db_table': "'auth_user'"},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_managed': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '128'})
},
'sentry.useroption': {
'Meta': {'unique_together': "(('user', 'project', 'key'),)", 'object_name': 'UserOption'},
'id': ('sentry.db.models.fields.BoundedBigAutoField', [], {'primary_key': 'True'}),
'key': ('django.db.models.fields.CharField', [], {'max_length': '64'}),
'project': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.Project']", 'null': 'True'}),
'user': ('sentry.db.models.fields.FlexibleForeignKey', [], {'to': "orm['sentry.User']"}),
'value': ('sentry.db.models.fields.pickle.UnicodePickledObjectField', [], {})
}
}
complete_apps = ['sentry']
| bsd-3-clause |
JohnOrlando/gnuradio-bitshark | gr-radar-mono/src/python/usrp_radar_mono.py | 11 | 3905 | #!/usr/bin/env python
#
# Copyright 2007 Free Software Foundation, Inc.
#
# This file is part of GNU Radio
#
# GNU Radio is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3, or (at your option)
# any later version.
#
# GNU Radio is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with GNU Radio; see the file COPYING. If not, write to
# the Free Software Foundation, Inc., 51 Franklin Street,
# Boston, MA 02110-1301, USA.
#
from gnuradio import gr
from gnuradio.radar_mono import radar
from gnuradio import eng_notation
from gnuradio.eng_option import eng_option
from optparse import OptionParser
import sys, time
n2s = eng_notation.num_to_str
logfile = None
def process_echo(echo):
global logfile
if logfile is not None:
logfile.write(echo)
def main():
global logfile
parser = OptionParser(option_class=eng_option)
parser.add_option("-T", "--tx-subdev-spec", type="subdev", default=None,
help="use transmitter board side A or B (default is first found)")
parser.add_option("-R", "--rx-subdev-spec", type="subdev", default=None,
help="use receiver board side A or B (default is first found)")
parser.add_option("-g", "--gain", type="eng_float", default=None,
help="set gain in dB (default is midpoint)")
parser.add_option("-f", "--frequency", type="eng_float", default=0.0,
help="set transmitter center frequency to FREQ in Hz, default is %default", metavar="FREQ")
parser.add_option("-w", "--chirp-width", type="eng_float", default=32e6,
help="set LFM chirp bandwidth in Hz, default is %default", metavar="FREQ")
parser.add_option("-a", "--amplitude", type="eng_float", default=15,
help="set waveform amplitude in % full scale, default is %default,")
parser.add_option("", "--ton", type="eng_float", default=5e-6,
help="set pulse on period in seconds, default is %default,")
parser.add_option("", "--tsw", type="eng_float", default=0.0,
help="set transmitter switching period in seconds, default is %default,")
parser.add_option("", "--tlook", type="eng_float", default=5e-6,
help="set receiver look time in seconds, default is %default,")
parser.add_option("", "--prf", type="eng_float", default=100,
help="set pulse repetition frequency in Hz, default is %default,")
parser.add_option("-v", "--verbose", action="store_true", default=False,
help="enable verbose output, default is disabled")
parser.add_option("-D", "--debug", action="store_true", default=False,
help="enable debugging output, default is disabled")
parser.add_option("-F", "--filename", default=None,
help="log received echos to file")
(options, args) = parser.parse_args()
if len(args) != 0:
parser.print_help()
sys.exit(1)
if options.filename is not None:
if options.verbose:
print "Logging echo records to file: ", options.filename
logfile = open(options.filename, 'wb')
r = radar(options, process_echo)
r.set_ton(options.ton)
r.set_tsw(options.tsw)
r.set_tlook(options.tlook)
r.set_prf(options.prf)
r.set_amplitude(options.amplitude)
r.set_freq(options.frequency, options.chirp_width)
r.start()
raw_input("Press ENTER to stop.")
r.stop()
if logfile is not None:
logfile.close()
if __name__ == "__main__":
main()
| gpl-3.0 |
martydill/url_shortener | code/venv/lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/chardetect.py | 1786 | 2504 | #!/usr/bin/env python
"""
Script which takes one or more file paths and reports on their detected
encodings
Example::
% chardetect somefile someotherfile
somefile: windows-1252 with confidence 0.5
someotherfile: ascii with confidence 1.0
If no paths are provided, it takes its input from stdin.
"""
from __future__ import absolute_import, print_function, unicode_literals
import argparse
import sys
from io import open
from chardet import __version__
from chardet.universaldetector import UniversalDetector
def description_of(lines, name='stdin'):
"""
Return a string describing the probable encoding of a file or
list of strings.
:param lines: The lines to get the encoding of.
:type lines: Iterable of bytes
:param name: Name of file or collection of lines
:type name: str
"""
u = UniversalDetector()
for line in lines:
u.feed(line)
u.close()
result = u.result
if result['encoding']:
return '{0}: {1} with confidence {2}'.format(name, result['encoding'],
result['confidence'])
else:
return '{0}: no result'.format(name)
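# Illustrative only: description_of([b'hello world\n'], name='greeting.txt')
# would typically report "greeting.txt: ascii with confidence 1.0", while input
# the detector cannot classify comes back as "greeting.txt: no result".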
def main(argv=None):
'''
Handles command line arguments and gets things started.
:param argv: List of arguments, as if specified on the command-line.
If None, ``sys.argv[1:]`` is used instead.
:type argv: list of str
'''
# Get command line arguments
parser = argparse.ArgumentParser(
description="Takes one or more file paths and reports their detected \
encodings",
formatter_class=argparse.ArgumentDefaultsHelpFormatter,
conflict_handler='resolve')
parser.add_argument('input',
help='File whose encoding we would like to determine.',
type=argparse.FileType('rb'), nargs='*',
default=[sys.stdin])
parser.add_argument('--version', action='version',
version='%(prog)s {0}'.format(__version__))
args = parser.parse_args(argv)
for f in args.input:
if f.isatty():
print("You are running chardetect interactively. Press " +
"CTRL-D twice at the start of a blank line to signal the " +
"end of your input. If you want help, run chardetect " +
"--help\n", file=sys.stderr)
print(description_of(f, f.name))
if __name__ == '__main__':
main()
| mit |
charanpald/APGL | apgl/graph/test/GeneralVertexListTest.py | 1 | 1641 |
from apgl.graph.GeneralVertexList import GeneralVertexList
from apgl.graph.test.AbstractVertexListTest import AbstractVertexListTest
from apgl.util.PathDefaults import PathDefaults
import unittest
import logging
class GeneralVertexListTest(unittest.TestCase, AbstractVertexListTest):
def setUp(self):
self.VListType = GeneralVertexList
self.numVertices = 10
self.vList = GeneralVertexList(self.numVertices)
self.emptyVertex = None
self.initialise()
def testConstructor(self):
self.assertEquals(self.vList.getNumVertices(), self.numVertices)
def testSaveLoad(self):
try:
vList = GeneralVertexList(self.numVertices)
vList.setVertex(0, "abc")
vList.setVertex(1, 12)
vList.setVertex(2, "num")
tempDir = PathDefaults.getTempDir()
fileName = tempDir + "vList"
vList.save(fileName)
vList2 = GeneralVertexList.load(fileName)
for i in range(self.numVertices):
self.assertEquals(vList.getVertex(i), vList2.getVertex(i))
except IOError as e:
logging.warn(e)
pass
def testAddVertices(self):
vList = GeneralVertexList(10)
vList.setVertex(1, 2)
self.assertEquals(vList.getNumVertices(), 10)
self.assertEquals(vList[1], 2)
vList.addVertices(5)
self.assertEquals(vList.getNumVertices(), 15)
vList.setVertex(11, 2)
self.assertEquals(vList[1], 2)
self.assertEquals(vList[1], 2)
if __name__ == '__main__':
unittest.main()
| bsd-3-clause |
cdjones32/vertx-web | src/test/sockjs-protocol/venv/lib/python2.7/site-packages/pip/_vendor/distlib/_backport/sysconfig.py | 168 | 26964 | # -*- coding: utf-8 -*-
#
# Copyright (C) 2012 The Python Software Foundation.
# See LICENSE.txt and CONTRIBUTORS.txt.
#
"""Access to Python's configuration information."""
import codecs
import os
import re
import sys
from os.path import pardir, realpath
try:
import configparser
except ImportError:
import ConfigParser as configparser
__all__ = [
'get_config_h_filename',
'get_config_var',
'get_config_vars',
'get_makefile_filename',
'get_path',
'get_path_names',
'get_paths',
'get_platform',
'get_python_version',
'get_scheme_names',
'parse_config_h',
]
def _safe_realpath(path):
try:
return realpath(path)
except OSError:
return path
if sys.executable:
_PROJECT_BASE = os.path.dirname(_safe_realpath(sys.executable))
else:
# sys.executable can be empty if argv[0] has been changed and Python is
# unable to retrieve the real program name
_PROJECT_BASE = _safe_realpath(os.getcwd())
if os.name == "nt" and "pcbuild" in _PROJECT_BASE[-8:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir))
# PC/VS7.1
if os.name == "nt" and "\\pc\\v" in _PROJECT_BASE[-10:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
# PC/AMD64
if os.name == "nt" and "\\pcbuild\\amd64" in _PROJECT_BASE[-14:].lower():
_PROJECT_BASE = _safe_realpath(os.path.join(_PROJECT_BASE, pardir, pardir))
def is_python_build():
for fn in ("Setup.dist", "Setup.local"):
if os.path.isfile(os.path.join(_PROJECT_BASE, "Modules", fn)):
return True
return False
_PYTHON_BUILD = is_python_build()
_cfg_read = False
def _ensure_cfg_read():
global _cfg_read
if not _cfg_read:
from distlib.resources import finder
backport_package = __name__.rsplit('.', 1)[0]
_finder = finder(backport_package)
_cfgfile = _finder.find('sysconfig.cfg')
assert _cfgfile, 'sysconfig.cfg exists'
with _cfgfile.as_stream() as s:
_SCHEMES.readfp(s)
if _PYTHON_BUILD:
for scheme in ('posix_prefix', 'posix_home'):
_SCHEMES.set(scheme, 'include', '{srcdir}/Include')
_SCHEMES.set(scheme, 'platinclude', '{projectbase}/.')
_cfg_read = True
_SCHEMES = configparser.RawConfigParser()
_VAR_REPL = re.compile(r'\{([^{]*?)\}')
def _expand_globals(config):
_ensure_cfg_read()
if config.has_section('globals'):
globals = config.items('globals')
else:
globals = tuple()
sections = config.sections()
for section in sections:
if section == 'globals':
continue
for option, value in globals:
if config.has_option(section, option):
continue
config.set(section, option, value)
config.remove_section('globals')
# now expanding local variables defined in the cfg file
#
for section in config.sections():
variables = dict(config.items(section))
def _replacer(matchobj):
name = matchobj.group(1)
if name in variables:
return variables[name]
return matchobj.group(0)
for option, value in config.items(section):
config.set(section, option, _VAR_REPL.sub(_replacer, value))
#_expand_globals(_SCHEMES)
# FIXME don't rely on sys.version here, its format is an implementation detail
# of CPython, use sys.version_info or sys.hexversion
_PY_VERSION = sys.version.split()[0]
_PY_VERSION_SHORT = sys.version[:3]
_PY_VERSION_SHORT_NO_DOT = _PY_VERSION[0] + _PY_VERSION[2]
_PREFIX = os.path.normpath(sys.prefix)
_EXEC_PREFIX = os.path.normpath(sys.exec_prefix)
_CONFIG_VARS = None
_USER_BASE = None
def _subst_vars(path, local_vars):
"""In the string `path`, replace tokens like {some.thing} with the
corresponding value from the map `local_vars`.
If there is no corresponding value, leave the token unchanged.
"""
def _replacer(matchobj):
name = matchobj.group(1)
if name in local_vars:
return local_vars[name]
elif name in os.environ:
return os.environ[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, path)
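# Illustrative only: _subst_vars('{userbase}/lib', {'userbase': '/home/u/.local'})
# returns '/home/u/.local/lib'; a token found in neither local_vars nor
# os.environ, e.g. '{missing}', is left unchanged.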
def _extend_dict(target_dict, other_dict):
target_keys = target_dict.keys()
for key, value in other_dict.items():
if key in target_keys:
continue
target_dict[key] = value
def _expand_vars(scheme, vars):
res = {}
if vars is None:
vars = {}
_extend_dict(vars, get_config_vars())
for key, value in _SCHEMES.items(scheme):
if os.name in ('posix', 'nt'):
value = os.path.expanduser(value)
res[key] = os.path.normpath(_subst_vars(value, vars))
return res
def format_value(value, vars):
def _replacer(matchobj):
name = matchobj.group(1)
if name in vars:
return vars[name]
return matchobj.group(0)
return _VAR_REPL.sub(_replacer, value)
def _get_default_scheme():
if os.name == 'posix':
# the default scheme for posix is posix_prefix
return 'posix_prefix'
return os.name
def _getuserbase():
env_base = os.environ.get("PYTHONUSERBASE", None)
def joinuser(*args):
return os.path.expanduser(os.path.join(*args))
# what about 'os2emx', 'riscos' ?
if os.name == "nt":
base = os.environ.get("APPDATA") or "~"
if env_base:
return env_base
else:
return joinuser(base, "Python")
if sys.platform == "darwin":
framework = get_config_var("PYTHONFRAMEWORK")
if framework:
if env_base:
return env_base
else:
return joinuser("~", "Library", framework, "%d.%d" %
sys.version_info[:2])
if env_base:
return env_base
else:
return joinuser("~", ".local")
def _parse_makefile(filename, vars=None):
"""Parse a Makefile-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
# Regexes needed for parsing Makefile (and similar syntaxes,
# like old-style Setup files).
_variable_rx = re.compile("([a-zA-Z][a-zA-Z0-9_]+)\s*=\s*(.*)")
_findvar1_rx = re.compile(r"\$\(([A-Za-z][A-Za-z0-9_]*)\)")
_findvar2_rx = re.compile(r"\${([A-Za-z][A-Za-z0-9_]*)}")
if vars is None:
vars = {}
done = {}
notdone = {}
with codecs.open(filename, encoding='utf-8', errors="surrogateescape") as f:
lines = f.readlines()
for line in lines:
if line.startswith('#') or line.strip() == '':
continue
m = _variable_rx.match(line)
if m:
n, v = m.group(1, 2)
v = v.strip()
# `$$' is a literal `$' in make
tmpv = v.replace('$$', '')
if "$" in tmpv:
notdone[n] = v
else:
try:
v = int(v)
except ValueError:
# insert literal `$'
done[n] = v.replace('$$', '$')
else:
done[n] = v
# do variable interpolation here
variables = list(notdone.keys())
# Variables with a 'PY_' prefix in the makefile. These need to
# be made available without that prefix through sysconfig.
# Special care is needed to ensure that variable expansion works, even
# if the expansion uses the name without a prefix.
renamed_variables = ('CFLAGS', 'LDFLAGS', 'CPPFLAGS')
while len(variables) > 0:
for name in tuple(variables):
value = notdone[name]
m = _findvar1_rx.search(value) or _findvar2_rx.search(value)
if m is not None:
n = m.group(1)
found = True
if n in done:
item = str(done[n])
elif n in notdone:
# get it on a subsequent round
found = False
elif n in os.environ:
# do it like make: fall back to environment
item = os.environ[n]
elif n in renamed_variables:
if (name.startswith('PY_') and
name[3:] in renamed_variables):
item = ""
elif 'PY_' + n in notdone:
found = False
else:
item = str(done['PY_' + n])
else:
done[n] = item = ""
if found:
after = value[m.end():]
value = value[:m.start()] + item + after
if "$" in after:
notdone[name] = value
else:
try:
value = int(value)
except ValueError:
done[name] = value.strip()
else:
done[name] = value
variables.remove(name)
if (name.startswith('PY_') and
name[3:] in renamed_variables):
name = name[3:]
if name not in done:
done[name] = value
else:
# bogus variable reference (e.g. "prefix=$/opt/python");
# just drop it since we can't deal
done[name] = value
variables.remove(name)
# strip spurious spaces
for k, v in done.items():
if isinstance(v, str):
done[k] = v.strip()
# save the results in the global dictionary
vars.update(done)
return vars
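# Illustrative only: for a makefile containing
#     prefix=/usr
#     LIBDIR=$(prefix)/lib
# _parse_makefile() resolves the $(prefix) reference, so the returned dict maps
# 'prefix' to '/usr' and 'LIBDIR' to '/usr/lib'.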
def get_makefile_filename():
"""Return the path of the Makefile."""
if _PYTHON_BUILD:
return os.path.join(_PROJECT_BASE, "Makefile")
if hasattr(sys, 'abiflags'):
config_dir_name = 'config-%s%s' % (_PY_VERSION_SHORT, sys.abiflags)
else:
config_dir_name = 'config'
return os.path.join(get_path('stdlib'), config_dir_name, 'Makefile')
def _init_posix(vars):
"""Initialize the module as appropriate for POSIX systems."""
# load the installed Makefile:
makefile = get_makefile_filename()
try:
_parse_makefile(makefile, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % makefile
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# load the installed pyconfig.h:
config_h = get_config_h_filename()
try:
with open(config_h) as f:
parse_config_h(f, vars)
except IOError as e:
msg = "invalid Python installation: unable to open %s" % config_h
if hasattr(e, "strerror"):
msg = msg + " (%s)" % e.strerror
raise IOError(msg)
# On AIX, there are wrong paths to the linker scripts in the Makefile
# -- these paths are relative to the Python source, but when installed
# the scripts are in another directory.
if _PYTHON_BUILD:
vars['LDSHARED'] = vars['BLDSHARED']
def _init_non_posix(vars):
"""Initialize the module as appropriate for NT"""
# set basic install directories
vars['LIBDEST'] = get_path('stdlib')
vars['BINLIBDEST'] = get_path('platstdlib')
vars['INCLUDEPY'] = get_path('include')
vars['SO'] = '.pyd'
vars['EXE'] = '.exe'
vars['VERSION'] = _PY_VERSION_SHORT_NO_DOT
vars['BINDIR'] = os.path.dirname(_safe_realpath(sys.executable))
#
# public APIs
#
def parse_config_h(fp, vars=None):
"""Parse a config.h-style file.
A dictionary containing name/value pairs is returned. If an
optional dictionary is passed in as the second argument, it is
used instead of a new dictionary.
"""
if vars is None:
vars = {}
define_rx = re.compile("#define ([A-Z][A-Za-z0-9_]+) (.*)\n")
undef_rx = re.compile("/[*] #undef ([A-Z][A-Za-z0-9_]+) [*]/\n")
while True:
line = fp.readline()
if not line:
break
m = define_rx.match(line)
if m:
n, v = m.group(1, 2)
try:
v = int(v)
except ValueError:
pass
vars[n] = v
else:
m = undef_rx.match(line)
if m:
vars[m.group(1)] = 0
return vars
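# Illustrative only: a pyconfig.h line '#define HAVE_FORK 1' ends up as
# vars['HAVE_FORK'] == 1, while '/* #undef HAVE_FOO */' is recorded as
# vars['HAVE_FOO'] == 0.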
def get_config_h_filename():
"""Return the path of pyconfig.h."""
if _PYTHON_BUILD:
if os.name == "nt":
inc_dir = os.path.join(_PROJECT_BASE, "PC")
else:
inc_dir = _PROJECT_BASE
else:
inc_dir = get_path('platinclude')
return os.path.join(inc_dir, 'pyconfig.h')
def get_scheme_names():
"""Return a tuple containing the schemes names."""
return tuple(sorted(_SCHEMES.sections()))
def get_path_names():
"""Return a tuple containing the paths names."""
# xxx see if we want a static list
return _SCHEMES.options('posix_prefix')
def get_paths(scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a mapping containing an install scheme.
``scheme`` is the install scheme name. If not provided, it will
return the default scheme for the current platform.
"""
_ensure_cfg_read()
if expand:
return _expand_vars(scheme, vars)
else:
return dict(_SCHEMES.items(scheme))
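# --- Illustrative usage (editor's sketch, not part of the original module) ---
# get_paths() maps install path names such as 'stdlib' or 'purelib' to
# concrete directories for a scheme; get_path() looks up a single name.
def _demo_get_paths():
    """Tiny usage sketch for get_paths()/get_path()."""
    paths = get_paths()              # default scheme, variables expanded
    assert 'stdlib' in paths
    assert paths['stdlib'] == get_path('stdlib')
    templates = get_paths(expand=False)   # unexpanded '{base}/...' templates
    assert set(templates) == set(paths)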
def get_path(name, scheme=_get_default_scheme(), vars=None, expand=True):
"""Return a path corresponding to the scheme.
``scheme`` is the install scheme name.
"""
return get_paths(scheme, vars, expand)[name]
def get_config_vars(*args):
"""With no arguments, return a dictionary of all configuration
variables relevant for the current platform.
On Unix, this means every variable defined in Python's installed Makefile;
On Windows and Mac OS it's a much smaller set.
With arguments, return a list of values that result from looking up
each argument in the configuration variable dictionary.
"""
global _CONFIG_VARS
if _CONFIG_VARS is None:
_CONFIG_VARS = {}
# Normalized versions of prefix and exec_prefix are handy to have;
# in fact, these are the standard versions used most places in the
# distutils2 module.
_CONFIG_VARS['prefix'] = _PREFIX
_CONFIG_VARS['exec_prefix'] = _EXEC_PREFIX
_CONFIG_VARS['py_version'] = _PY_VERSION
_CONFIG_VARS['py_version_short'] = _PY_VERSION_SHORT
_CONFIG_VARS['py_version_nodot'] = _PY_VERSION[0] + _PY_VERSION[2]
_CONFIG_VARS['base'] = _PREFIX
_CONFIG_VARS['platbase'] = _EXEC_PREFIX
_CONFIG_VARS['projectbase'] = _PROJECT_BASE
try:
_CONFIG_VARS['abiflags'] = sys.abiflags
except AttributeError:
# sys.abiflags may not be defined on all platforms.
_CONFIG_VARS['abiflags'] = ''
if os.name in ('nt', 'os2'):
_init_non_posix(_CONFIG_VARS)
if os.name == 'posix':
_init_posix(_CONFIG_VARS)
# Setting 'userbase' is done below the call to the
# init function to enable using 'get_config_var' in
# the init-function.
if sys.version >= '2.6':
_CONFIG_VARS['userbase'] = _getuserbase()
if 'srcdir' not in _CONFIG_VARS:
_CONFIG_VARS['srcdir'] = _PROJECT_BASE
else:
_CONFIG_VARS['srcdir'] = _safe_realpath(_CONFIG_VARS['srcdir'])
# Convert srcdir into an absolute path if it appears necessary.
# Normally it is relative to the build directory. However, during
# testing, for example, we might be running a non-installed python
# from a different directory.
if _PYTHON_BUILD and os.name == "posix":
base = _PROJECT_BASE
try:
cwd = os.getcwd()
except OSError:
cwd = None
if (not os.path.isabs(_CONFIG_VARS['srcdir']) and
base != cwd):
# srcdir is relative and we are not in the same directory
# as the executable. Assume executable is in the build
# directory and make srcdir absolute.
srcdir = os.path.join(base, _CONFIG_VARS['srcdir'])
_CONFIG_VARS['srcdir'] = os.path.normpath(srcdir)
if sys.platform == 'darwin':
kernel_version = os.uname()[2] # Kernel version (8.4.3)
major_version = int(kernel_version.split('.')[0])
if major_version < 8:
# On Mac OS X before 10.4, check if -arch and -isysroot
# are in CFLAGS or LDFLAGS and remove them if they are.
# This is needed when building extensions on a 10.3 system
# using a universal build of python.
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = re.sub('-isysroot [^ \t]*', ' ', flags)
_CONFIG_VARS[key] = flags
else:
# Allow the user to override the architecture flags using
# an environment variable.
# NOTE: This name was introduced by Apple in OSX 10.5 and
# is used by several scripting languages distributed with
# that OS release.
if 'ARCHFLAGS' in os.environ:
arch = os.environ['ARCHFLAGS']
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-arch\s+\w+\s', ' ', flags)
flags = flags + ' ' + arch
_CONFIG_VARS[key] = flags
# If we're on OSX 10.5 or later and the user tries to
                # compile an extension using an SDK that is not present
# on the current machine it is better to not use an SDK
# than to fail.
#
                # The major use case for this is users running a Python.org
# binary installer on OSX 10.6: that installer uses
# the 10.4u SDK, but that SDK is not installed by default
# when you install Xcode.
#
CFLAGS = _CONFIG_VARS.get('CFLAGS', '')
m = re.search('-isysroot\s+(\S+)', CFLAGS)
if m is not None:
sdk = m.group(1)
if not os.path.exists(sdk):
for key in ('LDFLAGS', 'BASECFLAGS',
# a number of derived variables. These need to be
# patched up as well.
'CFLAGS', 'PY_CFLAGS', 'BLDSHARED'):
flags = _CONFIG_VARS[key]
flags = re.sub('-isysroot\s+\S+(\s|$)', ' ', flags)
_CONFIG_VARS[key] = flags
if args:
vals = []
for name in args:
vals.append(_CONFIG_VARS.get(name))
return vals
else:
return _CONFIG_VARS
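# --- Illustrative usage (editor's sketch, not part of the original module) ---
# Called without arguments it returns the full cached dictionary; called with
# names it returns a list of looked-up values, with None for unknown names.
def _demo_get_config_vars():
    """Tiny usage sketch for get_config_vars()/get_config_var()."""
    all_vars = get_config_vars()
    assert all_vars['prefix'] == get_config_var('prefix')
    short_version, missing = get_config_vars('py_version_short', 'NO_SUCH_VAR')
    assert short_version == get_config_var('py_version_short')
    assert missing is None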
def get_config_var(name):
"""Return the value of a single variable using the dictionary returned by
'get_config_vars()'.
Equivalent to get_config_vars().get(name)
"""
return get_config_vars().get(name)
def get_platform():
"""Return a string that identifies the current platform.
This is used mainly to distinguish platform-specific build directories and
platform-specific built distributions. Typically includes the OS name
and version and the architecture (as supplied by 'os.uname()'),
although the exact information included depends on the OS; eg. for IRIX
the architecture isn't particularly important (IRIX only runs on SGI
hardware), but for Linux the kernel version isn't particularly
important.
Examples of returned values:
linux-i586
linux-alpha (?)
solaris-2.6-sun4u
irix-5.3
irix64-6.2
Windows will return one of:
       win-amd64 (64bit Windows on AMD64 (aka x86_64, Intel64, EM64T, etc))
win-ia64 (64bit Windows on Itanium)
win32 (all others - specifically, sys.platform is returned)
For other non-POSIX platforms, currently just returns 'sys.platform'.
"""
if os.name == 'nt':
# sniff sys.version for architecture.
prefix = " bit ("
i = sys.version.find(prefix)
if i == -1:
return sys.platform
j = sys.version.find(")", i)
look = sys.version[i+len(prefix):j].lower()
if look == 'amd64':
return 'win-amd64'
if look == 'itanium':
return 'win-ia64'
return sys.platform
if os.name != "posix" or not hasattr(os, 'uname'):
# XXX what about the architecture? NT is Intel or Alpha,
# Mac OS is M68k or PPC, etc.
return sys.platform
# Try to distinguish various flavours of Unix
osname, host, release, version, machine = os.uname()
# Convert the OS name to lowercase, remove '/' characters
# (to accommodate BSD/OS), and translate spaces (for "Power Macintosh")
osname = osname.lower().replace('/', '')
machine = machine.replace(' ', '_')
machine = machine.replace('/', '-')
if osname[:5] == "linux":
# At least on Linux/Intel, 'machine' is the processor --
# i386, etc.
# XXX what about Alpha, SPARC, etc?
return "%s-%s" % (osname, machine)
elif osname[:5] == "sunos":
if release[0] >= "5": # SunOS 5 == Solaris 2
osname = "solaris"
release = "%d.%s" % (int(release[0]) - 3, release[2:])
# fall through to standard osname-release-machine representation
elif osname[:4] == "irix": # could be "irix64"!
return "%s-%s" % (osname, release)
elif osname[:3] == "aix":
return "%s-%s.%s" % (osname, version, release)
elif osname[:6] == "cygwin":
osname = "cygwin"
rel_re = re.compile(r'[\d.]+')
m = rel_re.match(release)
if m:
release = m.group()
elif osname[:6] == "darwin":
#
# For our purposes, we'll assume that the system version from
# distutils' perspective is what MACOSX_DEPLOYMENT_TARGET is set
# to. This makes the compatibility story a bit more sane because the
# machine is going to compile and link as if it were
# MACOSX_DEPLOYMENT_TARGET.
cfgvars = get_config_vars()
macver = cfgvars.get('MACOSX_DEPLOYMENT_TARGET')
if True:
# Always calculate the release of the running machine,
# needed to determine if we can build fat binaries or not.
macrelease = macver
# Get the system version. Reading this plist is a documented
# way to get the system version (see the documentation for
# the Gestalt Manager)
try:
f = open('/System/Library/CoreServices/SystemVersion.plist')
except IOError:
# We're on a plain darwin box, fall back to the default
# behaviour.
pass
else:
try:
m = re.search(r'<key>ProductUserVisibleVersion</key>\s*'
r'<string>(.*?)</string>', f.read())
finally:
f.close()
if m is not None:
macrelease = '.'.join(m.group(1).split('.')[:2])
# else: fall back to the default behaviour
if not macver:
macver = macrelease
if macver:
release = macver
osname = "macosx"
if ((macrelease + '.') >= '10.4.' and
'-arch' in get_config_vars().get('CFLAGS', '').strip()):
# The universal build will build fat binaries, but not on
# systems before 10.4
#
# Try to detect 4-way universal builds, those have machine-type
# 'universal' instead of 'fat'.
machine = 'fat'
cflags = get_config_vars().get('CFLAGS')
archs = re.findall('-arch\s+(\S+)', cflags)
archs = tuple(sorted(set(archs)))
if len(archs) == 1:
machine = archs[0]
elif archs == ('i386', 'ppc'):
machine = 'fat'
elif archs == ('i386', 'x86_64'):
machine = 'intel'
elif archs == ('i386', 'ppc', 'x86_64'):
machine = 'fat3'
elif archs == ('ppc64', 'x86_64'):
machine = 'fat64'
elif archs == ('i386', 'ppc', 'ppc64', 'x86_64'):
machine = 'universal'
else:
raise ValueError(
"Don't know machine value for archs=%r" % (archs,))
elif machine == 'i386':
# On OSX the machine type returned by uname is always the
# 32-bit variant, even if the executable architecture is
# the 64-bit variant
if sys.maxsize >= 2**32:
machine = 'x86_64'
elif machine in ('PowerPC', 'Power_Macintosh'):
# Pick a sane name for the PPC architecture.
# See 'i386' case
if sys.maxsize >= 2**32:
machine = 'ppc64'
else:
machine = 'ppc'
return "%s-%s-%s" % (osname, release, machine)
def get_python_version():
return _PY_VERSION_SHORT
def _print_dict(title, data):
for index, (key, value) in enumerate(sorted(data.items())):
if index == 0:
print('%s: ' % (title))
print('\t%s = "%s"' % (key, value))
def _main():
"""Display all information sysconfig detains."""
print('Platform: "%s"' % get_platform())
print('Python version: "%s"' % get_python_version())
print('Current installation scheme: "%s"' % _get_default_scheme())
print()
_print_dict('Paths', get_paths())
print()
_print_dict('Variables', get_config_vars())
if __name__ == '__main__':
_main()
| apache-2.0 |
lungetech/luigi | luigi/contrib/sge.py | 11 | 11055 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""SGE batch system Tasks.
Adapted by Jake Feala (@jfeala) from
`LSF extension <https://github.com/dattalab/luigi/blob/lsf/luigi/lsf.py>`_
by Alex Wiltschko (@alexbw)
Maintained by Jake Feala (@jfeala)
Sun Grid Engine (SGE) is a job scheduler used to allocate compute resources on a
shared cluster. Jobs are submitted using the ``qsub`` command and monitored
using ``qstat``. To get started, install luigi on all nodes.
To run luigi workflows on an SGE cluster, subclass
:class:`luigi.contrib.sge.SGEJobTask` as you would any :class:`luigi.Task`,
but override the ``work()`` method, instead of ``run()``, to define the job
code. Then, run your Luigi workflow from the master node, assigning > 1
``workers`` in order to distribute the tasks in parallel across the cluster.
The following is an example usage (and can also be found in ``sge_tests.py``)
.. code-block:: python
import logging
import luigi
from luigi.contrib.sge import SGEJobTask
logger = logging.getLogger('luigi-interface')
class TestJobTask(SGEJobTask):
i = luigi.Parameter()
def work(self):
logger.info('Running test job...')
with open(self.output().path, 'w') as f:
f.write('this is a test')
def output(self):
return luigi.LocalTarget(os.path.join('/home', 'testfile_' + str(self.i)))
if __name__ == '__main__':
tasks = [TestJobTask(i=str(i), n_cpu=i+1) for i in range(3)]
luigi.build(tasks, local_scheduler=True, workers=3)
The ``n-cpu`` parameter allows you to define different compute resource
requirements (or slots, in SGE terms) for each task. In this example, the
third Task asks for 3 CPU slots. If your cluster only contains nodes with
2 CPUs, this task will hang indefinitely in the queue. See the docs for
:class:`luigi.contrib.sge.SGEJobTask` for other SGE parameters. As for any
task, you can also set these in your luigi configuration file as shown below.
The default values below were matched to the values used by MIT StarCluster,
an open-source SGE cluster manager for use with Amazon EC2::
[SGEJobTask]
shared-tmp-dir = /home
parallel-env = orte
n-cpu = 2
"""
# This extension is modeled after the hadoop.py approach.
#
# Implementation notes
# The procedure:
# - Pickle the class
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner function hits the work button on it
import os
import subprocess
import time
import sys
import logging
import random
import shutil
try:
import cPickle as pickle
except ImportError:
import pickle
import luigi
import luigi.hadoop
from luigi.contrib import sge_runner
logger = logging.getLogger('luigi-interface')
logger.propagate = 0
POLL_TIME = 5 # decided to hard-code rather than configure here
def _parse_qstat_state(qstat_out, job_id):
"""Parse "state" column from `qstat` output for given job_id
Returns state for the *first* job matching job_id. Returns 'u' if
`qstat` output is empty or job_id is not found.
"""
if qstat_out.strip() == '':
return 'u'
lines = qstat_out.split('\n')
# skip past header
while not lines.pop(0).startswith('---'):
pass
for line in lines:
if line:
job, prior, name, user, state = line.strip().split()[0:5]
if int(job) == int(job_id):
return state
return 'u'
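# --- Illustrative example (editor's sketch, not part of upstream luigi) ---
# The listing below is hypothetical; the parser only relies on the '---'
# header rule and the first five whitespace-separated columns of each row.
_EXAMPLE_QSTAT_OUT = (
    'job-ID  prior    name  user   state submit/start at      queue  slots\n'
    '----------------------------------------------------------------------\n'
    '    42  0.55500  test  alice  r     01/01/2015 10:00:00  all.q  1\n')
def _demo_parse_qstat_state():
    """Tiny sanity check for _parse_qstat_state (illustrative only)."""
    assert _parse_qstat_state(_EXAMPLE_QSTAT_OUT, 42) == 'r'
    assert _parse_qstat_state(_EXAMPLE_QSTAT_OUT, 99) == 'u'  # not listed
    assert _parse_qstat_state('', 42) == 'u'                  # empty output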
def _parse_qsub_job_id(qsub_out):
"""Parse job id from qsub output string.
Assume format:
"Your job <job_id> ("<job_name>") has been submitted"
"""
return int(qsub_out.split()[2])
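# --- Illustrative example (editor's sketch, not part of upstream luigi) ---
# The job id and name below are made up; only the documented output format
# "Your job <job_id> ("<job_name>") has been submitted" is assumed.
def _demo_parse_qsub_job_id():
    """Tiny sanity check for _parse_qsub_job_id (illustrative only)."""
    out = 'Your job 12345 ("TestJobTask") has been submitted'
    assert _parse_qsub_job_id(out) == 12345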
def _build_qsub_command(cmd, job_name, outfile, errfile, pe, n_cpu):
"""Submit shell command to SGE queue via `qsub`"""
qsub_template = """echo {cmd} | qsub -o ":{outfile}" -e ":{errfile}" -V -r y -pe {pe} {n_cpu} -N {job_name}"""
return qsub_template.format(
cmd=cmd, job_name=job_name, outfile=outfile, errfile=errfile,
pe=pe, n_cpu=n_cpu)
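# --- Illustrative example (editor's sketch, not part of upstream luigi) ---
# The paths and names below are hypothetical; this just shows the echo-pipe
# command string that _run_job() ends up submitting.
def _demo_build_qsub_command():
    """Show the command produced by _build_qsub_command (illustrative only)."""
    cmd = _build_qsub_command('python runner.py "/home/tmp"', 'TestJobTask',
                              '/home/tmp/job.out', '/home/tmp/job.err',
                              'orte', 2)
    assert cmd == ('echo python runner.py "/home/tmp" | '
                   'qsub -o ":/home/tmp/job.out" -e ":/home/tmp/job.err" '
                   '-V -r y -pe orte 2 -N TestJobTask')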
class SGEJobTask(luigi.Task):
"""Base class for executing a job on SunGrid Engine
Override ``work()`` (rather than ``run()``) with your job code.
Parameters:
- n_cpu: Number of CPUs (or "slots") to allocate for the Task. This
value is passed as ``qsub -pe {pe} {n_cpu}``
- parallel_env: SGE parallel environment name. The default is "orte",
the parallel environment installed with MIT StarCluster. If you
are using a different cluster environment, check with your
sysadmin for the right pe to use. This value is passed as {pe}
to the qsub command above.
- shared_tmp_dir: Shared drive accessible from all nodes in the cluster.
Task classes and dependencies are pickled to a temporary folder on
this drive. The default is ``/home``, the NFS share location setup
by StarCluster
"""
n_cpu = luigi.IntParameter(default=2, significant=False)
shared_tmp_dir = luigi.Parameter(default='/home', significant=False)
parallel_env = luigi.Parameter(default='orte', significant=False)
def _fetch_task_failures(self):
if not os.path.exists(self.errfile):
logger.info('No error file')
return []
with open(self.errfile, "r") as f:
errors = f.readlines()
if errors == []:
return errors
if errors[0].strip() == 'stdin: is not a tty': # SGE complains when we submit through a pipe
errors.pop(0)
return errors
def _init_local(self):
# Set up temp folder in shared directory (trim to max filename length)
base_tmp_dir = self.shared_tmp_dir
random_id = '%016x' % random.getrandbits(64)
folder_name = self.task_id + '-' + random_id
self.tmp_dir = os.path.join(base_tmp_dir, folder_name)
max_filename_length = os.fstatvfs(0).f_namemax
self.tmp_dir = self.tmp_dir[:max_filename_length]
logger.info("Tmp dir: %s", self.tmp_dir)
os.makedirs(self.tmp_dir)
# Dump the code to be run into a pickle file
logging.debug("Dumping pickled class")
self._dump(self.tmp_dir)
# Make sure that all the class's dependencies are tarred and available
logging.debug("Tarballing dependencies")
# Grab luigi and the module containing the code to be run
packages = [luigi] + [__import__(self.__module__, None, None, 'dummy')]
luigi.hadoop.create_packages_archive(packages, os.path.join(self.tmp_dir, "packages.tar"))
def run(self):
self._init_local()
self._run_job()
# The procedure:
# - Pickle the class
# - Tarball the dependencies
# - Construct a qsub argument that runs a generic runner function with the path to the pickled class
# - Runner function loads the class from pickle
# - Runner class untars the dependencies
# - Runner function hits the button on the class's work() method
def work(self):
"""Override this method, rather than ``run()``, for your actual work."""
pass
def _dump(self, out_dir=''):
"""Dump instance to file."""
self.job_file = os.path.join(out_dir, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace('(c__main__', "(c" + module_name)
open(self.job_file, "w").write(d)
else:
pickle.dump(self, open(self.job_file, "w"))
def _run_job(self):
# Build a qsub argument that will run sge_runner.py on the directory we've specified
runner_path = sge_runner.__file__
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
job_str = 'python {0} "{1}"'.format(runner_path, self.tmp_dir) # enclose tmp_dir in quotes to protect from special escape chars
# Build qsub submit command
self.outfile = os.path.join(self.tmp_dir, 'job.out')
self.errfile = os.path.join(self.tmp_dir, 'job.err')
submit_cmd = _build_qsub_command(job_str, self.task_family, self.outfile,
self.errfile, self.parallel_env, self.n_cpu)
logger.debug('qsub command: \n' + submit_cmd)
# Submit the job and grab job ID
output = subprocess.check_output(submit_cmd, shell=True)
self.job_id = _parse_qsub_job_id(output)
logger.debug("Submitted job to qsub with response:\n" + output)
self._track_job()
# Now delete the temporaries, if they're there.
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.info('Removing temporary directory %s' % self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def _track_job(self):
while True:
# Sleep for a little bit
time.sleep(POLL_TIME)
# See what the job's up to
            # ASSUMPTION: a plain `qstat` call lists the current user's jobs,
            # so our job id appears in its output while the job is pending or
            # running; once it disappears (state 'u') the job is treated as
            # done or failed, depending on the error file.
qstat_out = subprocess.check_output(['qstat'])
sge_status = _parse_qstat_state(qstat_out, self.job_id)
if sge_status == 'r':
logger.info('Job is running...')
elif sge_status == 'qw':
logger.info('Job is pending...')
elif 'E' in sge_status:
logger.error('Job has FAILED:\n' + '\n'.join(self._fetch_task_failures()))
break
elif sge_status == 't' or sge_status == 'u':
# Then the job could either be failed or done.
errors = self._fetch_task_failures()
if not errors:
logger.info('Job is done')
else:
logger.error('Job has FAILED:\n' + '\n'.join(errors))
break
else:
logger.info('Job status is UNKNOWN!')
logger.info('Status is : %s' % sge_status)
raise Exception("job status isn't one of ['r', 'qw', 'E*', 't', 'u']: %s" % sge_status)
class LocalSGEJobTask(SGEJobTask):
"""A local version of SGEJobTask, for easier debugging.
This version skips the ``qsub`` steps and simply runs ``work()``
on the local node, so you don't need to be on an SGE cluster to
use your Task in a test workflow.
"""
def run(self):
self.work()
| apache-2.0 |
gauravbose/digital-menu | digimenu2/tests/initial_sql_regress/tests.py | 13 | 1556 | from django.core.management.color import no_style
from django.core.management.sql import custom_sql_for_model
from django.db import DEFAULT_DB_ALIAS, connections
from django.test import TestCase, override_settings
from .models import Simple
class InitialSQLTests(TestCase):
"""
The format of the included SQL file for this test suite is important.
It must end with a trailing newline in order to test the fix for #2161.
"""
def test_initial_sql(self):
"""
As pointed out by #14661, test data loaded by custom SQL
can't be relied upon; as a result, the test framework flushes the
data contents before every test. This test validates that this has
occurred.
"""
self.assertEqual(Simple.objects.count(), 0)
def test_custom_sql(self):
"""
Simulate the custom SQL loading by migrate.
"""
connection = connections[DEFAULT_DB_ALIAS]
custom_sql = custom_sql_for_model(Simple, no_style(), connection)
with connection.cursor() as cursor:
for sql in custom_sql:
cursor.execute(sql)
self.assertEqual(Simple.objects.count(), 9)
self.assertEqual(
Simple.objects.get(name__contains='placeholders').name,
'"100%" of % are not placeholders'
)
@override_settings(DEBUG=True)
def test_custom_sql_debug(self):
"""
Same test, ensure that CursorDebugWrapper doesn't alter sql loading
(#3485).
"""
self.test_custom_sql()
| bsd-3-clause |
blaxter/hamster-applet | hamster/storage.py | 1 | 4165 | # - coding: utf-8 -
# Copyright (C) 2007 Patryk Zawadzki <patrys at pld-linux.org>
# Copyright (C) 2007-2009 Toms Baugis <[email protected]>
# This file is part of Project Hamster.
# Project Hamster is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# Project Hamster is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with Project Hamster. If not, see <http://www.gnu.org/licenses/>.
import datetime
class Storage(object):
def __init__(self, parent):
self.parent = parent
def run_fixtures(self):
pass
def dispatch(self, event, data):
self.parent.dispatch(event, data)
def get_fact(self, id):
return self.__get_fact(id)
def add_fact(self, activity_name, start_time = None, end_time = None):
result = self.__add_fact(activity_name, start_time, end_time)
if result:
self.dispatch('day_updated', result['start_time'])
return result
def touch_fact(self, fact, end_time = None):
end_time = end_time or datetime.datetime.now()
result = self.__touch_fact(fact, end_time)
self.dispatch('day_updated', fact['start_time'])
return result
def get_facts(self, date, end_date = None, category_id = None):
return self.__get_facts(date, end_date, category_id)
def get_popular_categories(self):
return self.__get_popular_categories()
def remove_fact(self, fact_id):
fact = self.get_fact(fact_id)
if fact:
self.__remove_fact(fact_id)
self.dispatch('day_updated', fact['start_time'])
def get_activities(self, category_id = None):
return self.__get_activities(category_id = category_id)
def get_sorted_activities(self):
return self.__get_sorted_activities()
def get_autocomplete_activities(self):
return self.__get_autocomplete_activities()
def get_last_activity(self):
return self.__get_last_activity()
def remove_activity(self, id):
result = self.__remove_activity(id)
self.dispatch('activity_updated', ())
return result
def remove_category(self, id):
self.__remove_category(id)
self.dispatch('activity_updated', ())
def move_activity(self, source_id, target_order, insert_after = True):
self.__move_activity(source_id, target_order, insert_after)
self.dispatch('activity_updated', ())
def change_category(self, id, category_id):
changed = self.__change_category(id, category_id)
if changed:
self.dispatch('activity_updated', ())
return changed
def swap_activities(self, id1, priority1, id2, priority2):
res = self.__swap_activities(id1, priority1, id2, priority2)
self.dispatch('activity_updated', ())
return res
def update_activity(self, id, name, category_id):
self.__update_activity(id, name, category_id)
self.dispatch('activity_updated', ())
def add_activity(self, name, category_id = -1):
new_id = self.__add_activity(name, category_id)
self.dispatch('activity_updated', ())
return new_id
def update_category(self, id, name):
self.__update_category(id, name)
self.dispatch('activity_updated', ())
def add_category(self, name):
res = self.__add_category(name)
self.dispatch('activity_updated', ())
return res
def get_category_list(self):
return self.__get_category_list()
def get_category_by_name(self, category):
return self.__get_category_by_name(category)
def get_activity_by_name(self, activity, category_id):
return self.__get_activity_by_name(activity, category_id)
| gpl-3.0 |
Mazecreator/tensorflow | tensorflow/contrib/metrics/python/ops/metric_ops_test.py | 7 | 205973 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for metric_ops."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.contrib import metrics as metrics_lib
from tensorflow.contrib.metrics.python.ops import metric_ops
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes as dtypes_lib
from tensorflow.python.framework import errors_impl
from tensorflow.python.framework import ops
from tensorflow.python.framework import sparse_tensor
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import data_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import random_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
NAN = float('nan')
metrics = metrics_lib
def _enqueue_vector(sess, queue, values, shape=None):
  """Enqueues `values` into `queue` as a constant tensor of the queue's
  dtype; `shape` defaults to (1, len(values))."""
if not shape:
shape = (1, len(values))
dtype = queue.dtypes[0]
sess.run(
queue.enqueue(constant_op.constant(
values, dtype=dtype, shape=shape)))
def _binary_2d_label_to_sparse_value(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
  Only entries equal to 1 in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
batch = 0
for row in labels:
label = 0
xi = 0
for x in row:
if x == 1:
indices.append([batch, xi])
values.append(label)
xi += 1
else:
assert x == 0
label += 1
batch += 1
shape = [len(labels), len(labels[0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
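# --- Illustrative example (editor's sketch, not part of the original tests) ---
# For labels [[0, 1, 0], [1, 0, 1]] only the 1 entries survive: the sparse
# values are the class ids and the indices enumerate those entries per row.
def _demo_binary_2d_label_to_sparse_value():
  """Tiny sanity check for _binary_2d_label_to_sparse_value (illustrative)."""
  sp = _binary_2d_label_to_sparse_value([[0, 1, 0], [1, 0, 1]])
  np.testing.assert_array_equal([[0, 0], [1, 0], [1, 1]], sp.indices)
  np.testing.assert_array_equal([1, 0, 2], sp.values)
  # The recorded dense shape is [2, 3] (num rows x original row length).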
def _binary_2d_label_to_sparse(labels):
"""Convert dense 2D binary indicator tensor to sparse tensor.
  Only entries equal to 1 in `labels` are included in the result.
Args:
labels: Dense 2D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_2d_label_to_sparse_value(labels))
def _binary_3d_label_to_sparse_value(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only entries equal to 1 in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensorValue` whose values are indices along the last dimension of
`labels`.
"""
indices = []
values = []
for d0, labels_d0 in enumerate(labels):
for d1, labels_d1 in enumerate(labels_d0):
d2 = 0
for class_id, label in enumerate(labels_d1):
if label == 1:
values.append(class_id)
indices.append([d0, d1, d2])
d2 += 1
else:
assert label == 0
shape = [len(labels), len(labels[0]), len(labels[0][0])]
return sparse_tensor.SparseTensorValue(
np.array(indices, np.int64),
np.array(values, np.int64), np.array(shape, np.int64))
def _binary_3d_label_to_sparse(labels):
"""Convert dense 3D binary indicator tensor to sparse tensor.
  Only entries equal to 1 in `labels` are included in the result.
  Args:
    labels: Dense 3D binary indicator tensor.
Returns:
`SparseTensor` whose values are indices along the last dimension of
`labels`.
"""
return sparse_tensor.SparseTensor.from_value(
_binary_3d_label_to_sparse_value(labels))
def _assert_nan(test_case, actual):
test_case.assertTrue(math.isnan(actual), 'Expected NAN, got %s.' % actual)
def _assert_local_variables(test_case, expected):
test_case.assertEquals(
set(expected), set(v.name for v in variables.local_variables()))
class StreamingMeanTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/count:0', 'mean/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean(values)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(1.475, sess.run(update_op), 5)
self.assertAlmostEqual(12.4 / 6.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(update_op), 5)
self.assertAlmostEqual(1.65, sess.run(mean), 5)
def test1dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [1])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test1dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 0, shape=(1,))
_enqueue_vector(sess, weights_queue, 1, shape=(1,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 3.2 + 4.0) / 4.0, mean.eval(), 5)
def test2dWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for _ in range(4):
update_op.eval()
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
def test2dWeightedValues_placeholders(self):
with self.test_session() as sess:
# Create the queue that populates the values.
feed_values = ((0, 1), (-4.2, 9.1), (6.5, 0), (-3.2, 4.0))
values = array_ops.placeholder(dtype=dtypes_lib.float32)
# Create the queue that populates the weighted labels.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(2,))
_enqueue_vector(sess, weights_queue, [1, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [1, 0], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 1], shape=(2,))
_enqueue_vector(sess, weights_queue, [0, 0], shape=(2,))
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean(values, weights)
variables.local_variables_initializer().run()
for i in range(4):
update_op.eval(feed_dict={values: feed_values[i]})
self.assertAlmostEqual((0 + 1 - 4.2 + 0) / 4.0, mean.eval(), 5)
class StreamingMeanTensorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_tensor(array_ops.ones([4, 3]))
_assert_local_variables(self, ('mean/total_tensor:0',
'mean/count_tensor:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_tensor(
array_ops.ones([4, 3]), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testBasic(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean))
def testMultiDimensional(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[1, 2], [1, 2]]],
shape=(2, 2, 2))
_enqueue_vector(
sess,
values_queue, [[[1, 2], [1, 2]], [[3, 4], [9, 10]]],
shape=(2, 2, 2))
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
for _ in range(2):
sess.run(update_op)
self.assertAllClose([[[1, 2], [1, 2]], [[2, 3], [5, 6]]], sess.run(mean))
def testUpdateOpsReturnsCurrentValue(self):
with self.test_session() as sess:
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values)
sess.run(variables.local_variables_initializer())
self.assertAllClose([[0, 1]], sess.run(update_op), 5)
self.assertAllClose([[-2.1, 5.05]], sess.run(update_op), 5)
self.assertAllClose([[2.3 / 3., 10.1 / 3.]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(update_op), 5)
self.assertAllClose([[-0.9 / 4., 3.525]], sess.run(mean), 5)
def testWeighted1d(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
_enqueue_vector(sess, weights_queue, [[1]])
_enqueue_vector(sess, weights_queue, [[0]])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[3.25, 0.5]], sess.run(mean), 5)
def testWeighted2d_1(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [1, 1])
_enqueue_vector(sess, weights_queue, [1, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[-2.1, 0.5]], sess.run(mean), 5)
def testWeighted2d_2(self):
with self.test_session() as sess:
# Create the queue that populates the values.
values_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, values_queue, [0, 1])
_enqueue_vector(sess, values_queue, [-4.2, 9.1])
_enqueue_vector(sess, values_queue, [6.5, 0])
_enqueue_vector(sess, values_queue, [-3.2, 4.0])
values = values_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 2))
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
_enqueue_vector(sess, weights_queue, [0, 1])
_enqueue_vector(sess, weights_queue, [0, 0])
weights = weights_queue.dequeue()
mean, update_op = metrics.streaming_mean_tensor(values, weights)
sess.run(variables.local_variables_initializer())
for _ in range(4):
sess.run(update_op)
self.assertAllClose([[0, 0.5]], sess.run(mean), 5)
class StreamingAccuracyTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
name='my_accuracy')
_assert_local_variables(self, ('my_accuracy/count:0',
'my_accuracy/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_accuracy(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 4))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels)
def testPredictionsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones((10, 3))
labels = array_ops.ones((10, 3))
weights = array_ops.ones((9, 3))
with self.assertRaises(ValueError):
metrics.streaming_accuracy(predictions, labels, weights)
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=3, dtype=dtypes_lib.int64, seed=2)
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_accuracy = accuracy.eval()
for _ in range(10):
self.assertEqual(initial_accuracy, accuracy.eval())
def testMultipleUpdates(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, accuracy.eval())
def testEffectivelyEquivalentSizes(self):
predictions = array_ops.ones((40, 1))
labels = array_ops.ones((40,))
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, update_op.eval())
self.assertEqual(1.0, accuracy.eval())
  def testEffectivelyEquivalentSizesWithStaticShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = array_ops.expand_dims(ops.convert_to_tensor([100, 1, 1]),
1) # shape 3, 1
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(), .95)
self.assertGreater(accuracy.eval(), .95)
def testEffectivelyEquivalentSizesWithDynamicallyShapedWeight(self):
predictions = ops.convert_to_tensor([1, 1, 1]) # shape 3,
labels = array_ops.expand_dims(ops.convert_to_tensor([1, 0, 0]),
1) # shape 3, 1
weights = [[100], [1], [1]] # shape 3, 1
weights_placeholder = array_ops.placeholder(
dtype=dtypes_lib.int32, name='weights')
feed_dict = {weights_placeholder: weights}
with self.test_session() as sess:
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights_placeholder)
sess.run(variables.local_variables_initializer())
# if streaming_accuracy does not flatten the weight, accuracy would be
# 0.33333334 due to an intended broadcast of weight. Due to flattening,
# it will be higher than .95
self.assertGreater(update_op.eval(feed_dict=feed_dict), .95)
self.assertGreater(accuracy.eval(feed_dict=feed_dict), .95)
def testMultipleUpdatesWithWeightedValues(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
4, dtypes=dtypes_lib.int64, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [1])
_enqueue_vector(sess, weights_queue, [0])
_enqueue_vector(sess, weights_queue, [0])
weights = weights_queue.dequeue()
accuracy, update_op = metrics.streaming_accuracy(predictions, labels,
weights)
sess.run(variables.local_variables_initializer())
for _ in xrange(3):
sess.run(update_op)
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, accuracy.eval())
class StreamingTruePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives((0, 1, 0), (0, 1, 1))
_assert_local_variables(self, ('true_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tp, tp_update_op = metrics.streaming_true_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(1, tp_update_op.eval())
self.assertEqual(1, tp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tp, tp_update_op = metrics.streaming_true_positives(
predictions, labels, weights=37.0)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tp.eval())
self.assertEqual(37.0, tp_update_op.eval())
self.assertEqual(37.0, tp.eval())
class StreamingFalseNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('false_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fn, fn_update_op = metrics.streaming_false_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(2, fn_update_op.eval())
self.assertEqual(2, fn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fn, fn_update_op = metrics.streaming_false_negatives(
predictions, labels, weights=((3.0,), (5.0,), (7.0,)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fn.eval())
self.assertEqual(8.0, fn_update_op.eval())
self.assertEqual(8.0, fn.eval())
class StreamingFalsePositivesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('false_positives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
fp, fp_update_op = metrics.streaming_false_positives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(4, fp_update_op.eval())
self.assertEqual(4, fp.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
fp, fp_update_op = metrics.streaming_false_positives(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, fp.eval())
self.assertEqual(42.0, fp_update_op.eval())
self.assertEqual(42.0, fp.eval())
class StreamingTrueNegativesTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives((0, 1, 0),
(0, 1, 1))
_assert_local_variables(self, ('true_negatives/count:0',))
def testUnweighted(self):
for expand_predictions in [True, False]:
for expand_labels in [True, False]:
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
if expand_predictions:
predictions = array_ops.expand_dims(predictions, 2)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
if expand_labels:
labels = array_ops.expand_dims(labels, 2)
tn, tn_update_op = metrics.streaming_true_negatives(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(5, tn_update_op.eval())
self.assertEqual(5, tn.eval())
def testWeighted(self):
for dtype in (dtypes_lib.bool, dtypes_lib.int32, dtypes_lib.float32):
predictions = math_ops.cast(constant_op.constant(
((1, 0, 1, 0),
(0, 1, 1, 1),
(0, 0, 0, 0))), dtype=dtype)
labels = math_ops.cast(constant_op.constant(
((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0))), dtype=dtype)
tn, tn_update_op = metrics.streaming_true_negatives(
predictions, labels, weights=((0.0, 2.0, 3.0, 5.0),))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, tn.eval())
self.assertEqual(15.0, tn_update_op.eval())
self.assertEqual(15.0, tn.eval())
class StreamingTruePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('true_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tp.eval())
self.assertAllEqual((3, 1, 0), tp_update_op.eval())
self.assertAllEqual((3, 1, 0), tp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tp, tp_update_op = metrics.streaming_true_positives_at_thresholds(
predictions, labels, weights=37.0, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tp.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp_update_op.eval())
self.assertAllEqual((111.0, 37.0, 0.0), tp.eval())
class StreamingFalseNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_negatives_at_thresholds(
        (0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('false_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fn.eval())
self.assertAllEqual((0, 2, 3), fn_update_op.eval())
self.assertAllEqual((0, 2, 3), fn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fn, fn_update_op = metrics.streaming_false_negatives_at_thresholds(
predictions,
labels,
weights=((3.0,), (5.0,), (7.0,)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fn.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn_update_op.eval())
self.assertAllEqual((0.0, 8.0, 11.0), fn.eval())
class StreamingFalsePositivesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_false_positives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('false_positives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), fp.eval())
self.assertAllEqual((7, 4, 2), fp_update_op.eval())
self.assertAllEqual((7, 4, 2), fp.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
fp, fp_update_op = metrics.streaming_false_positives_at_thresholds(
predictions,
labels,
weights=((1.0, 2.0, 3.0, 5.0),
(7.0, 11.0, 13.0, 17.0),
(19.0, 23.0, 29.0, 31.0)),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), fp.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp_update_op.eval())
self.assertAllEqual((125.0, 42.0, 12.0), fp.eval())
class StreamingTrueNegativesAtThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_true_negatives_at_thresholds(
(0.0, 1.0, 0.0), (0, 1, 1), thresholds=(0.15, 0.5, 0.85))
_assert_local_variables(self, ('true_negatives:0',))
def testUnweighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions, labels, thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0, 0, 0), tn.eval())
self.assertAllEqual((2, 5, 7), tn_update_op.eval())
self.assertAllEqual((2, 5, 7), tn.eval())
def testWeighted(self):
predictions = constant_op.constant(((0.9, 0.2, 0.8, 0.1),
(0.2, 0.9, 0.7, 0.6),
(0.1, 0.2, 0.4, 0.3)))
labels = constant_op.constant(((0, 1, 1, 0),
(1, 0, 0, 0),
(0, 0, 0, 0)))
tn, tn_update_op = metrics.streaming_true_negatives_at_thresholds(
predictions,
labels,
weights=((0.0, 2.0, 3.0, 5.0),),
thresholds=(0.15, 0.5, 0.85))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAllEqual((0.0, 0.0, 0.0), tn.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn_update_op.eval())
self.assertAllEqual((5.0, 15.0, 23.0), tn.eval())
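# A minimal NumPy sketch (not exercised by the tests, which call the library
# ops directly) of the thresholded confusion counts checked above. It assumes
# binary labels, a strict `>` threshold comparison, and weights that broadcast
# against `predictions`.
def _np_confusion_at_thresholds(predictions, labels, thresholds, weights=None):
  predictions = np.asarray(predictions, dtype=np.float64)
  labels = np.asarray(labels).astype(bool)
  if weights is None:
    weights = 1.0
  weights = np.asarray(weights, dtype=np.float64) * np.ones(predictions.shape)
  tp, fn, fp, tn = [], [], [], []
  for t in thresholds:
    positive = predictions > t
    tp.append(np.sum(weights[positive & labels]))
    fn.append(np.sum(weights[~positive & labels]))
    fp.append(np.sum(weights[positive & ~labels]))
    tn.append(np.sum(weights[~positive & ~labels]))
  return np.array(tp), np.array(fn), np.array(fp), np.array(tn)
# For the unweighted cases above with thresholds (0.15, 0.5, 0.85), this yields
# tp == (3, 1, 0), fn == (0, 2, 3), fp == (7, 4, 2) and tn == (2, 5, 7).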
class StreamingPrecisionTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('precision/false_positives/count:0',
'precision/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_precision(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_precision = precision.eval()
for _ in range(10):
self.assertEqual(initial_precision, precision.eval())
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op))
self.assertAlmostEqual(1, precision.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, precision.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted1d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions, labels, weights=constant_op.constant([[2], [5]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 2.0 + 5.0
weighted_positives = (2.0 + 2.0) + (5.0 + 5.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [1, 0, 1, 0]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(expected_precision, update_op.eval())
self.assertAlmostEqual(expected_precision, precision.eval())
def testWeighted2d_placeholders(self):
predictions = array_ops.placeholder(dtype=dtypes_lib.float32)
labels = array_ops.placeholder(dtype=dtypes_lib.float32)
feed_dict = {
predictions: ((1, 0, 1, 0), (1, 0, 1, 0)),
labels: ((0, 1, 1, 0), (1, 0, 0, 1))
}
precision, update_op = metrics.streaming_precision(
predictions,
labels,
weights=constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]]))
with self.test_session():
variables.local_variables_initializer().run()
weighted_tp = 3.0 + 4.0
weighted_positives = (1.0 + 3.0) + (4.0 + 2.0)
expected_precision = weighted_tp / weighted_positives
self.assertAlmostEqual(
expected_precision, update_op.eval(feed_dict=feed_dict))
self.assertAlmostEqual(
expected_precision, precision.eval(feed_dict=feed_dict))
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs)
labels = constant_op.constant(1 - inputs)
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(0, precision.eval())
def testZeroTrueAndFalsePositivesGivesZeroPrecision(self):
predictions = constant_op.constant([0, 0, 0, 0])
labels = constant_op.constant([0, 0, 0, 0])
precision, update_op = metrics.streaming_precision(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0.0, precision.eval())
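# A minimal NumPy sketch (illustrative only, not used by the tests) of the
# weighted precision checked above: the sum of weights on true positives
# divided by the sum of weights on predicted positives. It assumes binary
# predictions/labels and weights that broadcast against `predictions`.
def _np_weighted_precision(predictions, labels, weights=None):
  predictions = np.asarray(predictions).astype(bool)
  labels = np.asarray(labels).astype(bool)
  if weights is None:
    weights = 1.0
  weights = np.asarray(weights, dtype=np.float64) * np.ones(predictions.shape)
  weighted_tp = np.sum(weights[predictions & labels])
  weighted_positives = np.sum(weights[predictions])
  return weighted_tp / weighted_positives if weighted_positives else 0.0
# testWeighted2d above reduces to (3.0 + 4.0) / ((1.0 + 3.0) + (4.0 + 2.0)).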
class StreamingRecallTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_recall(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('recall/false_negatives/count:0',
'recall/true_positives/count:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_recall = recall.eval()
for _ in range(10):
self.assertEqual(initial_recall, recall.eval())
def testAllCorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(1, recall.eval())
def testSomeCorrect(self):
predictions = constant_op.constant([1, 0, 1, 0], shape=(1, 4))
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, update_op.eval())
self.assertAlmostEqual(0.5, recall.eval())
def testWeighted1d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[2], [5]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 2.0 + 5.0
weighted_t = (2.0 + 2.0) + (5.0 + 5.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testWeighted2d(self):
predictions = constant_op.constant([[1, 0, 1, 0], [0, 1, 0, 1]])
labels = constant_op.constant([[0, 1, 1, 0], [1, 0, 0, 1]])
weights = constant_op.constant([[1, 2, 3, 4], [4, 3, 2, 1]])
recall, update_op = metrics.streaming_recall(
predictions, labels, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
weighted_tp = 3.0 + 1.0
weighted_t = (2.0 + 3.0) + (4.0 + 1.0)
      expected_recall = weighted_tp / weighted_t
      self.assertAlmostEqual(expected_recall, update_op.eval())
      self.assertAlmostEqual(expected_recall, recall.eval())
def testAllIncorrect(self):
np_inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(np_inputs)
labels = constant_op.constant(1 - np_inputs)
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
def testZeroTruePositivesAndFalseNegativesGivesZeroRecall(self):
predictions = array_ops.zeros((1, 4))
labels = array_ops.zeros((1, 4))
recall, update_op = metrics.streaming_recall(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertEqual(0, recall.eval())
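# A minimal NumPy sketch (illustrative only, not used by the tests) of the
# weighted recall checked above: the sum of weights on true positives divided
# by the sum of weights on actual positives. Same broadcasting assumptions as
# `_np_weighted_precision`.
def _np_weighted_recall(predictions, labels, weights=None):
  predictions = np.asarray(predictions).astype(bool)
  labels = np.asarray(labels).astype(bool)
  if weights is None:
    weights = 1.0
  weights = np.asarray(weights, dtype=np.float64) * np.ones(predictions.shape)
  weighted_tp = np.sum(weights[predictions & labels])
  weighted_labels = np.sum(weights[labels])
  return weighted_tp / weighted_labels if weighted_labels else 0.0
# testWeighted2d above reduces to (3.0 + 1.0) / ((2.0 + 3.0) + (4.0 + 1.0)).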
class StreamingCurvePointsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metric_ops.streaming_curve_points(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(
self,
('curve_points/true_positives:0', 'curve_points/false_negatives:0',
'curve_points/false_positives:0', 'curve_points/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
points, _ = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [points])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metric_ops.streaming_curve_points(
labels=array_ops.ones((10, 1)),
predictions=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def _testValueTensorIsIdempotent(self, curve):
predictions = constant_op.constant(
np.random.uniform(size=(10, 3)), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np.random.uniform(high=2, size=(10, 3)), dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels, predictions=predictions, curve=curve)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
sess.run(update_op)
initial_points = points.eval()
sess.run(update_op)
self.assertAllClose(initial_points, points.eval())
def testValueTensorIsIdempotentROC(self):
self._testValueTensorIsIdempotent(curve='ROC')
def testValueTensorIsIdempotentPR(self):
self._testValueTensorIsIdempotent(curve='PR')
def _testCase(self, labels, predictions, curve, expected_points):
with self.test_session() as sess:
predictions_tensor = constant_op.constant(
predictions, dtype=dtypes_lib.float32)
labels_tensor = constant_op.constant(labels, dtype=dtypes_lib.float32)
points, update_op = metric_ops.streaming_curve_points(
labels=labels_tensor,
predictions=predictions_tensor,
num_thresholds=3,
curve=curve)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAllClose(expected_points, points.eval())
def testEdgeCasesROC(self):
self._testCase([[1]], [[1]], 'ROC', [[0, 1], [0, 1], [0, 0]])
self._testCase([[0]], [[0]], 'ROC', [[1, 1], [0, 1], [0, 1]])
self._testCase([[0]], [[1]], 'ROC', [[1, 1], [1, 1], [0, 1]])
self._testCase([[1]], [[0]], 'ROC', [[0, 1], [0, 0], [0, 0]])
def testManyValuesROC(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'ROC',
[[1.0, 1.0], [0.0, 0.75], [0.0, 0.0]])
def testEdgeCasesPR(self):
self._testCase([[1]], [[1]], 'PR', [[1, 1], [1, 1], [0, 1]])
self._testCase([[0]], [[0]], 'PR', [[1, 0], [1, 1], [1, 1]])
self._testCase([[0]], [[1]], 'PR', [[1, 0], [1, 0], [1, 1]])
self._testCase([[1]], [[0]], 'PR', [[1, 1], [0, 1], [0, 1]])
def testManyValuesPR(self):
self._testCase([[1.0, 0.0, 0.0, 1.0, 1.0, 1.0]],
[[0.2, 0.3, 0.4, 0.6, 0.7, 0.8]], 'PR',
[[1.0, 4.0 / 6.0], [0.75, 1.0], [0.0, 1.0]])
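# A minimal NumPy sketch (illustrative only; the library's threshold placement
# and epsilon handling may differ) of the curve points checked above: at each
# threshold, ROC points pair the false positive rate with the true positive
# rate, and PR points pair recall with precision. Reuses
# `_np_confusion_at_thresholds` from earlier in this file.
def _np_curve_points(predictions, labels, thresholds, curve='ROC'):
  tp, fn, fp, tn = _np_confusion_at_thresholds(predictions, labels, thresholds)
  epsilon = 1e-7
  if curve == 'ROC':
    x = fp / (fp + tn + epsilon)  # false positive rate
    y = tp / (tp + fn + epsilon)  # true positive rate (recall)
  else:  # 'PR'
    x = tp / (tp + fn + epsilon)  # recall
    y = tp / (tp + fp + epsilon)  # precision
  return np.stack([x, y], axis=1)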
class StreamingAUCTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_auc(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self,
('auc/true_positives:0', 'auc/false_negatives:0',
'auc/false_positives:0', 'auc/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_auc(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
auc, update_op = metrics.streaming_auc(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_auc = auc.eval()
for _ in range(10):
self.assertAlmostEqual(initial_auc, auc.eval(), 5)
def testPredictionsOutOfRange(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, -1, 1, -1], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
_, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertRaises(errors_impl.InvalidArgumentError, update_op.eval)
def testAllCorrect(self):
self.allCorrectAsExpected('ROC')
def allCorrectAsExpected(self, curve):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
auc, update_op = metrics.streaming_auc(predictions, labels, curve=curve)
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, auc.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op))
self.assertAlmostEqual(0.5, auc.eval())
def testWeighted1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([2], shape=(1, 1))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.5, sess.run(update_op), 5)
self.assertAlmostEqual(0.5, auc.eval(), 5)
def testWeighted2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
weights = constant_op.constant([1, 2, 3, 4], shape=(1, 4))
auc, update_op = metrics.streaming_auc(
predictions, labels, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.7, sess.run(update_op), 5)
self.assertAlmostEqual(0.7, auc.eval(), 5)
def testAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 1], shape=(1, 4))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.79166, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.79166, auc.eval(), delta=1e-3)
def testAnotherAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.1, 0.4, 0.35, 0.8, 0.1, 0.135, 0.81],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 1, 0, 1, 0, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.610317, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.610317, auc.eval(), delta=1e-3)
def testThirdAUCPRSpecialCase(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[0.0, 0.1, 0.2, 0.33, 0.3, 0.4, 0.5],
shape=(1, 7),
dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 0, 0, 0, 1, 1, 1], shape=(1, 7))
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.90277, sess.run(update_op), delta=1e-3)
self.assertAlmostEqual(0.90277, auc.eval(), delta=1e-3)
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0, sess.run(update_op))
self.assertAlmostEqual(0, auc.eval())
def testZeroTruePositivesAndFalseNegativesGivesOneAUC(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
auc, update_op = metrics.streaming_auc(predictions, labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def testRecallOneAndPrecisionOneGivesOnePRAUC(self):
with self.test_session() as sess:
predictions = array_ops.ones([4], dtype=dtypes_lib.float32)
labels = array_ops.ones([4])
auc, update_op = metrics.streaming_auc(predictions, labels, curve='PR')
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 6)
self.assertAlmostEqual(1, auc.eval(), 6)
def np_auc(self, predictions, labels, weights):
"""Computes the AUC explicitly using Numpy.
Args:
predictions: an ndarray with shape [N].
labels: an ndarray with shape [N].
weights: an ndarray with shape [N].
Returns:
the area under the ROC curve.
"""
if weights is None:
weights = np.ones(np.size(predictions))
is_positive = labels > 0
num_positives = np.sum(weights[is_positive])
num_negatives = np.sum(weights[~is_positive])
# Sort descending:
inds = np.argsort(-predictions)
sorted_labels = labels[inds]
sorted_weights = weights[inds]
is_positive = sorted_labels > 0
tp = np.cumsum(sorted_weights * is_positive) / num_positives
return np.sum((sorted_weights * tp)[~is_positive]) / num_negatives
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=num_samples)
noise = np.random.normal(0.0, scale=0.2, size=num_samples)
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
def _enqueue_as_batches(x, enqueue_ops):
x_batches = x.astype(np.float32).reshape((num_batches, batch_size))
x_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(num_batches):
enqueue_ops[i].append(x_queue.enqueue(x_batches[i, :]))
return x_queue.dequeue()
for weights in (None, np.ones(num_samples), np.random.exponential(
scale=1.0, size=num_samples)):
expected_auc = self.np_auc(predictions, labels, weights)
with self.test_session() as sess:
        enqueue_ops = [[] for _ in range(num_batches)]
tf_predictions = _enqueue_as_batches(predictions, enqueue_ops)
tf_labels = _enqueue_as_batches(labels, enqueue_ops)
tf_weights = (_enqueue_as_batches(weights, enqueue_ops) if
weights is not None else None)
for i in range(num_batches):
sess.run(enqueue_ops[i])
auc, update_op = metrics.streaming_auc(
tf_predictions,
tf_labels,
curve='ROC',
num_thresholds=500,
weights=tf_weights)
sess.run(variables.local_variables_initializer())
for i in range(num_batches):
sess.run(update_op)
        # Since this is only approximate, we can't expect a 6-digit match,
        # although the accuracy should improve with more samples and
        # thresholds.
self.assertAlmostEqual(expected_auc, auc.eval(), 2)
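# A minimal NumPy sketch (illustrative only) of the trapezoidal approximation
# that streaming_auc converges to as `num_thresholds` grows; the tests above
# compare against the exact `np_auc` instead. It assumes predictions in
# [0, 1] and reuses `_np_confusion_at_thresholds` from earlier in this file.
def _np_trapezoid_roc_auc(predictions, labels, num_thresholds=200):
  epsilon = 1e-7
  thresholds = np.linspace(-epsilon, 1.0 + epsilon, num_thresholds)
  tp, fn, fp, tn = _np_confusion_at_thresholds(predictions, labels, thresholds)
  fpr = fp / (fp + tn + epsilon)
  tpr = tp / (tp + fn + epsilon)
  # Thresholds increase along the arrays, so fpr/tpr decrease; integrate the
  # ROC curve with the trapezoid rule in that order.
  return np.sum((fpr[:-1] - fpr[1:]) * (tpr[:-1] + tpr[1:]) / 2.0)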
class StreamingSpecificityAtSensitivityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7)
_assert_local_variables(self,
('specificity_at_sensitivity/true_positives:0',
'specificity_at_sensitivity/false_negatives:0',
'specificity_at_sensitivity/false_positives:0',
'specificity_at_sensitivity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_specificity_at_sensitivity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
sensitivity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_specificity = specificity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_specificity, specificity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, sess.run(update_op))
self.assertEqual(1, specificity.eval())
def testSomeCorrectHighSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.8)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op))
self.assertAlmostEqual(1.0, specificity.eval())
def testSomeCorrectLowSensitivity(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted1d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [3]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(0.6, sess.run(update_op))
self.assertAlmostEqual(0.6, specificity.eval())
def testWeighted2d(self):
predictions_values = [0.1, 0.2, 0.4, 0.3, 0.0, 0.1, 0.2, 0.2, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
specificity, update_op = metrics.streaming_specificity_at_sensitivity(
predictions, labels, weights=weights, sensitivity=0.4)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(8.0 / 15.0, sess.run(update_op))
self.assertAlmostEqual(8.0 / 15.0, specificity.eval())
class StreamingSensitivityAtSpecificityTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7)
_assert_local_variables(self,
('sensitivity_at_specificity/true_positives:0',
'sensitivity_at_specificity/false_negatives:0',
'sensitivity_at_specificity/false_positives:0',
'sensitivity_at_specificity/true_negatives:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_sensitivity_at_specificity(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
specificity=0.7,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=2, dtype=dtypes_lib.int64, seed=2)
sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
predictions, labels, specificity=0.7)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_sensitivity = sensitivity.eval()
for _ in range(10):
self.assertAlmostEqual(initial_sensitivity, sensitivity.eval(), 5)
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.7)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertEqual(1, sess.run(update_op))
      self.assertEqual(1, sensitivity.eval())
def testSomeCorrectHighSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.1, 0.45, 0.5, 0.8, 0.9]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.8)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.8, sess.run(update_op))
      self.assertAlmostEqual(0.8, sensitivity.eval())
def testSomeCorrectLowSpecificity(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.6, sess.run(update_op))
      self.assertAlmostEqual(0.6, sensitivity.eval())
def testWeighted(self):
predictions_values = [0.0, 0.1, 0.2, 0.3, 0.4, 0.01, 0.02, 0.25, 0.26, 0.26]
labels_values = [0, 0, 0, 0, 0, 1, 1, 1, 1, 1]
weights_values = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
predictions = constant_op.constant(
predictions_values, dtype=dtypes_lib.float32)
labels = constant_op.constant(labels_values)
weights = constant_op.constant(weights_values)
    sensitivity, update_op = metrics.streaming_sensitivity_at_specificity(
        predictions, labels, weights=weights, specificity=0.4)
    with self.test_session() as sess:
      sess.run(variables.local_variables_initializer())
      self.assertAlmostEqual(0.675, sess.run(update_op))
      self.assertAlmostEqual(0.675, sensitivity.eval())
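# A minimal NumPy sketch (illustrative only; the library's threshold grid and
# tie-breaking may differ) of the operating-point metrics above: sweep
# thresholds, pick the one whose sensitivity is closest to the requested
# value, and report the specificity there. Sensitivity at a given specificity
# is the symmetric computation. Reuses `_np_confusion_at_thresholds`.
def _np_specificity_at_sensitivity(predictions, labels, sensitivity,
                                   weights=None, num_thresholds=200):
  thresholds = np.linspace(0.0, 1.0, num_thresholds)
  tp, fn, fp, tn = _np_confusion_at_thresholds(
      predictions, labels, thresholds, weights)
  epsilon = 1e-7
  sensitivities = tp / (tp + fn + epsilon)
  specificities = tn / (tn + fp + epsilon)
  best = np.argmin(np.abs(sensitivities - sensitivity))
  return specificities[best]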
# TODO(nsilberman): Break this up into two sets of tests.
class StreamingPrecisionRecallThresholdsTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0])
_assert_local_variables(self, (
'precision_at_thresholds/true_positives:0',
'precision_at_thresholds/false_positives:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
prec, _ = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
rec, _ = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [prec, rec])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, precision_op = metrics.streaming_precision_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
_, recall_op = metrics.streaming_recall_at_thresholds(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
thresholds=[0, 0.5, 1.0],
updates_collections=[my_collection_name])
self.assertListEqual(
ops.get_collection(my_collection_name), [precision_op, recall_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.float32, seed=1)
labels = random_ops.random_uniform(
(10, 3), maxval=1, dtype=dtypes_lib.int64, seed=2)
thresholds = [0, 0.5, 1.0]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates, then verify idempotency.
sess.run([prec_op, rec_op])
initial_prec = prec.eval()
initial_rec = rec.eval()
for _ in range(10):
sess.run([prec_op, rec_op])
self.assertAllClose(initial_prec, prec.eval())
self.assertAllClose(initial_rec, rec.eval())
# TODO(nsilberman): fix tests (passing but incorrect).
def testAllCorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(inputs)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertEqual(1, prec.eval())
self.assertEqual(1, rec.eval())
def testSomeCorrect(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 0], shape=(1, 4))
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.5, prec.eval())
self.assertAlmostEqual(0.5, rec.eval())
def testAllIncorrect(self):
inputs = np.random.randint(0, 2, size=(100, 1))
with self.test_session() as sess:
predictions = constant_op.constant(inputs, dtype=dtypes_lib.float32)
labels = constant_op.constant(1 - inputs, dtype=dtypes_lib.float32)
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval())
self.assertAlmostEqual(0, rec.eval())
def testWeights1d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0], [1]], shape=(2, 1), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testWeights2d(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[[1, 0], [1, 0]], shape=(2, 2), dtype=dtypes_lib.float32)
labels = constant_op.constant([[0, 1], [1, 0]], shape=(2, 2))
weights = constant_op.constant(
[[0, 0], [1, 1]], shape=(2, 2), dtype=dtypes_lib.float32)
thresholds = [0.5, 1.1]
prec, prec_op = metrics.streaming_precision_at_thresholds(
predictions, labels, thresholds, weights=weights)
rec, rec_op = metrics.streaming_recall_at_thresholds(
predictions, labels, thresholds, weights=weights)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
prec_low = array_ops.reshape(prec_low, shape=())
prec_high = array_ops.reshape(prec_high, shape=())
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
rec_low = array_ops.reshape(rec_low, shape=())
rec_high = array_ops.reshape(rec_high, shape=())
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(1.0, prec_low.eval(), places=5)
self.assertAlmostEqual(0.0, prec_high.eval(), places=5)
self.assertAlmostEqual(1.0, rec_low.eval(), places=5)
self.assertAlmostEqual(0.0, rec_high.eval(), places=5)
def testExtremeThresholds(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[1, 0, 1, 0], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant([0, 1, 1, 1], shape=(1, 4))
thresholds = [-1.0, 2.0] # lower/higher than any values
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
[prec_low, prec_high] = array_ops.split(
value=prec, num_or_size_splits=2, axis=0)
[rec_low, rec_high] = array_ops.split(
value=rec, num_or_size_splits=2, axis=0)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0.75, prec_low.eval())
self.assertAlmostEqual(0.0, prec_high.eval())
self.assertAlmostEqual(1.0, rec_low.eval())
self.assertAlmostEqual(0.0, rec_high.eval())
def testZeroLabelsPredictions(self):
with self.test_session() as sess:
predictions = array_ops.zeros([4], dtype=dtypes_lib.float32)
labels = array_ops.zeros([4])
thresholds = [0.5]
prec, prec_op = metrics.streaming_precision_at_thresholds(predictions,
labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(predictions, labels,
thresholds)
sess.run(variables.local_variables_initializer())
sess.run([prec_op, rec_op])
self.assertAlmostEqual(0, prec.eval(), 6)
self.assertAlmostEqual(0, rec.eval(), 6)
def testWithMultipleUpdates(self):
num_samples = 1000
batch_size = 10
num_batches = int(num_samples / batch_size)
# Create the labels and data.
labels = np.random.randint(0, 2, size=(num_samples, 1))
noise = np.random.normal(0.0, scale=0.2, size=(num_samples, 1))
predictions = 0.4 + 0.2 * labels + noise
predictions[predictions > 1] = 1
predictions[predictions < 0] = 0
thresholds = [0.3]
tp = 0
fp = 0
fn = 0
tn = 0
for i in range(num_samples):
if predictions[i] > thresholds[0]:
if labels[i] == 1:
tp += 1
else:
fp += 1
else:
if labels[i] == 1:
fn += 1
else:
tn += 1
epsilon = 1e-7
expected_prec = tp / (epsilon + tp + fp)
expected_rec = tp / (epsilon + tp + fn)
labels = labels.astype(np.float32)
predictions = predictions.astype(np.float32)
with self.test_session() as sess:
      # Reshape the data so it's easy to queue up:
predictions_batches = predictions.reshape((batch_size, num_batches))
labels_batches = labels.reshape((batch_size, num_batches))
# Enqueue the data:
predictions_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
labels_queue = data_flow_ops.FIFOQueue(
num_batches, dtypes=dtypes_lib.float32, shapes=(batch_size,))
for i in range(int(num_batches)):
tf_prediction = constant_op.constant(predictions_batches[:, i])
tf_label = constant_op.constant(labels_batches[:, i])
sess.run([
predictions_queue.enqueue(tf_prediction),
labels_queue.enqueue(tf_label)
])
tf_predictions = predictions_queue.dequeue()
tf_labels = labels_queue.dequeue()
prec, prec_op = metrics.streaming_precision_at_thresholds(tf_predictions,
tf_labels,
thresholds)
rec, rec_op = metrics.streaming_recall_at_thresholds(tf_predictions,
tf_labels,
thresholds)
sess.run(variables.local_variables_initializer())
for _ in range(int(num_samples / batch_size)):
sess.run([prec_op, rec_op])
      # Since this is only approximate, we can't expect a 6-digit match,
      # although the accuracy should improve with more samples and thresholds.
self.assertAlmostEqual(expected_prec, prec.eval(), 2)
self.assertAlmostEqual(expected_rec, rec.eval(), 2)
# TODO(ptucker): Remove when we remove `streaming_recall_at_k`.
# This op will be deprecated soon in favor of `streaming_sparse_recall_at_k`.
# Until then, this test validates that both ops yield the same results.
class StreamingRecallAtKTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
self._batch_size = 4
self._num_classes = 3
self._np_predictions = np.matrix(('0.1 0.2 0.7;'
'0.6 0.2 0.2;'
'0.0 0.9 0.1;'
'0.2 0.0 0.8'))
self._np_labels = [0, 0, 0, 0]
def testVars(self):
metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1)
_assert_local_variables(self, ('recall_at_1/count:0',
'recall_at_1/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_recall_at_k(
predictions=array_ops.ones((self._batch_size, self._num_classes)),
labels=array_ops.ones(
(self._batch_size,), dtype=dtypes_lib.int32),
k=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testSingleUpdateKIs1(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=1)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.25, sess.run(update_op))
self.assertEqual(0.25, recall.eval())
self.assertEqual(0.25, sess.run(sp_update_op))
self.assertEqual(0.25, sp_recall.eval())
def testSingleUpdateKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=2)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.5, sess.run(update_op))
self.assertEqual(0.5, recall.eval())
self.assertEqual(0.5, sess.run(sp_update_op))
self.assertEqual(0.5, sp_recall.eval())
def testSingleUpdateKIs3(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
recall, update_op = metrics.streaming_recall_at_k(predictions, labels, k=3)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions, array_ops.reshape(labels, (self._batch_size, 1)), k=3)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
def testSingleUpdateSomeMissingKIs2(self):
predictions = constant_op.constant(
self._np_predictions,
shape=(self._batch_size, self._num_classes),
dtype=dtypes_lib.float32)
labels = constant_op.constant(
self._np_labels, shape=(self._batch_size,), dtype=dtypes_lib.int64)
weights = constant_op.constant(
[0, 1, 0, 1], shape=(self._batch_size,), dtype=dtypes_lib.float32)
recall, update_op = metrics.streaming_recall_at_k(
predictions, labels, k=2, weights=weights)
sp_recall, sp_update_op = metrics.streaming_sparse_recall_at_k(
predictions,
array_ops.reshape(labels, (self._batch_size, 1)),
k=2,
weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.0, sess.run(update_op))
self.assertEqual(1.0, recall.eval())
self.assertEqual(1.0, sess.run(sp_update_op))
self.assertEqual(1.0, sp_recall.eval())
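# Minimal NumPy sketches (illustrative only, not used by the tests) of the
# top-k metrics exercised below. They assume each row of `labels` holds the
# relevant class ids for that example and that ties in `predictions` do not
# matter for the chosen k.
def _np_top_k_indices(predictions, k):
  # Indices of the k largest scores per row, highest score first.
  return np.argsort(-np.asarray(predictions, dtype=np.float64), axis=1)[:, :k]
def _np_precision_at_k(predictions, labels, k):
  top_k = _np_top_k_indices(predictions, k)
  hits = sum(len(set(t) & set(l)) for t, l in zip(top_k.tolist(), labels))
  return hits / float(top_k.size)
def _np_recall_at_k(predictions, labels, k):
  top_k = _np_top_k_indices(predictions, k)
  hits = sum(len(set(t) & set(l)) for t, l in zip(top_k.tolist(), labels))
  return hits / float(sum(len(l) for l in labels))
def _np_average_precision_at_k(predictions, labels, k):
  # Per example: sum over ranks i of (precision@i if rank i is relevant),
  # normalized by min(k, number of relevant labels), then averaged.
  top_k = _np_top_k_indices(predictions, k)
  scores = []
  for ranked, relevant in zip(top_k.tolist(), labels):
    relevant = set(relevant)
    hits, total = 0, 0.0
    for i, class_id in enumerate(ranked):
      if class_id in relevant:
        hits += 1
        total += hits / float(i + 1)
    scores.append(total / min(k, len(relevant)))
  return np.mean(scores)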
class StreamingSparsePrecisionTest(test.TestCase):
def _test_streaming_sparse_precision_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
labels=labels,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_k(self,
predictions,
labels,
k,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
predictions = constant_op.constant(predictions, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_k(
predictions, labels, k, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def _test_streaming_sparse_average_precision_at_top_k(self,
top_k_predictions,
labels,
expected,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_average_precision_at_top_k(
top_k_predictions, labels, weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
local_variables = variables.local_variables()
variables.variables_initializer(local_variables).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertAlmostEqual(expected, update.eval())
self.assertAlmostEqual(expected, metric.eval())
def test_top_k_rank_invalid(self):
with self.test_session():
# top_k_predictions has rank < 2.
top_k_predictions = [9, 4, 6, 2, 0]
sp_labels = sparse_tensor.SparseTensorValue(
indices=np.array([[0,], [1,], [2,]], np.int64),
values=np.array([2, 7, 8], np.int64),
dense_shape=np.array([10,], np.int64))
with self.assertRaises(ValueError):
precision, _ = metrics.streaming_sparse_precision_at_top_k(
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int64),
labels=sp_labels)
variables.variables_initializer(variables.local_variables()).run()
precision.eval()
def test_average_precision(self):
# Example 1.
# Matches example here:
# fastml.com/what-you-wanted-to-know-about-mean-average-precision
labels_ex1 = (0, 1, 2, 3, 4)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
# Example 2.
labels_ex2 = (0, 2, 4, 5, 6)
labels = np.array([labels_ex2], dtype=np.int64)
predictions_ex2 = (0.3, 0.5, 0.0, 0.4, 0.0, 0.1, 0.2)
predictions = (predictions_ex2,)
predictions_top_k_ex2 = (1, 3, 0, 6, 5)
precision_ex2 = (0.0 / 1, 0.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex2 = (0.0 / 1, 0.0 / 2, precision_ex2[2] / 3,
(precision_ex2[2] + precision_ex2[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex2[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=precision_ex2[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex2[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex2[:k],), labels, expected=avg_precision_ex2[i])
    # With both examples, we expect both precision and average precision to be
    # the average of the 2 examples.
labels = np.array([labels_ex1, labels_ex2], dtype=np.int64)
predictions = (predictions_ex1, predictions_ex2)
streaming_precision = [(ex1 + ex2) / 2
for ex1, ex2 in zip(precision_ex1, precision_ex2)]
streaming_average_precision = [
(ex1 + ex2) / 2
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=streaming_precision[i])
predictions_top_k = (predictions_top_k_ex1[:k], predictions_top_k_ex2[:k])
self._test_streaming_sparse_precision_at_top_k(
predictions_top_k, labels, expected=streaming_precision[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=streaming_average_precision[i])
self._test_streaming_sparse_average_precision_at_top_k(
predictions_top_k, labels, expected=streaming_average_precision[i])
    # With weighted examples, we expect the streaming average precision to be
    # the weighted average of the 2 examples.
weights = (0.3, 0.6)
streaming_average_precision = [
(weights[0] * ex1 + weights[1] * ex2) / (weights[0] + weights[1])
for ex1, ex2 in zip(avg_precision_ex1, avg_precision_ex2)
]
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_average_precision_at_k(
predictions,
labels,
k,
expected=streaming_average_precision[i],
weights=weights)
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k], predictions_top_k_ex2[:k]),
labels,
expected=streaming_average_precision[i],
weights=weights)
def test_average_precision_some_labels_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
labels_ex1 = (-1, 0, 1, 2, 3, 4, 7)
labels = np.array([labels_ex1], dtype=np.int64)
predictions_ex1 = (0.2, 0.1, 0.0, 0.4, 0.0, 0.5, 0.3)
predictions = (predictions_ex1,)
predictions_top_k_ex1 = (5, 3, 6, 0, 1, 2)
precision_ex1 = (0.0 / 1, 1.0 / 2, 1.0 / 3, 2.0 / 4)
avg_precision_ex1 = (0.0 / 1, precision_ex1[1] / 2, precision_ex1[1] / 3,
(precision_ex1[1] + precision_ex1[3]) / 4)
for i in xrange(4):
k = i + 1
self._test_streaming_sparse_precision_at_k(
predictions, labels, k, expected=precision_ex1[i])
self._test_streaming_sparse_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=precision_ex1[i])
self._test_streaming_sparse_average_precision_at_k(
predictions, labels, k, expected=avg_precision_ex1[i])
self._test_streaming_sparse_average_precision_at_top_k(
(predictions_top_k_ex1[:k],), labels, expected=avg_precision_ex1[i])
def test_average_precision_at_top_k_static_shape_check(self):
predictions_top_k = array_ops.placeholder(shape=(2, None),
dtype=dtypes_lib.int64)
labels = np.array(((1,), (2,)), dtype=np.int64)
# Fails due to non-static predictions_idx shape.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = (2, 1)
    # Fails since the rank of predictions_idx is less than 2.
with self.assertRaises(ValueError):
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
predictions_top_k = ((2,), (1,))
# Valid static shape.
metric_ops.streaming_sparse_average_precision_at_top_k(predictions_top_k,
labels)
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,1,2 have 0 predictions, classes -1 and 4 are out of range.
for class_id in (-1, 0, 1, 2, 4):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2, class_id=3)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_labels(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
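    # For reference: row 0 top-5 {9, 4, 6, 2, 0} vs labels {2, 7, 8} gives 1
    # hit, row 1 top-5 {5, 7, 2, 9, 6} vs labels {1, 2, 5} gives 2 hits, so
    # overall precision@5 is 3 correct out of 10 predictions.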
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=3.0 / 10)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) range are ignored."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range and are ignored.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
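    # After dropping the out-of-range values -1 and 10, the effective labels
    # are {2, 7, 8} and {1, 2, 5}, i.e. the same as in the dense test above,
    # so the expected precision values are unchanged.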
# Class 2: 2 labels, 2 correct predictions.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=2.0 / 2, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=2.0 / 2, class_id=2)
# Class 5: 1 label, 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=1.0 / 1, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=0.0 / 1, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=0.0 / 1, class_id=7)
# All classes: 10 predictions, 3 correct.
self._test_streaming_sparse_precision_at_k(
predictions, sp_labels, k=5, expected=3.0 / 10)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, sp_labels, expected=3.0 / 10)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 1,3,8 have 0 predictions, classes -1 and 10 are out of range.
for class_id in (-1, 1, 3, 8, 10):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, class_id=class_id)
def test_3d_no_labels(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Classes 0,4,6,9: 0 labels, >=1 prediction.
for class_id in (0, 4, 6, 9):
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
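    # The leading (example, timestep) dims are flattened into 4 rows of top-5
    # predictions (20 in total); the per-row hit counts are 1, 2, 3 and 1,
    # which yields the 7 / 20 overall precision asserted below.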
# Class 2: 4 predictions, all correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=4.0 / 4, class_id=2)
# Class 5: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=2.0 / 2, class_id=5)
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=1.0 / 2, class_id=7)
# All classes: 20 predictions, 7 correct.
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=7.0 / 20)
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=7.0 / 20)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
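    # Both weight shapes used below zero out every (example, timestep) pair
    # ([[0], [0]] broadcasts across timesteps), so no prediction contributes
    # and every expected value is NaN.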
for class_id in xrange(10):
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_precision_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions, labels, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0], [0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 predictions, both correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 incorrect prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 1 correct prediction.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: no predictions.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=NAN,
class_id=7,
weights=[[1, 0], [0, 1]])
# Class 7: 2 predictions, 1 correct.
self._test_streaming_sparse_precision_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
self._test_streaming_sparse_precision_at_top_k(
top_k_predictions,
labels,
expected=1.0 / 2.0,
class_id=7,
weights=[[0, 1], [1, 0]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 0, 1], [0, 0, 1, 0]]
expected_precision = 0.5
with self.test_session():
_, precision = metrics.streaming_sparse_precision_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_precision, precision.eval())
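

# Recall@k counterpart of the precision tests above: roughly, the same top-k
# hits are divided by the number of labels (per class or overall) rather than
# by the number of top-k predictions, which is why several ratios differ.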
class StreamingSparseRecallTest(test.TestCase):
def _test_streaming_sparse_recall_at_k(self,
predictions,
labels,
k,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=labels,
k=k,
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
_assert_nan(self, update.eval())
_assert_nan(self, metric.eval())
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def _test_sparse_recall_at_top_k(self,
labels,
top_k_predictions,
expected,
class_id=None,
weights=None):
with ops.Graph().as_default() as g, self.test_session(g):
if weights is not None:
weights = constant_op.constant(weights, dtypes_lib.float32)
metric, update = metric_ops.sparse_recall_at_top_k(
labels=labels,
top_k_predictions=constant_op.constant(top_k_predictions,
dtypes_lib.int32),
class_id=class_id,
weights=weights)
# Fails without initialized vars.
self.assertRaises(errors_impl.OpError, metric.eval)
self.assertRaises(errors_impl.OpError, update.eval)
variables.variables_initializer(variables.local_variables()).run()
# Run per-step op and assert expected values.
if math.isnan(expected):
self.assertTrue(math.isnan(update.eval()))
self.assertTrue(math.isnan(metric.eval()))
else:
self.assertEqual(expected, update.eval())
self.assertEqual(expected, metric.eval())
def test_one_label_at_k1_nan(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
# Classes 0,1 have 0 labels, 0 predictions, classes -1 and 4 are out of
# range.
for labels in (sparse_labels, dense_labels):
for class_id in (-1, 0, 1, 4):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_one_label_at_k1_no_predictions(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 0 predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=2)
def test_one_label_at_k1(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, class_id=3)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=3)
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2)
def test_one_label_at_k1_weighted(self):
predictions = [[0.1, 0.3, 0.2, 0.4], [0.1, 0.2, 0.3, 0.4]]
top_k_predictions = [[3], [3]]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 0, 1], [0, 0, 1, 0]])
dense_labels = np.array([[3], [2]], dtype=np.int64)
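    # For reference: weighted recall is sum(w * tp) / sum(w * (tp + fn)). Only
    # example 0's label (class 3) is retrieved at k=1, so e.g. with weights
    # (2.0, 3.0) the overall recall is 2.0 / (2.0 + 3.0).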
for labels in (sparse_labels, dense_labels):
# Class 3: 1 label, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, class_id=3, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=3, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=3,
weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1,
class_id=3,
weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2,
class_id=3,
weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=3.0 / 3,
class_id=3,
weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.3 / 0.3,
class_id=3,
weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=1,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.6 / 0.6,
class_id=3,
weights=(0.6, 0.3))
# All classes: 2 labels, 2 predictions, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=NAN, weights=(0.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=(0.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(2.0,))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(2.0,))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, weights=(1.0, 0.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, weights=(0.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, weights=(1.0, 1.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 5, weights=(2.0, 3.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 5, weights=(3.0, 2.0))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.3 / 0.9, weights=(0.3, 0.6))
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=1, expected=0.6 / 0.9, weights=(0.6, 0.3))
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.6 / 0.9, weights=(0.6, 0.3))
def test_three_labels_at_k5_nan(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_three_labels_at_k5_no_predictions(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 8: 1 label, no predictions.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=8)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=8)
def test_three_labels_at_k5(self):
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sparse_labels = _binary_2d_label_to_sparse_value(
[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]])
dense_labels = np.array([[2, 7, 8], [1, 2, 5]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=2)
      # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 1, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 1, class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0 / 1, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0 / 1, class_id=7)
# All classes: 6 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=3.0 / 6)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=3.0 / 6)
def test_three_labels_at_k5_some_out_of_range(self):
"""Tests that labels outside the [0, n_classes) count in denominator."""
predictions = [[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]]
top_k_predictions = [
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
]
sp_labels = sparse_tensor.SparseTensorValue(
indices=[[0, 0], [0, 1], [0, 2], [0, 3], [1, 0], [1, 1], [1, 2],
[1, 3]],
# values -1 and 10 are outside the [0, n_classes) range.
values=np.array([2, 7, -1, 8, 1, 2, 5, 10], np.int64),
dense_shape=[2, 4])
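    # Unlike precision, the out-of-range label values -1 and 10 still count in
    # the recall denominator: 3 of the 8 label entries are retrieved, giving
    # the 3 / 8 overall recall asserted below.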
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=2.0 / 2,
class_id=2)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=2.0 / 2,
class_id=2)
    # Class 5: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=1.0 / 1,
class_id=5)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=1.0 / 1,
class_id=5)
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions=predictions,
labels=sp_labels,
k=5,
expected=0.0 / 1,
class_id=7)
self._test_sparse_recall_at_top_k(
sp_labels,
top_k_predictions,
expected=0.0 / 1,
class_id=7)
# All classes: 8 labels, 3 correct.
self._test_streaming_sparse_recall_at_k(
predictions=predictions, labels=sp_labels, k=5, expected=3.0 / 8)
self._test_sparse_recall_at_top_k(
sp_labels, top_k_predictions, expected=3.0 / 8)
def test_3d_nan(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0], [0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0], [0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 0,3,4,6,9 have 0 labels, class 10 is out of range.
for class_id in (0, 3, 4, 6, 9, 10):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, class_id=class_id)
def test_3d_no_predictions(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
sparse_labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 1, 1, 0]]])
dense_labels = np.array(
[[[2, 7, 8], [1, 2, 5]], [
[1, 2, 5],
[2, 7, 8],
]], dtype=np.int64)
for labels in (sparse_labels, dense_labels):
# Classes 1,8 have 0 predictions, >=1 label.
for class_id in (1, 8):
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=0.0, class_id=class_id)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=0.0, class_id=class_id)
def test_3d(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 4 labels, all correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=4.0 / 4, class_id=2)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=4.0 / 4, class_id=2)
# Class 5: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=2.0 / 2, class_id=5)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=2.0 / 2, class_id=5)
# Class 7: 2 labels, 1 incorrect.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=1.0 / 2, class_id=7)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=1.0 / 2, class_id=7)
# All classes: 12 labels, 7 correct.
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=7.0 / 12)
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=7.0 / 12)
def test_3d_ignore_all(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
for class_id in xrange(10):
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=class_id,
weights=[[0, 0], [0, 0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0], [0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0], [0]])
self._test_streaming_sparse_recall_at_k(
predictions, labels, k=5, expected=NAN, weights=[[0, 0], [0, 0]])
self._test_sparse_recall_at_top_k(
labels, top_k_predictions, expected=NAN, weights=[[0, 0], [0, 0]])
def test_3d_ignore_some(self):
predictions = [[[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9],
[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6]],
[[0.3, 0.0, 0.7, 0.2, 0.4, 0.9, 0.5, 0.8, 0.1, 0.6],
[0.5, 0.1, 0.6, 0.3, 0.8, 0.0, 0.7, 0.2, 0.4, 0.9]]]
top_k_predictions = [[
[9, 4, 6, 2, 0],
[5, 7, 2, 9, 6],
], [
[5, 7, 2, 9, 6],
[9, 4, 6, 2, 0],
]]
labels = _binary_3d_label_to_sparse_value(
[[[0, 0, 1, 0, 0, 0, 0, 1, 1, 0],
[0, 1, 1, 0, 0, 1, 0, 0, 0, 0]],
[[0, 1, 1, 0, 0, 1, 0, 1, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 1, 0]]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[1], [0]])
# Class 2: 2 labels, both correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=2.0 / 2.0,
class_id=2,
weights=[[0], [1]])
# Class 7: 1 label, correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 1.0,
class_id=7,
weights=[[0], [1]])
# Class 7: 1 label, incorrect.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=0.0 / 1.0,
class_id=7,
weights=[[1], [0]])
# Class 7: 2 labels, 1 correct.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=1.0 / 2.0,
class_id=7,
weights=[[1, 0], [1, 0]])
# Class 7: No labels.
self._test_streaming_sparse_recall_at_k(
predictions,
labels,
k=5,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
self._test_sparse_recall_at_top_k(
labels,
top_k_predictions,
expected=NAN,
class_id=7,
weights=[[0, 1], [0, 1]])
def test_sparse_tensor_value(self):
predictions = [[0.1, 0.3, 0.2, 0.4],
[0.1, 0.2, 0.3, 0.4]]
labels = [[0, 0, 1, 0],
[0, 0, 0, 1]]
expected_recall = 0.5
with self.test_session():
_, recall = metrics.streaming_sparse_recall_at_k(
predictions=constant_op.constant(predictions, dtypes_lib.float32),
labels=_binary_2d_label_to_sparse_value(labels),
k=1)
variables.variables_initializer(variables.local_variables()).run()
self.assertEqual(expected_recall, recall.eval())
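

# Mean absolute error tests. For reference, the weighted single-update case
# below works out to (1 * |4 - 3| + 1 * |8 - 3|) / 2 = 3; the zero-weighted
# elements do not contribute.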
class StreamingMeanAbsoluteErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_absolute_error/count:0',
'mean_absolute_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_absolute_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
error, update_op = metrics.streaming_mean_absolute_error(predictions,
labels, weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(3, sess.run(update_op))
self.assertEqual(3, error.eval())


class StreamingMeanRelativeErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_relative_error/count:0',
'mean_relative_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_relative_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
normalizer=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
normalizer = random_ops.random_normal((10, 3), seed=3)
error, update_op = metrics.streaming_mean_relative_error(predictions,
labels, normalizer)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateNormalizedByLabels(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
np_labels = np.asarray([1, 3, 2, 3], dtype=np.float32)
expected_error = np.mean(
np.divide(np.absolute(np_predictions - np_labels), np_labels))
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(np_labels, shape=(1, 4))
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(expected_error, sess.run(update_op))
self.assertEqual(expected_error, error.eval())
def testSingleUpdateNormalizedByZeros(self):
np_predictions = np.asarray([2, 4, 6, 8], dtype=np.float32)
predictions = constant_op.constant(
np_predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_relative_error(
predictions, labels, normalizer=array_ops.zeros_like(labels))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0.0, sess.run(update_op))
self.assertEqual(0.0, error.eval())


class StreamingMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('mean_squared_error/count:0',
'mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
predictions = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
labels = array_ops.zeros((1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError(self):
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_squared_error(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(6, sess.run(update_op))
self.assertEqual(6, error.eval())
def testSingleUpdateWithErrorAndWeights(self):
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
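    # Weighted MSE: (1 * (4 - 3)**2 + 1 * (8 - 3)**2) / 2 = 26 / 2 = 13; the
    # zero-weighted elements do not contribute.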
error, update_op = metrics.streaming_mean_squared_error(predictions, labels,
weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(13, sess.run(update_op))
self.assertEqual(13, error.eval())
def testMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
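      # Squared errors: batch 1 gives 81 + 25 + 16 = 122, batch 2 gives
      # 36 + 1 + 49 = 86, so the cumulative MSE after both batches is 208 / 6.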
error, update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run(update_op)
self.assertAlmostEqual(208.0 / 6, sess.run(update_op), 5)
self.assertAlmostEqual(208.0 / 6, error.eval(), 5)
def testMetricsComputedConcurrently(self):
with self.test_session() as sess:
# Create the queue that populates one set of predictions.
preds_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue0, [10, 8, 6])
_enqueue_vector(sess, preds_queue0, [-4, 3, -1])
predictions0 = preds_queue0.dequeue()
# Create the queue that populates one set of predictions.
preds_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue1, [0, 1, 1])
_enqueue_vector(sess, preds_queue1, [1, 1, 0])
predictions1 = preds_queue1.dequeue()
# Create the queue that populates one set of labels.
labels_queue0 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue0, [1, 3, 2])
_enqueue_vector(sess, labels_queue0, [2, 4, 6])
labels0 = labels_queue0.dequeue()
# Create the queue that populates another set of labels.
labels_queue1 = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue1, [-5, -3, -1])
_enqueue_vector(sess, labels_queue1, [5, 4, 3])
labels1 = labels_queue1.dequeue()
mse0, update_op0 = metrics.streaming_mean_squared_error(
predictions0, labels0, name='msd0')
mse1, update_op1 = metrics.streaming_mean_squared_error(
predictions1, labels1, name='msd1')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1])
sess.run([update_op0, update_op1])
mse0, mse1 = sess.run([mse0, mse1])
self.assertAlmostEqual(208.0 / 6, mse0, 5)
self.assertAlmostEqual(79.0 / 6, mse1, 5)
def testMultipleMetricsOnMultipleBatchesOfSizeOne(self):
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, preds_queue, [10, 8, 6])
_enqueue_vector(sess, preds_queue, [-4, 3, -1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
2, dtypes=dtypes_lib.float32, shapes=(1, 3))
_enqueue_vector(sess, labels_queue, [1, 3, 2])
_enqueue_vector(sess, labels_queue, [2, 4, 6])
labels = labels_queue.dequeue()
mae, ma_update_op = metrics.streaming_mean_absolute_error(predictions,
labels)
mse, ms_update_op = metrics.streaming_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
sess.run([ma_update_op, ms_update_op])
sess.run([ma_update_op, ms_update_op])
self.assertAlmostEqual(32.0 / 6, mae.eval(), 5)
self.assertAlmostEqual(208.0 / 6, mse.eval(), 5)


class StreamingRootMeanSquaredErrorTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)), labels=array_ops.ones((10, 1)))
_assert_local_variables(self, ('root_mean_squared_error/count:0',
'root_mean_squared_error/total:0'))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_root_mean_squared_error(
predictions=array_ops.ones((10, 1)),
labels=array_ops.ones((10, 1)),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
0.0, shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(0.0, shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, rmse.eval())
def testSingleUpdateWithError(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(6), update_op.eval(), 5)
self.assertAlmostEqual(math.sqrt(6), rmse.eval(), 5)
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 3], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant([0, 1, 0, 1], shape=(1, 4))
rmse, update_op = metrics.streaming_root_mean_squared_error(predictions,
labels,
weights)
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(math.sqrt(13), sess.run(update_op))
self.assertAlmostEqual(math.sqrt(13), rmse.eval(), 5)
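

# Streaming covariance keeps a running count, the two running means and a
# co-moment (see the local variables asserted in testVars below); the expected
# values are taken from np.cov, using fweights for the weighted cases.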
class StreamingCovarianceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_local_variables(self, (
'covariance/comoment:0',
'covariance/count:0',
'covariance/mean_label:0',
'covariance/mean_prediction:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
cov, _ = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [cov])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_covariance(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
cov, update_op = metrics.streaming_covariance(predictions, labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_cov = cov.eval()
for _ in range(10):
self.assertEqual(initial_cov, cov.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op), 5)
self.assertAlmostEqual(expected_cov, cov.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(predictions, labels)
expected_cov = np.cov([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, update_op.eval())
self.assertAlmostEqual(expected_cov, cov.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2, 7], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 3, 1], shape=(1, 4), dtype=dtypes_lib.float32)
cov, update_op = metrics.streaming_covariance(
predictions, labels, weights=weights)
expected_cov = np.cov([2, 4, 6, 8],
[1, 3, 2, 7],
fweights=[0, 1, 3, 1])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_cov, sess.run(update_op))
self.assertAlmostEqual(expected_cov, cov.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
cov, update_op = metrics.streaming_covariance(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_cov = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_cov),
np.isnan(sess.run(cov, feed_dict=feed_dict)))
if not np.isnan(prev_expected_cov):
self.assertAlmostEqual(
prev_expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
expected_cov = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_cov, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_cov, sess.run(cov, feed_dict=feed_dict), 5)
prev_expected_cov = expected_cov
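

# Pearson correlation is covariance / sqrt(var(predictions) * var(labels));
# the expected values below are built the same way from np.cov, e.g.
# cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1]).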
class StreamingPearsonRTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]))
_assert_local_variables(self, (
'pearson_r/covariance/comoment:0',
'pearson_r/covariance/count:0',
'pearson_r/covariance/mean_label:0',
'pearson_r/covariance/mean_prediction:0',
'pearson_r/variance_labels/count:0',
'pearson_r/variance_labels/comoment:0',
'pearson_r/variance_labels/mean_label:0',
'pearson_r/variance_labels/mean_prediction:0',
'pearson_r/variance_predictions/comoment:0',
'pearson_r/variance_predictions/count:0',
'pearson_r/variance_predictions/mean_label:0',
'pearson_r/variance_predictions/mean_prediction:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
pearson_r, _ = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [pearson_r])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_pearson_correlation(
predictions=math_ops.to_float(math_ops.range(10)) + array_ops.ones(
[10, 10]),
labels=math_ops.to_float(math_ops.range(10)) + array_ops.ones([10, 10]),
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
labels = random_ops.random_normal((10, 3), seed=2)
predictions = labels * 0.5 + random_ops.random_normal((10, 3), seed=1) * 0.5
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_r = pearson_r.eval()
for _ in range(10):
self.assertEqual(initial_r, pearson_r.eval())
def testSingleUpdateIdentical(self):
with self.test_session() as sess:
predictions = math_ops.to_float(math_ops.range(10))
labels = math_ops.to_float(math_ops.range(10))
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef(np.arange(10), np.arange(10))[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op), 5)
self.assertAlmostEqual(expected_r, pearson_r.eval(), 5)
def testSingleUpdateNonIdentical(self):
with self.test_session() as sess:
predictions = constant_op.constant(
[2, 4, 6], shape=(1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
[1, 3, 2], shape=(1, 3), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(predictions,
labels)
expected_r = np.corrcoef([2, 4, 6], [1, 3, 2])[0, 1]
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, update_op.eval())
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testSingleUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
predictions = np.array([2, 4, 6, 8])
labels = np.array([1, 3, 2, 7])
weights = np.array([0, 1, 3, 1])
predictions_t = constant_op.constant(
predictions, shape=(1, 4), dtype=dtypes_lib.float32)
labels_t = constant_op.constant(
labels, shape=(1, 4), dtype=dtypes_lib.float32)
weights_t = constant_op.constant(
weights, shape=(1, 4), dtype=dtypes_lib.float32)
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
cmat = np.cov(predictions, labels, fweights=weights)
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(expected_r, sess.run(update_op))
self.assertAlmostEqual(expected_r, pearson_r.eval())
def testMultiUpdateWithErrorNoWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
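        # The streaming op accumulates statistics across batches, so the
        # expected value after each update is the correlation over all data
        # seen so far, not just over the current batch.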
expected_r = np.corrcoef(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)])[0, 1]
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndWeights(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
weights = np.tile(np.arange(n // 10), n // 10)
np.random.shuffle(weights)
stride = 10
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
prev_expected_r = NAN
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
self.assertEqual(np.isnan(prev_expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(prev_expected_r):
self.assertAlmostEqual(
prev_expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
self.assertAlmostEqual(
expected_r, sess.run(update_op, feed_dict=feed_dict), 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
prev_expected_r = expected_r
def testMultiUpdateWithErrorAndSingletonBatches(self):
with self.test_session() as sess:
np.random.seed(123)
n = 100
predictions = np.random.randn(n)
labels = 0.5 * predictions + np.random.randn(n)
stride = 10
weights = (np.arange(n).reshape(n//stride, stride) % stride == 0)
for row in weights:
np.random.shuffle(row)
# Now, weights is one-hot by row - one item per batch has non-zero weight.
weights = weights.reshape((n,))
predictions_t = array_ops.placeholder(dtypes_lib.float32, [stride])
labels_t = array_ops.placeholder(dtypes_lib.float32, [stride])
weights_t = array_ops.placeholder(dtypes_lib.float32, [stride])
pearson_r, update_op = metrics.streaming_pearson_correlation(
predictions_t, labels_t, weights=weights_t)
sess.run(variables.local_variables_initializer())
for i in range(n // stride):
feed_dict = {
predictions_t: predictions[stride * i:stride * (i + 1)],
labels_t: labels[stride * i:stride * (i + 1)],
weights_t: weights[stride * i:stride * (i + 1)]
}
cmat = np.cov(predictions[:stride * (i + 1)],
labels[:stride * (i + 1)],
fweights=weights[:stride * (i + 1)])
expected_r = cmat[0, 1] / np.sqrt(cmat[0, 0] * cmat[1, 1])
actual_r = sess.run(update_op, feed_dict=feed_dict)
self.assertEqual(np.isnan(expected_r), np.isnan(actual_r))
self.assertEqual(np.isnan(expected_r),
np.isnan(sess.run(pearson_r, feed_dict=feed_dict)))
if not np.isnan(expected_r):
self.assertAlmostEqual(
expected_r, actual_r, 5)
self.assertAlmostEqual(
expected_r, sess.run(pearson_r, feed_dict=feed_dict), 5)
class StreamingMeanCosineDistanceTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1)
_assert_local_variables(self, (
'mean_cosine_distance/count:0',
'mean_cosine_distance/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_cosine_distance(
predictions=array_ops.ones((10, 3)),
labels=array_ops.ones((10, 3)),
dim=1,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testValueTensorIsIdempotent(self):
predictions = random_ops.random_normal((10, 3), seed=1)
labels = random_ops.random_normal((10, 3), seed=2)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_error = error.eval()
for _ in range(10):
self.assertEqual(initial_error, error.eval())
def testSingleUpdateZeroError(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(1, 3, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithError1(self):
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
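    # The per-row cosine similarities are 1, -1 and 0, so the distances
    # (1 - similarity) are 0, 2 and 1 and the mean distance is 1.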
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1, sess.run(update_op), 5)
self.assertAlmostEqual(1, error.eval(), 5)
def testSingleUpdateWithError2(self):
np_predictions = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'-0.665139432070255 -0.739487441769973 -0.103671883216994;'
'0.707106781186548 -0.707106781186548 0'))
np_labels = np.matrix(
('0.819031913261206 0.567041924552012 0.087465312324590;'
'0.665139432070255 0.739487441769973 0.103671883216994;'
'0.707106781186548 0.707106781186548 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertAlmostEqual(1.0, sess.run(update_op), 5)
self.assertAlmostEqual(1.0, error.eval(), 5)
def testSingleUpdateWithErrorAndWeights1(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0], shape=(3, 1, 1), dtype=dtypes_lib.float32)
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(0, sess.run(update_op))
self.assertEqual(0, error.eval())
def testSingleUpdateWithErrorAndWeights2(self):
np_predictions = np.matrix(('1 0 0;' '0 0 -1;' '1 0 0'))
np_labels = np.matrix(('1 0 0;' '0 0 1;' '0 1 0'))
predictions = constant_op.constant(
np_predictions, shape=(3, 1, 3), dtype=dtypes_lib.float32)
labels = constant_op.constant(
np_labels, shape=(3, 1, 3), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[0, 1, 1], shape=(3, 1, 1), dtype=dtypes_lib.float32)
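    # Only the last two rows carry weight; their cosine distances are 2 and
    # 1, so the weighted mean distance is 1.5.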
error, update_op = metrics.streaming_mean_cosine_distance(
predictions, labels, dim=2, weights=weights)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1.5, update_op.eval())
self.assertEqual(1.5, error.eval())
class PcntBelowThreshTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_percentage_less(values=array_ops.ones((10,)), threshold=2)
_assert_local_variables(self, (
'percentage_below_threshold/count:0',
'percentage_below_threshold/total:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
mean, _ = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_percentage_less(
values=array_ops.ones((10,)),
threshold=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, name='low')
sess.run(variables.local_variables_initializer())
sess.run([update_op0, update_op1, update_op2])
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.75, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
def testSomePresentOneUpdate(self):
with self.test_session() as sess:
values = constant_op.constant(
[2, 4, 6, 8], shape=(1, 4), dtype=dtypes_lib.float32)
weights = constant_op.constant(
[1, 0, 0, 1], shape=(1, 4), dtype=dtypes_lib.float32)
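      # Only the values 2 and 8 carry weight, so the expected fractions
      # below the thresholds 100, 7 and 1 are 1.0, 0.5 and 0.0.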
pcnt0, update_op0 = metrics.streaming_percentage_less(
values, 100, weights=weights, name='high')
pcnt1, update_op1 = metrics.streaming_percentage_less(
values, 7, weights=weights, name='medium')
pcnt2, update_op2 = metrics.streaming_percentage_less(
values, 1, weights=weights, name='low')
sess.run(variables.local_variables_initializer())
self.assertListEqual([1.0, 0.5, 0.0],
sess.run([update_op0, update_op1, update_op2]))
pcnt0, pcnt1, pcnt2 = sess.run([pcnt0, pcnt1, pcnt2])
self.assertAlmostEqual(1.0, pcnt0, 5)
self.assertAlmostEqual(0.5, pcnt1, 5)
self.assertAlmostEqual(0.0, pcnt2, 5)
class StreamingMeanIOUTest(test.TestCase):
def setUp(self):
np.random.seed(1)
ops.reset_default_graph()
def testVars(self):
metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2)
_assert_local_variables(self, ('mean_iou/total_confusion_matrix:0',))
def testMetricsCollections(self):
my_collection_name = '__metrics__'
mean_iou, _ = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [mean_iou])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_mean_iou(
predictions=array_ops.ones([10, 1]),
labels=array_ops.ones([10, 1]),
num_classes=2,
updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testPredictionsAndLabelsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10, 3])
labels = array_ops.ones([10, 4])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(predictions, labels, num_classes=2)
def testLabelsAndWeightsOfDifferentSizeRaisesValueError(self):
predictions = array_ops.ones([10])
labels = array_ops.ones([10])
weights = array_ops.zeros([9])
with self.assertRaises(ValueError):
metrics.streaming_mean_iou(
predictions, labels, num_classes=2, weights=weights)
def testValueTensorIsIdempotent(self):
num_classes = 3
predictions = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=1)
labels = random_ops.random_uniform(
[10], maxval=num_classes, dtype=dtypes_lib.int64, seed=2)
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes=num_classes)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
# Run several updates.
for _ in range(10):
sess.run(update_op)
# Then verify idempotency.
initial_miou = miou.eval()
for _ in range(10):
self.assertEqual(initial_miou, miou.eval())
def testMultipleUpdates(self):
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [2])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [2])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
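      # Per-class IOU is TP / (TP + FP + FN): 1/2 for class 0, 1/4 for
      # class 1 and 0 for class 2; mean_iou averages these values.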
desired_output = np.mean([1.0 / 2.0, 1.0 / 4.0, 0.])
self.assertEqual(desired_output, miou.eval())
def testMultipleUpdatesWithWeights(self):
num_classes = 2
with self.test_session() as sess:
# Create the queue that populates the predictions.
preds_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
labels_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
# Create the queue that populates the weights.
weights_queue = data_flow_ops.FIFOQueue(
6, dtypes=dtypes_lib.float32, shapes=(1, 1))
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
_enqueue_vector(sess, weights_queue, [1.0])
_enqueue_vector(sess, weights_queue, [0.0])
weights = weights_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
for _ in range(6):
sess.run(update_op)
desired_output = np.mean([2.0 / 3.0, 1.0 / 2.0])
self.assertAlmostEqual(desired_output, miou.eval())
def testMultipleUpdatesWithMissingClass(self):
    # Test the case where there are no predictions and labels for
# one class, and thus there is one row and one column with
# zero entries in the confusion matrix.
num_classes = 3
with self.test_session() as sess:
# Create the queue that populates the predictions.
# There is no prediction for class 2.
preds_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, preds_queue, [0])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [1])
_enqueue_vector(sess, preds_queue, [0])
predictions = preds_queue.dequeue()
# Create the queue that populates the labels.
      # There is no label for class 2.
labels_queue = data_flow_ops.FIFOQueue(
5, dtypes=dtypes_lib.int32, shapes=(1, 1))
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [1])
_enqueue_vector(sess, labels_queue, [0])
_enqueue_vector(sess, labels_queue, [1])
labels = labels_queue.dequeue()
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
for _ in range(5):
sess.run(update_op)
desired_output = np.mean([1.0 / 3.0, 2.0 / 4.0, 0.])
self.assertAlmostEqual(desired_output, miou.eval())
def testUpdateOpEvalIsAccumulatedConfusionMatrix(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
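      # For streaming_mean_iou the update op evaluates to the accumulated
      # confusion matrix, while the value tensor reduces it to the mean IOU.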
confusion_matrix = update_op.eval()
self.assertAllEqual([[3, 0], [2, 5]], confusion_matrix)
desired_miou = np.mean([3. / 5., 5. / 7.])
self.assertAlmostEqual(desired_miou, miou.eval())
def testAllCorrect(self):
predictions = array_ops.zeros([40])
labels = array_ops.zeros([40])
num_classes = 1
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertEqual(40, update_op.eval()[0])
self.assertEqual(1.0, miou.eval())
def testAllWrong(self):
predictions = array_ops.zeros([40])
labels = array_ops.ones([40])
num_classes = 2
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(predictions, labels,
num_classes)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[0, 0], [40, 0]], update_op.eval())
self.assertEqual(0., miou.eval())
def testResultsWithSomeMissing(self):
predictions = array_ops.concat(
[
constant_op.constant(
0, shape=[5]), constant_op.constant(
1, shape=[5])
],
0)
labels = array_ops.concat(
[
constant_op.constant(
0, shape=[3]), constant_op.constant(
1, shape=[7])
],
0)
num_classes = 2
weights = array_ops.concat(
[
constant_op.constant(
0, shape=[1]), constant_op.constant(
1, shape=[8]), constant_op.constant(
0, shape=[1])
],
0)
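    # The zero weights drop the first and the last sample; the remaining
    # samples give the confusion matrix [[2, 0], [2, 4]] and per-class IOUs
    # of 2/4 and 4/6.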
with self.test_session() as sess:
miou, update_op = metrics.streaming_mean_iou(
predictions, labels, num_classes, weights=weights)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([[2, 0], [2, 4]], update_op.eval())
desired_miou = np.mean([2. / 4., 4. / 6.])
self.assertAlmostEqual(desired_miou, miou.eval())
class StreamingConcatTest(test.TestCase):
def setUp(self):
ops.reset_default_graph()
def testVars(self):
metrics.streaming_concat(values=array_ops.ones((10,)))
_assert_local_variables(self, (
'streaming_concat/array:0',
'streaming_concat/size:0',))
def testMetricsCollection(self):
my_collection_name = '__metrics__'
value, _ = metrics.streaming_concat(
values=array_ops.ones((10,)), metrics_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [value])
def testUpdatesCollection(self):
my_collection_name = '__updates__'
_, update_op = metrics.streaming_concat(
values=array_ops.ones((10,)), updates_collections=[my_collection_name])
self.assertListEqual(ops.get_collection(my_collection_name), [update_op])
def testNextArraySize(self):
next_array_size = metric_ops._next_array_size # pylint: disable=protected-access
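    # _next_array_size rounds the requested size up to the next power of
    # the growth factor, so the underlying buffer grows geometrically.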
with self.test_session():
self.assertEqual(next_array_size(2, growth_factor=2).eval(), 2)
self.assertEqual(next_array_size(3, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(4, growth_factor=2).eval(), 4)
self.assertEqual(next_array_size(5, growth_factor=2).eval(), 8)
self.assertEqual(next_array_size(6, growth_factor=2).eval(), 8)
def testStreamingConcat(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([0, 1, 2, 3, 4], concatenated.eval())
sess.run([update_op], feed_dict={values: [5, 6, 7, 8, 9]})
self.assertAllEqual(np.arange(10), concatenated.eval())
def testStreamingConcatStringValues(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.string, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertItemsEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: ['a', 'b', 'c']})
self.assertItemsEqual([b'a', b'b', b'c'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['d', 'e']})
self.assertItemsEqual([b'a', b'b', b'c', b'd', b'e'], concatenated.eval())
sess.run([update_op], feed_dict={values: ['f', 'g', 'h', 'i', 'j']})
self.assertItemsEqual(
[b'a', b'b', b'c', b'd', b'e', b'f', b'g', b'h', b'i', b'j'],
concatenated.eval())
def testStreamingConcatMaxSize(self):
with self.test_session() as sess:
values = math_ops.range(3)
concatenated, update_op = metrics.streaming_concat(values, max_size=5)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2], concatenated.eval())
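      # With max_size=5 the concatenation is capped: the second update only
      # partially fits and later updates are dropped entirely.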
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
sess.run([update_op])
self.assertAllEqual([0, 1, 2, 0, 1], concatenated.eval())
def testStreamingConcat2D(self):
with self.test_session() as sess:
values = array_ops.reshape(math_ops.range(3), (3, 1))
concatenated, update_op = metrics.streaming_concat(values, axis=-1)
sess.run(variables.local_variables_initializer())
for _ in range(10):
sess.run([update_op])
self.assertAllEqual([[0] * 10, [1] * 10, [2] * 10], concatenated.eval())
def testStreamingConcatErrors(self):
with self.assertRaises(ValueError):
metrics.streaming_concat(array_ops.placeholder(dtypes_lib.float32))
values = array_ops.zeros((2, 3))
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=-3, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(values, axis=2, max_size=3)
with self.assertRaises(ValueError):
metrics.streaming_concat(
array_ops.placeholder(dtypes_lib.float32, [None, None]))
def testStreamingConcatReset(self):
with self.test_session() as sess:
values = array_ops.placeholder(dtypes_lib.int32, [None])
concatenated, update_op = metrics.streaming_concat(values)
sess.run(variables.local_variables_initializer())
self.assertAllEqual([], concatenated.eval())
sess.run([update_op], feed_dict={values: [0, 1, 2]})
self.assertAllEqual([0, 1, 2], concatenated.eval())
sess.run(variables.local_variables_initializer())
sess.run([update_op], feed_dict={values: [3, 4]})
self.assertAllEqual([3, 4], concatenated.eval())
class AggregateMetricsTest(test.TestCase):
def testAggregateNoMetricsRaisesValueError(self):
with self.assertRaises(ValueError):
metrics.aggregate_metrics()
def testAggregateSingleMetricReturnsOneItemLists(self):
values = array_ops.ones((10, 4))
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean(values))
self.assertEqual(len(value_tensors), 1)
self.assertEqual(len(update_ops), 1)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(1, update_ops[0].eval())
self.assertEqual(1, value_tensors[0].eval())
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
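    # With predictions of 1 and labels of 3, the absolute error is 2 and
    # the squared error is 4 for every element.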
value_tensors, update_ops = metrics.aggregate_metrics(
metrics.streaming_mean_absolute_error(predictions, labels),
metrics.streaming_mean_squared_error(predictions, labels))
self.assertEqual(len(value_tensors), 2)
self.assertEqual(len(update_ops), 2)
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, update_ops[0].eval())
self.assertEqual(4, update_ops[1].eval())
self.assertEqual(2, value_tensors[0].eval())
self.assertEqual(4, value_tensors[1].eval())
class AggregateMetricMapTest(test.TestCase):
def testAggregateMultipleMetricsReturnsListsInOrder(self):
predictions = array_ops.ones((10, 4))
labels = array_ops.ones((10, 4)) * 3
names_to_values, names_to_updates = metrics.aggregate_metric_map({
'm1': metrics.streaming_mean_absolute_error(predictions, labels),
'm2': metrics.streaming_mean_squared_error(predictions, labels),
})
self.assertEqual(2, len(names_to_values))
self.assertEqual(2, len(names_to_updates))
with self.test_session() as sess:
sess.run(variables.local_variables_initializer())
self.assertEqual(2, names_to_updates['m1'].eval())
self.assertEqual(4, names_to_updates['m2'].eval())
self.assertEqual(2, names_to_values['m1'].eval())
self.assertEqual(4, names_to_values['m2'].eval())
if __name__ == '__main__':
test.main()
| apache-2.0 |
KeyWeeUsr/kivy | kivy/tests/test_doc_gallery.py | 17 | 1278 | from doc.gallery import *
def test_parse_docstring_info():
assert 'error' in parse_docstring_info("No Docstring")
assert 'error' in parse_docstring_info("'''No Docstring Title'''")
assert 'error' in parse_docstring_info(
"'''No Sentence\n======\nPeriods'''"
)
assert 'error' in parse_docstring_info(
"'\nSingle Quotes\n===\n\nNo singles.'")
d = parse_docstring_info("""'''
3D Rendering Monkey Head
========================
This example demonstrates using OpenGL to display a
rotating monkey head. This
includes loading a Blender OBJ file, shaders written in OpenGL's Shading
Language (GLSL), and using scheduled callbacks.
    The file monkey.obj is an OBJ file output from the Blender free 3D creation
software. The file is text, listing vertices and faces. It is loaded
into a scene using objloader.py's ObjFile class. The file simple.glsl is
a simple vertex and fragment shader written in GLSL.
'''
blah blah
blah blah
""")
assert 'error' not in d
assert '3D Rendering' in d['docstring'] and \
'This example' in d['docstring']
assert '3D Rendering' in d['title']
assert 'monkey head' in d['first_sentence']
if __name__ == '__main__':
test_parse_docstring_info()
| mit |
jtyr/ansible-modules-core | cloud/openstack/_quantum_floating_ip_associate.py | 6 | 8225 | #!/usr/bin/python
#coding: utf-8 -*-
# (c) 2013, Benno Joy <[email protected]>
#
# This module is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This software is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this software. If not, see <http://www.gnu.org/licenses/>.
import time
try:
from novaclient.v1_1 import client as nova_client
try:
from neutronclient.neutron import client
except ImportError:
from quantumclient.quantum import client
from keystoneclient.v2_0 import client as ksclient
HAVE_DEPS = True
except ImportError:
HAVE_DEPS = False
DOCUMENTATION = '''
---
module: quantum_floating_ip_associate
version_added: "1.2"
author: "Benno Joy (@bennojoy)"
deprecated: Deprecated in 2.0. Use os_floating_ip instead
short_description: Associate or disassociate a particular floating IP with an instance
description:
- Associates or disassociates a specific floating IP with a particular instance
options:
login_username:
description:
- login username to authenticate to keystone
required: true
default: admin
login_password:
description:
- password of login user
required: true
default: 'yes'
login_tenant_name:
description:
- the tenant name of the login user
required: true
default: true
auth_url:
description:
- the keystone url for authentication
required: false
default: 'http://127.0.0.1:35357/v2.0/'
region_name:
description:
- name of the region
required: false
default: None
state:
description:
- indicates the desired state of the resource
choices: ['present', 'absent']
default: present
instance_name:
description:
- name of the instance to which the public IP should be assigned
required: true
default: None
ip_address:
description:
- floating ip that should be assigned to the instance
required: true
default: None
requirements:
- "python >= 2.6"
- "python-novaclient"
- "python-neutronclient or python-quantumclient"
- "python-keystoneclient"
'''
EXAMPLES = '''
- name: Associate a specific floating IP with an Instance
quantum_floating_ip_associate:
state: present
login_username: admin
login_password: admin
login_tenant_name: admin
ip_address: 1.1.1.1
instance_name: vm1
'''
def _get_ksclient(module, kwargs):
try:
kclient = ksclient.Client(username=kwargs.get('login_username'),
password=kwargs.get('login_password'),
tenant_name=kwargs.get('login_tenant_name'),
auth_url=kwargs.get('auth_url'))
except Exception as e:
module.fail_json(msg = "Error authenticating to the keystone: %s " % e.message)
global _os_keystone
_os_keystone = kclient
return kclient
def _get_endpoint(module, ksclient):
try:
endpoint = ksclient.service_catalog.url_for(service_type='network', endpoint_type='publicURL')
except Exception as e:
module.fail_json(msg = "Error getting network endpoint: %s" % e.message)
return endpoint
def _get_neutron_client(module, kwargs):
_ksclient = _get_ksclient(module, kwargs)
token = _ksclient.auth_token
endpoint = _get_endpoint(module, _ksclient)
kwargs = {
'token': token,
'endpoint_url': endpoint
}
try:
neutron = client.Client('2.0', **kwargs)
except Exception as e:
module.fail_json(msg = "Error in connecting to neutron: %s " % e.message)
return neutron
def _get_server_state(module, nova):
server_info = None
server = None
try:
for server in nova.servers.list():
if server:
info = server._info
if info['name'] == module.params['instance_name']:
if info['status'] != 'ACTIVE' and module.params['state'] == 'present':
module.fail_json(msg="The VM is available but not Active. state:" + info['status'])
server_info = info
break
except Exception as e:
module.fail_json(msg = "Error in getting the server list: %s" % e.message)
return server_info, server
def _get_port_id(neutron, module, instance_id):
kwargs = dict(device_id = instance_id)
try:
ports = neutron.list_ports(**kwargs)
except Exception as e:
module.fail_json( msg = "Error in listing ports: %s" % e.message)
if not ports['ports']:
return None
return ports['ports'][0]['id']
def _get_floating_ip_id(module, neutron):
kwargs = {
'floating_ip_address': module.params['ip_address']
}
try:
ips = neutron.list_floatingips(**kwargs)
except Exception as e:
module.fail_json(msg = "error in fetching the floatingips's %s" % e.message)
if not ips['floatingips']:
module.fail_json(msg = "Could find the ip specified in parameter, Please check")
ip = ips['floatingips'][0]['id']
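    # A floating IP without a port_id is not bound to any instance, so it
    # is reported as 'detached'; otherwise it is 'attached'.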
if not ips['floatingips'][0]['port_id']:
state = "detached"
else:
state = "attached"
return state, ip
def _update_floating_ip(neutron, module, port_id, floating_ip_id):
kwargs = {
'port_id': port_id
}
try:
result = neutron.update_floatingip(floating_ip_id, {'floatingip': kwargs})
except Exception as e:
module.fail_json(msg = "There was an error in updating the floating ip address: %s" % e.message)
module.exit_json(changed = True, result = result, public_ip=module.params['ip_address'])
def main():
argument_spec = openstack_argument_spec()
argument_spec.update(dict(
ip_address = dict(required=True),
instance_name = dict(required=True),
state = dict(default='present', choices=['absent', 'present'])
))
module = AnsibleModule(argument_spec=argument_spec)
if not HAVE_DEPS:
module.fail_json(msg='python-novaclient, python-keystoneclient, and either python-neutronclient or python-quantumclient are required')
try:
nova = nova_client.Client(module.params['login_username'], module.params['login_password'],
module.params['login_tenant_name'], module.params['auth_url'], service_type='compute')
except Exception as e:
module.fail_json( msg = " Error in authenticating to nova: %s" % e.message)
neutron = _get_neutron_client(module, module.params)
state, floating_ip_id = _get_floating_ip_id(module, neutron)
if module.params['state'] == 'present':
if state == 'attached':
module.exit_json(changed = False, result = 'attached', public_ip=module.params['ip_address'])
server_info, server_obj = _get_server_state(module, nova)
if not server_info:
module.fail_json(msg = " The instance name provided cannot be found")
port_id = _get_port_id(neutron, module, server_info['id'])
if not port_id:
module.fail_json(msg = "Cannot find a port for this instance, maybe fixed ip is not assigned")
_update_floating_ip(neutron, module, port_id, floating_ip_id)
if module.params['state'] == 'absent':
if state == 'detached':
module.exit_json(changed = False, result = 'detached')
if state == 'attached':
_update_floating_ip(neutron, module, None, floating_ip_id)
module.exit_json(changed = True, result = "detached")
# this is magic, see lib/ansible/module_common.py
from ansible.module_utils.basic import *
from ansible.module_utils.openstack import *
if __name__ == '__main__':
main()
| gpl-3.0 |
boada/photometrypipeline | pp_combine.py | 2 | 13483 | #!/usr/bin/env python3
""" PP_COMBINE - combine frames based on wcs
v1.0: 2017-10-03, [email protected]
"""
from __future__ import print_function, division
# Photometry Pipeline
# Copyright (C) 2016-2018 Michael Mommert, [email protected]
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see
# <http://www.gnu.org/licenses/>.
import numpy
import os
import sys
import shutil
import logging
import subprocess
import argparse
import shlex
import time
from astropy.io import fits
from past.utils import old_div
from astroquery.jplhorizons import Horizons
# pipeline-specific modules
import _pp_conf
import toolbox
# create a portable DEVNULL
# necessary to prevent subprocess.PIPE and STDOUT from clogging if
# Source Extractor runs for too long
try:
from subprocess import DEVNULL # Py3
except ImportError:
import os # Py2
DEVNULL = open(os.devnull, 'wb')
# only import if Python3 is used
if sys.version_info > (3, 0):
from builtins import str
# setup logging
logging.basicConfig(filename=_pp_conf.log_filename,
level=_pp_conf.log_level,
format=_pp_conf.log_formatline,
datefmt=_pp_conf.log_datefmt)
def combine(filenames, obsparam, comoving, targetname,
manual_rates, combine_method, keep_files,
backsub=False, display=True, diagnostics=True):
"""
image combination wrapper
output: diagnostic properties
"""
# start logging
logging.info('starting image combination with parameters: %s' %
(', '.join([('%s: %s' % (var, str(val))) for
var, val in list(locals().items())])))
# check if images have been run through pp_prepare
try:
midtime_jd = fits.open(filenames[0], verify='silentfix',
ignore_missing_end=True)[0].header['MIDTIMJD']
except KeyError:
raise KeyError(('%s image header incomplete, have the data run ' +
'through pp_prepare?') % filenames[0])
return None
# adopt first frame as reference frame
hdulist = fits.open(filenames[0])
header = hdulist[0].header
refdate = float(header['MIDTIMJD'])
# read out ra and dec from header
if obsparam['radec_separator'] == 'XXX':
ref_ra_deg = float(header[obsparam['ra']])
ref_dec_deg = float(header[obsparam['dec']])
if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
ref_ra_deg = ref_ra_deg/24.*360. - 795/3600.
ref_dec_deg -= 795/3600.
else:
ra_string = header[obsparam['ra']].split(
obsparam['radec_separator'])
dec_string = header[obsparam['dec']].split(
obsparam['radec_separator'])
ref_ra_deg = 15.*(float(ra_string[0]) +
old_div(float(ra_string[1]), 60.) +
old_div(float(ra_string[2]), 3600.))
ref_dec_deg = (abs(float(dec_string[0])) +
old_div(float(dec_string[1]), 60.) +
old_div(float(dec_string[2]), 3600.))
if dec_string[0].find('-') > -1:
ref_dec_deg = -1 * ref_dec_deg
if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
ref_ra_deg = ref_ra_deg/24.*360.
if obsparam['telescope_keyword'] == "UKIRTWFCAM":
ref_ra_deg -= float(header['TRAOFF'])/3600
ref_dec_deg -= float(header['TDECOFF'])/3600
hdulist.close()
# modify individual frames if comoving == True
if comoving:
movingfilenames = []
# sort filenames by MIDTIMJD
mjds = []
for filename in filenames:
hdulist = fits.open(filename)
mjds.append(float(hdulist[0].header['MIDTIMJD']))
filenames = [filenames[i] for i in numpy.argsort(mjds)]
for filename in filenames:
movingfilename = filename[:filename.find('.fits')]+'_moving.fits'
print('shifting %s -> %s' % (filename, movingfilename))
logging.info('shifting %s -> %s' % (filename, movingfilename))
# read out date and pointing information
hdulist = fits.open(filename)
header = hdulist[0].header
date = hdulist[0].header['MIDTIMJD']
data = hdulist[0].data
hdulist.close()
# use ephemerides from Horizons if no manual rates are provided
if manual_rates is None:
# call HORIZONS to get target coordinates
obj = Horizons(targetname.replace('_', ' '), epochs=date,
location=str(obsparam['observatory_code']))
try:
eph = obj.ephemerides()
n = len(eph)
except ValueError:
print('Target (%s) not an asteroid' % targetname)
logging.warning('Target (%s) not an asteroid' % targetname)
n = None
time.sleep(0.5)
if n is None or n == 0:
logging.warning('WARNING: No position from Horizons!' +
'Name (%s) correct?' % targetname)
                    logging.warning('HORIZONS call: %s' % obj.uri)
                    raise ValueError('no Horizons ephemerides available')
else:
logging.info('ephemerides for %s pulled from Horizons' %
targetname)
logging.info('Horizons call: %s' %
obj.uri)
target_ra, target_dec = eph[0]['RA'], eph[0]['DEC']
# get image pointing from header
if obsparam['radec_separator'] == 'XXX':
ra_deg = float(header[obsparam['ra']])
dec_deg = float(header[obsparam['dec']])
if obsparam['telescope_keyword'] == 'UKIRTWFCAM':
ra_deg = ra_deg/24.*360. - 795/3600.
dec_deg -= 795/3600.
else:
ra_string = header[obsparam['ra']].split(
obsparam['radec_separator'])
dec_string = header[obsparam['dec']].split(
obsparam['radec_separator'])
ra_deg = 15.*(float(ra_string[0]) +
old_div(float(ra_string[1]), 60.) +
old_div(float(ra_string[2]), 3600.))
dec_deg = (abs(float(dec_string[0])) +
old_div(float(dec_string[1]), 60.) +
old_div(float(dec_string[2]), 3600.))
if dec_string[0].find('-') > -1:
dec_deg = -1 * dec_deg
if filename == filenames[0]:
ref_offset_ra = target_ra - ref_ra_deg
ref_offset_dec = target_dec - ref_dec_deg
offset_ra = target_ra - ref_ra_deg - ref_offset_ra
offset_dec = target_dec - ref_dec_deg - ref_offset_dec
else:
# use manual rates (since they are provided)
offset_ra = ((float(header['MIDTIMJD'])-refdate)*86400 *
float(manual_rates[0]))/3600
offset_dec = ((float(header['MIDTIMJD'])-refdate)*86400 *
float(manual_rates[1]))/3600
logging.info('offsets in RA and Dec: %f, %f arcsec' %
(offset_ra*3600, offset_dec*3600))
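            # Shifting CRVAL1/2 by the target's apparent motion relative to
            # the reference frame registers each image in the comoving frame
            # of the target, so the subsequent coaddition stacks on it.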
crval1 = float(header['CRVAL1'])
crval2 = float(header['CRVAL2'])
# write new CRVALi keywords in different file
new_hdu = fits.PrimaryHDU(data)
new_hdu.header = header
new_hdu.header['CRVAL1'] = (crval1-offset_ra,
'updated in the moving frame of the object')
new_hdu.header['CRVAL2'] = (crval2-offset_dec,
'updated in the moving frame of the object')
movingfilenames.append(movingfilename)
new_hdu.writeto(movingfilename, overwrite=True,
output_verify='silentfix')
if comoving:
outfile_name = 'comove.fits'
fileline = " ".join(movingfilenames)
n_frames = len(movingfilenames)
else:
outfile_name = 'skycoadd.fits'
fileline = " ".join(filenames)
n_frames = len(filenames)
    # run SWarp to resample all frames onto a common grid and combine them
commandline = (('swarp -combine Y -combine_type %s -delete_tmpfiles ' +
'Y -imageout_name %s -interpolate Y -subtract_back %s ' +
'-weight_type NONE -copy_keywords %s -write_xml N ' +
'-CENTER_TYPE MOST %s') %
({'median': 'MEDIAN', 'average': 'AVERAGE',
'clipped': 'CLIPPED -CLIP_AMPFRAC 0.2 -CLIP_SIGMA 0.1 '}
[combine_method],
outfile_name,
{True: 'Y', False: 'N'}[backsub],
obsparam['copy_keywords'], fileline))
logging.info('call SWARP as: %s' % commandline)
print('running SWARP to combine {:d} frames...'.format(n_frames))
try:
swarp = subprocess.Popen(shlex.split(commandline),
stdout=DEVNULL,
stderr=DEVNULL,
close_fds=True)
# do not direct stdout to subprocess.PIPE:
# for large FITS files, PIPE will clog, stalling
# subprocess.Popen
except Exception as e:
print('SWARP call:', (e))
logging.error('SWARP call:', (e))
return None
swarp.wait()
print('done!')
# remove files that are not needed anymore
if not keep_files:
if comoving:
for filename in movingfilenames:
os.remove(filename)
# update combined image header
total_exptime = 0
for filename in filenames:
hdulist = fits.open(filename)
total_exptime += float(hdulist[0].header[obsparam['exptime']])
hdulist = fits.open(outfile_name, mode='update')
hdulist[0].header[obsparam['exptime']] = (total_exptime, 'PP: cumulative')
hdulist[0].header['COMBO_N'] = (len(filenames), 'PP: N files combo')
hdulist[0].header['COMBO_M'] = (combine_method, 'PP: combo method')
hdulist[0].header['COMOVE'] = (str(comoving), 'PP: comoving?')
hdulist.flush()
return n_frames
if __name__ == '__main__':
# command line arguments
parser = argparse.ArgumentParser(description='image combination')
parser.add_argument("-comoving", action="store_true",
help='combine in moving target frame')
parser.add_argument("-targetname",
help='moving target name')
parser.add_argument("-manual_rates", help='manual rates in arcsec/s',
nargs=2)
parser.add_argument('-method',
help='combination method',
choices=['average', 'median', 'clipped'],
default='clipped')
parser.add_argument("-backsub", action="store_true",
help='subtract background in each frame ')
parser.add_argument("-keep_files", action="store_true",
help='keep intermediate files', default=False)
parser.add_argument('images', help='images to process', nargs='+')
args = parser.parse_args()
comoving = args.comoving
targetname = args.targetname
manual_rates = args.manual_rates
combine_method = args.method
backsub = args.backsub
keep_files = args.keep_files
filenames = args.images
# read telescope and filter information from fits headers
# check that they are the same for all images
instruments = []
for filename in filenames:
hdulist = fits.open(filename, ignore_missing_end=True,
verify='silentfix')
header = hdulist[0].header
for key in _pp_conf.instrument_keys:
if key in header:
instruments.append(header[key])
if len(instruments) == 0:
raise KeyError('cannot identify telescope/instrument; please update'
'_pp_conf.instrument_keys accordingly')
# assign telescope parameters (telescopes.py)
telescope = _pp_conf.instrument_identifiers[instruments[0]]
obsparam = _pp_conf.telescope_parameters[telescope]
if manual_rates is not None:
comoving = True
if comoving and targetname is None:
targetname = header[obsparam['object']]
# run image combination wrapper
combination = combine(filenames, obsparam, comoving, targetname,
manual_rates, combine_method, keep_files,
backsub, display=True, diagnostics=True)
| gpl-3.0 |
aarsan/azure-sdk-for-python | azure-servicemanagement-legacy/azure/servicemanagement/_common_error.py | 13 | 1505 | #-------------------------------------------------------------------------
# Copyright (c) Microsoft. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#--------------------------------------------------------------------------
from azure.common import (
AzureHttpError,
)
_ERROR_CONFLICT = 'Conflict ({0})'
_ERROR_NOT_FOUND = 'Not found ({0})'
_ERROR_UNKNOWN = 'Unknown error ({0})'
_ERROR_VALUE_NONE = '{0} should not be None.'
_ERROR_ASYNC_OP_FAILURE = 'Asynchronous operation did not succeed.'
_ERROR_ASYNC_OP_TIMEOUT = 'Timed out waiting for async operation to complete.'
def _general_error_handler(http_error):
''' Simple error handler for azure.'''
message = str(http_error)
if http_error.respbody is not None:
message += '\n' + http_error.respbody.decode('utf-8-sig')
raise AzureHttpError(message, http_error.status)
def _validate_not_none(param_name, param):
if param is None:
raise ValueError(_ERROR_VALUE_NONE.format(param_name))
| apache-2.0 |
Nitaco/ansible | lib/ansible/modules/network/f5/bigip_device_group_member.py | 8 | 7464 | #!/usr/bin/python
# -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = r'''
---
module: bigip_device_group_member
short_description: Manages members in a device group
description:
- Manages members in a device group. Members in a device group can only
be added or removed, never updated. This is because the members are
identified by unique name values and changing that name would invalidate
the uniqueness.
version_added: 2.5
options:
name:
description:
- Specifies the name of the device that you want to add to the
device group. Often this will be the hostname of the device.
This member must be trusted by the device already. Trusting
can be done with the C(bigip_device_trust) module and the
C(peer_hostname) option to that module.
required: True
device_group:
description:
- The device group that you want to add the member to.
required: True
state:
description:
      - When C(present), ensures that the device group member exists.
- When C(absent), ensures the device group member is removed.
default: present
choices:
- present
- absent
extends_documentation_fragment: f5
author:
- Tim Rupp (@caphrim007)
'''
EXAMPLES = r'''
- name: Add the current device to the "device_trust_group" device group
bigip_device_group_member:
name: "{{ inventory_hostname }}"
device_group: device_trust_group
password: secret
server: lb.mydomain.com
state: present
user: admin
delegate_to: localhost
- name: Add the hosts in the current scope to "device_trust_group"
bigip_device_group_member:
name: "{{ item }}"
device_group: device_trust_group
password: secret
server: lb.mydomain.com
state: present
user: admin
with_items: "{{ hostvars.keys() }}"
run_once: true
delegate_to: localhost
'''
RETURN = r'''
# only common fields returned
'''
from ansible.module_utils.basic import AnsibleModule
try:
from library.module_utils.network.f5.bigip import HAS_F5SDK
from library.module_utils.network.f5.bigip import F5Client
from library.module_utils.network.f5.common import F5ModuleError
from library.module_utils.network.f5.common import AnsibleF5Parameters
from library.module_utils.network.f5.common import cleanup_tokens
from library.module_utils.network.f5.common import f5_argument_spec
try:
from library.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
except ImportError:
from ansible.module_utils.network.f5.bigip import HAS_F5SDK
from ansible.module_utils.network.f5.bigip import F5Client
from ansible.module_utils.network.f5.common import F5ModuleError
from ansible.module_utils.network.f5.common import AnsibleF5Parameters
from ansible.module_utils.network.f5.common import cleanup_tokens
from ansible.module_utils.network.f5.common import f5_argument_spec
try:
from ansible.module_utils.network.f5.common import iControlUnexpectedHTTPError
except ImportError:
HAS_F5SDK = False
class Parameters(AnsibleF5Parameters):
api_map = {}
api_attributes = []
returnables = []
updatables = []
def to_return(self):
result = {}
try:
for returnable in self.returnables:
result[returnable] = getattr(self, returnable)
result = self._filter_params(result)
except Exception:
pass
return result
class Changes(Parameters):
pass
class ModuleManager(object):
def __init__(self, *args, **kwargs):
self.module = kwargs.get('module', None)
self.client = kwargs.get('client', None)
self.want = Parameters(params=self.module.params)
self.have = None
self.changes = Changes()
def _set_changed_options(self):
changed = {}
for key in Parameters.returnables:
if getattr(self.want, key) is not None:
changed[key] = getattr(self.want, key)
if changed:
self.changes = Changes(params=changed)
def exec_module(self):
changed = False
result = dict()
state = self.want.state
try:
if state == "present":
changed = self.present()
elif state == "absent":
changed = self.absent()
except iControlUnexpectedHTTPError as e:
raise F5ModuleError(str(e))
changes = self.changes.to_return()
result.update(**changes)
result.update(dict(changed=changed))
return result
def present(self):
if self.exists():
return False
else:
return self.create()
def exists(self):
parent = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.device_group
)
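        # Membership is checked by loading the parent device group and
        # testing for a device entry with the requested name.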
exists = parent.devices_s.devices.exists(name=self.want.name)
if exists:
return True
return False
def remove(self):
if self.module.check_mode:
return True
self.remove_from_device()
if self.exists():
raise F5ModuleError("Failed to remove the member from the device group.")
return True
def create(self):
self._set_changed_options()
if self.module.check_mode:
return True
self.create_on_device()
return True
def create_on_device(self):
parent = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.device_group
)
parent.devices_s.devices.create(name=self.want.name)
def absent(self):
if self.exists():
return self.remove()
return False
def remove_from_device(self):
parent = self.client.api.tm.cm.device_groups.device_group.load(
name=self.want.device_group
)
resource = parent.devices_s.devices.load(name=self.want.name)
if resource:
resource.delete()
class ArgumentSpec(object):
def __init__(self):
self.supports_check_mode = True
argument_spec = dict(
name=dict(required=True),
device_group=dict(required=True),
state=dict(
default='present',
choices=['absent', 'present']
)
)
self.argument_spec = {}
self.argument_spec.update(f5_argument_spec)
self.argument_spec.update(argument_spec)
def main():
spec = ArgumentSpec()
module = AnsibleModule(
argument_spec=spec.argument_spec,
supports_check_mode=spec.supports_check_mode
)
if not HAS_F5SDK:
module.fail_json(msg="The python f5-sdk module is required")
try:
client = F5Client(**module.params)
mm = ModuleManager(module=module, client=client)
results = mm.exec_module()
cleanup_tokens(client)
module.exit_json(**results)
except F5ModuleError as ex:
cleanup_tokens(client)
module.fail_json(msg=str(ex))
if __name__ == '__main__':
main()
| gpl-3.0 |
sencha/chromium-spacewalk | tools/telemetry/telemetry/core/platform/profiler/perf_profiler.py | 6 | 8619 | # Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import logging
import os
import re
import signal
import subprocess
import sys
import tempfile
from telemetry.core import platform
from telemetry.core import util
from telemetry.core.platform import profiler
from telemetry.core.platform.profiler import android_profiling_helper
from telemetry.util import support_binaries
util.AddDirToPythonPath(util.GetChromiumSrcDir(), 'build', 'android')
from pylib.perf import perf_control # pylint: disable=F0401
_PERF_OPTIONS = [
# In perf 3.13 --call-graph requires an argument, so use the -g short-hand
# which does not.
'-g',
# Increase sampling frequency for better coverage.
'--freq', '2000',
]
_PERF_OPTIONS_ANDROID = [
# Increase priority to avoid dropping samples. Requires root.
'--realtime', '80',
]
def _NicePath(path):
rel_path = os.path.relpath(path, os.curdir)
return rel_path if len(rel_path) < len(path) else path
def _PrepareHostForPerf():
kptr_file = '/proc/sys/kernel/kptr_restrict'
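  # perf needs readable kernel pointer addresses (e.g. in /proc/kallsyms)
  # to symbolize kernel samples, so kptr_restrict must be set to 0.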
with open(kptr_file) as f:
if f.read().strip() != '0':
logging.warning('Making kernel symbols unrestricted. You might have to '
'enter your password for "sudo".')
with tempfile.NamedTemporaryFile() as zero:
zero.write('0')
zero.flush()
subprocess.call(['sudo', 'cp', zero.name, kptr_file])
def _InstallPerfHost():
host = platform.GetHostPlatform()
if not host.CanLaunchApplication('perfhost'):
host.InstallApplication('perfhost')
return support_binaries.FindPath('perfhost', host.GetOSName())
class _SingleProcessPerfProfiler(object):
"""An internal class for using perf for a given process.
On android, this profiler uses pre-built binaries from AOSP.
See more details in prebuilt/android/README.txt.
"""
def __init__(self, pid, output_file, browser_backend, platform_backend,
perf_binary, perfhost_binary):
self._pid = pid
self._browser_backend = browser_backend
self._platform_backend = platform_backend
self._output_file = output_file
self._tmp_output_file = tempfile.NamedTemporaryFile('w', 0)
self._is_android = platform_backend.GetOSName() == 'android'
self._perfhost_binary = perfhost_binary
cmd_prefix = []
perf_args = ['record', '--pid', str(pid)]
if self._is_android:
cmd_prefix = ['adb', '-s', browser_backend.adb.device_serial(), 'shell',
perf_binary]
perf_args += _PERF_OPTIONS_ANDROID
output_file = os.path.join('/sdcard', 'perf_profiles',
os.path.basename(output_file))
self._device_output_file = output_file
browser_backend.adb.RunShellCommand(
'mkdir -p ' + os.path.dirname(self._device_output_file))
browser_backend.adb.RunShellCommand('rm -f ' + self._device_output_file)
else:
cmd_prefix = [perf_binary]
perf_args += ['--output', output_file] + _PERF_OPTIONS
self._proc = subprocess.Popen(cmd_prefix + perf_args,
stdout=self._tmp_output_file, stderr=subprocess.STDOUT)
def CollectProfile(self):
if ('renderer' in self._output_file and
not self._is_android and
not self._platform_backend.GetCommandLine(self._pid)):
logging.warning('Renderer was swapped out during profiling. '
'To collect a full profile rerun with '
'"--extra-browser-args=--single-process"')
if self._is_android:
device = self._browser_backend.adb.device()
device.KillAll('perf', signum=signal.SIGINT, blocking=True)
self._proc.send_signal(signal.SIGINT)
exit_code = self._proc.wait()
try:
if exit_code == 128:
raise Exception(
"""perf failed with exit code 128.
Try rerunning this script under sudo or setting
/proc/sys/kernel/perf_event_paranoid to "-1".\nOutput:\n%s""" %
self._GetStdOut())
elif exit_code not in (0, -2):
raise Exception(
'perf failed with exit code %d. Output:\n%s' % (exit_code,
self._GetStdOut()))
finally:
self._tmp_output_file.close()
cmd = '%s report -n -i %s' % (_NicePath(self._perfhost_binary),
self._output_file)
if self._is_android:
device = self._browser_backend.adb.device()
device.old_interface.Adb().Pull(self._device_output_file,
self._output_file)
required_libs = \
android_profiling_helper.GetRequiredLibrariesForPerfProfile(
self._output_file)
symfs_root = os.path.dirname(self._output_file)
kallsyms = android_profiling_helper.CreateSymFs(device,
symfs_root,
required_libs,
use_symlinks=True)
cmd += ' --symfs %s --kallsyms %s' % (symfs_root, kallsyms)
for lib in required_libs:
lib = os.path.join(symfs_root, lib[1:])
if not os.path.exists(lib):
continue
objdump_path = android_profiling_helper.GetToolchainBinaryPath(
lib, 'objdump')
if objdump_path:
cmd += ' --objdump %s' % _NicePath(objdump_path)
break
print 'To view the profile, run:'
print ' ', cmd
return self._output_file
def _GetStdOut(self):
self._tmp_output_file.flush()
try:
with open(self._tmp_output_file.name) as f:
return f.read()
except IOError:
return ''
class PerfProfiler(profiler.Profiler):
def __init__(self, browser_backend, platform_backend, output_path, state):
super(PerfProfiler, self).__init__(
browser_backend, platform_backend, output_path, state)
process_output_file_map = self._GetProcessOutputFileMap()
self._process_profilers = []
self._perf_control = None
perf_binary = perfhost_binary = _InstallPerfHost()
try:
if platform_backend.GetOSName() == 'android':
device = browser_backend.adb.device()
perf_binary = android_profiling_helper.PrepareDeviceForPerf(device)
self._perf_control = perf_control.PerfControl(device)
self._perf_control.SetPerfProfilingMode()
else:
_PrepareHostForPerf()
for pid, output_file in process_output_file_map.iteritems():
if 'zygote' in output_file:
continue
self._process_profilers.append(
_SingleProcessPerfProfiler(
pid, output_file, browser_backend, platform_backend,
perf_binary, perfhost_binary))
except:
if self._perf_control:
self._perf_control.SetDefaultPerfMode()
raise
@classmethod
def name(cls):
return 'perf'
@classmethod
def is_supported(cls, browser_type):
if sys.platform != 'linux2':
return False
if browser_type.startswith('cros'):
return False
return True
@classmethod
def CustomizeBrowserOptions(cls, browser_type, options):
options.AppendExtraBrowserArgs([
'--no-sandbox',
'--allow-sandbox-debugging',
])
def CollectProfile(self):
if self._perf_control:
self._perf_control.SetDefaultPerfMode()
output_files = []
for single_process in self._process_profilers:
output_files.append(single_process.CollectProfile())
return output_files
@classmethod
def GetTopSamples(cls, file_name, number):
"""Parses the perf generated profile in |file_name| and returns a
{function: period} dict of the |number| hottests functions.
"""
assert os.path.exists(file_name)
with open(os.devnull, 'w') as devnull:
_InstallPerfHost()
report = subprocess.Popen(
['perfhost', 'report', '--show-total-period', '-U', '-t', '^', '-i',
file_name],
stdout=subprocess.PIPE, stderr=devnull).communicate()[0]
period_by_function = {}
for line in report.split('\n'):
if not line or line.startswith('#'):
continue
fields = line.split('^')
if len(fields) != 5:
continue
period = int(fields[1])
function = fields[4].partition(' ')[2]
function = re.sub('<.*>', '', function) # Strip template params.
function = re.sub('[(].*[)]', '', function) # Strip function params.
period_by_function[function] = period
if len(period_by_function) == number:
break
return period_by_function
| bsd-3-clause |
yyjiang/scikit-learn | sklearn/covariance/tests/test_covariance.py | 142 | 11068 | # Author: Alexandre Gramfort <[email protected]>
# Gael Varoquaux <[email protected]>
# Virgile Fritsch <[email protected]>
#
# License: BSD 3 clause
import numpy as np
from sklearn.utils.testing import assert_almost_equal
from sklearn.utils.testing import assert_array_almost_equal
from sklearn.utils.testing import assert_array_equal
from sklearn.utils.testing import assert_raises
from sklearn.utils.testing import assert_warns
from sklearn import datasets
from sklearn.covariance import empirical_covariance, EmpiricalCovariance, \
ShrunkCovariance, shrunk_covariance, \
LedoitWolf, ledoit_wolf, ledoit_wolf_shrinkage, OAS, oas
X = datasets.load_diabetes().data
X_1d = X[:, 0]
n_samples, n_features = X.shape
def test_covariance():
# Tests Covariance module on a simple dataset.
# test covariance fit from data
cov = EmpiricalCovariance()
cov.fit(X)
emp_cov = empirical_covariance(X)
assert_array_almost_equal(emp_cov, cov.covariance_, 4)
assert_almost_equal(cov.error_norm(emp_cov), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='spectral'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, norm='frobenius'), 0)
assert_almost_equal(
cov.error_norm(emp_cov, scaling=False), 0)
assert_almost_equal(
cov.error_norm(emp_cov, squared=False), 0)
assert_raises(NotImplementedError,
cov.error_norm, emp_cov, norm='foo')
# Mahalanobis distances computation test
mahal_dist = cov.mahalanobis(X)
print(np.amin(mahal_dist), np.amax(mahal_dist))
assert(np.amin(mahal_dist) > 0)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = EmpiricalCovariance()
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
assert_almost_equal(cov.error_norm(empirical_covariance(X_1d)), 0)
assert_almost_equal(
cov.error_norm(empirical_covariance(X_1d), norm='spectral'), 0)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
cov = EmpiricalCovariance()
assert_warns(UserWarning, cov.fit, X_1sample)
assert_array_almost_equal(cov.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test integer type
X_integer = np.asarray([[0, 1], [1, 0]])
result = np.asarray([[0.25, -0.25], [-0.25, 0.25]])
assert_array_almost_equal(empirical_covariance(X_integer), result)
# test centered case
cov = EmpiricalCovariance(assume_centered=True)
cov.fit(X)
assert_array_equal(cov.location_, np.zeros(X.shape[1]))
def test_shrunk_covariance():
# Tests ShrunkCovariance module on a simple dataset.
# compare shrunk covariance obtained from data and from MLE estimate
cov = ShrunkCovariance(shrinkage=0.5)
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X), shrinkage=0.5),
cov.covariance_, 4)
# same test with shrinkage not provided
cov = ShrunkCovariance()
cov.fit(X)
assert_array_almost_equal(
shrunk_covariance(empirical_covariance(X)), cov.covariance_, 4)
# same test with shrinkage = 0 (<==> empirical_covariance)
cov = ShrunkCovariance(shrinkage=0.)
cov.fit(X)
assert_array_almost_equal(empirical_covariance(X), cov.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
cov = ShrunkCovariance(shrinkage=0.3)
cov.fit(X_1d)
assert_array_almost_equal(empirical_covariance(X_1d), cov.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
cov = ShrunkCovariance(shrinkage=0.5, store_precision=False)
cov.fit(X)
assert(cov.precision_ is None)
def test_ledoit_wolf():
# Tests LedoitWolf module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
lw = LedoitWolf(assume_centered=True)
lw.fit(X_centered)
shrinkage_ = lw.shrinkage_
score_ = lw.score(X_centered)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered,
assume_centered=True),
shrinkage_)
assert_almost_equal(ledoit_wolf_shrinkage(X_centered, assume_centered=True,
block_size=6),
shrinkage_)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_centered,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf(assume_centered=True)
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d,
assume_centered=True)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, lw.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False, assume_centered=True)
lw.fit(X_centered)
assert_almost_equal(lw.score(X_centered), score_, 4)
assert(lw.precision_ is None)
# Same tests without assuming centered data
# test shrinkage coeff on a simple data set
lw = LedoitWolf()
lw.fit(X)
assert_almost_equal(lw.shrinkage_, shrinkage_, 4)
assert_almost_equal(lw.shrinkage_, ledoit_wolf_shrinkage(X))
assert_almost_equal(lw.shrinkage_, ledoit_wolf(X)[1])
assert_almost_equal(lw.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
# compare estimates given by LW and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=lw.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, lw.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
lw = LedoitWolf()
lw.fit(X_1d)
lw_cov_from_mle, lw_shinkrage_from_mle = ledoit_wolf(X_1d)
assert_array_almost_equal(lw_cov_from_mle, lw.covariance_, 4)
assert_almost_equal(lw_shinkrage_from_mle, lw.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), lw.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
lw = LedoitWolf()
assert_warns(UserWarning, lw.fit, X_1sample)
assert_array_almost_equal(lw.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
lw = LedoitWolf(store_precision=False)
lw.fit(X)
assert_almost_equal(lw.score(X), score_, 4)
assert(lw.precision_ is None)
def test_ledoit_wolf_large():
# test that ledoit_wolf doesn't error on data that is wider than block_size
rng = np.random.RandomState(0)
# use a number of features that is larger than the block-size
X = rng.normal(size=(10, 20))
lw = LedoitWolf(block_size=10).fit(X)
# check that covariance is about diagonal (random normal noise)
assert_almost_equal(lw.covariance_, np.eye(20), 0)
cov = lw.covariance_
# check that the result is consistent with not splitting data into blocks.
lw = LedoitWolf(block_size=25).fit(X)
assert_almost_equal(lw.covariance_, cov)
def test_oas():
# Tests OAS module on a simple dataset.
# test shrinkage coeff on a simple data set
X_centered = X - X.mean(axis=0)
oa = OAS(assume_centered=True)
oa.fit(X_centered)
shrinkage_ = oa.shrinkage_
score_ = oa.score(X_centered)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_centered,
assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_, assume_centered=True)
scov.fit(X_centered)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS(assume_centered=True)
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d, assume_centered=True)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal((X_1d ** 2).sum() / n_samples, oa.covariance_, 4)
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False, assume_centered=True)
oa.fit(X_centered)
assert_almost_equal(oa.score(X_centered), score_, 4)
assert(oa.precision_ is None)
# Same tests without assuming centered data--------------------------------
# test shrinkage coeff on a simple data set
oa = OAS()
oa.fit(X)
assert_almost_equal(oa.shrinkage_, shrinkage_, 4)
assert_almost_equal(oa.score(X), score_, 4)
# compare shrunk covariance obtained from data and from MLE estimate
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
# compare estimates given by OAS and ShrunkCovariance
scov = ShrunkCovariance(shrinkage=oa.shrinkage_)
scov.fit(X)
assert_array_almost_equal(scov.covariance_, oa.covariance_, 4)
# test with n_features = 1
X_1d = X[:, 0].reshape((-1, 1))
oa = OAS()
oa.fit(X_1d)
oa_cov_from_mle, oa_shinkrage_from_mle = oas(X_1d)
assert_array_almost_equal(oa_cov_from_mle, oa.covariance_, 4)
assert_almost_equal(oa_shinkrage_from_mle, oa.shrinkage_)
assert_array_almost_equal(empirical_covariance(X_1d), oa.covariance_, 4)
# test with one sample
# FIXME I don't know what this test does
X_1sample = np.arange(5)
oa = OAS()
assert_warns(UserWarning, oa.fit, X_1sample)
assert_array_almost_equal(oa.covariance_,
np.zeros(shape=(5, 5), dtype=np.float64))
# test shrinkage coeff on a simple data set (without saving precision)
oa = OAS(store_precision=False)
oa.fit(X)
assert_almost_equal(oa.score(X), score_, 4)
assert(oa.precision_ is None)
| bsd-3-clause |
rickerc/nova_audit | nova/api/openstack/compute/views/limits.py | 14 | 3505 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2010-2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import datetime
from nova.openstack.common import timeutils
class ViewBuilder(object):
"""OpenStack API base limits view builder."""
def build(self, rate_limits, absolute_limits):
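        """Build the 'limits' view containing rate and absolute limits."""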
rate_limits = self._build_rate_limits(rate_limits)
absolute_limits = self._build_absolute_limits(absolute_limits)
output = {
"limits": {
"rate": rate_limits,
"absolute": absolute_limits,
},
}
return output
def _build_absolute_limits(self, absolute_limits):
"""Builder for absolute limits
absolute_limits should be given as a dict of limits.
For example: {"ram": 512, "gigabytes": 1024}.
"""
limit_names = {
"ram": ["maxTotalRAMSize"],
"instances": ["maxTotalInstances"],
"cores": ["maxTotalCores"],
"key_pairs": ["maxTotalKeypairs"],
"floating_ips": ["maxTotalFloatingIps"],
"metadata_items": ["maxServerMeta", "maxImageMeta"],
"injected_files": ["maxPersonality"],
"injected_file_content_bytes": ["maxPersonalitySize"],
"security_groups": ["maxSecurityGroups"],
"security_group_rules": ["maxSecurityGroupRules"],
}
limits = {}
for name, value in absolute_limits.iteritems():
if name in limit_names and value is not None:
for name in limit_names[name]:
limits[name] = value
return limits
def _build_rate_limits(self, rate_limits):
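        """Group rate limits that share the same (uri, regex) pair."""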
limits = []
for rate_limit in rate_limits:
_rate_limit_key = None
_rate_limit = self._build_rate_limit(rate_limit)
# check for existing key
for limit in limits:
if (limit["uri"] == rate_limit["URI"] and
limit["regex"] == rate_limit["regex"]):
_rate_limit_key = limit
break
# ensure we have a key if we didn't find one
if not _rate_limit_key:
_rate_limit_key = {
"uri": rate_limit["URI"],
"regex": rate_limit["regex"],
"limit": [],
}
limits.append(_rate_limit_key)
_rate_limit_key["limit"].append(_rate_limit)
return limits
def _build_rate_limit(self, rate_limit):
_get_utc = datetime.datetime.utcfromtimestamp
next_avail = _get_utc(rate_limit["resetTime"])
return {
"verb": rate_limit["verb"],
"value": rate_limit["value"],
"remaining": int(rate_limit["remaining"]),
"unit": rate_limit["unit"],
"next-available": timeutils.isotime(at=next_avail),
}
| apache-2.0 |
tommyip/zulip | zerver/lib/bugdown/api_code_examples.py | 1 | 10016 | import re
import json
import inspect
from markdown.extensions import Extension
from markdown.preprocessors import Preprocessor
from typing import Any, Dict, Optional, List, Tuple
import markdown
import zerver.openapi.python_examples
from zerver.lib.openapi import get_openapi_fixture, openapi_spec, \
get_openapi_parameters
MACRO_REGEXP = re.compile(r'\{generate_code_example(\(\s*(.+?)\s*\))*\|\s*(.+?)\s*\|\s*(.+?)\s*(\(\s*(.+)\s*\))?\}')
CODE_EXAMPLE_REGEX = re.compile(r'\# \{code_example\|\s*(.+?)\s*\}')
PYTHON_CLIENT_CONFIG = """
#!/usr/bin/env python3
import zulip
# Pass the path to your zuliprc file here.
client = zulip.Client(config_file="~/zuliprc")
"""
PYTHON_CLIENT_ADMIN_CONFIG = """
#!/usr/bin/env python
import zulip
# The user for this zuliprc file must be an organization administrator
client = zulip.Client(config_file="~/zuliprc-admin")
"""
DEFAULT_API_URL = "localhost:9991/api"
DEFAULT_AUTH_EMAIL = "BOT_EMAIL_ADDRESS"
DEFAULT_AUTH_API_KEY = "BOT_API_KEY"
DEFAULT_EXAMPLE = {
"integer": 1,
"string": "demo",
"boolean": False,
}
def parse_language_and_options(input_str: Optional[str]) -> Tuple[str, Dict[str, Any]]:
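    """Splits a macro argument like 'language, key=value' into (language, options dict)."""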
if not input_str:
return ("", {})
language_and_options = re.match(r"(?P<language>\w+)(,\s*(?P<options>[\"\'\w\d\[\],= ]+))?", input_str)
assert(language_and_options is not None)
kwargs_pattern = re.compile(r"(?P<key>\w+)\s*=\s*(?P<value>[\'\"\w\d]+|\[[\'\",\w\d ]+\])")
language = language_and_options.group("language")
assert(language is not None)
if language_and_options.group("options"):
_options = kwargs_pattern.finditer(language_and_options.group("options"))
options = {}
for m in _options:
options[m.group("key")] = json.loads(m.group("value").replace("'", '"'))
return (language, options)
return (language, {})
def extract_python_code_example(source: List[str], snippet: List[str]) -> List[str]:
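    """Recursively collects the lines between the '# {code_example|start}' and
    '# {code_example|end}' markers, appending a print(result) line after each snippet.
    """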
start = -1
end = -1
for line in source:
match = CODE_EXAMPLE_REGEX.search(line)
if match:
if match.group(1) == 'start':
start = source.index(line)
elif match.group(1) == 'end':
end = source.index(line)
break
if (start == -1 and end == -1):
return snippet
snippet.extend(source[start + 1: end])
snippet.append(' print(result)')
snippet.append('\n')
source = source[end + 1:]
return extract_python_code_example(source, snippet)
def render_python_code_example(function: str, admin_config: Optional[bool]=False) -> List[str]:
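    """Renders the Python example registered in TEST_FUNCTIONS as a markdown code block."""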
method = zerver.openapi.python_examples.TEST_FUNCTIONS[function]
function_source_lines = inspect.getsourcelines(method)[0]
if admin_config:
config = PYTHON_CLIENT_ADMIN_CONFIG.splitlines()
else:
config = PYTHON_CLIENT_CONFIG.splitlines()
snippet = extract_python_code_example(function_source_lines, [])
code_example = []
code_example.append('```python')
code_example.extend(config)
for line in snippet:
# Remove one level of indentation and strip newlines
code_example.append(line[4:].rstrip())
code_example.append('```')
return code_example
def curl_method_arguments(endpoint: str, method: str,
api_url: str) -> List[str]:
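    """Returns the curl arguments for the request; GET uses -G so -d values become URL parameters."""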
method = method.upper()
url = "{}/v1{}".format(api_url, endpoint)
valid_methods = ["GET", "POST", "DELETE", "PUT", "PATCH", "OPTIONS"]
if method == valid_methods[0]:
        # Then we need to make sure that each -d option translates to a GET
        # parameter (in the URL) and not a POST parameter (in the body).
# TODO: remove the -X part by updating the linting rule. It's redundant.
return ["-X", "GET", "-G", url]
elif method in valid_methods:
return ["-X", method, url]
else:
msg = "The request method {} is not one of {}".format(method,
valid_methods)
raise ValueError(msg)
def generate_curl_example(endpoint: str, method: str,
auth_email: str=DEFAULT_AUTH_EMAIL,
auth_api_key: str=DEFAULT_AUTH_API_KEY,
api_url: str=DEFAULT_API_URL,
exclude: List[str]=[]) -> List[str]:
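    """Generates a markdown curl example block for the given endpoint and method."""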
lines = ["```curl"]
openapi_entry = openapi_spec.spec()['paths'][endpoint][method.lower()]
curl_first_line_parts = ["curl"] + curl_method_arguments(endpoint, method,
api_url)
lines.append(" ".join(curl_first_line_parts))
authentication_required = openapi_entry.get("security", False)
if authentication_required:
lines.append(" -u %s:%s" % (auth_email, auth_api_key))
openapi_example_params = get_openapi_parameters(endpoint, method)
for packet in openapi_example_params:
param_name = packet["name"]
if param_name in exclude:
continue
param_type = packet["schema"]["type"]
if param_type in ["object", "array"]:
example_value = packet.get("example", None)
if not example_value:
msg = """All array and object type request parameters must have
concrete examples. The OpenAPI documentation for {}/{} is missing an example
value for the {} parameter. Without this we cannot automatically generate a
cURL example.""".format(endpoint, method, param_name)
raise ValueError(msg)
ordered_ex_val_str = json.dumps(example_value, sort_keys=True)
line = " --data-urlencode {}='{}'".format(param_name, ordered_ex_val_str)
else:
example_value = packet.get("example", DEFAULT_EXAMPLE[param_type])
if type(example_value) == bool:
example_value = str(example_value).lower()
line = " -d '{}={}'".format(param_name, example_value)
lines.append(line)
for i in range(1, len(lines)-1):
lines[i] = lines[i] + " \\"
lines.append("```")
return lines
def render_curl_example(function: str, exclude: List[str]=[]) -> List[str]:
""" A simple wrapper around generate_curl_example. """
parts = function.split(":")
endpoint = parts[0]
method = parts[1]
kwargs = dict() # type: Dict[str, Any]
if len(parts) > 2:
kwargs["auth_email"] = parts[2]
if len(parts) > 3:
kwargs["auth_api_key"] = parts[3]
if len(parts) > 4:
kwargs["api_url"] = parts[4]
kwargs["exclude"] = exclude
return generate_curl_example(endpoint, method, **kwargs)
SUPPORTED_LANGUAGES = {
'python': {
'client_config': PYTHON_CLIENT_CONFIG,
'admin_config': PYTHON_CLIENT_ADMIN_CONFIG,
'render': render_python_code_example,
},
'curl': {
'render': render_curl_example
}
} # type: Dict[str, Any]
class APICodeExamplesGenerator(Extension):
def extendMarkdown(self, md: markdown.Markdown, md_globals: Dict[str, Any]) -> None:
md.preprocessors.add(
'generate_code_example', APICodeExamplesPreprocessor(md, self.getConfigs()), '_begin'
)
class APICodeExamplesPreprocessor(Preprocessor):
def __init__(self, md: markdown.Markdown, config: Dict[str, Any]) -> None:
super(APICodeExamplesPreprocessor, self).__init__(md)
def run(self, lines: List[str]) -> List[str]:
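        """Replaces each {generate_code_example...} macro with its rendered fixture or code example."""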
done = False
while not done:
for line in lines:
loc = lines.index(line)
match = MACRO_REGEXP.search(line)
if match:
language, options = parse_language_and_options(match.group(2))
function = match.group(3)
key = match.group(4)
argument = match.group(6)
if key == 'fixture':
if argument:
text = self.render_fixture(function, name=argument)
else:
text = self.render_fixture(function)
elif key == 'example':
if argument == 'admin_config=True':
text = SUPPORTED_LANGUAGES[language]['render'](function, admin_config=True)
else:
text = SUPPORTED_LANGUAGES[language]['render'](function, **options)
# The line that contains the directive to include the macro
                    # may be preceded or followed by text or tags; in that case
# we need to make sure that any preceding or following text
# stays the same.
line_split = MACRO_REGEXP.split(line, maxsplit=0)
preceding = line_split[0]
following = line_split[-1]
text = [preceding] + text + [following]
lines = lines[:loc] + text + lines[loc+1:]
break
else:
done = True
return lines
def render_fixture(self, function: str, name: Optional[str]=None) -> List[str]:
fixture = []
# We assume that if the function we're rendering starts with a slash
# it's a path in the endpoint and therefore it uses the new OpenAPI
# format.
if function.startswith('/'):
path, method = function.rsplit(':', 1)
fixture_dict = get_openapi_fixture(path, method, name)
else:
fixture_dict = zerver.openapi.python_examples.FIXTURES[function]
fixture_json = json.dumps(fixture_dict, indent=4, sort_keys=True,
separators=(',', ': '))
fixture.append('```')
fixture.extend(fixture_json.splitlines())
fixture.append('```')
return fixture
def makeExtension(*args: Any, **kwargs: str) -> APICodeExamplesGenerator:
return APICodeExamplesGenerator(**kwargs)
| apache-2.0 |
binhex/moviegrabber | lib/site-packages/cherrypy/_cpchecker.py | 87 | 14739 | import os
import warnings
import cherrypy
from cherrypy._cpcompat import iteritems, copykeys, builtins
class Checker(object):
"""A checker for CherryPy sites and their mounted applications.
When this object is called at engine startup, it executes each
of its own methods whose names start with ``check_``. If you wish
to disable selected checks, simply add a line in your global
config which sets the appropriate method to False::
[global]
checker.check_skipped_app_config = False
You may also dynamically add or replace ``check_*`` methods in this way.
"""
on = True
"""If True (the default), run all checks; if False, turn off all checks."""
def __init__(self):
self._populate_known_types()
def __call__(self):
"""Run all check_* methods."""
if self.on:
oldformatwarning = warnings.formatwarning
warnings.formatwarning = self.formatwarning
try:
for name in dir(self):
if name.startswith("check_"):
method = getattr(self, name)
if method and hasattr(method, '__call__'):
method()
finally:
warnings.formatwarning = oldformatwarning
def formatwarning(self, message, category, filename, lineno, line=None):
"""Function to format a warning."""
return "CherryPy Checker:\n%s\n\n" % message
# This value should be set inside _cpconfig.
global_config_contained_paths = False
def check_app_config_entries_dont_start_with_script_name(self):
"""Check for Application config with sections that repeat script_name."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
if sn == '':
continue
sn_atoms = sn.strip("/").split("/")
for key in app.config.keys():
key_atoms = key.strip("/").split("/")
if key_atoms[:len(sn_atoms)] == sn_atoms:
warnings.warn(
"The application mounted at %r has config " \
"entries that start with its script name: %r" % (sn, key))
def check_site_config_entries_in_app_config(self):
"""Check for mounted Applications that have site-scoped config."""
for sn, app in iteritems(cherrypy.tree.apps):
if not isinstance(app, cherrypy.Application):
continue
msg = []
for section, entries in iteritems(app.config):
if section.startswith('/'):
for key, value in iteritems(entries):
for n in ("engine.", "server.", "tree.", "checker."):
if key.startswith(n):
msg.append("[%s] %s = %s" % (section, key, value))
if msg:
msg.insert(0,
"The application mounted at %r contains the following "
"config entries, which are only allowed in site-wide "
"config. Move them to a [global] section and pass them "
"to cherrypy.config.update() instead of tree.mount()." % sn)
warnings.warn(os.linesep.join(msg))
def check_skipped_app_config(self):
"""Check for mounted Applications that have no config."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
msg = "The Application mounted at %r has an empty config." % sn
if self.global_config_contained_paths:
msg += (" It looks like the config you passed to "
"cherrypy.config.update() contains application-"
"specific sections. You must explicitly pass "
"application config via "
"cherrypy.tree.mount(..., config=app_config)")
warnings.warn(msg)
return
def check_app_config_brackets(self):
"""Check for Application config with extraneous brackets in section names."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
if not app.config:
continue
for key in app.config.keys():
if key.startswith("[") or key.endswith("]"):
warnings.warn(
"The application mounted at %r has config " \
"section names with extraneous brackets: %r. "
"Config *files* need brackets; config *dicts* "
"(e.g. passed to tree.mount) do not." % (sn, key))
def check_static_paths(self):
"""Check Application config for incorrect static paths."""
# Use the dummy Request object in the main thread.
request = cherrypy.request
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
request.app = app
for section in app.config:
# get_resource will populate request.config
request.get_resource(section + "/dummy.html")
conf = request.config.get
if conf("tools.staticdir.on", False):
msg = ""
root = conf("tools.staticdir.root")
dir = conf("tools.staticdir.dir")
if dir is None:
msg = "tools.staticdir.dir is not set."
else:
fulldir = ""
if os.path.isabs(dir):
fulldir = dir
if root:
msg = ("dir is an absolute path, even "
"though a root is provided.")
testdir = os.path.join(root, dir[1:])
if os.path.exists(testdir):
msg += ("\nIf you meant to serve the "
"filesystem folder at %r, remove "
"the leading slash from dir." % testdir)
else:
if not root:
msg = "dir is a relative path and no root provided."
else:
fulldir = os.path.join(root, dir)
if not os.path.isabs(fulldir):
msg = "%r is not an absolute path." % fulldir
if fulldir and not os.path.exists(fulldir):
if msg:
msg += "\n"
msg += ("%r (root + dir) is not an existing "
"filesystem path." % fulldir)
if msg:
warnings.warn("%s\nsection: [%s]\nroot: %r\ndir: %r"
% (msg, section, root, dir))
# -------------------------- Compatibility -------------------------- #
obsolete = {
'server.default_content_type': 'tools.response_headers.headers',
'log_access_file': 'log.access_file',
'log_config_options': None,
'log_file': 'log.error_file',
'log_file_not_found': None,
'log_request_headers': 'tools.log_headers.on',
'log_to_screen': 'log.screen',
'show_tracebacks': 'request.show_tracebacks',
'throw_errors': 'request.throw_errors',
'profiler.on': ('cherrypy.tree.mount(profiler.make_app('
'cherrypy.Application(Root())))'),
}
deprecated = {}
def _compat(self, config):
"""Process config and warn on each obsolete or deprecated entry."""
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if k in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead.\n"
"section: [%s]" %
(k, self.obsolete[k], section))
elif k in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead.\n"
"section: [%s]" %
(k, self.deprecated[k], section))
else:
if section in self.obsolete:
warnings.warn("%r is obsolete. Use %r instead."
% (section, self.obsolete[section]))
elif section in self.deprecated:
warnings.warn("%r is deprecated. Use %r instead."
% (section, self.deprecated[section]))
def check_compatibility(self):
"""Process config and warn on each obsolete or deprecated entry."""
self._compat(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._compat(app.config)
# ------------------------ Known Namespaces ------------------------ #
extra_config_namespaces = []
def _known_ns(self, app):
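        """Warn about config entries in path sections that use an unknown namespace or tool."""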
ns = ["wsgi"]
ns.extend(copykeys(app.toolboxes))
ns.extend(copykeys(app.namespaces))
ns.extend(copykeys(app.request_class.namespaces))
ns.extend(copykeys(cherrypy.config.namespaces))
ns += self.extra_config_namespaces
for section, conf in app.config.items():
is_path_section = section.startswith("/")
if is_path_section and isinstance(conf, dict):
for k, v in conf.items():
atoms = k.split(".")
if len(atoms) > 1:
if atoms[0] not in ns:
# Spit out a special warning if a known
# namespace is preceded by "cherrypy."
if (atoms[0] == "cherrypy" and atoms[1] in ns):
msg = ("The config entry %r is invalid; "
"try %r instead.\nsection: [%s]"
% (k, ".".join(atoms[1:]), section))
else:
msg = ("The config entry %r is invalid, because "
"the %r config namespace is unknown.\n"
"section: [%s]" % (k, atoms[0], section))
warnings.warn(msg)
elif atoms[0] == "tools":
if atoms[1] not in dir(cherrypy.tools):
msg = ("The config entry %r may be invalid, "
"because the %r tool was not found.\n"
"section: [%s]" % (k, atoms[1], section))
warnings.warn(msg)
def check_config_namespaces(self):
"""Process config and warn on each unknown config namespace."""
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_ns(app)
# -------------------------- Config Types -------------------------- #
known_config_types = {}
def _populate_known_types(self):
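        """Record the expected types of known config entries by introspecting cherrypy objects."""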
b = [x for x in vars(builtins).values()
if type(x) is type(str)]
def traverse(obj, namespace):
for name in dir(obj):
# Hack for 3.2's warning about body_params
if name == 'body_params':
continue
vtype = type(getattr(obj, name, None))
if vtype in b:
self.known_config_types[namespace + "." + name] = vtype
traverse(cherrypy.request, "request")
traverse(cherrypy.response, "response")
traverse(cherrypy.server, "server")
traverse(cherrypy.engine, "engine")
traverse(cherrypy.log, "log")
def _known_types(self, config):
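        """Warn on config values whose type does not match the expected default type."""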
msg = ("The config entry %r in section %r is of type %r, "
"which does not match the expected type %r.")
for section, conf in config.items():
if isinstance(conf, dict):
for k, v in conf.items():
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
else:
k, v = section, conf
if v is not None:
expected_type = self.known_config_types.get(k, None)
vtype = type(v)
if expected_type and vtype != expected_type:
warnings.warn(msg % (k, section, vtype.__name__,
expected_type.__name__))
def check_config_types(self):
"""Assert that config values are of the same type as default values."""
self._known_types(cherrypy.config)
for sn, app in cherrypy.tree.apps.items():
if not isinstance(app, cherrypy.Application):
continue
self._known_types(app.config)
# -------------------- Specific config warnings -------------------- #
def check_localhost(self):
"""Warn if any socket_host is 'localhost'. See #711."""
for k, v in cherrypy.config.items():
if k == 'server.socket_host' and v == 'localhost':
warnings.warn("The use of 'localhost' as a socket host can "
"cause problems on newer systems, since 'localhost' can "
"map to either an IPv4 or an IPv6 address. You should "
"use '127.0.0.1' or '[::1]' instead.")
| gpl-3.0 |
nitishaggarwal/wed | wed/users/views.py | 55 | 1681 | # -*- coding: utf-8 -*-
# Import the reverse lookup function
from django.core.urlresolvers import reverse
# view imports
from django.views.generic import DetailView
from django.views.generic import RedirectView
from django.views.generic import UpdateView
from django.views.generic import ListView
# Only authenticated users can access views using this.
from braces.views import LoginRequiredMixin
# Import the form from users/forms.py
from .forms import UserForm
# Import the customized User model
from .models import User
class UserDetailView(LoginRequiredMixin, DetailView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username"
class UserRedirectView(LoginRequiredMixin, RedirectView):
permanent = False
def get_redirect_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
class UserUpdateView(LoginRequiredMixin, UpdateView):
form_class = UserForm
# we already imported User in the view code above, remember?
model = User
# send the user back to their own page after a successful update
def get_success_url(self):
return reverse("users:detail",
kwargs={"username": self.request.user.username})
def get_object(self):
# Only get the User record for the user making the request
return User.objects.get(username=self.request.user.username)
class UserListView(LoginRequiredMixin, ListView):
model = User
# These next two lines tell the view to index lookups by username
slug_field = "username"
slug_url_kwarg = "username" | bsd-3-clause |
mcking49/apache-flask | Python/Lib/htmllib.py | 312 | 12869 | """HTML 2.0 parser.
See the HTML 2.0 specification:
http://www.w3.org/hypertext/WWW/MarkUp/html-spec/html-spec_toc.html
"""
from warnings import warnpy3k
warnpy3k("the htmllib module has been removed in Python 3.0",
stacklevel=2)
del warnpy3k
import sgmllib
from formatter import AS_IS
__all__ = ["HTMLParser", "HTMLParseError"]
class HTMLParseError(sgmllib.SGMLParseError):
"""Error raised when an HTML document can't be parsed."""
class HTMLParser(sgmllib.SGMLParser):
"""This is the basic HTML parser class.
It supports all entity names required by the XHTML 1.0 Recommendation.
It also defines handlers for all HTML 2.0 and many HTML 3.0 and 3.2
elements.
"""
from htmlentitydefs import entitydefs
def __init__(self, formatter, verbose=0):
"""Creates an instance of the HTMLParser class.
The formatter parameter is the formatter instance associated with
the parser.
"""
sgmllib.SGMLParser.__init__(self, verbose)
self.formatter = formatter
def error(self, message):
raise HTMLParseError(message)
def reset(self):
sgmllib.SGMLParser.reset(self)
self.savedata = None
self.isindex = 0
self.title = None
self.base = None
self.anchor = None
self.anchorlist = []
self.nofill = 0
self.list_stack = []
# ------ Methods used internally; some may be overridden
# --- Formatter interface, taking care of 'savedata' mode;
# shouldn't need to be overridden
def handle_data(self, data):
if self.savedata is not None:
self.savedata = self.savedata + data
else:
if self.nofill:
self.formatter.add_literal_data(data)
else:
self.formatter.add_flowing_data(data)
# --- Hooks to save data; shouldn't need to be overridden
def save_bgn(self):
"""Begins saving character data in a buffer instead of sending it
to the formatter object.
Retrieve the stored data via the save_end() method. Use of the
save_bgn() / save_end() pair may not be nested.
"""
self.savedata = ''
def save_end(self):
"""Ends buffering character data and returns all data saved since
the preceding call to the save_bgn() method.
If the nofill flag is false, whitespace is collapsed to single
spaces. A call to this method without a preceding call to the
save_bgn() method will raise a TypeError exception.
"""
data = self.savedata
self.savedata = None
if not self.nofill:
data = ' '.join(data.split())
return data
# --- Hooks for anchors; should probably be overridden
def anchor_bgn(self, href, name, type):
"""This method is called at the start of an anchor region.
The arguments correspond to the attributes of the <A> tag with
the same names. The default implementation maintains a list of
hyperlinks (defined by the HREF attribute for <A> tags) within
the document. The list of hyperlinks is available as the data
attribute anchorlist.
"""
self.anchor = href
if self.anchor:
self.anchorlist.append(href)
def anchor_end(self):
"""This method is called at the end of an anchor region.
The default implementation adds a textual footnote marker using an
        index into the list of hyperlinks created by the anchor_bgn() method.
"""
if self.anchor:
self.handle_data("[%d]" % len(self.anchorlist))
self.anchor = None
# --- Hook for images; should probably be overridden
def handle_image(self, src, alt, *args):
"""This method is called to handle images.
The default implementation simply passes the alt value to the
handle_data() method.
"""
self.handle_data(alt)
# --------- Top level elememts
def start_html(self, attrs): pass
def end_html(self): pass
def start_head(self, attrs): pass
def end_head(self): pass
def start_body(self, attrs): pass
def end_body(self): pass
# ------ Head elements
def start_title(self, attrs):
self.save_bgn()
def end_title(self):
self.title = self.save_end()
def do_base(self, attrs):
for a, v in attrs:
if a == 'href':
self.base = v
def do_isindex(self, attrs):
self.isindex = 1
def do_link(self, attrs):
pass
def do_meta(self, attrs):
pass
def do_nextid(self, attrs): # Deprecated
pass
# ------ Body elements
# --- Headings
def start_h1(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h1', 0, 1, 0))
def end_h1(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h2(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h2', 0, 1, 0))
def end_h2(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h3(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h3', 0, 1, 0))
def end_h3(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h4(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h4', 0, 1, 0))
def end_h4(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h5(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h5', 0, 1, 0))
def end_h5(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
def start_h6(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font(('h6', 0, 1, 0))
def end_h6(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
# --- Block Structuring Elements
def do_p(self, attrs):
self.formatter.end_paragraph(1)
def start_pre(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
self.nofill = self.nofill + 1
def end_pre(self):
self.formatter.end_paragraph(1)
self.formatter.pop_font()
self.nofill = max(0, self.nofill - 1)
def start_xmp(self, attrs):
self.start_pre(attrs)
self.setliteral('xmp') # Tell SGML parser
def end_xmp(self):
self.end_pre()
def start_listing(self, attrs):
self.start_pre(attrs)
self.setliteral('listing') # Tell SGML parser
def end_listing(self):
self.end_pre()
def start_address(self, attrs):
self.formatter.end_paragraph(0)
self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
def end_address(self):
self.formatter.end_paragraph(0)
self.formatter.pop_font()
def start_blockquote(self, attrs):
self.formatter.end_paragraph(1)
self.formatter.push_margin('blockquote')
def end_blockquote(self):
self.formatter.end_paragraph(1)
self.formatter.pop_margin()
# --- List Elements
def start_ul(self, attrs):
self.formatter.end_paragraph(not self.list_stack)
self.formatter.push_margin('ul')
self.list_stack.append(['ul', '*', 0])
def end_ul(self):
if self.list_stack: del self.list_stack[-1]
self.formatter.end_paragraph(not self.list_stack)
self.formatter.pop_margin()
def do_li(self, attrs):
self.formatter.end_paragraph(0)
if self.list_stack:
[dummy, label, counter] = top = self.list_stack[-1]
top[2] = counter = counter+1
else:
label, counter = '*', 0
self.formatter.add_label_data(label, counter)
def start_ol(self, attrs):
self.formatter.end_paragraph(not self.list_stack)
self.formatter.push_margin('ol')
label = '1.'
for a, v in attrs:
if a == 'type':
if len(v) == 1: v = v + '.'
label = v
self.list_stack.append(['ol', label, 0])
def end_ol(self):
if self.list_stack: del self.list_stack[-1]
self.formatter.end_paragraph(not self.list_stack)
self.formatter.pop_margin()
def start_menu(self, attrs):
self.start_ul(attrs)
def end_menu(self):
self.end_ul()
def start_dir(self, attrs):
self.start_ul(attrs)
def end_dir(self):
self.end_ul()
def start_dl(self, attrs):
self.formatter.end_paragraph(1)
self.list_stack.append(['dl', '', 0])
def end_dl(self):
self.ddpop(1)
if self.list_stack: del self.list_stack[-1]
def do_dt(self, attrs):
self.ddpop()
def do_dd(self, attrs):
self.ddpop()
self.formatter.push_margin('dd')
self.list_stack.append(['dd', '', 0])
def ddpop(self, bl=0):
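        """End the current paragraph and pop an open 'dd' list context, if any."""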
self.formatter.end_paragraph(bl)
if self.list_stack:
if self.list_stack[-1][0] == 'dd':
del self.list_stack[-1]
self.formatter.pop_margin()
# --- Phrase Markup
# Idiomatic Elements
def start_cite(self, attrs): self.start_i(attrs)
def end_cite(self): self.end_i()
def start_code(self, attrs): self.start_tt(attrs)
def end_code(self): self.end_tt()
def start_em(self, attrs): self.start_i(attrs)
def end_em(self): self.end_i()
def start_kbd(self, attrs): self.start_tt(attrs)
def end_kbd(self): self.end_tt()
def start_samp(self, attrs): self.start_tt(attrs)
def end_samp(self): self.end_tt()
def start_strong(self, attrs): self.start_b(attrs)
def end_strong(self): self.end_b()
def start_var(self, attrs): self.start_i(attrs)
def end_var(self): self.end_i()
# Typographic Elements
def start_i(self, attrs):
self.formatter.push_font((AS_IS, 1, AS_IS, AS_IS))
def end_i(self):
self.formatter.pop_font()
def start_b(self, attrs):
self.formatter.push_font((AS_IS, AS_IS, 1, AS_IS))
def end_b(self):
self.formatter.pop_font()
def start_tt(self, attrs):
self.formatter.push_font((AS_IS, AS_IS, AS_IS, 1))
def end_tt(self):
self.formatter.pop_font()
def start_a(self, attrs):
href = ''
name = ''
type = ''
for attrname, value in attrs:
value = value.strip()
if attrname == 'href':
href = value
if attrname == 'name':
name = value
if attrname == 'type':
type = value.lower()
self.anchor_bgn(href, name, type)
def end_a(self):
self.anchor_end()
# --- Line Break
def do_br(self, attrs):
self.formatter.add_line_break()
# --- Horizontal Rule
def do_hr(self, attrs):
self.formatter.add_hor_rule()
# --- Image
def do_img(self, attrs):
align = ''
alt = '(image)'
ismap = ''
src = ''
width = 0
height = 0
for attrname, value in attrs:
if attrname == 'align':
align = value
if attrname == 'alt':
alt = value
if attrname == 'ismap':
ismap = value
if attrname == 'src':
src = value
if attrname == 'width':
try: width = int(value)
except ValueError: pass
if attrname == 'height':
try: height = int(value)
except ValueError: pass
self.handle_image(src, alt, ismap, align, width, height)
# --- Really Old Unofficial Deprecated Stuff
def do_plaintext(self, attrs):
self.start_pre(attrs)
self.setnomoretags() # Tell SGML parser
# --- Unhandled tags
def unknown_starttag(self, tag, attrs):
pass
def unknown_endtag(self, tag):
pass
def test(args = None):
import sys, formatter
if not args:
args = sys.argv[1:]
silent = args and args[0] == '-s'
if silent:
del args[0]
if args:
file = args[0]
else:
file = 'test.html'
if file == '-':
f = sys.stdin
else:
try:
f = open(file, 'r')
except IOError, msg:
print file, ":", msg
sys.exit(1)
data = f.read()
if f is not sys.stdin:
f.close()
if silent:
f = formatter.NullFormatter()
else:
f = formatter.AbstractFormatter(formatter.DumbWriter())
p = HTMLParser(f)
p.feed(data)
p.close()
if __name__ == '__main__':
test()
| mit |
rbalda/neural_ocr | env/lib/python2.7/site-packages/scipy/optimize/tests/test_nonlin.py | 67 | 15160 | """ Unit tests for nonlinear solvers
Author: Ondrej Certik
May 2007
"""
from __future__ import division, print_function, absolute_import
from numpy.testing import assert_, dec, TestCase, run_module_suite
from scipy._lib.six import xrange
from scipy.optimize import nonlin, root
from numpy import matrix, diag, dot
from numpy.linalg import inv
import numpy as np
from test_minpack import pressure_network
SOLVERS = {'anderson': nonlin.anderson, 'diagbroyden': nonlin.diagbroyden,
'linearmixing': nonlin.linearmixing, 'excitingmixing': nonlin.excitingmixing,
'broyden1': nonlin.broyden1, 'broyden2': nonlin.broyden2,
'krylov': nonlin.newton_krylov}
MUST_WORK = {'anderson': nonlin.anderson, 'broyden1': nonlin.broyden1,
'broyden2': nonlin.broyden2, 'krylov': nonlin.newton_krylov}
#-------------------------------------------------------------------------------
# Test problems
#-------------------------------------------------------------------------------
def F(x):
x = np.asmatrix(x).T
d = matrix(diag([3,2,1.5,1,0.5]))
c = 0.01
f = -d*x - c*float(x.T*x)*x
return f
F.xin = [1,1,1,1,1]
F.KNOWN_BAD = {}
def F2(x):
return x
F2.xin = [1,2,3,4,5,6]
F2.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing}
def F2_lucky(x):
return x
F2_lucky.xin = [0,0,0,0,0,0]
F2_lucky.KNOWN_BAD = {}
def F3(x):
A = np.mat('-2 1 0; 1 -2 1; 0 1 -2')
b = np.mat('1 2 3')
return np.dot(A, x) - b
F3.xin = [1,2,3]
F3.KNOWN_BAD = {}
def F4_powell(x):
A = 1e4
return [A*x[0]*x[1] - 1, np.exp(-x[0]) + np.exp(-x[1]) - (1 + 1/A)]
F4_powell.xin = [-1, -2]
F4_powell.KNOWN_BAD = {'linearmixing': nonlin.linearmixing,
'excitingmixing': nonlin.excitingmixing,
'diagbroyden': nonlin.diagbroyden}
def F5(x):
return pressure_network(x, 4, np.array([.5, .5, .5, .5]))
F5.xin = [2., 0, 2, 0]
F5.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
def F6(x):
x1, x2 = x
J0 = np.array([[-4.256, 14.7],
[0.8394989, 0.59964207]])
v = np.array([(x1 + 3) * (x2**5 - 7) + 3*6,
np.sin(x2 * np.exp(x1) - 1)])
return -np.linalg.solve(J0, v)
F6.xin = [-0.5, 1.4]
F6.KNOWN_BAD = {'excitingmixing': nonlin.excitingmixing,
'linearmixing': nonlin.linearmixing,
'diagbroyden': nonlin.diagbroyden}
#-------------------------------------------------------------------------------
# Tests
#-------------------------------------------------------------------------------
class TestNonlin(object):
"""
Check the Broyden methods for a few test problems.
broyden1, broyden2, and newton_krylov must succeed for
all functions. Some of the others don't -- tests in KNOWN_BAD are skipped.
"""
def _check_nonlin_func(self, f, func, f_tol=1e-2):
x = func(f, f.xin, f_tol=f_tol, maxiter=200, verbose=0)
assert_(np.absolute(f(x)).max() < f_tol)
def _check_root(self, f, method, f_tol=1e-2):
res = root(f, f.xin, method=method,
options={'ftol': f_tol, 'maxiter': 200, 'disp': 0})
assert_(np.absolute(res.fun).max() < f_tol)
@dec.knownfailureif(True)
def _check_func_fail(self, *a, **kw):
pass
def test_problem_nonlin(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for func in SOLVERS.values():
if func in f.KNOWN_BAD.values():
if func in MUST_WORK.values():
yield self._check_func_fail, f, func
continue
yield self._check_nonlin_func, f, func
def test_tol_norm_called(self):
# Check that supplying tol_norm keyword to nonlin_solve works
self._tol_norm_used = False
def local_norm_func(x):
self._tol_norm_used = True
return np.absolute(x).max()
nonlin.newton_krylov(F, F.xin, f_tol=1e-2, maxiter=200, verbose=0,
tol_norm=local_norm_func)
assert_(self._tol_norm_used)
def test_problem_root(self):
for f in [F, F2, F2_lucky, F3, F4_powell, F5, F6]:
for meth in SOLVERS:
if meth in f.KNOWN_BAD:
if meth in MUST_WORK:
yield self._check_func_fail, f, meth
continue
yield self._check_root, f, meth
class TestSecant(TestCase):
"""Check that some Jacobian approximations satisfy the secant condition"""
xs = [np.array([1,2,3,4,5], float),
np.array([2,3,4,5,1], float),
np.array([3,4,5,1,2], float),
np.array([4,5,1,2,3], float),
np.array([9,1,9,1,3], float),
np.array([0,1,9,1,3], float),
np.array([5,5,7,1,1], float),
np.array([1,2,7,5,1], float),]
fs = [x**2 - 1 for x in xs]
def _check_secant(self, jac_cls, npoints=1, **kw):
"""
Check that the given Jacobian approximation satisfies secant
        conditions for the last `npoints` points.
"""
jac = jac_cls(**kw)
jac.setup(self.xs[0], self.fs[0], None)
for j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
jac.update(x, f)
for k in xrange(min(npoints, j+1)):
dx = self.xs[j-k+1] - self.xs[j-k]
df = self.fs[j-k+1] - self.fs[j-k]
assert_(np.allclose(dx, jac.solve(df)))
# Check that the `npoints` secant bound is strict
if j >= npoints:
dx = self.xs[j-npoints+1] - self.xs[j-npoints]
df = self.fs[j-npoints+1] - self.fs[j-npoints]
assert_(not np.allclose(dx, jac.solve(df)))
def test_broyden1(self):
self._check_secant(nonlin.BroydenFirst)
def test_broyden2(self):
self._check_secant(nonlin.BroydenSecond)
def test_broyden1_update(self):
# Check that BroydenFirst update works as for a dense matrix
jac = nonlin.BroydenFirst(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
B = np.identity(5) * (-1/0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
B += (df - dot(B, dx))[:,None] * dx[None,:] / dot(dx, dx)
jac.update(x, f)
assert_(np.allclose(jac.todense(), B, rtol=1e-10, atol=1e-13))
def test_broyden2_update(self):
# Check that BroydenSecond update works as for a dense matrix
jac = nonlin.BroydenSecond(alpha=0.1)
jac.setup(self.xs[0], self.fs[0], None)
H = np.identity(5) * (-0.1)
for last_j, (x, f) in enumerate(zip(self.xs[1:], self.fs[1:])):
df = f - self.fs[last_j]
dx = x - self.xs[last_j]
H += (dx - dot(H, df))[:,None] * df[None,:] / dot(df, df)
jac.update(x, f)
assert_(np.allclose(jac.todense(), inv(H), rtol=1e-10, atol=1e-13))
def test_anderson(self):
# Anderson mixing (with w0=0) satisfies secant conditions
# for the last M iterates, see [Ey]_
#
# .. [Ey] V. Eyert, J. Comp. Phys., 124, 271 (1996).
self._check_secant(nonlin.Anderson, M=3, w0=0, npoints=3)
class TestLinear(TestCase):
"""Solve a linear equation;
some methods find the exact solution in a finite number of steps"""
def _check(self, jac, N, maxiter, complex=False, **kw):
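        """Solve a random linear system with the given Jacobian approximation and check the solution."""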
np.random.seed(123)
A = np.random.randn(N, N)
if complex:
A = A + 1j*np.random.randn(N, N)
b = np.random.randn(N)
if complex:
b = b + 1j*np.random.randn(N)
def func(x):
return dot(A, x) - b
sol = nonlin.nonlin_solve(func, np.zeros(N), jac, maxiter=maxiter,
f_tol=1e-6, line_search=None, verbose=0)
assert_(np.allclose(dot(A, sol), b, atol=1e-6))
def test_broyden1(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenFirst(alpha=1.0), 20, 41, True)
def test_broyden2(self):
# Broyden methods solve linear systems exactly in 2*N steps
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, False)
self._check(nonlin.BroydenSecond(alpha=1.0), 20, 41, True)
def test_anderson(self):
# Anderson is rather similar to Broyden, if given enough storage space
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, False)
self._check(nonlin.Anderson(M=50, alpha=1.0), 20, 29, True)
def test_krylov(self):
# Krylov methods solve linear systems exactly in N inner steps
self._check(nonlin.KrylovJacobian, 20, 2, False, inner_m=10)
self._check(nonlin.KrylovJacobian, 20, 2, True, inner_m=10)
class TestJacobianDotSolve(object):
"""Check that solve/dot methods in Jacobian approximations are consistent"""
def _func(self, x):
return x**2 - 1 + np.dot(self.A, x)
def _check_dot(self, jac_cls, complex=False, tol=1e-6, **kw):
np.random.seed(123)
N = 7
def rand(*a):
q = np.random.rand(*a)
if complex:
q = q + 1j*np.random.rand(*a)
return q
def assert_close(a, b, msg):
d = abs(a - b).max()
f = tol + abs(b).max()*tol
if d > f:
raise AssertionError('%s: err %g' % (msg, d))
self.A = rand(N, N)
# initialize
x0 = np.random.rand(N)
jac = jac_cls(**kw)
jac.setup(x0, self._func(x0), self._func)
# check consistency
for k in xrange(2*N):
v = rand(N)
if hasattr(jac, '__array__'):
Jd = np.array(jac)
if hasattr(jac, 'solve'):
Gv = jac.solve(v)
Gv2 = np.linalg.solve(Jd, v)
assert_close(Gv, Gv2, 'solve vs array')
if hasattr(jac, 'rsolve'):
Gv = jac.rsolve(v)
Gv2 = np.linalg.solve(Jd.T.conj(), v)
assert_close(Gv, Gv2, 'rsolve vs array')
if hasattr(jac, 'matvec'):
Jv = jac.matvec(v)
Jv2 = np.dot(Jd, v)
assert_close(Jv, Jv2, 'dot vs array')
if hasattr(jac, 'rmatvec'):
Jv = jac.rmatvec(v)
Jv2 = np.dot(Jd.T.conj(), v)
assert_close(Jv, Jv2, 'rmatvec vs array')
if hasattr(jac, 'matvec') and hasattr(jac, 'solve'):
Jv = jac.matvec(v)
Jv2 = jac.solve(jac.matvec(Jv))
assert_close(Jv, Jv2, 'dot vs solve')
if hasattr(jac, 'rmatvec') and hasattr(jac, 'rsolve'):
Jv = jac.rmatvec(v)
Jv2 = jac.rmatvec(jac.rsolve(Jv))
assert_close(Jv, Jv2, 'rmatvec vs rsolve')
x = rand(N)
jac.update(x, self._func(x))
def test_broyden1(self):
self._check_dot(nonlin.BroydenFirst, complex=False)
self._check_dot(nonlin.BroydenFirst, complex=True)
def test_broyden2(self):
self._check_dot(nonlin.BroydenSecond, complex=False)
self._check_dot(nonlin.BroydenSecond, complex=True)
def test_anderson(self):
self._check_dot(nonlin.Anderson, complex=False)
self._check_dot(nonlin.Anderson, complex=True)
def test_diagbroyden(self):
self._check_dot(nonlin.DiagBroyden, complex=False)
self._check_dot(nonlin.DiagBroyden, complex=True)
def test_linearmixing(self):
self._check_dot(nonlin.LinearMixing, complex=False)
self._check_dot(nonlin.LinearMixing, complex=True)
def test_excitingmixing(self):
self._check_dot(nonlin.ExcitingMixing, complex=False)
self._check_dot(nonlin.ExcitingMixing, complex=True)
def test_krylov(self):
self._check_dot(nonlin.KrylovJacobian, complex=False, tol=1e-4)
self._check_dot(nonlin.KrylovJacobian, complex=True, tol=1e-4)
class TestNonlinOldTests(TestCase):
""" Test case for a simple constrained entropy maximization problem
(the machine translation example of Berger et al in
Computational Linguistics, vol 22, num 1, pp 39--72, 1996.)
"""
def test_broyden1(self):
x = nonlin.broyden1(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_broyden2(self):
x = nonlin.broyden2(F,F.xin,iter=12,alpha=1)
assert_(nonlin.norm(x) < 1e-9)
assert_(nonlin.norm(F(x)) < 1e-9)
def test_anderson(self):
x = nonlin.anderson(F,F.xin,iter=12,alpha=0.03,M=5)
assert_(nonlin.norm(x) < 0.33)
def test_linearmixing(self):
x = nonlin.linearmixing(F,F.xin,iter=60,alpha=0.5)
assert_(nonlin.norm(x) < 1e-7)
assert_(nonlin.norm(F(x)) < 1e-7)
def test_exciting(self):
x = nonlin.excitingmixing(F,F.xin,iter=20,alpha=0.5)
assert_(nonlin.norm(x) < 1e-5)
assert_(nonlin.norm(F(x)) < 1e-5)
def test_diagbroyden(self):
x = nonlin.diagbroyden(F,F.xin,iter=11,alpha=1)
assert_(nonlin.norm(x) < 1e-8)
assert_(nonlin.norm(F(x)) < 1e-8)
def test_root_broyden1(self):
res = root(F, F.xin, method='broyden1',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_broyden2(self):
res = root(F, F.xin, method='broyden2',
options={'nit': 12, 'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-9)
assert_(nonlin.norm(res.fun) < 1e-9)
def test_root_anderson(self):
res = root(F, F.xin, method='anderson',
options={'nit': 12,
'jac_options': {'alpha': 0.03, 'M': 5}})
assert_(nonlin.norm(res.x) < 0.33)
def test_root_linearmixing(self):
res = root(F, F.xin, method='linearmixing',
options={'nit': 60,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-7)
assert_(nonlin.norm(res.fun) < 1e-7)
def test_root_excitingmixing(self):
res = root(F, F.xin, method='excitingmixing',
options={'nit': 20,
'jac_options': {'alpha': 0.5}})
assert_(nonlin.norm(res.x) < 1e-5)
assert_(nonlin.norm(res.fun) < 1e-5)
def test_root_diagbroyden(self):
res = root(F, F.xin, method='diagbroyden',
options={'nit': 11,
'jac_options': {'alpha': 1}})
assert_(nonlin.norm(res.x) < 1e-8)
assert_(nonlin.norm(res.fun) < 1e-8)
if __name__ == "__main__":
run_module_suite()
| mit |
hydrospanner/DForurm | DForurm/env/Lib/site-packages/django/contrib/auth/base_user.py | 59 | 4973 | """
This module allows importing AbstractBaseUser even when django.contrib.auth is
not in INSTALLED_APPS.
"""
from __future__ import unicode_literals
import unicodedata
from django.contrib.auth import password_validation
from django.contrib.auth.hashers import (
check_password, is_password_usable, make_password,
)
from django.db import models
from django.utils.crypto import get_random_string, salted_hmac
from django.utils.deprecation import CallableFalse, CallableTrue
from django.utils.encoding import force_text, python_2_unicode_compatible
from django.utils.translation import ugettext_lazy as _
class BaseUserManager(models.Manager):
@classmethod
def normalize_email(cls, email):
"""
Normalize the email address by lowercasing the domain part of it.
"""
email = email or ''
try:
email_name, domain_part = email.strip().rsplit('@', 1)
except ValueError:
pass
else:
email = '@'.join([email_name, domain_part.lower()])
return email
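# Illustrative doctest-style sketch (not taken from the Django sources):
# only the domain half of the address is lowercased; the local part is kept.
# >>> BaseUserManager.normalize_email('John.Doe@EXAMPLE.COM')
# 'John.Doe@example.com'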
def make_random_password(self, length=10,
allowed_chars='abcdefghjkmnpqrstuvwxyz'
'ABCDEFGHJKLMNPQRSTUVWXYZ'
'23456789'):
"""
Generate a random password with the given length and given
allowed_chars. The default value of allowed_chars does not have "I" or
"O" or letters and digits that look similar -- just to avoid confusion.
"""
return get_random_string(length, allowed_chars)
def get_by_natural_key(self, username):
return self.get(**{self.model.USERNAME_FIELD: username})
@python_2_unicode_compatible
class AbstractBaseUser(models.Model):
password = models.CharField(_('password'), max_length=128)
last_login = models.DateTimeField(_('last login'), blank=True, null=True)
is_active = True
REQUIRED_FIELDS = []
class Meta:
abstract = True
def get_username(self):
"Return the identifying username for this User"
return getattr(self, self.USERNAME_FIELD)
def __init__(self, *args, **kwargs):
super(AbstractBaseUser, self).__init__(*args, **kwargs)
# Stores the raw password if set_password() is called so that it can
# be passed to password_changed() after the model is saved.
self._password = None
def __str__(self):
return self.get_username()
def clean(self):
setattr(self, self.USERNAME_FIELD, self.normalize_username(self.get_username()))
def save(self, *args, **kwargs):
super(AbstractBaseUser, self).save(*args, **kwargs)
if self._password is not None:
password_validation.password_changed(self._password, self)
self._password = None
def natural_key(self):
return (self.get_username(),)
@property
def is_anonymous(self):
"""
Always return False. This is a way of comparing User objects to
anonymous users.
"""
return CallableFalse
@property
def is_authenticated(self):
"""
Always return True. This is a way to tell if the user has been
authenticated in templates.
"""
return CallableTrue
def set_password(self, raw_password):
self.password = make_password(raw_password)
self._password = raw_password
def check_password(self, raw_password):
"""
Return a boolean of whether the raw_password was correct. Handles
hashing formats behind the scenes.
"""
def setter(raw_password):
self.set_password(raw_password)
# Password hash upgrades shouldn't be considered password changes.
self._password = None
self.save(update_fields=["password"])
return check_password(raw_password, self.password, setter)
def set_unusable_password(self):
# Set a value that will never be a valid hash
self.password = make_password(None)
def has_usable_password(self):
return is_password_usable(self.password)
def get_full_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_full_name() method')
def get_short_name(self):
raise NotImplementedError('subclasses of AbstractBaseUser must provide a get_short_name() method.')
def get_session_auth_hash(self):
"""
Return an HMAC of the password field.
"""
key_salt = "django.contrib.auth.models.AbstractBaseUser.get_session_auth_hash"
return salted_hmac(key_salt, self.password).hexdigest()
@classmethod
def get_email_field_name(cls):
try:
return cls.EMAIL_FIELD
except AttributeError:
return 'email'
@classmethod
def normalize_username(cls, username):
return unicodedata.normalize('NFKC', force_text(username))
| mit |
Chandlercjy/OnePy | OnePy/environment.py | 1 | 5171 | import logging
from collections import defaultdict
import arrow
import OnePy as op
from OnePy.event_engine import EventEngine
from OnePy.utils.easy_func import get_day_ratio
class Environment(object):
"""作为全局共享变量为各模块提供支持"""
# general context
sys_date: str = None
sys_frequency: str = None
instrument: str = None
fromdate: str = None
todate: str = None
tickers: list = []
# general setting
execute_on_close_or_next_open: str = 'open'
is_save_original: bool = False # switch: whether to save the original signals
is_live_trading: bool = False
is_show_today_signals: bool = False # switch: whether to show the latest signals of the current bar
# backtest modules dict
readers: dict = {}
feeds: dict = {}
cleaners: dict = {}
cleaners_feeds: dict = {}
strategies: dict = {}
brokers: dict = {}
risk_managers: dict = {}
recorders: dict = {}
recorder = None # type: op.RecorderBase
# system memory
signals_normal: list = [] # stores all of the original signals
signals_pending: list = [] # stores all of the original pending-order signals
signals_trigger: list = [] # stores all of the original trigger-order signals
signals_cancel: list = [] # stores all of the original cancel signals
# transient signals, refreshed continuously
signals_normal_cur: list = []
signals_pending_cur: list = []
signals_trigger_cur: list = []
signals_cancel_cur: list = []
orders_mkt_normal_cur: list = [] # current orders, refreshed continuously
orders_child_of_mkt_dict: dict = {} # pending orders that follow market orders
orders_mkt_absolute_cur: list = [] # triggered pending orders and their fill info, refreshed continuously
orders_mkt_submitted_cur: list = [] # filled orders, refreshed continuously
orders_pending: list = [] # pending orders, removed once triggered
orders_cancel_cur: list = [] # cancel orders, refreshed continuously
orders_cancel_submitted_cur: list = [] # submitted cancel orders, refreshed continuously
cur_suspended_tickers: list = [] # tickers currently suspended or without fresh data
suspended_tickers_record: defaultdict = defaultdict(list) # records suspensions
# system modules
logger = logging.getLogger("OnePy")
event_engine = EventEngine()
cache: dict = {}
@classmethod
def initialize_env(cls):
"""刷新environment防止缓存累积"""
cls.signals_normal.clear()
cls.signals_pending.clear()
cls.signals_trigger.clear()
cls.signals_cancel.clear()
cls.signals_normal_cur.clear()
cls.signals_pending_cur.clear()
cls.signals_trigger_cur.clear()
cls.signals_cancel_cur.clear()
cls.orders_mkt_normal_cur.clear()
cls.orders_mkt_absolute_cur.clear()
cls.orders_mkt_submitted_cur.clear()
cls.orders_pending.clear()
cls.orders_child_of_mkt_dict.clear()
cls.orders_cancel_cur.clear()
cls.orders_cancel_submitted_cur.clear()
cls.tickers.clear()
cls.cur_suspended_tickers.clear()
cls.suspended_tickers_record.clear()
cls.cache.clear()
if not cls.is_live_trading:
ratio = get_day_ratio(cls.sys_frequency)
cls.sys_date = arrow.get(cls.fromdate).shift(
days=-ratio).format('YYYY-MM-DD HH:mm:ss')
cls.reset_all_counters()
@classmethod
def clear_modules(cls):
"""刷新environment防止缓存累积"""
cls.sys_date: str = None
cls.sys_frequency: str = None
cls.instrument: str = None
cls.fromdate: str = None
cls.todate: str = None
cls.tickers: list = []
cls.cur_suspended_tickers: list = []
cls.suspended_tickers_record: defaultdict = defaultdict(list)
cls.market_maker = None
cls.readers: dict = {}
cls.feeds: dict = {}
cls.cleaners: dict = {}
cls.cleaners_feeds: dict = {}
cls.strategies: dict = {}
cls.brokers: dict = {}
cls.risk_managers: dict = {}
cls.recorders: dict = {}
cls.recorder = None # type: op.RecorderBase
cls.event_loop = None # type: List[Dict]
cls.cache = {}
cls.execute_on_close_or_next_open: str = 'open'
cls.is_save_original: bool = False
cls.is_live_trading: bool = False
cls.is_show_today_signals: bool = False
@classmethod
def reset_all_counters(cls):
from itertools import count
from OnePy.sys_module.models import signals
from OnePy.sys_module.base_cleaner import CleanerBase
from OnePy.sys_module.models.orders.base_order import OrderBase
from OnePy.sys_module.components.order_generator import OrderGenerator
CleanerBase.counter = count(1)
signals.Signal.counter = count(1)
signals.SignalByTrigger.counter = count(1)
signals.SignalForPending.counter = count(1)
signals.SignalCancelTST.counter = count(1)
signals.SignalCancelPending.counter = count(1)
OrderBase.counter = count(1)
OrderGenerator.counter = count(1)
| mit |
marco-lancini/Showcase | django/utils/dates.py | 488 | 2237 | "Commonly-used date structures"
from django.utils.translation import ugettext_lazy as _, pgettext_lazy
WEEKDAYS = {
0:_('Monday'), 1:_('Tuesday'), 2:_('Wednesday'), 3:_('Thursday'), 4:_('Friday'),
5:_('Saturday'), 6:_('Sunday')
}
WEEKDAYS_ABBR = {
0:_('Mon'), 1:_('Tue'), 2:_('Wed'), 3:_('Thu'), 4:_('Fri'),
5:_('Sat'), 6:_('Sun')
}
WEEKDAYS_REV = {
'monday':0, 'tuesday':1, 'wednesday':2, 'thursday':3, 'friday':4,
'saturday':5, 'sunday':6
}
MONTHS = {
1:_('January'), 2:_('February'), 3:_('March'), 4:_('April'), 5:_('May'), 6:_('June'),
7:_('July'), 8:_('August'), 9:_('September'), 10:_('October'), 11:_('November'),
12:_('December')
}
MONTHS_3 = {
1:_('jan'), 2:_('feb'), 3:_('mar'), 4:_('apr'), 5:_('may'), 6:_('jun'),
7:_('jul'), 8:_('aug'), 9:_('sep'), 10:_('oct'), 11:_('nov'), 12:_('dec')
}
MONTHS_3_REV = {
'jan':1, 'feb':2, 'mar':3, 'apr':4, 'may':5, 'jun':6, 'jul':7, 'aug':8,
'sep':9, 'oct':10, 'nov':11, 'dec':12
}
MONTHS_AP = { # month names in Associated Press style
1: pgettext_lazy('abbrev. month', 'Jan.'),
2: pgettext_lazy('abbrev. month', 'Feb.'),
3: pgettext_lazy('abbrev. month', 'March'),
4: pgettext_lazy('abbrev. month', 'April'),
5: pgettext_lazy('abbrev. month', 'May'),
6: pgettext_lazy('abbrev. month', 'June'),
7: pgettext_lazy('abbrev. month', 'July'),
8: pgettext_lazy('abbrev. month', 'Aug.'),
9: pgettext_lazy('abbrev. month', 'Sept.'),
10: pgettext_lazy('abbrev. month', 'Oct.'),
11: pgettext_lazy('abbrev. month', 'Nov.'),
12: pgettext_lazy('abbrev. month', 'Dec.')
}
MONTHS_ALT = { # required for long date representation by some locales
1: pgettext_lazy('alt. month', 'January'),
2: pgettext_lazy('alt. month', 'February'),
3: pgettext_lazy('alt. month', 'March'),
4: pgettext_lazy('alt. month', 'April'),
5: pgettext_lazy('alt. month', 'May'),
6: pgettext_lazy('alt. month', 'June'),
7: pgettext_lazy('alt. month', 'July'),
8: pgettext_lazy('alt. month', 'August'),
9: pgettext_lazy('alt. month', 'September'),
10: pgettext_lazy('alt. month', 'October'),
11: pgettext_lazy('alt. month', 'November'),
12: pgettext_lazy('alt. month', 'December')
}
| mit |
rzhxeo/youtube-dl | youtube_dl/extractor/radiode.py | 22 | 1776 | from __future__ import unicode_literals
import json
from .common import InfoExtractor
class RadioDeIE(InfoExtractor):
IE_NAME = 'radio.de'
_VALID_URL = r'https?://(?P<id>.+?)\.(?:radio\.(?:de|at|fr|pt|es|pl|it)|rad\.io)'
_TEST = {
'url': 'http://ndr2.radio.de/',
'md5': '3b4cdd011bc59174596b6145cda474a4',
'info_dict': {
'id': 'ndr2',
'ext': 'mp3',
'title': 're:^NDR 2 [0-9]{4}-[0-9]{2}-[0-9]{2} [0-9]{2}:[0-9]{2}$',
'description': 'md5:591c49c702db1a33751625ebfb67f273',
'thumbnail': 're:^https?://.*\.png',
},
'params': {
'skip_download': True,
}
}
def _real_extract(self, url):
radio_id = self._match_id(url)
webpage = self._download_webpage(url, radio_id)
broadcast = json.loads(self._search_regex(
r'_getBroadcast\s*=\s*function\(\s*\)\s*{\s*return\s+({.+?})\s*;\s*}',
webpage, 'broadcast'))
title = self._live_title(broadcast['name'])
description = broadcast.get('description') or broadcast.get('shortDescription')
thumbnail = broadcast.get('picture4Url') or broadcast.get('picture4TransUrl')
formats = [{
'url': stream['streamUrl'],
'ext': stream['streamContentFormat'].lower(),
'acodec': stream['streamContentFormat'],
'abr': stream['bitRate'],
'asr': stream['sampleRate']
} for stream in broadcast['streamUrls']]
self._sort_formats(formats)
return {
'id': radio_id,
'title': title,
'description': description,
'thumbnail': thumbnail,
'is_live': True,
'formats': formats,
}
| unlicense |
andykimpe/chromium-test-npapi | tools/resources/list_resources_removed_by_repack.py | 95 | 3297 | #!/usr/bin/env python
# Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import os
import re
import sys
usage = """%s BUILDTYPE BUILDDIR
BUILDTYPE: either chromium or chrome.
BUILDDIR: The path to the output directory. e.g. relpath/to/out/Release
Prints out (to stdout) the sorted list of resource ids that are marked as
unused during the repacking process in the given build log (via stdin).
Additionally, attempt to print out the name of the resource and the generated
header file that contains the resource.
This script is used to print the list of resources that are not used so that
developers will notice and fix their .grd files.
"""
def GetResourceIdsFromRepackMessage(in_data):
"""Returns sorted set of resource ids that are not used from in_data.
"""
unused_resources = set()
unused_pattern = re.compile(
'RePackFromDataPackStrings Removed Key: (?P<resource_id>[0-9]+)')
for line in in_data:
match = unused_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
unused_resources.add(resource_id)
return sorted(unused_resources)
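# Illustrative sketch (the sample log line below is assumed, constructed from
# the regular expression above rather than copied from a real build log):
# >>> GetResourceIdsFromRepackMessage(['RePackFromDataPackStrings Removed Key: 1234'])
# [1234]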
def Main():
if len(sys.argv) != 3:
sys.stderr.write(usage % sys.argv[0])
return 1
build_type = sys.argv[1]
build_dir = sys.argv[2]
if build_type not in ('chromium', 'chrome'):
sys.stderr.write(usage % sys.argv[0])
return 1
generated_output_dir = os.path.join(build_dir, 'gen')
if not os.path.exists(generated_output_dir):
sys.stderr.write('Cannot find gen dir %s' % generated_output_dir)
return 1
if build_type == 'chromium':
excluded_header = 'google_chrome_strings.h'
else:
excluded_header = 'chromium_strings.h'
data_files = []
for root, dirs, files in os.walk(generated_output_dir):
if os.path.basename(root) != 'grit':
continue
header_files = [header for header in files if header.endswith('.h')]
if excluded_header in header_files:
header_files.remove(excluded_header)
data_files.extend([os.path.join(root, header) for header in header_files])
resource_id_to_name_file_map = {}
resource_pattern = re.compile('#define (?P<resource_name>[A-Z0-9_]+).* '
'(?P<resource_id>[0-9]+)$')
for f in data_files:
data = open(f).read()
for line in data.splitlines():
match = resource_pattern.match(line)
if match:
resource_id = int(match.group('resource_id'))
resource_name = match.group('resource_name')
if resource_id in resource_id_to_name_file_map:
print 'Duplicate:', resource_id
print (resource_name, f)
print resource_id_to_name_file_map[resource_id]
raise
resource_id_to_name_file_map[resource_id] = (resource_name, f)
unused_resources = GetResourceIdsFromRepackMessage(sys.stdin)
for resource_id in unused_resources:
if resource_id not in resource_id_to_name_file_map:
print 'WARNING: Unknown resource id', resource_id
continue
(resource_name, filename) = resource_id_to_name_file_map[resource_id]
sys.stdout.write('%d: %s in %s\n' % (resource_id, resource_name, filename))
return 0
if __name__ == '__main__':
sys.exit(Main())
| bsd-3-clause |
gitaarik/monkful | tests/tests/basic_resource/put_identifier_field.py | 2 | 5212 | import unittest
import json
from datetime import datetime
from pymongo import MongoClient
from apps.basic_resource import server
from apps.basic_resource.documents import Article, Comment
class ResourcePutIdentifierField(unittest.TestCase):
"""
Test that an HTTP PUT which updates a resource containing an embedded
document with an identifier field (used in the update) returns the
right response and updates the document correctly.
"""
@classmethod
def setUpClass(cls):
cls.app = server.app.test_client()
cls.mongo_client = MongoClient()
cls.initial_data = {
'title': "Test title",
'text': "Test text",
'publish': True,
'publish_date': datetime(2013, 10, 9, 8, 7, 8),
'comments': [
Comment(text="Test comment "),
Comment(text="Test comment 2"),
Comment(text="Test comment 3"),
],
'top_comment': Comment(text="Top comment"),
'tags': ['test', 'unittest', 'python', 'flask']
}
cls.article = Article(**cls.initial_data).save()
# the `id` field is the identifier field (duh)
cls.comments_update = {
'comments': [
{
'id': unicode(cls.article['comments'][0]['id']),
'text': "Test comment update"
},
{
'id': unicode(cls.article['comments'][1]['id']),
'text': "Test comment update 2"
}
]
}
cls.response = cls.app.put(
'/articles/{}/'.format(unicode(cls.article['id'])),
headers={'content-type': 'application/json'},
data=json.dumps(cls.comments_update)
)
@classmethod
def tearDownClass(cls):
cls.mongo_client.unittest_monkful.article.remove()
def test_status_code(self):
"""
Test if the response status code is 200.
"""
self.assertEqual(self.response.status_code, 200)
def test_content_type(self):
"""
Test if the content-type header is 'application/json'.
"""
self.assertEqual(
self.response.headers['content-type'],
'application/json'
)
def test_json(self):
"""
Test if the response data is valid JSON.
"""
try:
json.loads(self.response.data)
except:
self.fail("Response is not valid JSON.")
def test_content(self):
"""
Test if the deserialized response data evaluates back to the
data we sent to the resource in `setUpClass`.
"""
response_data = json.loads(self.response.data)
# Remap the response data so that it only has the fields our
# original data also had.
response_data = {
'title': response_data['title'],
'text': response_data['text'],
'publish': response_data['publish'],
'publish_date': response_data['publish_date'],
'comments': [
{
'id': response_data['comments'][0]['id'],
'text': response_data['comments'][0]['text']
},
{
'id': response_data['comments'][1]['id'],
'text': response_data['comments'][1]['text']
}
],
'top_comment': {
'text': response_data['top_comment']['text']
},
'tags': response_data['tags']
}
self.assertEqual(
response_data,
{
'title': self.initial_data['title'],
'text': self.initial_data['text'],
'publish': self.initial_data['publish'],
'publish_date': self.initial_data['publish_date'].isoformat(),
'comments': self.comments_update['comments'],
'top_comment': {
'text': self.initial_data['top_comment']['text']
},
'tags': self.initial_data['tags']
}
)
def test_documents(self):
"""
Test if the PUT data really ended up in the documents.
"""
article = Article.objects[0]
self.assertEqual(article.title, self.initial_data['title'])
self.assertEqual(article.text, self.initial_data['text'])
self.assertEqual(article.publish, self.initial_data['publish'])
self.assertEqual(
article.publish_date,
self.initial_data['publish_date']
)
self.assertEqual(
article.comments[0].text,
self.comments_update['comments'][0]['text']
)
self.assertEqual(
article.comments[1].text,
self.comments_update['comments'][1]['text']
)
# The complete `comments` field should've been overwritten so
# there should be only 2 comments instead of 3.
self.assertEqual(len(article.comments), 2)
self.assertEqual(
article.tags,
self.initial_data['tags']
)
| lgpl-3.0 |
nvie/python-mode | pymode/environment.py | 11 | 6338 | """ Define interfaces. """
from __future__ import print_function
import vim
import json
import time
import os.path
from ._compat import PY2
class VimPymodeEnviroment(object):
""" Vim User interface. """
prefix = '[Pymode]'
def __init__(self):
""" Init VIM environment. """
self.current = vim.current
self.options = dict(encoding=vim.eval('&enc'))
self.options['debug'] = self.var('g:pymode_debug', True)
@property
def curdir(self):
""" Return current working directory. """
return self.var('getcwd()')
@property
def curbuf(self):
""" Return current buffer. """
return self.current.buffer
@property
def cursor(self):
""" Return current window position.
:return tuple: (row, col)
"""
return self.current.window.cursor
@property
def source(self):
""" Return source of current buffer. """
return "\n".join(self.lines)
@property
def lines(self):
""" Iterate by lines in current file.
:return list:
"""
if not PY2:
return self.curbuf
return [l.decode(self.options.get('encoding')) for l in self.curbuf]
@staticmethod
def var(name, to_bool=False, silence=False):
""" Get vim variable.
:return vimobj:
"""
try:
value = vim.eval(name)
except vim.error:
if silence:
return None
raise
if to_bool:
try:
value = bool(int(value))
except ValueError:
value = value
return value
@staticmethod
def message(msg, history=False):
""" Show message to user.
:return: :None
"""
if history:
return vim.command('echom "%s"' % str(msg))
return vim.command('call pymode#wide_message("%s")' % str(msg))
def user_input(self, msg, default=''):
""" Return user input or default.
:return str:
"""
msg = '%s %s ' % (self.prefix, msg)
if default != '':
msg += '[%s] ' % default
try:
vim.command('echohl Debug')
input_str = vim.eval('input("%s> ")' % msg)
vim.command('echohl none')
except KeyboardInterrupt:
input_str = ''
return input_str or default
def user_confirm(self, msg, yes=False):
""" Get user confirmation.
:return bool:
"""
default = 'yes' if yes else 'no'
action = self.user_input(msg, default)
return action and 'yes'.startswith(action)
def user_input_choices(self, msg, *options):
""" Get one of many options.
:return str: A chosen option
"""
choices = ['%s %s' % (self.prefix, msg)]
choices += [
"%s. %s" % (num, opt) for num, opt in enumerate(options, 1)]
try:
input_str = int(
vim.eval('inputlist(%s)' % self.prepare_value(choices)))
except (KeyboardInterrupt, ValueError):
input_str = 0
if not input_str:
self.message('Cancelled!')
return False
try:
return options[input_str - 1]
except (IndexError, ValueError):
self.error('Invalid option: %s' % input_str)
return self.user_input_choices(msg, *options)
@staticmethod
def error(msg):
""" Show error to user. """
vim.command('call pymode#error("%s")' % str(msg))
def debug(self, msg, *args):
""" Print debug information. """
if self.options.get('debug'):
print("%s %s [%s]" % (
int(time.time()), msg, ', '.join([str(a) for a in args])))
def stop(self, value=None):
""" Break Vim function. """
cmd = 'return'
if value is not None:
cmd += ' ' + self.prepare_value(value)
vim.command(cmd)
def catch_exceptions(self, func):
""" Decorator. Make execution more silence.
:return func:
"""
def _wrapper(*args, **kwargs):
try:
return func(*args, **kwargs)
except (Exception, vim.error) as e: # noqa
if self.options.get('debug'):
raise
self.error(e)
return None
return _wrapper
def run(self, name, *args):
""" Run vim function. """
vim.command('call %s(%s)' % (name, ", ".join([
self.prepare_value(a) for a in args
])))
def let(self, name, value):
""" Set variable. """
cmd = 'let %s = %s' % (name, self.prepare_value(value))
self.debug(cmd)
vim.command(cmd)
def prepare_value(self, value, dumps=True):
""" Decode bstr to vim encoding.
:return unicode string:
"""
if dumps:
value = json.dumps(value)
if PY2:
value = value.decode('utf-8').encode(self.options.get('encoding'))
return value
def get_offset_params(self, cursor=None, base=""):
""" Calculate current offset.
:return tuple: (source, offset)
"""
row, col = cursor or env.cursor
source = ""
offset = 0
for i, line in enumerate(self.lines, 1):
if i == row:
source += line[:col] + base
offset = len(source)
source += line[col:]
else:
source += line
source += '\n'
env.debug('Get offset', base or None, row, col, offset)
return source, offset
@staticmethod
def goto_line(line):
""" Go to line. """
vim.command('normal %sggzz' % line)
def goto_file(self, path, cmd='e', force=False):
""" Function description. """
if force or os.path.abspath(path) != self.curbuf.name:
self.debug('read', path)
if ' ' in path and os.name == 'posix':
path = path.replace(' ', '\\ ')
vim.command("%s %s" % (cmd, path))
@staticmethod
def goto_buffer(bufnr):
""" Open buffer. """
if str(bufnr) != '-1':
vim.command('buffer %s' % bufnr)
env = VimPymodeEnviroment()
| lgpl-3.0 |
ZLLab-Mooc/edx-platform | cms/djangoapps/contentstore/views/program.py | 15 | 2058 | """Programs views for use with Studio."""
from django.contrib.auth.decorators import login_required
from django.core.urlresolvers import reverse
from django.http import Http404, JsonResponse
from django.utils.decorators import method_decorator
from django.views.generic import View
from edxmako.shortcuts import render_to_response
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.lib.token_utils import get_id_token
class ProgramAuthoringView(View):
"""View rendering a template which hosts the Programs authoring app.
The Programs authoring app is a Backbone SPA maintained in a separate repository.
The app handles its own routing and provides a UI which can be used to create and
publish new Programs (e.g, XSeries).
"""
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""Populate the template context with values required for the authoring app to run."""
programs_config = ProgramsApiConfig.current()
if programs_config.is_studio_tab_enabled and request.user.is_staff:
return render_to_response('program_authoring.html', {
'show_programs_header': programs_config.is_studio_tab_enabled,
'authoring_app_config': programs_config.authoring_app_config,
'programs_api_url': programs_config.public_api_url,
'programs_token_url': reverse('programs_id_token'),
'studio_home_url': reverse('home'),
})
else:
raise Http404
class ProgramsIdTokenView(View):
"""Provides id tokens to JavaScript clients for use with the Programs API."""
@method_decorator(login_required)
def get(self, request, *args, **kwargs):
"""Generate and return a token, if the integration is enabled."""
if ProgramsApiConfig.current().is_studio_tab_enabled:
id_token = get_id_token(request.user, 'programs')
return JsonResponse({'id_token': id_token})
else:
raise Http404
| agpl-3.0 |
pombredanne/acora | acora/__init__.py | 2 | 10484 | """\
Acora - a multi-keyword search engine based on Aho-Corasick trees.
Usage::
>>> from acora import AcoraBuilder
Collect some keywords::
>>> builder = AcoraBuilder('ab', 'bc', 'de')
>>> builder.add('a', 'b')
Generate the Acora search engine::
>>> ac = builder.build()
Search a string for all occurrences::
>>> ac.findall('abc')
[('a', 0), ('ab', 0), ('b', 1), ('bc', 1)]
>>> ac.findall('abde')
[('a', 0), ('ab', 0), ('b', 1), ('de', 2)]
"""
from __future__ import absolute_import
import sys
IS_PY3 = sys.version_info[0] >= 3
if IS_PY3:
unicode = str
FILE_BUFFER_SIZE = 32 * 1024
class PyAcora(object):
"""A simple (and very slow) Python implementation of the Acora
search engine.
"""
transitions = None
def __init__(self, machine, transitions=None):
if transitions is not None:
# old style format
start_state = machine
self.transitions = dict([
((state.id, char), (target_state.id, target_state.matches))
for ((state, char), target_state) in transitions.items()])
else:
# new style Machine format
start_state = machine.start_state
ignore_case = machine.ignore_case
self.transitions = transitions = {}
child_states = machine.child_states
child_targets = {}
state_matches = {}
needs_bytes_conversion = None
for state in child_states:
state_id = state.id
child_targets[state_id], state_matches[state_id] = (
_merge_targets(state, ignore_case))
if needs_bytes_conversion is None and state_matches[state_id]:
if IS_PY3:
needs_bytes_conversion = any(
isinstance(s, bytes) for s in state_matches[state_id])
elif any(isinstance(s, unicode) for s in state_matches[state_id]):
# in Py2, some keywords might be str even though we're processing unicode
needs_bytes_conversion = False
if needs_bytes_conversion is None and not IS_PY3:
needs_bytes_conversion = True
if needs_bytes_conversion:
if IS_PY3:
convert = ord
else:
from codecs import latin_1_encode
def convert(s):
return latin_1_encode(s)[0]
else:
convert = None
get_child_targets = child_targets.get
get_matches = state_matches.get
state_id = start_state.id
for ch, child in _merge_targets(start_state, ignore_case)[0].items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
for state in child_states:
state_id = state.id
for ch, child in get_child_targets(state_id).items():
child_id = child.id
if convert is not None:
ch = convert(ch)
transitions[(state_id, ch)] = (child_id, get_matches(child_id))
self.start_state = start_state.id
def finditer(self, s):
"""Iterate over all occurrences of any keyword in the string.
Returns (keyword, offset) pairs.
"""
state = self.start_state
start_state = (state, [])
next_state = self.transitions.get
pos = 0
for char in s:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
def findall(self, s):
"""Find all occurrences of any keyword in the string.
Returns a list of (keyword, offset) pairs.
"""
return list(self.finditer(s))
def filefind(self, f):
"""Iterate over all occurrences of any keyword in a file.
Returns (keyword, offset) pairs.
"""
opened = False
if not hasattr(f, 'read'):
f = open(f, 'rb')
opened = True
try:
state = self.start_state
start_state = (state, ())
next_state = self.transitions.get
pos = 0
while 1:
data = f.read(FILE_BUFFER_SIZE)
if not data:
break
for char in data:
pos += 1
state, matches = next_state((state, char), start_state)
if matches:
for match in matches:
yield (match, pos-len(match))
finally:
if opened:
f.close()
def filefindall(self, f):
"""Find all occurrences of any keyword in a file.
Returns a list of (keyword, offset) pairs.
"""
return list(self.filefind(f))
# import from shared Python/Cython module
from acora._acora import (
insert_bytes_keyword, insert_unicode_keyword,
build_trie as _build_trie, build_MachineState as _MachineState, merge_targets as _merge_targets)
# import from Cython module if available
try:
from acora._cacora import (
UnicodeAcora, BytesAcora, insert_bytes_keyword, insert_unicode_keyword)
except ImportError:
# C module not there ...
UnicodeAcora = BytesAcora = PyAcora
class AcoraBuilder(object):
"""The main builder class for an Acora search engine.
Add keywords by calling ``.add(*keywords)`` or by passing them
into the constructor. Then build the search engine by calling
``.build()``.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
ignore_case = False
def __init__(self, *keywords, **kwargs):
if kwargs:
self.ignore_case = kwargs.pop('ignore_case', False)
if kwargs:
raise TypeError(
"%s() got unexpected keyword argument %s" % (
self.__class__.__name__, next(iter(kwargs))))
if len(keywords) == 1 and isinstance(keywords[0], (list, tuple)):
keywords = keywords[0]
self.for_unicode = None
self.state_counter = 1
self.keywords = set()
self.tree = _MachineState(0)
if keywords:
self.update(keywords)
def __update(self, keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if not keywords:
return
self.tree = None
self.keywords.update(keywords)
if self.for_unicode is None:
for keyword in keywords:
if isinstance(keyword, unicode):
self.for_unicode = True
elif isinstance(keyword, bytes):
self.for_unicode = False
else:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
break
# validate input string types
marker = object()
if self.for_unicode:
for keyword in keywords:
if not isinstance(keyword, unicode):
break
else:
keyword = marker
else:
for keyword in keywords:
if not isinstance(keyword, bytes):
break
else:
keyword = marker
if keyword is not marker:
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
def add(self, *keywords):
"""Add more keywords to the search engine builder.
Adding keywords does not impact previously built search
engines.
"""
if keywords:
self.update(keywords)
def build(self, ignore_case=None, acora=None):
"""Build a search engine from the aggregated keywords.
Builds a case insensitive search engine when passing
``ignore_case=True``, and a case sensitive engine otherwise.
"""
if acora is None:
if self.for_unicode:
acora = UnicodeAcora
else:
acora = BytesAcora
if self.for_unicode == False and ignore_case:
import sys
if sys.version_info[0] >= 3:
raise ValueError(
"Case insensitive search is not supported for byte strings in Python 3")
if ignore_case is not None and ignore_case != self.ignore_case:
# must rebuild tree
builder = type(self)(ignore_case=ignore_case)
builder.update(self.keywords)
return builder.build(acora=acora)
return acora(_build_trie(self.tree, ignore_case=self.ignore_case))
def update(self, keywords):
for_unicode = self.for_unicode
ignore_case = self.ignore_case
insert_keyword = insert_unicode_keyword if for_unicode else insert_bytes_keyword
for keyword in keywords:
if for_unicode is None:
for_unicode = self.for_unicode = isinstance(keyword, unicode)
insert_keyword = (
insert_unicode_keyword if for_unicode else insert_bytes_keyword)
elif for_unicode != isinstance(keyword, unicode):
raise TypeError(
"keywords must be either bytes or unicode, not mixed (got %s)" %
type(keyword))
self.state_counter = insert_keyword(
self.tree, keyword, self.state_counter, ignore_case)
self.keywords.update(keywords)
### convenience functions
def search(s, *keywords):
"""Convenience function to search a string for keywords.
"""
acora = AcoraBuilder(keywords).build()
return acora.findall(s)
def search_ignore_case(s, *keywords):
"""Convenience function to search a string for keywords. Case
insensitive version.
"""
acora = AcoraBuilder(keywords, ignore_case=True).build()
return acora.findall(s)
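# Illustrative doctest-style sketch of the two convenience helpers above
# (offsets follow the (keyword, offset) convention shown in the module docstring):
# >>> search('abcd', 'bc', 'cd')
# [('bc', 1), ('cd', 2)]
# >>> search_ignore_case('aBCd', 'bc')
# [('bc', 1)]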
| bsd-3-clause |
skerit/shotfactory | shotfactory04/gui/windows/flock.py | 1 | 3182 | # browsershots.org - Test your web design in different browsers
# Copyright (C) 2007 Johann C. Rocholl <[email protected]>
#
# Browsershots is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# Browsershots is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
GUI-specific interface functions for Flock on Microsoft Windows.
"""
__revision__ = "$Rev$"
__date__ = "$Date$"
__author__ = "$Author$"
import os
import time
from win32com.shell import shellcon
from win32com.shell import shell
from shotfactory04.gui import windows
class Gui(windows.Gui):
"""
Special functions for Flock on Windows.
"""
def reset_browser(self):
"""
Delete previous session and browser cache.
"""
appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_LOCAL_APPDATA, 0, 0)
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browsers', 'Profiles', '*', 'Cache'))
appdata = shell.SHGetFolderPath(0, shellcon.CSIDL_APPDATA, 0, 0)
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'sessionstore.js'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'history.dat'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'cookies.txt'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'historysearch'))
self.delete_if_exists(os.path.join(
appdata, 'Flock', 'Browser', 'Profiles', '*', 'lucene'))
def start_browser(self, config, url, options):
"""
Start browser and load website.
"""
if config['major'] == 2:
defcmd = r'c:\progra~1\flock\flock.exe'
else:
defcmd = r'c:\progra~1\flock\flock\flock.exe'
command = config['command'] or defcmd
print 'running', command
try:
import subprocess
except ImportError:
os.spawnl(os.P_DETACH, command, os.path.basename(command), url)
else:
subprocess.Popen([command, url])
print "Sleeping %d seconds while page is loading." % options.wait
time.sleep(options.wait)
def find_scrollable(self):
"""Find scrollable window."""
flock = self.find_window_by_title_suffix(' Flock')
return self.get_child_window(flock)
# Test scrolling from command line
if __name__ == '__main__':
config = {
'width': 1024,
'bpp': 24,
}
class Options:
verbose = 3
gui = Gui(config, Options())
gui.down()
time.sleep(1)
gui.scroll_bottom()
| gpl-3.0 |
uroybd/parsexl | parsexl.py | 1 | 1228 | """This Module Parse xls/xslx files and return data in json."""
import xlrd, datetime
from collections import OrderedDict
import simplejson as json
""" This function take 5 arguments:
inp = Input file
outp = Output file
sheet = Worksheet to work with in input file.
start = Starting row
end = Ending row
fields = A list of field-names to be used in json."""
def xlparse(inp, outp, sheet, start, end, fields):
inpt = inp
outpt = outp
wb = xlrd.open_workbook(inpt)
sh = wb.sheet_by_name(sheet)
json_list = []
for rownum in range(start - 1, end):
dicto = OrderedDict()
row_values = sh.row_values(rownum)
counter = 0
for i in fields:
if i.find('date') != -1:
try:
timestr = xlrd.xldate_as_tuple(row_values[counter], wb.datemode)
dicto[i] = str(datetime.datetime(*timestr)).split(' ')[0]
except:
dicto[i] = row_values[counter]
else:
dicto[i] = row_values[counter]
counter = counter + 1
json_list.append(dicto)
out = json.dumps(json_list)
with open(outpt, 'w') as f:
f.write(out)
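# Illustrative usage sketch (file names, sheet name, row range and field list
# below are assumed for the example, not part of the module):
# xlparse('input.xlsx', 'output.json', 'Sheet1', 2, 10,
# ['name', 'birth_date', 'score'])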
| gpl-3.0 |
matthiascy/panda3d | direct/src/directscripts/doxygen_filter.py | 9 | 2333 | """ This script converts a file into a format that
doxygen can understand and process. It can be used
as an INPUT_FILTER in doxygen. """
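# A minimal sketch of the intended hook-up (Doxyfile line assumed, not taken
# from this repository); doxygen appends the source file name as argv[1]:
# INPUT_FILTER = "python doxygen_filter.py"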
import sys, os
# Explicitly include these files. Besides these, all
# files ending in _src will be explicitly included too.
INCLUDE_FILES = ["fltnames.h", "dblnames.h",
"flt2dblnames.h", "dbl2fltnames.h"]
def filter_file(infile):
desc = ""
reading_desc = False
license = True
indent = 0
for line in infile.readlines():
line = line.rstrip()
if line.startswith("////"):
if reading_desc:
# Probably the end of a comment reading_desc.
line = "*/"
reading_desc = False
else:
line = ""
elif line.startswith("//"):
strline = line.lstrip('/ \t')
if reading_desc:
line = line[min(indent, len(line) - len(strline)):]
else:
# A "Description:" text starts the description.
if strline.startswith("Description"):
strline = strline[11:].lstrip(': \t')
indent = len(line) - len(strline)
reading_desc = True
line = "/** " + strline
else:
license = False
if reading_desc:
line = "*/" + line
reading_desc = False
if line.startswith("#include"):
fname = line.split(' ', 1)[1].strip().strip('"')
if fname.rsplit('.', 1)[0].endswith("_src") or fname in INCLUDE_FILES:
# We handle these in a special way, because
# doxygen won't do this properly for us.
# This breaks line numbering, but beh.
if not os.path.isfile(fname):
fname = os.path.join(os.path.dirname(filename), fname)
if os.path.isfile(fname):
filter_file(open(fname, 'r'))
continue # Skip the #include
if license:
line = ""
print(line)
if __name__ == "__main__":
assert len(sys.argv) == 2, "please specify a filename"
filename = sys.argv[1]
infile = open(filename, 'r')
filter_file(infile)
infile.close()
| bsd-3-clause |
xen0l/ansible | lib/ansible/module_utils/network/f5/common.py | 2 | 18920 | # -*- coding: utf-8 -*-
#
# Copyright (c) 2017 F5 Networks Inc.
# GNU General Public License v3.0 (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
import os
import re
from ansible.module_utils._text import to_text
from ansible.module_utils.basic import env_fallback
from ansible.module_utils.connection import exec_command
from ansible.module_utils.network.common.utils import to_list
from ansible.module_utils.network.common.utils import ComplexList
from ansible.module_utils.six import iteritems
from ansible.module_utils.parsing.convert_bool import BOOLEANS_TRUE
from ansible.module_utils.parsing.convert_bool import BOOLEANS_FALSE
from collections import defaultdict
try:
from icontrol.exceptions import iControlUnexpectedHTTPError
HAS_F5SDK = True
except ImportError:
HAS_F5SDK = False
f5_provider_spec = {
'server': dict(
fallback=(env_fallback, ['F5_SERVER'])
),
'server_port': dict(
type='int',
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'user': dict(
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'ssh_keyfile': dict(
type='path'
),
'validate_certs': dict(
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'transport': dict(
choices=['cli', 'rest'],
default='rest'
),
'timeout': dict(type='int'),
}
f5_argument_spec = {
'provider': dict(type='dict', options=f5_provider_spec),
}
f5_top_spec = {
'server': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_SERVER'])
),
'user': dict(
removed_in_version=2.9,
fallback=(env_fallback, ['F5_USER', 'ANSIBLE_NET_USERNAME'])
),
'password': dict(
removed_in_version=2.9,
no_log=True,
aliases=['pass', 'pwd'],
fallback=(env_fallback, ['F5_PASSWORD', 'ANSIBLE_NET_PASSWORD'])
),
'validate_certs': dict(
removed_in_version=2.9,
type='bool',
fallback=(env_fallback, ['F5_VALIDATE_CERTS'])
),
'server_port': dict(
removed_in_version=2.9,
type='int',
fallback=(env_fallback, ['F5_SERVER_PORT'])
),
'transport': dict(
removed_in_version=2.9,
choices=['cli', 'rest']
)
}
f5_argument_spec.update(f5_top_spec)
def get_provider_argspec():
return f5_provider_spec
def load_params(params):
provider = params.get('provider') or dict()
for key, value in iteritems(provider):
if key in f5_argument_spec:
if params.get(key) is None and value is not None:
params[key] = value
# Fully Qualified name (with the partition)
def fqdn_name(partition, value):
"""This method is not used
This was the original name of a method that was used throughout all
the F5 Ansible modules. This is now deprecated, and should be removed
in 2.9. All modules should be changed to use ``fq_name``.
TODO(Remove in Ansible 2.9)
"""
return fq_name(partition, value)
def fq_name(partition, value):
"""Returns a 'Fully Qualified' name
A BIG-IP expects most names of resources to be in a fully-qualified
form. This means that both the simple name, and the partition need
to be combined.
The Ansible modules, however, can accept (as names for several
resources) their name in the FQ format. This becomes an issue when
the FQ name and the partition are both specified as separate values.
Consider the following examples.
# Name not FQ
name: foo
partition: Common
# Name FQ
name: /Common/foo
partition: Common
This method will rectify the above situation and will, in both cases,
return the following for name.
/Common/foo
Args:
partition (string): The partition that you would want attached to
the name if the name has no partition.
value (string): The name that you want to attach a partition to.
This value will be returned unchanged if it has a partition
attached to it already.
Returns:
string: The fully qualified name, given the input parameters.
"""
if value is not None:
try:
int(value)
return '/{0}/{1}'.format(partition, value)
except (ValueError, TypeError):
if not value.startswith('/'):
return '/{0}/{1}'.format(partition, value)
return value
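# Illustrative doctest-style sketch of the qualification rules described above:
# >>> fq_name('Common', 'foo')
# '/Common/foo'
# >>> fq_name('Common', '/Tenant/foo')
# '/Tenant/foo'
# >>> fq_name('Common', 80)
# '/Common/80'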
# Fully Qualified name (with partition) for a list
def fq_list_names(partition, list_names):
if list_names is None:
return None
return map(lambda x: fqdn_name(partition, x), list_names)
def to_commands(module, commands):
spec = {
'command': dict(key=True),
'prompt': dict(),
'answer': dict()
}
transform = ComplexList(spec, module)
return transform(commands)
def run_commands(module, commands, check_rc=True):
responses = list()
commands = to_commands(module, to_list(commands))
for cmd in commands:
cmd = module.jsonify(cmd)
rc, out, err = exec_command(module, cmd)
if check_rc and rc != 0:
raise F5ModuleError(to_text(err, errors='surrogate_then_replace'))
result = to_text(out, errors='surrogate_then_replace')
responses.append(result)
return responses
def flatten_boolean(value):
truthy = list(BOOLEANS_TRUE) + ['enabled']
falsey = list(BOOLEANS_FALSE) + ['disabled']
if value is None:
return None
elif value in truthy:
return 'yes'
elif value in falsey:
return 'no'
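# Illustrative doctest-style sketch; truthy/falsey membership comes from
# Ansible's BOOLEANS_TRUE / BOOLEANS_FALSE imported above:
# >>> flatten_boolean('enabled')
# 'yes'
# >>> flatten_boolean(False)
# 'no'
# >>> flatten_boolean(None) is None
# True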
def cleanup_tokens(client):
try:
# isinstance cannot be used here because importing it creates a
# circular dependency with the module_utils.network.f5.bigip file.
#
# TODO(consider refactoring cleanup_tokens)
if 'F5RestClient' in type(client).__name__:
token = client._client.headers.get('X-F5-Auth-Token', None)
if not token:
return
uri = "https://{0}:{1}/mgmt/shared/authz/tokens/{2}".format(
client.provider['server'],
client.provider['server_port'],
token
)
resp = client.api.delete(uri)
try:
resp.json()
except ValueError as ex:
raise F5ModuleError(str(ex))
return True
else:
resource = client.api.shared.authz.tokens_s.token.load(
name=client.api.icrs.token
)
resource.delete()
except Exception as ex:
pass
def is_cli(module):
transport = module.params['transport']
provider_transport = (module.params['provider'] or {}).get('transport')
result = 'cli' in (transport, provider_transport)
return result
def is_valid_hostname(host):
"""Reasonable attempt at validating a hostname
Compiled from various paragraphs outlined here
https://tools.ietf.org/html/rfc3696#section-2
https://tools.ietf.org/html/rfc1123
Notably,
* Host software MUST handle host names of up to 63 characters and
SHOULD handle host names of up to 255 characters.
* The "LDH rule", after the characters that it permits. (letters, digits, hyphen)
* If the hyphen is used, it is not permitted to appear at
either the beginning or end of a label
:param host:
:return:
"""
if len(host) > 255:
return False
host = host.rstrip(".")
allowed = re.compile(r'(?!-)[A-Z0-9-]{1,63}(?<!-)$', re.IGNORECASE)
result = all(allowed.match(x) for x in host.split("."))
return result
def is_valid_fqdn(host):
"""Reasonable attempt at validating a hostname
Compiled from various paragraphs outlined here
https://tools.ietf.org/html/rfc3696#section-2
https://tools.ietf.org/html/rfc1123
Notably,
* Host software MUST handle host names of up to 63 characters and
SHOULD handle host names of up to 255 characters.
* The "LDH rule", after the characters that it permits. (letters, digits, hyphen)
* If the hyphen is used, it is not permitted to appear at
either the beginning or end of a label
:param host:
:return:
"""
if len(host) > 255:
return False
host = host.rstrip(".")
allowed = re.compile(r'(?!-)[A-Z0-9-]{1,63}(?<!-)$', re.IGNORECASE)
result = all(allowed.match(x) for x in host.split("."))
if result:
parts = host.split('.')
if len(parts) > 1:
return True
return False
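# Illustrative doctest-style sketch (host names are assumed): a bare label is
# rejected because at least two dot-separated parts are required.
# >>> is_valid_fqdn('bigip.example.com')
# True
# >>> is_valid_fqdn('bigip')
# False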
def transform_name(partition='', name='', sub_path=''):
if name:
name = name.replace('/', '~')
if partition:
partition = '~' + partition
else:
if sub_path:
raise F5ModuleError(
'When giving the subPath component include partition as well.'
)
if sub_path and partition:
sub_path = '~' + sub_path
if name and partition:
name = '~' + name
result = partition + sub_path + name
return result
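# Illustrative doctest-style sketch of the tilde-encoded identifier built above:
# >>> transform_name('Common', 'my/virtual')
# '~Common~my~virtual'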
def dict2tuple(items):
"""Convert a dictionary to a list of tuples
This method is used in cases where dictionaries need to be compared. Due
to dictionaries inherently having no order, it is easier to compare list
of tuples because these lists can be converted to sets.
This conversion only supports dicts of simple values. Do not give it dicts
that contain sub-dicts. This will not give you the result you want when using
the returned tuple for comparison.
Args:
items (dict): The dictionary of items that should be converted
Returns:
list: Returns a list of tuples upon success. Otherwise, an empty list.
"""
result = []
for x in items:
tmp = [(str(k), str(v)) for k, v in iteritems(x)]
result += tmp
return result
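# Illustrative doctest-style sketch (note that values are stringified):
# >>> dict2tuple([{'port': 80}])
# [('port', '80')]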
def compare_dictionary(want, have):
"""Performs a dictionary comparison
Args:
want (dict): Dictionary to compare with second parameter.
have (dict): Dictionary to compare with first parameter.
Returns:
bool:
"""
if want == [] and have is None:
return None
if want is None:
return None
w = dict2tuple(want)
h = dict2tuple(have)
if set(w) == set(h):
return None
else:
return want
def is_ansible_debug(module):
if module._debug and module._verbosity >= 4:
return True
return False
def fail_json(module, ex, client=None):
if is_ansible_debug(module) and client:
module.fail_json(msg=str(ex), __f5debug__=client.api.debug_output)
module.fail_json(msg=str(ex))
def exit_json(module, results, client=None):
if is_ansible_debug(module) and client:
results['__f5debug__'] = client.api.debug_output
module.exit_json(**results)
def is_uuid(uuid=None):
"""Check to see if value is an F5 UUID
UUIDs are used in BIG-IQ and in select areas of BIG-IP (notably ASM). This method
will check to see if the provided value matches a UUID as known by these products.
Args:
uuid (string): The value to check for UUID-ness
Returns:
bool:
"""
if uuid is None:
return False
pattern = r'[A-Za-z0-9]{8}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{4}-[A-Za-z0-9]{12}'
if re.match(pattern, uuid):
return True
return False
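# Illustrative doctest-style sketch (UUID values are made up):
# >>> is_uuid('11111111-2222-3333-4444-555555555555')
# True
# >>> is_uuid('not-a-uuid')
# False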
def on_bigip():
if os.path.exists('/usr/bin/tmsh'):
return True
return False
class Noop(object):
"""Represent no-operation required
This class is used in the Difference engine to specify when an attribute
has not changed. Difference attributes may return an instance of this
class as a means to indicate when the attribute has not changed.
The Noop object allows attributes to be set to None when sending updates
to the API. `None` is technically a valid value in some cases (it indicates
that the attribute should be removed from the resource).
"""
pass
class F5BaseClient(object):
def __init__(self, *args, **kwargs):
self.params = kwargs
self.module = kwargs.get('module', None)
load_params(self.params)
self._client = None
@property
def api(self):
raise F5ModuleError("Management root must be used from the concrete product classes.")
def reconnect(self):
"""Attempts to reconnect to a device
The existing token from a ManagementRoot can become invalid if you,
for example, upgrade the device (such as is done in the *_software
module.
This method can be used to reconnect to a remote device without
having to re-instantiate the ArgumentSpec and AnsibleF5Client classes
it will use the same values that were initially provided to those
classes
:return:
:raises iControlUnexpectedHTTPError
"""
self._client = None
@staticmethod
def validate_params(key, store):
if key in store and store[key] is not None:
return True
else:
return False
def merge_provider_params(self):
result = dict()
provider = self.params.get('provider', {})
if self.validate_params('server', provider):
result['server'] = provider['server']
elif self.validate_params('server', self.params):
result['server'] = self.params['server']
elif self.validate_params('F5_SERVER', os.environ):
result['server'] = os.environ['F5_SERVER']
else:
raise F5ModuleError('Server parameter cannot be None or missing, please provide a valid value')
if self.validate_params('server_port', provider):
result['server_port'] = provider['server_port']
elif self.validate_params('server_port', self.params):
result['server_port'] = self.params['server_port']
elif self.validate_params('F5_SERVER_PORT', os.environ):
result['server_port'] = os.environ['F5_SERVER_PORT']
else:
result['server_port'] = 443
if self.validate_params('validate_certs', provider):
result['validate_certs'] = provider['validate_certs']
elif self.validate_params('validate_certs', self.params):
result['validate_certs'] = self.params['validate_certs']
elif self.validate_params('F5_VALIDATE_CERTS', os.environ):
result['validate_certs'] = os.environ['F5_VALIDATE_CERTS']
else:
result['validate_certs'] = True
if self.validate_params('auth_provider', provider):
result['auth_provider'] = provider['auth_provider']
elif self.validate_params('auth_provider', self.params):
result['auth_provider'] = self.params['auth_provider']
else:
result['auth_provider'] = None
if self.validate_params('user', provider):
result['user'] = provider['user']
elif self.validate_params('user', self.params):
result['user'] = self.params['user']
elif self.validate_params('F5_USER', os.environ):
result['user'] = os.environ.get('F5_USER')
elif self.validate_params('ANSIBLE_NET_USERNAME', os.environ):
result['user'] = os.environ.get('ANSIBLE_NET_USERNAME')
else:
result['user'] = None
if self.validate_params('password', provider):
result['password'] = provider['password']
elif self.validate_params('password', self.params):
result['password'] = self.params['password']
elif self.validate_params('F5_PASSWORD', os.environ):
result['password'] = os.environ.get('F5_PASSWORD')
elif self.validate_params('ANSIBLE_NET_PASSWORD', os.environ):
result['password'] = os.environ.get('ANSIBLE_NET_PASSWORD')
else:
result['password'] = None
if result['validate_certs'] in BOOLEANS_TRUE:
result['validate_certs'] = True
else:
result['validate_certs'] = False
return result
class AnsibleF5Parameters(object):
def __init__(self, *args, **kwargs):
self._values = defaultdict(lambda: None)
self._values['__warnings'] = []
self.client = kwargs.pop('client', None)
self._module = kwargs.pop('module', None)
self._params = {}
params = kwargs.pop('params', None)
if params:
self.update(params=params)
self._params.update(params)
def update(self, params=None):
if params:
self._params.update(params)
for k, v in iteritems(params):
if self.api_map is not None and k in self.api_map:
map_key = self.api_map[k]
else:
map_key = k
# Handle weird API parameters like `dns.proxy.__iter__` by
# using a map provided by the module developer
class_attr = getattr(type(self), map_key, None)
if isinstance(class_attr, property):
# There is a mapped value for the api_map key
if class_attr.fset is None:
# If the mapped value does not have
# an associated setter
self._values[map_key] = v
else:
# The mapped value has a setter
setattr(self, map_key, v)
else:
# If the mapped value is not a @property
self._values[map_key] = v
def api_params(self):
result = {}
for api_attribute in self.api_attributes:
if self.api_map is not None and api_attribute in self.api_map:
result[api_attribute] = getattr(self, self.api_map[api_attribute])
else:
result[api_attribute] = getattr(self, api_attribute)
result = self._filter_params(result)
return result
def __getattr__(self, item):
# Ensures that properties that weren't defined, and therefore stashed
# in the `_values` dict, will be retrievable.
return self._values[item]
@property
def partition(self):
if self._values['partition'] is None:
return 'Common'
return self._values['partition'].strip('/')
@partition.setter
def partition(self, value):
self._values['partition'] = value
def _filter_params(self, params):
return dict((k, v) for k, v in iteritems(params) if v is not None)
class F5ModuleError(Exception):
pass
| gpl-3.0 |
yanheven/glance | glance/api/v2/model/metadef_property_type.py | 20 | 2354 | # Copyright (c) 2014 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import wsme
from wsme import types
from glance.api.v2.model.metadef_property_item_type import ItemType
from glance.common.wsme_utils import WSMEModelTransformer
class PropertyType(types.Base, WSMEModelTransformer):
# When used in collection of PropertyTypes, name is a dictionary key
# and not included as separate field.
name = wsme.wsattr(types.text, mandatory=False)
type = wsme.wsattr(types.text, mandatory=True)
title = wsme.wsattr(types.text, mandatory=True)
description = wsme.wsattr(types.text, mandatory=False)
operators = wsme.wsattr([types.text], mandatory=False)
default = wsme.wsattr(types.bytes, mandatory=False)
readonly = wsme.wsattr(bool, mandatory=False)
# fields for type = string
minimum = wsme.wsattr(int, mandatory=False)
maximum = wsme.wsattr(int, mandatory=False)
enum = wsme.wsattr([types.text], mandatory=False)
pattern = wsme.wsattr(types.text, mandatory=False)
# fields for type = integer, number
minLength = wsme.wsattr(int, mandatory=False)
maxLength = wsme.wsattr(int, mandatory=False)
confidential = wsme.wsattr(bool, mandatory=False)
# fields for type = array
items = wsme.wsattr(ItemType, mandatory=False)
uniqueItems = wsme.wsattr(bool, mandatory=False)
minItems = wsme.wsattr(int, mandatory=False)
maxItems = wsme.wsattr(int, mandatory=False)
additionalItems = wsme.wsattr(bool, mandatory=False)
def __init__(self, **kwargs):
super(PropertyType, self).__init__(**kwargs)
class PropertyTypes(types.Base, WSMEModelTransformer):
properties = wsme.wsattr({types.text: PropertyType}, mandatory=False)
def __init__(self, **kwargs):
super(PropertyTypes, self).__init__(**kwargs)
| apache-2.0 |
buguelos/odoo | addons/lunch/wizard/lunch_order.py | 440 | 1299 | # -*- encoding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2012 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp.osv import fields, osv
class lunch_order_order(osv.TransientModel):
""" lunch order meal """
_name = 'lunch.order.order'
_description = 'Wizard to order a meal'
def order(self,cr,uid,ids,context=None):
return self.pool.get('lunch.order.line').order(cr, uid, ids, context=context)
| agpl-3.0 |
smileboywtu/python-enhance | static/demo/tic-tac-toe.py | 1 | 5575 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Tic-Tac-Toe
# Plays the game of tic-tac-toe against a human opponent
# global constants
X = "X"
O = "O"
EMPTY = " "
TIE = "TIE"
NUM_SQUARES = 9
def display_instruct():
"""Display game instructions."""
print(
"""
Welcome to the greatest intellectual challenge of all time: Tic-Tac-Toe.
This will be a showdown between your human brain and my silicon processor.
You will make your move known by entering a number, 0 - 8. The number
will correspond to the board position as illustrated:
0 | 1 | 2
---------
3 | 4 | 5
---------
6 | 7 | 8
Prepare yourself, human. The ultimate battle is about to begin. \n
"""
)
def ask_yes_no(question):
"""Ask a yes or no question."""
response = None
while response not in ("y", "n"):
response = input(question).lower()
return response
def ask_number(question, low, high):
"""Ask for a number within a range."""
response = None
while response not in range(low, high):
response = int(input(question))
return response
def pieces():
"""Determine if player or computer goes first."""
go_first = ask_yes_no("Do you require the first move? (y/n): ")
if go_first == "y":
print("\nThen take the first move. You will need it.")
human = X
computer = O
else:
print("\nYour bravery will be your undoing... I will go first.")
computer = X
human = O
return computer, human
def new_board():
"""Create new game board."""
board = []
for square in range(NUM_SQUARES):
board.append(EMPTY)
return board
def display_board(board):
"""Display game board on screen."""
print("\n\t", board[0], "|", board[1], "|", board[2])
print("\t", "---------")
print("\t", board[3], "|", board[4], "|", board[5])
print("\t", "---------")
print("\t", board[6], "|", board[7], "|", board[8], "\n")
def legal_moves(board):
"""Create list of legal moves."""
moves = []
for square in range(NUM_SQUARES):
if board[square] == EMPTY:
moves.append(square)
return moves
def winner(board):
"""Determine the game winner."""
WAYS_TO_WIN = ((0, 1, 2),
(3, 4, 5),
(6, 7, 8),
(0, 3, 6),
(1, 4, 7),
(2, 5, 8),
(0, 4, 8),
(2, 4, 6))
for row in WAYS_TO_WIN:
if board[row[0]] == board[row[1]] == board[row[2]] != EMPTY:
winner = board[row[0]]
return winner
if EMPTY not in board:
return TIE
return None
def human_move(board, human):
"""Get human move."""
legal = legal_moves(board)
move = None
while move not in legal:
move = ask_number("Where will you move? (0 - 8):", 0, NUM_SQUARES)
if move not in legal:
print("\nThat square is already occupied, foolish human. Choose another.\n")
print("Fine...")
return move
def computer_move(board, computer, human):
"""Make computer move."""
# make a copy to work with since function will be changing list
board = board[:]
# the best positions to have, in order
BEST_MOVES = (4, 0, 2, 6, 8, 1, 3, 5, 7)
print("I shall take square number", end=" ")
# if computer can win, take that move
for move in legal_moves(board):
board[move] = computer
if winner(board) == computer:
print(move)
return move
# done checking this move, undo it
board[move] = EMPTY
# if human can win, block that move
for move in legal_moves(board):
board[move] = human
if winner(board) == human:
print(move)
return move
        # done checking this move, undo it
board[move] = EMPTY
# since no one can win on next move, pick best open square
for move in BEST_MOVES:
if move in legal_moves(board):
print(move)
return move
def next_turn(turn):
"""Switch turns."""
if turn == X:
return O
else:
return X
def congrat_winner(the_winner, computer, human):
"""Congratulate the winner."""
if the_winner != TIE:
print(the_winner, "won!\n")
else:
print("It's a tie!\n")
if the_winner == computer:
print("As I predicted, human, I am triumphant once more. \n" \
"Proof that computers are superior to humans in all regards.")
elif the_winner == human:
print("No, no! It cannot be! Somehow you tricked me, human. \n" \
"But never again! I, the computer, so swear it!")
elif the_winner == TIE:
print("You were most lucky, human, and somehow managed to tie me. \n" \
"Celebrate today... for this is the best you will ever achieve.")
def main():
display_instruct()
computer, human = pieces()
turn = X
board = new_board()
display_board(board)
while not winner(board):
if turn == human:
move = human_move(board, human)
board[move] = human
else:
move = computer_move(board, computer, human)
board[move] = computer
display_board(board)
turn = next_turn(turn)
the_winner = winner(board)
congrat_winner(the_winner, computer, human)
# start the program
main()
input("\n\nPress the enter key to quit.") | mit |
OCA/vertical-medical | medical_prescription_sale/models/sale_order_line.py | 2 | 4340 | # -*- coding: utf-8 -*-
##############################################################################
#
# Author: Dave Lasley <[email protected]>
# Copyright: 2015 LasLabs, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
from openerp import fields, models, api, _
from openerp.exceptions import ValidationError
import logging
_logger = logging.getLogger(__name__)
class SaleOrderLine(models.Model):
_inherit = 'sale.order.line'
@api.model
def _compute_dispense_qty(self, ):
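        # Express the ordered quantity in the prescription line's dispense UoM,
        # converting only when the sale UoM differs from the dispense UoM.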
rx_line = self.prescription_order_line_id
if self.product_uom == rx_line.dispense_uom_id:
self.dispense_qty = self.product_uom_qty
else:
self.dispense_qty = self.product_uom._compute_qty_obj(
self.product_uom_qty, rx_line.dispense_uom_id
)
patient_id = fields.Many2one(
string='Patient',
comodel_name='medical.patient',
related='prescription_order_line_id.patient_id',
)
prescription_order_line_id = fields.Many2one(
string='Prescription Line',
comodel_name='medical.prescription.order.line',
)
medication_id = fields.Many2one(
string='Medication',
comodel_name='medical.patient.medication',
related='prescription_order_line_id.medical_medication_id',
)
dispense_qty = fields.Float(
default=0.0,
readonly=True,
compute='_compute_dispense_qty',
)
@api.one
@api.constrains(
'product_id', 'prescription_order_line_id', 'patient_id',
)
def _check_sale_line_prescription(self, ):
'''
Validate whether the line can be dispensed based on Rx, pending
dispensings, etc.
:returns: bool -- If line can be processed
:raises: :class:`openerp.exceptions.ValidationError`
'''
if not self.medication_id.medicament_id.is_medicament:
return True
if not self.medication_id.medicament_id.is_prescription:
return True
rx_line = self.prescription_order_line_id
if self.patient_id != rx_line.patient_id:
raise ValidationError(_(
'Patients must be same on Order and Rx lines. '
'Got %s on order line %d, expected %s from rx line %d' % (
self.patient_id.name, self.id,
rx_line.patient_id.name, rx_line.id,
),
))
if rx_line.product_id != self.product_id:
if not self.is_substitutable:
raise ValidationError(_(
'Products must be same on Order and Rx lines. '
'Got %s on order line %d, expected %s from rx line %d' % (
self.product_id.name, self.id,
rx_line.product_id.name, rx_line.id,
),
))
else:
raise NotImplementedError(_(
'Drug substitution validation has not been implemented.'
))
if not rx_line.can_dispense:
raise ValidationError(_(
'Cannot dispense - currently %f pending and %f exception.' % (
rx_line.pending_dispense_qty,
rx_line.exception_dispense_qty,
)
))
if self.dispense_qty > rx_line.can_dispense_qty:
raise ValidationError(_(
'Cannot dispense - Order line %s goes over Rx qty by %d' % (
self.name, self.dispense_qty - rx_line.can_dispense_qty
)
))
return True
| gpl-3.0 |
zstackio/zstack-woodpecker | integrationtest/vm/installation/upgrade/test_zs_degd_latest_1.5_on_cos7.py | 2 | 1889 | '''
@author: MengLai
'''
import os
import tempfile
import uuid
import time
import zstackwoodpecker.test_util as test_util
import zstackwoodpecker.test_lib as test_lib
import zstackwoodpecker.test_state as test_state
test_stub = test_lib.lib_get_test_stub()
test_obj_dict = test_state.TestStateDict()
tmp_file = '/tmp/%s' % uuid.uuid1().get_hex()
def test():
test_util.test_dsc('Create test vm to test zstack upgrade by -u.')
image_name = os.environ.get('imageName_i_c7')
vm = test_stub.create_vlan_vm(image_name)
test_obj_dict.add_vm(vm)
if os.environ.get('zstackManagementIp') == None:
vm.check()
else:
time.sleep(60)
vm_inv = vm.get_vm()
vm_ip = vm_inv.vmNics[0].ip
test_util.test_dsc('Install latest zstack')
target_file = '/root/zstack-all-in-one.tgz'
test_stub.prepare_test_env(vm_inv, target_file)
ssh_cmd = 'ssh -oStrictHostKeyChecking=no -oCheckHostIP=no -oUserKnownHostsFile=/dev/null %s' % vm_ip
test_stub.copy_id_dsa(vm_inv, ssh_cmd, tmp_file)
test_stub.copy_id_dsa_pub(vm_inv)
test_stub.execute_all_install(ssh_cmd, target_file, tmp_file)
test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
test_util.test_dsc('Degrade zstack to 1.5')
degrade_target_file = '/root/zstack-degrade-all-in-one.tgz'
install_pkg = os.environ.get('zstackPkg_1.5')
test_stub.prepare_upgrade_test_env(vm_inv, degrade_target_file, install_pkg)
test_stub.upgrade_zstack(ssh_cmd, degrade_target_file, tmp_file)
test_stub.check_installation(ssh_cmd, tmp_file, vm_inv)
os.system('rm -f %s' % tmp_file)
vm.destroy()
test_util.test_pass('ZStack upgrade Test Success')
#Will be called only if exception happens in test().
def error_cleanup():
os.system('rm -f %s' % tmp_file)
test_lib.lib_error_cleanup(test_obj_dict)
| apache-2.0 |
MoonshineSG/OctoPrint | src/octoprint/util/avr_isp/stk500v2.py | 8 | 4705 | from __future__ import absolute_import, division, print_function
import os, struct, sys, time
from serial import Serial
from serial import SerialException
from serial import SerialTimeoutException
from builtins import range
from . import ispBase, intelHex
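# Messages exchanged with the programmer use the STK500v2 framing implemented in
# sendMessage()/recvMessage(): a 0x1B start byte, a sequence number, a 16-bit
# big-endian body length, the 0x0E token, the message body, and a trailing XOR
# checksum over all preceding bytes.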
class Stk500v2(ispBase.IspBase):
def __init__(self):
self.serial = None
self.seq = 1
self.lastAddr = -1
self.progressCallback = None
def connect(self, port = 'COM22', speed = 115200):
if self.serial != None:
self.close()
try:
self.serial = Serial(str(port), speed, timeout=1, writeTimeout=10000)
except SerialException as e:
raise ispBase.IspError("Failed to open serial port")
except:
raise ispBase.IspError("Unexpected error while connecting to serial port:" + port + ":" + str(sys.exc_info()[0]))
self.seq = 1
#Reset the controller
self.serial.setDTR(1)
time.sleep(0.1)
self.serial.setDTR(0)
time.sleep(0.2)
self.sendMessage([1])
if self.sendMessage([0x10, 0xc8, 0x64, 0x19, 0x20, 0x00, 0x53, 0x03, 0xac, 0x53, 0x00, 0x00]) != [0x10, 0x00]:
self.close()
raise ispBase.IspError("Failed to enter programming mode")
def close(self):
if self.serial != None:
self.serial.close()
self.serial = None
#Leave ISP does not reset the serial port, only resets the device, and returns the serial port after disconnecting it from the programming interface.
# This allows you to use the serial port without opening it again.
def leaveISP(self):
if self.serial != None:
if self.sendMessage([0x11]) != [0x11, 0x00]:
raise ispBase.IspError("Failed to leave programming mode")
ret = self.serial
self.serial = None
return ret
return None
def isConnected(self):
return self.serial != None
def sendISP(self, data):
recv = self.sendMessage([0x1D, 4, 4, 0, data[0], data[1], data[2], data[3]])
return recv[2:6]
def writeFlash(self, flashData):
#Set load addr to 0, in case we have more then 64k flash we need to enable the address extension
pageSize = self.chip['pageSize'] * 2
flashSize = pageSize * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + pageSize - 1) // pageSize
for i in range(0, loadCount):
recv = self.sendMessage([0x13, pageSize >> 8, pageSize & 0xFF, 0xc1, 0x0a, 0x40, 0x4c, 0x20, 0x00, 0x00] + flashData[(i * pageSize):(i * pageSize + pageSize)])
if self.progressCallback != None:
self.progressCallback(i + 1, loadCount*2)
def verifyFlash(self, flashData):
#Set load addr to 0, in case we have more then 64k flash we need to enable the address extension
flashSize = self.chip['pageSize'] * 2 * self.chip['pageCount']
if flashSize > 0xFFFF:
self.sendMessage([0x06, 0x80, 0x00, 0x00, 0x00])
else:
self.sendMessage([0x06, 0x00, 0x00, 0x00, 0x00])
loadCount = (len(flashData) + 0xFF) // 0x100
for i in range(0, loadCount):
recv = self.sendMessage([0x14, 0x01, 0x00, 0x20])[2:0x102]
if self.progressCallback != None:
self.progressCallback(loadCount + i + 1, loadCount*2)
for j in range(0, 0x100):
if i * 0x100 + j < len(flashData) and flashData[i * 0x100 + j] != recv[j]:
raise ispBase.IspError('Verify error at: 0x%x' % (i * 0x100 + j))
def sendMessage(self, data):
message = struct.pack(">BBHB", 0x1B, self.seq, len(data), 0x0E)
for c in data:
message += struct.pack(">B", c)
checksum = 0
for c in message:
checksum ^= ord(c)
message += struct.pack(">B", checksum)
try:
self.serial.write(message)
self.serial.flush()
except SerialTimeoutException:
raise ispBase.IspError('Serial send timeout')
self.seq = (self.seq + 1) & 0xFF
return self.recvMessage()
def recvMessage(self):
state = 'Start'
checksum = 0
while True:
s = self.serial.read()
if len(s) < 1:
raise ispBase.IspError("Timeout")
b = struct.unpack(">B", s)[0]
checksum ^= b
#print(hex(b))
if state == 'Start':
if b == 0x1B:
state = 'GetSeq'
checksum = 0x1B
elif state == 'GetSeq':
state = 'MsgSize1'
elif state == 'MsgSize1':
msgSize = b << 8
state = 'MsgSize2'
elif state == 'MsgSize2':
msgSize |= b
state = 'Token'
elif state == 'Token':
if b != 0x0E:
state = 'Start'
else:
state = 'Data'
data = []
elif state == 'Data':
data.append(b)
if len(data) == msgSize:
state = 'Checksum'
elif state == 'Checksum':
if checksum != 0:
state = 'Start'
else:
return data
def main():
programmer = Stk500v2()
programmer.connect(port = sys.argv[1])
programmer.programChip(intelHex.readHex(sys.argv[2]))
sys.exit(1)
if __name__ == '__main__':
main()
| agpl-3.0 |
pleaseproject/python-for-android | python3-alpha/python3-src/Lib/encodings/raw_unicode_escape.py | 852 | 1208 | """ Python 'raw-unicode-escape' Codec
Written by Marc-Andre Lemburg ([email protected]).
(c) Copyright CNRI, All Rights Reserved. NO WARRANTY.
"""
import codecs
### Codec APIs
class Codec(codecs.Codec):
# Note: Binding these as C functions will result in the class not
# converting them to methods. This is intended.
encode = codecs.raw_unicode_escape_encode
decode = codecs.raw_unicode_escape_decode
class IncrementalEncoder(codecs.IncrementalEncoder):
def encode(self, input, final=False):
return codecs.raw_unicode_escape_encode(input, self.errors)[0]
class IncrementalDecoder(codecs.IncrementalDecoder):
def decode(self, input, final=False):
return codecs.raw_unicode_escape_decode(input, self.errors)[0]
class StreamWriter(Codec,codecs.StreamWriter):
pass
class StreamReader(Codec,codecs.StreamReader):
pass
### encodings module API
def getregentry():
return codecs.CodecInfo(
name='raw-unicode-escape',
encode=Codec.encode,
decode=Codec.decode,
incrementalencoder=IncrementalEncoder,
incrementaldecoder=IncrementalDecoder,
streamwriter=StreamWriter,
streamreader=StreamReader,
)
| apache-2.0 |
abhijeet9920/python_project | develop/lib/python3.4/site-packages/pip/_vendor/requests/packages/urllib3/util/request.py | 780 | 2128 | from __future__ import absolute_import
from base64 import b64encode
from ..packages.six import b
ACCEPT_ENCODING = 'gzip,deflate'
def make_headers(keep_alive=None, accept_encoding=None, user_agent=None,
basic_auth=None, proxy_basic_auth=None, disable_cache=None):
"""
Shortcuts for generating request headers.
:param keep_alive:
If ``True``, adds 'connection: keep-alive' header.
:param accept_encoding:
Can be a boolean, list, or string.
``True`` translates to 'gzip,deflate'.
List will get joined by comma.
String will be used as provided.
:param user_agent:
String representing the user-agent you want, such as
"python-urllib3/0.6"
:param basic_auth:
Colon-separated username:password string for 'authorization: basic ...'
auth header.
:param proxy_basic_auth:
Colon-separated username:password string for 'proxy-authorization: basic ...'
auth header.
:param disable_cache:
If ``True``, adds 'cache-control: no-cache' header.
Example::
>>> make_headers(keep_alive=True, user_agent="Batman/1.0")
{'connection': 'keep-alive', 'user-agent': 'Batman/1.0'}
>>> make_headers(accept_encoding=True)
{'accept-encoding': 'gzip,deflate'}
"""
headers = {}
if accept_encoding:
if isinstance(accept_encoding, str):
pass
elif isinstance(accept_encoding, list):
accept_encoding = ','.join(accept_encoding)
else:
accept_encoding = ACCEPT_ENCODING
headers['accept-encoding'] = accept_encoding
if user_agent:
headers['user-agent'] = user_agent
if keep_alive:
headers['connection'] = 'keep-alive'
if basic_auth:
headers['authorization'] = 'Basic ' + \
b64encode(b(basic_auth)).decode('utf-8')
if proxy_basic_auth:
headers['proxy-authorization'] = 'Basic ' + \
b64encode(b(proxy_basic_auth)).decode('utf-8')
if disable_cache:
headers['cache-control'] = 'no-cache'
return headers
| mit |
ad-uistyleguide/ad-uistyleguide.github.io | node_modules/gulp-sass/node_modules/node-sass/node_modules/pangyp/gyp/pylib/gyp/flock_tool.py | 604 | 1533 | #!/usr/bin/env python
# Copyright (c) 2011 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""These functions are executed via gyp-flock-tool when using the Makefile
generator. Used on systems that don't have a built-in flock."""
import fcntl
import os
import struct
import subprocess
import sys
def main(args):
executor = FlockTool()
executor.Dispatch(args)
class FlockTool(object):
"""This class emulates the 'flock' command."""
def Dispatch(self, args):
"""Dispatches a string command to a method."""
if len(args) < 1:
raise Exception("Not enough arguments")
method = "Exec%s" % self._CommandifyName(args[0])
getattr(self, method)(*args[1:])
def _CommandifyName(self, name_string):
"""Transforms a tool name like copy-info-plist to CopyInfoPlist"""
return name_string.title().replace('-', '')
def ExecFlock(self, lockfile, *cmd_list):
"""Emulates the most basic behavior of Linux's flock(1)."""
# Rely on exception handling to report errors.
# Note that the stock python on SunOS has a bug
# where fcntl.flock(fd, LOCK_EX) always fails
# with EBADF, that's why we use this F_SETLK
# hack instead.
fd = os.open(lockfile, os.O_WRONLY|os.O_NOCTTY|os.O_CREAT, 0666)
op = struct.pack('hhllhhl', fcntl.F_WRLCK, 0, 0, 0, 0, 0, 0)
fcntl.fcntl(fd, fcntl.F_SETLK, op)
return subprocess.call(cmd_list)
if __name__ == '__main__':
sys.exit(main(sys.argv[1:]))
| mit |
naturali/tensorflow | tensorflow/contrib/distributions/python/ops/mvn.py | 1 | 28414 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Multivariate Normal distribution classes."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import math
from tensorflow.contrib.distributions.python.ops import distribution
from tensorflow.contrib.distributions.python.ops import distribution_util
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.contrib.distributions.python.ops import operator_pd_cholesky
from tensorflow.contrib.distributions.python.ops import operator_pd_diag
from tensorflow.contrib.distributions.python.ops import operator_pd_full
from tensorflow.contrib.distributions.python.ops import operator_pd_vdvt_update
from tensorflow.contrib.framework.python.framework import tensor_util as contrib_tensor_util
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import ops
from tensorflow.python.ops import array_ops
from tensorflow.python.ops import check_ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn
from tensorflow.python.ops import random_ops
__all__ = [
"MultivariateNormalDiag",
"MultivariateNormalDiagWithSoftplusStDev",
"MultivariateNormalCholesky",
"MultivariateNormalFull",
"MultivariateNormalDiagPlusVDVT",
]
class _MultivariateNormalOperatorPD(distribution.Distribution):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and an instance of
`OperatorPDBase`, which provides access to a symmetric positive definite
operator, which defines the covariance.
#### Mathematical details
With `C` the covariance matrix represented by the operator, the PDF of this
distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian.
mu = [1, 2, 3]
chol = [[1, 0, 0.], [1, 3, 0], [1, 2, 3]]
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1.])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
cov = tf.contrib.distributions.OperatorPDCholesky(chol)
dist = tf.contrib.distributions._MultivariateNormalOperatorPD(mu, cov)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
cov,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCov"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu`, and an instance of `OperatorPDBase`, `cov`,
which determines the covariance.
Args:
mu: Floating point tensor with shape `[N1,...,Nb, k]`, `b >= 0`.
cov: Instance of `OperatorPDBase` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `cov` are different dtypes.
"""
with ops.name_scope(name) as ns:
with ops.name_scope("init", values=[mu] + cov.inputs):
self._mu = array_ops.identity(mu, name="mu")
self._cov = cov
self._validate_args = validate_args # Needed by _assert_valid_mu.
self._mu = self._assert_valid_mu(self._mu)
super(_MultivariateNormalOperatorPD, self).__init__(
dtype=self._mu.dtype,
parameters={"mu": self._mu, "cov": self._cov},
is_reparameterized=True,
is_continuous=True,
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
def _assert_valid_mu(self, mu):
"""Return `mu` after validity checks and possibly with assertations."""
cov = self._cov
if mu.dtype != cov.dtype:
raise TypeError(
"mu and cov must have the same dtype. Found mu.dtype = %s, "
"cov.dtype = %s" % (mu.dtype, cov.dtype))
# Try to validate with static checks.
mu_shape = mu.get_shape()
cov_shape = cov.get_shape()
if mu_shape.is_fully_defined() and cov_shape.is_fully_defined():
if mu_shape != cov_shape[:-1]:
raise ValueError(
"mu.shape and cov.shape[:-1] should match. Found: mu.shape=%s, "
"cov.shape=%s" % (mu_shape, cov_shape))
else:
return mu
# Static checks could not be run, so possibly do dynamic checks.
if not self.validate_args:
return mu
else:
assert_same_rank = check_ops.assert_equal(
array_ops.rank(mu) + 1,
cov.rank(),
data=["mu should have rank 1 less than cov. Found: rank(mu) = ",
array_ops.rank(mu), " rank(cov) = ", cov.rank()],
)
with ops.control_dependencies([assert_same_rank]):
assert_same_shape = check_ops.assert_equal(
array_ops.shape(mu),
cov.vector_shape(),
data=["mu.shape and cov.shape[:-1] should match. "
"Found: shape(mu) = "
, array_ops.shape(mu), " shape(cov) = ", cov.shape()],
)
return control_flow_ops.with_dependencies([assert_same_shape], mu)
@property
def mu(self):
return self._mu
@property
def sigma(self):
"""Dense (batch) covariance matrix, if available."""
with ops.name_scope(self.name):
return self._cov.to_dense()
def log_sigma_det(self, name="log_sigma_det"):
"""Log of determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return self._cov.log_det()
def sigma_det(self, name="sigma_det"):
"""Determinant of covariance matrix."""
with ops.name_scope(self.name):
with ops.name_scope(name, values=self._cov.inputs):
return math_ops.exp(self._cov.log_det())
def _batch_shape(self):
return self._cov.batch_shape()
def _get_batch_shape(self):
return self._cov.get_batch_shape()
def _event_shape(self):
return array_ops.pack([self._cov.vector_space_dimension()])
def _get_event_shape(self):
return self._cov.get_shape()[-1:]
def _sample_n(self, n, seed=None):
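    # Draw standard normal noise and map it through the covariance square root,
    # i.e. samples are mu + S z with z ~ N(0, I) and C = S S^T.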
# Recall _assert_valid_mu ensures mu and self._cov have same batch shape.
shape = array_ops.concat(0, [self._cov.vector_shape(), [n]])
white_samples = random_ops.random_normal(shape=shape,
mean=0,
stddev=1,
dtype=self.dtype,
seed=seed)
correlated_samples = self._cov.sqrt_matmul(white_samples)
# Move the last dimension to the front
perm = array_ops.concat(0, (
array_ops.pack([array_ops.rank(correlated_samples) - 1]),
math_ops.range(0, array_ops.rank(correlated_samples) - 1)))
# TODO(ebrevdo): Once we get a proper tensor contraction op,
# perform the inner product using that instead of batch_matmul
# and this slow transpose can go away!
correlated_samples = array_ops.transpose(correlated_samples, perm)
samples = correlated_samples + self.mu
return samples
def _log_prob(self, x):
# Q: Why are shape requirements as stated above?
# A: The compatible shapes are precisely the ones that will broadcast to
# a shape compatible with self._cov.
# See Operator base class for notes about shapes compatible with self._cov.
x = ops.convert_to_tensor(x)
contrib_tensor_util.assert_same_float_dtype((self._mu, x))
# _assert_valid_mu asserts that self.mu has same batch shape as self.cov.
# so batch shape of self.mu = that of self._cov and self, and the
# batch shape of x_centered is a broadcast version of these. If this
# broadcast results in a shape like
# [M1,...,Mm] + self.batch_shape + self.event_shape
# OR
# self.batch_shape + self.event_shape
# then subsequent operator calls are guaranteed to work.
x_centered = x - self.mu
# Compute the term x^{-1} sigma^{-1} x which appears in the exponent of
# the pdf.
x_whitened_norm = self._cov.inv_quadratic_form_on_vectors(x_centered)
k = math_ops.cast(self._cov.vector_space_dimension(), self.dtype)
log_prob_value = -0.5 * (self.log_sigma_det() +
k * math.log(2. * math.pi) +
x_whitened_norm)
output_static_shape = x_centered.get_shape()[:-1]
log_prob_value.set_shape(output_static_shape)
return log_prob_value
def _prob(self, x):
return math_ops.exp(self.log_prob(x))
def _entropy(self):
log_sigma_det = self.log_sigma_det()
one_plus_log_two_pi = constant_op.constant(1 + math.log(2 * math.pi),
dtype=self.dtype)
# Use broadcasting rules to calculate the full broadcast sigma.
k = math_ops.cast(self._cov.vector_space_dimension(), dtype=self.dtype)
entropy_value = (k * one_plus_log_two_pi + log_sigma_det) / 2
entropy_value.set_shape(log_sigma_det.get_shape())
return entropy_value
def _mean(self):
return array_ops.identity(self._mu)
def _variance(self):
return self.sigma
def _mode(self):
return array_ops.identity(self._mu)
_prob_note = """
`x` is a batch vector with compatible shape if `x` is a `Tensor` whose
shape can be broadcast up to either:
  ```
self.batch_shape + self.event_shape
OR
[M1,...,Mm] + self.batch_shape + self.event_shape
```
"""
distribution_util.append_class_fun_doc(_MultivariateNormalOperatorPD.log_prob,
doc_str=_prob_note)
distribution_util.append_class_fun_doc(_MultivariateNormalOperatorPD.prob,
doc_str=_prob_note)
class MultivariateNormalDiag(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a 1-D diagonal
`diag_stdev`, representing the standard deviations. This distribution
assumes the random variables, `(X_1,...,X_k)` are independent, thus no
non-diagonal terms of the covariance matrix are needed.
This allows for `O(k)` pdf evaluation, sampling, and storage.
#### Mathematical details
The PDF of this distribution is defined in terms of the diagonal covariance
determined by `diag_stdev`: `C_{ii} = diag_stdev[i]**2`.
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and the square roots of the (independent) random variables.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal standard deviation.
mu = [1, 2, 3.]
diag_stdev = [4, 5, 6.]
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_stdev = ... # shape 2 x 3, positive.
dist = tf.contrib.distributions.MultivariateNormalDiag(mu, diag_stdev)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiag"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and standard deviations `diag_stdev`.
Each batch member represents a random vector `(X_1,...,X_k)` of independent
random normals.
The mean of `X_i` is `mu[i]`, and the standard deviation is `diag_stdev[i]`.
Args:
mu: Rank `N + 1` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
diag_stdev: Rank `N + 1` `Tensor` with same `dtype` and shape as `mu`,
representing the standard deviations. Must be positive.
validate_args: `Boolean`, default `False`. Whether to validate
input with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `diag_stdev` are different dtypes.
"""
cov = operator_pd_diag.OperatorPDSqrtDiag(
diag_stdev, verify_pd=validate_args)
super(MultivariateNormalDiag, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalDiagWithSoftplusStDev(MultivariateNormalDiag):
"""MultivariateNormalDiag with `diag_stddev = softplus(diag_stddev)`."""
def __init__(self,
mu,
diag_stdev,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagWithSoftplusStdDev"):
with ops.name_scope(name, values=[mu, diag_stdev]) as ns:
super(MultivariateNormalDiagWithSoftplusStDev, self).__init__(
mu=mu,
diag_stdev=nn.softplus(diag_stdev),
validate_args=validate_args,
allow_nan_stats=allow_nan_stats,
name=ns)
class MultivariateNormalDiagPlusVDVT(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
Every batch member of this distribution is defined by a mean and a lightweight
covariance matrix `C`.
#### Mathematical details
The PDF of this distribution in terms of the mean `mu` and covariance `C` is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a lightweight
definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
  V is shape (k x r), typically r << k
  D is diagonal (r x r), optional (defaults to identity).
```
This allows for `O(kr + r^3)` pdf evaluation and determinant, and `O(kr)`
sampling and storage (per batch member).
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and square root of the covariance `S = M + V D V^T`. Extra
leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with covariance square root
# S = M + V D V^T, where V D V^T is a matrix-rank 2 update.
mu = [1, 2, 3.]
diag_large = [1.1, 2.2, 3.3]
v = ... # shape 3 x 2
diag_small = [4., 5.]
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v, diag_small=diag_small)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians. This time, don't provide
# diag_small. This means S = M + V V^T.
mu = [[1, 2, 3], [11, 22, 33]] # shape 2 x 3
diag_large = ... # shape 2 x 3
v = ... # shape 2 x 3 x 1, a matrix-rank 1 update.
dist = tf.contrib.distributions.MultivariateNormalDiagPlusVDVT(
mu, diag_large, v)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(
self,
mu,
diag_large,
v,
diag_small=None,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalDiagPlusVDVT"):
"""Multivariate Normal distributions on `R^k`.
For every batch member, this distribution represents `k` random variables
`(X_1,...,X_k)`, with mean `E[X_i] = mu[i]`, and covariance matrix
`C_{ij} := E[(X_i - mu[i])(X_j - mu[j])]`
The user initializes this class by providing the mean `mu`, and a
lightweight definition of `C`:
```
C = SS^T = SS = (M + V D V^T) (M + V D V^T)
M is diagonal (k x k)
    V is shape (k x r), typically r << k
    D is diagonal (r x r), optional (defaults to identity).
```
Args:
mu: Rank `n + 1` floating point tensor with shape `[N1,...,Nn, k]`,
`n >= 0`. The means.
diag_large: Optional rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `M`.
v: Rank `n + 1` floating point tensor, shape `[N1,...,Nn, k, r]`
`n >= 0`. Defines the matrix `V`.
diag_small: Rank `n + 1` floating point tensor, shape
`[N1,...,Nn, k]` `n >= 0`. Defines the diagonal matrix `D`. Default
is `None`, which means `D` will be the identity matrix.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`,
and the inputs are invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
"""
m = operator_pd_diag.OperatorPDDiag(diag_large, verify_pd=validate_args)
cov = operator_pd_vdvt_update.OperatorPDSqrtVDVTUpdate(
m, v, diag=diag_small, verify_pd=validate_args,
verify_shapes=validate_args)
super(MultivariateNormalDiagPlusVDVT, self).__init__(
mu, cov, allow_nan_stats=allow_nan_stats, validate_args=validate_args,
name=name)
class MultivariateNormalCholesky(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and a Cholesky factor `chol`.
Providing the Cholesky factor allows for `O(k^2)` pdf evaluation and sampling,
and requires `O(k^2)` storage.
#### Mathematical details
The Cholesky factor `chol` defines the covariance matrix: `C = chol chol^T`.
The PDF of this distribution is then:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
# Note, this would be more efficient with MultivariateNormalDiag.
mu = [1, 2, 3.]
chol = [[1, 0, 0], [0, 3, 0], [0, 0, 2]]
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33]]
chol = ... # shape 2 x 3 x 3, lower triangular, positive diagonal.
dist = tf.contrib.distributions.MultivariateNormalCholesky(mu, chol)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11]] # Shape 2 x 3.
dist.pdf(x)
```
  Trainable (batch) Cholesky matrices can be created with
  `tf.contrib.distributions.matrix_diag_transform()`.
"""
def __init__(self,
mu,
chol,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalCholesky"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `chol` which holds the (batch) Cholesky
factors, such that the covariance of each batch member is `chol chol^T`.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
chol: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. The upper triangular part is ignored (treated as
though it is zero), and the diagonal must be positive.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `chol` are different dtypes.
"""
cov = operator_pd_cholesky.OperatorPDCholesky(chol, verify_pd=validate_args)
super(MultivariateNormalCholesky, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
class MultivariateNormalFull(_MultivariateNormalOperatorPD):
"""The multivariate normal distribution on `R^k`.
This distribution is defined by a 1-D mean `mu` and covariance matrix `sigma`.
Evaluation of the pdf, determinant, and sampling are all `O(k^3)` operations.
#### Mathematical details
With `C = sigma`, the PDF of this distribution is:
```
f(x) = (2 pi)^(-k/2) |det(C)|^(-1/2) exp(-1/2 (x - mu)^T C^{-1} (x - mu))
```
#### Examples
A single multi-variate Gaussian distribution is defined by a vector of means
of length `k`, and a covariance matrix of shape `k x k`.
Extra leading dimensions, if provided, allow for batches.
```python
# Initialize a single 3-variate Gaussian with diagonal covariance.
mu = [1, 2, 3.]
sigma = [[1, 0, 0], [0, 3, 0], [0, 0, 2.]]
  dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on an observation in R^3, returning a scalar.
dist.pdf([-1, 0, 1])
# Initialize a batch of two 3-variate Gaussians.
mu = [[1, 2, 3], [11, 22, 33.]]
sigma = ... # shape 2 x 3 x 3, positive definite.
dist = tf.contrib.distributions.MultivariateNormalFull(mu, sigma)
# Evaluate this on a two observations, each in R^3, returning a length two
# tensor.
x = [[-1, 0, 1], [-11, 0, 11.]] # Shape 2 x 3.
dist.pdf(x)
```
"""
def __init__(self,
mu,
sigma,
validate_args=False,
allow_nan_stats=True,
name="MultivariateNormalFull"):
"""Multivariate Normal distributions on `R^k`.
User must provide means `mu` and `sigma`, the mean and covariance.
Args:
mu: `(N+1)-D` floating point tensor with shape `[N1,...,Nb, k]`,
`b >= 0`.
sigma: `(N+2)-D` `Tensor` with same `dtype` as `mu` and shape
`[N1,...,Nb, k, k]`. Each batch member must be positive definite.
validate_args: `Boolean`, default `False`. Whether to validate input
with asserts. If `validate_args` is `False`, and the inputs are
invalid, correct behavior is not guaranteed.
allow_nan_stats: `Boolean`, default `True`. If `False`, raise an
exception if a statistic (e.g. mean/mode/etc...) is undefined for any
batch member If `True`, batch members with valid parameters leading to
undefined statistics will return NaN for this statistic.
name: The name to give Ops created by the initializer.
Raises:
TypeError: If `mu` and `sigma` are different dtypes.
"""
cov = operator_pd_full.OperatorPDFull(sigma, verify_pd=validate_args)
super(MultivariateNormalFull, self).__init__(
mu,
cov,
allow_nan_stats=allow_nan_stats,
validate_args=validate_args,
name=name)
def _kl_mvn_mvn_brute_force(mvn_a, mvn_b, name=None):
"""Batched KL divergence `KL(mvn_a || mvn_b)` for multivariate normals.
  With `X`, `Y` both multivariate normals in `R^k` with means `mu_a`, `mu_b` and
  covariance matrices `C_a`, `C_b` respectively,
```
  KL(X || Y) = 0.5 * ( T + Q - k + L ),
T := trace(C_b^{-1} C_a),
Q := (mu_b - mu_a)^T C_b^{-1} (mu_b - mu_a),
L := Log[Det(C_b)] - Log[Det(C_a)]
```
This `Op` computes the trace by solving `C_b^{-1} C_a`. Although efficient
methods for solving systems with `C_b` may be available, a dense version of
(the square root of) `C_a` is used, so performance is `O(B s k^2)` where `B`
is the batch size, and `s` is the cost of solving `C_b x = y` for vectors `x`
and `y`.
Args:
mvn_a: Instance of subclass of `_MultivariateNormalOperatorPD`.
mvn_b: Instance of subclass of `_MultivariateNormalOperatorPD`.
name: (optional) name to use for created ops. Default "kl_mvn_mvn".
Returns:
Batchwise `KL(mvn_a || mvn_b)`.
"""
# Access the "private" OperatorPD that each mvn is built from.
cov_a = mvn_a._cov # pylint: disable=protected-access
cov_b = mvn_b._cov # pylint: disable=protected-access
mu_a = mvn_a.mu
mu_b = mvn_b.mu
inputs = [mu_a, mu_b] + cov_a.inputs + cov_b.inputs
with ops.name_scope(name, "kl_mvn_mvn", inputs):
# If Ca = AA', Cb = BB', then
# tr[inv(Cb) Ca] = tr[inv(B)' inv(B) A A']
# = tr[inv(B) A A' inv(B)']
# = tr[(inv(B) A) (inv(B) A)']
# = sum_{ik} (inv(B) A)_{ik}^2
# The second equality follows from the cyclic permutation property.
b_inv_a = cov_b.sqrt_solve(cov_a.sqrt_to_dense())
t = math_ops.reduce_sum(
math_ops.square(b_inv_a),
reduction_indices=[-1, -2])
q = cov_b.inv_quadratic_form_on_vectors(mu_b - mu_a)
k = math_ops.cast(cov_a.vector_space_dimension(), mvn_a.dtype)
one_half_l = cov_b.sqrt_log_det() - cov_a.sqrt_log_det()
return 0.5 * (t + q - k) + one_half_l
# Register KL divergences.
kl_classes = [
MultivariateNormalFull,
MultivariateNormalCholesky,
MultivariateNormalDiag,
MultivariateNormalDiagPlusVDVT,
]
for mvn_aa in kl_classes:
# Register when they are the same here, and do not register when they are the
# same below because that would result in a repeated registration.
kullback_leibler.RegisterKL(mvn_aa, mvn_aa)(_kl_mvn_mvn_brute_force)
for mvn_bb in kl_classes:
if mvn_bb != mvn_aa:
kullback_leibler.RegisterKL(mvn_aa, mvn_bb)(_kl_mvn_mvn_brute_force)
| apache-2.0 |
pyoceans/pocean-core | pocean/dsg/trajectory/cr.py | 1 | 10376 | #!python
# coding=utf-8
from copy import copy
from collections import OrderedDict
import numpy as np
import pandas as pd
from pocean.utils import (
create_ncvar_from_series,
dict_update,
downcast_dataframe,
generic_masked,
get_default_axes,
get_dtype,
get_mapped_axes_variables,
get_masked_datetime_array,
get_ncdata_from_series,
nativize_times,
normalize_countable_array,
)
from pocean.cf import CFDataset, cf_safe_name
from pocean.dsg.trajectory import trajectory_calculated_metadata
from pocean import logger as L # noqa
class ContiguousRaggedTrajectory(CFDataset):
@classmethod
def is_mine(cls, dsg, strict=False):
try:
rvars = dsg.filter_by_attrs(cf_role='trajectory_id')
assert len(rvars) == 1
assert dsg.featureType.lower() == 'trajectory'
assert len(dsg.t_axes()) >= 1
assert len(dsg.x_axes()) >= 1
assert len(dsg.y_axes()) >= 1
assert len(dsg.z_axes()) >= 1
o_index_vars = dsg.filter_by_attrs(
sample_dimension=lambda x: x is not None
)
assert len(o_index_vars) == 1
assert o_index_vars[0].sample_dimension in dsg.dimensions # Sample dimension
# Allow for string variables
rvar = rvars[0]
# 0 = single
# 1 = array of strings/ints/bytes/etc
# 2 = array of character arrays
assert 0 <= len(rvar.dimensions) <= 2
except BaseException:
if strict is True:
raise
return False
return True
@classmethod
def from_dataframe(cls, df, output, **kwargs):
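        # Write the frame as a CF contiguous ragged array: every observation is
        # appended along a single sample dimension, and a per-trajectory rowSize
        # variable records how many contiguous samples belong to each trajectory.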
axes = get_default_axes(kwargs.pop('axes', {}))
daxes = axes
        # A CR file should never hold just one trajectory, so the "reduce_dims" keyword is ignored
_ = kwargs.pop('reduce_dims', False) # noqa
unlimited = kwargs.pop('unlimited', False)
unique_dims = kwargs.pop('unique_dims', False)
if unique_dims is True:
# Rename the dimension to avoid a dimension and coordinate having the same name
            # which is not supported in xarray
changed_axes = { k: '{}_dim'.format(v) for k, v in axes._asdict().items() }
daxes = get_default_axes(changed_axes)
# Downcast anything from int64 to int32
# Convert any timezone aware datetimes to native UTC times
df = downcast_dataframe(nativize_times(df))
with ContiguousRaggedTrajectory(output, 'w') as nc:
trajectory_groups = df.groupby(axes.trajectory)
unique_trajectories = list(trajectory_groups.groups.keys())
num_trajectories = len(unique_trajectories)
nc.createDimension(daxes.trajectory, num_trajectories)
trajectory = nc.createVariable(axes.trajectory, get_dtype(df[axes.trajectory]), (daxes.trajectory,))
            # Create the sample dimension holding every observation row (unlimited if requested)
if unlimited is True:
nc.createDimension(daxes.sample, None)
else:
nc.createDimension(daxes.sample, len(df))
# Number of observations in each trajectory
row_size = nc.createVariable('rowSize', 'i4', (daxes.trajectory,))
attributes = dict_update(nc.nc_attributes(axes, daxes), kwargs.pop('attributes', {}))
# Variables defined on only the trajectory axis
traj_vars = kwargs.pop('traj_vars', [])
traj_columns = [ p for p in traj_vars if p in df.columns ]
for c in traj_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
create_ncvar_from_series(
nc,
var_name,
(daxes.trajectory,),
df[c],
zlib=True,
complevel=1
)
for i, (trajid, trg) in enumerate(trajectory_groups):
trajectory[i] = trajid
row_size[i] = len(trg)
# Save any trajectory variables using the first value found
# in the column.
for c in traj_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
continue
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(trg[c], v)[0]
try:
v[i] = vvalues
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Add all of the columns based on the sample dimension. Take all columns and remove the
# trajectory, rowSize and other trajectory based columns.
sample_columns = [
f for f in df.columns if f not in traj_columns + ['rowSize', axes.trajectory]
]
for c in sample_columns:
var_name = cf_safe_name(c)
if var_name not in nc.variables:
v = create_ncvar_from_series(
nc,
var_name,
(daxes.sample,),
df[c],
zlib=True,
complevel=1
)
else:
v = nc.variables[var_name]
vvalues = get_ncdata_from_series(df[c], v)
try:
if unlimited is True:
v[:] = vvalues
else:
v[:] = vvalues.reshape(v.shape)
except BaseException:
L.exception('Failed to add {}'.format(c))
continue
# Metadata variables
if 'crs' not in nc.variables:
nc.createVariable('crs', 'i4')
# Set attributes
nc.update_attributes(attributes)
return ContiguousRaggedTrajectory(output, **kwargs)
def calculated_metadata(self, df=None, geometries=True, clean_cols=True, clean_rows=True, **kwargs):
axes = get_default_axes(kwargs.pop('axes', {}))
if df is None:
df = self.to_dataframe(clean_cols=clean_cols, clean_rows=clean_rows, axes=axes)
return trajectory_calculated_metadata(df, axes, geometries)
def to_dataframe(self, clean_cols=True, clean_rows=True, **kwargs):
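        # Rebuild a flat dataframe from the ragged layout: per-trajectory values
        # are repeated rowSize times so they line up with the sample dimension.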
axes = get_default_axes(kwargs.pop('axes', {}))
axv = get_mapped_axes_variables(self, axes)
o_index_var = self.filter_by_attrs(sample_dimension=lambda x: x is not None)
if not o_index_var:
raise ValueError(
'Could not find the "sample_dimension" attribute on any variables, '
'is this a valid {}?'.format(self.__class__.__name__)
)
else:
o_index_var = o_index_var[0]
o_dim = self.dimensions[o_index_var.sample_dimension] # Sample dimension
t_dim = o_index_var.dimensions
# Trajectory
row_sizes = o_index_var[:]
traj_data = normalize_countable_array(axv.trajectory)
traj_data = np.repeat(traj_data, row_sizes)
# time
time_data = get_masked_datetime_array(axv.t[:], axv.t).flatten()
df_data = OrderedDict([
(axes.t, time_data),
(axes.trajectory, traj_data)
])
building_index_to_drop = np.ones(o_dim.size, dtype=bool)
extract_vars = copy(self.variables)
# Skip the time and row index variables
del extract_vars[o_index_var.name]
del extract_vars[axes.t]
for i, (dnam, dvar) in enumerate(extract_vars.items()):
# Trajectory dimensions
if dvar.dimensions == t_dim:
vdata = np.repeat(generic_masked(dvar[:], attrs=self.vatts(dnam)), row_sizes)
# Sample dimensions
elif dvar.dimensions == (o_dim.name,):
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
else:
vdata = generic_masked(dvar[:].flatten().astype(dvar.dtype), attrs=self.vatts(dnam))
# Carry through size 1 variables
if vdata.size == 1:
if vdata[0] is np.ma.masked:
L.warning("Skipping variable {} that is completely masked".format(dnam))
continue
else:
L.warning("Skipping variable {} since it didn't match any dimension sizes".format(dnam))
continue
            # Mark rows with data so we don't remove them with clean_rows
if vdata.size == building_index_to_drop.size:
building_index_to_drop = (building_index_to_drop == True) & (vdata.mask == True) # noqa
# Handle scalars here at the end
if vdata.size == 1:
vdata = vdata[0]
df_data[dnam] = vdata
df = pd.DataFrame(df_data)
# Drop all data columns with no data
if clean_cols:
df = df.dropna(axis=1, how='all')
# Drop all data rows with no data variable data
if clean_rows:
df = df.iloc[~building_index_to_drop]
return df
def nc_attributes(self, axes, daxes):
atts = super(ContiguousRaggedTrajectory, self).nc_attributes()
return dict_update(atts, {
'global' : {
'featureType': 'trajectory',
'cdm_data_type': 'Trajectory'
},
axes.trajectory: {
'cf_role': 'trajectory_id',
'long_name' : 'trajectory identifier',
'ioos_category': 'identifier'
},
axes.x: {
'axis': 'X'
},
axes.y: {
'axis': 'Y'
},
axes.z: {
'axis': 'Z'
},
axes.t: {
'units': self.default_time_unit,
'standard_name': 'time',
'axis': 'T'
},
'rowSize': {
'sample_dimension': daxes.sample
}
})
| mit |
scenarios/tensorflow | tensorflow/contrib/distributions/python/kernel_tests/gamma_test.py | 7 | 14250 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from scipy import special
from scipy import stats
from tensorflow.contrib.distributions.python.ops import gamma as gamma_lib
from tensorflow.contrib.distributions.python.ops import kullback_leibler
from tensorflow.python.client import session
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import tensor_shape
from tensorflow.python.ops import math_ops
from tensorflow.python.ops import nn_ops
from tensorflow.python.platform import test
class GammaTest(test.TestCase):
def testGammaShape(self):
with self.test_session():
alpha = constant_op.constant([3.0] * 5)
beta = constant_op.constant(11.0)
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
self.assertEqual(gamma.batch_shape().eval(), (5,))
self.assertEqual(gamma.get_batch_shape(), tensor_shape.TensorShape([5]))
self.assertAllEqual(gamma.event_shape().eval(), [])
self.assertEqual(gamma.get_event_shape(), tensor_shape.TensorShape([]))
def testGammaLogPDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_pdf(x)
self.assertEqual(log_pdf.get_shape(), (6,))
self.assertAllClose(log_pdf.eval(), expected_log_pdf)
pdf = gamma.pdf(x)
self.assertEqual(pdf.get_shape(), (6,))
self.assertAllClose(pdf.eval(), np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensional(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant([[3.0, 4.0]] * batch_size)
alpha_v = np.array([2.0, 4.0])
beta_v = np.array([3.0, 4.0])
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_pdf(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.pdf(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaLogPDFMultidimensionalBroadcasting(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([[2.0, 4.0]] * batch_size)
beta = constant_op.constant(3.0)
alpha_v = np.array([2.0, 4.0])
beta_v = 3.0
x = np.array([[2.5, 2.5, 4.0, 0.1, 1.0, 2.0]], dtype=np.float32).T
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_log_pdf = stats.gamma.logpdf(x, alpha_v, scale=1 / beta_v)
log_pdf = gamma.log_pdf(x)
log_pdf_values = log_pdf.eval()
self.assertEqual(log_pdf.get_shape(), (6, 2))
self.assertAllClose(log_pdf_values, expected_log_pdf)
pdf = gamma.pdf(x)
pdf_values = pdf.eval()
self.assertEqual(pdf.get_shape(), (6, 2))
self.assertAllClose(pdf_values, np.exp(expected_log_pdf))
def testGammaCDF(self):
with self.test_session():
batch_size = 6
alpha = constant_op.constant([2.0] * batch_size)
beta = constant_op.constant([3.0] * batch_size)
alpha_v = 2.0
beta_v = 3.0
x = np.array([2.5, 2.5, 4.0, 0.1, 1.0, 2.0], dtype=np.float32)
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
expected_cdf = stats.gamma.cdf(x, alpha_v, scale=1 / beta_v)
cdf = gamma.cdf(x)
self.assertEqual(cdf.get_shape(), (6,))
self.assertAllClose(cdf.eval(), expected_cdf)
def testGammaMean(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_means = stats.gamma.mean(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.mean().get_shape(), (3,))
self.assertAllClose(gamma.mean().eval(), expected_means)
def testGammaModeAllowNanStatsIsFalseWorksWhenAllBatchMembersAreDefined(self):
with self.test_session():
alpha_v = np.array([5.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_modes = (alpha_v - 1) / beta_v
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaModeAllowNanStatsFalseRaisesForUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, allow_nan_stats=False)
with self.assertRaisesOpError("x < y"):
gamma.mode().eval()
def testGammaModeAllowNanStatsIsTrueReturnsNaNforUndefinedBatchMembers(self):
with self.test_session():
# Mode will not be defined for the first entry.
alpha_v = np.array([0.5, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, allow_nan_stats=True)
expected_modes = (alpha_v - 1) / beta_v
expected_modes[0] = np.nan
self.assertEqual(gamma.mode().get_shape(), (3,))
self.assertAllClose(gamma.mode().eval(), expected_modes)
def testGammaVariance(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_variances = stats.gamma.var(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.variance().get_shape(), (3,))
self.assertAllClose(gamma.variance().eval(), expected_variances)
def testGammaStd(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
expected_std = stats.gamma.std(alpha_v, scale=1 / beta_v)
self.assertEqual(gamma.std().get_shape(), (3,))
self.assertAllClose(gamma.std().eval(), expected_std)
def testGammaEntropy(self):
with self.test_session():
alpha_v = np.array([1.0, 3.0, 2.5])
beta_v = np.array([1.0, 4.0, 5.0])
expected_entropy = stats.gamma.entropy(alpha_v, scale=1 / beta_v)
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
self.assertEqual(gamma.entropy().get_shape(), (3,))
self.assertAllClose(gamma.entropy().eval(), expected_entropy)
def testGammaSampleSmallAlpha(self):
with session.Session():
alpha_v = 0.05
beta_v = 1.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSample(self):
with session.Session():
alpha_v = 4.0
beta_v = 3.0
alpha = constant_op.constant(alpha_v)
beta = constant_op.constant(beta_v)
n = 100000
gamma = gamma_lib.Gamma(alpha=alpha, beta=beta)
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n,))
self.assertEqual(sample_values.shape, (n,))
self.assertAllClose(
sample_values.mean(),
stats.gamma.mean(
alpha_v, scale=1 / beta_v),
atol=.01)
self.assertAllClose(
sample_values.var(),
stats.gamma.var(alpha_v, scale=1 / beta_v),
atol=.15)
self.assertTrue(self._kstest(alpha_v, beta_v, sample_values))
def testGammaSampleMultiDimensional(self):
with session.Session():
alpha_v = np.array([np.arange(1, 101, dtype=np.float32)]) # 1 x 100
beta_v = np.array([np.arange(1, 11, dtype=np.float32)]).T # 10 x 1
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v)
n = 10000
samples = gamma.sample(n, seed=137)
sample_values = samples.eval()
self.assertEqual(samples.get_shape(), (n, 10, 100))
self.assertEqual(sample_values.shape, (n, 10, 100))
zeros = np.zeros_like(alpha_v + beta_v) # 10 x 100
alpha_bc = alpha_v + zeros
beta_bc = beta_v + zeros
self.assertAllClose(
sample_values.mean(axis=0),
stats.gamma.mean(
alpha_bc, scale=1 / beta_bc),
rtol=.035)
self.assertAllClose(
sample_values.var(axis=0),
stats.gamma.var(alpha_bc, scale=1 / beta_bc),
atol=4.5)
fails = 0
trials = 0
for ai, a in enumerate(np.reshape(alpha_v, [-1])):
for bi, b in enumerate(np.reshape(beta_v, [-1])):
s = sample_values[:, bi, ai]
trials += 1
fails += 0 if self._kstest(a, b, s) else 1
self.assertLess(fails, trials * 0.03)
def _kstest(self, alpha, beta, samples):
# Uses the Kolmogorov-Smirnov test for goodness of fit.
ks, _ = stats.kstest(samples, stats.gamma(alpha, scale=1 / beta).cdf)
# Return True when the test passes.
return ks < 0.02
def testGammaPdfOfSampleMultiDims(self):
with session.Session() as sess:
gamma = gamma_lib.Gamma(alpha=[7., 11.], beta=[[5.], [6.]])
num = 50000
samples = gamma.sample(num, seed=137)
pdfs = gamma.pdf(samples)
sample_vals, pdf_vals = sess.run([samples, pdfs])
self.assertEqual(samples.get_shape(), (num, 2, 2))
self.assertEqual(pdfs.get_shape(), (num, 2, 2))
self.assertAllClose(
stats.gamma.mean(
[[7., 11.], [7., 11.]], scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.mean(axis=0),
atol=.1)
self.assertAllClose(
stats.gamma.var([[7., 11.], [7., 11.]],
scale=1 / np.array([[5., 5.], [6., 6.]])),
sample_vals.var(axis=0),
atol=.1)
self._assertIntegral(sample_vals[:, 0, 0], pdf_vals[:, 0, 0], err=0.02)
self._assertIntegral(sample_vals[:, 0, 1], pdf_vals[:, 0, 1], err=0.02)
self._assertIntegral(sample_vals[:, 1, 0], pdf_vals[:, 1, 0], err=0.02)
self._assertIntegral(sample_vals[:, 1, 1], pdf_vals[:, 1, 1], err=0.02)
def _assertIntegral(self, sample_vals, pdf_vals, err=1e-3):
s_p = zip(sample_vals, pdf_vals)
prev = (0, 0)
total = 0
for k in sorted(s_p, key=lambda x: x[0]):
pair_pdf = (k[1] + prev[1]) / 2
total += (k[0] - prev[0]) * pair_pdf
prev = k
self.assertNear(1., total, err=err)
def testGammaNonPositiveInitializationParamsRaises(self):
with self.test_session():
alpha_v = constant_op.constant(0.0, name="alpha")
beta_v = constant_op.constant(1.0, name="beta")
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, validate_args=True)
with self.assertRaisesOpError("alpha"):
gamma.mean().eval()
alpha_v = constant_op.constant(1.0, name="alpha")
beta_v = constant_op.constant(0.0, name="beta")
gamma = gamma_lib.Gamma(alpha=alpha_v, beta=beta_v, validate_args=True)
with self.assertRaisesOpError("beta"):
gamma.mean().eval()
def testGammaWithSoftplusAlphaBeta(self):
with self.test_session():
alpha_v = constant_op.constant([0.0, -2.1], name="alpha")
beta_v = constant_op.constant([1.0, -3.6], name="beta")
gamma = gamma_lib.GammaWithSoftplusAlphaBeta(alpha=alpha_v, beta=beta_v)
self.assertAllEqual(nn_ops.softplus(alpha_v).eval(), gamma.alpha.eval())
self.assertAllEqual(nn_ops.softplus(beta_v).eval(), gamma.beta.eval())
def testGammaGammaKL(self):
alpha0 = np.array([3.])
beta0 = np.array([1., 2., 3., 1.5, 2.5, 3.5])
alpha1 = np.array([0.4])
beta1 = np.array([0.5, 1., 1.5, 2., 2.5, 3.])
# Build graph.
with self.test_session() as sess:
g0 = gamma_lib.Gamma(alpha=alpha0, beta=beta0)
g1 = gamma_lib.Gamma(alpha=alpha1, beta=beta1)
x = g0.sample(int(1e4), seed=0)
kl_sample = math_ops.reduce_mean(g0.log_prob(x) - g1.log_prob(x), 0)
kl_actual = kullback_leibler.kl(g0, g1)
# Execute graph.
[kl_sample_, kl_actual_] = sess.run([kl_sample, kl_actual])
kl_expected = ((alpha0 - alpha1) * special.digamma(alpha0)
+ special.gammaln(alpha1)
- special.gammaln(alpha0)
+ alpha1 * np.log(beta0)
- alpha1 * np.log(beta1)
+ alpha0 * (beta1 / beta0 - 1.))
self.assertEqual(beta0.shape, kl_actual.get_shape())
self.assertAllClose(kl_expected, kl_actual_, atol=0., rtol=1e-6)
self.assertAllClose(kl_sample_, kl_actual_, atol=0., rtol=1e-2)
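# A minimal reference sketch (not part of the original test file): the
# closed-form KL divergence between two Gamma distributions that
# testGammaGammaKL above checks against, factored out as a plain
# NumPy/SciPy function. The parameterization matches this file (alpha is a
# shape, beta is a rate, not a scale); the function name is ours, not a
# TensorFlow API.
def _gamma_gamma_kl_reference(alpha0, beta0, alpha1, beta1):
  """Computes KL(Gamma(alpha0, beta0) || Gamma(alpha1, beta1)) elementwise."""
  return ((alpha0 - alpha1) * special.digamma(alpha0)
          + special.gammaln(alpha1)
          - special.gammaln(alpha0)
          + alpha1 * np.log(beta0)
          - alpha1 * np.log(beta1)
          + alpha0 * (beta1 / beta0 - 1.))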
if __name__ == "__main__":
test.main()
| apache-2.0 |
xionzz/earthquake | venv/lib/python2.7/site-packages/numpy/core/setup.py | 8 | 42574 | from __future__ import division, print_function
import imp
import os
import sys
import shutil
import pickle
import copy
import warnings
import re
from os.path import join
from numpy.distutils import log
from distutils.dep_util import newer
from distutils.sysconfig import get_config_var
from setup_common import *
# Set to True to enable multiple file compilations (experimental)
ENABLE_SEPARATE_COMPILATION = (os.environ.get('NPY_SEPARATE_COMPILATION', "1") != "0")
# Set to True to enable relaxed strides checking. This (mostly) means
# that `strides[dim]` is ignored if `shape[dim] == 1` when setting flags.
NPY_RELAXED_STRIDES_CHECKING = (os.environ.get('NPY_RELAXED_STRIDES_CHECKING', "0") != "0")
# XXX: ugly, we use a class to avoid calling twice some expensive functions in
# config.h/numpyconfig.h. I don't see a better way because distutils forces
# config.h generation inside an Extension class, and as such sharing
# configuration information between extensions is not easy.
# Using a pickle-based memoize does not work because config_cmd is an instance
# method, which cPickle does not like.
#
# Use pickle in all cases, as cPickle is gone in python3 and the difference
# in time is only in build. -- Charles Harris, 2013-03-30
class CallOnceOnly(object):
def __init__(self):
self._check_types = None
self._check_ieee_macros = None
self._check_complex = None
def check_types(self, *a, **kw):
if self._check_types is None:
out = check_types(*a, **kw)
self._check_types = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_types))
return out
def check_ieee_macros(self, *a, **kw):
if self._check_ieee_macros is None:
out = check_ieee_macros(*a, **kw)
self._check_ieee_macros = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_ieee_macros))
return out
def check_complex(self, *a, **kw):
if self._check_complex is None:
out = check_complex(*a, **kw)
self._check_complex = pickle.dumps(out)
else:
out = copy.deepcopy(pickle.loads(self._check_complex))
return out
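# A small illustrative sketch (not used by the build): the same "run the
# expensive check once, then hand back fresh copies" idea as CallOnceOnly,
# written as a reusable decorator. Like the class above, it ignores the
# arguments of later calls and round-trips the result through pickle so
# callers cannot mutate the cached value. The name call_once is ours.
def call_once(func):
    cache = {}
    def wrapper(*a, **kw):
        if 'result' not in cache:
            cache['result'] = pickle.dumps(func(*a, **kw))
        return pickle.loads(cache['result'])
    return wrapper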
PYTHON_HAS_UNICODE_WIDE = True
def pythonlib_dir():
"""return path where libpython* is."""
if sys.platform == 'win32':
return os.path.join(sys.prefix, "libs")
else:
return get_config_var('LIBDIR')
def is_npy_no_signal():
"""Return True if the NPY_NO_SIGNAL symbol must be defined in configuration
header."""
return sys.platform == 'win32'
def is_npy_no_smp():
"""Return True if the NPY_NO_SMP symbol must be defined in public
header (when SMP support cannot be reliably enabled)."""
# Python 2.3 causes a segfault when
# trying to re-acquire the thread-state
# which is done in error-handling
# ufunc code. NPY_ALLOW_C_API and friends
# cause the segfault. So, we disable threading
# for now.
if sys.version[:5] < '2.4.2':
nosmp = 1
else:
        # Perhaps a fancier check is in order here,
        # so that threads are only enabled if there
        # are actually multiple CPUs? -- but
# threaded code can be nice even on a single
# CPU so that long-calculating code doesn't
# block.
try:
nosmp = os.environ['NPY_NOSMP']
nosmp = 1
except KeyError:
nosmp = 0
return nosmp == 1
def win32_checks(deflist):
from numpy.distutils.misc_util import get_build_architecture
a = get_build_architecture()
# Distutils hack on AMD64 on windows
print('BUILD_ARCHITECTURE: %r, os.name=%r, sys.platform=%r' %
(a, os.name, sys.platform))
if a == 'AMD64':
deflist.append('DISTUTILS_USE_SDK')
# On win32, force long double format string to be 'g', not
# 'Lg', since the MS runtime does not support long double whose
# size is > sizeof(double)
if a == "Intel" or a == "AMD64":
deflist.append('FORCE_NO_LONG_DOUBLE_FORMATTING')
def check_math_capabilities(config, moredefs, mathlibs):
def check_func(func_name):
return config.check_func(func_name, libraries=mathlibs,
decl=True, call=True)
def check_funcs_once(funcs_name):
decl = dict([(f, True) for f in funcs_name])
st = config.check_funcs_once(funcs_name, libraries=mathlibs,
decl=decl, call=decl)
if st:
moredefs.extend([(fname2def(f), 1) for f in funcs_name])
return st
def check_funcs(funcs_name):
# Use check_funcs_once first, and if it does not work, test func per
# func. Return success only if all the functions are available
if not check_funcs_once(funcs_name):
# Global check failed, check func per func
for f in funcs_name:
if check_func(f):
moredefs.append((fname2def(f), 1))
return 0
else:
return 1
#use_msvc = config.check_decl("_MSC_VER")
if not check_funcs_once(MANDATORY_FUNCS):
raise SystemError("One of the required function to build numpy is not"
" available (the list is %s)." % str(MANDATORY_FUNCS))
# Standard functions which may not be available and for which we have a
# replacement implementation. Note that some of these are C99 functions.
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
for f in OPTIONAL_STDFUNCS_MAYBE:
if config.check_decl(fname2def(f),
headers=["Python.h", "math.h"]):
OPTIONAL_STDFUNCS.remove(f)
check_funcs(OPTIONAL_STDFUNCS)
for h in OPTIONAL_HEADERS:
if config.check_func("", decl=False, call=False, headers=[h]):
moredefs.append((fname2def(h).replace(".", "_"), 1))
for tup in OPTIONAL_INTRINSICS:
headers = None
if len(tup) == 2:
f, args = tup
else:
f, args, headers = tup[0], tup[1], [tup[2]]
if config.check_func(f, decl=False, call=True, call_args=args,
headers=headers):
moredefs.append((fname2def(f), 1))
for dec, fn in OPTIONAL_FUNCTION_ATTRIBUTES:
if config.check_func(fn, decl='int %s %s(void *);' % (dec, fn),
call=False):
moredefs.append((fname2def(fn), 1))
for fn in OPTIONAL_VARIABLE_ATTRIBUTES:
if config.check_func(fn, decl='int %s a;' % (fn), call=False):
m = fn.replace("(", "_").replace(")", "_")
moredefs.append((fname2def(m), 1))
# C99 functions: float and long double versions
check_funcs(C99_FUNCS_SINGLE)
check_funcs(C99_FUNCS_EXTENDED)
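# Illustrative sketch (not called anywhere in the build): the "probe
# everything in one shot, fall back to probing one item at a time" pattern
# that check_funcs() above implements, expressed over arbitrary probe
# callables. probe_all and probe_one are hypothetical stand-ins for
# config.check_funcs_once and config.check_func.
def probe_with_fallback(names, probe_all, probe_one):
    """Return the subset of `names` whose probe succeeds."""
    if probe_all(names):
        # The cheap batch probe covered everything.
        return list(names)
    # Batch probe failed; pay the cost of one probe per name.
    return [name for name in names if probe_one(name)]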
def check_complex(config, mathlibs):
priv = []
pub = []
try:
if os.uname()[0] == "Interix":
warnings.warn("Disabling broken complex support. See #1365")
return priv, pub
except:
# os.uname not available on all platforms. blanket except ugly but safe
pass
# Check for complex support
st = config.check_header('complex.h')
if st:
priv.append(('HAVE_COMPLEX_H', 1))
pub.append(('NPY_USE_C99_COMPLEX', 1))
for t in C99_COMPLEX_TYPES:
st = config.check_type(t, headers=["complex.h"])
if st:
pub.append(('NPY_HAVE_%s' % type2def(t), 1))
def check_prec(prec):
flist = [f + prec for f in C99_COMPLEX_FUNCS]
decl = dict([(f, True) for f in flist])
if not config.check_funcs_once(flist, call=decl, decl=decl,
libraries=mathlibs):
for f in flist:
if config.check_func(f, call=True, decl=True,
libraries=mathlibs):
priv.append((fname2def(f), 1))
else:
priv.extend([(fname2def(f), 1) for f in flist])
check_prec('')
check_prec('f')
check_prec('l')
return priv, pub
def check_ieee_macros(config):
priv = []
pub = []
macros = []
def _add_decl(f):
priv.append(fname2def("decl_%s" % f))
pub.append('NPY_%s' % fname2def("decl_%s" % f))
# XXX: hack to circumvent cpp pollution from python: python put its
# config.h in the public namespace, so we have a clash for the common
# functions we test. We remove every function tested by python's
    # autoconf, hoping their own tests are correct
_macros = ["isnan", "isinf", "signbit", "isfinite"]
for f in _macros:
py_symbol = fname2def("decl_%s" % f)
already_declared = config.check_decl(py_symbol,
headers=["Python.h", "math.h"])
if already_declared:
if config.check_macro_true(py_symbol,
headers=["Python.h", "math.h"]):
pub.append('NPY_%s' % fname2def("decl_%s" % f))
else:
macros.append(f)
    # Normally, isnan and isinf are macros (C99), but some platforms only have
# func, or both func and macro version. Check for macro only, and define
# replacement ones if not found.
# Note: including Python.h is necessary because it modifies some math.h
# definitions
for f in macros:
st = config.check_decl(f, headers = ["Python.h", "math.h"])
if st:
_add_decl(f)
return priv, pub
def check_types(config_cmd, ext, build_dir):
private_defines = []
public_defines = []
# Expected size (in number of bytes) for each type. This is an
# optimization: those are only hints, and an exhaustive search for the size
# is done if the hints are wrong.
expected = {}
expected['short'] = [2]
expected['int'] = [4]
expected['long'] = [8, 4]
expected['float'] = [4]
expected['double'] = [8]
expected['long double'] = [8, 12, 16]
expected['Py_intptr_t'] = [4, 8]
expected['PY_LONG_LONG'] = [8]
expected['long long'] = [8]
expected['off_t'] = [4, 8]
# Check we have the python header (-dev* packages on Linux)
result = config_cmd.check_header('Python.h')
if not result:
raise SystemError(
"Cannot compile 'Python.h'. Perhaps you need to "\
"install python-dev|python-devel.")
res = config_cmd.check_header("endian.h")
if res:
private_defines.append(('HAVE_ENDIAN_H', 1))
public_defines.append(('NPY_HAVE_ENDIAN_H', 1))
# Check basic types sizes
for type in ('short', 'int', 'long'):
res = config_cmd.check_decl("SIZEOF_%s" % sym2def(type), headers = ["Python.h"])
if res:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), "SIZEOF_%s" % sym2def(type)))
else:
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
for type in ('float', 'double', 'long double'):
already_declared = config_cmd.check_decl("SIZEOF_%s" % sym2def(type),
headers = ["Python.h"])
res = config_cmd.check_type_size(type, expected=expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
if not already_declared and not type == 'long double':
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# Compute size of corresponding complex type: used to check that our
# definition is binary compatible with C99 complex type (check done at
# build time in npy_common.h)
complex_def = "struct {%s __x; %s __y;}" % (type, type)
res = config_cmd.check_type_size(complex_def, expected=2*expected[type])
if res >= 0:
public_defines.append(('NPY_SIZEOF_COMPLEX_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % complex_def)
for type in ('Py_intptr_t', 'off_t'):
res = config_cmd.check_type_size(type, headers=["Python.h"],
library_dirs=[pythonlib_dir()],
expected=expected[type])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def(type), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def(type), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % type)
# We check declaration AND type because that's how distutils does it.
if config_cmd.check_decl('PY_LONG_LONG', headers=['Python.h']):
res = config_cmd.check_type_size('PY_LONG_LONG', headers=['Python.h'],
library_dirs=[pythonlib_dir()],
expected=expected['PY_LONG_LONG'])
if res >= 0:
private_defines.append(('SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('PY_LONG_LONG'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'PY_LONG_LONG')
res = config_cmd.check_type_size('long long',
expected=expected['long long'])
if res >= 0:
#private_defines.append(('SIZEOF_%s' % sym2def('long long'), '%d' % res))
public_defines.append(('NPY_SIZEOF_%s' % sym2def('long long'), '%d' % res))
else:
raise SystemError("Checking sizeof (%s) failed !" % 'long long')
if not config_cmd.check_decl('CHAR_BIT', headers=['Python.h']):
raise RuntimeError(
"Config wo CHAR_BIT is not supported"\
", please contact the maintainers")
return private_defines, public_defines
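# Illustrative sketch (not part of the configuration): the size hints in
# `expected` above can be cross-checked on the host interpreter with ctypes.
# This only approximates what config_cmd.check_type_size() measures with the
# real target compiler, and it covers the plain C types only (not
# Py_intptr_t, off_t or PY_LONG_LONG), so treat it as a sanity check.
def host_type_sizes():
    import ctypes
    return {
        'short': ctypes.sizeof(ctypes.c_short),
        'int': ctypes.sizeof(ctypes.c_int),
        'long': ctypes.sizeof(ctypes.c_long),
        'long long': ctypes.sizeof(ctypes.c_longlong),
        'float': ctypes.sizeof(ctypes.c_float),
        'double': ctypes.sizeof(ctypes.c_double),
        'long double': ctypes.sizeof(ctypes.c_longdouble),
    }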
def check_mathlib(config_cmd):
# Testing the C math library
mathlibs = []
mathlibs_choices = [[], ['m'], ['cpml']]
mathlib = os.environ.get('MATHLIB')
if mathlib:
mathlibs_choices.insert(0, mathlib.split(','))
for libs in mathlibs_choices:
if config_cmd.check_func("exp", libraries=libs, decl=True, call=True):
mathlibs = libs
break
else:
raise EnvironmentError("math library missing; rerun "
"setup.py after setting the "
"MATHLIB env variable")
return mathlibs
def visibility_define(config):
"""Return the define value to use for NPY_VISIBILITY_HIDDEN (may be empty
string)."""
if config.check_compiler_gcc4():
return '__attribute__((visibility("hidden")))'
else:
return ''
def configuration(parent_package='',top_path=None):
from numpy.distutils.misc_util import Configuration, dot_join
from numpy.distutils.system_info import get_info, default_lib_dirs
config = Configuration('core', parent_package, top_path)
local_dir = config.local_path
codegen_dir = join(local_dir, 'code_generators')
if is_released(config):
warnings.simplefilter('error', MismatchCAPIWarning)
# Check whether we have a mismatch between the set C API VERSION and the
# actual C API VERSION
check_api_version(C_API_VERSION, codegen_dir)
generate_umath_py = join(codegen_dir, 'generate_umath.py')
n = dot_join(config.name, 'generate_umath')
generate_umath = imp.load_module('_'.join(n.split('.')),
open(generate_umath_py, 'U'), generate_umath_py,
('.py', 'U', 1))
header_dir = 'include/numpy' # this is relative to config.path_in_package
cocache = CallOnceOnly()
def generate_config_h(ext, build_dir):
target = join(build_dir, header_dir, 'config.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
moredefs, ignored = cocache.check_types(config_cmd, ext, build_dir)
# Check math library and C99 math funcs availability
mathlibs = check_mathlib(config_cmd)
moredefs.append(('MATHLIB', ','.join(mathlibs)))
check_math_capabilities(config_cmd, moredefs, mathlibs)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[0])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[0])
# Signal check
if is_npy_no_signal():
moredefs.append('__NPY_PRIVATE_NO_SIGNAL')
# Windows checks
if sys.platform=='win32' or os.name=='nt':
win32_checks(moredefs)
# Inline check
inline = config_cmd.check_inline()
# Check whether we need our own wide character support
if not config_cmd.check_decl('Py_UNICODE_WIDE', headers=['Python.h']):
PYTHON_HAS_UNICODE_WIDE = True
else:
PYTHON_HAS_UNICODE_WIDE = False
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
# Get long double representation
if sys.platform != 'darwin':
rep = check_long_double_representation(config_cmd)
if rep in ['INTEL_EXTENDED_12_BYTES_LE',
'INTEL_EXTENDED_16_BYTES_LE',
'MOTOROLA_EXTENDED_12_BYTES_BE',
'IEEE_QUAD_LE', 'IEEE_QUAD_BE',
'IEEE_DOUBLE_LE', 'IEEE_DOUBLE_BE',
'DOUBLE_DOUBLE_BE', 'DOUBLE_DOUBLE_LE']:
moredefs.append(('HAVE_LDOUBLE_%s' % rep, 1))
else:
raise ValueError("Unrecognized long double format: %s" % rep)
# Py3K check
if sys.version_info[0] == 3:
moredefs.append(('NPY_PY3K', 1))
# Generate the config.h file from moredefs
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# define inline to our keyword, or nothing
target_f.write('#ifndef __cplusplus\n')
if inline == 'inline':
target_f.write('/* #undef inline */\n')
else:
target_f.write('#define inline %s\n' % inline)
target_f.write('#endif\n')
# add the guard to make sure config.h is never included directly,
# but always through npy_config.h
target_f.write("""
#ifndef _NPY_NPY_CONFIG_H_
#error config.h should never be included directly, include npy_config.h instead
#endif
""")
target_f.close()
print('File:', target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
else:
mathlibs = []
target_f = open(target)
for line in target_f:
s = '#define MATHLIB'
if line.startswith(s):
value = line[len(s):].strip()
if value:
mathlibs.extend(value.split(','))
target_f.close()
# Ugly: this can be called within a library and not an extension,
        # in which case there is no libraries attribute (and none is
# needed).
if hasattr(ext, 'libraries'):
ext.libraries.extend(mathlibs)
incl_dir = os.path.dirname(target)
if incl_dir not in config.numpy_include_dirs:
config.numpy_include_dirs.append(incl_dir)
return target
def generate_numpyconfig_h(ext, build_dir):
"""Depends on config.h: generate_config_h has to be called before !"""
# put private include directory in build_dir on search path
        # allows using code generation in headers
config.add_include_dirs(join(build_dir, "src", "private"))
target = join(build_dir, header_dir, '_numpyconfig.h')
d = os.path.dirname(target)
if not os.path.exists(d):
os.makedirs(d)
if newer(__file__, target):
config_cmd = config.get_config_cmd()
log.info('Generating %s', target)
# Check sizeof
ignored, moredefs = cocache.check_types(config_cmd, ext, build_dir)
if is_npy_no_signal():
moredefs.append(('NPY_NO_SIGNAL', 1))
if is_npy_no_smp():
moredefs.append(('NPY_NO_SMP', 1))
else:
moredefs.append(('NPY_NO_SMP', 0))
mathlibs = check_mathlib(config_cmd)
moredefs.extend(cocache.check_ieee_macros(config_cmd)[1])
moredefs.extend(cocache.check_complex(config_cmd, mathlibs)[1])
if ENABLE_SEPARATE_COMPILATION:
moredefs.append(('NPY_ENABLE_SEPARATE_COMPILATION', 1))
if NPY_RELAXED_STRIDES_CHECKING:
moredefs.append(('NPY_RELAXED_STRIDES_CHECKING', 1))
            # Check whether we can use inttypes (C99) formats
if config_cmd.check_decl('PRIdPTR', headers = ['inttypes.h']):
moredefs.append(('NPY_USE_C99_FORMATS', 1))
# visibility check
hidden_visibility = visibility_define(config_cmd)
moredefs.append(('NPY_VISIBILITY_HIDDEN', hidden_visibility))
# Add the C API/ABI versions
moredefs.append(('NPY_ABI_VERSION', '0x%.8X' % C_ABI_VERSION))
moredefs.append(('NPY_API_VERSION', '0x%.8X' % C_API_VERSION))
# Add moredefs to header
target_f = open(target, 'w')
for d in moredefs:
if isinstance(d, str):
target_f.write('#define %s\n' % (d))
else:
target_f.write('#define %s %s\n' % (d[0], d[1]))
# Define __STDC_FORMAT_MACROS
target_f.write("""
#ifndef __STDC_FORMAT_MACROS
#define __STDC_FORMAT_MACROS 1
#endif
""")
target_f.close()
# Dump the numpyconfig.h header to stdout
print('File: %s' % target)
target_f = open(target)
print(target_f.read())
target_f.close()
print('EOF')
config.add_data_files((header_dir, target))
return target
def generate_api_func(module_name):
def generate_api(ext, build_dir):
script = join(codegen_dir, module_name + '.py')
sys.path.insert(0, codegen_dir)
try:
m = __import__(module_name)
log.info('executing %s', script)
h_file, c_file, doc_file = m.generate_api(os.path.join(build_dir, header_dir))
finally:
del sys.path[0]
config.add_data_files((header_dir, h_file),
(header_dir, doc_file))
return (h_file,)
return generate_api
generate_numpy_api = generate_api_func('generate_numpy_api')
generate_ufunc_api = generate_api_func('generate_ufunc_api')
config.add_include_dirs(join(local_dir, "src", "private"))
config.add_include_dirs(join(local_dir, "src"))
config.add_include_dirs(join(local_dir))
config.add_data_files('include/numpy/*.h')
config.add_include_dirs(join('src', 'npymath'))
config.add_include_dirs(join('src', 'multiarray'))
config.add_include_dirs(join('src', 'umath'))
config.add_include_dirs(join('src', 'npysort'))
config.add_define_macros([("HAVE_NPY_CONFIG_H", "1")])
config.add_define_macros([("_FILE_OFFSET_BITS", "64")])
config.add_define_macros([('_LARGEFILE_SOURCE', '1')])
config.add_define_macros([('_LARGEFILE64_SOURCE', '1')])
config.numpy_include_dirs.extend(config.paths('include'))
deps = [join('src', 'npymath', '_signbit.c'),
join('include', 'numpy', '*object.h'),
'include/numpy/fenv/fenv.c',
'include/numpy/fenv/fenv.h',
join(codegen_dir, 'genapi.py'),
]
    # Don't install fenv unless we need it.
if sys.platform == 'cygwin':
config.add_data_dir('include/numpy/fenv')
#######################################################################
# dummy module #
#######################################################################
# npymath needs the config.h and numpyconfig.h files to be generated, but
# build_clib cannot handle generate_config_h and generate_numpyconfig_h
    # (don't ask). Because clibs are generated before extensions, we have to
# explicitly add an extension which has generate_config_h and
# generate_numpyconfig_h as sources *before* adding npymath.
config.add_extension('_dummy',
sources = [join('src', 'dummymodule.c'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api]
)
#######################################################################
# npymath library #
#######################################################################
subst_dict = dict([("sep", os.path.sep), ("pkgname", "numpy.core")])
def get_mathlib_info(*args):
# Another ugly hack: the mathlib info is known once build_src is run,
# but we cannot use add_installed_pkg_config here either, so we only
        # update the substitution dictionary during the npymath build
config_cmd = config.get_config_cmd()
# Check that the toolchain works, to fail early if it doesn't
# (avoid late errors with MATHLIB which are confusing if the
# compiler does not work).
st = config_cmd.try_link('int main(void) { return 0;}')
if not st:
raise RuntimeError("Broken toolchain: cannot link a simple C program")
mlibs = check_mathlib(config_cmd)
posix_mlib = ' '.join(['-l%s' % l for l in mlibs])
msvc_mlib = ' '.join(['%s.lib' % l for l in mlibs])
subst_dict["posix_mathlib"] = posix_mlib
subst_dict["msvc_mathlib"] = msvc_mlib
npymath_sources = [join('src', 'npymath', 'npy_math.c.src'),
join('src', 'npymath', 'ieee754.c.src'),
join('src', 'npymath', 'npy_math_complex.c.src'),
join('src', 'npymath', 'halffloat.c')]
config.add_installed_library('npymath',
sources=npymath_sources + [get_mathlib_info],
install_dir='lib')
config.add_npy_pkg_config("npymath.ini.in", "lib/npy-pkg-config",
subst_dict)
config.add_npy_pkg_config("mlib.ini.in", "lib/npy-pkg-config",
subst_dict)
#######################################################################
# npysort library #
#######################################################################
# This library is created for the build but it is not installed
npysort_sources=[join('src', 'npysort', 'quicksort.c.src'),
join('src', 'npysort', 'mergesort.c.src'),
join('src', 'npysort', 'heapsort.c.src'),
join('src', 'private', 'npy_partition.h.src'),
join('src', 'npysort', 'selection.c.src'),
join('src', 'private', 'npy_binsearch.h.src'),
join('src', 'npysort', 'binsearch.c.src'),
]
config.add_library('npysort',
sources=npysort_sources,
include_dirs=[])
#######################################################################
# multiarray module #
#######################################################################
# Multiarray version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_multiarray_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'multiarray')
sources = [join(local_dir, subpath, 'scalartypes.c.src'),
join(local_dir, subpath, 'arraytypes.c.src'),
join(local_dir, subpath, 'nditer_templ.c.src'),
join(local_dir, subpath, 'lowlevel_strided_loops.c.src'),
join(local_dir, subpath, 'einsum.c.src')]
        # numpy.distutils generates .c from .c.src in weird directories, so we have
# to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
multiarray_deps = [
join('src', 'multiarray', 'arrayobject.h'),
join('src', 'multiarray', 'arraytypes.h'),
join('src', 'multiarray', 'array_assign.h'),
join('src', 'multiarray', 'buffer.h'),
join('src', 'multiarray', 'calculation.h'),
join('src', 'multiarray', 'common.h'),
join('src', 'multiarray', 'convert_datatype.h'),
join('src', 'multiarray', 'convert.h'),
join('src', 'multiarray', 'conversion_utils.h'),
join('src', 'multiarray', 'ctors.h'),
join('src', 'multiarray', 'descriptor.h'),
join('src', 'multiarray', 'getset.h'),
join('src', 'multiarray', 'hashdescr.h'),
join('src', 'multiarray', 'iterators.h'),
join('src', 'multiarray', 'mapping.h'),
join('src', 'multiarray', 'methods.h'),
join('src', 'multiarray', 'multiarraymodule.h'),
join('src', 'multiarray', 'nditer_impl.h'),
join('src', 'multiarray', 'numpymemoryview.h'),
join('src', 'multiarray', 'number.h'),
join('src', 'multiarray', 'numpyos.h'),
join('src', 'multiarray', 'refcount.h'),
join('src', 'multiarray', 'scalartypes.h'),
join('src', 'multiarray', 'sequence.h'),
join('src', 'multiarray', 'shape.h'),
join('src', 'multiarray', 'ucsnarrow.h'),
join('src', 'multiarray', 'usertypes.h'),
join('src', 'private', 'lowlevel_strided_loops.h'),
join('include', 'numpy', 'arrayobject.h'),
join('include', 'numpy', '_neighborhood_iterator_imp.h'),
join('include', 'numpy', 'npy_endian.h'),
join('include', 'numpy', 'arrayscalars.h'),
join('include', 'numpy', 'noprefix.h'),
join('include', 'numpy', 'npy_interrupt.h'),
join('include', 'numpy', 'npy_3kcompat.h'),
join('include', 'numpy', 'npy_math.h'),
join('include', 'numpy', 'halffloat.h'),
join('include', 'numpy', 'npy_common.h'),
join('include', 'numpy', 'npy_os.h'),
join('include', 'numpy', 'utils.h'),
join('include', 'numpy', 'ndarrayobject.h'),
join('include', 'numpy', 'npy_cpu.h'),
join('include', 'numpy', 'numpyconfig.h'),
join('include', 'numpy', 'ndarraytypes.h'),
join('include', 'numpy', 'npy_1_7_deprecated_api.h'),
join('include', 'numpy', '_numpyconfig.h.in'),
            # add library sources as distutils does not consider library
# dependencies
] + npysort_sources + npymath_sources
multiarray_src = [
join('src', 'multiarray', 'alloc.c'),
join('src', 'multiarray', 'arrayobject.c'),
join('src', 'multiarray', 'arraytypes.c.src'),
join('src', 'multiarray', 'array_assign.c'),
join('src', 'multiarray', 'array_assign_scalar.c'),
join('src', 'multiarray', 'array_assign_array.c'),
join('src', 'multiarray', 'buffer.c'),
join('src', 'multiarray', 'calculation.c'),
join('src', 'multiarray', 'common.c'),
join('src', 'multiarray', 'convert.c'),
join('src', 'multiarray', 'convert_datatype.c'),
join('src', 'multiarray', 'conversion_utils.c'),
join('src', 'multiarray', 'ctors.c'),
join('src', 'multiarray', 'datetime.c'),
join('src', 'multiarray', 'datetime_strings.c'),
join('src', 'multiarray', 'datetime_busday.c'),
join('src', 'multiarray', 'datetime_busdaycal.c'),
join('src', 'multiarray', 'descriptor.c'),
join('src', 'multiarray', 'dtype_transfer.c'),
join('src', 'multiarray', 'einsum.c.src'),
join('src', 'multiarray', 'flagsobject.c'),
join('src', 'multiarray', 'getset.c'),
join('src', 'multiarray', 'hashdescr.c'),
join('src', 'multiarray', 'item_selection.c'),
join('src', 'multiarray', 'iterators.c'),
join('src', 'multiarray', 'lowlevel_strided_loops.c.src'),
join('src', 'multiarray', 'mapping.c'),
join('src', 'multiarray', 'methods.c'),
join('src', 'multiarray', 'multiarraymodule.c'),
join('src', 'multiarray', 'nditer_templ.c.src'),
join('src', 'multiarray', 'nditer_api.c'),
join('src', 'multiarray', 'nditer_constr.c'),
join('src', 'multiarray', 'nditer_pywrap.c'),
join('src', 'multiarray', 'number.c'),
join('src', 'multiarray', 'numpymemoryview.c'),
join('src', 'multiarray', 'numpyos.c'),
join('src', 'multiarray', 'refcount.c'),
join('src', 'multiarray', 'sequence.c'),
join('src', 'multiarray', 'shape.c'),
join('src', 'multiarray', 'scalarapi.c'),
join('src', 'multiarray', 'scalartypes.c.src'),
join('src', 'multiarray', 'usertypes.c'),
join('src', 'multiarray', 'ucsnarrow.c')]
if not ENABLE_SEPARATE_COMPILATION:
multiarray_deps.extend(multiarray_src)
multiarray_src = [join('src', 'multiarray', 'multiarraymodule_onefile.c')]
multiarray_src.append(generate_multiarray_templated_sources)
config.add_extension('multiarray',
sources = multiarray_src +
[generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
join(codegen_dir, 'generate_numpy_api.py'),
join('*.py')],
depends = deps + multiarray_deps,
libraries = ['npymath', 'npysort'])
#######################################################################
# umath module #
#######################################################################
# umath version: this function is needed to build foo.c from foo.c.src
# when foo.c is included in another file and as such not in the src
# argument of build_ext command
def generate_umath_templated_sources(ext, build_dir):
from numpy.distutils.misc_util import get_cmd
subpath = join('src', 'umath')
sources = [
join(local_dir, subpath, 'loops.h.src'),
join(local_dir, subpath, 'loops.c.src'),
join(local_dir, subpath, 'simd.inc.src')]
        # numpy.distutils generates .c from .c.src in weird directories, so we have
# to add them there as they depend on the build_dir
config.add_include_dirs(join(build_dir, subpath))
cmd = get_cmd('build_src')
cmd.ensure_finalized()
cmd.template_sources(sources, ext)
def generate_umath_c(ext, build_dir):
target = join(build_dir, header_dir, '__umath_generated.c')
dir = os.path.dirname(target)
if not os.path.exists(dir):
os.makedirs(dir)
script = generate_umath_py
if newer(script, target):
f = open(target, 'w')
f.write(generate_umath.make_code(generate_umath.defdict,
generate_umath.__file__))
f.close()
return []
umath_src = [
join('src', 'umath', 'umathmodule.c'),
join('src', 'umath', 'reduction.c'),
join('src', 'umath', 'funcs.inc.src'),
join('src', 'umath', 'simd.inc.src'),
join('src', 'umath', 'loops.h.src'),
join('src', 'umath', 'loops.c.src'),
join('src', 'umath', 'ufunc_object.c'),
join('src', 'umath', 'ufunc_type_resolution.c')]
umath_deps = [
generate_umath_py,
join('src', 'multiarray', 'common.h'),
join('src', 'umath', 'simd.inc.src'),
join(codegen_dir, 'generate_ufunc_api.py'),
join('src', 'private', 'ufunc_override.h')] + npymath_sources
if not ENABLE_SEPARATE_COMPILATION:
umath_deps.extend(umath_src)
umath_src = [join('src', 'umath', 'umathmodule_onefile.c')]
umath_src.append(generate_umath_templated_sources)
umath_src.append(join('src', 'umath', 'funcs.inc.src'))
umath_src.append(join('src', 'umath', 'simd.inc.src'))
config.add_extension('umath',
sources = umath_src +
[generate_config_h,
generate_numpyconfig_h,
generate_umath_c,
generate_ufunc_api],
depends = deps + umath_deps,
libraries = ['npymath'],
)
#######################################################################
# scalarmath module #
#######################################################################
config.add_extension('scalarmath',
sources = [join('src', 'scalarmathmodule.c.src'),
join('src', 'private', 'scalarmathmodule.h.src'),
generate_config_h,
generate_numpyconfig_h,
generate_numpy_api,
generate_ufunc_api],
depends = deps + npymath_sources,
libraries = ['npymath'],
)
#######################################################################
# _dotblas module #
#######################################################################
# Configure blasdot
blas_info = get_info('blas_opt', 0)
#blas_info = {}
def get_dotblas_sources(ext, build_dir):
if blas_info:
if ('NO_ATLAS_INFO', 1) in blas_info.get('define_macros', []):
return None # dotblas needs ATLAS, Fortran compiled blas will not be sufficient.
return ext.depends[:2]
return None # no extension module will be built
config.add_extension('_dotblas',
sources = [get_dotblas_sources],
depends = [join('blasdot', '_dotblas.c'),
join('blasdot', 'apple_sgemv_patch.c'),
join('blasdot', 'cblas.h'),
],
include_dirs = ['blasdot'],
extra_info = blas_info
)
#######################################################################
# umath_tests module #
#######################################################################
config.add_extension('umath_tests',
sources = [join('src', 'umath', 'umath_tests.c.src')])
#######################################################################
# custom rational dtype module #
#######################################################################
config.add_extension('test_rational',
sources = [join('src', 'umath', 'test_rational.c.src')])
#######################################################################
# struct_ufunc_test module #
#######################################################################
config.add_extension('struct_ufunc_test',
sources = [join('src', 'umath', 'struct_ufunc_test.c.src')])
#######################################################################
# multiarray_tests module #
#######################################################################
config.add_extension('multiarray_tests',
sources = [join('src', 'multiarray', 'multiarray_tests.c.src')])
#######################################################################
# operand_flag_tests module #
#######################################################################
config.add_extension('operand_flag_tests',
sources = [join('src', 'umath', 'operand_flag_tests.c.src')])
config.add_data_dir('tests')
config.add_data_dir('tests/data')
config.make_svn_version_py()
return config
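# Illustrative sketch (never executed here): the overall shape of a
# numpy.distutils setup script, reduced to its essentials. 'mypkg',
# 'myext' and 'myext.c' are made-up names; the real configuration() above
# follows exactly this pattern, just with many more sources, generated
# headers and installed libraries.
def _minimal_configuration_example(parent_package='', top_path=None):
    from numpy.distutils.misc_util import Configuration
    config = Configuration('mypkg', parent_package, top_path)
    config.add_extension('myext', sources=['myext.c'])
    config.add_data_dir('tests')
    return config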
if __name__=='__main__':
from numpy.distutils.core import setup
setup(configuration=configuration)
| mit |
MalloyDelacroix/DownloaderForReddit | Tests/unittests/utils/importers/test_text_importer.py | 1 | 1164 | from unittest import TestCase
from DownloaderForReddit.utils.importers import text_importer
class TestTextImporter(TestCase):
def test_remove_forbidden_chars(self):
text = ' this \n is a\nname-for-import '
clean = text_importer.remove_forbidden_chars(text)
self.assertEqual('thisisaname-for-import', clean)
def test_split_names(self):
names = 'name_one, name_two, name_three, name_four'
names = text_importer.split_names(names)
self.assertEqual(['name_one', 'name_two', 'name_three', 'name_four'], names)
def test_split_names_with_extra_commas(self):
names = ', name_one, name_two, name_three, name_four, '
names = text_importer.split_names(names)
self.assertEqual(['name_one', 'name_two', 'name_three', 'name_four'], names)
def test_filter_import_list(self):
names = ['one', 'two', 'one', 'three', 'One', 'ONE', 'oNe', 'four', 'one', '', 'five', 'one', 'ONE', 'six']
filtered_names = text_importer.filter_import_list(names)
correct_names = ['one', 'two', 'three', 'four', 'five', 'six']
self.assertEqual(correct_names, filtered_names)
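# Illustrative sketches only: hypothetical implementations that would satisfy
# the expectations above. They are written from the tests, not copied from
# DownloaderForReddit.utils.importers.text_importer, so treat them as an
# assumption about how the real helpers behave.
def _remove_forbidden_chars_sketch(text):
    # Drop all whitespace (spaces, newlines, tabs) from the name.
    return ''.join(text.split())
def _split_names_sketch(names):
    # Split on commas and discard empty entries produced by stray commas.
    return [name.strip() for name in names.split(',') if name.strip()]
def _filter_import_list_sketch(names):
    # Case-insensitive, order-preserving de-duplication that drops blanks.
    seen = set()
    filtered = []
    for name in names:
        key = name.lower()
        if name and key not in seen:
            seen.add(key)
            filtered.append(name)
    return filtered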
| gpl-3.0 |
RavilN/freeopcua | tests/gtest/scripts/gen_gtest_pred_impl.py | 2538 | 21986 | #!/usr/bin/env python
#
# Copyright 2006, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""gen_gtest_pred_impl.py v0.1
Generates the implementation of Google Test predicate assertions and
accompanying tests.
Usage:
gen_gtest_pred_impl.py MAX_ARITY
where MAX_ARITY is a positive integer.
The command generates the implementation of up to MAX_ARITY-ary
predicate assertions, and writes it to file gtest_pred_impl.h in the
directory where the script is. It also generates the accompanying
unit test in file gtest_pred_impl_unittest.cc.
"""
__author__ = '[email protected] (Zhanyong Wan)'
import os
import sys
import time
# Where this script is.
SCRIPT_DIR = os.path.dirname(sys.argv[0])
# Where to store the generated header.
HEADER = os.path.join(SCRIPT_DIR, '../include/gtest/gtest_pred_impl.h')
# Where to store the generated unit test.
UNIT_TEST = os.path.join(SCRIPT_DIR, '../test/gtest_pred_impl_unittest.cc')
def HeaderPreamble(n):
"""Returns the preamble for the header file.
Args:
n: the maximum arity of the predicate macros to be generated.
"""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), n),
'n' : n
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
//
// Implements a family of generic predicate assertion macros.
#ifndef GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
#define GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
// Makes sure this header is not included before gtest.h.
#ifndef GTEST_INCLUDE_GTEST_GTEST_H_
# error Do not include gtest_pred_impl.h directly. Include gtest.h instead.
#endif // GTEST_INCLUDE_GTEST_GTEST_H_
// This header implements a family of generic predicate assertion
// macros:
//
// ASSERT_PRED_FORMAT1(pred_format, v1)
// ASSERT_PRED_FORMAT2(pred_format, v1, v2)
// ...
//
// where pred_format is a function or functor that takes n (in the
// case of ASSERT_PRED_FORMATn) values and their source expression
// text, and returns a testing::AssertionResult. See the definition
// of ASSERT_EQ in gtest.h for an example.
//
// If you don't care about formatting, you can use the more
// restrictive version:
//
// ASSERT_PRED1(pred, v1)
// ASSERT_PRED2(pred, v1, v2)
// ...
//
// where pred is an n-ary function or functor that returns bool,
// and the values v1, v2, ..., must support the << operator for
// streaming to std::ostream.
//
// We also define the EXPECT_* variations.
//
// For now we only support predicates whose arity is at most %(n)s.
// Please email [email protected] if you need
// support for higher arities.
// GTEST_ASSERT_ is the basic statement to which all of the assertions
// in this file reduce. Don't use this in your code.
#define GTEST_ASSERT_(expression, on_failure) \\
GTEST_AMBIGUOUS_ELSE_BLOCKER_ \\
if (const ::testing::AssertionResult gtest_ar = (expression)) \\
; \\
else \\
on_failure(gtest_ar.failure_message())
""" % DEFS)
def Arity(n):
"""Returns the English name of the given arity."""
if n < 0:
return None
elif n <= 3:
return ['nullary', 'unary', 'binary', 'ternary'][n]
else:
return '%s-ary' % n
def Title(word):
"""Returns the given word in title case. The difference between
this and string's title() method is that Title('4-ary') is '4-ary'
while '4-ary'.title() is '4-Ary'."""
return word[0].upper() + word[1:]
def OneTo(n):
"""Returns the list [1, 2, 3, ..., n]."""
return range(1, n + 1)
def Iter(n, format, sep=''):
"""Given a positive integer n, a format string that contains 0 or
more '%s' format specs, and optionally a separator string, returns
the join of n strings, each formatted with the format string on an
iterator ranged from 1 to n.
Example:
Iter(3, 'v%s', sep=', ') returns 'v1, v2, v3'.
"""
# How many '%s' specs are in format?
spec_count = len(format.split('%s')) - 1
return sep.join([format % (spec_count * (i,)) for i in OneTo(n)])
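# Illustrative sketch (not called by the generator): a few concrete values for
# the helpers above, matching the Iter docstring example. Runs under the same
# Python 2 that this script targets.
def _demo_iter_helpers():
  assert OneTo(3) == [1, 2, 3]
  assert Iter(3, 'v%s', sep=', ') == 'v1, v2, v3'
  assert Iter(2, 'const T%s& v%s', sep=', ') == 'const T1& v1, const T2& v2'
  assert Title(Arity(2)) == 'Binary'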
def ImplementationForArity(n):
"""Returns the implementation of n-ary predicate assertions."""
# A map the defines the values used in the implementation template.
DEFS = {
'n' : str(n),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'arity' : Arity(n),
'Arity' : Title(Arity(n))
}
impl = """
// Helper function for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
template <typename Pred""" % DEFS
impl += Iter(n, """,
typename T%s""")
impl += """>
AssertionResult AssertPred%(n)sHelper(const char* pred_text""" % DEFS
impl += Iter(n, """,
const char* e%s""")
impl += """,
Pred pred"""
impl += Iter(n, """,
const T%s& v%s""")
impl += """) {
if (pred(%(vs)s)) return AssertionSuccess();
""" % DEFS
impl += ' return AssertionFailure() << pred_text << "("'
impl += Iter(n, """
<< e%s""", sep=' << ", "')
impl += ' << ") evaluates to false, where"'
impl += Iter(n, """
<< "\\n" << e%s << " evaluates to " << v%s""")
impl += """;
}
// Internal macro for implementing {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
// Don't use this in your code.
#define GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, on_failure)\\
GTEST_ASSERT_(pred_format(%(vts)s, %(vs)s), \\
on_failure)
// Internal macro for implementing {EXPECT|ASSERT}_PRED%(n)s. Don't use
// this in your code.
#define GTEST_PRED%(n)s_(pred, %(vs)s, on_failure)\\
GTEST_ASSERT_(::testing::AssertPred%(n)sHelper(#pred""" % DEFS
impl += Iter(n, """, \\
#v%s""")
impl += """, \\
pred"""
impl += Iter(n, """, \\
v%s""")
impl += """), on_failure)
// %(Arity)s predicate assertion macros.
#define EXPECT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define EXPECT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_NONFATAL_FAILURE_)
#define ASSERT_PRED_FORMAT%(n)s(pred_format, %(vs)s) \\
GTEST_PRED_FORMAT%(n)s_(pred_format, %(vs)s, GTEST_FATAL_FAILURE_)
#define ASSERT_PRED%(n)s(pred, %(vs)s) \\
GTEST_PRED%(n)s_(pred, %(vs)s, GTEST_FATAL_FAILURE_)
""" % DEFS
return impl
def HeaderPostamble():
"""Returns the postamble for the header file."""
return """
#endif // GTEST_INCLUDE_GTEST_GTEST_PRED_IMPL_H_
"""
def GenerateFile(path, content):
"""Given a file path and a content string, overwrites it with the
given content."""
print 'Updating file %s . . .' % path
f = file(path, 'w+')
print >>f, content,
f.close()
print 'File %s has been updated.' % path
def GenerateHeader(n):
"""Given the maximum arity n, updates the header file that implements
the predicate assertions."""
GenerateFile(HEADER,
HeaderPreamble(n)
+ ''.join([ImplementationForArity(i) for i in OneTo(n)])
+ HeaderPostamble())
def UnitTestPreamble():
"""Returns the preamble for the unit test file."""
# A map that defines the values used in the preamble template.
DEFS = {
'today' : time.strftime('%m/%d/%Y'),
'year' : time.strftime('%Y'),
'command' : '%s %s' % (os.path.basename(sys.argv[0]), sys.argv[1]),
}
return (
"""// Copyright 2006, Google Inc.
// All rights reserved.
//
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
//
// * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
// * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
// * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
//
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
// This file is AUTOMATICALLY GENERATED on %(today)s by command
// '%(command)s'. DO NOT EDIT BY HAND!
// Regression test for gtest_pred_impl.h
//
// This file is generated by a script and quite long. If you intend to
// learn how Google Test works by reading its unit tests, read
// gtest_unittest.cc instead.
//
// This is intended as a regression test for the Google Test predicate
// assertions. We compile it as part of the gtest_unittest target
// only to keep the implementation tidy and compact, as it is quite
// involved to set up the stage for testing Google Test using Google
// Test itself.
//
// Currently, gtest_unittest takes ~11 seconds to run in the testing
// daemon. In the future, if it grows too large and needs much more
// time to finish, we should consider separating this file into a
// stand-alone regression test.
#include <iostream>
#include "gtest/gtest.h"
#include "gtest/gtest-spi.h"
// A user-defined data type.
struct Bool {
explicit Bool(int val) : value(val != 0) {}
bool operator>(int n) const { return value > Bool(n).value; }
Bool operator+(const Bool& rhs) const { return Bool(value + rhs.value); }
bool operator==(const Bool& rhs) const { return value == rhs.value; }
bool value;
};
// Enables Bool to be used in assertions.
std::ostream& operator<<(std::ostream& os, const Bool& x) {
return os << (x.value ? "true" : "false");
}
""" % DEFS)
def TestsForArity(n):
"""Returns the tests for n-ary predicate assertions."""
# A map that defines the values used in the template for the tests.
DEFS = {
'n' : n,
'es' : Iter(n, 'e%s', sep=', '),
'vs' : Iter(n, 'v%s', sep=', '),
'vts' : Iter(n, '#v%s', sep=', '),
'tvs' : Iter(n, 'T%s v%s', sep=', '),
'int_vs' : Iter(n, 'int v%s', sep=', '),
'Bool_vs' : Iter(n, 'Bool v%s', sep=', '),
'types' : Iter(n, 'typename T%s', sep=', '),
'v_sum' : Iter(n, 'v%s', sep=' + '),
'arity' : Arity(n),
'Arity' : Title(Arity(n)),
}
tests = (
"""// Sample functions/functors for testing %(arity)s predicate assertions.
// A %(arity)s predicate function.
template <%(types)s>
bool PredFunction%(n)s(%(tvs)s) {
return %(v_sum)s > 0;
}
// The following two functions are needed to circumvent a bug in
// gcc 2.95.3, which sometimes has problem with the above template
// function.
bool PredFunction%(n)sInt(%(int_vs)s) {
return %(v_sum)s > 0;
}
bool PredFunction%(n)sBool(%(Bool_vs)s) {
return %(v_sum)s > 0;
}
""" % DEFS)
tests += """
// A %(arity)s predicate functor.
struct PredFunctor%(n)s {
template <%(types)s>
bool operator()(""" % DEFS
tests += Iter(n, 'const T%s& v%s', sep=""",
""")
tests += """) {
return %(v_sum)s > 0;
}
};
""" % DEFS
tests += """
// A %(arity)s predicate-formatter function.
template <%(types)s>
testing::AssertionResult PredFormatFunction%(n)s(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) {
if (PredFunction%(n)s(%(vs)s))
return testing::AssertionSuccess();
return testing::AssertionFailure()
<< """ % DEFS
tests += Iter(n, 'e%s', sep=' << " + " << ')
tests += """
<< " is expected to be positive, but evaluates to "
<< %(v_sum)s << ".";
}
""" % DEFS
tests += """
// A %(arity)s predicate-formatter functor.
struct PredFormatFunctor%(n)s {
template <%(types)s>
testing::AssertionResult operator()(""" % DEFS
tests += Iter(n, 'const char* e%s', sep=""",
""")
tests += Iter(n, """,
const T%s& v%s""")
tests += """) const {
return PredFormatFunction%(n)s(%(es)s, %(vs)s);
}
};
""" % DEFS
tests += """
// Tests for {EXPECT|ASSERT}_PRED_FORMAT%(n)s.
class Predicate%(n)sTest : public testing::Test {
protected:
virtual void SetUp() {
expected_to_finish_ = true;
finished_ = false;""" % DEFS
tests += """
""" + Iter(n, 'n%s_ = ') + """0;
}
"""
tests += """
virtual void TearDown() {
// Verifies that each of the predicate's arguments was evaluated
// exactly once."""
tests += ''.join(["""
EXPECT_EQ(1, n%s_) <<
"The predicate assertion didn't evaluate argument %s "
"exactly once.";""" % (i, i + 1) for i in OneTo(n)])
tests += """
// Verifies that the control flow in the test function is expected.
if (expected_to_finish_ && !finished_) {
FAIL() << "The predicate assertion unexpactedly aborted the test.";
} else if (!expected_to_finish_ && finished_) {
FAIL() << "The failed predicate assertion didn't abort the test "
"as expected.";
}
}
// true iff the test function is expected to run to finish.
static bool expected_to_finish_;
// true iff the test function did run to finish.
static bool finished_;
""" % DEFS
tests += Iter(n, """
static int n%s_;""")
tests += """
};
bool Predicate%(n)sTest::expected_to_finish_;
bool Predicate%(n)sTest::finished_;
""" % DEFS
tests += Iter(n, """int Predicate%%(n)sTest::n%s_;
""") % DEFS
tests += """
typedef Predicate%(n)sTest EXPECT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED_FORMAT%(n)sTest;
typedef Predicate%(n)sTest EXPECT_PRED%(n)sTest;
typedef Predicate%(n)sTest ASSERT_PRED%(n)sTest;
""" % DEFS
def GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type):
"""Returns the test for a predicate assertion macro.
Args:
use_format: true iff the assertion is a *_PRED_FORMAT*.
use_assert: true iff the assertion is a ASSERT_*.
expect_failure: true iff the assertion is expected to fail.
use_functor: true iff the first argument of the assertion is
a functor (as opposed to a function)
use_user_type: true iff the predicate functor/function takes
argument(s) of a user-defined type.
Example:
GenTest(1, 0, 0, 1, 0) returns a test that tests the behavior
of a successful EXPECT_PRED_FORMATn() that takes a functor
whose arguments have built-in types."""
if use_assert:
assrt = 'ASSERT' # 'assert' is reserved, so we cannot use
# that identifier here.
else:
assrt = 'EXPECT'
assertion = assrt + '_PRED'
if use_format:
pred_format = 'PredFormat'
assertion += '_FORMAT'
else:
pred_format = 'Pred'
assertion += '%(n)s' % DEFS
if use_functor:
pred_format_type = 'functor'
pred_format += 'Functor%(n)s()'
else:
pred_format_type = 'function'
pred_format += 'Function%(n)s'
if not use_format:
if use_user_type:
pred_format += 'Bool'
else:
pred_format += 'Int'
test_name = pred_format_type.title()
if use_user_type:
arg_type = 'user-defined type (Bool)'
test_name += 'OnUserType'
if expect_failure:
arg = 'Bool(n%s_++)'
else:
arg = 'Bool(++n%s_)'
else:
arg_type = 'built-in type (int)'
test_name += 'OnBuiltInType'
if expect_failure:
arg = 'n%s_++'
else:
arg = '++n%s_'
if expect_failure:
successful_or_failed = 'failed'
expected_or_not = 'expected.'
test_name += 'Failure'
else:
successful_or_failed = 'successful'
expected_or_not = 'UNEXPECTED!'
test_name += 'Success'
# A map that defines the values used in the test template.
defs = DEFS.copy()
defs.update({
'assert' : assrt,
'assertion' : assertion,
'test_name' : test_name,
'pf_type' : pred_format_type,
'pf' : pred_format,
'arg_type' : arg_type,
'arg' : arg,
'successful' : successful_or_failed,
'expected' : expected_or_not,
})
test = """
// Tests a %(successful)s %(assertion)s where the
// predicate-formatter is a %(pf_type)s on a %(arg_type)s.
TEST_F(%(assertion)sTest, %(test_name)s) {""" % defs
indent = (len(assertion) + 3)*' '
extra_indent = ''
if expect_failure:
extra_indent = ' '
if use_assert:
test += """
expected_to_finish_ = false;
EXPECT_FATAL_FAILURE({ // NOLINT"""
else:
test += """
EXPECT_NONFATAL_FAILURE({ // NOLINT"""
test += '\n' + extra_indent + """ %(assertion)s(%(pf)s""" % defs
test = test % defs
test += Iter(n, ',\n' + indent + extra_indent + '%(arg)s' % defs)
test += ');\n' + extra_indent + ' finished_ = true;\n'
if expect_failure:
test += ' }, "");\n'
test += '}\n'
return test
# Generates tests for all 2**6 = 64 combinations.
tests += ''.join([GenTest(use_format, use_assert, expect_failure,
use_functor, use_user_type)
for use_format in [0, 1]
for use_assert in [0, 1]
for expect_failure in [0, 1]
for use_functor in [0, 1]
for use_user_type in [0, 1]
])
return tests
def UnitTestPostamble():
"""Returns the postamble for the tests."""
return ''
def GenerateUnitTest(n):
"""Returns the tests for up-to n-ary predicate assertions."""
GenerateFile(UNIT_TEST,
UnitTestPreamble()
+ ''.join([TestsForArity(i) for i in OneTo(n)])
+ UnitTestPostamble())
def _Main():
"""The entry point of the script. Generates the header file and its
unit test."""
if len(sys.argv) != 2:
print __doc__
print 'Author: ' + __author__
sys.exit(1)
n = int(sys.argv[1])
GenerateHeader(n)
GenerateUnitTest(n)
if __name__ == '__main__':
_Main()
| lgpl-3.0 |
primiano/blink-gitcs | Tools/Scripts/webkitpy/layout_tests/models/test_configuration.py | 59 | 13691 | # Copyright (C) 2011 Google Inc. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the Google name nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import copy
class TestConfiguration(object):
def __init__(self, version, architecture, build_type):
self.version = version
self.architecture = architecture
self.build_type = build_type
@classmethod
def category_order(cls):
"""The most common human-readable order in which the configuration properties are listed."""
return ['version', 'architecture', 'build_type']
def items(self):
return self.__dict__.items()
def keys(self):
return self.__dict__.keys()
def __str__(self):
return ("<%(version)s, %(architecture)s, %(build_type)s>" %
self.__dict__)
def __repr__(self):
return "TestConfig(version='%(version)s', architecture='%(architecture)s', build_type='%(build_type)s')" % self.__dict__
def __hash__(self):
return hash(self.version + self.architecture + self.build_type)
def __eq__(self, other):
return self.__hash__() == other.__hash__()
def values(self):
"""Returns the configuration values of this instance as a tuple."""
return self.__dict__.values()
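# Illustrative sketch (not part of the original module): how TestConfiguration
# behaves as a hashable value object. The specifier values below are
# assumptions chosen only for demonstration; this hypothetical helper is never
# called by the module itself.
def _example_test_configuration_usage():
    config_a = TestConfiguration(version='xp', architecture='x86', build_type='release')
    config_b = TestConfiguration(version='xp', architecture='x86', build_type='release')
    # Hashing and equality are derived from the three specifier strings, so
    # equal configurations collapse to a single entry in a set.
    assert config_a == config_b
    assert len(set([config_a, config_b])) == 1
    return config_a.items()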
class SpecifierSorter(object):
def __init__(self, all_test_configurations=None, macros=None):
self._specifier_to_category = {}
if not all_test_configurations:
return
for test_configuration in all_test_configurations:
for category, specifier in test_configuration.items():
self.add_specifier(category, specifier)
self.add_macros(macros)
def add_specifier(self, category, specifier):
self._specifier_to_category[specifier] = category
def add_macros(self, macros):
if not macros:
return
# Assume well-formed macros.
for macro, specifier_list in macros.items():
self.add_specifier(self.category_for_specifier(specifier_list[0]), macro)
@classmethod
def category_priority(cls, category):
return TestConfiguration.category_order().index(category)
def specifier_priority(self, specifier):
return self.category_priority(self._specifier_to_category[specifier])
def category_for_specifier(self, specifier):
return self._specifier_to_category.get(specifier)
def sort_specifiers(self, specifiers):
category_slots = map(lambda x: [], TestConfiguration.category_order())
for specifier in specifiers:
category_slots[self.specifier_priority(specifier)].append(specifier)
def sort_and_return(result, specifier_list):
specifier_list.sort()
return result + specifier_list
return reduce(sort_and_return, category_slots, [])
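# Illustrative sketch (not part of the original module): sorting specifiers by
# category priority. The configurations and the 'win' macro are assumptions
# used purely for demonstration; this hypothetical helper is never called.
def _example_specifier_sorter_usage():
    all_configs = set([TestConfiguration('xp', 'x86', 'release'),
                       TestConfiguration('win7', 'x86', 'debug')])
    sorter = SpecifierSorter(all_configs, macros={'win': ['xp', 'win7']})
    # Specifiers come back grouped by category order (version, architecture,
    # build_type) and alphabetically within each category, e.g.
    # ['xp', 'x86', 'release'].
    return sorter.sort_specifiers(set(['release', 'x86', 'xp']))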
class TestConfigurationConverter(object):
def __init__(self, all_test_configurations, configuration_macros=None):
self._all_test_configurations = all_test_configurations
self._configuration_macros = configuration_macros or {}
self._specifier_to_configuration_set = {}
self._specifier_sorter = SpecifierSorter()
self._collapsing_sets_by_size = {}
self._junk_specifier_combinations = {}
self._collapsing_sets_by_category = {}
matching_sets_by_category = {}
for configuration in all_test_configurations:
for category, specifier in configuration.items():
self._specifier_to_configuration_set.setdefault(specifier, set()).add(configuration)
self._specifier_sorter.add_specifier(category, specifier)
self._collapsing_sets_by_category.setdefault(category, set()).add(specifier)
# FIXME: This seems extra-awful.
for cat2, spec2 in configuration.items():
if category == cat2:
continue
matching_sets_by_category.setdefault(specifier, {}).setdefault(cat2, set()).add(spec2)
for collapsing_set in self._collapsing_sets_by_category.values():
self._collapsing_sets_by_size.setdefault(len(collapsing_set), set()).add(frozenset(collapsing_set))
for specifier, sets_by_category in matching_sets_by_category.items():
for category, set_by_category in sets_by_category.items():
if len(set_by_category) == 1 and self._specifier_sorter.category_priority(category) > self._specifier_sorter.specifier_priority(specifier):
self._junk_specifier_combinations[specifier] = set_by_category
self._specifier_sorter.add_macros(configuration_macros)
def specifier_sorter(self):
return self._specifier_sorter
def _expand_macros(self, specifier):
expanded_specifiers = self._configuration_macros.get(specifier)
return expanded_specifiers or [specifier]
def to_config_set(self, specifier_set, error_list=None):
"""Convert a list of specifiers into a set of TestConfiguration instances."""
if len(specifier_set) == 0:
return copy.copy(self._all_test_configurations)
matching_sets = {}
for specifier in specifier_set:
for expanded_specifier in self._expand_macros(specifier):
configurations = self._specifier_to_configuration_set.get(expanded_specifier)
if not configurations:
if error_list is not None:
error_list.append("Unrecognized specifier '" + expanded_specifier + "'")
return set()
category = self._specifier_sorter.category_for_specifier(expanded_specifier)
matching_sets.setdefault(category, set()).update(configurations)
return reduce(set.intersection, matching_sets.values())
@classmethod
def collapse_macros(cls, macros_dict, specifiers_list):
for macro_specifier, macro in macros_dict.items():
if len(macro) == 1:
continue
for combination in cls.combinations(specifiers_list, len(macro)):
if cls.symmetric_difference(combination) == set(macro):
for item in combination:
specifiers_list.remove(item)
new_specifier_set = cls.intersect_combination(combination)
new_specifier_set.add(macro_specifier)
specifiers_list.append(frozenset(new_specifier_set))
def collapse_individual_specifier_set(macro_specifier, macro):
specifiers_to_remove = []
specifiers_to_add = []
for specifier_set in specifiers_list:
macro_set = set(macro)
if macro_set.intersection(specifier_set) == macro_set:
specifiers_to_remove.append(specifier_set)
specifiers_to_add.append(frozenset((set(specifier_set) - macro_set) | set([macro_specifier])))
for specifier in specifiers_to_remove:
specifiers_list.remove(specifier)
for specifier in specifiers_to_add:
specifiers_list.append(specifier)
for macro_specifier, macro in macros_dict.items():
collapse_individual_specifier_set(macro_specifier, macro)
    # FIXME: itertools.combinations is buggy in Python 2.6.1 (the version that ships on SL).
# It seems to be okay in 2.6.5 or later; until then, this is the implementation given
# in http://docs.python.org/library/itertools.html (from 2.7).
@staticmethod
def combinations(iterable, r):
# combinations('ABCD', 2) --> AB AC AD BC BD CD
# combinations(range(4), 3) --> 012 013 023 123
pool = tuple(iterable)
n = len(pool)
if r > n:
return
indices = range(r)
yield tuple(pool[i] for i in indices)
while True:
for i in reversed(range(r)):
if indices[i] != i + n - r:
break
else:
return
indices[i] += 1 # pylint: disable=W0631
for j in range(i + 1, r): # pylint: disable=W0631
indices[j] = indices[j - 1] + 1
yield tuple(pool[i] for i in indices)
@classmethod
def intersect_combination(cls, combination):
return reduce(set.intersection, [set(specifiers) for specifiers in combination])
@classmethod
def symmetric_difference(cls, iterable):
union = set()
intersection = iterable[0]
for item in iterable:
union = union | item
intersection = intersection.intersection(item)
return union - intersection
def to_specifiers_list(self, test_configuration_set):
"""Convert a set of TestConfiguration instances into one or more list of specifiers."""
# Easy out: if the set is all configurations, the specifier is empty.
if len(test_configuration_set) == len(self._all_test_configurations):
return [[]]
# 1) Build a list of specifier sets, discarding specifiers that don't add value.
specifiers_list = []
for config in test_configuration_set:
values = set(config.values())
for specifier, junk_specifier_set in self._junk_specifier_combinations.items():
if specifier in values:
values -= junk_specifier_set
specifiers_list.append(frozenset(values))
def try_collapsing(size, collapsing_sets):
if len(specifiers_list) < size:
return False
for combination in self.combinations(specifiers_list, size):
if self.symmetric_difference(combination) in collapsing_sets:
for item in combination:
specifiers_list.remove(item)
specifiers_list.append(frozenset(self.intersect_combination(combination)))
return True
return False
# 2) Collapse specifier sets with common specifiers:
# (xp, release), (xp, debug) --> (xp, x86)
for size, collapsing_sets in self._collapsing_sets_by_size.items():
while try_collapsing(size, collapsing_sets):
pass
def try_abbreviating(collapsing_sets):
if len(specifiers_list) < 2:
return False
for combination in self.combinations(specifiers_list, 2):
for collapsing_set in collapsing_sets:
diff = self.symmetric_difference(combination)
if diff <= collapsing_set:
common = self.intersect_combination(combination)
for item in combination:
specifiers_list.remove(item)
specifiers_list.append(frozenset(common | diff))
return True
return False
# 3) Abbreviate specifier sets by combining specifiers across categories.
# (xp, release), (win7, release) --> (xp, win7, release)
while try_abbreviating(self._collapsing_sets_by_size.values()):
pass
        # 4) Substitute specifier subsets that match macros within each set:
# (xp, win7, release) -> (win, release)
self.collapse_macros(self._configuration_macros, specifiers_list)
macro_keys = set(self._configuration_macros.keys())
        # 5) Collapsing macros may have created combinations that can now be abbreviated.
# (xp, release), (linux, x86, release), (linux, x86_64, release) --> (xp, release), (linux, release) --> (xp, linux, release)
while try_abbreviating([self._collapsing_sets_by_category['version'] | macro_keys]):
pass
# 6) Remove cases where we have collapsed but have all macros.
# (android, win, mac, linux, release) --> (release)
specifiers_to_remove = []
for specifier_set in specifiers_list:
if macro_keys <= specifier_set:
specifiers_to_remove.append(specifier_set)
for specifier_set in specifiers_to_remove:
specifiers_list.remove(specifier_set)
specifiers_list.append(frozenset(specifier_set - macro_keys))
return specifiers_list
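# Illustrative sketch (not part of the original module): the round trip between
# specifier lists and configuration sets. The three configurations are
# assumptions for demonstration only; this hypothetical helper is never called.
def _example_converter_round_trip():
    all_configs = set([TestConfiguration('xp', 'x86', 'release'),
                       TestConfiguration('xp', 'x86', 'debug'),
                       TestConfiguration('win7', 'x86', 'release')])
    converter = TestConfigurationConverter(all_configs)
    # ['xp'] expands to both xp configurations, and to_specifiers_list()
    # collapses that set back into a single specifier set containing just 'xp'.
    xp_configs = converter.to_config_set(set(['xp']))
    return converter.to_specifiers_list(xp_configs)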
| bsd-3-clause |
denisff/python-for-android | python-build/python-libs/gdata/src/gdata/spreadsheet/__init__.py | 147 | 17942 | #!/usr/bin/python
#
# Copyright (C) 2007 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Contains extensions to Atom objects used with Google Spreadsheets.
"""
__author__ = '[email protected] (Laura Beth Lincoln)'
try:
from xml.etree import cElementTree as ElementTree
except ImportError:
try:
import cElementTree as ElementTree
except ImportError:
try:
from xml.etree import ElementTree
except ImportError:
from elementtree import ElementTree
import atom
import gdata
import re
import string
# XML namespaces which are often used in Google Spreadsheets entities.
GSPREADSHEETS_NAMESPACE = 'http://schemas.google.com/spreadsheets/2006'
GSPREADSHEETS_TEMPLATE = '{http://schemas.google.com/spreadsheets/2006}%s'
GSPREADSHEETS_EXTENDED_NAMESPACE = ('http://schemas.google.com/spreadsheets'
'/2006/extended')
GSPREADSHEETS_EXTENDED_TEMPLATE = ('{http://schemas.google.com/spreadsheets'
'/2006/extended}%s')
class ColCount(atom.AtomBase):
"""The Google Spreadsheets colCount element """
_tag = 'colCount'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def ColCountFromString(xml_string):
return atom.CreateClassFromXMLString(ColCount, xml_string)
class RowCount(atom.AtomBase):
"""The Google Spreadsheets rowCount element """
_tag = 'rowCount'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, text=None, extension_elements=None,
extension_attributes=None):
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def RowCountFromString(xml_string):
return atom.CreateClassFromXMLString(RowCount, xml_string)
class Cell(atom.AtomBase):
"""The Google Spreadsheets cell element """
_tag = 'cell'
_namespace = GSPREADSHEETS_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
_attributes['row'] = 'row'
_attributes['col'] = 'col'
_attributes['inputValue'] = 'inputValue'
_attributes['numericValue'] = 'numericValue'
def __init__(self, text=None, row=None, col=None, inputValue=None,
numericValue=None, extension_elements=None, extension_attributes=None):
self.text = text
self.row = row
self.col = col
self.inputValue = inputValue
self.numericValue = numericValue
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def CellFromString(xml_string):
return atom.CreateClassFromXMLString(Cell, xml_string)
class Custom(atom.AtomBase):
"""The Google Spreadsheets custom element"""
_namespace = GSPREADSHEETS_EXTENDED_NAMESPACE
_children = atom.AtomBase._children.copy()
_attributes = atom.AtomBase._attributes.copy()
def __init__(self, column=None, text=None, extension_elements=None,
extension_attributes=None):
self.column = column # The name of the column
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def _BecomeChildElement(self, tree):
new_child = ElementTree.Element('')
tree.append(new_child)
new_child.tag = '{%s}%s' % (self.__class__._namespace,
self.column)
self._AddMembersToElementTree(new_child)
def _ToElementTree(self):
new_tree = ElementTree.Element('{%s}%s' % (self.__class__._namespace,
self.column))
self._AddMembersToElementTree(new_tree)
return new_tree
def _HarvestElementTree(self, tree):
namespace_uri, local_tag = string.split(tree.tag[1:], "}", 1)
self.column = local_tag
# Fill in the instance members from the contents of the XML tree.
for child in tree:
self._ConvertElementTreeToMember(child)
for attribute, value in tree.attrib.iteritems():
self._ConvertElementAttributeToMember(attribute, value)
self.text = tree.text
def CustomFromString(xml_string):
element_tree = ElementTree.fromstring(xml_string)
return _CustomFromElementTree(element_tree)
def _CustomFromElementTree(element_tree):
namespace_uri, local_tag = string.split(element_tree.tag[1:], "}", 1)
if namespace_uri == GSPREADSHEETS_EXTENDED_NAMESPACE:
new_custom = Custom()
new_custom._HarvestElementTree(element_tree)
new_custom.column = local_tag
return new_custom
return None
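# Illustrative sketch (not part of the original module): parsing an element in
# the extended (gsx) namespace into a Custom instance. The column name and
# value are assumptions for demonstration only; this helper is never called.
def _ExampleCustomFromString():
  xml_string = ('<gsx:hours xmlns:gsx="%s">40</gsx:hours>'
                % GSPREADSHEETS_EXTENDED_NAMESPACE)
  custom = CustomFromString(xml_string)
  # After parsing, custom.column is 'hours' and custom.text is '40'.
  return custom.column, custom.text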
class SpreadsheetsSpreadsheet(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a Spreadsheet Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsSpreadsheetFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheet,
xml_string)
class SpreadsheetsWorksheet(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a Worksheet Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
_children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
RowCount)
_children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
ColCount)
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
row_count=None, col_count=None, text=None, extension_elements=None,
extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.row_count = row_count
self.col_count = col_count
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsWorksheetFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsWorksheet,
xml_string)
class SpreadsheetsCell(gdata.BatchEntry):
"""A Google Spreadsheets flavor of a Cell Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.BatchEntry._children.copy()
_attributes = gdata.BatchEntry._attributes.copy()
_children['{%s}cell' % GSPREADSHEETS_NAMESPACE] = ('cell', Cell)
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
cell=None, batch_operation=None, batch_id=None, batch_status=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.batch_operation = batch_operation
self.batch_id = batch_id
self.batch_status = batch_status
self.updated = updated
self.cell = cell
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
def SpreadsheetsCellFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsCell,
xml_string)
class SpreadsheetsList(gdata.GDataEntry):
"""A Google Spreadsheets flavor of a List Atom Entry """
_tag = 'entry'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataEntry._children.copy()
_attributes = gdata.GDataEntry._attributes.copy()
def __init__(self, author=None, category=None, content=None,
contributor=None, atom_id=None, link=None, published=None, rights=None,
source=None, summary=None, title=None, control=None, updated=None,
custom=None,
text=None, extension_elements=None, extension_attributes=None):
self.author = author or []
self.category = category or []
self.content = content
self.contributor = contributor or []
self.id = atom_id
self.link = link or []
self.published = published
self.rights = rights
self.source = source
self.summary = summary
self.control = control
self.title = title
self.updated = updated
self.custom = custom or {}
self.text = text
self.extension_elements = extension_elements or []
self.extension_attributes = extension_attributes or {}
# We need to overwrite _ConvertElementTreeToMember to add special logic to
# convert custom attributes to members
def _ConvertElementTreeToMember(self, child_tree):
# Find the element's tag in this class's list of child members
if self.__class__._children.has_key(child_tree.tag):
member_name = self.__class__._children[child_tree.tag][0]
member_class = self.__class__._children[child_tree.tag][1]
# If the class member is supposed to contain a list, make sure the
# matching member is set to a list, then append the new member
# instance to the list.
if isinstance(member_class, list):
if getattr(self, member_name) is None:
setattr(self, member_name, [])
getattr(self, member_name).append(atom._CreateClassFromElementTree(
member_class[0], child_tree))
else:
setattr(self, member_name,
atom._CreateClassFromElementTree(member_class, child_tree))
elif child_tree.tag.find('{%s}' % GSPREADSHEETS_EXTENDED_NAMESPACE) == 0:
      # If this is in the custom namespace, add it to the custom dict.
name = child_tree.tag[child_tree.tag.index('}')+1:]
custom = _CustomFromElementTree(child_tree)
if custom:
self.custom[name] = custom
else:
      atom.ExtensionContainer._ConvertElementTreeToMember(self, child_tree)
  # We need to overwrite _AddMembersToElementTree to add special logic to
# convert custom members to XML nodes.
def _AddMembersToElementTree(self, tree):
# Convert the members of this class which are XML child nodes.
# This uses the class's _children dictionary to find the members which
# should become XML child nodes.
member_node_names = [values[0] for tag, values in
self.__class__._children.iteritems()]
for member_name in member_node_names:
member = getattr(self, member_name)
if member is None:
pass
elif isinstance(member, list):
for instance in member:
instance._BecomeChildElement(tree)
else:
member._BecomeChildElement(tree)
# Convert the members of this class which are XML attributes.
for xml_attribute, member_name in self.__class__._attributes.iteritems():
member = getattr(self, member_name)
if member is not None:
tree.attrib[xml_attribute] = member
# Convert all special custom item attributes to nodes
for name, custom in self.custom.iteritems():
custom._BecomeChildElement(tree)
    # Lastly, call the ExtensionContainer's _AddMembersToElementTree to
# convert any extension attributes.
atom.ExtensionContainer._AddMembersToElementTree(self, tree)
def SpreadsheetsListFromString(xml_string):
  return atom.CreateClassFromXMLString(SpreadsheetsList,
                                       xml_string)
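# Illustrative sketch (not part of the original module): how custom gsx columns
# surface on a list entry. The XML below is an assumption written only for
# demonstration; this helper is never called by the module.
def _ExampleListEntryCustomColumns():
  xml_string = ('<entry xmlns="http://www.w3.org/2005/Atom" xmlns:gsx="%s">'
                '<gsx:name>widget</gsx:name><gsx:hours>40</gsx:hours>'
                '</entry>' % GSPREADSHEETS_EXTENDED_NAMESPACE)
  entry = SpreadsheetsListFromString(xml_string)
  # entry.custom maps each column name to a Custom instance, so
  # entry.custom['hours'].text == '40'.
  return dict([(name, custom.text) for name, custom in entry.custom.items()])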
class SpreadsheetsSpreadsheetsFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsSpreadsheet])
def SpreadsheetsSpreadsheetsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsSpreadsheetsFeed,
xml_string)
class SpreadsheetsWorksheetsFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsWorksheet])
def SpreadsheetsWorksheetsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsWorksheetsFeed,
xml_string)
class SpreadsheetsCellsFeed(gdata.BatchFeed):
"""A feed containing Google Spreadsheets Cells"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.BatchFeed._children.copy()
_attributes = gdata.BatchFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsCell])
_children['{%s}rowCount' % GSPREADSHEETS_NAMESPACE] = ('row_count',
RowCount)
_children['{%s}colCount' % GSPREADSHEETS_NAMESPACE] = ('col_count',
ColCount)
def __init__(self, author=None, category=None, contributor=None,
generator=None, icon=None, atom_id=None, link=None, logo=None,
rights=None, subtitle=None, title=None, updated=None,
entry=None, total_results=None, start_index=None,
items_per_page=None, extension_elements=None,
extension_attributes=None, text=None, row_count=None,
col_count=None, interrupted=None):
gdata.BatchFeed.__init__(self, author=author, category=category,
contributor=contributor, generator=generator,
icon=icon, atom_id=atom_id, link=link,
logo=logo, rights=rights, subtitle=subtitle,
title=title, updated=updated, entry=entry,
total_results=total_results,
start_index=start_index,
items_per_page=items_per_page,
extension_elements=extension_elements,
extension_attributes=extension_attributes,
text=text, interrupted=interrupted)
self.row_count = row_count
self.col_count = col_count
def GetBatchLink(self):
for link in self.link:
if link.rel == 'http://schemas.google.com/g/2005#batch':
return link
return None
def SpreadsheetsCellsFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsCellsFeed,
xml_string)
class SpreadsheetsListFeed(gdata.GDataFeed):
"""A feed containing Google Spreadsheets Spreadsheets"""
_tag = 'feed'
_namespace = atom.ATOM_NAMESPACE
_children = gdata.GDataFeed._children.copy()
_attributes = gdata.GDataFeed._attributes.copy()
_children['{%s}entry' % atom.ATOM_NAMESPACE] = ('entry',
[SpreadsheetsList])
def SpreadsheetsListFeedFromString(xml_string):
return atom.CreateClassFromXMLString(SpreadsheetsListFeed,
xml_string)
| apache-2.0 |
overcome/elasticsearch | dev-tools/upload-s3.py | 255 | 2375 | # Licensed to Elasticsearch under one or more contributor
# license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright
# ownership. Elasticsearch licenses this file to you under
# the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on
# an 'AS IS' BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND,
# either express or implied. See the License for the specific
# language governing permissions and limitations under the License.
import os
import sys
import argparse
try:
import boto.s3
except:
raise RuntimeError("""
S3 upload requires boto to be installed
Use one of:
'pip install -U boto'
'apt-get install python-boto'
'easy_install boto'
""")
import boto.s3
def list_buckets(conn):
return conn.get_all_buckets()
def upload_s3(conn, path, key, file, bucket):
print 'Uploading %s to Amazon S3 bucket %s/%s' % \
(file, bucket, os.path.join(path, key))
def percent_cb(complete, total):
sys.stdout.write('.')
sys.stdout.flush()
bucket = conn.create_bucket(bucket)
k = bucket.new_key(os.path.join(path, key))
k.set_contents_from_filename(file, cb=percent_cb, num_cb=100)
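# Illustrative sketch (not part of the original script): calling upload_s3
# directly instead of via the command line. The bucket, path and file names
# are assumptions for demonstration only; this helper is never called.
def example_direct_upload():
    conn = boto.connect_s3()
    # Uploads /tmp/example.zip to
    # s3://download.example.org/elasticsearch/elasticsearch/example.zip
    upload_s3(conn, 'elasticsearch/elasticsearch', 'example.zip',
              '/tmp/example.zip', 'download.example.org')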
if __name__ == '__main__':
parser = argparse.ArgumentParser(description='Uploads files to Amazon S3')
parser.add_argument('--file', '-f', metavar='path to file',
                        help='the file to upload', required=True)
parser.add_argument('--bucket', '-b', metavar='B42', default='download.elasticsearch.org',
help='The S3 Bucket to upload to')
parser.add_argument('--path', '-p', metavar='elasticsearch/elasticsearch', default='elasticsearch/elasticsearch',
help='The key path to use')
parser.add_argument('--key', '-k', metavar='key', default=None,
help='The key - uses the file name as default key')
args = parser.parse_args()
if args.key:
key = args.key
else:
key = os.path.basename(args.file)
connection = boto.connect_s3()
upload_s3(connection, args.path, key, args.file, args.bucket);
| apache-2.0 |
wilvk/ansible | lib/ansible/modules/cloud/profitbricks/profitbricks_volume_attachments.py | 27 | 7796 | #!/usr/bin/python
# Copyright: Ansible Project
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
from __future__ import absolute_import, division, print_function
__metaclass__ = type
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: profitbricks_volume_attachments
short_description: Attach or detach a volume.
description:
- Allows you to attach or detach a volume from a ProfitBricks server. This module has a dependency on profitbricks >= 1.0.0
version_added: "2.0"
options:
datacenter:
description:
- The datacenter in which to operate.
required: true
server:
description:
- The name of the server you wish to detach or attach the volume.
required: true
volume:
description:
- The volume name or ID.
required: true
subscription_user:
description:
- The ProfitBricks username. Overrides the PB_SUBSCRIPTION_ID environment variable.
required: false
subscription_password:
description:
      - The ProfitBricks password. Overrides the PB_PASSWORD environment variable.
required: false
wait:
description:
- wait for the operation to complete before returning
required: false
default: "yes"
choices: [ "yes", "no" ]
wait_timeout:
description:
- how long before wait gives up, in seconds
default: 600
state:
description:
- Indicate desired state of the resource
required: false
default: 'present'
choices: ["present", "absent"]
requirements: [ "profitbricks" ]
author: Matt Baldwin ([email protected])
'''
EXAMPLES = '''
# Attach a Volume
- profitbricks_volume_attachments:
datacenter: Tardis One
server: node002
volume: vol01
wait_timeout: 500
state: present
# Detach a Volume
- profitbricks_volume_attachments:
datacenter: Tardis One
server: node002
volume: vol01
wait_timeout: 500
state: absent
'''
import re
import time
HAS_PB_SDK = True
try:
from profitbricks.client import ProfitBricksService
except ImportError:
HAS_PB_SDK = False
from ansible.module_utils.basic import AnsibleModule
uuid_match = re.compile(
r'[\w]{8}-[\w]{4}-[\w]{4}-[\w]{4}-[\w]{12}', re.I)
def _wait_for_completion(profitbricks, promise, wait_timeout, msg):
if not promise:
return
wait_timeout = time.time() + wait_timeout
while wait_timeout > time.time():
time.sleep(5)
operation_result = profitbricks.get_request(
request_id=promise['requestId'],
status=True)
if operation_result['metadata']['status'] == "DONE":
return
elif operation_result['metadata']['status'] == "FAILED":
raise Exception(
'Request failed to complete ' + msg + ' "' + str(
promise['requestId']) + '" to complete.')
raise Exception(
'Timed out waiting for async operation ' + msg + ' "' + str(
promise['requestId']
) + '" to complete.')
def attach_volume(module, profitbricks):
"""
Attaches a volume.
This will attach a volume to the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the volume was attached, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
volume = module.params.get('volume')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
# Locate UUID for Volume
if not (uuid_match.match(volume)):
volume_list = profitbricks.list_volumes(datacenter)
for v in volume_list['items']:
if volume == v['properties']['name']:
volume = v['id']
break
return profitbricks.attach_volume(datacenter, server, volume)
def detach_volume(module, profitbricks):
"""
Detaches a volume.
This will remove a volume from the server.
module : AnsibleModule object
profitbricks: authenticated profitbricks object.
Returns:
True if the volume was detached, false otherwise
"""
datacenter = module.params.get('datacenter')
server = module.params.get('server')
volume = module.params.get('volume')
# Locate UUID for Datacenter
if not (uuid_match.match(datacenter)):
datacenter_list = profitbricks.list_datacenters()
for d in datacenter_list['items']:
dc = profitbricks.get_datacenter(d['id'])
if datacenter == dc['properties']['name']:
datacenter = d['id']
break
# Locate UUID for Server
if not (uuid_match.match(server)):
server_list = profitbricks.list_servers(datacenter)
for s in server_list['items']:
if server == s['properties']['name']:
server = s['id']
break
# Locate UUID for Volume
if not (uuid_match.match(volume)):
volume_list = profitbricks.list_volumes(datacenter)
for v in volume_list['items']:
if volume == v['properties']['name']:
volume = v['id']
break
return profitbricks.detach_volume(datacenter, server, volume)
def main():
module = AnsibleModule(
argument_spec=dict(
datacenter=dict(),
server=dict(),
volume=dict(),
subscription_user=dict(),
subscription_password=dict(no_log=True),
wait=dict(type='bool', default=True),
wait_timeout=dict(type='int', default=600),
state=dict(default='present'),
)
)
if not HAS_PB_SDK:
module.fail_json(msg='profitbricks required for this module')
if not module.params.get('subscription_user'):
module.fail_json(msg='subscription_user parameter is required')
if not module.params.get('subscription_password'):
module.fail_json(msg='subscription_password parameter is required')
if not module.params.get('datacenter'):
module.fail_json(msg='datacenter parameter is required')
if not module.params.get('server'):
module.fail_json(msg='server parameter is required')
if not module.params.get('volume'):
module.fail_json(msg='volume parameter is required')
subscription_user = module.params.get('subscription_user')
subscription_password = module.params.get('subscription_password')
profitbricks = ProfitBricksService(
username=subscription_user,
password=subscription_password)
state = module.params.get('state')
if state == 'absent':
try:
(changed) = detach_volume(module, profitbricks)
module.exit_json(changed=changed)
except Exception as e:
module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
elif state == 'present':
try:
attach_volume(module, profitbricks)
module.exit_json()
except Exception as e:
module.fail_json(msg='failed to set volume_attach state: %s' % str(e))
if __name__ == '__main__':
main()
| gpl-3.0 |
bright-sparks/chromium-spacewalk | tools/perf/measurements/record_per_area_unittest.py | 33 | 1163 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
from measurements import record_per_area
from telemetry.core import wpr_modes
from telemetry.unittest import options_for_unittests
from telemetry.unittest import page_test_test_case
from telemetry.unittest import test
class RecordPerAreaUnitTest(page_test_test_case.PageTestTestCase):
"""Smoke test for record_per_area measurement
Runs record_per_area measurement on a simple page and verifies
that all metrics were added to the results. The test is purely functional,
i.e. it only checks if the metrics are present and non-zero.
"""
def setUp(self):
self._options = options_for_unittests.GetCopy()
self._options.browser_options.wpr_mode = wpr_modes.WPR_OFF
@test.Disabled('android')
def testRecordPerArea(self):
ps = self.CreatePageSetFromFileInUnittestDataDir('scrollable_page.html')
measurement = record_per_area.RecordPerArea()
results = self.RunMeasurement(measurement, ps, options=self._options)
self.assertEquals(0, len(results.failures))
| bsd-3-clause |
bolkedebruin/airflow | airflow/_vendor/nvd3/cumulativeLineChart.py | 6 | 4048 | #!/usr/bin/python
# -*- coding: utf-8 -*-
"""
Python-nvd3 is a Python wrapper for NVD3 graph library.
NVD3 is an attempt to build re-usable charts and chart components
for d3.js without taking away the power that d3.js gives you.
Project location : https://github.com/areski/python-nvd3
"""
from .NVD3Chart import NVD3Chart, TemplateMixin
class cumulativeLineChart(TemplateMixin, NVD3Chart):
"""
A cumulative line chart is used when you have one important grouping representing
an ordered set of data and one value to show, summed over time.
Python example::
from nvd3 import cumulativeLineChart
chart = cumulativeLineChart(name='cumulativeLineChart', x_is_date=True)
xdata = [1365026400000000, 1365026500000000, 1365026600000000]
ydata = [6, 5, 1]
y2data = [36, 55, 11]
extra_serie = {"tooltip": {"y_start": "There are ", "y_end": " calls"}}
chart.add_serie(name="Serie 1", y=ydata, x=xdata, extra=extra_serie)
extra_serie = {"tooltip": {"y_start": "", "y_end": " mins"}}
chart.add_serie(name="Serie 2", y=y2data, x=xdata, extra=extra_serie)
chart.buildhtml()
Javascript generated:
.. raw:: html
<div id="cumulativeLineChart"><svg style="height:450px; width:100%"></svg></div>
<script>
data_cumulativeLineChart=[{"values": [{"y": 6, "x": 1365026400000000},
{"y": 5, "x": 1365026500000000},
{"y": 1, "x": 1365026600000000}],
"key": "Serie 1", "yAxis": "1"},
{"values": [{"y": 36, "x": 1365026400000000},
{"y": 55, "x": 1365026500000000},
{"y": 11, "x": 1365026600000000}], "key": "Serie 2", "yAxis": "1"}];
nv.addGraph(function() {
var chart = nv.models.cumulativeLineChart();
chart.margin({top: 30, right: 60, bottom: 20, left: 60});
var datum = data_cumulativeLineChart;
chart.xAxis
.tickFormat(function(d) { return d3.time.format('%d %b %Y')(new Date(parseInt(d))) });
chart.yAxis
.tickFormat(d3.format(',.1%'));
chart.tooltipContent(function(key, y, e, graph) {
var x = d3.time.format("%d %b %Y")(new Date(parseInt(graph.point.x)));
var y = String(graph.point.y);
if(key == 'Serie 1'){
var y = 'There are ' + String(e) + ' calls';
}if(key == 'Serie 2'){
var y = String(e) + ' mins';
}
tooltip_str = '<center><b>'+key+'</b></center>' + y + ' on ' + x;
return tooltip_str;
});
chart.showLegend(true);
d3.select('#cumulativeLineChart svg')
.datum(datum)
.transition().duration(500)
.attr('height', 450)
.call(chart); });
</script>
"""
CHART_FILENAME = "./cumulativelinechart.html"
template_chart_nvd3 = NVD3Chart.template_environment.get_template(CHART_FILENAME)
def __init__(self, **kwargs):
super().__init__(**kwargs)
self.model = 'cumulativeLineChart'
height = kwargs.get('height', 450)
width = kwargs.get('width', None)
if kwargs.get('x_is_date', False):
self.set_date_flag(True)
self.create_x_axis('xAxis',
format=kwargs.get('x_axis_format', '%d %b %Y'),
date=True)
self.set_custom_tooltip_flag(True)
else:
self.create_x_axis('xAxis', format=kwargs.get(
'x_axis_format', '.2f'))
self.create_y_axis('yAxis', format=kwargs.get('y_axis_format', '.1%'))
self.set_graph_height(height)
if width:
self.set_graph_width(width)
| apache-2.0 |
leeamen/eva | 2017/cutwords.py | 1 | 2739 | #!/usr/bin/python
#coding:utf-8
import mybaselib
import logging
import jieba
import jieba.analyse
import numpy as np
import csv
import sys
import stat
import os
import re
reload(sys)
sys.setdefaultencoding('utf-8')
logger = logging.getLogger(__name__)
logger.setLevel(logging.DEBUG)
class Row(object):
def __init__(self, row):
arr = filter(lambda x:len(x.strip()) > 0, re.split(' ', row))
if len(arr) is not 2:
print 'row:',row
sys.exit()
self.sentence = arr[0]
self.class_label = arr[1]
def GetSentence(self):
return self.sentence
def GetClassLabel(self):
return self.class_label
def walktree(dirname, callback, userdata):
for f in os.listdir(dirname):
pathname = os.path.join(dirname, f)
mode = os.stat(pathname).st_mode
if stat.S_ISDIR(mode):
pass
# It's a directory, recurse into it
#walktree(pathname, callback)
elif stat.S_ISREG(mode):
# It's a file, call the callback function
callback(pathname, userdata)
else:
# Unknown file type, print a message
logger.error('Skipping %s', pathname)
def cutwords(filename, userdata):
if '.txt' != filename[-4:]:
return
logger.debug('start process file:%s', filename)
wf = open(userdata['output'], 'a')
with open(filename, 'rb') as rf:
while True:
line = rf.readline()
if line is None or len(line) == 0:
break;
row = Row(line)
sentence = row.GetSentence()
      # cut the sentence into words
cut_list = jieba.lcut(sentence, cut_all = False)
wf.write(' '.join(cut_list) + '\n')
wf.flush()
wf.close()
if __name__ == '__main__':
  # load the user-defined dictionary
jieba.load_userdict('./data/userdict.data')
sentence = '该类会将文本中的词语转换为词频矩阵'
sentence = '春川辣炒鸡排外表是古典的吗?'
print '|'.join(jieba.lcut(sentence, cut_all = False))
  # cut words for all txt files under the data directory
userdata = {}
userdata['output'] = './data/all.cuts'
os.system('rm -f ./data/all.cuts')
walktree('data', cutwords, userdata)
sys.exit()
# jieba.analyse.extract_tags(sentence, topK=20, withWeight=False, allowPOS=())
#自己设置语料库
# corpus_file = '***.corpus'
# tags = jieba.analyse.extract_tags('该类会将文本中的词语转换为词频矩阵', topK=5)
# print '|'.join(tags)
filename = sys.argv[1]
wf = open(filename + '.cuts', 'wb')
with open(filename, 'rb') as rf:
while True:
line = rf.readline()
if line is None or len(line) == 0:
break;
row = mybaselib.Row(line)
sentence = row.GetSentence()
sentence = sentence.strip()
      # cut the sentence into words
cut_list = jieba.lcut(sentence, cut_all = False)
wf.write(' '.join(cut_list) + ' ')
wf.flush()
wf.close()
| apache-2.0 |
40223232/2015cd_midterm | static/Brython3.1.1-20150328-091302/Lib/site-packages/pygame/display.py | 603 | 25179 | #!/usr/bin/env python
'''Pygame module to control the display window and screen.
This module offers control over the pygame display. Pygame has a single display
Surface that is either contained in a window or runs full screen. Once you
create the display you treat it as a regular Surface. Changes are not
immediately visible onscreen, you must choose one of the two flipping functions
to update the actual display.
The pygame display can actually be initialized in one of several modes. By
default the display is a basic software driven framebuffer. You can request
special modules like hardware acceleration and OpenGL support. These are
controlled by flags passed to pygame.display.set_mode().
Pygame can only have a single display active at any time. Creating a new one
with pygame.display.set_mode() will close the previous display. If precise
control is needed over the pixel format or display resolutions, use the
functions pygame.display.mode_ok(), pygame.display.list_modes(), and
pygame.display.Info() to query information about the display.
Once the display Surface is created, the functions from this module
effect the single existing display. The Surface becomes invalid if the module
is uninitialized. If a new display mode is set, the existing Surface will
automatically switch to operate on the new display.
When the display mode is set, several events are placed on the pygame
event queue. pygame.QUIT is sent when the user has requested the program
to shutdown. The window will receive pygame.ACTIVEEVENT events as the
display gains and loses input focus. If the display is set with the
pygame.RESIZABLE flag, pygame.VIDEORESIZE events will be sent when the
user adjusts the window dimensions. Hardware displays that draw direct
to the screen will get pygame.VIDEOEXPOSE events when portions of the
window must be redrawn.
'''
__docformat__ = 'restructuredtext'
__version__ = '$Id$'
import sys
from SDL import *
import pygame.base
import pygame.pkgdata
import pygame.surface
#brython
import pygame.constants
from browser import window
#from javascript import console
_display_surface = None
_icon_was_set = 0
_icon_defaultname = 'pygame_icon.bmp'
_init_video=False
def __PYGAMEinit__():
pygame.base.register_quit(_display_autoquit)
def _display_autoquit():
global _display_surface
_display_surface = None
def init():
'''Initialize the display module.
Initializes the pygame display module. The display module cannot do
anything until it is initialized. This is usually handled for you
automatically when you call the higher level `pygame.init`.
Pygame will select from one of several internal display backends when it
is initialized. The display mode will be chosen depending on the platform
and permissions of current user. Before the display module is initialized
the environment variable SDL_VIDEODRIVER can be set to control which
backend is used. The systems with multiple choices are listed here.
Windows
windib, directx
Unix
x11, dga, fbcon, directfb, ggi, vgl, svgalib, aalib
On some platforms it is possible to embed the pygame display into an already
existing window. To do this, the environment variable SDL_WINDOWID must be
set to a string containing the window id or handle. The environment variable
is checked when the pygame display is initialized. Be aware that there can
be many strange side effects when running in an embedded display.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoinit()
__PYGAMEinit__()
def quit():
'''Uninitialize the display module.
This will shut down the entire display module. This means any active
displays will be closed. This will also be handled automatically when the
program exits.
It is harmless to call this more than once, repeated calls have no effect.
'''
pygame.base._video_autoquit()
_display_autoquit()
def get_init():
'''Get status of display module initialization.
:rtype: bool
:return: True if SDL's video system is currently initialized.
'''
return SDL_WasInit(SDL_INIT_VIDEO) != 0
def set_mode(resolution, flags=0, depth=0):
'''Initialize a window or screen for display.
This function will create a display Surface. The arguments passed in are
requests for a display type. The actual created display will be the best
possible match supported by the system.
The `resolution` argument is a pair of numbers representing the width and
height. The `flags` argument is a collection of additional options.
The `depth` argument represents the number of bits to use for color.
The Surface that gets returned can be drawn to like a regular Surface but
changes will eventually be seen on the monitor.
It is usually best to not pass the depth argument. It will default to the
best and fastest color depth for the system. If your game requires a
specific color format you can control the depth with this argument. Pygame
will emulate an unavailable color depth which can be slow.
When requesting fullscreen display modes, sometimes an exact match for the
requested resolution cannot be made. In these situations pygame will select
    the closest compatible match. The returned surface will still always match
the requested resolution.
The flags argument controls which type of display you want. There are
several to choose from, and you can even combine multiple types using the
bitwise or operator, (the pipe "|" character). If you pass 0 or no flags
argument it will default to a software driven window. Here are the display
flags you will want to choose from:
pygame.FULLSCREEN
create a fullscreen display
pygame.DOUBLEBUF
recommended for HWSURFACE or OPENGL
pygame.HWSURFACE
        hardware accelerated, only in FULLSCREEN
pygame.OPENGL
create an opengl renderable display
pygame.RESIZABLE
display window should be sizeable
pygame.NOFRAME
display window will have no border or controls
:Parameters:
- `resolution`: int, int
- `flags`: int
- `depth`: int
:rtype: `Surface`
'''
global _display_surface
w, h = resolution
if w <= 0 or h <= 0:
raise pygame.base.error('Cannot set 0 sized display mode')
if not SDL_WasInit(SDL_INIT_VIDEO):
init()
if flags & SDL_OPENGL:
if flags & SDL_DOUBLEBUF:
flags &= ~SDL_DOUBLEBUF
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 1)
else:
SDL_GL_SetAttribute(SDL_GL_DOUBLEBUFFER, 0)
if depth:
SDL_GL_SetAttribute(SDL_GL_DEPTH_SIZE, depth)
surf = SDL_SetVideoMode(w, h, depth, flags)
if SDL_GL_GetAttribute(SDL_GL_DOUBLEBUFFER):
surf.flags |= SDL_DOUBLEBUF
else:
if not depth:
flags |= SDL_ANYFORMAT
surf = SDL_SetVideoMode(w, h, depth, flags)
title, icontitle = SDL_WM_GetCaption()
if not title:
SDL_WM_SetCaption('pygame window', 'pygame')
SDL_PumpEvents()
if _display_surface:
_display_surface._surf = surf
else:
#_display_surface = pygame.surface.Surface(surf=surf)
_display_surface = pygame.surface.Surface(dim=(w,h))
document['pydiv'] <= _display_surface.canvas
if sys.platform != 'darwin':
if not _icon_was_set:
try:
file = pygame.pkgdata.getResource(_icon_defaultname)
iconsurf = pygame.image.load(file)
SDL_SetColorKey(iconsurf._surf, SDL_SRCCOLORKEY, 0)
set_icon(iconsurf)
except IOError:
# Not worth dying over.
pass
return _display_surface
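# Illustrative sketch (not part of the original module): the typical call
# sequence for creating a display surface and pushing it to the screen. The
# resolution and fill colour are assumptions for demonstration only; this
# helper is never called by the module.
def _example_set_mode_usage():
    # Request a 640x480 window; the closest supported mode is used when the
    # exact request cannot be satisfied.
    screen = set_mode((640, 480))
    screen.fill((0, 0, 0))  # assumes the returned Surface supports fill()
    flip()
    return screen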
def get_surface():
'''Get current display surface.
Returns a `Surface` object representing the current display. Will
return None if called before the display mode is set.
:rtype: `Surface`
'''
return _display_surface
def flip():
'''Update the full display surface to the screen.
This will update the contents of the entire display. If your display mode
is using the flags pygame.HWSURFACE and pygame.DOUBLEBUF, this will wait
for a vertical retrace and swap the surfaces. If you are using a different
type of display mode, it will simply update the entire contents of the
surface.
When using an pygame.OPENGL display mode this will perform a gl buffer
swap.
'''
pass
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
SDL_GL_SwapBuffers()
else:
SDL_Flip(screen)
def _crop_rect(w, h, rect):
if rect.x >= w or rect.y >= h or \
rect.x + rect.w <= 0 or rect.y + rect.h <= 0:
return None
rect.x = max(rect.x, 0)
rect.y = max(rect.y, 0)
rect.w = min(rect.x + rect.w, w) - rect.x
rect.h = min(rect.y + rect.h, h) - rect.y
return rect
def update(*rectangle):
'''Update portions of the screen for software displays.
This function is like an optimized version of pygame.display.flip() for
software displays. It allows only a portion of the screen to be updated,
instead of the entire area. If no argument is passed it updates the entire
Surface area like `flip`.
You can pass the function a single rectangle, or a sequence of rectangles.
It is more efficient to pass many rectangles at once than to call update
multiple times with a single rectangle or a partial list. If passing
a sequence of rectangles it is safe to include None values in the list,
which will be skipped.
This call cannot be used on pygame.OPENGL displays and will generate an
exception.
:Parameters:
`rectangle` : Rect or sequence of Rect
Area(s) to update
'''
# Undocumented: also allows argument tuple to represent one rect;
# e.g. update(0, 0, 10, 10) or update((0, 0), (10, 10))
_video_init_check()
screen = SDL_GetVideoSurface()
if not screen:
raise pygame.base.error('Display mode not set')
if screen.flags & SDL_OPENGL:
raise pygame.base.error('Cannot update an OPENGL display')
if not rectangle:
SDL_UpdateRect(screen, 0, 0, 0, 0)
else:
#w, h = screen.w, screen.h
w, h = screen.width, screen.height
try:
rect = pygame.rect._rect_from_object(rectangle)._r
rect = _crop_rect(w, h, rect)
if rect:
SDL_UpdateRect(screen, rect.x, rect.y, rect.w, rect.h)
except TypeError:
rectangle = rectangle[0]
rects = [_crop_rect(w, h, pygame.rect._rect_from_object(r)._r) \
for r in rectangle if r]
SDL_UpdateRects(screen, rects)
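# Hedged example (illustrative only; the rectangles are assumptions): updating
# only the damaged regions is cheaper than flipping the whole display, and one
# call with a list beats several calls with a single rect each, e.g.
#
#   dirty = [pygame.Rect(0, 0, 32, 32), pygame.Rect(100, 100, 64, 64)]
#   pygame.display.update(dirty)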
def get_driver():
'''Get name of the pygame display backend.
Pygame chooses one of many available display backends when it is
initialized. This returns the internal name used for the display backend.
This can be used to provide limited information about what display
capabilities might be accelerated.
:rtype: str
'''
_video_init_check()
return SDL_VideoDriverName()
def Info():
'''Create a video display information object.
Creates a simple object containing several attributes to describe the
current graphics environment. If this is called before
`set_mode` some platforms can provide information about the default
display mode. This can also be called after setting the display mode to
verify specific display options were satisfied.
:see: `VideoInfo`
:rtype: `VideoInfo`
'''
_video_init_check()
return VideoInfo()
class VideoInfo:
'''Video display information.
:Ivariables:
`hw` : bool
True if the display is hardware accelerated.
`wm` : bool
True if windowed display modes can be used.
`video_mem` : int
The amount of video memory on the display, in megabytes. 0 if
unknown.
`bitsize` : int
Number of bits used to store each pixel.
`bytesize` : int
Number of bytes used to store each pixel.
`masks` : (int, int, int, int)
RGBA component mask.
`shifts` : (int, int, int, int)
RGBA component shift amounts.
`losses` : (int, int, int, int)
Number of bits lost from a 32 bit depth for each RGBA component.
`blit_hw` : bool
True if hardware Surface blitting is accelerated
`blit_hw_CC` : bool
True if hardware Surface colorkey blitting is accelerated
`blit_hw_A` : bool
True if hardware Surface pixel alpha blitting is accelerated
`blit_sw` : bool
True if software Surface blitting is accelerated
`blit_sw_CC` : bool
True if software Surface colorkey blitting is accelerated
`blit_sw_A` : bool
True if software Surface pixel alpha blitting is accelerated
'''
def __init__(self):
#brython
#info = SDL_GetVideoInfo()
info=None
if not info:
raise pygame.base.error('Could not retrieve video info')
self.hw = info.hw_available
self.wm = info.wm_available
self.blit_hw = info.blit_hw
self.blit_hw_CC = info.blit_hw_CC
self.blit_hw_A = info.blit_hw_A
self.blit_sw = info.blit_sw
self.blit_sw_CC = info.blit_sw_CC
self.blit_sw_A = info.blit_sw_A
self.blit_fill = info.blit_fill
self.video_mem = info.video_mem
self.bitsize = info.vfmt.BitsPerPixel
self.bytesize = info.vfmt.BytesPerPixel
self.masks = (info.vfmt.Rmask, info.vfmt.Gmask,
info.vfmt.Bmask, info.vfmt.Amask)
self.shifts = (info.vfmt.Rshift, info.vfmt.Gshift,
info.vfmt.Bshift, info.vfmt.Ashift)
self.losses = (info.vfmt.Rloss, info.vfmt.Gloss,
info.vfmt.Bloss, info.vfmt.Aloss)
def __str__(self):
return ('<VideoInfo(hw = %d, wm = %d,video_mem = %d\n' + \
' blit_hw = %d, blit_hw_CC = %d, blit_hw_A = %d,\n'
' blit_sw = %d, blit_sw_CC = %d, blit_sw_A = %d,\n'
' bitsize = %d, bytesize = %d,\n'
' masks = (%d, %d, %d, %d),\n'
' shifts = (%d, %d, %d, %d),\n'
' losses = (%d, %d, %d, %d)>\n') % \
(self.hw, self.wm, self.video_mem,
self.blit_hw, self.blit_hw_CC, self.blit_hw_A,
self.blit_sw, self.blit_sw_CC, self.blit_sw_A,
self.bitsize, self.bytesize,
self.masks[0], self.masks[1], self.masks[2], self.masks[3],
self.shifts[0], self.shifts[1], self.shifts[2], self.shifts[3],
self.losses[0], self.losses[1], self.losses[2], self.losses[3])
def __repr__(self):
return str(self)
def get_wm_info():
'''Get settings from the system window manager.
:note: Currently unimplemented, returns an empty dict.
:rtype: dict
'''
_video_init_check()
return {}
def list_modes(depth=0, flags=pygame.constants.FULLSCREEN):
'''Get list of available fullscreen modes.
This function returns a list of possible dimensions for a specified color
depth. The return value will be an empty list if no display modes are
available with the given arguments. A return value of -1 means that any
requested resolution should work (this is likely the case for windowed
modes). Mode sizes are sorted from biggest to smallest.
If depth is 0, SDL will choose the current/best color depth for the
display. The flags defaults to pygame.FULLSCREEN, but you may need to add
additional flags for specific fullscreen modes.
:rtype: list of (int, int), or -1
:return: list of (width, height) pairs, or -1 if any mode is suitable.
'''
_video_init_check()
#brython
#format = SDL_PixelFormat()
#format.BitsPerPixel = depth
#brython
#if not format.BitsPerPixel:
# format.BitsPerPixel = SDL_GetVideoInfo().vfmt.BitsPerPixel
#brython
#rects = SDL_ListModes(format, flags)
rects = -1  # brython: SDL_ListModes is unavailable here, so report that any mode works
if rects == -1:
return -1
return [(r.w, r.h) for r in rects]
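# Hedged example (illustrative only; the fallback size is an assumption):
# callers should handle both possible return shapes of list_modes() -- a list
# of (w, h) pairs sorted biggest first, or -1 meaning any resolution works:
#
#   modes = pygame.display.list_modes()
#   if modes == -1:
#       size = (800, 600)      # assumed fallback size for the example
#   else:
#       size = modes[0]        # largest available fullscreen mode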
def mode_ok(size, flags=0, depth=0):
'''Pick the best color depth for a display mode
This function uses the same arguments as pygame.display.set_mode(). It is
used to determine if a requested display mode is available. It will return
0 if the display mode cannot be set. Otherwise it will return a pixel
depth that best matches the display asked for.
Usually the depth argument is not passed, but some platforms can support
multiple display depths. If passed it will hint to which depth is a better
match.
The most useful flags to pass will be pygame.HWSURFACE, pygame.DOUBLEBUF,
and maybe pygame.FULLSCREEN. The function will return 0 if these display
flags cannot be set.
:rtype: int
:return: depth, in bits per pixel, or 0 if the requested mode cannot be
set.
'''
_video_init_check()
if not depth:
depth = SDL_GetVideoInfo().vfmt.BitsPerPixel
return SDL_VideoModeOK(size[0], size[1], depth, flags)
def gl_set_attribute(flag, value):
'''Set special OpenGL attributes.
When calling `pygame.display.set_mode` with the OPENGL flag,
pygame automatically handles setting the OpenGL attributes like
color and doublebuffering. OpenGL offers several other attributes
you may want control over. Pass one of these attributes as the
flag, and its appropriate value.
This must be called before `pygame.display.set_mode`.
The OPENGL flags are: GL_ALPHA_SIZE, GL_DEPTH_SIZE, GL_STENCIL_SIZE,
GL_ACCUM_RED_SIZE, GL_ACCUM_GREEN_SIZE, GL_ACCUM_BLUE_SIZE,
GL_ACCUM_ALPHA_SIZE, GL_MULTISAMPLEBUFFERS, GL_MULTISAMPLESAMPLES,
GL_STEREO.
:Parameters:
- `flag`: int
- `value`: int
'''
_video_init_check()
SDL_GL_SetAttribute(flag, value)
def gl_get_attribute(flag):
'''Get special OpenGL attributes.
After calling `pygame.display.set_mode` with the OPENGL flag
you will likely want to check the value of any special OpenGL
attributes you requested. You will not always get what you
requested.
See `gl_set_attribute` for a list of flags.
:Parameters:
- `flag`: int
:rtype: int
'''
_video_init_check()
return SDL_GL_GetAttribute(flag)
def get_active():
'''Get state of display mode
Returns True if the current display is active on the screen. This is
done with the call to ``pygame.display.set_mode()``. It is
potentially subject to the activity of a running window manager.
Calling `set_mode` will change all existing display surfaces
to reference the new display mode. The old display surface will
be lost after this call.
'''
#brython
return SDL_GetAppState() & SDL_APPACTIVE != 0
def iconify():
'''Iconify the display surface.
Request the window for the display surface be iconified or hidden. Not all
systems and displays support an iconified display. The function will
return True if successful.
When the display is iconified pygame.display.get_active() will return
False. The event queue should receive a pygame.APPACTIVE event when the
window has been iconified.
:rtype: bool
:return: True on success
'''
_video_init_check()
try:
SDL_WM_IconifyWindow()
return True
except SDL_Exception:
return False
def toggle_fullscreen():
'''Switch between fullscreen and windowed displays.
Switches the display window between windowed and fullscreen modes. This
function only works under the unix x11 video driver. For most situations
it is better to call pygame.display.set_mode() with new display flags.
:rtype: bool
'''
_video_init_check()
screen = SDL_GetVideoSurface()
try:
SDL_WM_ToggleFullScreen(screen)
return True
except SDL_Exception:
return False
def set_gamma(red, green=None, blue=None):
'''Change the hardware gamma ramps.
Set the red, green, and blue gamma values on the display hardware. If the
green and blue arguments are not passed, they will both be the same as
red. Not all systems and hardware support gamma ramps, if the function
succeeds it will return True.
A gamma value of 1.0 creates a linear color table. Lower values will
darken the display and higher values will brighten.
:Parameters:
`red` : float
Red gamma value
`green` : float
Green gamma value
`blue` : float
Blue gamma value
:rtype: bool
'''
#brython
_video_init_check()
if not green or not blue:
green = red
blue = red
try:
SDL_SetGamma(red, green, blue)
return True
except SDL_Exception:
return False
def set_gamma_ramp(red, green, blue):
'''Change the hardware gamma ramps with a custom lookup.
Set the red, green, and blue gamma ramps with an explicit lookup table.
Each argument should be sequence of 256 integers. The integers should
range between 0 and 0xffff. Not all systems and hardware support gamma
ramps, if the function succeeds it will return True.
:Parameters:
`red` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving red component
lookup.
`green` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving green component
lookup.
`blue` : sequence of int
Sequence of 256 ints in range [0, 0xffff] giving blue component
lookup.
:rtype: bool
'''
_video_init_check()
try:
SDL_SetGammaRamp(red, green, blue)
return True
except SDL_Exception:
return False
def set_icon(surface):
'''Change the system image for the display window.
Sets the runtime icon the system will use to represent the display window.
All windows default to a simple pygame logo for the window icon.
You can pass any surface, but most systems want a smaller image around
32x32. The image can have colorkey transparency which will be passed to
the system.
Some systems do not allow the window icon to change after it has been
shown. This function can be called before `set_mode` to
create the icon before the display mode is set.
:Parameters:
`surface` : `Surface`
Surface containing image to set.
'''
global _icon_was_set
pygame.base._video_autoinit()
SDL_WM_SetIcon(surface._surf, None)
_icon_was_set = 1
def set_caption(title, icontitle=None):
'''Set the current window caption.
If the display has a window title, this function will change the name on
the window. Some systems support an alternate shorter title to be used for
minimized displays.
:Parameters:
`title` : unicode
Window caption
`icontitle` : unicode
Icon caption, if supported
'''
if not icontitle:
icontitle = title
SDL_WM_SetCaption(title, icontitle)
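# Hedged usage sketch (the titles are assumptions for the example): the short
# icon title is optional and defaults to the window caption,
#
#   pygame.display.set_caption('My Game')             # icon title == 'My Game'
#   pygame.display.set_caption('My Game', 'Game')     # separate minimized title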
def get_caption():
'''Get the current window caption.
Returns the title and icontitle for the display Surface. These will often
be the same value.
:rtype: (unicode, unicode)
:return: title, icontitle
'''
# XXX deviation from pygame, don't return () if title == None
#return SDL_WM_GetCaption()
return "", ""
def set_palette(palette=None):
'''Set the display color palette for indexed displays.
This will change the video display color palette for 8bit displays. This
does not change the palette for the actual display Surface, only the
palette that is used to display the Surface. If no palette argument is
passed, the system default palette will be restored. The palette is a
sequence of RGB triplets.
:Parameters:
`palette` : sequence of (int, int, int)
Sequence having at most 256 RGB triplets.
'''
_video_init_check()
surf = SDL_GetVideoSurface()
if not surf:
raise pygame.base.error('No display mode is set')
if surf.format.BytesPerPixel != 1 or not surf.format._palette:
raise pygame.base.error('Display mode is not colormapped')
if not palette:
SDL_SetPalette(surf, SDL_PHYSPAL, surf.format.palette.colors, 0)
return
length = min(surf.format.palette.ncolors, len(palette))
colors = [SDL_Color(r, g, b) for r, g, b in palette[:length]]
SDL_SetPalette(surf, SDL_PHYSPAL, colors, 0)
def _video_init_check():
if not SDL_WasInit(SDL_INIT_VIDEO):
raise pygame.base.error('video system not initialized')
| gpl-3.0 |
asrar7787/Test-Frontools | node_modules/node-gyp/gyp/pylib/gyp/xcode_ninja.py | 1789 | 10585 | # Copyright (c) 2014 Google Inc. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Xcode-ninja wrapper project file generator.
This updates the data structures passed to the Xcode gyp generator to build
with ninja instead. The Xcode project itself is transformed into a list of
executable targets, each with a build step to build with ninja, and a target
with every source and resource file. This appears to sidestep some of the
major performance headaches experienced when using complex projects with a large number
of targets within Xcode.
"""
import errno
import gyp.generator.ninja
import os
import re
import xml.sax.saxutils
def _WriteWorkspace(main_gyp, sources_gyp, params):
""" Create a workspace to wrap main and sources gyp paths. """
(build_file_root, build_file_ext) = os.path.splitext(main_gyp)
workspace_path = build_file_root + '.xcworkspace'
options = params['options']
if options.generator_output:
workspace_path = os.path.join(options.generator_output, workspace_path)
try:
os.makedirs(workspace_path)
except OSError, e:
if e.errno != errno.EEXIST:
raise
output_string = '<?xml version="1.0" encoding="UTF-8"?>\n' + \
'<Workspace version = "1.0">\n'
for gyp_name in [main_gyp, sources_gyp]:
name = os.path.splitext(os.path.basename(gyp_name))[0] + '.xcodeproj'
name = xml.sax.saxutils.quoteattr("group:" + name)
output_string += ' <FileRef location = %s></FileRef>\n' % name
output_string += '</Workspace>\n'
workspace_file = os.path.join(workspace_path, "contents.xcworkspacedata")
try:
with open(workspace_file, 'r') as input_file:
input_string = input_file.read()
if input_string == output_string:
return
except IOError:
# Ignore errors if the file doesn't exist.
pass
with open(workspace_file, 'w') as output_file:
output_file.write(output_string)
def _TargetFromSpec(old_spec, params):
""" Create fake target for xcode-ninja wrapper. """
# Determine ninja top level build dir (e.g. /path/to/out).
ninja_toplevel = None
jobs = 0
if params:
options = params['options']
ninja_toplevel = \
os.path.join(options.toplevel_dir,
gyp.generator.ninja.ComputeOutputDir(params))
jobs = params.get('generator_flags', {}).get('xcode_ninja_jobs', 0)
target_name = old_spec.get('target_name')
product_name = old_spec.get('product_name', target_name)
product_extension = old_spec.get('product_extension')
ninja_target = {}
ninja_target['target_name'] = target_name
ninja_target['product_name'] = product_name
if product_extension:
ninja_target['product_extension'] = product_extension
ninja_target['toolset'] = old_spec.get('toolset')
ninja_target['default_configuration'] = old_spec.get('default_configuration')
ninja_target['configurations'] = {}
# Tell Xcode to look in |ninja_toplevel| for build products.
new_xcode_settings = {}
if ninja_toplevel:
new_xcode_settings['CONFIGURATION_BUILD_DIR'] = \
"%s/$(CONFIGURATION)$(EFFECTIVE_PLATFORM_NAME)" % ninja_toplevel
if 'configurations' in old_spec:
for config in old_spec['configurations'].iterkeys():
old_xcode_settings = \
old_spec['configurations'][config].get('xcode_settings', {})
if 'IPHONEOS_DEPLOYMENT_TARGET' in old_xcode_settings:
new_xcode_settings['CODE_SIGNING_REQUIRED'] = "NO"
new_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET'] = \
old_xcode_settings['IPHONEOS_DEPLOYMENT_TARGET']
ninja_target['configurations'][config] = {}
ninja_target['configurations'][config]['xcode_settings'] = \
new_xcode_settings
ninja_target['mac_bundle'] = old_spec.get('mac_bundle', 0)
ninja_target['ios_app_extension'] = old_spec.get('ios_app_extension', 0)
ninja_target['ios_watchkit_extension'] = \
old_spec.get('ios_watchkit_extension', 0)
ninja_target['ios_watchkit_app'] = old_spec.get('ios_watchkit_app', 0)
ninja_target['type'] = old_spec['type']
if ninja_toplevel:
ninja_target['actions'] = [
{
'action_name': 'Compile and copy %s via ninja' % target_name,
'inputs': [],
'outputs': [],
'action': [
'env',
'PATH=%s' % os.environ['PATH'],
'ninja',
'-C',
new_xcode_settings['CONFIGURATION_BUILD_DIR'],
target_name,
],
'message': 'Compile and copy %s via ninja' % target_name,
},
]
if jobs > 0:
ninja_target['actions'][0]['action'].extend(('-j', jobs))
return ninja_target
def IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
"""Limit targets for Xcode wrapper.
Xcode sometimes performs poorly with too many targets, so only include
proper executable targets, with filters to customize.
Arguments:
target_extras: Regular expression to always add, matching any target.
executable_target_pattern: Regular expression limiting executable targets.
spec: Specifications for target.
"""
target_name = spec.get('target_name')
# Always include targets matching target_extras.
if target_extras is not None and re.search(target_extras, target_name):
return True
# Otherwise just show executable targets.
if spec.get('type', '') == 'executable' and \
spec.get('product_extension', '') != 'bundle':
# If there is a filter and the target does not match, exclude the target.
if executable_target_pattern is not None:
if not re.search(executable_target_pattern, target_name):
return False
return True
return False
def CreateWrapper(target_list, target_dicts, data, params):
"""Initialize targets for the ninja wrapper.
This sets up the necessary variables in the targets to generate Xcode projects
that use ninja as an external builder.
Arguments:
target_list: List of target pairs: 'base/base.gyp:base'.
target_dicts: Dict of target properties keyed on target pair.
data: Dict of flattened build files keyed on gyp path.
params: Dict of global options for gyp.
"""
orig_gyp = params['build_files'][0]
for gyp_name, gyp_dict in data.iteritems():
if gyp_name == orig_gyp:
depth = gyp_dict['_DEPTH']
# Check for custom main gyp name, otherwise use the default CHROMIUM_GYP_FILE
# and prepend .ninja before the .gyp extension.
generator_flags = params.get('generator_flags', {})
main_gyp = generator_flags.get('xcode_ninja_main_gyp', None)
if main_gyp is None:
(build_file_root, build_file_ext) = os.path.splitext(orig_gyp)
main_gyp = build_file_root + ".ninja" + build_file_ext
# Create new |target_list|, |target_dicts| and |data| data structures.
new_target_list = []
new_target_dicts = {}
new_data = {}
# Set base keys needed for |data|.
new_data[main_gyp] = {}
new_data[main_gyp]['included_files'] = []
new_data[main_gyp]['targets'] = []
new_data[main_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
# Normally the xcode-ninja generator includes only valid executable targets.
# If |xcode_ninja_executable_target_pattern| is set, that list is reduced to
# executable targets that match the pattern. (Default all)
executable_target_pattern = \
generator_flags.get('xcode_ninja_executable_target_pattern', None)
# For including other non-executable targets, add the matching target name
# to the |xcode_ninja_target_pattern| regular expression. (Default none)
target_extras = generator_flags.get('xcode_ninja_target_pattern', None)
for old_qualified_target in target_list:
spec = target_dicts[old_qualified_target]
if IsValidTargetForWrapper(target_extras, executable_target_pattern, spec):
# Add to new_target_list.
target_name = spec.get('target_name')
new_target_name = '%s:%s#target' % (main_gyp, target_name)
new_target_list.append(new_target_name)
# Add to new_target_dicts.
new_target_dicts[new_target_name] = _TargetFromSpec(spec, params)
# Add to new_data.
for old_target in data[old_qualified_target.split(':')[0]]['targets']:
if old_target['target_name'] == target_name:
new_data_target = {}
new_data_target['target_name'] = old_target['target_name']
new_data_target['toolset'] = old_target['toolset']
new_data[main_gyp]['targets'].append(new_data_target)
# Create sources target.
sources_target_name = 'sources_for_indexing'
sources_target = _TargetFromSpec(
{ 'target_name' : sources_target_name,
'toolset': 'target',
'default_configuration': 'Default',
'mac_bundle': '0',
'type': 'executable'
}, None)
# Tell Xcode to look everywhere for headers.
sources_target['configurations'] = {'Default': { 'include_dirs': [ depth ] } }
sources = []
for target, target_dict in target_dicts.iteritems():
base = os.path.dirname(target)
files = target_dict.get('sources', []) + \
target_dict.get('mac_bundle_resources', [])
for action in target_dict.get('actions', []):
files.extend(action.get('inputs', []))
# Remove files starting with $. These are mostly intermediate files for the
# build system.
files = [ file for file in files if not file.startswith('$')]
# Make sources relative to root build file.
relative_path = os.path.dirname(main_gyp)
sources += [ os.path.relpath(os.path.join(base, file), relative_path)
for file in files ]
sources_target['sources'] = sorted(set(sources))
# Put sources_to_index in its own gyp.
sources_gyp = \
os.path.join(os.path.dirname(main_gyp), sources_target_name + ".gyp")
fully_qualified_target_name = \
'%s:%s#target' % (sources_gyp, sources_target_name)
# Add to new_target_list, new_target_dicts and new_data.
new_target_list.append(fully_qualified_target_name)
new_target_dicts[fully_qualified_target_name] = sources_target
new_data_target = {}
new_data_target['target_name'] = sources_target['target_name']
new_data_target['_DEPTH'] = depth
new_data_target['toolset'] = "target"
new_data[sources_gyp] = {}
new_data[sources_gyp]['targets'] = []
new_data[sources_gyp]['included_files'] = []
new_data[sources_gyp]['xcode_settings'] = \
data[orig_gyp].get('xcode_settings', {})
new_data[sources_gyp]['targets'].append(new_data_target)
# Write workspace to file.
_WriteWorkspace(main_gyp, sources_gyp, params)
return (new_target_list, new_target_dicts, new_data)
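# Hedged usage note (not from the original file; the gyp file name and the
# pattern value are assumptions): the wrapper is normally driven through the
# generator flags read in CreateWrapper(), for example
#
#   GYP_GENERATORS=xcode-ninja \
#   GYP_GENERATOR_FLAGS='xcode_ninja_target_pattern=^my_target$ xcode_ninja_jobs=4' \
#   gyp all.gyp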
| mit |
abhinavmoudgil95/root | tutorials/tmva/keras/MulticlassKeras.py | 15 | 2346 | #!/usr/bin/env python
from ROOT import TMVA, TFile, TTree, TCut, gROOT
from os.path import isfile
from keras.models import Sequential
from keras.layers.core import Dense, Activation
from keras.regularizers import l2
from keras import initializations
from keras.optimizers import SGD
# Setup TMVA
TMVA.Tools.Instance()
TMVA.PyMethodBase.PyInitialize()
output = TFile.Open('TMVA.root', 'RECREATE')
factory = TMVA.Factory('TMVAClassification', output,
'!V:!Silent:Color:DrawProgressBar:Transformations=D,G:AnalysisType=multiclass')
# Load data
if not isfile('tmva_example_multiple_background.root'):
createDataMacro = str(gROOT.GetTutorialDir()) + '/tmva/createData.C'
print(createDataMacro)
gROOT.ProcessLine('.L {}'.format(createDataMacro))
gROOT.ProcessLine('create_MultipleBackground(4000)')
data = TFile.Open('tmva_example_multiple_background.root')
signal = data.Get('TreeS')
background0 = data.Get('TreeB0')
background1 = data.Get('TreeB1')
background2 = data.Get('TreeB2')
dataloader = TMVA.DataLoader('dataset')
for branch in signal.GetListOfBranches():
dataloader.AddVariable(branch.GetName())
dataloader.AddTree(signal, 'Signal')
dataloader.AddTree(background0, 'Background_0')
dataloader.AddTree(background1, 'Background_1')
dataloader.AddTree(background2, 'Background_2')
dataloader.PrepareTrainingAndTestTree(TCut(''),
'SplitMode=Random:NormMode=NumEvents:!V')
# Generate model
# Define initialization
def normal(shape, name=None):
return initializations.normal(shape, scale=0.05, name=name)
# Define model
model = Sequential()
model.add(Dense(32, init=normal, activation='relu', W_regularizer=l2(1e-5), input_dim=4))
#model.add(Dense(32, init=normal, activation='relu', W_regularizer=l2(1e-5)))
model.add(Dense(4, init=normal, activation='softmax'))
# Set loss and optimizer
model.compile(loss='categorical_crossentropy', optimizer=SGD(lr=0.01), metrics=['accuracy',])
# Store model to file
model.save('model.h5')
model.summary()
# Book methods
factory.BookMethod(dataloader, TMVA.Types.kFisher, 'Fisher',
'!H:!V:Fisher:VarTransform=D,G')
factory.BookMethod(dataloader, TMVA.Types.kPyKeras, "PyKeras",
'H:!V:VarTransform=D,G:FilenameModel=model.h5:NumEpochs=20:BatchSize=32')
# Run TMVA
factory.TrainAllMethods()
factory.TestAllMethods()
factory.EvaluateAllMethods()
| lgpl-2.1 |
mvaled/OpenUpgrade | addons/survey/controllers/main.py | 75 | 21298 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2013-Today OpenERP SA (<http://www.openerp.com>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import json
import logging
import werkzeug
import werkzeug.utils
from datetime import datetime
from math import ceil
from openerp import SUPERUSER_ID
from openerp.addons.web import http
from openerp.addons.web.http import request
from openerp.tools.misc import DEFAULT_SERVER_DATETIME_FORMAT as DTF, ustr
_logger = logging.getLogger(__name__)
class WebsiteSurvey(http.Controller):
## HELPER METHODS ##
def _check_bad_cases(self, cr, uid, request, survey_obj, survey, user_input_obj, context=None):
# In case of bad survey, redirect to surveys list
if survey_obj.exists(cr, SUPERUSER_ID, survey.id, context=context) == []:
return werkzeug.utils.redirect("/survey/")
# In case of auth required, block public user
if survey.auth_required and uid == request.website.user_id.id:
return request.website.render("survey.auth_required", {'survey': survey})
# In case of non open surveys
if survey.stage_id.closed:
return request.website.render("survey.notopen")
# If there is no pages
if not survey.page_ids:
return request.website.render("survey.nopages")
# Everything seems to be ok
return None
def _check_deadline(self, cr, uid, user_input, context=None):
'''Prevent opening of the survey if the deadline has passed
! This will NOT disallow access to users who have already partially filled the survey !'''
if user_input.deadline:
dt_deadline = datetime.strptime(user_input.deadline, DTF)
dt_now = datetime.now()
if dt_now > dt_deadline: # survey is not open anymore
return request.website.render("survey.notopen")
return None
## ROUTES HANDLERS ##
# Survey start
@http.route(['/survey/start/<model("survey.survey"):survey>',
'/survey/start/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def start_survey(self, survey, token=None, **post):
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Test mode
if token and token == "phantom":
_logger.info("[survey] Phantom mode")
user_input_id = user_input_obj.create(cr, uid, {'survey_id': survey.id, 'test_entry': True}, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
# END Test mode
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Manual surveying
if not token:
vals = {'survey_id': survey.id}
if request.website.user_id.id != uid:
vals['partner_id'] = request.registry['res.users'].browse(cr, uid, uid, context=context).partner_id.id
user_input_id = user_input_obj.create(cr, uid, vals, context=context)
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
else:
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)], context=context)[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not open expired survey
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # Intro page
data = {'survey': survey, 'page': None, 'token': user_input.token}
return request.website.render('survey.survey_init', data)
else:
return request.redirect('/survey/fill/%s/%s' % (survey.id, user_input.token))
# Survey displaying
@http.route(['/survey/fill/<model("survey.survey"):survey>/<string:token>',
'/survey/fill/<model("survey.survey"):survey>/<string:token>/<string:prev>'],
type='http', auth='public', website=True)
def fill_survey(self, survey, token, prev=None, **post):
'''Display and validates a survey'''
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
user_input_obj = request.registry['survey.user_input']
# Controls if the survey can be displayed
errpage = self._check_bad_cases(cr, uid, request, survey_obj, survey, user_input_obj, context=context)
if errpage:
return errpage
# Load the user_input
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', token)])[0]
except IndexError: # Invalid token
return request.website.render("website.403")
else:
user_input = user_input_obj.browse(cr, uid, [user_input_id], context=context)[0]
# Do not display expired survey (even if some pages have already been
# displayed -- There's a time for everything!)
errpage = self._check_deadline(cr, uid, user_input, context=context)
if errpage:
return errpage
# Select the right page
if user_input.state == 'new': # First page
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, 0, go_back=False, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
elif user_input.state == 'done': # Display success message
return request.website.render('survey.sfinished', {'survey': survey,
'token': token,
'user_input': user_input})
elif user_input.state == 'skip':
flag = (True if prev and prev == 'prev' else False)
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=flag, context=context)
#special case if you click "previous" from the last page, then leave the survey, then reopen it from the URL, avoid crash
if not page:
page, page_nr, last = survey_obj.next_page(cr, uid, user_input, user_input.last_displayed_page_id.id, go_back=True, context=context)
data = {'survey': survey, 'page': page, 'page_nr': page_nr, 'token': user_input.token}
if last:
data.update({'last': True})
return request.website.render('survey.survey', data)
else:
return request.website.render("website.403")
# AJAX prefilling of a survey
@http.route(['/survey/prefill/<model("survey.survey"):survey>/<string:token>',
'/survey/prefill/<model("survey.survey"):survey>/<string:token>/<model("survey.page"):page>'],
type='http', auth='public', website=True)
def prefill(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch previous answers
if page:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token), ('page_id', '=', page.id)], context=context)
else:
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Return non empty answers in a JSON compatible format
for answer in previous_answers:
if not answer.skipped:
answer_tag = '%s_%s_%s' % (answer.survey_id.id, answer.page_id.id, answer.question_id.id)
answer_value = None
if answer.answer_type == 'free_text':
answer_value = answer.value_free_text
elif answer.answer_type == 'text' and answer.question_id.type == 'textbox':
answer_value = answer.value_text
elif answer.answer_type == 'text' and answer.question_id.type != 'textbox':
# here come comment answers for matrices, simple choice and multiple choice
answer_tag = "%s_%s" % (answer_tag, 'comment')
answer_value = answer.value_text
elif answer.answer_type == 'number':
answer_value = answer.value_number.__str__()
elif answer.answer_type == 'date':
answer_value = answer.value_date
elif answer.answer_type == 'suggestion' and not answer.value_suggested_row:
answer_value = answer.value_suggested.id
elif answer.answer_type == 'suggestion' and answer.value_suggested_row:
answer_tag = "%s_%s" % (answer_tag, answer.value_suggested_row.id)
answer_value = answer.value_suggested.id
if answer_value:
dict_soft_update(ret, answer_tag, answer_value)
else:
_logger.warning("[survey] No answer has been found for question %s marked as non skipped" % answer_tag)
return json.dumps(ret)
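# Hedged illustration (the ids and values are assumptions): the prefill
# payload maps answer tags of the form "<survey>_<page>_<question>[_suffix]"
# to lists of previously stored values, e.g.
#
#   {"1_2_3": ["free text answer"], "1_2_4_comment": ["a comment"], "1_2_5_7": [9]}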
# AJAX scores loading for quiz correction mode
@http.route(['/survey/scores/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def get_scores(self, survey, token, page=None, **post):
cr, uid, context = request.cr, request.uid, request.context
user_input_line_obj = request.registry['survey.user_input_line']
ret = {}
# Fetch answers
ids = user_input_line_obj.search(cr, uid, [('user_input_id.token', '=', token)], context=context)
previous_answers = user_input_line_obj.browse(cr, uid, ids, context=context)
# Compute score for each question
for answer in previous_answers:
tmp_score = ret.get(answer.question_id.id, 0.0)
ret.update({answer.question_id.id: tmp_score + answer.quizz_mark})
return json.dumps(ret)
# AJAX submission of a page
@http.route(['/survey/submit/<model("survey.survey"):survey>'],
type='http', methods=['POST'], auth='public', website=True)
def submit(self, survey, **post):
_logger.debug('Incoming data: %s', post)
page_id = int(post['page_id'])
cr, uid, context = request.cr, request.uid, request.context
survey_obj = request.registry['survey.survey']
questions_obj = request.registry['survey.question']
questions_ids = questions_obj.search(cr, uid, [('page_id', '=', page_id)], context=context)
questions = questions_obj.browse(cr, uid, questions_ids, context=context)
# Answer validation
errors = {}
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
errors.update(questions_obj.validate_question(cr, uid, question, post, answer_tag, context=context))
ret = {}
if (len(errors) != 0):
# Return errors messages to webpage
ret['errors'] = errors
else:
# Store answers into database
user_input_obj = request.registry['survey.user_input']
user_input_line_obj = request.registry['survey.user_input_line']
try:
user_input_id = user_input_obj.search(cr, uid, [('token', '=', post['token'])], context=context)[0]
except KeyError: # Invalid token
return request.website.render("website.403")
for question in questions:
answer_tag = "%s_%s_%s" % (survey.id, page_id, question.id)
user_input_line_obj.save_lines(cr, uid, user_input_id, question, post, answer_tag, context=context)
user_input = user_input_obj.browse(cr, uid, user_input_id, context=context)
go_back = post['button_submit'] == 'previous'
next_page, _, last = survey_obj.next_page(cr, uid, user_input, page_id, go_back=go_back, context=context)
vals = {'last_displayed_page_id': page_id}
if next_page is None and not go_back:
vals.update({'state': 'done'})
else:
vals.update({'state': 'skip'})
user_input_obj.write(cr, uid, user_input_id, vals, context=context)
ret['redirect'] = '/survey/fill/%s/%s' % (survey.id, post['token'])
if go_back:
ret['redirect'] += '/prev'
return json.dumps(ret)
# Printing routes
@http.route(['/survey/print/<model("survey.survey"):survey>',
'/survey/print/<model("survey.survey"):survey>/<string:token>'],
type='http', auth='public', website=True)
def print_survey(self, survey, token=None, **post):
'''Display an survey in printable view; if <token> is set, it will
grab the answers of the user_input_id that has <token>.'''
return request.website.render('survey.survey_print',
{'survey': survey,
'token': token,
'page_nr': 0,
'quizz_correction': True if survey.quizz_mode and token else False})
@http.route(['/survey/results/<model("survey.survey"):survey>'],
type='http', auth='user', website=True)
def survey_reporting(self, survey, token=None, **post):
'''Display survey Results & Statistics for given survey.'''
result_template ='survey.result'
current_filters = []
filter_display_data = []
filter_finish = False
survey_obj = request.registry['survey.survey']
if not survey.user_input_ids or not [input_id.id for input_id in survey.user_input_ids if input_id.state != 'new']:
result_template = 'survey.no_result'
if 'finished' in post:
post.pop('finished')
filter_finish = True
if post or filter_finish:
filter_data = self.get_filter_data(post)
current_filters = survey_obj.filter_input_ids(request.cr, request.uid, survey, filter_data, filter_finish, context=request.context)
filter_display_data = survey_obj.get_filter_display_data(request.cr, request.uid, filter_data, context=request.context)
return request.website.render(result_template,
{'survey': survey,
'survey_dict': self.prepare_result_dict(survey, current_filters),
'page_range': self.page_range,
'current_filters': current_filters,
'filter_display_data': filter_display_data,
'filter_finish': filter_finish
})
# Quick retroengineering of what is injected into the template for now:
# (TODO: flatten and simplify this)
#
# survey: a browse record of the survey
# survey_dict: very messy dict containing all the info to display answers
# {'page_ids': [
#
# ...
#
# {'page': browse record of the page,
# 'question_ids': [
#
# ...
#
# {'graph_data': data to be displayed on the graph
# 'input_summary': number of answered, skipped...
# 'prepare_result': {
# answers displayed in the tables
# }
# 'question': browse record of the question_ids
# }
#
# ...
#
# ]
# }
#
# ...
#
# ]
# }
#
# page_range: pager helper function
# current_filters: a list of ids
# filter_display_data: [{'labels': ['a', 'b'], question_text} ... ]
# filter_finish: boolean => only finished surveys or not
#
def prepare_result_dict(self,survey, current_filters=None):
"""Returns dictionary having values for rendering template"""
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = {'page_ids': []}
for page in survey.page_ids:
page_dict = {'page': page, 'question_ids': []}
for question in page.question_ids:
question_dict = {'question':question, 'input_summary':survey_obj.get_input_summary(request.cr, request.uid, question, current_filters, context=request.context), 'prepare_result':survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context), 'graph_data': self.get_graph_data(question, current_filters)}
page_dict['question_ids'].append(question_dict)
result['page_ids'].append(page_dict)
return result
def get_filter_data(self, post):
"""Returns data used for filtering the result"""
filters = []
for ids in post:
#if user add some random data in query URI, ignore it
try:
row_id, answer_id = ids.split(',')
filters.append({'row_id': int(row_id), 'answer_id': int(answer_id)})
except:
return filters
return filters
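# Hedged example (the query keys are assumptions): a results URL such as
# /survey/results/1?5,12=on&finished would be parsed by get_filter_data()
# into [{'row_id': 5, 'answer_id': 12}], with the 'finished' key handled
# separately in survey_reporting().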
def page_range(self, total_record, limit):
'''Returns number of pages required for pagination'''
total = ceil(total_record / float(limit))
return range(1, int(total + 1))
def get_graph_data(self, question, current_filters=None):
'''Returns formatted data required by graph library on basis of filter'''
# TODO refactor this terrible method and merge it with prepare_result_dict
current_filters = current_filters if current_filters else []
survey_obj = request.registry['survey.survey']
result = []
if question.type == 'multiple_choice':
result.append({'key': ustr(question.question),
'values': survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
})
if question.type == 'simple_choice':
result = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)['answers']
if question.type == 'matrix':
data = survey_obj.prepare_result(request.cr, request.uid, question, current_filters, context=request.context)
for answer in data['answers']:
values = []
for row in data['rows']:
values.append({'text': data['rows'].get(row), 'count': data['result'].get((row, answer))})
result.append({'key': data['answers'].get(answer), 'values': values})
return json.dumps(result)
def dict_soft_update(dictionary, key, value):
''' Insert the pair <key>: <value> into the <dictionary>. If <key> is
already present, this function will append <value> to the list of
existing data (instead of erasing it) '''
if key in dictionary:
dictionary[key].append(value)
else:
dictionary.update({key: [value]})
| agpl-3.0 |
ksmaheshkumar/weevely3 | utils/code.py | 15 | 1763 | from core.loggers import log
from distutils import spawn
from core import messages
import subprocess
# Minify PHP code removing white spaces and comments.
# Returns None in case of errors.
def minify_php(original_code):
php_binary = spawn.find_executable('php')
if not php_binary:
log.debug(messages.utils_code.minify_php_missing_binary)
return None
try:
output = subprocess.check_output(
[
php_binary, '-r', """function is_label($str) {
return preg_match('~[a-zA-Z_\x7f-\xff][a-zA-Z0-9_\x7f-\xff]*+~',$str);
}
function get_tiny($snippet,
$remove_whitespace=TRUE,
$remove_comments=TRUE) {
//generate tokens from snippet
$tokens = token_get_all($snippet);
//replace all variables, remove whitespace, remove comments
$new_source = '';
foreach ($tokens as $i => $token) {
if(!is_array($token)) {
$new_source .= $token;
continue;
}
if($remove_comments) {
if(in_array($token[0],array(T_COMMENT,T_DOC_COMMENT))) {
continue;
}
}
if ($token[0] == T_WHITESPACE && $remove_whitespace) {
if (isset($tokens[$i-1]) && isset($tokens[$i+1]) && is_array($tokens[$i-1]) && is_array($tokens[$i+1]) && is_label($tokens[$i-1][1]) && is_label($tokens[$i+1][1])) {
$new_source .= ' ';
}
} elseif($token[0]==T_CASE) {
$new_source .= $token[1].' ';
} else {
$new_source .= $token[1];
}
}
return $new_source;
}
$d=<<<'EOD'
%s
EOD;
print(get_tiny($d));
""" % ('<?php %s ?>' % str(original_code)),
])
except Exception as e:
import traceback; log.debug(traceback.format_exc())
log.debug(messages.utils_code.minify_php_error_minifying)
return None
if len(output) < 8:
return None
return output[6:-2]
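# Hedged usage sketch (the input and output shown are assumptions; the exact
# result depends on the PHP tokenizer and requires a `php` binary on PATH):
#
#   minified = minify_php("echo   1 ;   // drop this comment")
#   # roughly -> "echo 1 ;"  (None is returned on any error)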
| gpl-3.0 |
fxfitz/ansible | lib/ansible/modules/network/avi/avi_cloudconnectoruser.py | 41 | 4186 | #!/usr/bin/python
#
# @author: Gaurav Rastogi ([email protected])
# Eric Anderson ([email protected])
# module_check: supported
# Avi Version: 17.1.1
#
# Copyright: (c) 2017 Gaurav Rastogi, <[email protected]>
# GNU General Public License v3.0+ (see COPYING or https://www.gnu.org/licenses/gpl-3.0.txt)
#
ANSIBLE_METADATA = {'metadata_version': '1.1',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: avi_cloudconnectoruser
author: Gaurav Rastogi ([email protected])
short_description: Module for setup of CloudConnectorUser Avi RESTful Object
description:
- This module is used to configure CloudConnectorUser object
- more examples at U(https://github.com/avinetworks/devops)
requirements: [ avisdk ]
version_added: "2.4"
options:
state:
description:
- The state that should be applied on the entity.
default: present
choices: ["absent", "present"]
avi_api_update_method:
description:
- Default method for object update is HTTP PUT.
- Setting to patch will override that behavior to use HTTP PATCH.
version_added: "2.5"
default: put
choices: ["put", "patch"]
avi_api_patch_op:
description:
- Patch operation to use when using avi_api_update_method as patch.
version_added: "2.5"
choices: ["add", "replace", "delete"]
azure_serviceprincipal:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
azure_userpass:
description:
- Field introduced in 17.2.1.
version_added: "2.5"
name:
description:
- Name of the object.
required: true
private_key:
description:
- Private_key of cloudconnectoruser.
public_key:
description:
- Public_key of cloudconnectoruser.
tenant_ref:
description:
- It is a reference to an object of type tenant.
url:
description:
- Avi controller URL of the object.
uuid:
description:
- Unique object identifier of the object.
extends_documentation_fragment:
- avi
'''
EXAMPLES = """
- name: Create a Cloud connector user that is used for integration into cloud platforms
avi_cloudconnectoruser:
controller: '{{ controller }}'
name: root
password: '{{ password }}'
private_key: |
-----BEGIN RSA PRIVATE KEY-----
-----END RSA PRIVATE KEY-----'
public_key: 'ssh-rsa ...'
tenant_ref: admin
username: '{{ username }}'
"""
RETURN = '''
obj:
description: CloudConnectorUser (api/cloudconnectoruser) object
returned: success, changed
type: dict
'''
from ansible.module_utils.basic import AnsibleModule
try:
from ansible.module_utils.network.avi.avi import (
avi_common_argument_spec, HAS_AVI, avi_ansible_api)
except ImportError:
HAS_AVI = False
def main():
argument_specs = dict(
state=dict(default='present',
choices=['absent', 'present']),
avi_api_update_method=dict(default='put',
choices=['put', 'patch']),
avi_api_patch_op=dict(choices=['add', 'replace', 'delete']),
azure_serviceprincipal=dict(type='dict',),
azure_userpass=dict(type='dict',),
name=dict(type='str', required=True),
private_key=dict(type='str', no_log=True,),
public_key=dict(type='str',),
tenant_ref=dict(type='str',),
url=dict(type='str',),
uuid=dict(type='str',),
)
argument_specs.update(avi_common_argument_spec())
module = AnsibleModule(
argument_spec=argument_specs, supports_check_mode=True)
if not HAS_AVI:
return module.fail_json(msg=(
'Avi python API SDK (avisdk>=17.1) is not installed. '
'For more details visit https://github.com/avinetworks/sdk.'))
return avi_ansible_api(module, 'cloudconnectoruser',
set(['private_key']))
if __name__ == '__main__':
main()
| gpl-3.0 |
2013Commons/hue | desktop/core/ext-py/requests-2.0.0/requests/packages/urllib3/_collections.py | 76 | 2898 | # urllib3/_collections.py
# Copyright 2008-2013 Andrey Petrov and contributors (see CONTRIBUTORS.txt)
#
# This module is part of urllib3 and is released under
# the MIT License: http://www.opensource.org/licenses/mit-license.php
from collections import MutableMapping
from threading import RLock
try: # Python 2.7+
from collections import OrderedDict
except ImportError:
from .packages.ordered_dict import OrderedDict
__all__ = ['RecentlyUsedContainer']
_Null = object()
class RecentlyUsedContainer(MutableMapping):
"""
Provides a thread-safe dict-like container which maintains up to
``maxsize`` keys while throwing away the least-recently-used keys beyond
``maxsize``.
:param maxsize:
Maximum number of recent elements to retain.
:param dispose_func:
Every time an item is evicted from the container,
``dispose_func(value)`` is called. Callback which will get called
"""
ContainerCls = OrderedDict
def __init__(self, maxsize=10, dispose_func=None):
self._maxsize = maxsize
self.dispose_func = dispose_func
self._container = self.ContainerCls()
self.lock = RLock()
def __getitem__(self, key):
# Re-insert the item, moving it to the end of the eviction line.
with self.lock:
item = self._container.pop(key)
self._container[key] = item
return item
def __setitem__(self, key, value):
evicted_value = _Null
with self.lock:
# Possibly evict the existing value of 'key'
evicted_value = self._container.get(key, _Null)
self._container[key] = value
# If we didn't evict an existing value, we might have to evict the
# least recently used item from the beginning of the container.
if len(self._container) > self._maxsize:
_key, evicted_value = self._container.popitem(last=False)
if self.dispose_func and evicted_value is not _Null:
self.dispose_func(evicted_value)
def __delitem__(self, key):
with self.lock:
value = self._container.pop(key)
if self.dispose_func:
self.dispose_func(value)
def __len__(self):
with self.lock:
return len(self._container)
def __iter__(self):
raise NotImplementedError('Iteration over this class is unlikely to be threadsafe.')
def clear(self):
with self.lock:
# Copy pointers to all values, then wipe the mapping
# under Python 2, this copies the list of values twice :-|
values = list(self._container.values())
self._container.clear()
if self.dispose_func:
for value in values:
self.dispose_func(value)
def keys(self):
with self.lock:
return self._container.keys()
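# Hedged usage sketch (the stored objects are assumptions): with maxsize=2 the
# least recently used key is evicted, and dispose_func sees the evicted value:
#
#   lru = RecentlyUsedContainer(maxsize=2, dispose_func=lambda conn: conn.close())
#   lru['a'] = conn_a
#   lru['b'] = conn_b
#   lru['a']               # touch 'a' so 'b' becomes least recently used
#   lru['c'] = conn_c      # evicts 'b' and calls dispose_func(conn_b)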
| apache-2.0 |
zengluyang/ns3-d2d | src/core/bindings/modulegen_customizations.py | 121 | 7665 | import re
import os
import sys
from pybindgen.typehandlers import base as typehandlers
from pybindgen import ReturnValue, Parameter
from pybindgen.cppmethod import CustomCppMethodWrapper, CustomCppConstructorWrapper
from pybindgen.typehandlers.codesink import MemoryCodeSink
from pybindgen.typehandlers import ctypeparser
from pybindgen import cppclass, param, retval
import warnings
from pybindgen.typehandlers.base import CodeGenerationError
class ArgvParam(Parameter):
"""
Converts a python list-of-strings argument to a pair of 'int argc,
char *argv[]' arguments to pass into C.
One Python argument becomes two C function arguments -> it's a miracle!
Note: this parameter type handler is not registered by any name;
must be used explicitly.
"""
DIRECTIONS = [Parameter.DIRECTION_IN]
CTYPES = []
def convert_c_to_python(self, wrapper):
raise NotImplementedError
def convert_python_to_c(self, wrapper):
py_name = wrapper.declarations.declare_variable('PyObject*', 'py_' + self.name)
argc_var = wrapper.declarations.declare_variable('int', 'argc')
name = wrapper.declarations.declare_variable('char**', self.name)
idx = wrapper.declarations.declare_variable('Py_ssize_t', 'idx')
wrapper.parse_params.add_parameter('O!', ['&PyList_Type', '&'+py_name], self.name)
#wrapper.before_call.write_error_check('!PyList_Check(%s)' % py_name) # XXX
wrapper.before_call.write_code("%s = (char **) malloc(sizeof(char*)*PyList_Size(%s));"
% (name, py_name))
wrapper.before_call.add_cleanup_code('free(%s);' % name)
wrapper.before_call.write_code('''
for (%(idx)s = 0; %(idx)s < PyList_Size(%(py_name)s); %(idx)s++)
{
''' % vars())
wrapper.before_call.sink.indent()
wrapper.before_call.write_code('''
PyObject *item = PyList_GET_ITEM(%(py_name)s, %(idx)s);
''' % vars())
#wrapper.before_call.write_error_check('item == NULL')
wrapper.before_call.write_error_check(
'\n'
'#if PY_VERSION_HEX >= 0x03000000\n'
'!PyUnicode_Check(item)\n'
'#else\n'
'!PyString_Check(item)\n'
'#endif\n',
failure_cleanup=('PyErr_SetString(PyExc_TypeError, '
'"argument %s must be a list of strings");') % self.name)
wrapper.before_call.write_code(
'#if PY_VERSION_HEX >= 0x03000000\n'
'{var}[{idx}] = PyUnicode_AsUTF8(item);\n'
'#else\n'
'{var}[{idx}] = PyString_AsString(item);\n'
'#endif\n'
.format(var=name, idx=idx))
wrapper.before_call.sink.unindent()
wrapper.before_call.write_code('}')
wrapper.before_call.write_code('%s = PyList_Size(%s);' % (argc_var, py_name))
wrapper.call_params.append(argc_var)
wrapper.call_params.append(name)
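# Hedged illustration (not in the original file; the argument list is an
# assumption): a wrapped method declared with ArgvParam(None, 'argv') accepts
# a Python list of strings and receives it as (argc, argv) on the C++ side,
# e.g. cmd.Parse(['prog', '--verbose=1']) for ns3::CommandLine::Parse.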
def Simulator_customizations(module):
Simulator = module['ns3::Simulator']
## Simulator::Schedule(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("Schedule", "_wrap_Simulator_Schedule",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleNow(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleNow", "_wrap_Simulator_ScheduleNow",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleWithContext(delay, callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleWithContext", "_wrap_Simulator_ScheduleWithContext",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
## Simulator::ScheduleDestroy(callback, ...user..args...)
Simulator.add_custom_method_wrapper("ScheduleDestroy", "_wrap_Simulator_ScheduleDestroy",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
Simulator.add_custom_method_wrapper("Run", "_wrap_Simulator_Run",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def CommandLine_customizations(module):
CommandLine = module['ns3::CommandLine']
CommandLine.add_method('Parse', None, [ArgvParam(None, 'argv')],
is_static=False)
CommandLine.add_custom_method_wrapper("AddValue", "_wrap_CommandLine_AddValue",
flags=["METH_VARARGS", "METH_KEYWORDS"])
def TypeId_customizations(module):
TypeId = module['ns3::TypeId']
TypeId.add_custom_method_wrapper("LookupByNameFailSafe", "_wrap_TypeId_LookupByNameFailSafe",
flags=["METH_VARARGS", "METH_KEYWORDS", "METH_STATIC"])
def add_std_ofstream(module):
module.add_include('<fstream>')
ostream = module.add_class('ostream', foreign_cpp_namespace='::std')
ostream.set_cannot_be_constructed("abstract base class")
ofstream = module.add_class('ofstream', foreign_cpp_namespace='::std', parent=ostream)
ofstream.add_enum('openmode', [
('app', 'std::ios_base::app'),
('ate', 'std::ios_base::ate'),
('binary', 'std::ios_base::binary'),
('in', 'std::ios_base::in'),
('out', 'std::ios_base::out'),
('trunc', 'std::ios_base::trunc'),
])
ofstream.add_constructor([Parameter.new("const char *", 'filename'),
Parameter.new("::std::ofstream::openmode", 'mode', default_value="std::ios_base::out")])
ofstream.add_method('close', None, [])
import pybindgen.typehandlers.base
for alias in "std::_Ios_Openmode", "std::ios::openmode":
pybindgen.typehandlers.base.param_type_matcher.add_type_alias(alias, "int")
for flag in 'in', 'out', 'ate', 'app', 'trunc', 'binary':
module.after_init.write_code('PyModule_AddIntConstant(m, (char *) "STD_IOS_%s", std::ios::%s);'
% (flag.upper(), flag))
def add_ipv4_address_tp_hash(module):
module.body.writeln('''
long
_ns3_Ipv4Address_tp_hash (PyObject *obj)
{
PyNs3Ipv4Address *addr = reinterpret_cast<PyNs3Ipv4Address *> (obj);
return static_cast<long> (ns3::Ipv4AddressHash () (*addr->obj));
}
''')
module.header.writeln('long _ns3_Ipv4Address_tp_hash (PyObject *obj);')
module['Ipv4Address'].pytype.slots['tp_hash'] = "_ns3_Ipv4Address_tp_hash"
def post_register_types(root_module):
Simulator_customizations(root_module)
CommandLine_customizations(root_module)
TypeId_customizations(root_module)
add_std_ofstream(root_module)
enabled_features = os.environ['NS3_ENABLED_FEATURES'].split(',')
if 'Threading' not in enabled_features:
for clsname in ['SystemThread', 'SystemMutex', 'SystemCondition', 'CriticalSection',
'SimpleRefCount< ns3::SystemThread, ns3::empty, ns3::DefaultDeleter<ns3::SystemThread> >']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
if 'RealTime' not in enabled_features:
for clsname in ['WallClockSynchronizer', 'RealtimeSimulatorImpl']:
root_module.classes.remove(root_module['ns3::%s' % clsname])
root_module.enums.remove(root_module['ns3::RealtimeSimulatorImpl::SynchronizationMode'])
root_module.after_init.write_code("PyEval_InitThreads();")
# these are already in the main script, so commented out here
# Object_customizations(root_module)
# Attribute_customizations(root_module)
#def post_register_functions(root_module):
# pass
| gpl-2.0 |
evansd/django | django/conf/locale/sk/formats.py | 65 | 1106 | # This file is distributed under the same license as the Django package.
#
# The *_FORMAT strings use the Django date format syntax,
# see http://docs.djangoproject.com/en/dev/ref/templates/builtins/#date
DATE_FORMAT = 'j. F Y'
TIME_FORMAT = 'G:i'
DATETIME_FORMAT = 'j. F Y G:i'
YEAR_MONTH_FORMAT = 'F Y'
MONTH_DAY_FORMAT = 'j. F'
SHORT_DATE_FORMAT = 'd.m.Y'
SHORT_DATETIME_FORMAT = 'd.m.Y G:i'
FIRST_DAY_OF_WEEK = 1 # Monday
# The *_INPUT_FORMATS strings use the Python strftime format syntax,
# see http://docs.python.org/library/datetime.html#strftime-strptime-behavior
DATE_INPUT_FORMATS = [
'%d.%m.%Y', '%d.%m.%y', # '25.10.2006', '25.10.06'
'%y-%m-%d', # '06-10-25'
# '%d. %B %Y', '%d. %b. %Y', # '25. October 2006', '25. Oct. 2006'
]
DATETIME_INPUT_FORMATS = [
'%d.%m.%Y %H:%M:%S', # '25.10.2006 14:30:59'
'%d.%m.%Y %H:%M:%S.%f', # '25.10.2006 14:30:59.000200'
'%d.%m.%Y %H:%M', # '25.10.2006 14:30'
'%d.%m.%Y', # '25.10.2006'
]
DECIMAL_SEPARATOR = ','
THOUSAND_SEPARATOR = '\xa0' # non-breaking space
NUMBER_GROUPING = 3
| bsd-3-clause |
watonyweng/horizon | openstack_dashboard/api/rest/network.py | 50 | 1431 |
# Copyright 2015, Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""API for the network abstraction APIs.
"""
from django.views import generic
from openstack_dashboard import api
from openstack_dashboard.api.rest import urls
from openstack_dashboard.api.rest import utils as rest_utils
@urls.register
class SecurityGroups(generic.View):
"""API for Network Abstraction
Handles differences between Nova and Neutron.
"""
url_regex = r'network/securitygroups/$'
@rest_utils.ajax()
def get(self, request):
"""Get a list of security groups.
The listing result is an object with property "items". Each item is
an image.
Example GET:
http://localhost/api/network/securitygroups
"""
security_groups = api.network.security_group_list(request)
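        # The resulting JSON body is shaped roughly like the sketch below; the
        # exact field names come from SecurityGroup.to_dict() and are illustrative:
        #   {"items": [{"id": "...", "name": "default", "description": "..."}]}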
return {'items': [sg.to_dict() for sg in security_groups]}
| apache-2.0 |
fener06/pyload | module/lib/Getch.py | 43 | 2048 | class Getch:
"""
Gets a single character from standard input. Does not echo to
the screen.
"""
def __init__(self):
try:
self.impl = _GetchWindows()
except ImportError:
try:
self.impl = _GetchMacCarbon()
except(AttributeError, ImportError):
self.impl = _GetchUnix()
def __call__(self): return self.impl()
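# Minimal usage sketch (illustrative, not part of the original module):
#
#     getch = Getch()
#     key = getch()  # blocks until a single key is pressed; nothing is echoed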
class _GetchUnix:
def __init__(self):
import tty
import sys
def __call__(self):
import sys
import tty
import termios
fd = sys.stdin.fileno()
old_settings = termios.tcgetattr(fd)
try:
tty.setraw(sys.stdin.fileno())
ch = sys.stdin.read(1)
finally:
termios.tcsetattr(fd, termios.TCSADRAIN, old_settings)
return ch
class _GetchWindows:
def __init__(self):
import msvcrt
def __call__(self):
import msvcrt
return msvcrt.getch()
class _GetchMacCarbon:
"""
A function which returns the current ASCII key that is down;
if no ASCII key is down, the null string is returned. The
page http://www.mactech.com/macintosh-c/chap02-1.html was
very helpful in figuring out how to do this.
"""
def __init__(self):
import Carbon
Carbon.Evt #see if it has this (in Unix, it doesn't)
def __call__(self):
import Carbon
if Carbon.Evt.EventAvail(0x0008)[0] == 0: # 0x0008 is the keyDownMask
return ''
else:
#
# The event contains the following info:
# (what,msg,when,where,mod)=Carbon.Evt.GetNextEvent(0x0008)[1]
#
# The message (msg) contains the ASCII char which is
# extracted with the 0x000000FF charCodeMask; this
# number is converted to an ASCII character with chr() and
# returned
#
(what, msg, when, where, mod) = Carbon.Evt.GetNextEvent(0x0008)[1]
return chr(msg) | gpl-3.0 |
VMatrixTeam/open-matrix | src/webservice/model/snippet/snippet.py | 1 | 2067 | import model.base
import tornado.gen
import json
from MySQLdb import escape_string
class Snippet(object):
@staticmethod
@tornado.gen.coroutine
def get_snippets(id_from, count):
result = yield model.MatrixDB.query("select * from snippet_snippet order by createAt desc limit {0}, {1}".format(id_from, count))
raise tornado.gen.Return(result)
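    # Usage sketch from a tornado handler coroutine (illustrative; the handler
    # and template name shown here are hypothetical):
    #
    #     @tornado.gen.coroutine
    #     def get(self):
    #         snippets = yield Snippet.get_snippets(0, 20)
    #         self.render("snippets.html", snippets=snippets)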
@staticmethod
@tornado.gen.coroutine
def get_snippet_count():
result = yield model.MatrixDB.get("select count(*) as count from snippet_snippet")
raise tornado.gen.Return(result.count)
@staticmethod
@tornado.gen.coroutine
def get_top_snippets_by_count(count):
result = yield model.MatrixDB.query("\
select ss.sid as sid, max(ss.author) as author, max(ss.createAt) as createAt, max(ss.content) as content, max(ss.code) as code, max(ss.pictures) as pictures, count(*) as count \
from snippet_snippet ss left join snippet_praise sp on ss.sid = sp.sid \
group by ss.sid \
order by count desc \
limit 0, {0}".format(count))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def get_snippets_by_uid_latest_count(user_id, count):
result = yield model.MatrixDB.query("select * from snippet_snippet where author = {0} order by createAt desc limit 0, {1}".format(user_id, count))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def get_snippet_by_sid(sid):
result = yield model.MatrixDB.get("select * from snippet_snippet where sid = {0}".format(sid))
raise tornado.gen.Return(result)
@staticmethod
@tornado.gen.coroutine
def create_snippet(content, code, pictures, user_id):
row_id = yield model.MatrixDB.execute("insert into snippet_snippet (author, createAt, content, code, pictures) values ({0}, now(), '{1}', '{2}', '{3}')".format(user_id, escape_string(content), escape_string(code), escape_string(json.dumps(pictures))))
raise tornado.gen.Return(row_id)
| mit |
tudorian/eden | controllers/setup.py | 23 | 9689 | # -*- coding: utf-8 -*-
"""
Setup Tool
"""
module = request.controller
resourcename = request.function
if not settings.has_module(module):
raise HTTP(404, body="Module disabled: %s" % module)
def index():
""" Show the index """
return dict()
# -----------------------------------------------------------------------------
def deployment():
from s3 import S3SQLCustomForm, S3SQLInlineComponent, S3SQLInlineLink
#s3db.configure("setup_deployment", onvalidation=validate_deployment)
    crud_form = S3SQLCustomForm("name",
                                "distro",
                                "remote_user",
                                "secret_key",
                                "access_key",
                                "private_key",
                                "webserver_type",
                                "db_type",
                                "db_password",
"repo_url",
"template",
S3SQLInlineComponent("server",
label = T("Server Role"),
fields = ["role", "host_ip", "hostname"],
),
S3SQLInlineComponent("instance",
label = T("Instance Type"),
fields = ["type", "url", "prepop_options"],
#filterby=dict(field = "type",
#options = ["prod", "demo"]
#),
multiple = False,
),
)
s3db.configure("setup_deployment", crud_form=crud_form)
def prep(r):
if r.method in ("create", None):
s3.scripts.append("/%s/static/scripts/S3/s3.setup.js" % appname)
if r.interactive:
if r.component and r.id:
# Set up the prepop options according to the template
prepop_options = s3db.setup_get_prepop_options(r.record.template)
db.setup_instance.prepop_options.requires = IS_IN_SET(prepop_options, multiple=True)
# No new servers once deployment is created
s3db.configure("setup_server",
insertable = False
)
                # Check that no scheduler task is pending
itable = db.setup_instance
sctable = db.scheduler_task
query = (itable.deployment_id == r.id) & \
((sctable.status != "COMPLETED") & \
(sctable.status != "FAILED"))
rows = db(query).select(itable.scheduler_id,
join = itable.on(itable.scheduler_id == sctable.id)
)
if rows:
# Disable creation of new instances
s3db.configure("setup_instance",
insertable = False
)
elif r.component.name == "instance":
if r.method in (None, "create"):
# Remove deployed instances from drop down
itable = db.setup_instance
sctable = db.scheduler_task
query = (itable.deployment_id == r.id) & \
(sctable.status == "COMPLETED")
rows = db(query).select(itable.type,
join = itable.on(itable.scheduler_id == sctable.id)
)
types = {1: "prod", 2: "test", 3: "demo", 4: "dev"}
for row in rows:
del types[row.type]
itable.type.requires = IS_IN_SET(types)
return True
s3.prep = prep
def postp(r, output):
if r.component is None:
if r.method in (None, "read") and r.id:
# get scheduler status for the last queued task
itable = db.setup_instance
sctable = db.scheduler_task
query = (db.setup_instance.deployment_id == r.id)
row = db(query).select(sctable.id,
sctable.status,
join = itable.on(itable.scheduler_id==sctable.id),
orderby = itable.scheduler_id
).last()
item_append = output["item"][0].append
item_append(TR(TD(LABEL("Status"), _class="w2p_fl")))
item_append(TR(TD(row.status)))
if row.status == "FAILED":
resource = s3db.resource("scheduler_run")
task = db(resource.table.task_id == row.id).select().first()
item_append(TR(TD(LABEL("Traceback"), _class="w2p_fl")))
item_append(TR(TD(task.traceback)))
item_append(TR(TD(LABEL("Output"), _class="w2p_fl")))
item_append(TR(TD(task.run_output)))
elif r.component.name == "instance":
if r.method in (None, "read"):
s3.actions = [{"url": URL(c = module,
f = "management",
vars = {"instance": "[id]",
"type": "clean",
"deployment": r.id,
}
),
"_class": "action-btn",
"label": "Clean"
},
{"url": URL(c = module,
f = "management",
vars = {"instance": "[id]",
"type": "eden",
"deployment": r.id
}
),
"_class": "action-btn",
"label": "Upgrade Eden"
},
]
return output
s3.postp = postp
return s3_rest_controller(rheader=s3db.setup_rheader)
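# Note: as a web2py/Eden controller function, deployment() is typically exposed
# at /<application>/setup/deployment, and s3_rest_controller() serves the
# standard S3 REST methods (list/create/read/update) for the resource.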
# -----------------------------------------------------------------------------
def management():
try:
_id = get_vars["instance"]
deployment_id = get_vars["deployment"]
_type = get_vars["type"]
except:
session.error = T("Record Not Found")
redirect(URL(c="setup", f="index"))
    # Check whether a management task is already running
exists = s3db.setup_management_exists(_type, _id, deployment_id)
if exists:
current.session.error = T("A management task is running for the instance")
redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
# Check if instance was successfully deployed
ttable = s3db.scheduler_task
itable = s3db.setup_instance
query = (ttable.status == "COMPLETED") & \
(itable.id == _id)
success = db(query).select(itable.id,
join=ttable.on(ttable.id == itable.scheduler_id),
limitby=(0, 1)).first()
if success:
# add the task to scheduler
current.s3task.schedule_task("setup_management",
args = [_type, _id, deployment_id],
timeout = 3600,
repeats = 1,
)
current.session.flash = T("Task queued in scheduler")
redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
else:
current.session.error = T("The instance was not successfully deployed")
redirect(URL(c="setup", f="deployment", args=[deployment_id, "instance"]))
# -----------------------------------------------------------------------------
def prepop_setting():
if request.ajax:
template = request.post_vars.get("template")
return json.dumps(s3db.setup_get_prepop_options(template))
# -----------------------------------------------------------------------------
def refresh():
try:
id = request.args[0]
except:
current.session.error = T("Record Not Found")
redirect(URL(c="setup", f="index"))
result = s3db.setup_refresh(id)
if result["success"]:
current.session.flash = result["msg"]
redirect(URL(c="setup", f=result["f"], args=result["args"]))
else:
current.session.error = result["msg"]
redirect(URL(c="setup", f=result["f"], args=result["args"]))
# -----------------------------------------------------------------------------
def upgrade_status():
if request.ajax:
_id = request.post_vars.get("id")
status = s3db.setup_upgrade_status(_id)
if status:
return json.dumps(status)
| mit |
sdague/home-assistant | homeassistant/components/flexit/climate.py | 16 | 5180 | """Platform for Flexit AC units with CI66 Modbus adapter."""
import logging
from typing import List
from pyflexit.pyflexit import pyflexit
import voluptuous as vol
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateEntity
from homeassistant.components.climate.const import (
HVAC_MODE_COOL,
SUPPORT_FAN_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.components.modbus.const import CONF_HUB, DEFAULT_HUB, MODBUS_DOMAIN
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_NAME,
CONF_SLAVE,
DEVICE_DEFAULT_NAME,
TEMP_CELSIUS,
)
import homeassistant.helpers.config_validation as cv
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_HUB, default=DEFAULT_HUB): cv.string,
vol.Required(CONF_SLAVE): vol.All(int, vol.Range(min=0, max=32)),
vol.Optional(CONF_NAME, default=DEVICE_DEFAULT_NAME): cv.string,
}
)
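# Example configuration.yaml entry (an illustrative sketch; the name and slave
# address below are placeholders, and the optional "hub" key falls back to the
# default Modbus hub):
#
#   climate:
#     - platform: flexit
#       name: Ventilation
#       slave: 21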
_LOGGER = logging.getLogger(__name__)
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_FAN_MODE
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Set up the Flexit Platform."""
modbus_slave = config.get(CONF_SLAVE)
name = config.get(CONF_NAME)
hub = hass.data[MODBUS_DOMAIN][config.get(CONF_HUB)]
add_entities([Flexit(hub, modbus_slave, name)], True)
class Flexit(ClimateEntity):
"""Representation of a Flexit AC unit."""
def __init__(self, hub, modbus_slave, name):
"""Initialize the unit."""
self._hub = hub
self._name = name
self._slave = modbus_slave
self._target_temperature = None
self._current_temperature = None
self._current_fan_mode = None
self._current_operation = None
self._fan_modes = ["Off", "Low", "Medium", "High"]
self._current_operation = None
self._filter_hours = None
self._filter_alarm = None
self._heat_recovery = None
self._heater_enabled = False
self._heating = None
self._cooling = None
self._alarm = False
self.unit = pyflexit(hub, modbus_slave)
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
def update(self):
"""Update unit attributes."""
if not self.unit.update():
_LOGGER.warning("Modbus read failed")
self._target_temperature = self.unit.get_target_temp
self._current_temperature = self.unit.get_temp
self._current_fan_mode = self._fan_modes[self.unit.get_fan_speed]
self._filter_hours = self.unit.get_filter_hours
# Mechanical heat recovery, 0-100%
self._heat_recovery = self.unit.get_heat_recovery
# Heater active 0-100%
self._heating = self.unit.get_heating
# Cooling active 0-100%
self._cooling = self.unit.get_cooling
# Filter alarm 0/1
self._filter_alarm = self.unit.get_filter_alarm
# Heater enabled or not. Does not mean it's necessarily heating
self._heater_enabled = self.unit.get_heater_enabled
# Current operation mode
self._current_operation = self.unit.get_operation
@property
def device_state_attributes(self):
"""Return device specific state attributes."""
return {
"filter_hours": self._filter_hours,
"filter_alarm": self._filter_alarm,
"heat_recovery": self._heat_recovery,
"heating": self._heating,
"heater_enabled": self._heater_enabled,
"cooling": self._cooling,
}
@property
def should_poll(self):
"""Return the polling state."""
return True
@property
def name(self):
"""Return the name of the climate device."""
return self._name
@property
def temperature_unit(self):
"""Return the unit of measurement."""
return TEMP_CELSIUS
@property
def current_temperature(self):
"""Return the current temperature."""
return self._current_temperature
@property
def target_temperature(self):
"""Return the temperature we try to reach."""
return self._target_temperature
@property
def hvac_mode(self):
"""Return current operation ie. heat, cool, idle."""
return self._current_operation
@property
def hvac_modes(self) -> List[str]:
"""Return the list of available hvac operation modes.
Need to be a subset of HVAC_MODES.
"""
return [HVAC_MODE_COOL]
@property
def fan_mode(self):
"""Return the fan setting."""
return self._current_fan_mode
@property
def fan_modes(self):
"""Return the list of available fan modes."""
return self._fan_modes
def set_temperature(self, **kwargs):
"""Set new target temperature."""
if kwargs.get(ATTR_TEMPERATURE) is not None:
self._target_temperature = kwargs.get(ATTR_TEMPERATURE)
self.unit.set_temp(self._target_temperature)
def set_fan_mode(self, fan_mode):
"""Set new fan mode."""
self.unit.set_fan_speed(self._fan_modes.index(fan_mode))
| apache-2.0 |
chemelnucfin/tensorflow | tensorflow/python/keras/distribute/multi_worker_callback_test.py | 2 | 23430 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests Keras multi worker callbacks."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import sys
import tempfile
import threading
from absl.testing import parameterized
# pylint: disable=g-direct-tensorflow-import
from tensorflow.python import keras
from tensorflow.python.distribute import collective_all_reduce_strategy as collective_strategy
from tensorflow.python.distribute import combinations
from tensorflow.python.distribute import distribute_coordinator as dc
from tensorflow.python.distribute import mirrored_strategy
from tensorflow.python.distribute import multi_worker_test_base as test_base
from tensorflow.python.distribute import multi_worker_util
from tensorflow.python.keras import backend as K
from tensorflow.python.keras import callbacks
from tensorflow.python.keras import testing_utils
from tensorflow.python.keras.distribute import multi_worker_testing_utils
from tensorflow.python.keras.distribute import multi_worker_training_state as training_state
from tensorflow.python.platform import test
def get_strategy_object(strategy_cls):
if strategy_cls == mirrored_strategy.MirroredStrategy:
return strategy_cls(mirrored_strategy.all_local_devices())
else:
# CollectiveAllReduceStrategy and ParameterServerStrategy.
return strategy_cls()
def generate_callback_test_function(custom_callable):
"""Generic template for callback tests using mnist synthetic dataset."""
@combinations.generate(
combinations.combine(
mode=['graph'],
strategy_cls=[collective_strategy.CollectiveAllReduceStrategy],
required_gpus=[0, 1],
file_format=['h5', 'tf']))
def test_template(self, strategy_cls, file_format):
num_workers = 2
num_epoch = 2
cluster_spec = test_base.create_cluster_spec(num_workers=num_workers)
self._barrier = dc._Barrier(2)
def _independent_worker_fn(*args, **kwargs): # pylint: disable=unused-argument
"""Simulates an Independent Worker inside of a thread."""
with test.mock.patch.object(dc, '_run_std_server',
self._make_mock_run_std_server()):
strategy = get_strategy_object(strategy_cls)
batch_size = 64
steps = 2
train_ds, _ = multi_worker_testing_utils.mnist_synthetic_dataset(
batch_size, steps)
with strategy.scope():
model = multi_worker_testing_utils.get_mnist_model((28, 28, 1))
custom_callable(
model,
self,
train_ds,
num_epoch,
steps,
strategy,
saving_filepath=kwargs['saving_filepath'],
barrier=kwargs['barrier'],
threading_local=kwargs['threading_local'])
    # Pass saving_filepath from the parent thread to ensure every worker has the
    # same filepath to save.
saving_filepath = os.path.join(self.get_temp_dir(),
'checkpoint.' + file_format)
barrier = dc._Barrier(2)
threading_local = threading.local()
threads = self.run_multiple_tasks_in_threads(
_independent_worker_fn,
cluster_spec,
saving_filepath=saving_filepath,
barrier=barrier,
threading_local=threading_local)
self.assertFalse(training_state.checkpoint_exists(saving_filepath))
threads_to_join = []
strategy = get_strategy_object(strategy_cls)
if strategy.extended.experimental_between_graph:
for ts in threads.values():
threads_to_join.extend(ts)
else:
threads_to_join = [threads['worker'][0]]
self.join_independent_workers(threads_to_join)
return test_template
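# The returned `test_template` is bound to concrete test method names (e.g.
# `test_chief_only_callback`) near the bottom of this file; each bound method
# spawns one thread per simulated worker and runs `custom_callable` inside it.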
class KerasMultiWorkerCallbackTest(test_base.IndependentWorkerTestBase,
parameterized.TestCase):
# The callables of the actual testing content to be run go below.
@staticmethod
def callableForTestChiefOnlyCallback(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
class ChiefOnly(keras.callbacks.Callback):
def __init__(self):
self._chief_worker_only = True
self.filtered_correctly = True
def on_train_begin(self, logs):
if not multi_worker_util.is_chief():
# Non-chief workers shouldn't run this callback.
self.filtered_correctly = False
cb = ChiefOnly()
model.fit(
x=train_ds, epochs=num_epoch, steps_per_epoch=steps, callbacks=[cb])
test_obj.assertTrue(cb.filtered_correctly)
@staticmethod
def callableForTestModelCheckpointSavesOnChiefButNotOtherwise(
model, test_obj, train_ds, num_epoch, steps, strategy, saving_filepath,
**kwargs):
extension = os.path.splitext(saving_filepath)[1]
    # Incorporate type/index information and the thread id in saving_filepath to
    # ensure every worker has a unique path. Note that in a normal use case the
    # saving_filepath would be the same for all workers, but we use different
    # ones here just to test that the chief saves the checkpoint while non-chief
    # workers do not.
saving_filepath = os.path.join(
test_obj.get_temp_dir(), 'checkpoint_%s_%d%s' %
(test_base.get_task_type(), test_base.get_task_index(), extension))
# The saving_filepath shouldn't exist at the beginning (as it's unique).
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[callbacks.ModelCheckpoint(filepath=saving_filepath)])
# If it's chief, the model should be saved; if not, the model shouldn't.
test_obj.assertEqual(
training_state.checkpoint_exists(saving_filepath), test_base.is_chief())
@staticmethod
def initialFitting(test_obj, model, train_ds, num_epoch, steps,
saving_filepath):
# The saving_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True)
])
# The saving_filepath should exist after fitting with callback. Both chief
# and non-chief worker should both see it exists (which was saved only by
# chief).
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath))
history_after_one_more_epoch = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
# The saving_filepath should continue to exist (if it did) after fitting
# without callback.
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath))
return saving_filepath, history_after_one_more_epoch
@staticmethod
def callableForTestLoadWeightFromModelCheckpoint(model, test_obj, train_ds,
num_epoch, steps, strategy,
saving_filepath, **kwargs):
filepaths = []
real_mkstemp = tempfile.mkstemp
def mocked_mkstemp():
# Only non-chief should call tempfile.mkstemp() inside fit() in sync
# training.
assert not test_base.is_chief()
file_handle, temp_file_name = real_mkstemp()
extension = os.path.splitext(saving_filepath)[1]
temp_filepath = temp_file_name + extension
filepaths.append(temp_filepath)
return file_handle, temp_file_name
# Mock tempfile.mkstemp() so the filepaths can be stored and verified later.
with test.mock.patch.object(tempfile, 'mkstemp', mocked_mkstemp):
saving_filepath, history_after_one_more_epoch = \
KerasMultiWorkerCallbackTest.initialFitting(
test_obj, model, train_ds, num_epoch, steps, saving_filepath)
with strategy.scope():
model.load_weights(saving_filepath)
history_after_loading_weight_and_one_more_epoch = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
test_obj.assertAllClose(
history_after_one_more_epoch.history,
history_after_loading_weight_and_one_more_epoch.history,
rtol=5e-5)
# Verify the temp files are indeed removed (no trace left behind).
for filepath in filepaths:
assert not training_state.checkpoint_exists(filepath)
@staticmethod
def callableForTestModelRestoreCallback(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
saving_filepath, history_after_one_more_epoch = \
KerasMultiWorkerCallbackTest.initialFitting(
test_obj, model, train_ds, num_epoch, steps, saving_filepath)
# The model should get restored to the weights previously saved, by
# adding a ModelCheckpoint callback (which results in a
# _ModelRestoreCallback being added), with load_weights_on_restart=True.
history_after_model_restoring_and_one_more_epoch = model.fit(
x=train_ds,
epochs=1,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=True,
load_weights_on_restart=True)
])
    # Assert that the history one epoch after the initial fitting and one epoch
    # after restoring are close.
test_obj.assertAllClose(
history_after_one_more_epoch.history,
history_after_model_restoring_and_one_more_epoch.history,
rtol=5e-5)
history_one_more_epoch_without_model_restoring = model.fit(
x=train_ds, epochs=1, steps_per_epoch=steps)
# Ensuring training for another epoch gives different result.
test_obj.assertNotAllClose(
history_after_model_restoring_and_one_more_epoch.history,
history_one_more_epoch_without_model_restoring.history,
rtol=5e-5)
@staticmethod
def callableForTestBackupModelRemoved(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
# `barrier` object needs to be passed in from parent
# thread so both threads refer to the same object.
barrier = kwargs['barrier']
num_epoch = 3
# Testing the backup filepath `multi_worker_training_state` uses.
_, backup_filepath = training_state._get_backup_filepath(saving_filepath)
# The backup_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(backup_filepath))
# Callback to verify that the backup file exists in the middle of training.
class BackupFilepathVerifyingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch > 1:
# Asserting that after the first two epochs, the backup file should
# exist.
test_obj.assertTrue(training_state.checkpoint_exists(backup_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True),
BackupFilepathVerifyingCallback()
])
# Sync on the two threads so we make sure the backup file is removed before
# we move on.
barrier.wait()
# The back up file should not exist at successful exit of `model.fit()`.
test_obj.assertFalse(training_state.checkpoint_exists(backup_filepath))
@staticmethod
def callableForTestBackupModelNotRemovedIfInterrupted(model, test_obj,
train_ds, num_epoch,
steps, strategy,
saving_filepath,
**kwargs):
# `barrier` object needs to be passed in from parent
# thread so both threads refer to the same object.
barrier = kwargs['barrier']
num_epoch = 4
# Testing the backup filepath `multi_worker_training_state` uses.
_, backup_filepath = training_state._get_backup_filepath(saving_filepath)
# The backup_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(backup_filepath))
# Callback to interrupt in the middle of training.
class InterruptingCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs=None):
if epoch == 2:
raise RuntimeError('Interrupting!')
try:
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True),
InterruptingCallback()
])
except RuntimeError as e:
      if 'Interrupting!' not in str(e):
raise
# Sync on the two threads.
barrier.wait()
# The back up file should exist after interruption of `model.fit()`.
test_obj.assertTrue(training_state.checkpoint_exists(backup_filepath))
@staticmethod
def callableForTestUnmatchedModelFile(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
# The saving_filepath shouldn't exist at the beginning.
test_obj.assertFalse(training_state.checkpoint_exists(saving_filepath))
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath, save_weights_only=True)
])
(train_ds, _), (_, _) = testing_utils.get_test_data(
train_samples=10, test_samples=10, input_shape=(3,), num_classes=2)
# Switch to a model of different structure.
with strategy.scope():
model = keras.models.Sequential()
model.add(keras.layers.Dense(5, input_dim=3, activation='relu'))
model.add(keras.layers.Dense(2, activation='softmax'))
model.compile(
loss='categorical_crossentropy', optimizer='rmsprop', metrics=['acc'])
test_obj.assertTrue(training_state.checkpoint_exists(saving_filepath))
if saving_filepath.endswith('.tf'):
test_obj.skipTest('Loading mismatched TF checkpoint would cause Fatal '
'Python error: Aborted. Skipping.')
# Unmatched format. Should raise ValueError.
with test_obj.assertRaisesRegexp(ValueError, 'Error loading file from'):
model.fit(
x=train_ds,
epochs=num_epoch,
batch_size=8,
callbacks=[
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=True,
load_weights_on_restart=True)
])
@staticmethod
def callableForTestReduceLROnPlateau(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
cbks = [
callbacks.ReduceLROnPlateau(
monitor='loss',
factor=0.1,
min_delta=1,
patience=1,
cooldown=5,
verbose=1)
]
# It is expected that the learning rate would drop by `factor` within
# 3 epochs with `min_delta=1`.
model.fit(x=train_ds, epochs=3, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.0001, atol=1e-8)
# It is expected that the learning rate would drop by another `factor`
# within 3 epochs with `min_delta=1`.
model.fit(x=train_ds, epochs=3, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.00001, atol=1e-8)
@staticmethod
def callableForTestEarlyStopping(model, test_obj, train_ds, num_epoch, steps,
strategy, saving_filepath, **kwargs):
class EpochCounterCallback(callbacks.Callback):
def on_epoch_begin(self, epoch, logs):
self.last_epoch = epoch
epoch_counter_cbk = EpochCounterCallback()
cbks = [
callbacks.EarlyStopping(
monitor='loss', min_delta=0.05, patience=1, verbose=1),
epoch_counter_cbk
]
# Empirically, it is expected that `model.fit()` would terminate around the
# 22th epoch. Asserting that it should have been stopped before the 50th
# epoch to avoid flakiness and be more predictable.
model.fit(x=train_ds, epochs=100, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertLess(epoch_counter_cbk.last_epoch, 50)
@staticmethod
def callableForTestLearningRateScheduler(model, test_obj, train_ds, num_epoch,
steps, strategy, saving_filepath,
**kwargs):
cbks = [
callbacks.LearningRateScheduler(
schedule=lambda x: 1. / (1. + x), verbose=1)
]
# It is expected that with `epochs=2`, the learning rate would drop to
# 1 / (1 + 2) = 0.5.
model.fit(x=train_ds, epochs=2, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.5, atol=1e-8)
# It is expected that with `epochs=4`, the learning rate would drop to
# 1 / (1 + 4) = 0.25.
model.fit(x=train_ds, epochs=4, steps_per_epoch=steps, callbacks=cbks)
test_obj.assertAllClose(
float(K.get_value(model.optimizer.lr)), 0.25, atol=1e-8)
# pylint: disable=g-doc-args
@staticmethod
def callableForTestIntermediateDirForFTAreRemoved(model, test_obj, train_ds,
num_epoch, steps, strategy,
saving_filepath, **kwargs):
"""Testing that the temporary directory are removed.
Some temporary directories are created for the purpose of fault tolerance.
    This test ensures that such directories have been removed by the time
    `model.fit()` finishes successfully.
"""
# `threading_local` and `barrier` objects have to be passed in from parent
# thread so both threads refer to the same object.
threading_local = kwargs['threading_local']
barrier = kwargs['barrier']
# Two threads will each has one copy of `temp_dirs_supposed_to_be_removed`
# list.
threading_local.temp_dirs_supposed_to_be_removed = []
callbacks_list = [
callbacks.ModelCheckpoint(
filepath=saving_filepath,
save_weights_only=True,
load_weights_on_restart=True),
]
# Keep the references to the real function objects.
real_os_path_join = os.path.join
real_tempfile_mkdtemp = tempfile.mkdtemp
# Make a `os.path.join` wrapper, which will be patched onto the real
# function, so the temporary directories can be tracked.
def wrapper_os_path_join(path, *paths):
join_result = real_os_path_join(path, *paths)
if len(paths) == 1 and paths[0] == 'backup':
threading_local.temp_dirs_supposed_to_be_removed.append(join_result)
return join_result
# Likewise for `tempfile.mkdtemp`.
def wrapper_tempfile_mkdtemp():
result = real_tempfile_mkdtemp()
threading_local.temp_dirs_supposed_to_be_removed.append(result)
return result
# Now the two threads must sync here: if they are out of sync, one thread
# can go ahead and patch `os.path.join` while the other has not even
# assigned the real `os.path.join` to `real_os_path_join`. If this happened,
# the "real" `os.path.join` the slower thread would see is actually the
# wrapper of the other.
barrier.wait()
# Note that `os.path.join` will respect the second patch (there are two
# patches because of the two threads). Both threads will refer to the same
# copy of `wrapper_os_path_join` because of the `barrier` preceding
# `model.fit()`. Likewise for `wrapper_tempfile_mkdtemp`.
os.path.join = wrapper_os_path_join
tempfile.mkdtemp = wrapper_tempfile_mkdtemp
barrier.wait()
model.fit(
x=train_ds,
epochs=num_epoch,
steps_per_epoch=steps,
callbacks=callbacks_list)
# Sync before un-patching to prevent either thread from accessing the real
# functions. Also to make sure `model.fit()` is done on both threads (so we
# can safely assert the directories are removed).
barrier.wait()
os.path.join = real_os_path_join
tempfile.mkdtemp = real_tempfile_mkdtemp
# There should be directory (names) that are supposed to be removed.
test_obj.assertTrue(threading_local.temp_dirs_supposed_to_be_removed)
for temp_dir_supposed_to_be_removed in (
threading_local.temp_dirs_supposed_to_be_removed):
# They should have been removed and thus don't exist.
test_obj.assertFalse(os.path.exists(temp_dir_supposed_to_be_removed))
# The actual testing methods go here.
test_chief_only_callback = generate_callback_test_function(
callableForTestChiefOnlyCallback.__func__)
test_model_checkpoint_saves_on_chief_but_not_otherwise = \
generate_callback_test_function(
callableForTestModelCheckpointSavesOnChiefButNotOtherwise.__func__)
test_load_weight_from_model_checkpoint = generate_callback_test_function(
callableForTestLoadWeightFromModelCheckpoint.__func__)
test_model_restore_callback = generate_callback_test_function(
callableForTestModelRestoreCallback.__func__)
test_unmatched_model_file = generate_callback_test_function(
callableForTestUnmatchedModelFile.__func__)
test_reduce_lr_on_plateau = generate_callback_test_function(
callableForTestReduceLROnPlateau.__func__)
test_early_stopping = generate_callback_test_function(
callableForTestEarlyStopping.__func__)
test_learning_rate_scheduler = generate_callback_test_function(
callableForTestLearningRateScheduler.__func__)
test_intermediate_dir_for_ft_are_removed = generate_callback_test_function(
callableForTestIntermediateDirForFTAreRemoved.__func__)
test_backup_model_removed = generate_callback_test_function(
callableForTestBackupModelRemoved.__func__)
test_backup_model_not_removed_if_interrupted = \
generate_callback_test_function(
callableForTestBackupModelNotRemovedIfInterrupted.__func__)
if __name__ == '__main__':
with test.mock.patch.object(sys, 'exit', os._exit):
test.main()
| apache-2.0 |
VWApplications/VWCourses | accounts/migrations/0004_auto_20170207_0350.py | 1 | 2273 | # -*- coding: utf-8 -*-
# Generated by Django 1.10.5 on 2017-02-07 03:50
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
dependencies = [
('accounts', '0003_auto_20170203_0220'),
]
operations = [
migrations.CreateModel(
name='Address',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('country', models.CharField(max_length=30, verbose_name='País')),
('city', models.CharField(max_length=30, verbose_name='Cidade')),
('state', models.CharField(max_length=30, verbose_name='Estado')),
('complement', models.CharField(blank=True, max_length=100, verbose_name='Complemento')),
],
options={
'verbose_name': 'Endereço',
'verbose_name_plural': 'Endereços',
},
),
migrations.CreateModel(
name='Phone',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('DDD', models.IntegerField(verbose_name='DDD')),
('number', models.IntegerField(verbose_name='Número')),
],
options={
'verbose_name': 'Telefone',
'verbose_name_plural': 'Telefones',
},
),
migrations.AlterField(
model_name='user',
name='name',
field=models.CharField(blank=True, max_length=30, verbose_name='Nome'),
),
migrations.AddField(
model_name='user',
name='address',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='accounts.Address', verbose_name='Endereço'),
preserve_default=False,
),
migrations.AddField(
model_name='user',
name='phone',
field=models.OneToOneField(default=1, on_delete=django.db.models.deletion.CASCADE, to='accounts.Phone', verbose_name='Telefone'),
preserve_default=False,
),
]
| mpl-2.0 |
alephu5/Soundbyte | environment/lib/python3.3/site-packages/tornado/template.py | 18 | 31175 | #!/usr/bin/env python
#
# Copyright 2009 Facebook
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""A simple template system that compiles templates to Python code.
Basic usage looks like::
t = template.Template("<html>{{ myvalue }}</html>")
print t.generate(myvalue="XXX")
`Loader` is a class that loads templates from a root directory and caches
the compiled templates::
loader = template.Loader("/home/btaylor")
print loader.load("test.html").generate(myvalue="XXX")
We compile all templates to raw Python. Error reporting is currently fairly
limited. Syntax for the templates::
### base.html
<html>
<head>
<title>{% block title %}Default title{% end %}</title>
</head>
<body>
<ul>
{% for student in students %}
{% block student %}
<li>{{ escape(student.name) }}</li>
{% end %}
{% end %}
</ul>
</body>
</html>
### bold.html
{% extends "base.html" %}
{% block title %}A bolder title{% end %}
{% block student %}
<li><span style="bold">{{ escape(student.name) }}</span></li>
{% end %}
Unlike most other template systems, we do not put any restrictions on the
expressions you can include in your statements. ``if`` and ``for`` blocks get
translated exactly into Python, so you can do complex expressions like::
{% for student in [p for p in people if p.student and p.age > 23] %}
<li>{{ escape(student.name) }}</li>
{% end %}
Translating directly to Python means you can apply functions to expressions
easily, like the ``escape()`` function in the examples above. You can pass
functions in to your template just like any other variable
(In a `.RequestHandler`, override `.RequestHandler.get_template_namespace`)::
### Python code
def add(x, y):
return x + y
template.execute(add=add)
### The template
{{ add(1, 2) }}
We provide the functions `escape() <.xhtml_escape>`, `.url_escape()`,
`.json_encode()`, and `.squeeze()` to all templates by default.
Typical applications do not create `Template` or `Loader` instances by
hand, but instead use the `~.RequestHandler.render` and
`~.RequestHandler.render_string` methods of
`tornado.web.RequestHandler`, which load templates automatically based
on the ``template_path`` `.Application` setting.
Variable names beginning with ``_tt_`` are reserved by the template
system and should not be used by application code.
Syntax Reference
----------------
Template expressions are surrounded by double curly braces: ``{{ ... }}``.
The contents may be any python expression, which will be escaped according
to the current autoescape setting and inserted into the output. Other
template directives use ``{% %}``. These tags may be escaped as ``{{!``
and ``{%!`` if you need to include a literal ``{{`` or ``{%`` in the output.
To comment out a section so that it is omitted from the output, surround it
with ``{# ... #}``.
``{% apply *function* %}...{% end %}``
Applies a function to the output of all template code between ``apply``
and ``end``::
{% apply linkify %}{{name}} said: {{message}}{% end %}
Note that as an implementation detail apply blocks are implemented
as nested functions and thus may interact strangely with variables
set via ``{% set %}``, or the use of ``{% break %}`` or ``{% continue %}``
within loops.
``{% autoescape *function* %}``
Sets the autoescape mode for the current file. This does not affect
other files, even those referenced by ``{% include %}``. Note that
autoescaping can also be configured globally, at the `.Application`
or `Loader`.::
{% autoescape xhtml_escape %}
{% autoescape None %}
``{% block *name* %}...{% end %}``
Indicates a named, replaceable block for use with ``{% extends %}``.
Blocks in the parent template will be replaced with the contents of
the same-named block in a child template.::
<!-- base.html -->
<title>{% block title %}Default title{% end %}</title>
<!-- mypage.html -->
{% extends "base.html" %}
{% block title %}My page title{% end %}
``{% comment ... %}``
A comment which will be removed from the template output. Note that
there is no ``{% end %}`` tag; the comment goes from the word ``comment``
to the closing ``%}`` tag.
``{% extends *filename* %}``
Inherit from another template. Templates that use ``extends`` should
contain one or more ``block`` tags to replace content from the parent
template. Anything in the child template not contained in a ``block``
tag will be ignored. For an example, see the ``{% block %}`` tag.
``{% for *var* in *expr* %}...{% end %}``
Same as the python ``for`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
``{% from *x* import *y* %}``
Same as the python ``import`` statement.
``{% if *condition* %}...{% elif *condition* %}...{% else %}...{% end %}``
Conditional statement - outputs the first section whose condition is
true. (The ``elif`` and ``else`` sections are optional)
``{% import *module* %}``
Same as the python ``import`` statement.
``{% include *filename* %}``
Includes another template file. The included file can see all the local
variables as if it were copied directly to the point of the ``include``
directive (the ``{% autoescape %}`` directive is an exception).
Alternately, ``{% module Template(filename, **kwargs) %}`` may be used
to include another template with an isolated namespace.
``{% module *expr* %}``
Renders a `~tornado.web.UIModule`. The output of the ``UIModule`` is
not escaped::
{% module Template("foo.html", arg=42) %}
``UIModules`` are a feature of the `tornado.web.RequestHandler`
class (and specifically its ``render`` method) and will not work
when the template system is used on its own in other contexts.
``{% raw *expr* %}``
Outputs the result of the given expression without autoescaping.
``{% set *x* = *y* %}``
Sets a local variable.
``{% try %}...{% except %}...{% finally %}...{% else %}...{% end %}``
Same as the python ``try`` statement.
``{% while *condition* %}... {% end %}``
Same as the python ``while`` statement. ``{% break %}`` and
``{% continue %}`` may be used inside the loop.
"""
from __future__ import absolute_import, division, print_function, with_statement
import datetime
import linecache
import os.path
import posixpath
import re
import threading
from tornado import escape
from tornado.log import app_log
from tornado.util import bytes_type, ObjectDict, exec_in, unicode_type
try:
from cStringIO import StringIO # py2
except ImportError:
from io import StringIO # py3
_DEFAULT_AUTOESCAPE = "xhtml_escape"
_UNSET = object()
class Template(object):
"""A compiled template.
We compile into Python from the given template_string. You can generate
the template from variables with generate().
"""
# note that the constructor's signature is not extracted with
# autodoc because _UNSET looks like garbage. When changing
# this signature update website/sphinx/template.rst too.
def __init__(self, template_string, name="<string>", loader=None,
compress_whitespace=None, autoescape=_UNSET):
self.name = name
if compress_whitespace is None:
compress_whitespace = name.endswith(".html") or \
name.endswith(".js")
if autoescape is not _UNSET:
self.autoescape = autoescape
elif loader:
self.autoescape = loader.autoescape
else:
self.autoescape = _DEFAULT_AUTOESCAPE
self.namespace = loader.namespace if loader else {}
reader = _TemplateReader(name, escape.native_str(template_string))
self.file = _File(self, _parse(reader, self))
self.code = self._generate_python(loader, compress_whitespace)
self.loader = loader
try:
# Under python2.5, the fake filename used here must match
# the module name used in __name__ below.
# The dont_inherit flag prevents template.py's future imports
# from being applied to the generated code.
self.compiled = compile(
escape.to_unicode(self.code),
"%s.generated.py" % self.name.replace('.', '_'),
"exec", dont_inherit=True)
except Exception:
formatted_code = _format_code(self.code).rstrip()
app_log.error("%s code:\n%s", self.name, formatted_code)
raise
def generate(self, **kwargs):
"""Generate this template with the given arguments."""
namespace = {
"escape": escape.xhtml_escape,
"xhtml_escape": escape.xhtml_escape,
"url_escape": escape.url_escape,
"json_encode": escape.json_encode,
"squeeze": escape.squeeze,
"linkify": escape.linkify,
"datetime": datetime,
"_tt_utf8": escape.utf8, # for internal use
"_tt_string_types": (unicode_type, bytes_type),
# __name__ and __loader__ allow the traceback mechanism to find
# the generated source code.
"__name__": self.name.replace('.', '_'),
"__loader__": ObjectDict(get_source=lambda name: self.code),
}
namespace.update(self.namespace)
namespace.update(kwargs)
exec_in(self.compiled, namespace)
execute = namespace["_tt_execute"]
# Clear the traceback module's cache of source data now that
# we've generated a new template (mainly for this module's
# unittests, where different tests reuse the same name).
linecache.clearcache()
return execute()
def _generate_python(self, loader, compress_whitespace):
buffer = StringIO()
try:
# named_blocks maps from names to _NamedBlock objects
named_blocks = {}
ancestors = self._get_ancestors(loader)
ancestors.reverse()
for ancestor in ancestors:
ancestor.find_named_blocks(loader, named_blocks)
writer = _CodeWriter(buffer, named_blocks, loader, ancestors[0].template,
compress_whitespace)
ancestors[0].generate(writer)
return buffer.getvalue()
finally:
buffer.close()
def _get_ancestors(self, loader):
ancestors = [self.file]
for chunk in self.file.body.chunks:
if isinstance(chunk, _ExtendsBlock):
if not loader:
raise ParseError("{% extends %} block found, but no "
"template loader")
template = loader.load(chunk.name, self.name)
ancestors.extend(template._get_ancestors(loader))
return ancestors
class BaseLoader(object):
"""Base class for template loaders.
You must use a template loader to use template constructs like
``{% extends %}`` and ``{% include %}``. The loader caches all
templates after they are loaded the first time.
"""
def __init__(self, autoescape=_DEFAULT_AUTOESCAPE, namespace=None):
"""``autoescape`` must be either None or a string naming a function
in the template namespace, such as "xhtml_escape".
"""
self.autoescape = autoescape
self.namespace = namespace or {}
self.templates = {}
# self.lock protects self.templates. It's a reentrant lock
# because templates may load other templates via `include` or
# `extends`. Note that thanks to the GIL this code would be safe
# even without the lock, but could lead to wasted work as multiple
# threads tried to compile the same template simultaneously.
self.lock = threading.RLock()
def reset(self):
"""Resets the cache of compiled templates."""
with self.lock:
self.templates = {}
def resolve_path(self, name, parent_path=None):
"""Converts a possibly-relative path to absolute (used internally)."""
raise NotImplementedError()
def load(self, name, parent_path=None):
"""Loads a template."""
name = self.resolve_path(name, parent_path=parent_path)
with self.lock:
if name not in self.templates:
self.templates[name] = self._create_template(name)
return self.templates[name]
def _create_template(self, name):
raise NotImplementedError()
class Loader(BaseLoader):
"""A template loader that loads from a single root directory.
"""
def __init__(self, root_directory, **kwargs):
super(Loader, self).__init__(**kwargs)
self.root = os.path.abspath(root_directory)
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
current_path = os.path.join(self.root, parent_path)
file_dir = os.path.dirname(os.path.abspath(current_path))
relative_path = os.path.abspath(os.path.join(file_dir, name))
if relative_path.startswith(self.root):
name = relative_path[len(self.root) + 1:]
return name
def _create_template(self, name):
path = os.path.join(self.root, name)
f = open(path, "rb")
template = Template(f.read(), name=name, loader=self)
f.close()
return template
class DictLoader(BaseLoader):
"""A template loader that loads from a dictionary."""
def __init__(self, dict, **kwargs):
super(DictLoader, self).__init__(**kwargs)
self.dict = dict
def resolve_path(self, name, parent_path=None):
if parent_path and not parent_path.startswith("<") and \
not parent_path.startswith("/") and \
not name.startswith("/"):
file_dir = posixpath.dirname(parent_path)
name = posixpath.normpath(posixpath.join(file_dir, name))
return name
def _create_template(self, name):
return Template(self.dict[name], name=name, loader=self)
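# Usage sketch for DictLoader (illustrative):
#
#     loader = DictLoader({
#         "base.html": "<title>{% block title %}{% end %}</title>",
#         "page.html": '{% extends "base.html" %}{% block title %}Hi{% end %}',
#     })
#     print(loader.load("page.html").generate())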
class _Node(object):
def each_child(self):
return ()
def generate(self, writer):
raise NotImplementedError()
def find_named_blocks(self, loader, named_blocks):
for child in self.each_child():
child.find_named_blocks(loader, named_blocks)
class _File(_Node):
def __init__(self, template, body):
self.template = template
self.body = body
self.line = 0
def generate(self, writer):
writer.write_line("def _tt_execute():", self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
def each_child(self):
return (self.body,)
class _ChunkList(_Node):
def __init__(self, chunks):
self.chunks = chunks
def generate(self, writer):
for chunk in self.chunks:
chunk.generate(writer)
def each_child(self):
return self.chunks
class _NamedBlock(_Node):
def __init__(self, name, body, template, line):
self.name = name
self.body = body
self.template = template
self.line = line
def each_child(self):
return (self.body,)
def generate(self, writer):
block = writer.named_blocks[self.name]
with writer.include(block.template, self.line):
block.body.generate(writer)
def find_named_blocks(self, loader, named_blocks):
named_blocks[self.name] = self
_Node.find_named_blocks(self, loader, named_blocks)
class _ExtendsBlock(_Node):
def __init__(self, name):
self.name = name
class _IncludeBlock(_Node):
def __init__(self, name, reader, line):
self.name = name
self.template_name = reader.name
self.line = line
def find_named_blocks(self, loader, named_blocks):
included = loader.load(self.name, self.template_name)
included.file.find_named_blocks(loader, named_blocks)
def generate(self, writer):
included = writer.loader.load(self.name, self.template_name)
with writer.include(included, self.line):
included.file.body.generate(writer)
class _ApplyBlock(_Node):
def __init__(self, method, line, body=None):
self.method = method
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
method_name = "_tt_apply%d" % writer.apply_counter
writer.apply_counter += 1
writer.write_line("def %s():" % method_name, self.line)
with writer.indent():
writer.write_line("_tt_buffer = []", self.line)
writer.write_line("_tt_append = _tt_buffer.append", self.line)
self.body.generate(writer)
writer.write_line("return _tt_utf8('').join(_tt_buffer)", self.line)
writer.write_line("_tt_append(_tt_utf8(%s(%s())))" % (
self.method, method_name), self.line)
class _ControlBlock(_Node):
def __init__(self, statement, line, body=None):
self.statement = statement
self.line = line
self.body = body
def each_child(self):
return (self.body,)
def generate(self, writer):
writer.write_line("%s:" % self.statement, self.line)
with writer.indent():
self.body.generate(writer)
# Just in case the body was empty
writer.write_line("pass", self.line)
class _IntermediateControlBlock(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
# In case the previous block was empty
writer.write_line("pass", self.line)
writer.write_line("%s:" % self.statement, self.line, writer.indent_size() - 1)
class _Statement(_Node):
def __init__(self, statement, line):
self.statement = statement
self.line = line
def generate(self, writer):
writer.write_line(self.statement, self.line)
class _Expression(_Node):
def __init__(self, expression, line, raw=False):
self.expression = expression
self.line = line
self.raw = raw
def generate(self, writer):
writer.write_line("_tt_tmp = %s" % self.expression, self.line)
writer.write_line("if isinstance(_tt_tmp, _tt_string_types):"
" _tt_tmp = _tt_utf8(_tt_tmp)", self.line)
writer.write_line("else: _tt_tmp = _tt_utf8(str(_tt_tmp))", self.line)
if not self.raw and writer.current_template.autoescape is not None:
# In python3 functions like xhtml_escape return unicode,
# so we have to convert to utf8 again.
writer.write_line("_tt_tmp = _tt_utf8(%s(_tt_tmp))" %
writer.current_template.autoescape, self.line)
writer.write_line("_tt_append(_tt_tmp)", self.line)
class _Module(_Expression):
def __init__(self, expression, line):
super(_Module, self).__init__("_tt_modules." + expression, line,
raw=True)
class _Text(_Node):
def __init__(self, value, line):
self.value = value
self.line = line
def generate(self, writer):
value = self.value
# Compress lots of white space to a single character. If the whitespace
# breaks a line, have it continue to break a line, but just with a
# single \n character
if writer.compress_whitespace and "<pre>" not in value:
value = re.sub(r"([\t ]+)", " ", value)
value = re.sub(r"(\s*\n\s*)", "\n", value)
if value:
writer.write_line('_tt_append(%r)' % escape.utf8(value), self.line)
class ParseError(Exception):
"""Raised for template syntax errors."""
pass
class _CodeWriter(object):
def __init__(self, file, named_blocks, loader, current_template,
compress_whitespace):
self.file = file
self.named_blocks = named_blocks
self.loader = loader
self.current_template = current_template
self.compress_whitespace = compress_whitespace
self.apply_counter = 0
self.include_stack = []
self._indent = 0
def indent_size(self):
return self._indent
def indent(self):
class Indenter(object):
def __enter__(_):
self._indent += 1
return self
def __exit__(_, *args):
assert self._indent > 0
self._indent -= 1
return Indenter()
def include(self, template, line):
self.include_stack.append((self.current_template, line))
self.current_template = template
class IncludeTemplate(object):
def __enter__(_):
return self
def __exit__(_, *args):
self.current_template = self.include_stack.pop()[0]
return IncludeTemplate()
def write_line(self, line, line_number, indent=None):
if indent is None:
indent = self._indent
line_comment = ' # %s:%d' % (self.current_template.name, line_number)
if self.include_stack:
ancestors = ["%s:%d" % (tmpl.name, lineno)
for (tmpl, lineno) in self.include_stack]
line_comment += ' (via %s)' % ', '.join(reversed(ancestors))
print(" " * indent + line + line_comment, file=self.file)
class _TemplateReader(object):
def __init__(self, name, text):
self.name = name
self.text = text
self.line = 1
self.pos = 0
def find(self, needle, start=0, end=None):
assert start >= 0, start
pos = self.pos
start += pos
if end is None:
index = self.text.find(needle, start)
else:
end += pos
assert end >= start
index = self.text.find(needle, start, end)
if index != -1:
index -= pos
return index
def consume(self, count=None):
if count is None:
count = len(self.text) - self.pos
newpos = self.pos + count
self.line += self.text.count("\n", self.pos, newpos)
s = self.text[self.pos:newpos]
self.pos = newpos
return s
def remaining(self):
return len(self.text) - self.pos
def __len__(self):
return self.remaining()
def __getitem__(self, key):
if type(key) is slice:
size = len(self)
start, stop, step = key.indices(size)
if start is None:
start = self.pos
else:
start += self.pos
if stop is not None:
stop += self.pos
return self.text[slice(start, stop, step)]
elif key < 0:
return self.text[key]
else:
return self.text[self.pos + key]
def __str__(self):
return self.text[self.pos:]
def _format_code(code):
lines = code.splitlines()
format = "%%%dd %%s\n" % len(repr(len(lines) + 1))
return "".join([format % (i + 1, line) for (i, line) in enumerate(lines)])
def _parse(reader, template, in_block=None, in_loop=None):
body = _ChunkList([])
while True:
# Find next template directive
curly = 0
while True:
curly = reader.find("{", curly)
if curly == -1 or curly + 1 == reader.remaining():
# EOF
if in_block:
raise ParseError("Missing {%% end %%} block for %s" %
in_block)
body.chunks.append(_Text(reader.consume(), reader.line))
return body
# If the first curly brace is not the start of a special token,
# start searching from the character after it
if reader[curly + 1] not in ("{", "%", "#"):
curly += 1
continue
# When there are more than 2 curlies in a row, use the
# innermost ones. This is useful when generating languages
# like latex where curlies are also meaningful
if (curly + 2 < reader.remaining() and
reader[curly + 1] == '{' and reader[curly + 2] == '{'):
curly += 1
continue
break
# Append any text before the special token
if curly > 0:
cons = reader.consume(curly)
body.chunks.append(_Text(cons, reader.line))
start_brace = reader.consume(2)
line = reader.line
# Template directives may be escaped as "{{!" or "{%!".
# In this case output the braces and consume the "!".
# This is especially useful in conjunction with jquery templates,
# which also use double braces.
if reader.remaining() and reader[0] == "!":
reader.consume(1)
body.chunks.append(_Text(start_brace, line))
continue
# Comment
if start_brace == "{#":
end = reader.find("#}")
if end == -1:
raise ParseError("Missing end expression #} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
continue
# Expression
if start_brace == "{{":
end = reader.find("}}")
if end == -1:
raise ParseError("Missing end expression }} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty expression on line %d" % line)
body.chunks.append(_Expression(contents, line))
continue
# Block
assert start_brace == "{%", start_brace
end = reader.find("%}")
if end == -1:
raise ParseError("Missing end block %%} on line %d" % line)
contents = reader.consume(end).strip()
reader.consume(2)
if not contents:
raise ParseError("Empty block tag ({%% %%}) on line %d" % line)
operator, space, suffix = contents.partition(" ")
suffix = suffix.strip()
# Intermediate ("else", "elif", etc) blocks
intermediate_blocks = {
"else": set(["if", "for", "while", "try"]),
"elif": set(["if"]),
"except": set(["try"]),
"finally": set(["try"]),
}
allowed_parents = intermediate_blocks.get(operator)
if allowed_parents is not None:
if not in_block:
raise ParseError("%s outside %s block" %
(operator, allowed_parents))
if in_block not in allowed_parents:
raise ParseError("%s block cannot be attached to %s block" % (operator, in_block))
body.chunks.append(_IntermediateControlBlock(contents, line))
continue
# End tag
elif operator == "end":
if not in_block:
raise ParseError("Extra {%% end %%} block on line %d" % line)
return body
elif operator in ("extends", "include", "set", "import", "from",
"comment", "autoescape", "raw", "module"):
if operator == "comment":
continue
if operator == "extends":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("extends missing file path on line %d" % line)
block = _ExtendsBlock(suffix)
elif operator in ("import", "from"):
if not suffix:
raise ParseError("import missing statement on line %d" % line)
block = _Statement(contents, line)
elif operator == "include":
suffix = suffix.strip('"').strip("'")
if not suffix:
raise ParseError("include missing file path on line %d" % line)
block = _IncludeBlock(suffix, reader, line)
elif operator == "set":
if not suffix:
raise ParseError("set missing statement on line %d" % line)
block = _Statement(suffix, line)
elif operator == "autoescape":
fn = suffix.strip()
if fn == "None":
fn = None
template.autoescape = fn
continue
elif operator == "raw":
block = _Expression(suffix, line, raw=True)
elif operator == "module":
block = _Module(suffix, line)
body.chunks.append(block)
continue
elif operator in ("apply", "block", "try", "if", "for", "while"):
# parse inner body recursively
if operator in ("for", "while"):
block_body = _parse(reader, template, operator, operator)
elif operator == "apply":
# apply creates a nested function so syntactically it's not
# in the loop.
block_body = _parse(reader, template, operator, None)
else:
block_body = _parse(reader, template, operator, in_loop)
if operator == "apply":
if not suffix:
raise ParseError("apply missing method name on line %d" % line)
block = _ApplyBlock(suffix, line, block_body)
elif operator == "block":
if not suffix:
raise ParseError("block missing name on line %d" % line)
block = _NamedBlock(suffix, block_body, template, line)
else:
block = _ControlBlock(contents, line, block_body)
body.chunks.append(block)
continue
elif operator in ("break", "continue"):
if not in_loop:
raise ParseError("%s outside %s block" % (operator, set(["for", "while"])))
body.chunks.append(_Statement(contents, line))
continue
else:
raise ParseError("unknown operator: %r" % operator)
| gpl-3.0 |
beezee/GAE-Django-base-app | django/core/management/commands/flush.py | 249 | 3437 | from optparse import make_option
from django.conf import settings
from django.db import connections, router, transaction, models, DEFAULT_DB_ALIAS
from django.core.management import call_command
from django.core.management.base import NoArgsCommand, CommandError
from django.core.management.color import no_style
from django.core.management.sql import sql_flush, emit_post_sync_signal
from django.utils.importlib import import_module
class Command(NoArgsCommand):
option_list = NoArgsCommand.option_list + (
make_option('--noinput', action='store_false', dest='interactive', default=True,
help='Tells Django to NOT prompt the user for input of any kind.'),
make_option('--database', action='store', dest='database',
default=DEFAULT_DB_ALIAS, help='Nominates a database to flush. '
'Defaults to the "default" database.'),
)
help = "Executes ``sqlflush`` on the current database."
def handle_noargs(self, **options):
db = options.get('database', DEFAULT_DB_ALIAS)
connection = connections[db]
verbosity = int(options.get('verbosity', 1))
interactive = options.get('interactive')
self.style = no_style()
# Import the 'management' module within each installed app, to register
# dispatcher events.
for app_name in settings.INSTALLED_APPS:
try:
import_module('.management', app_name)
except ImportError:
pass
sql_list = sql_flush(self.style, connection, only_django=True)
if interactive:
confirm = raw_input("""You have requested a flush of the database.
This will IRREVERSIBLY DESTROY all data currently in the %r database,
and return each table to the state it was in after syncdb.
Are you sure you want to do this?
Type 'yes' to continue, or 'no' to cancel: """ % connection.settings_dict['NAME'])
else:
confirm = 'yes'
if confirm == 'yes':
try:
cursor = connection.cursor()
for sql in sql_list:
cursor.execute(sql)
except Exception, e:
transaction.rollback_unless_managed(using=db)
raise CommandError("""Database %s couldn't be flushed. Possible reasons:
* The database isn't running or isn't configured correctly.
* At least one of the expected database tables doesn't exist.
* The SQL was invalid.
Hint: Look at the output of 'django-admin.py sqlflush'. That's the SQL this command wasn't able to run.
The full error: %s""" % (connection.settings_dict['NAME'], e))
transaction.commit_unless_managed(using=db)
# Emit the post sync signal. This allows individual
# applications to respond as if the database had been
# sync'd from scratch.
all_models = []
for app in models.get_apps():
all_models.extend([
m for m in models.get_models(app, include_auto_created=True)
if router.allow_syncdb(db, m)
])
emit_post_sync_signal(set(all_models), verbosity, interactive, db)
# Reinstall the initial_data fixture.
kwargs = options.copy()
kwargs['database'] = db
call_command('loaddata', 'initial_data', **kwargs)
else:
print "Flush cancelled."
| bsd-3-clause |
edulramirez/nova | nova/api/openstack/compute/server_diagnostics.py | 33 | 2605 | # Copyright 2011 OpenStack Foundation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from nova.api.openstack import common
from nova.api.openstack import extensions
from nova.api.openstack import wsgi
from nova import compute
from nova import exception
ALIAS = "os-server-diagnostics"
authorize = extensions.os_compute_authorizer(ALIAS)
class ServerDiagnosticsController(wsgi.Controller):
def __init__(self):
self.compute_api = compute.API(skip_policy_check=True)
@extensions.expected_errors((404, 409, 501))
def index(self, req, server_id):
context = req.environ["nova.context"]
authorize(context)
instance = common.get_instance(self.compute_api, context, server_id)
try:
# NOTE(gmann): To make V21 same as V2 API, this method will call
# 'get_diagnostics' instead of 'get_instance_diagnostics'.
# In future, 'get_instance_diagnostics' needs to be called to
            # provide VM diagnostics in a defined format for all drivers.
# BP - https://blueprints.launchpad.net/nova/+spec/v3-diagnostics.
return self.compute_api.get_diagnostics(context, instance)
except exception.InstanceInvalidState as state_error:
common.raise_http_conflict_for_instance_invalid_state(state_error,
'get_diagnostics', server_id)
except NotImplementedError:
common.raise_feature_not_supported()
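# Illustrative client-side sketch (not part of this module): the controller above
# backs "GET /servers/{server_id}/diagnostics". With python-novaclient it can be
# reached roughly like this; the client and server id below are hypothetical.
def _example_fetch_diagnostics(nova_client, server_id):
    # returns the raw diagnostics payload; its shape is driver-specific
    return nova_client.servers.diagnostics(server_id)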
class ServerDiagnostics(extensions.V21APIExtensionBase):
"""Allow Admins to view server diagnostics through server action."""
name = "ServerDiagnostics"
alias = ALIAS
version = 1
def get_resources(self):
parent_def = {'member_name': 'server', 'collection_name': 'servers'}
resources = [
extensions.ResourceExtension('diagnostics',
ServerDiagnosticsController(),
parent=parent_def)]
return resources
def get_controller_extensions(self):
return []
| apache-2.0 |
bgxavier/neutron | neutron/cmd/eventlet/plugins/hyperv_neutron_agent.py | 46 | 1411 | # Copyright 2013 Cloudbase Solutions SRL
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
from oslo_config import cfg
from oslo_log import log as logging
from neutron.agent.common import config
from neutron.common import config as common_config
from neutron.i18n import _LI
from neutron.plugins.hyperv.agent import config as hyperv_config
from neutron.plugins.hyperv.agent import l2_agent
LOG = logging.getLogger(__name__)
def register_options():
config.register_agent_state_opts_helper(cfg.CONF)
cfg.CONF.register_opts(hyperv_config.HYPERV_AGENT_OPTS, "AGENT")
def main():
register_options()
common_config.init(sys.argv[1:])
config.setup_logging()
hyperv_agent = l2_agent.HyperVNeutronAgent()
# Start everything.
LOG.info(_LI("Agent initialized successfully, now running... "))
hyperv_agent.daemon_loop()
| apache-2.0 |
arista-eosplus/ansible | lib/ansible/modules/network/aos/aos_blueprint.py | 78 | 9017 | #!/usr/bin/python
#
# (c) 2017 Apstra Inc, <[email protected]>
#
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
#
ANSIBLE_METADATA = {'metadata_version': '1.0',
'status': ['preview'],
'supported_by': 'community'}
DOCUMENTATION = '''
---
module: aos_blueprint
author: [email protected] (@jeremyschulman)
version_added: "2.3"
short_description: Manage AOS blueprint instance
description:
  - The Apstra AOS Blueprint module lets you manage your Blueprints easily. You can
    create and delete Blueprints by name or ID. You can also use it to retrieve
    all data from a blueprint. This module is idempotent
    and supports the I(check) mode. It uses the AOS REST API.
requirements:
- "aos-pyez >= 0.6.0"
options:
session:
description:
- An existing AOS session as obtained by M(aos_login) module.
required: true
name:
description:
- Name of the Blueprint to manage.
Only one of I(name) or I(id) can be set.
id:
description:
- AOS Id of the IP Pool to manage (can't be used to create a new IP Pool).
Only one of I(name) or I(id) can be set.
state:
description:
- Indicate what is the expected state of the Blueprint.
choices: ['present', 'absent', 'build-ready']
default: present
timeout:
description:
      - When I(state=build-ready), this value sets the timeout in seconds to wait before
declaring a failure.
default: 5
template:
description:
- When creating a blueprint, this value identifies, by name, an existing engineering
design template within the AOS-server.
reference_arch:
description:
- When creating a blueprint, this value identifies a known AOS reference
architecture value. I(Refer to AOS-server documentation for available values).
'''
EXAMPLES = '''
- name: Creating blueprint
aos_blueprint:
session: "{{ aos_session }}"
name: "my-blueprint"
template: "my-template"
reference_arch: two_stage_l3clos
state: present
- name: Access a blueprint and get content
aos_blueprint:
session: "{{ aos_session }}"
name: "{{ blueprint_name }}"
template: "{{ blueprint_template }}"
state: present
register: bp
- name: Delete a blueprint
aos_blueprint:
session: "{{ aos_session }}"
name: "my-blueprint"
state: absent
- name: Await blueprint build-ready, and obtain contents
aos_blueprint:
session: "{{ aos_session }}"
name: "{{ blueprint_name }}"
state: build-ready
register: bp
'''
RETURNS = '''
name:
description: Name of the Blueprint
returned: always
type: str
sample: My-Blueprint
id:
description: AOS unique ID assigned to the Blueprint
returned: always
type: str
sample: fcc4ac1c-e249-4fe7-b458-2138bfb44c06
value:
description: Information about the Blueprint
returned: always
type: dict
sample: {'...'}
contents:
description: Blueprint contents data-dictionary
returned: always
type: dict
sample: { ... }
build_errors:
  description: When state='build-ready' and build errors exist, this contains the list of errors
returned: only when build-ready returns fail
type: list
sample: [{...}, {...}]
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.aos import get_aos_session, check_aos_version, find_collection_item
from ansible.module_utils.pycompat24 import get_exception
def create_blueprint(module, aos, name):
margs = module.params
try:
template_id = aos.DesignTemplates[margs['template']].id
# Create a new Object based on the name
blueprint = aos.Blueprints[name]
blueprint.create(template_id, reference_arch=margs['reference_arch'])
except:
exc = get_exception()
if 'UNPROCESSABLE ENTITY' in exc.message:
msg = 'likely missing dependencies'
else:
msg = exc.message
module.fail_json(msg="Unable to create blueprint: %s" % exc.message)
return blueprint
def ensure_absent(module, aos, blueprint):
if blueprint.exists is False:
module.exit_json(changed=False)
else:
if not module.check_mode:
try:
blueprint.delete()
except:
exc = get_exception()
module.fail_json(msg='Unable to delete blueprint, %s' % exc.message)
module.exit_json(changed=True,
id=blueprint.id,
name=blueprint.name)
def ensure_present(module, aos, blueprint):
margs = module.params
if blueprint.exists:
module.exit_json(changed=False,
id=blueprint.id,
name=blueprint.name,
value=blueprint.value,
contents=blueprint.contents)
else:
# Check if template is defined and is valid
if margs['template'] is None:
module.fail_json(msg="You must define a 'template' name to create a new blueprint, currently missing")
elif aos.DesignTemplates.find(label=margs['template']) is None:
module.fail_json(msg="You must define a Valid 'template' name to create a new blueprint, %s is not valid" % margs['template'])
# Check if reference_arch
if margs['reference_arch'] is None:
module.fail_json(msg="You must define a 'reference_arch' to create a new blueprint, currently missing")
if not module.check_mode:
blueprint = create_blueprint(module, aos, margs['name'])
module.exit_json(changed=True,
id=blueprint.id,
name=blueprint.name,
value=blueprint.value,
contents=blueprint.contents)
else:
module.exit_json(changed=True,
name=margs['name'])
def ensure_build_ready(module, aos, blueprint):
margs = module.params
if not blueprint.exists:
module.fail_json(msg='blueprint %s does not exist' % blueprint.name)
if blueprint.await_build_ready(timeout=margs['timeout']*1000):
module.exit_json(contents=blueprint.contents)
else:
        module.fail_json(msg='blueprint %s has build errors' % blueprint.name,
                         build_errors=blueprint.build_errors)
def aos_blueprint(module):
margs = module.params
try:
aos = get_aos_session(module, margs['session'])
except:
module.fail_json(msg="Unable to login to the AOS server")
item_name = False
item_id = False
if margs['name'] is not None:
item_name = margs['name']
elif margs['id'] is not None:
item_id = margs['id']
#----------------------------------------------------
# Find Object if available based on ID or Name
#----------------------------------------------------
try:
my_blueprint = find_collection_item(aos.Blueprints,
item_name=item_name,
item_id=item_id)
except:
module.fail_json(msg="Unable to find the Blueprint based on name or ID, something went wrong")
#----------------------------------------------------
# Proceed based on State value
#----------------------------------------------------
if margs['state'] == 'absent':
ensure_absent(module, aos, my_blueprint)
elif margs['state'] == 'present':
ensure_present(module, aos, my_blueprint)
elif margs['state'] == 'build-ready':
ensure_build_ready(module, aos, my_blueprint)
def main():
module = AnsibleModule(
argument_spec=dict(
session=dict(required=True, type="dict"),
name=dict(required=False),
            id=dict(required=False),
state=dict(choices=[
'present', 'absent', 'build-ready'],
default='present'),
timeout=dict(type="int", default=5),
template=dict(required=False),
reference_arch=dict(required=False)
),
mutually_exclusive = [('name', 'id')],
required_one_of=[('name', 'id')],
supports_check_mode=True
)
# Check if aos-pyez is present and match the minimum version
check_aos_version(module, '0.6.0')
aos_blueprint(module)
if __name__ == '__main__':
main()
| gpl-3.0 |
PokeHunterProject/pogom-updated | pogom/pgoapi/protos/POGOProtos/Networking/Responses/SetBuddyPokemonResponse_pb2.py | 6 | 4585 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# source: POGOProtos/Networking/Responses/SetBuddyPokemonResponse.proto
import sys
_b=sys.version_info[0]<3 and (lambda x:x) or (lambda x:x.encode('latin1'))
from google.protobuf import descriptor as _descriptor
from google.protobuf import message as _message
from google.protobuf import reflection as _reflection
from google.protobuf import symbol_database as _symbol_database
from google.protobuf import descriptor_pb2
# @@protoc_insertion_point(imports)
_sym_db = _symbol_database.Default()
from POGOProtos.Data import BuddyPokemon_pb2 as POGOProtos_dot_Data_dot_BuddyPokemon__pb2
DESCRIPTOR = _descriptor.FileDescriptor(
name='POGOProtos/Networking/Responses/SetBuddyPokemonResponse.proto',
package='POGOProtos.Networking.Responses',
syntax='proto3',
serialized_pb=_b('\n=POGOProtos/Networking/Responses/SetBuddyPokemonResponse.proto\x12\x1fPOGOProtos.Networking.Responses\x1a\"POGOProtos/Data/BuddyPokemon.proto\"\x95\x02\n\x17SetBuddyPokemonResponse\x12O\n\x06result\x18\x01 \x01(\x0e\x32?.POGOProtos.Networking.Responses.SetBuddyPokemonResponse.Result\x12\x34\n\rupdated_buddy\x18\x02 \x01(\x0b\x32\x1d.POGOProtos.Data.BuddyPokemon\"s\n\x06Result\x12\t\n\x05UNEST\x10\x00\x12\x0b\n\x07SUCCESS\x10\x01\x12\x1a\n\x16\x45RROR_POKEMON_DEPLOYED\x10\x02\x12\x1b\n\x17\x45RROR_POKEMON_NOT_OWNED\x10\x03\x12\x18\n\x14\x45RROR_POKEMON_IS_EGG\x10\x04\x62\x06proto3')
,
dependencies=[POGOProtos_dot_Data_dot_BuddyPokemon__pb2.DESCRIPTOR,])
_sym_db.RegisterFileDescriptor(DESCRIPTOR)
_SETBUDDYPOKEMONRESPONSE_RESULT = _descriptor.EnumDescriptor(
name='Result',
full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse.Result',
filename=None,
file=DESCRIPTOR,
values=[
_descriptor.EnumValueDescriptor(
name='UNEST', index=0, number=0,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='SUCCESS', index=1, number=1,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_DEPLOYED', index=2, number=2,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_NOT_OWNED', index=3, number=3,
options=None,
type=None),
_descriptor.EnumValueDescriptor(
name='ERROR_POKEMON_IS_EGG', index=4, number=4,
options=None,
type=None),
],
containing_type=None,
options=None,
serialized_start=297,
serialized_end=412,
)
_sym_db.RegisterEnumDescriptor(_SETBUDDYPOKEMONRESPONSE_RESULT)
_SETBUDDYPOKEMONRESPONSE = _descriptor.Descriptor(
name='SetBuddyPokemonResponse',
full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse',
filename=None,
file=DESCRIPTOR,
containing_type=None,
fields=[
_descriptor.FieldDescriptor(
name='result', full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse.result', index=0,
number=1, type=14, cpp_type=8, label=1,
has_default_value=False, default_value=0,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
_descriptor.FieldDescriptor(
name='updated_buddy', full_name='POGOProtos.Networking.Responses.SetBuddyPokemonResponse.updated_buddy', index=1,
number=2, type=11, cpp_type=10, label=1,
has_default_value=False, default_value=None,
message_type=None, enum_type=None, containing_type=None,
is_extension=False, extension_scope=None,
options=None),
],
extensions=[
],
nested_types=[],
enum_types=[
_SETBUDDYPOKEMONRESPONSE_RESULT,
],
options=None,
is_extendable=False,
syntax='proto3',
extension_ranges=[],
oneofs=[
],
serialized_start=135,
serialized_end=412,
)
_SETBUDDYPOKEMONRESPONSE.fields_by_name['result'].enum_type = _SETBUDDYPOKEMONRESPONSE_RESULT
_SETBUDDYPOKEMONRESPONSE.fields_by_name['updated_buddy'].message_type = POGOProtos_dot_Data_dot_BuddyPokemon__pb2._BUDDYPOKEMON
_SETBUDDYPOKEMONRESPONSE_RESULT.containing_type = _SETBUDDYPOKEMONRESPONSE
DESCRIPTOR.message_types_by_name['SetBuddyPokemonResponse'] = _SETBUDDYPOKEMONRESPONSE
SetBuddyPokemonResponse = _reflection.GeneratedProtocolMessageType('SetBuddyPokemonResponse', (_message.Message,), dict(
DESCRIPTOR = _SETBUDDYPOKEMONRESPONSE,
__module__ = 'POGOProtos.Networking.Responses.SetBuddyPokemonResponse_pb2'
# @@protoc_insertion_point(class_scope:POGOProtos.Networking.Responses.SetBuddyPokemonResponse)
))
_sym_db.RegisterMessage(SetBuddyPokemonResponse)
# @@protoc_insertion_point(module_scope)
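# Illustrative round-trip sketch (appended for clarity, not emitted by protoc):
# build a response, serialize it, and parse it back with the standard
# google.protobuf message API.
def _demo_roundtrip():
    msg = SetBuddyPokemonResponse()
    msg.result = SetBuddyPokemonResponse.SUCCESS
    data = msg.SerializeToString()  # bytes on the wire
    parsed = SetBuddyPokemonResponse.FromString(data)
    return parsed.result  # == SetBuddyPokemonResponse.SUCCESS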
| mit |
mrshelly/openerp71313 | openerp/tools/float_utils.py | 151 | 9267 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Business Applications
# Copyright (c) 2011 OpenERP S.A. <http://openerp.com>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import math
def _float_check_precision(precision_digits=None, precision_rounding=None):
assert (precision_digits is not None or precision_rounding is not None) and \
not (precision_digits and precision_rounding),\
"exactly one of precision_digits and precision_rounding must be specified"
if precision_digits is not None:
return 10 ** -precision_digits
return precision_rounding
def float_round(value, precision_digits=None, precision_rounding=None):
"""Return ``value`` rounded to ``precision_digits``
decimal digits, minimizing IEEE-754 floating point representation
errors, and applying HALF-UP (away from zero) tie-breaking rule.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
:param float value: the value to round
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:return: rounded float
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
if rounding_factor == 0 or value == 0: return 0.0
# NORMALIZE - ROUND - DENORMALIZE
# In order to easily support rounding to arbitrary 'steps' (e.g. coin values),
# we normalize the value before rounding it as an integer, and de-normalize
# after rounding: e.g. float_round(1.3, precision_rounding=.5) == 1.5
# TIE-BREAKING: HALF-UP
# We want to apply HALF-UP tie-breaking rules, i.e. 0.5 rounds away from 0.
# Due to IEE754 float/double representation limits, the approximation of the
# real value may be slightly below the tie limit, resulting in an error of
# 1 unit in the last place (ulp) after rounding.
# For example 2.675 == 2.6749999999999998.
# To correct this, we add a very small epsilon value, scaled to the
# the order of magnitude of the value, to tip the tie-break in the right
# direction.
# Credit: discussion with OpenERP community members on bug 882036
normalized_value = value / rounding_factor # normalize
epsilon_magnitude = math.log(abs(normalized_value), 2)
epsilon = 2**(epsilon_magnitude-53)
normalized_value += cmp(normalized_value,0) * epsilon
rounded_value = round(normalized_value) # round to integer
result = rounded_value * rounding_factor # de-normalize
return result
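# Quick illustrative checks (not part of the original module): HALF-UP rounding
# keeps the classic 2.675 case from drifting down, and precision_rounding
# supports non-decimal steps such as coin values.
def _float_round_examples():
    # 2.675 is stored as 2.67499999...; HALF-UP still rounds it to 2.68
    assert abs(float_round(2.675, precision_digits=2) - 2.68) < 1e-9
    # arbitrary rounding steps, e.g. 0.5 "coin" values
    assert float_round(1.3, precision_rounding=0.5) == 1.5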
def float_is_zero(value, precision_digits=None, precision_rounding=None):
"""Returns true if ``value`` is small enough to be treated as
zero at the given precision (smaller than the corresponding *epsilon*).
The precision (``10**-precision_digits`` or ``precision_rounding``)
is used as the zero *epsilon*: values less than that are considered
to be zero.
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value: value to compare with the precision's zero
:return: True if ``value`` is considered zero
"""
epsilon = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
return abs(float_round(value, precision_rounding=epsilon)) < epsilon
def float_compare(value1, value2, precision_digits=None, precision_rounding=None):
"""Compare ``value1`` and ``value2`` after rounding them according to the
given precision. A value is considered lower/greater than another value
if their rounded value is different. This is not the same as having a
non-zero difference!
Precision must be given by ``precision_digits`` or ``precision_rounding``,
not both!
Example: 1.432 and 1.431 are equal at 2 digits precision,
so this method would return 0
However 0.006 and 0.002 are considered different (this method returns 1)
because they respectively round to 0.01 and 0.0, even though
0.006-0.002 = 0.004 which would be considered zero at 2 digits precision.
Warning: ``float_is_zero(value1-value2)`` is not equivalent to
``float_compare(value1,value2) == 0``, as the former will round after
computing the difference, while the latter will round before, giving
different results for e.g. 0.006 and 0.002 at 2 digits precision.
:param int precision_digits: number of fractional digits to round to.
:param float precision_rounding: decimal number representing the minimum
non-zero value at the desired precision (for example, 0.01 for a
2-digit precision).
:param float value1: first value to compare
:param float value2: second value to compare
:return: (resp.) -1, 0 or 1, if ``value1`` is (resp.) lower than,
equal to, or greater than ``value2``, at the given precision.
"""
rounding_factor = _float_check_precision(precision_digits=precision_digits,
precision_rounding=precision_rounding)
value1 = float_round(value1, precision_rounding=rounding_factor)
value2 = float_round(value2, precision_rounding=rounding_factor)
delta = value1 - value2
if float_is_zero(delta, precision_rounding=rounding_factor): return 0
return -1 if delta < 0.0 else 1
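# Illustrative comparison sketch (not part of the original module): rounding
# happens *before* comparing, so 0.006 and 0.002 differ at 2 digits even though
# their difference (0.004) would itself round to zero.
def _float_compare_examples():
    assert float_compare(1.432, 1.431, precision_digits=2) == 0
    assert float_compare(0.006, 0.002, precision_digits=2) == 1
    assert not float_is_zero(0.01, precision_digits=2)
    assert float_is_zero(0.002, precision_digits=2)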
def float_repr(value, precision_digits):
"""Returns a string representation of a float with the
the given number of fractional digits. This should not be
used to perform a rounding operation (this is done via
:meth:`~.float_round`), but only to produce a suitable
string representation for a float.
:param int precision_digits: number of fractional digits to
include in the output
"""
    # Can't use str() here because it seems to have an intrinsic
# rounding to 12 significant digits, which causes a loss of
# precision. e.g. str(123456789.1234) == str(123456789.123)!!
return ("%%.%sf" % precision_digits) % value
if __name__ == "__main__":
import time
start = time.time()
count = 0
errors = 0
def try_round(amount, expected, precision_digits=3):
global count, errors; count += 1
result = float_repr(float_round(amount, precision_digits=precision_digits),
precision_digits=precision_digits)
if result != expected:
errors += 1
print '###!!! Rounding error: got %s , expected %s' % (result, expected)
# Extended float range test, inspired by Cloves Almeida's test on bug #882036.
fractions = [.0, .015, .01499, .675, .67499, .4555, .4555, .45555]
expecteds = ['.00', '.02', '.01', '.68', '.67', '.46', '.456', '.4556']
precisions = [2, 2, 2, 2, 2, 2, 3, 4]
for magnitude in range(7):
for i in xrange(len(fractions)):
frac, exp, prec = fractions[i], expecteds[i], precisions[i]
for sign in [-1,1]:
for x in xrange(0,10000,97):
n = x * 10**magnitude
f = sign * (n + frac)
f_exp = ('-' if f != 0 and sign == -1 else '') + str(n) + exp
try_round(f, f_exp, precision_digits=prec)
stop = time.time()
# Micro-bench results:
# 47130 round calls in 0.422306060791 secs, with Python 2.6.7 on Core i3 x64
# with decimal:
# 47130 round calls in 6.612248100021 secs, with Python 2.6.7 on Core i3 x64
print count, " round calls, ", errors, "errors, done in ", (stop-start), 'secs'
| agpl-3.0 |
ric2b/Vivaldi-browser | chromium/third_party/blink/web_tests/external/wpt/tools/third_party/pytest/src/pytest.py | 34 | 1712 | # PYTHON_ARGCOMPLETE_OK
"""
pytest: unit and functional testing with Python.
"""
# else we are imported
from _pytest.config import main, UsageError, cmdline, hookspec, hookimpl
from _pytest.fixtures import fixture, yield_fixture
from _pytest.assertion import register_assert_rewrite
from _pytest.freeze_support import freeze_includes
from _pytest import __version__
from _pytest.debugging import pytestPDB as __pytestPDB
from _pytest.recwarn import warns, deprecated_call
from _pytest.outcomes import fail, skip, importorskip, exit, xfail
from _pytest.mark import MARK_GEN as mark, param
from _pytest.main import Session
from _pytest.nodes import Item, Collector, File
from _pytest.fixtures import fillfixtures as _fillfuncargs
from _pytest.python import Module, Class, Instance, Function, Generator
from _pytest.python_api import approx, raises
set_trace = __pytestPDB.set_trace
__all__ = [
"main",
"UsageError",
"cmdline",
"hookspec",
"hookimpl",
"__version__",
"register_assert_rewrite",
"freeze_includes",
"set_trace",
"warns",
"deprecated_call",
"fixture",
"yield_fixture",
"fail",
"skip",
"xfail",
"importorskip",
"exit",
"mark",
"param",
"approx",
"_fillfuncargs",
"Item",
"File",
"Collector",
"Session",
"Module",
"Class",
"Instance",
"Function",
"Generator",
"raises",
]
if __name__ == "__main__":
# if run as a script or by 'python -m pytest'
# we trigger the below "else" condition by the following import
import pytest
raise SystemExit(pytest.main())
else:
from _pytest.compat import _setup_collect_fakemodule
_setup_collect_fakemodule()
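# Illustrative sketch (not part of pytest itself): how the helpers re-exported
# above are typically used inside a test module.
def _example_test_using_public_api():
    assert 0.1 + 0.2 == approx(0.3)
    with raises(ZeroDivisionError):
        1 / 0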
| bsd-3-clause |
BT-ojossen/odoo | addons/share/__init__.py | 448 | 1093 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import ir_model
import res_users
import wizard
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
variac/bazel | src/test/py/bazel/test_base.py | 1 | 10391 | # pylint: disable=g-bad-file-header
# Copyright 2017 The Bazel Authors. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import locale
import os
import subprocess
import sys
import tempfile
import unittest
class Error(Exception):
"""Base class for errors in this module."""
pass
class ArgumentError(Error):
"""A function received a bad argument."""
pass
class EnvVarUndefinedError(Error):
"""An expected environment variable is not defined."""
def __init__(self, name):
Error.__init__(self, 'Environment variable "%s" is not defined' % name)
class TestBase(unittest.TestCase):
_runfiles = None
_temp = None
_tests_root = None
_test_cwd = None
def setUp(self):
unittest.TestCase.setUp(self)
if self._runfiles is None:
self._runfiles = TestBase._LoadRunfiles()
test_tmpdir = TestBase._CreateDirs(TestBase.GetEnv('TEST_TMPDIR'))
self._tests_root = TestBase._CreateDirs(
os.path.join(test_tmpdir, 'tests_root'))
self._temp = TestBase._CreateDirs(os.path.join(test_tmpdir, 'tmp'))
self._test_cwd = tempfile.mkdtemp(dir=self._tests_root)
os.chdir(self._test_cwd)
def AssertExitCode(self, actual_exit_code, expected_exit_code, stderr_lines):
"""Assert that `actual_exit_code` == `expected_exit_code`."""
if actual_exit_code != expected_exit_code:
self.fail('\n'.join([
'Bazel exited with %d (expected %d), stderr:' % (actual_exit_code,
expected_exit_code),
'(start stderr)----------------------------------------',
] + (stderr_lines or []) + [
'(end stderr)------------------------------------------',
]))
@staticmethod
def GetEnv(name, default=None):
"""Returns environment variable `name`.
Args:
name: string; name of the environment variable
default: anything; return this value if the envvar is not defined
Returns:
string, the envvar's value if defined, or `default` if the envvar is not
defined but `default` is
Raises:
EnvVarUndefinedError: if `name` is not a defined envvar and `default` is
None
"""
value = os.getenv(name, '__undefined_envvar__')
if value == '__undefined_envvar__':
if default:
return default
raise EnvVarUndefinedError(name)
return value
@staticmethod
def IsWindows():
"""Returns true if the current platform is Windows."""
return os.name == 'nt'
def Path(self, path):
"""Returns the absolute path of `path` relative to the scratch directory.
Args:
path: string; a path, relative to the test's scratch directory,
e.g. "foo/bar/BUILD"
Returns:
an absolute path
Raises:
ArgumentError: if `path` is absolute or contains uplevel references
"""
if os.path.isabs(path) or '..' in path:
raise ArgumentError(('path="%s" may not be absolute and may not contain '
'uplevel references') % path)
return os.path.join(self._tests_root, path)
def Rlocation(self, runfile):
"""Returns the absolute path to a runfile."""
if TestBase.IsWindows():
return self._runfiles.get(runfile)
else:
return os.path.join(self._runfiles, runfile)
def ScratchDir(self, path):
"""Creates directories under the test's scratch directory.
Args:
path: string; a path, relative to the test's scratch directory,
e.g. "foo/bar"
Raises:
ArgumentError: if `path` is absolute or contains uplevel references
IOError: if an I/O error occurs
"""
if not path:
return
abspath = self.Path(path)
if os.path.exists(abspath):
if os.path.isdir(abspath):
return
raise IOError('"%s" (%s) exists and is not a directory' % (path, abspath))
os.makedirs(abspath)
def ScratchFile(self, path, lines=None):
"""Creates a file under the test's scratch directory.
Args:
path: string; a path, relative to the test's scratch directory,
e.g. "foo/bar/BUILD"
lines: [string]; the contents of the file (newlines are added
automatically)
Returns:
The absolute path of the scratch file.
Raises:
ArgumentError: if `path` is absolute or contains uplevel references
IOError: if an I/O error occurs
"""
if not path:
return
abspath = self.Path(path)
if os.path.exists(abspath) and not os.path.isfile(abspath):
raise IOError('"%s" (%s) exists and is not a file' % (path, abspath))
self.ScratchDir(os.path.dirname(path))
with open(abspath, 'w') as f:
if lines:
for l in lines:
f.write(l)
f.write('\n')
return abspath
def RunBazel(self, args, env_remove=None, env_add=None):
"""Runs "bazel <args>", waits for it to exit.
Args:
args: [string]; flags to pass to bazel (e.g. ['--batch', 'build', '//x'])
env_remove: set(string); optional; environment variables to NOT pass to
Bazel
env_add: set(string); optional; environment variables to pass to
Bazel, won't be removed by env_remove.
Returns:
(int, [string], [string]) tuple: exit code, stdout lines, stderr lines
"""
return self.RunProgram([
self.Rlocation('io_bazel/src/bazel'),
'--bazelrc=/dev/null',
'--nomaster_bazelrc',
] + args, env_remove, env_add)
def RunProgram(self, args, env_remove=None, env_add=None):
"""Runs a program (args[0]), waits for it to exit.
Args:
args: [string]; the args to run; args[0] should be the program itself
env_remove: set(string); optional; environment variables to NOT pass to
the program
env_add: set(string); optional; environment variables to pass to
the program, won't be removed by env_remove.
Returns:
(int, [string], [string]) tuple: exit code, stdout lines, stderr lines
"""
with tempfile.TemporaryFile(dir=self._test_cwd) as stdout:
with tempfile.TemporaryFile(dir=self._test_cwd) as stderr:
proc = subprocess.Popen(
args,
stdout=stdout,
stderr=stderr,
cwd=self._test_cwd,
env=self._EnvMap(env_remove, env_add))
exit_code = proc.wait()
stdout.seek(0)
stdout_lines = [
l.decode(locale.getpreferredencoding()).strip()
for l in stdout.readlines()
]
stderr.seek(0)
stderr_lines = [
l.decode(locale.getpreferredencoding()).strip()
for l in stderr.readlines()
]
return exit_code, stdout_lines, stderr_lines
def _EnvMap(self, env_remove=None, env_add=None):
"""Returns the environment variable map to run Bazel or other programs."""
if TestBase.IsWindows():
result = []
if sys.version_info.major == 3:
# Python 3.2 has os.listdir
result = [
n for n in os.listdir('c:\\program files\\java')
if n.startswith('jdk')
]
else:
# Python 2.7 has os.path.walk
def _Visit(result, _, names):
result.extend(n for n in names if n.startswith('jdk'))
while names:
names.pop()
os.path.walk('c:\\program files\\java\\', _Visit, result)
env = {
'SYSTEMROOT': TestBase.GetEnv('SYSTEMROOT'),
# TODO(laszlocsomor): Let Bazel pass BAZEL_SH and JAVA_HOME to tests
# and use those here instead of hardcoding paths.
'JAVA_HOME': 'c:\\program files\\java\\' + sorted(result)[-1],
'BAZEL_SH': 'c:\\tools\\msys64\\usr\\bin\\bash.exe',
# TODO(pcloudy): Remove this after no longer need to debug
# https://github.com/bazelbuild/bazel/issues/3273
'CC_CONFIGURE_DEBUG': '1'
}
# TODO(pcloudy): Remove these hardcoded paths after resolving
# https://github.com/bazelbuild/bazel/issues/3273
env['BAZEL_VC'] = 'visual-studio-not-found'
for p in [
(r'C:\Program Files (x86)\Microsoft Visual Studio\2017\Professional'
r'\VC'),
r'C:\Program Files (x86)\Microsoft Visual Studio\2017\BuildTools\VC',
r'C:\Program Files (x86)\Microsoft Visual Studio 14.0\VC'
]:
if os.path.exists(p):
env['BAZEL_VC'] = p
break
else:
env = {'HOME': os.path.join(self._temp, 'home')}
env['PATH'] = TestBase.GetEnv('PATH')
# The inner Bazel must know that it's running as part of a test (so that it
# uses --max_idle_secs=15 by default instead of 3 hours, etc.), and it knows
# that by checking for TEST_TMPDIR.
env['TEST_TMPDIR'] = TestBase.GetEnv('TEST_TMPDIR')
env['TMP'] = self._temp
if env_remove:
for e in env_remove:
del env[e]
if env_add:
for e in env_add:
env[e] = env_add[e]
return env
@staticmethod
def _LoadRunfiles():
"""Loads the runfiles manifest from ${TEST_SRCDIR}/MANIFEST.
Only necessary to use on Windows, where runfiles are not symlinked in to the
runfiles directory, but are written to a MANIFEST file instead.
Returns:
on Windows: {string: string} dictionary, keys are runfiles-relative paths,
values are absolute paths that the runfiles entry is mapped to;
on other platforms: string; value of $TEST_SRCDIR
"""
test_srcdir = TestBase.GetEnv('TEST_SRCDIR')
if not TestBase.IsWindows():
return test_srcdir
result = {}
with open(os.path.join(test_srcdir, 'MANIFEST'), 'r') as f:
for l in f:
tokens = l.strip().split(' ')
if len(tokens) == 2:
result[tokens[0]] = tokens[1]
return result
@staticmethod
def _CreateDirs(path):
if not os.path.exists(path):
os.makedirs(path)
elif not os.path.isdir(path):
os.remove(path)
os.makedirs(path)
return path
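# Illustrative sketch (not part of the original file): a minimal test built on
# TestBase. The workspace layout and target below are hypothetical, and running
# it requires the bazel runfile that RunBazel() resolves via Rlocation().
class _ExampleUsageTest(TestBase):
  def testBuildsHypotheticalGenrule(self):
    self.ScratchFile('WORKSPACE')
    self.ScratchFile('foo/BUILD', [
        'genrule(',
        '    name = "hello",',
        '    outs = ["hello.txt"],',
        '    cmd = "echo hello > $@",',
        ')',
    ])
    exit_code, _, stderr = self.RunBazel(['build', '//foo:hello'])
    self.AssertExitCode(exit_code, 0, stderr)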
| apache-2.0 |
tetherless-world/graphene | whyis/task_utils.py | 1 | 1444 | from celery.task.control import inspect
# def setup_task(service):
# service.app = app
# print(service)
# result = None
# if service.query_predicate == self.NS.whyis.globalChangeQuery:
# result = process_resource
# else:
# result = process_nanopub
# result.service = lambda : service
# return result
def is_running_waiting(service_name):
"""
Check if a task is running or waiting.
"""
if is_waiting(service_name):
return True
running_tasks = list(inspect().active().values())[0]
for task in running_tasks:
if 'kwargs' in task:
args = eval(task['kwargs'])
if service_name == args.get('service_name',None):
return True
return False
def is_waiting(service_name):
"""
Check if a task is waiting.
"""
scheduled_tasks = list(inspect().scheduled().values())[0]
for task in scheduled_tasks:
if 'kwargs' in task:
args = eval(task['kwargs'])
if service_name == args.get('service_name',None):
return True
return False
def is_waiting_importer(entity_name, exclude=None):
"""
    Check if an importer task for the given entity is waiting.
"""
if inspect().scheduled():
tasks = list(inspect().scheduled().values())
for task in tasks:
if 'args' in task and entity_name in task['args']:
return True
return False
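# Minimal usage sketch (hypothetical task and service name): only dispatch a
# service task if it is not already queued or running.
def dispatch_service_once(task, service_name):
    if is_running_waiting(service_name):
        return None
    return task.apply_async(kwargs={'service_name': service_name})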
| apache-2.0 |
DecisionSystemsGroup/DSGos | airootfs/usr/share/DSGos-Installer/DSGos_Installer/installation/automatic.py | 2 | 14419 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# automatic.py
#
# Copyright © 2013-2015 DSGos
#
# This file is part of DSGos_Installer.
#
# DSGos_Installer is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 3 of the License, or
# (at your option) any later version.
#
# DSGos_Installer is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# The following additional terms are in effect as per Section 7 of the license:
#
# The preservation of all legal notices and author attributions in
# the material or in the Appropriate Legal Notices displayed
# by works containing it is required.
#
# You should have received a copy of the GNU General Public License
# along with DSGos_Installer; If not, see <http://www.gnu.org/licenses/>.
""" Automatic installation screen """
from gi.repository import Gtk
import os
import sys
import logging
if __name__ == '__main__':
# Insert the parent directory at the front of the path.
# This is used only when we want to test this screen
base_dir = os.path.dirname(__file__) or '.'
parent_dir = os.path.join(base_dir, '..')
sys.path.insert(0, parent_dir)
import misc.misc as misc
import parted3.fs_module as fs
import parted
from gtkbasebox import GtkBaseBox
from installation import install
from installation import action
from installation import auto_partition
DEST_DIR = "/install"
class InstallationAutomatic(GtkBaseBox):
def __init__(self, params, prev_page="installation_ask", next_page="summary"):
super().__init__(self, params, "automatic", prev_page, next_page)
self.auto_device = None
self.device_store = self.ui.get_object('part_auto_select_drive')
self.device_label = self.ui.get_object('part_auto_select_drive_label')
self.entry = {'luks_password': self.ui.get_object('entry_luks_password'),
'luks_password_confirm': self.ui.get_object('entry_luks_password_confirm')}
self.image_password_ok = self.ui.get_object('image_password_ok')
self.devices = {}
self.installation = None
self.bootloader = "grub2"
self.bootloader_entry = self.ui.get_object('bootloader_entry')
self.bootloader_device_entry = self.ui.get_object('bootloader_device_entry')
self.bootloader_devices = {}
self.bootloader_device = {}
self.mount_devices = {}
self.fs_devices = {}
def translate_ui(self):
txt = _("Select drive:")
self.device_label.set_markup(txt)
label = self.ui.get_object('text_automatic')
txt = _("WARNING! This will overwrite everything currently on your drive!")
txt = "<b>{0}</b>".format(txt)
label.set_markup(txt)
label = self.ui.get_object('info_label')
txt = _("Select the drive we should use to install DSGos and then click above to start the process.")
label.set_markup(txt)
label = self.ui.get_object('label_luks_password')
txt = _("Encryption Password:")
label.set_markup(txt)
label = self.ui.get_object('label_luks_password_confirm')
txt = _("Confirm your password:")
label.set_markup(txt)
label = self.ui.get_object('label_luks_password_warning')
txt = _("LUKS Password. Do not use special characters or accents!")
label.set_markup(txt)
btn = self.ui.get_object('checkbutton_show_password')
btn.set_label(_("Show password"))
self.header.set_subtitle(_("Automatic Installation Mode"))
txt = _("Use the device below for boot loader installation:")
txt = "<span weight='bold' size='small'>{0}</span>".format(txt)
label = self.ui.get_object('bootloader_device_info_label')
label.set_markup(txt)
txt = _("Bootloader:")
label = self.ui.get_object('bootloader_label')
label.set_markup(txt)
txt = _("Device:")
label = self.ui.get_object('bootloader_device_label')
label.set_markup(txt)
def on_checkbutton_show_password_toggled(self, widget):
""" show/hide LUKS passwords """
btn = self.ui.get_object('checkbutton_show_password')
show = btn.get_active()
self.entry['luks_password'].set_visibility(show)
self.entry['luks_password_confirm'].set_visibility(show)
def populate_devices(self):
with misc.raised_privileges():
device_list = parted.getAllDevices()
self.device_store.remove_all()
self.devices = {}
self.bootloader_device_entry.remove_all()
self.bootloader_devices.clear()
for dev in device_list:
# avoid cdrom and any raid, lvm volumes or encryptfs
if not dev.path.startswith("/dev/sr") and \
not dev.path.startswith("/dev/mapper"):
# hard drives measure themselves assuming kilo=1000, mega=1mil, etc
size_in_gigabytes = int((dev.length * dev.sectorSize) / 1000000000)
line = '{0} [{1} GB] ({2})'.format(dev.model, size_in_gigabytes, dev.path)
self.device_store.append_text(line)
self.devices[line] = dev.path
self.bootloader_device_entry.append_text(line)
self.bootloader_devices[line] = dev.path
logging.debug(line)
self.select_first_combobox_item(self.device_store)
self.select_first_combobox_item(self.bootloader_device_entry)
@staticmethod
def select_first_combobox_item(combobox):
tree_model = combobox.get_model()
tree_iter = tree_model.get_iter_first()
combobox.set_active_iter(tree_iter)
def on_select_drive_changed(self, widget):
line = self.device_store.get_active_text()
if line is not None:
self.auto_device = self.devices[line]
self.forward_button.set_sensitive(True)
def prepare(self, direction):
self.translate_ui()
self.populate_devices()
# image = Gtk.Image.new_from_icon_name("go-next-symbolic", Gtk.IconSize.BUTTON)
# self.forward_button.set_label("")
# self.forward_button.set_image(image)
# self.forward_button.set_always_show_image(True)
# self.forward_button.set_name('fwd_btn')
self.show_all()
self.fill_bootloader_entry()
luks_grid = self.ui.get_object('luks_grid')
luks_grid.set_sensitive(self.settings.get('use_luks'))
# self.forward_button.set_sensitive(False)
def store_values(self):
""" Let's do our installation! """
#response = self.show_warning()
#if response == Gtk.ResponseType.NO:
# return False
luks_password = self.entry['luks_password'].get_text()
self.settings.set('luks_root_password', luks_password)
if luks_password != "":
logging.debug("A root LUKS password has been set")
self.set_bootloader()
return True
def on_luks_password_changed(self, widget):
luks_password = self.entry['luks_password'].get_text()
luks_password_confirm = self.entry['luks_password_confirm'].get_text()
install_ok = True
if len(luks_password) <= 0:
self.image_password_ok.set_opacity(0)
self.forward_button.set_sensitive(True)
else:
if luks_password == luks_password_confirm:
icon = "emblem-default"
else:
icon = "dialog-warning"
install_ok = False
self.image_password_ok.set_from_icon_name(icon, Gtk.IconSize.LARGE_TOOLBAR)
self.image_password_ok.set_opacity(1)
self.forward_button.set_sensitive(install_ok)
def fill_bootloader_entry(self):
""" Put the bootloaders for the user to choose """
self.bootloader_entry.remove_all()
if os.path.exists('/sys/firmware/efi'):
self.bootloader_entry.append_text("Grub2")
self.bootloader_entry.append_text("Gummiboot")
self.bootloader_entry.set_active(0)
self.bootloader_entry.show()
else:
self.bootloader_entry.hide()
widget_ids = ["bootloader_label", "bootloader_device_label"]
for widget_id in widget_ids:
widget = self.ui.get_object(widget_id)
widget.hide()
def on_bootloader_device_check_toggled(self, checkbox):
status = checkbox.get_active()
widget_ids = [
"bootloader_device_entry",
"bootloader_entry",
"bootloader_label",
"bootloader_device_label"]
for widget_id in widget_ids:
widget = self.ui.get_object(widget_id)
widget.set_sensitive(status)
self.settings.set('bootloader_install', status)
def on_bootloader_device_entry_changed(self, widget):
""" Get new selected bootloader device """
line = self.bootloader_device_entry.get_active_text()
if line is not None:
self.bootloader_device = self.bootloader_devices[line]
def on_bootloader_entry_changed(self, widget):
""" Get new selected bootloader """
line = self.bootloader_entry.get_active_text()
if line is not None:
self.bootloader = line.lower()
def show_warning(self):
txt = _("Do you really want to proceed and delete all your content on your hard drive?")
txt = txt + "\n\n" + self.device_store.get_active_text()
message = Gtk.MessageDialog(
transient_for=self.get_toplevel(),
modal=True,
destroy_with_parent=True,
message_type=Gtk.MessageType.QUESTION,
buttons=Gtk.ButtonsType.YES_NO,
text=txt)
response = message.run()
message.destroy()
return response
def get_changes(self):
""" Grab all changes for confirmation """
change_list = [action.Action("delete", self.auto_device)]
auto = auto_partition.AutoPartition(dest_dir=DEST_DIR,
auto_device=self.auto_device,
use_luks=self.settings.get("use_luks"),
luks_password=self.settings.get("luks_root_password"),
use_lvm=self.settings.get("use_lvm"),
use_home=self.settings.get("use_home"),
bootloader=self.settings.get("bootloader"),
callback_queue=self.callback_queue)
devices = auto.get_devices()
mount_devices = auto.get_mount_devices()
fs_devices = auto.get_fs_devices()
mount_points = {}
for mount_point in mount_devices:
device = mount_devices[mount_point]
mount_points[device] = mount_point
for device in sorted(fs_devices.keys()):
try:
txt = _("Device {0} will be created ({1} filesystem) as {2}").format(device, fs_devices[device], mount_points[device])
except KeyError:
txt = _("Device {0} will be created ({1} filesystem)").format(device, fs_devices[device])
act = action.Action("info", txt)
change_list.append(act)
return change_list
def run_format(self):
logging.debug("Creating partitions and their filesystems in %s", self.auto_device)
# If no key password is given a key file is generated and stored in /boot
# (see auto_partition.py)
auto = auto_partition.AutoPartition(dest_dir=DEST_DIR,
auto_device=self.auto_device,
use_luks=self.settings.get("use_luks"),
luks_password=self.settings.get("luks_root_password"),
use_lvm=self.settings.get("use_lvm"),
use_home=self.settings.get("use_home"),
bootloader=self.settings.get("bootloader"),
callback_queue=self.callback_queue)
auto.run()
# Get mount_devices and fs_devices
# (mount_devices will be used when configuring GRUB in modify_grub_default)
# (fs_devices will be used when configuring the fstab file)
self.mount_devices = auto.get_mount_devices()
self.fs_devices = auto.get_fs_devices()
def set_bootloader(self):
checkbox = self.ui.get_object("bootloader_device_check")
if not checkbox.get_active():
self.settings.set('bootloader_install', False)
logging.info("DSGos_Installer will not install any bootloader")
else:
self.settings.set('bootloader_install', True)
self.settings.set('bootloader_device', self.bootloader_device)
self.settings.set('bootloader', self.bootloader)
msg = _("DSGos will install the bootloader '{0}' in device '{1}'")
msg = msg.format(self.bootloader, self.bootloader_device)
logging.info(msg)
def run_install(self, packages, metalinks):
txt = _("DSGos_Installer will install DSGos on device %s")
logging.info(txt, self.auto_device)
self.settings.set('auto_device', self.auto_device)
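        # Map the target device to whether it is an SSD before handing it to the
        # Installation step below.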
ssd = {self.auto_device: fs.is_ssd(self.auto_device)}
if not self.testing:
self.installation = install.Installation(
self.settings,
self.callback_queue,
packages,
metalinks,
self.mount_devices,
self.fs_devices,
ssd)
self.installation.start()
else:
logging.debug("Testing mode, not changing anything")
# When testing, no _() is available
try:
_("")
except NameError as err:
def _(message):
return message
if __name__ == '__main__':
from test_screen import _, run
run('InstallationAutomatic')
| mit |
hrashk/sympy | sympy/mpmath/tests/test_special.py | 37 | 2854 | from sympy.mpmath import *
def test_special():
assert inf == inf
assert inf != -inf
assert -inf == -inf
assert inf != nan
assert nan != nan
assert isnan(nan)
assert --inf == inf
assert abs(inf) == inf
assert abs(-inf) == inf
assert abs(nan) != abs(nan)
assert isnan(inf - inf)
assert isnan(inf + (-inf))
assert isnan(-inf - (-inf))
assert isnan(inf + nan)
assert isnan(-inf + nan)
assert mpf(2) + inf == inf
assert 2 + inf == inf
assert mpf(2) - inf == -inf
assert 2 - inf == -inf
assert inf > 3
assert 3 < inf
assert 3 > -inf
assert -inf < 3
assert inf > mpf(3)
assert mpf(3) < inf
assert mpf(3) > -inf
assert -inf < mpf(3)
assert not (nan < 3)
assert not (nan > 3)
assert isnan(inf * 0)
assert isnan(-inf * 0)
assert inf * 3 == inf
assert inf * -3 == -inf
assert -inf * 3 == -inf
assert -inf * -3 == inf
assert inf * inf == inf
assert -inf * -inf == inf
assert isnan(nan / 3)
assert inf / -3 == -inf
assert inf / 3 == inf
assert 3 / inf == 0
assert -3 / inf == 0
assert 0 / inf == 0
assert isnan(inf / inf)
assert isnan(inf / -inf)
assert isnan(inf / nan)
assert mpf('inf') == mpf('+inf') == inf
assert mpf('-inf') == -inf
assert isnan(mpf('nan'))
assert isinf(inf)
assert isinf(-inf)
assert not isinf(mpf(0))
assert not isinf(nan)
def test_special_powers():
assert inf**3 == inf
assert isnan(inf**0)
assert inf**-3 == 0
assert (-inf)**2 == inf
assert (-inf)**3 == -inf
assert isnan((-inf)**0)
assert (-inf)**-2 == 0
assert (-inf)**-3 == 0
assert isnan(nan**5)
assert isnan(nan**0)
def test_functions_special():
assert exp(inf) == inf
assert exp(-inf) == 0
assert isnan(exp(nan))
assert log(inf) == inf
assert isnan(log(nan))
assert isnan(sin(inf))
assert isnan(sin(nan))
assert atan(inf).ae(pi/2)
assert atan(-inf).ae(-pi/2)
assert isnan(sqrt(nan))
assert sqrt(inf) == inf
def test_convert_special():
float_inf = 1e300 * 1e300
float_ninf = -float_inf
float_nan = float_inf/float_ninf
assert mpf(3) * float_inf == inf
assert mpf(3) * float_ninf == -inf
assert isnan(mpf(3) * float_nan)
assert not (mpf(3) < float_nan)
assert not (mpf(3) > float_nan)
assert not (mpf(3) <= float_nan)
assert not (mpf(3) >= float_nan)
assert float(mpf('1e1000')) == float_inf
assert float(mpf('-1e1000')) == float_ninf
assert float(mpf('1e100000000000000000')) == float_inf
assert float(mpf('-1e100000000000000000')) == float_ninf
assert float(mpf('1e-100000000000000000')) == 0.0
def test_div_bug():
assert isnan(nan/1)
assert isnan(nan/2)
assert inf/2 == inf
assert (-inf)/2 == -inf
| bsd-3-clause |
nicobustillos/odoo | addons/hr_attendance/hr_attendance.py | 56 | 9198 | # -*- coding: utf-8 -*-
##############################################################################
#
# OpenERP, Open Source Management Solution
# Copyright (C) 2004-2010 Tiny SPRL (<http://tiny.be>).
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
##############################################################################
import time
from datetime import datetime
from openerp.osv import fields, osv
from openerp.tools.translate import _
class hr_action_reason(osv.osv):
_name = "hr.action.reason"
_description = "Action Reason"
_columns = {
'name': fields.char('Reason', required=True, help='Specifies the reason for Signing In/Signing Out.'),
'action_type': fields.selection([('sign_in', 'Sign in'), ('sign_out', 'Sign out')], "Action Type"),
}
_defaults = {
'action_type': 'sign_in',
}
def _employee_get(obj, cr, uid, context=None):
ids = obj.pool.get('hr.employee').search(cr, uid, [('user_id', '=', uid)], context=context)
return ids and ids[0] or False
class hr_attendance(osv.osv):
_name = "hr.attendance"
_description = "Attendance"
def _worked_hours_compute(self, cr, uid, ids, fieldnames, args, context=None):
"""For each hr.attendance record of action sign-in: assign 0.
For each hr.attendance record of action sign-out: assign number of hours since last sign-in.
"""
res = {}
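        # Worked example (comment only): a sign-in at 09:00:00 followed by a sign-out
        # at 12:45:00 gives a 13500-second delta; with Python 2 integer division
        # ((13500 / 60) / 60) the stored worked_hours is 3.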
for obj in self.browse(cr, uid, ids, context=context):
if obj.action == 'sign_in':
res[obj.id] = 0
elif obj.action == 'sign_out':
# Get the associated sign-in
last_signin_id = self.search(cr, uid, [
('employee_id', '=', obj.employee_id.id),
('name', '<', obj.name), ('action', '=', 'sign_in')
], limit=1, order='name DESC')
if last_signin_id:
last_signin = self.browse(cr, uid, last_signin_id, context=context)[0]
# Compute time elapsed between sign-in and sign-out
last_signin_datetime = datetime.strptime(last_signin.name, '%Y-%m-%d %H:%M:%S')
signout_datetime = datetime.strptime(obj.name, '%Y-%m-%d %H:%M:%S')
workedhours_datetime = (signout_datetime - last_signin_datetime)
res[obj.id] = ((workedhours_datetime.seconds) / 60) / 60
else:
res[obj.id] = False
return res
_columns = {
'name': fields.datetime('Date', required=True, select=1),
'action': fields.selection([('sign_in', 'Sign In'), ('sign_out', 'Sign Out'), ('action','Action')], 'Action', required=True),
'action_desc': fields.many2one("hr.action.reason", "Action Reason", domain="[('action_type', '=', action)]", help='Specifies the reason for Signing In/Signing Out in case of extra hours.'),
'employee_id': fields.many2one('hr.employee', "Employee", required=True, select=True),
'worked_hours': fields.function(_worked_hours_compute, type='float', string='Worked Hours', store=True),
}
_defaults = {
'name': lambda *a: time.strftime('%Y-%m-%d %H:%M:%S'), #please don't remove the lambda, if you remove it then the current time will not change
'employee_id': _employee_get,
}
    def _altern_si_so(self, cr, uid, ids, context=None):
        """ Alternating sign_in/sign_out check.
Previous (if exists) must be of opposite action.
Next (if exists) must be of opposite action.
"""
for att in self.browse(cr, uid, ids, context=context):
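            # Example (comment only): sign_in, sign_out, sign_in is a valid sequence,
            # while two consecutive sign_in records, or a first record that is a
            # sign_out, make this constraint fail.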
# search and browse for first previous and first next records
prev_att_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '<', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name DESC')
next_add_ids = self.search(cr, uid, [('employee_id', '=', att.employee_id.id), ('name', '>', att.name), ('action', 'in', ('sign_in', 'sign_out'))], limit=1, order='name ASC')
prev_atts = self.browse(cr, uid, prev_att_ids, context=context)
next_atts = self.browse(cr, uid, next_add_ids, context=context)
# check for alternance, return False if at least one condition is not satisfied
if prev_atts and prev_atts[0].action == att.action: # previous exists and is same action
return False
if next_atts and next_atts[0].action == att.action: # next exists and is same action
return False
if (not prev_atts) and (not next_atts) and att.action != 'sign_in': # first attendance must be sign_in
return False
return True
_constraints = [(_altern_si_so, 'Error ! Sign in (resp. Sign out) must follow Sign out (resp. Sign in)', ['action'])]
_order = 'name desc'
class hr_employee(osv.osv):
_inherit = "hr.employee"
_description = "Employee"
def _state(self, cr, uid, ids, name, args, context=None):
result = {}
if not ids:
return result
for id in ids:
result[id] = 'absent'
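        # The sub-query selects each employee's most recent attendance record; an
        # employee is reported 'present' only if that latest record is a sign_in,
        # otherwise the 'absent' default set above is kept.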
cr.execute('SELECT hr_attendance.action, hr_attendance.employee_id \
FROM ( \
SELECT MAX(name) AS name, employee_id \
FROM hr_attendance \
WHERE action in (\'sign_in\', \'sign_out\') \
GROUP BY employee_id \
) AS foo \
LEFT JOIN hr_attendance \
ON (hr_attendance.employee_id = foo.employee_id \
AND hr_attendance.name = foo.name) \
WHERE hr_attendance.employee_id IN %s',(tuple(ids),))
for res in cr.fetchall():
result[res[1]] = res[0] == 'sign_in' and 'present' or 'absent'
return result
def _last_sign(self, cr, uid, ids, name, args, context=None):
result = {}
if not ids:
return result
for id in ids:
result[id] = False
cr.execute("""select max(name) as name
from hr_attendance
where action in ('sign_in', 'sign_out') and employee_id = %s""",(id,))
for res in cr.fetchall():
result[id] = res[0]
return result
def _attendance_access(self, cr, uid, ids, name, args, context=None):
        # this function field is used to hide the attendance sign in/sign out button from the menu
group = self.pool.get('ir.model.data').get_object(cr, uid, 'base', 'group_hr_attendance')
visible = False
if uid in [user.id for user in group.users]:
visible = True
return dict([(x, visible) for x in ids])
_columns = {
'state': fields.function(_state, type='selection', selection=[('absent', 'Absent'), ('present', 'Present')], string='Attendance'),
'last_sign': fields.function(_last_sign, type='datetime', string='Last Sign'),
'attendance_access': fields.function(_attendance_access, string='Attendance Access', type='boolean'),
}
def _action_check(self, cr, uid, emp_id, dt=False, context=None):
cr.execute('SELECT MAX(name) FROM hr_attendance WHERE employee_id=%s', (emp_id,))
res = cr.fetchone()
return not (res and (res[0]>=(dt or time.strftime('%Y-%m-%d %H:%M:%S'))))
def attendance_action_change(self, cr, uid, ids, context=None):
if context is None:
context = {}
action_date = context.get('action_date', False)
action = context.get('action', False)
hr_attendance = self.pool.get('hr.attendance')
warning_sign = {'sign_in': _('Sign In'), 'sign_out': _('Sign Out')}
for employee in self.browse(cr, uid, ids, context=context):
if not action:
if employee.state == 'present': action = 'sign_out'
if employee.state == 'absent': action = 'sign_in'
if not self._action_check(cr, uid, employee.id, action_date, context):
raise osv.except_osv(_('Warning'), _('You tried to %s with a date anterior to another event !\nTry to contact the HR Manager to correct attendances.')%(warning_sign[action],))
vals = {'action': action, 'employee_id': employee.id}
if action_date:
vals['name'] = action_date
hr_attendance.create(cr, uid, vals, context=context)
return True
# vim:expandtab:smartindent:tabstop=4:softtabstop=4:shiftwidth=4:
| agpl-3.0 |
theonlydude/RandomMetroidSolver | solver/commonSolver.py | 1 | 37327 | import logging, time
from logic.smboolmanager import SMBoolManagerPlando as SMBoolManager
from logic.smbool import SMBool, smboolFalse
from logic.helpers import Bosses
from rom.romloader import RomLoader
from rom.rom_patches import RomPatches
from graph.graph import AccessGraphSolver as AccessGraph
from utils.utils import PresetLoader
from solver.conf import Conf
from graph.graph_utils import vanillaTransitions, vanillaBossesTransitions, vanillaEscapeTransitions, GraphUtils, getAccessPoint
from utils.parameters import easy, medium, hard, harder, hardcore, mania, infinity
from utils.doorsmanager import DoorsManager
from logic.logic import Logic
class CommonSolver(object):
def loadRom(self, rom, interactive=False, magic=None, startLocation=None):
self.scavengerOrder = []
# startLocation param is only use for seedless
if rom == None:
# TODO::add a --logic parameter for seedless
Logic.factory('vanilla')
self.romFileName = 'seedless'
self.majorsSplit = 'Full'
self.masterMajorsSplit = 'Full'
self.areaRando = True
self.bossRando = True
self.escapeRando = False
self.escapeTimer = "03:00"
self.startLocation = startLocation
RomPatches.setDefaultPatches(startLocation)
self.startArea = getAccessPoint(startLocation).Start['solveArea']
# in seedless load all the vanilla transitions
self.areaTransitions = vanillaTransitions[:]
self.bossTransitions = vanillaBossesTransitions[:]
self.escapeTransition = [vanillaEscapeTransitions[0]]
# in seedless we allow mixing of area and boss transitions
self.hasMixedTransitions = True
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.locations = Logic.locations
for loc in self.locations:
loc.itemName = 'Nothing'
# set doors related to default patches
DoorsManager.setDoorsColor()
self.doorsRando = False
self.hasNothing = False
else:
self.romFileName = rom
self.romLoader = RomLoader.factory(rom, magic)
Logic.factory(self.romLoader.readLogic())
self.romLoader.readNothingId()
self.locations = Logic.locations
(self.majorsSplit, self.masterMajorsSplit) = self.romLoader.assignItems(self.locations)
(self.startLocation, self.startArea, startPatches) = self.romLoader.getStartAP()
if not GraphUtils.isStandardStart(self.startLocation) and self.majorsSplit != 'Full':
# update major/chozo locs in non standard start
self.romLoader.updateSplitLocs(self.majorsSplit, self.locations)
(self.areaRando, self.bossRando, self.escapeRando) = self.romLoader.loadPatches()
RomPatches.ActivePatches += startPatches
self.escapeTimer = self.romLoader.getEscapeTimer()
self.doorsRando = self.romLoader.loadDoorsColor()
self.hasNothing = self.checkLocsForNothing()
if self.majorsSplit == 'Scavenger':
self.scavengerOrder = self.romLoader.loadScavengerOrder(self.locations)
if interactive == False:
print("ROM {} majors: {} area: {} boss: {} escape: {} patches: {} activePatches: {}".format(rom, self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(self.romLoader.getPatches()), sorted(RomPatches.ActivePatches)))
else:
print("majors: {} area: {} boss: {} escape: {} activepatches: {}".format(self.majorsSplit, self.areaRando, self.bossRando, self.escapeRando, sorted(RomPatches.ActivePatches)))
(self.areaTransitions, self.bossTransitions, self.escapeTransition, self.hasMixedTransitions) = self.romLoader.getTransitions()
if interactive == True and self.debug == False:
# in interactive area mode we build the graph as we play along
if self.areaRando == True and self.bossRando == True:
self.curGraphTransitions = []
elif self.areaRando == True:
self.curGraphTransitions = self.bossTransitions[:]
elif self.bossRando == True:
self.curGraphTransitions = self.areaTransitions[:]
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions
if self.escapeRando == False:
self.curGraphTransitions += self.escapeTransition
else:
self.curGraphTransitions = self.bossTransitions + self.areaTransitions + self.escapeTransition
self.smbm = SMBoolManager()
self.areaGraph = AccessGraph(Logic.accessPoints, self.curGraphTransitions)
# store at each step how many locations are available
self.nbAvailLocs = []
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("Display items at locations:")
for loc in self.locations:
self.log.debug('{:>50}: {:>16}'.format(loc.Name, loc.itemName))
def loadPreset(self, presetFileName):
presetLoader = PresetLoader.factory(presetFileName)
presetLoader.load()
self.smbm.createKnowsFunctions()
if self.log.getEffectiveLevel() == logging.DEBUG:
presetLoader.printToScreen()
def getLoc(self, locName):
for loc in self.locations:
if loc.Name == locName:
return loc
def getNextDifficulty(self, difficulty):
nextDiffs = {
0: easy,
easy: medium,
medium: hard,
hard: harder,
harder: hardcore,
hardcore: mania,
mania: infinity
}
return nextDiffs[difficulty]
def checkLocsForNothing(self):
# for the auto tracker, need to know if we have to track nothing items
return any(loc.itemName == "Nothing" for loc in self.locations)
def computeLocationsDifficulty(self, locations, phase="major"):
difficultyTarget = Conf.difficultyTarget
nextLocations = locations
# before looping on all diff targets, get only the available locations with diff target infinity
if difficultyTarget != infinity:
self.areaGraph.getAvailableLocations(nextLocations, self.smbm, infinity, self.lastAP)
nextLocations = [loc for loc in nextLocations if loc.difficulty]
while True:
self.areaGraph.getAvailableLocations(nextLocations, self.smbm, difficultyTarget, self.lastAP)
# check post available functions too
for loc in nextLocations:
loc.evalPostAvailable(self.smbm)
self.areaGraph.useCache(True)
# also check if we can come back to current AP from the location
for loc in nextLocations:
loc.evalComeBack(self.smbm, self.areaGraph, self.lastAP)
self.areaGraph.useCache(False)
nextLocations = [loc for loc in nextLocations if not loc.difficulty]
if not nextLocations:
break
if difficultyTarget == infinity:
# we've tested all the difficulties
break
# start a new loop with next difficulty
difficultyTarget = self.getNextDifficulty(difficultyTarget)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("available {} locs:".format(phase))
for loc in locations:
if loc.difficulty.bool == True:
print("{:>48}: {:>8}".format(loc.Name, round(loc.difficulty.difficulty, 2)))
print(" smbool: {}".format(loc.difficulty))
print(" path: {}".format([ap.Name for ap in loc.path]))
def collectMajor(self, loc, itemName=None):
self.majorLocations.remove(loc)
self.visitedLocations.append(loc)
self.collectItem(loc, itemName)
return loc
def collectMinor(self, loc):
self.minorLocations.remove(loc)
self.visitedLocations.append(loc)
self.collectItem(loc)
return loc
def collectItem(self, loc, item=None):
if item == None:
item = loc.itemName
if self.vcr != None:
self.vcr.addLocation(loc.Name, item)
if self.firstLogFile is not None:
if item not in self.collectedItems:
self.firstLogFile.write("{};{};{};{}\n".format(item, loc.Name, loc.Area, loc.GraphArea))
if item not in Conf.itemsForbidden:
self.collectedItems.append(item)
if self.checkDuplicateMajor == True:
if item not in ['Nothing', 'NoEnergy', 'Missile', 'Super', 'PowerBomb', 'ETank', 'Reserve']:
if self.smbm.haveItem(item):
print("WARNING: {} has already been picked up".format(item))
self.smbm.addItem(item)
else:
# update the name of the item
item = "-{}-".format(item)
loc.itemName = item
self.collectedItems.append(item)
# we still need the boss difficulty
if not loc.isBoss():
loc.difficulty = smboolFalse
if self.log.getEffectiveLevel() == logging.DEBUG:
print("---------------------------------------------------------------")
print("collectItem: {:<16} at {:<48}".format(item, loc.Name))
print("---------------------------------------------------------------")
# last loc is used as root node for the graph.
# when loading a plando we can load locations from non connected areas, so they don't have an access point.
if loc.accessPoint is not None:
self.lastAP = loc.accessPoint
self.lastArea = loc.SolveArea
def getLocIndex(self, locName):
for (i, loc) in enumerate(self.visitedLocations):
if loc.Name == locName:
return i
def removeItemAt(self, locNameWeb):
locName = self.locNameWeb2Internal(locNameWeb)
locIndex = self.getLocIndex(locName)
if locIndex is None:
self.errorMsg = "Location '{}' has not been visited".format(locName)
return
loc = self.visitedLocations.pop(locIndex)
# removeItemAt is only used from the tracker, so all the locs are in majorLocations
self.majorLocations.append(loc)
# access point
if len(self.visitedLocations) == 0:
self.lastAP = self.startLocation
self.lastArea = self.startArea
else:
self.lastAP = self.visitedLocations[-1].accessPoint
self.lastArea = self.visitedLocations[-1].SolveArea
# delete location params which are set when the location is available
if loc.difficulty is not None:
loc.difficulty = None
if loc.distance is not None:
loc.distance = None
if loc.accessPoint is not None:
loc.accessPoint = None
if loc.path is not None:
loc.path = None
# item
item = loc.itemName
if self.mode in ['seedless', 'race', 'debug']:
# in seedless remove the first nothing found as collectedItems is not ordered
self.collectedItems.remove(item)
else:
self.collectedItems.pop(locIndex)
        # if multiple majors in plando mode, remove it from smbm only when it's the last occurrence of it
if self.smbm.isCountItem(item):
self.smbm.removeItem(item)
else:
if item not in self.collectedItems:
self.smbm.removeItem(item)
def cancelLastItems(self, count):
if self.vcr != None:
self.vcr.addRollback(count)
if self.interactive == False:
self.nbAvailLocs = self.nbAvailLocs[:-count]
for _ in range(count):
if len(self.visitedLocations) == 0:
return
loc = self.visitedLocations.pop()
if self.majorsSplit == 'Full':
self.majorLocations.append(loc)
else:
if loc.isClass(self.majorsSplit) or loc.isBoss():
self.majorLocations.append(loc)
else:
self.minorLocations.append(loc)
# access point
if len(self.visitedLocations) == 0:
self.lastAP = self.startLocation
self.lastArea = self.startArea
else:
self.lastAP = self.visitedLocations[-1].accessPoint
if self.lastAP is None:
# default to location first access from access point
self.lastAP = list(self.visitedLocations[-1].AccessFrom.keys())[0]
self.lastArea = self.visitedLocations[-1].SolveArea
# delete location params which are set when the location is available
if loc.difficulty is not None:
loc.difficulty = None
if loc.distance is not None:
loc.distance = None
if loc.accessPoint is not None:
loc.accessPoint = None
if loc.path is not None:
loc.path = None
# item
item = loc.itemName
if item != self.collectedItems[-1]:
raise Exception("Item of last collected loc {}: {} is different from last collected item: {}".format(loc.Name, item, self.collectedItems[-1]))
# in plando we have to remove the last added item,
# else it could be used in computing the postAvailable of a location
if self.mode in ['plando', 'seedless', 'race', 'debug']:
loc.itemName = 'Nothing'
self.collectedItems.pop()
            # if multiple majors in plando mode, remove it from smbm only when it's the last occurrence of it
if self.smbm.isCountItem(item):
self.smbm.removeItem(item)
else:
if item not in self.collectedItems:
self.smbm.removeItem(item)
def printLocs(self, locs, phase):
if len(locs) > 0:
print("{}:".format(phase))
print('{:>48} {:>12} {:>8} {:>8} {:>34} {:>10}'.format("Location Name", "Difficulty", "Distance", "ComeBack", "SolveArea", "AreaWeight"))
for loc in locs:
print('{:>48} {:>12} {:>8} {:>8} {:>34} {:>10}'.
format(loc.Name, round(loc.difficulty[1], 2), round(loc.distance, 2),
loc.comeBack, loc.SolveArea, loc.areaWeight if loc.areaWeight is not None else -1))
def getAvailableItemsList(self, locations, threshold):
# locations without distance are not available
locations = [loc for loc in locations if loc.distance is not None]
if len(locations) == 0:
return []
# add nocomeback locations which has been selected by the comeback step (areaWeight == 1)
around = [loc for loc in locations if( (loc.areaWeight is not None and loc.areaWeight == 1)
or ((loc.SolveArea == self.lastArea or loc.distance < 3)
and loc.difficulty.difficulty <= threshold
and not Bosses.areaBossDead(self.smbm, self.lastArea)
and loc.comeBack is not None and loc.comeBack == True) )]
        outside = [loc for loc in locations if loc not in around]
if self.log.getEffectiveLevel() == logging.DEBUG:
self.printLocs(around, "around1")
self.printLocs(outside, "outside1")
around.sort(key=lambda loc: (
# locs in the same area
0 if loc.SolveArea == self.lastArea
else 1,
# nearest locs
loc.distance,
# beating a boss
0 if loc.isBoss()
else 1,
# easiest first
loc.difficulty.difficulty
)
)
if self.log.getEffectiveLevel() == logging.DEBUG:
self.printLocs(around, "around2")
# we want to sort the outside locations by putting the ones in the same area first,
        # then we sort the remaining areas starting with boss dead status.
# we also want to sort by range of difficulty and not only with the difficulty threshold.
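        # Buckets used to order the outside locations: area-weighted locations first,
        # then increasing difficulty ranges, and locations without comeback last.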
ranged = {
"areaWeight": [],
"easy": [],
"medium": [],
"hard": [],
"harder": [],
"hardcore": [],
"mania": [],
"noComeBack": []
}
for loc in outside:
if loc.areaWeight is not None:
ranged["areaWeight"].append(loc)
elif loc.comeBack is None or loc.comeBack == False:
ranged["noComeBack"].append(loc)
else:
difficulty = loc.difficulty.difficulty
if difficulty < medium:
ranged["easy"].append(loc)
elif difficulty < hard:
ranged["medium"].append(loc)
elif difficulty < harder:
ranged["hard"].append(loc)
elif difficulty < hardcore:
ranged["harder"].append(loc)
elif difficulty < mania:
ranged["hardcore"].append(loc)
else:
ranged["mania"].append(loc)
for key in ranged:
ranged[key].sort(key=lambda loc: (
# first locs in the same area
0 if loc.SolveArea == self.lastArea else 1,
# first nearest locs
loc.distance,
# beating a boss
loc.difficulty.difficulty if (not Bosses.areaBossDead(self.smbm, loc.Area)
and loc.isBoss())
else 100000,
# areas with boss still alive
loc.difficulty.difficulty if (not Bosses.areaBossDead(self.smbm, loc.Area))
else 100000,
loc.difficulty.difficulty))
if self.log.getEffectiveLevel() == logging.DEBUG:
for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
self.printLocs(ranged[key], "outside2:{}".format(key))
outside = []
for key in ["areaWeight", "easy", "medium", "hard", "harder", "hardcore", "mania", "noComeBack"]:
outside += ranged[key]
return around + outside
def nextDecision(self, majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold):
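        # Branch priority below: a major in the current area under the difficulty
        # threshold is taken outright; if only one list has candidates it is used;
        # otherwise major vs minor is decided on area weight, comeback, then
        # distance/difficulty (with a shortcut to majors once Charge and enough
        # minors are collected).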
# first take major items of acceptable difficulty in the current area
if (len(majorsAvailable) > 0
and majorsAvailable[0].SolveArea == self.lastArea
and majorsAvailable[0].difficulty.difficulty <= diffThreshold
and majorsAvailable[0].comeBack == True):
return self.collectMajor(majorsAvailable.pop(0))
# next item decision
elif len(minorsAvailable) == 0 and len(majorsAvailable) > 0:
self.log.debug('MAJOR')
return self.collectMajor(majorsAvailable.pop(0))
elif len(majorsAvailable) == 0 and len(minorsAvailable) > 0:
            # we don't check for hasEnoughMinors here, because we would be stuck, so pick up
# what we can and hope it gets better
self.log.debug('MINOR')
return self.collectMinor(minorsAvailable.pop(0))
elif len(majorsAvailable) > 0 and len(minorsAvailable) > 0:
self.log.debug('BOTH|M={}, m={}'.format(majorsAvailable[0].Name, minorsAvailable[0].Name))
# if both are available, decide based on area, difficulty and comeBack
nextMajDifficulty = majorsAvailable[0].difficulty.difficulty
nextMinDifficulty = minorsAvailable[0].difficulty.difficulty
nextMajArea = majorsAvailable[0].SolveArea
nextMinArea = minorsAvailable[0].SolveArea
nextMajComeBack = majorsAvailable[0].comeBack
nextMinComeBack = minorsAvailable[0].comeBack
nextMajDistance = majorsAvailable[0].distance
nextMinDistance = minorsAvailable[0].distance
            maxAreaWeight = 10000
            nextMajAreaWeight = majorsAvailable[0].areaWeight if majorsAvailable[0].areaWeight is not None else maxAreaWeight
            nextMinAreaWeight = minorsAvailable[0].areaWeight if minorsAvailable[0].areaWeight is not None else maxAreaWeight
if self.log.getEffectiveLevel() == logging.DEBUG:
print(" : {:>4} {:>32} {:>4} {:>4} {:>6}".format("diff", "area", "back", "dist", "weight"))
print("major: {:>4} {:>32} {:>4} {:>4} {:>6}".format(round(nextMajDifficulty, 2), nextMajArea, nextMajComeBack, round(nextMajDistance, 2), nextMajAreaWeight))
print("minor: {:>4} {:>32} {:>4} {:>4} {:>6}".format(round(nextMinDifficulty, 2), nextMinArea, nextMinComeBack, round(nextMinDistance, 2), nextMinAreaWeight))
            if hasEnoughMinors == True and self.haveAllMinorTypes() == True and self.smbm.haveItem('Charge') and nextMajAreaWeight != maxAreaWeight:
# we have charge, no longer need minors
self.log.debug("we have charge, no longer need minors, take major")
return self.collectMajor(majorsAvailable.pop(0))
else:
# respect areaweight first
if nextMajAreaWeight != nextMinAreaWeight:
self.log.debug("maj/min != area weight")
if nextMajAreaWeight < nextMinAreaWeight:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# then take item from loc where you can come back
elif nextMajComeBack != nextMinComeBack:
self.log.debug("maj/min != combeback")
if nextMajComeBack == True:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# difficulty over area (this is a difficulty estimator, not a speedrunning simulator)
elif nextMinDifficulty <= diffThreshold and nextMajDifficulty <= diffThreshold:
# take the closer one
if nextMajDistance != nextMinDistance:
self.log.debug("!= distance and <= diffThreshold")
if nextMajDistance < nextMinDistance:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# take the easier
elif nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMajDifficulty < nextMinDifficulty:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
# same difficulty and distance for minor and major, take major first
else:
return self.collectMajor(majorsAvailable.pop(0))
# if not all the minors type are collected, start with minors
elif nextMinDifficulty <= diffThreshold and not self.haveAllMinorTypes():
self.log.debug("not all minors types")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMinArea == self.lastArea and nextMinDifficulty <= diffThreshold:
self.log.debug("not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMinDifficulty > diffThreshold and nextMajDifficulty > diffThreshold:
# take the easier
if nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
elif nextMajDifficulty < nextMinDifficulty:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
# take the closer one
elif nextMajDistance != nextMinDistance:
self.log.debug("!= distance and > diffThreshold")
if nextMajDistance < nextMinDistance:
return self.collectMajor(majorsAvailable.pop(0))
else:
return self.collectMinor(minorsAvailable.pop(0))
# same difficulty and distance for minor and major, take major first
else:
return self.collectMajor(majorsAvailable.pop(0))
else:
if nextMinDifficulty < nextMajDifficulty:
self.log.debug("min easier and not enough minors")
return self.collectMinor(minorsAvailable.pop(0))
else:
self.log.debug("maj easier")
return self.collectMajor(majorsAvailable.pop(0))
raise Exception("Can't take a decision")
def checkMB(self, mbLoc, justCheck=False):
# add mother brain loc and check if it's accessible
self.majorLocations.append(mbLoc)
self.computeLocationsDifficulty(self.majorLocations)
if justCheck:
self.majorLocations.remove(mbLoc)
return mbLoc.difficulty == True
if mbLoc.difficulty == True:
self.log.debug("MB loc accessible")
self.collectMajor(mbLoc)
self.motherBrainKilled = True
else:
self.log.debug("MB loc not accessible")
self.majorLocations.remove(mbLoc)
self.motherBrainKilled = False
return self.motherBrainKilled
def computeDifficulty(self):
# loop on the available locations depending on the collected items.
# before getting a new item, loop on all of them and get their difficulty,
# the next collected item is the one with the smallest difficulty,
# if equality between major and minor, take major first.
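        # In short: repeatedly evaluate every reachable location, pick one via
        # nextDecision(), and stop when the end-game checks pass or no progress is
        # possible (rollbacks are handled through self.comeBack).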
        # remove mother brain location (there are item pickup conditions on top of going to the mother brain location)
mbLoc = self.getLoc('Mother Brain')
self.locations.remove(mbLoc)
if self.majorsSplit == 'Major':
self.majorLocations = [loc for loc in self.locations if loc.isMajor() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if loc.isMinor()]
elif self.majorsSplit == 'Chozo':
self.majorLocations = [loc for loc in self.locations if loc.isChozo() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if not loc.isChozo() and not loc.isBoss()]
elif self.majorsSplit == 'Scavenger':
self.majorLocations = [loc for loc in self.locations if loc.isScavenger() or loc.isBoss()]
self.minorLocations = [loc for loc in self.locations if not loc.isScavenger() and not loc.isBoss()]
else:
# Full
self.majorLocations = self.locations[:] # copy
self.minorLocations = self.majorLocations
self.visitedLocations = []
self.collectedItems = []
self.log.debug("{}: available major: {}, available minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
isEndPossible = False
endDifficulty = mania
diffThreshold = self.getDiffThreshold()
self.motherBrainKilled = False
self.motherBrainCouldBeKilled = False
while True:
# actual while condition
hasEnoughMinors = self.pickup.enoughMinors(self.smbm, self.minorLocations)
hasEnoughMajors = self.pickup.enoughMajors(self.smbm, self.majorLocations)
hasEnoughItems = hasEnoughMajors and hasEnoughMinors
canEndGame = self.canEndGame()
(isEndPossible, endDifficulty) = (canEndGame.bool, canEndGame.difficulty)
if isEndPossible and hasEnoughItems and self.scavengerHuntComplete():
if endDifficulty <= diffThreshold:
if self.checkMB(mbLoc):
self.log.debug("checkMB: all end game checks are ok, END")
break
else:
self.log.debug("checkMB: canEnd but MB loc not accessible")
else:
if not self.motherBrainCouldBeKilled:
self.motherBrainCouldBeKilled = self.checkMB(mbLoc, justCheck=True)
self.log.debug("checkMB: end checks ok except MB difficulty, MB could be killed: {}".format(self.motherBrainCouldBeKilled))
# check time limit
if self.runtimeLimit_s > 0:
if time.process_time() - self.startTime > self.runtimeLimit_s:
self.log.debug("time limit exceeded ({})".format(self.runtimeLimit_s))
return (-1, False)
self.log.debug("Current AP/Area: {}/{}".format(self.lastAP, self.lastArea))
# compute the difficulty of all the locations
self.computeLocationsDifficulty(self.majorLocations)
if self.majorsSplit != 'Full':
self.computeLocationsDifficulty(self.minorLocations, phase="minor")
# keep only the available locations
majorsAvailable = [loc for loc in self.majorLocations if loc.difficulty is not None and loc.difficulty.bool == True]
minorsAvailable = [loc for loc in self.minorLocations if loc.difficulty is not None and loc.difficulty.bool == True]
self.nbAvailLocs.append(len(self.getAllLocs(majorsAvailable, minorsAvailable)))
# remove next scavenger locs before checking if we're stuck
if self.majorsSplit == 'Scavenger':
majorsAvailable = self.filterScavengerLocs(majorsAvailable)
# check if we're stuck
if len(majorsAvailable) == 0 and len(minorsAvailable) == 0:
if not isEndPossible:
self.log.debug("STUCK MAJORS and MINORS")
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
                        # we're really stuck
self.log.debug("STUCK CAN'T REWIND")
break
else:
self.log.debug("HARD END 2")
if self.checkMB(mbLoc):
self.log.debug("all end game checks are ok, END")
break
else:
                        self.log.debug("We're stuck somewhere and can't reach mother brain")
# check if we were able to access MB and kill it.
# we do it before rollbacks to avoid endless rollbacks.
if self.motherBrainCouldBeKilled:
                            self.log.debug("we're stuck but we could have killed MB before")
self.motherBrainKilled = True
break
else:
                            # we're really stuck, try to rollback
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
self.log.debug("We could end but we're STUCK CAN'T REWIND")
return (-1, False)
# handle no comeback locations
rewindRequired = self.comeBack.handleNoComeBack(self.getAllLocs(majorsAvailable, minorsAvailable),
len(self.collectedItems))
if rewindRequired == True:
if self.comeBack.rewind(len(self.collectedItems)) == True:
continue
else:
                    # we're really stuck
self.log.debug("STUCK CAN'T REWIND")
break
# sort them on difficulty and proximity
self.log.debug("getAvailableItemsList majors")
majorsAvailable = self.getAvailableItemsList(majorsAvailable, diffThreshold)
if self.majorsSplit == 'Full':
minorsAvailable = majorsAvailable
else:
self.log.debug("getAvailableItemsList minors")
minorsAvailable = self.getAvailableItemsList(minorsAvailable, diffThreshold)
# choose one to pick up
self.nextDecision(majorsAvailable, minorsAvailable, hasEnoughMinors, diffThreshold)
self.comeBack.cleanNoComeBack(self.getAllLocs(self.majorLocations, self.minorLocations))
# compute difficulty value
(difficulty, itemsOk) = self.computeDifficultyValue()
if self.log.getEffectiveLevel() == logging.DEBUG:
self.log.debug("difficulty={}".format(difficulty))
self.log.debug("itemsOk={}".format(itemsOk))
self.log.debug("{}: remaining major: {}, remaining minor: {}, visited: {}".format(Conf.itemsPickup, len(self.majorLocations), len(self.minorLocations), len(self.visitedLocations)))
self.log.debug("remaining majors:")
for loc in self.majorLocations:
self.log.debug("{} ({})".format(loc.Name, loc.itemName))
self.log.debug("bosses: {}".format([(boss, Bosses.bossDead(self.smbm, boss)) for boss in Bosses.Golden4()]))
return (difficulty, itemsOk)
def haveAllMinorTypes(self):
        # the first minor of each type can be seen as a major, so check for them first before going too far in Zebes
hasPB = 'PowerBomb' in self.collectedItems
hasSuper = 'Super' in self.collectedItems
hasMissile = 'Missile' in self.collectedItems
return (hasPB and hasSuper and hasMissile)
def canEndGame(self):
# to finish the game you must:
# - beat golden 4
# - defeat metroids
# - destroy/skip the zebetites
# - beat Mother Brain
return self.smbm.wand(Bosses.allBossesDead(self.smbm), self.smbm.enoughStuffTourian())
def getAllLocs(self, majorsAvailable, minorsAvailable):
if self.majorsSplit == 'Full':
return majorsAvailable
else:
return majorsAvailable+minorsAvailable
def computeDifficultyValue(self):
if not self.canEndGame() or not self.motherBrainKilled:
# we have aborted
return (-1, False)
else:
# return the maximum difficulty
difficultyMax = 0
for loc in self.visitedLocations:
difficultyMax = max(difficultyMax, loc.difficulty.difficulty)
difficulty = difficultyMax
# check if we have taken all the requested items
if (self.pickup.enoughMinors(self.smbm, self.minorLocations)
and self.pickup.enoughMajors(self.smbm, self.majorLocations)):
return (difficulty, True)
else:
# can finish but can't take all the requested items
return (difficulty, False)
def filterScavengerLocs(self, majorsAvailable):
# check where we are in the scavenger hunt
huntInProgress = False
for index, loc in enumerate(self.scavengerOrder):
if loc not in self.visitedLocations:
huntInProgress = True
break
if huntInProgress and index < len(self.scavengerOrder)-1:
self.log.debug("Scavenger hunt in progress, {}/{}".format(index, len(self.scavengerOrder)-1))
# remove all next locs in the hunt
nextHuntLocs = self.scavengerOrder[index+1:]
for loc in nextHuntLocs:
self.log.debug("Scavenger hunt, try to remove loc {}".format(loc.Name))
try:
majorsAvailable.remove(loc)
except:
pass
return majorsAvailable
def scavengerHuntComplete(self):
if self.majorsSplit != 'Scavenger':
return True
else:
# check that last loc from the scavenger hunt list has been visited
lastLoc = self.scavengerOrder[-1]
return lastLoc in self.visitedLocations
| gpl-3.0 |
mitocw/edx-platform | openedx/core/lib/blockstore_api/tests/test_blockstore_api.py | 4 | 8710 | # -*- coding: utf-8 -*-
"""
Tests for the Blockstore API client (blockstore_api.py).
"""
import unittest
from uuid import UUID
from django.conf import settings
from openedx.core.lib import blockstore_api as api
# A fake UUID that won't represent any real bundle/draft/collection:
BAD_UUID = UUID('12345678-0000-0000-0000-000000000000')
@unittest.skipUnless(settings.RUN_BLOCKSTORE_TESTS, "Requires a running Blockstore server")
class BlockstoreApiClientTest(unittest.TestCase):
"""
Test for the Blockstore API Client.
The goal of these tests is not to test that Blockstore works correctly, but
that the API client can interact with it and all the API client methods
work.
"""
# Collections
def test_nonexistent_collection(self):
""" Request a collection that doesn't exist -> CollectionNotFound """
with self.assertRaises(api.CollectionNotFound):
api.get_collection(BAD_UUID)
def test_collection_crud(self):
""" Create, Fetch, Update, and Delete a Collection """
title = "Fire 🔥 Collection"
# Create:
coll = api.create_collection(title)
self.assertEqual(coll.title, title)
self.assertIsInstance(coll.uuid, UUID)
# Fetch:
coll2 = api.get_collection(coll.uuid)
self.assertEqual(coll, coll2)
# Update:
new_title = "Air 🌀 Collection"
coll3 = api.update_collection(coll.uuid, title=new_title)
self.assertEqual(coll3.title, new_title)
coll4 = api.get_collection(coll.uuid)
self.assertEqual(coll4.title, new_title)
# Delete:
api.delete_collection(coll.uuid)
with self.assertRaises(api.CollectionNotFound):
api.get_collection(coll.uuid)
# Bundles
def test_nonexistent_bundle(self):
""" Request a bundle that doesn't exist -> BundleNotFound """
with self.assertRaises(api.BundleNotFound):
api.get_bundle(BAD_UUID)
def test_bundle_crud(self):
""" Create, Fetch, Update, and Delete a Bundle """
coll = api.create_collection("Test Collection")
args = {
"title": "Water 💧 Bundle",
"slug": "h2o",
"description": "Sploosh",
}
# Create:
bundle = api.create_bundle(coll.uuid, **args)
for attr, value in args.items():
self.assertEqual(getattr(bundle, attr), value)
self.assertIsInstance(bundle.uuid, UUID)
# Fetch:
bundle2 = api.get_bundle(bundle.uuid)
self.assertEqual(bundle, bundle2)
# Update:
new_description = "Water Nation Bending Lessons"
bundle3 = api.update_bundle(bundle.uuid, description=new_description)
self.assertEqual(bundle3.description, new_description)
bundle4 = api.get_bundle(bundle.uuid)
self.assertEqual(bundle4.description, new_description)
# Delete:
api.delete_bundle(bundle.uuid)
with self.assertRaises(api.BundleNotFound):
api.get_bundle(bundle.uuid)
# Drafts, files, and reading/writing file contents:
def test_nonexistent_draft(self):
""" Request a draft that doesn't exist -> DraftNotFound """
with self.assertRaises(api.DraftNotFound):
api.get_draft(BAD_UUID)
def test_drafts_and_files(self):
"""
Test creating, reading, writing, committing, and reverting drafts and
files.
"""
coll = api.create_collection("Test Collection")
bundle = api.create_bundle(coll.uuid, title="Earth 🗿 Bundle", slug="earth", description="another test bundle")
# Create a draft
draft = api.get_or_create_bundle_draft(bundle.uuid, draft_name="test-draft")
self.assertEqual(draft.bundle_uuid, bundle.uuid)
self.assertEqual(draft.name, "test-draft")
self.assertGreaterEqual(draft.updated_at.year, 2019)
# And retrieve it again:
draft2 = api.get_or_create_bundle_draft(bundle.uuid, draft_name="test-draft")
self.assertEqual(draft, draft2)
# Also test retrieving using get_draft
draft3 = api.get_draft(draft.uuid)
self.assertEqual(draft, draft3)
# Write a file into the bundle:
api.write_draft_file(draft.uuid, "test.txt", b"initial version")
# Now the file should be visible in the draft:
draft_contents = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
self.assertEqual(draft_contents, b"initial version")
api.commit_draft(draft.uuid)
# Write a new version into the draft:
api.write_draft_file(draft.uuid, "test.txt", b"modified version")
published_contents = api.get_bundle_file_data(bundle.uuid, "test.txt")
self.assertEqual(published_contents, b"initial version")
draft_contents2 = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
self.assertEqual(draft_contents2, b"modified version")
# Now delete the draft:
api.delete_draft(draft.uuid)
draft_contents3 = api.get_bundle_file_data(bundle.uuid, "test.txt", use_draft=draft.name)
# Confirm the file is now reset:
self.assertEqual(draft_contents3, b"initial version")
        # Finally, test the get_bundle_file* methods:
file_info1 = api.get_bundle_file_metadata(bundle.uuid, "test.txt")
self.assertEqual(file_info1.path, "test.txt")
self.assertEqual(file_info1.size, len(b"initial version"))
self.assertEqual(file_info1.hash_digest, "a45a5c6716276a66c4005534a51453ab16ea63c4")
self.assertEqual(list(api.get_bundle_files(bundle.uuid)), [file_info1])
self.assertEqual(api.get_bundle_files_dict(bundle.uuid), {
"test.txt": file_info1,
})
# Links
def test_links(self):
"""
Test operations involving bundle links.
"""
coll = api.create_collection("Test Collection")
# Create two library bundles and a course bundle:
lib1_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="lib1")
lib1_draft = api.get_or_create_bundle_draft(lib1_bundle.uuid, draft_name="test-draft")
lib2_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="lib2")
lib2_draft = api.get_or_create_bundle_draft(lib2_bundle.uuid, draft_name="other-draft")
course_bundle = api.create_bundle(coll.uuid, title="Library 1", slug="course")
course_draft = api.get_or_create_bundle_draft(course_bundle.uuid, draft_name="test-draft")
# To create links, we need valid BundleVersions, which requires having committed at least one change:
api.write_draft_file(lib1_draft.uuid, "lib1-data.txt", "hello world")
api.commit_draft(lib1_draft.uuid) # Creates version 1
api.write_draft_file(lib2_draft.uuid, "lib2-data.txt", "hello world")
api.commit_draft(lib2_draft.uuid) # Creates version 1
# Lib2 has no links:
self.assertFalse(api.get_bundle_links(lib2_bundle.uuid))
# Create a link from lib2 to lib1
link1_name = "lib2_to_lib1"
api.set_draft_link(lib2_draft.uuid, link1_name, lib1_bundle.uuid, version=1)
# Now confirm the link exists in the draft:
lib2_draft_links = api.get_bundle_links(lib2_bundle.uuid, use_draft=lib2_draft.name)
self.assertIn(link1_name, lib2_draft_links)
self.assertEqual(lib2_draft_links[link1_name].direct.bundle_uuid, lib1_bundle.uuid)
self.assertEqual(lib2_draft_links[link1_name].direct.version, 1)
# Now commit the change to lib2:
api.commit_draft(lib2_draft.uuid) # Creates version 2
# Now create a link from course to lib2
link2_name = "course_to_lib2"
api.set_draft_link(course_draft.uuid, link2_name, lib2_bundle.uuid, version=2)
api.commit_draft(course_draft.uuid)
# And confirm the link exists in the resulting bundle version:
course_links = api.get_bundle_links(course_bundle.uuid)
self.assertIn(link2_name, course_links)
self.assertEqual(course_links[link2_name].direct.bundle_uuid, lib2_bundle.uuid)
self.assertEqual(course_links[link2_name].direct.version, 2)
# And since the links go course->lib2->lib1, course has an indirect link to lib1:
self.assertEqual(course_links[link2_name].indirect[0].bundle_uuid, lib1_bundle.uuid)
self.assertEqual(course_links[link2_name].indirect[0].version, 1)
# Finally, test deleting a link from course's draft:
api.set_draft_link(course_draft.uuid, link2_name, None, None)
self.assertFalse(api.get_bundle_links(course_bundle.uuid, use_draft=course_draft.name))
| agpl-3.0 |
OSURoboticsClub/underwater | manual-control/test.py | 1 | 1354 | #!/usr/bin/env python2
import ctypes
from ctypes import byref
from time import sleep
class SensorData(ctypes.Structure):
_fields_ = (
('a', ctypes.c_uint16),
('b', ctypes.c_uint16),
('c', ctypes.c_uint8),
('d', ctypes.c_double),
('e', ctypes.c_uint32),
)
class ThrusterData(ctypes.Structure):
_fields_ = (
('ls', ctypes.c_uint8),
('rs', ctypes.c_uint8),
('fl', ctypes.c_uint16),
('fr', ctypes.c_uint16),
('bl', ctypes.c_uint16),
('br', ctypes.c_uint16),
)
class Robot(ctypes.Structure):
_fields_ = (
('state', ctypes.c_void_p),
)
w = ctypes.cdll.LoadLibrary('./libworker.so')
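# Declare the C function signatures up front so ctypes marshals the structure
# arguments and return values correctly (the default return type is int).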
w.init.argtypes = ()
w.init.restype = Robot
w.wait_for_tick.argtypes = (ctypes.POINTER(Robot),)
w.wait_for_tick.restype = SensorData
w.set_thruster_data.argtypes = (
ctypes.POINTER(Robot), ctypes.POINTER(ThrusterData))
w.set_thruster_data.restype = None
robot = w.init()
thruster_data = ThrusterData(20, 20, 800, 300, 800, 300)
while True:
    sensor_data = w.wait_for_tick(byref(robot))
thruster_data.ls += 1
thruster_data.rs += 1
thruster_data.fl += 1
thruster_data.fr += 1
thruster_data.bl += 1
thruster_data.br += 1
sleep(.3)
w.set_thruster_data(byref(robot), byref(thruster_data))
print
| mit |
partofthething/home-assistant | homeassistant/components/soma/config_flow.py | 22 | 2346 | """Config flow for Soma."""
import logging
from api.soma_api import SomaApi
from requests import RequestException
import voluptuous as vol
from homeassistant import config_entries
from homeassistant.const import CONF_HOST, CONF_PORT
from .const import DOMAIN
_LOGGER = logging.getLogger(__name__)
DEFAULT_PORT = 3000
class SomaFlowHandler(config_entries.ConfigFlow, domain=DOMAIN):
"""Handle a config flow."""
VERSION = 1
CONNECTION_CLASS = config_entries.CONN_CLASS_LOCAL_POLL
def __init__(self):
"""Instantiate config flow."""
async def async_step_user(self, user_input=None):
"""Handle a flow start."""
if user_input is None:
data = {
vol.Required(CONF_HOST): str,
vol.Required(CONF_PORT, default=DEFAULT_PORT): int,
}
return self.async_show_form(step_id="user", data_schema=vol.Schema(data))
return await self.async_step_creation(user_input)
async def async_step_creation(self, user_input=None):
"""Finish config flow."""
api = SomaApi(user_input["host"], user_input["port"])
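        # Probe the SOMA Connect bridge by listing its devices; the config entry is
        # only created when the call succeeds, otherwise the flow aborts below.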
try:
result = await self.hass.async_add_executor_job(api.list_devices)
_LOGGER.info("Successfully set up Soma Connect")
if result["result"] == "success":
return self.async_create_entry(
title="Soma Connect",
data={"host": user_input["host"], "port": user_input["port"]},
)
_LOGGER.error(
"Connection to SOMA Connect failed (result:%s)", result["result"]
)
return self.async_abort(reason="result_error")
except RequestException:
_LOGGER.error("Connection to SOMA Connect failed with RequestException")
return self.async_abort(reason="connection_error")
except KeyError:
_LOGGER.error("Connection to SOMA Connect failed with KeyError")
return self.async_abort(reason="connection_error")
async def async_step_import(self, user_input=None):
"""Handle flow start from existing config section."""
if self.hass.config_entries.async_entries(DOMAIN):
return self.async_abort(reason="already_setup")
return await self.async_step_creation(user_input)
| mit |
gcode-mirror/audacity | lib-src/lv2/lv2/plugins/eg03-metro.lv2/waflib/Scripting.py | 85 | 10612 | #! /usr/bin/env python
# encoding: utf-8
# WARNING! Do not edit! http://waf.googlecode.com/git/docs/wafbook/single.html#_obtaining_the_waf_file
import os,shlex,shutil,traceback,errno,sys,stat
from waflib import Utils,Configure,Logs,Options,ConfigSet,Context,Errors,Build,Node
build_dir_override=None
no_climb_commands=['configure']
default_cmd="build"
def waf_entry_point(current_directory,version,wafdir):
Logs.init_log()
if Context.WAFVERSION!=version:
Logs.error('Waf script %r and library %r do not match (directory %r)'%(version,Context.WAFVERSION,wafdir))
sys.exit(1)
if'--version'in sys.argv:
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Context.waf_dir=wafdir
Context.launch_dir=current_directory
no_climb=os.environ.get('NOCLIMB',None)
if not no_climb:
for k in no_climb_commands:
if k in sys.argv:
no_climb=True
break
cur=current_directory
while cur:
lst=os.listdir(cur)
if Options.lockfile in lst:
env=ConfigSet.ConfigSet()
try:
env.load(os.path.join(cur,Options.lockfile))
ino=os.stat(cur)[stat.ST_INO]
except Exception:
pass
else:
for x in[env.run_dir,env.top_dir,env.out_dir]:
if Utils.is_win32:
if cur==x:
load=True
break
else:
try:
ino2=os.stat(x)[stat.ST_INO]
except OSError:
pass
else:
if ino==ino2:
load=True
break
else:
Logs.warn('invalid lock file in %s'%cur)
load=False
if load:
Context.run_dir=env.run_dir
Context.top_dir=env.top_dir
Context.out_dir=env.out_dir
break
if not Context.run_dir:
if Context.WSCRIPT_FILE in lst:
Context.run_dir=cur
next=os.path.dirname(cur)
if next==cur:
break
cur=next
if no_climb:
break
if not Context.run_dir:
if'-h'in sys.argv or'--help'in sys.argv:
Logs.warn('No wscript file found: the help message may be incomplete')
Context.run_dir=current_directory
ctx=Context.create_context('options')
ctx.curdir=current_directory
ctx.parse_args()
sys.exit(0)
Logs.error('Waf: Run from a directory containing a file named %r'%Context.WSCRIPT_FILE)
sys.exit(1)
try:
os.chdir(Context.run_dir)
except OSError:
Logs.error('Waf: The folder %r is unreadable'%Context.run_dir)
sys.exit(1)
try:
set_main_module(Context.run_dir+os.sep+Context.WSCRIPT_FILE)
except Errors.WafError ,e:
Logs.pprint('RED',e.verbose_msg)
Logs.error(str(e))
sys.exit(1)
except Exception ,e:
Logs.error('Waf: The wscript in %r is unreadable'%Context.run_dir,e)
traceback.print_exc(file=sys.stdout)
sys.exit(2)
try:
run_commands()
except Errors.WafError ,e:
if Logs.verbose>1:
Logs.pprint('RED',e.verbose_msg)
Logs.error(e.msg)
sys.exit(1)
except SystemExit:
raise
except Exception ,e:
traceback.print_exc(file=sys.stdout)
sys.exit(2)
except KeyboardInterrupt:
Logs.pprint('RED','Interrupted')
sys.exit(68)
def set_main_module(file_path):
Context.g_module=Context.load_module(file_path)
Context.g_module.root_path=file_path
def set_def(obj):
name=obj.__name__
if not name in Context.g_module.__dict__:
setattr(Context.g_module,name,obj)
for k in[update,dist,distclean,distcheck,update]:
set_def(k)
if not'init'in Context.g_module.__dict__:
Context.g_module.init=Utils.nada
if not'shutdown'in Context.g_module.__dict__:
Context.g_module.shutdown=Utils.nada
if not'options'in Context.g_module.__dict__:
Context.g_module.options=Utils.nada
def parse_options():
Context.create_context('options').execute()
if not Options.commands:
Options.commands=[default_cmd]
Options.commands=[x for x in Options.commands if x!='options']
Logs.verbose=Options.options.verbose
Logs.init_log()
if Options.options.zones:
Logs.zones=Options.options.zones.split(',')
if not Logs.verbose:
Logs.verbose=1
elif Logs.verbose>0:
Logs.zones=['runner']
if Logs.verbose>2:
Logs.zones=['*']
def run_command(cmd_name):
ctx=Context.create_context(cmd_name)
ctx.log_timer=Utils.Timer()
ctx.options=Options.options
ctx.cmd=cmd_name
ctx.execute()
return ctx
def run_commands():
parse_options()
run_command('init')
while Options.commands:
cmd_name=Options.commands.pop(0)
ctx=run_command(cmd_name)
Logs.info('%r finished successfully (%s)'%(cmd_name,str(ctx.log_timer)))
run_command('shutdown')
def _can_distclean(name):
for k in'.o .moc .exe'.split():
if name.endswith(k):
return True
return False
def distclean_dir(dirname):
for(root,dirs,files)in os.walk(dirname):
for f in files:
if _can_distclean(f):
fname=root+os.sep+f
try:
os.remove(fname)
except OSError:
Logs.warn('Could not remove %r'%fname)
for x in[Context.DBFILE,'config.log']:
try:
os.remove(x)
except OSError:
pass
try:
shutil.rmtree('c4che')
except OSError:
pass
def distclean(ctx):
'''removes the build directory'''
lst=os.listdir('.')
for f in lst:
if f==Options.lockfile:
try:
proj=ConfigSet.ConfigSet(f)
except IOError:
Logs.warn('Could not read %r'%f)
continue
if proj['out_dir']!=proj['top_dir']:
try:
shutil.rmtree(proj['out_dir'])
except IOError:
pass
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('project %r cannot be removed'%proj[Context.OUT])
else:
distclean_dir(proj['out_dir'])
for k in(proj['out_dir'],proj['top_dir'],proj['run_dir']):
try:
os.remove(os.path.join(k,Options.lockfile))
except OSError ,e:
if e.errno!=errno.ENOENT:
Logs.warn('file %r cannot be removed'%f)
if f.startswith('.waf')and not Options.commands:
shutil.rmtree(f,ignore_errors=True)
class Dist(Context.Context):
'''creates an archive containing the project source code'''
cmd='dist'
fun='dist'
algo='tar.bz2'
ext_algo={}
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
def archive(self):
import tarfile
arch_name=self.get_arch_name()
try:
self.base_path
except AttributeError:
self.base_path=self.path
node=self.base_path.make_node(arch_name)
try:
node.delete()
except Exception:
pass
files=self.get_files()
if self.algo.startswith('tar.'):
tar=tarfile.open(arch_name,'w:'+self.algo.replace('tar.',''))
for x in files:
self.add_tar_file(x,tar)
tar.close()
elif self.algo=='zip':
import zipfile
zip=zipfile.ZipFile(arch_name,'w',compression=zipfile.ZIP_DEFLATED)
for x in files:
archive_name=self.get_base_name()+'/'+x.path_from(self.base_path)
zip.write(x.abspath(),archive_name,zipfile.ZIP_DEFLATED)
zip.close()
else:
self.fatal('Valid algo types are tar.bz2, tar.gz or zip')
try:
from hashlib import sha1 as sha
except ImportError:
from sha import sha
try:
digest=" (sha=%r)"%sha(node.read()).hexdigest()
except Exception:
digest=''
Logs.info('New archive created: %s%s'%(self.arch_name,digest))
def get_tar_path(self,node):
return node.abspath()
def add_tar_file(self,x,tar):
p=self.get_tar_path(x)
tinfo=tar.gettarinfo(name=p,arcname=self.get_tar_prefix()+'/'+x.path_from(self.base_path))
tinfo.uid=0
tinfo.gid=0
tinfo.uname='root'
tinfo.gname='root'
fu=None
try:
fu=open(p,'rb')
tar.addfile(tinfo,fileobj=fu)
finally:
if fu:
fu.close()
def get_tar_prefix(self):
try:
return self.tar_prefix
except AttributeError:
return self.get_base_name()
def get_arch_name(self):
try:
self.arch_name
except AttributeError:
self.arch_name=self.get_base_name()+'.'+self.ext_algo.get(self.algo,self.algo)
return self.arch_name
def get_base_name(self):
try:
self.base_name
except AttributeError:
appname=getattr(Context.g_module,Context.APPNAME,'noname')
version=getattr(Context.g_module,Context.VERSION,'1.0')
self.base_name=appname+'-'+version
return self.base_name
def get_excl(self):
try:
return self.excl
except AttributeError:
self.excl=Node.exclude_regs+' **/waf-1.7.* **/.waf-1.7* **/waf3-1.7.* **/.waf3-1.7* **/*~ **/*.rej **/*.orig **/*.pyc **/*.pyo **/*.bak **/*.swp **/.lock-w*'
nd=self.root.find_node(Context.out_dir)
if nd:
self.excl+=' '+nd.path_from(self.base_path)
return self.excl
def get_files(self):
try:
files=self.files
except AttributeError:
files=self.base_path.ant_glob('**/*',excl=self.get_excl())
return files
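# Illustration only (not part of the waf sources): a project wscript can tune
# the archive by assigning attributes on the dist context before archive() runs,
# relying on the accessors above, e.g.:
#
#   def dist(ctx):
#       ctx.algo = 'zip'
#       ctx.excl = ' **/.git/** **/*.log'
#       ctx.base_name = 'myproject-snapshot'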
def dist(ctx):
'''makes a tarball for redistributing the sources'''
pass
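# DistCheck reuses Dist to build the archive, then unpacks it and runs
# configure/install/uninstall against a temporary --destdir; it fails if that
# build fails or if any installed files are left behind.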
class DistCheck(Dist):
fun='distcheck'
cmd='distcheck'
def execute(self):
self.recurse([os.path.dirname(Context.g_module.root_path)])
self.archive()
self.check()
def check(self):
import tempfile,tarfile
t=None
try:
t=tarfile.open(self.get_arch_name())
for x in t:
t.extract(x)
finally:
if t:
t.close()
cfg=[]
if Options.options.distcheck_args:
cfg=shlex.split(Options.options.distcheck_args)
else:
cfg=[x for x in sys.argv if x.startswith('-')]
instdir=tempfile.mkdtemp('.inst',self.get_base_name())
ret=Utils.subprocess.Popen([sys.executable,sys.argv[0],'configure','install','uninstall','--destdir='+instdir]+cfg,cwd=self.get_base_name()).wait()
if ret:
raise Errors.WafError('distcheck failed with code %i'%ret)
if os.path.exists(instdir):
raise Errors.WafError('distcheck succeeded, but files were left in %s'%instdir)
shutil.rmtree(self.get_base_name())
def distcheck(ctx):
'''checks if the project compiles (tarball from 'dist')'''
pass
def update(ctx):
'''updates the plugins from the *waflib/extras* directory'''
lst=Options.options.files.split(',')
if not lst:
lst=[x for x in Utils.listdir(Context.waf_dir+'/waflib/extras')if x.endswith('.py')]
for x in lst:
tool=x.replace('.py','')
try:
Configure.download_tool(tool,force=True,ctx=ctx)
except Errors.WafError:
Logs.error('Could not find the tool %s in the remote repository'%x)
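# autoconfigure() wraps BuildContext.execute so that, when Configure.autoconfig
# is enabled, 'configure' is queued automatically whenever the lock file is
# missing, the run directory changed, or the hash of the tracked files differs.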
def autoconfigure(execute_method):
def execute(self):
if not Configure.autoconfig:
return execute_method(self)
env=ConfigSet.ConfigSet()
do_config=False
try:
env.load(os.path.join(Context.top_dir,Options.lockfile))
except Exception:
Logs.warn('Configuring the project')
do_config=True
else:
if env.run_dir!=Context.run_dir:
do_config=True
else:
h=0
for f in env['files']:
h=hash((h,Utils.readf(f,'rb')))
do_config=h!=env.hash
if do_config:
Options.commands.insert(0,self.cmd)
Options.commands.insert(0,'configure')
return
return execute_method(self)
return execute
Build.BuildContext.execute=autoconfigure(Build.BuildContext.execute)
| gpl-2.0 |
Gchorba/Ask | lib/python2.7/site-packages/pip/_vendor/requests/packages/chardet/gb2312freq.py | 3132 | 36011 | ######################## BEGIN LICENSE BLOCK ########################
# The Original Code is Mozilla Communicator client code.
#
# The Initial Developer of the Original Code is
# Netscape Communications Corporation.
# Portions created by the Initial Developer are Copyright (C) 1998
# the Initial Developer. All Rights Reserved.
#
# Contributor(s):
# Mark Pilgrim - port to Python
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2.1 of the License, or (at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this library; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA
# 02110-1301 USA
######################### END LICENSE BLOCK #########################
# GB2312 most frequently used character table
#
# Char to FreqOrder table, from hz6763
# 512 --> 0.79 -- 0.79
# 1024 --> 0.92 -- 0.13
# 2048 --> 0.98 -- 0.06
# 6768 --> 1.00 -- 0.02
#
# Ideal Distribution Ratio = 0.79135/(1-0.79135) = 3.79
# Random Distribution Ratio = 512 / (3755 - 512) = 0.157
#
# Typical Distribution Ratio is about 25% of the ideal one, still much higher than RDR
GB2312_TYPICAL_DISTRIBUTION_RATIO = 0.9
GB2312_TABLE_SIZE = 3760
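# Illustration only (not part of the original chardet module): a character
# distribution analyser typically counts how many observed characters fall in
# the first 512 frequency orders and compares that ratio with the typical
# distribution ratio above.  `orders` is assumed to be a sequence of frequency
# orders already looked up in GB2312CharToFreqOrder.
def _example_gb2312_confidence(orders,
                               table_size=GB2312_TABLE_SIZE,
                               typical=GB2312_TYPICAL_DISTRIBUTION_RATIO):
    freq_chars = sum(1 for o in orders if 0 <= o < 512)
    total_chars = sum(1 for o in orders if 0 <= o < table_size)
    if total_chars <= freq_chars:
        return 0.99  # everything frequent: as confident as the cap allows
    r = float(freq_chars) / ((total_chars - freq_chars) * typical)
    return min(r, 0.99)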
GB2312CharToFreqOrder = (
1671, 749,1443,2364,3924,3807,2330,3921,1704,3463,2691,1511,1515, 572,3191,2205,
2361, 224,2558, 479,1711, 963,3162, 440,4060,1905,2966,2947,3580,2647,3961,3842,
2204, 869,4207, 970,2678,5626,2944,2956,1479,4048, 514,3595, 588,1346,2820,3409,
249,4088,1746,1873,2047,1774, 581,1813, 358,1174,3590,1014,1561,4844,2245, 670,
1636,3112, 889,1286, 953, 556,2327,3060,1290,3141, 613, 185,3477,1367, 850,3820,
1715,2428,2642,2303,2732,3041,2562,2648,3566,3946,1349, 388,3098,2091,1360,3585,
152,1687,1539, 738,1559, 59,1232,2925,2267,1388,1249,1741,1679,2960, 151,1566,
1125,1352,4271, 924,4296, 385,3166,4459, 310,1245,2850, 70,3285,2729,3534,3575,
2398,3298,3466,1960,2265, 217,3647, 864,1909,2084,4401,2773,1010,3269,5152, 853,
3051,3121,1244,4251,1895, 364,1499,1540,2313,1180,3655,2268, 562, 715,2417,3061,
544, 336,3768,2380,1752,4075, 950, 280,2425,4382, 183,2759,3272, 333,4297,2155,
1688,2356,1444,1039,4540, 736,1177,3349,2443,2368,2144,2225, 565, 196,1482,3406,
927,1335,4147, 692, 878,1311,1653,3911,3622,1378,4200,1840,2969,3149,2126,1816,
2534,1546,2393,2760, 737,2494, 13, 447, 245,2747, 38,2765,2129,2589,1079, 606,
360, 471,3755,2890, 404, 848, 699,1785,1236, 370,2221,1023,3746,2074,2026,2023,
2388,1581,2119, 812,1141,3091,2536,1519, 804,2053, 406,1596,1090, 784, 548,4414,
1806,2264,2936,1100, 343,4114,5096, 622,3358, 743,3668,1510,1626,5020,3567,2513,
3195,4115,5627,2489,2991, 24,2065,2697,1087,2719, 48,1634, 315, 68, 985,2052,
198,2239,1347,1107,1439, 597,2366,2172, 871,3307, 919,2487,2790,1867, 236,2570,
1413,3794, 906,3365,3381,1701,1982,1818,1524,2924,1205, 616,2586,2072,2004, 575,
253,3099, 32,1365,1182, 197,1714,2454,1201, 554,3388,3224,2748, 756,2587, 250,
2567,1507,1517,3529,1922,2761,2337,3416,1961,1677,2452,2238,3153, 615, 911,1506,
1474,2495,1265,1906,2749,3756,3280,2161, 898,2714,1759,3450,2243,2444, 563, 26,
3286,2266,3769,3344,2707,3677, 611,1402, 531,1028,2871,4548,1375, 261,2948, 835,
1190,4134, 353, 840,2684,1900,3082,1435,2109,1207,1674, 329,1872,2781,4055,2686,
2104, 608,3318,2423,2957,2768,1108,3739,3512,3271,3985,2203,1771,3520,1418,2054,
1681,1153, 225,1627,2929, 162,2050,2511,3687,1954, 124,1859,2431,1684,3032,2894,
585,4805,3969,2869,2704,2088,2032,2095,3656,2635,4362,2209, 256, 518,2042,2105,
3777,3657, 643,2298,1148,1779, 190, 989,3544, 414, 11,2135,2063,2979,1471, 403,
3678, 126, 770,1563, 671,2499,3216,2877, 600,1179, 307,2805,4937,1268,1297,2694,
252,4032,1448,1494,1331,1394, 127,2256, 222,1647,1035,1481,3056,1915,1048, 873,
3651, 210, 33,1608,2516, 200,1520, 415, 102, 0,3389,1287, 817, 91,3299,2940,
836,1814, 549,2197,1396,1669,2987,3582,2297,2848,4528,1070, 687, 20,1819, 121,
1552,1364,1461,1968,2617,3540,2824,2083, 177, 948,4938,2291, 110,4549,2066, 648,
3359,1755,2110,2114,4642,4845,1693,3937,3308,1257,1869,2123, 208,1804,3159,2992,
2531,2549,3361,2418,1350,2347,2800,2568,1291,2036,2680, 72, 842,1990, 212,1233,
1154,1586, 75,2027,3410,4900,1823,1337,2710,2676, 728,2810,1522,3026,4995, 157,
755,1050,4022, 710, 785,1936,2194,2085,1406,2777,2400, 150,1250,4049,1206, 807,
1910, 534, 529,3309,1721,1660, 274, 39,2827, 661,2670,1578, 925,3248,3815,1094,
4278,4901,4252, 41,1150,3747,2572,2227,4501,3658,4902,3813,3357,3617,2884,2258,
887, 538,4187,3199,1294,2439,3042,2329,2343,2497,1255, 107, 543,1527, 521,3478,
3568, 194,5062, 15, 961,3870,1241,1192,2664, 66,5215,3260,2111,1295,1127,2152,
3805,4135, 901,1164,1976, 398,1278, 530,1460, 748, 904,1054,1966,1426, 53,2909,
509, 523,2279,1534, 536,1019, 239,1685, 460,2353, 673,1065,2401,3600,4298,2272,
1272,2363, 284,1753,3679,4064,1695, 81, 815,2677,2757,2731,1386, 859, 500,4221,
2190,2566, 757,1006,2519,2068,1166,1455, 337,2654,3203,1863,1682,1914,3025,1252,
1409,1366, 847, 714,2834,2038,3209, 964,2970,1901, 885,2553,1078,1756,3049, 301,
1572,3326, 688,2130,1996,2429,1805,1648,2930,3421,2750,3652,3088, 262,1158,1254,
389,1641,1812, 526,1719, 923,2073,1073,1902, 468, 489,4625,1140, 857,2375,3070,
3319,2863, 380, 116,1328,2693,1161,2244, 273,1212,1884,2769,3011,1775,1142, 461,
3066,1200,2147,2212, 790, 702,2695,4222,1601,1058, 434,2338,5153,3640, 67,2360,
4099,2502, 618,3472,1329, 416,1132, 830,2782,1807,2653,3211,3510,1662, 192,2124,
296,3979,1739,1611,3684, 23, 118, 324, 446,1239,1225, 293,2520,3814,3795,2535,
3116, 17,1074, 467,2692,2201, 387,2922, 45,1326,3055,1645,3659,2817, 958, 243,
1903,2320,1339,2825,1784,3289, 356, 576, 865,2315,2381,3377,3916,1088,3122,1713,
1655, 935, 628,4689,1034,1327, 441, 800, 720, 894,1979,2183,1528,5289,2702,1071,
4046,3572,2399,1571,3281, 79, 761,1103, 327, 134, 758,1899,1371,1615, 879, 442,
215,2605,2579, 173,2048,2485,1057,2975,3317,1097,2253,3801,4263,1403,1650,2946,
814,4968,3487,1548,2644,1567,1285, 2, 295,2636, 97, 946,3576, 832, 141,4257,
3273, 760,3821,3521,3156,2607, 949,1024,1733,1516,1803,1920,2125,2283,2665,3180,
1501,2064,3560,2171,1592, 803,3518,1416, 732,3897,4258,1363,1362,2458, 119,1427,
602,1525,2608,1605,1639,3175, 694,3064, 10, 465, 76,2000,4846,4208, 444,3781,
1619,3353,2206,1273,3796, 740,2483, 320,1723,2377,3660,2619,1359,1137,1762,1724,
2345,2842,1850,1862, 912, 821,1866, 612,2625,1735,2573,3369,1093, 844, 89, 937,
930,1424,3564,2413,2972,1004,3046,3019,2011, 711,3171,1452,4178, 428, 801,1943,
432, 445,2811, 206,4136,1472, 730, 349, 73, 397,2802,2547, 998,1637,1167, 789,
396,3217, 154,1218, 716,1120,1780,2819,4826,1931,3334,3762,2139,1215,2627, 552,
3664,3628,3232,1405,2383,3111,1356,2652,3577,3320,3101,1703, 640,1045,1370,1246,
4996, 371,1575,2436,1621,2210, 984,4033,1734,2638, 16,4529, 663,2755,3255,1451,
3917,2257,1253,1955,2234,1263,2951, 214,1229, 617, 485, 359,1831,1969, 473,2310,
750,2058, 165, 80,2864,2419, 361,4344,2416,2479,1134, 796,3726,1266,2943, 860,
2715, 938, 390,2734,1313,1384, 248, 202, 877,1064,2854, 522,3907, 279,1602, 297,
2357, 395,3740, 137,2075, 944,4089,2584,1267,3802, 62,1533,2285, 178, 176, 780,
2440, 201,3707, 590, 478,1560,4354,2117,1075, 30, 74,4643,4004,1635,1441,2745,
776,2596, 238,1077,1692,1912,2844, 605, 499,1742,3947, 241,3053, 980,1749, 936,
2640,4511,2582, 515,1543,2162,5322,2892,2993, 890,2148,1924, 665,1827,3581,1032,
968,3163, 339,1044,1896, 270, 583,1791,1720,4367,1194,3488,3669, 43,2523,1657,
163,2167, 290,1209,1622,3378, 550, 634,2508,2510, 695,2634,2384,2512,1476,1414,
220,1469,2341,2138,2852,3183,2900,4939,2865,3502,1211,3680, 854,3227,1299,2976,
3172, 186,2998,1459, 443,1067,3251,1495, 321,1932,3054, 909, 753,1410,1828, 436,
2441,1119,1587,3164,2186,1258, 227, 231,1425,1890,3200,3942, 247, 959, 725,5254,
2741, 577,2158,2079, 929, 120, 174, 838,2813, 591,1115, 417,2024, 40,3240,1536,
1037, 291,4151,2354, 632,1298,2406,2500,3535,1825,1846,3451, 205,1171, 345,4238,
18,1163, 811, 685,2208,1217, 425,1312,1508,1175,4308,2552,1033, 587,1381,3059,
2984,3482, 340,1316,4023,3972, 792,3176, 519, 777,4690, 918, 933,4130,2981,3741,
90,3360,2911,2200,5184,4550, 609,3079,2030, 272,3379,2736, 363,3881,1130,1447,
286, 779, 357,1169,3350,3137,1630,1220,2687,2391, 747,1277,3688,2618,2682,2601,
1156,3196,5290,4034,3102,1689,3596,3128, 874, 219,2783, 798, 508,1843,2461, 269,
1658,1776,1392,1913,2983,3287,2866,2159,2372, 829,4076, 46,4253,2873,1889,1894,
915,1834,1631,2181,2318, 298, 664,2818,3555,2735, 954,3228,3117, 527,3511,2173,
681,2712,3033,2247,2346,3467,1652, 155,2164,3382, 113,1994, 450, 899, 494, 994,
1237,2958,1875,2336,1926,3727, 545,1577,1550, 633,3473, 204,1305,3072,2410,1956,
2471, 707,2134, 841,2195,2196,2663,3843,1026,4940, 990,3252,4997, 368,1092, 437,
3212,3258,1933,1829, 675,2977,2893, 412, 943,3723,4644,3294,3283,2230,2373,5154,
2389,2241,2661,2323,1404,2524, 593, 787, 677,3008,1275,2059, 438,2709,2609,2240,
2269,2246,1446, 36,1568,1373,3892,1574,2301,1456,3962, 693,2276,5216,2035,1143,
2720,1919,1797,1811,2763,4137,2597,1830,1699,1488,1198,2090, 424,1694, 312,3634,
3390,4179,3335,2252,1214, 561,1059,3243,2295,2561, 975,5155,2321,2751,3772, 472,
1537,3282,3398,1047,2077,2348,2878,1323,3340,3076, 690,2906, 51, 369, 170,3541,
1060,2187,2688,3670,2541,1083,1683, 928,3918, 459, 109,4427, 599,3744,4286, 143,
2101,2730,2490, 82,1588,3036,2121, 281,1860, 477,4035,1238,2812,3020,2716,3312,
1530,2188,2055,1317, 843, 636,1808,1173,3495, 649, 181,1002, 147,3641,1159,2414,
3750,2289,2795, 813,3123,2610,1136,4368, 5,3391,4541,2174, 420, 429,1728, 754,
1228,2115,2219, 347,2223,2733, 735,1518,3003,2355,3134,1764,3948,3329,1888,2424,
1001,1234,1972,3321,3363,1672,1021,1450,1584, 226, 765, 655,2526,3404,3244,2302,
3665, 731, 594,2184, 319,1576, 621, 658,2656,4299,2099,3864,1279,2071,2598,2739,
795,3086,3699,3908,1707,2352,2402,1382,3136,2475,1465,4847,3496,3865,1085,3004,
2591,1084, 213,2287,1963,3565,2250, 822, 793,4574,3187,1772,1789,3050, 595,1484,
1959,2770,1080,2650, 456, 422,2996, 940,3322,4328,4345,3092,2742, 965,2784, 739,
4124, 952,1358,2498,2949,2565, 332,2698,2378, 660,2260,2473,4194,3856,2919, 535,
1260,2651,1208,1428,1300,1949,1303,2942, 433,2455,2450,1251,1946, 614,1269, 641,
1306,1810,2737,3078,2912, 564,2365,1419,1415,1497,4460,2367,2185,1379,3005,1307,
3218,2175,1897,3063, 682,1157,4040,4005,1712,1160,1941,1399, 394, 402,2952,1573,
1151,2986,2404, 862, 299,2033,1489,3006, 346, 171,2886,3401,1726,2932, 168,2533,
47,2507,1030,3735,1145,3370,1395,1318,1579,3609,4560,2857,4116,1457,2529,1965,
504,1036,2690,2988,2405, 745,5871, 849,2397,2056,3081, 863,2359,3857,2096, 99,
1397,1769,2300,4428,1643,3455,1978,1757,3718,1440, 35,4879,3742,1296,4228,2280,
160,5063,1599,2013, 166, 520,3479,1646,3345,3012, 490,1937,1545,1264,2182,2505,
1096,1188,1369,1436,2421,1667,2792,2460,1270,2122, 727,3167,2143, 806,1706,1012,
1800,3037, 960,2218,1882, 805, 139,2456,1139,1521, 851,1052,3093,3089, 342,2039,
744,5097,1468,1502,1585,2087, 223, 939, 326,2140,2577, 892,2481,1623,4077, 982,
3708, 135,2131, 87,2503,3114,2326,1106, 876,1616, 547,2997,2831,2093,3441,4530,
4314, 9,3256,4229,4148, 659,1462,1986,1710,2046,2913,2231,4090,4880,5255,3392,
3274,1368,3689,4645,1477, 705,3384,3635,1068,1529,2941,1458,3782,1509, 100,1656,
2548, 718,2339, 408,1590,2780,3548,1838,4117,3719,1345,3530, 717,3442,2778,3220,
2898,1892,4590,3614,3371,2043,1998,1224,3483, 891, 635, 584,2559,3355, 733,1766,
1729,1172,3789,1891,2307, 781,2982,2271,1957,1580,5773,2633,2005,4195,3097,1535,
3213,1189,1934,5693,3262, 586,3118,1324,1598, 517,1564,2217,1868,1893,4445,3728,
2703,3139,1526,1787,1992,3882,2875,1549,1199,1056,2224,1904,2711,5098,4287, 338,
1993,3129,3489,2689,1809,2815,1997, 957,1855,3898,2550,3275,3057,1105,1319, 627,
1505,1911,1883,3526, 698,3629,3456,1833,1431, 746, 77,1261,2017,2296,1977,1885,
125,1334,1600, 525,1798,1109,2222,1470,1945, 559,2236,1186,3443,2476,1929,1411,
2411,3135,1777,3372,2621,1841,1613,3229, 668,1430,1839,2643,2916, 195,1989,2671,
2358,1387, 629,3205,2293,5256,4439, 123,1310, 888,1879,4300,3021,3605,1003,1162,
3192,2910,2010, 140,2395,2859, 55,1082,2012,2901, 662, 419,2081,1438, 680,2774,
4654,3912,1620,1731,1625,5035,4065,2328, 512,1344, 802,5443,2163,2311,2537, 524,
3399, 98,1155,2103,1918,2606,3925,2816,1393,2465,1504,3773,2177,3963,1478,4346,
180,1113,4655,3461,2028,1698, 833,2696,1235,1322,1594,4408,3623,3013,3225,2040,
3022, 541,2881, 607,3632,2029,1665,1219, 639,1385,1686,1099,2803,3231,1938,3188,
2858, 427, 676,2772,1168,2025, 454,3253,2486,3556, 230,1950, 580, 791,1991,1280,
1086,1974,2034, 630, 257,3338,2788,4903,1017, 86,4790, 966,2789,1995,1696,1131,
259,3095,4188,1308, 179,1463,5257, 289,4107,1248, 42,3413,1725,2288, 896,1947,
774,4474,4254, 604,3430,4264, 392,2514,2588, 452, 237,1408,3018, 988,4531,1970,
3034,3310, 540,2370,1562,1288,2990, 502,4765,1147, 4,1853,2708, 207, 294,2814,
4078,2902,2509, 684, 34,3105,3532,2551, 644, 709,2801,2344, 573,1727,3573,3557,
2021,1081,3100,4315,2100,3681, 199,2263,1837,2385, 146,3484,1195,2776,3949, 997,
1939,3973,1008,1091,1202,1962,1847,1149,4209,5444,1076, 493, 117,5400,2521, 972,
1490,2934,1796,4542,2374,1512,2933,2657, 413,2888,1135,2762,2314,2156,1355,2369,
766,2007,2527,2170,3124,2491,2593,2632,4757,2437, 234,3125,3591,1898,1750,1376,
1942,3468,3138, 570,2127,2145,3276,4131, 962, 132,1445,4196, 19, 941,3624,3480,
3366,1973,1374,4461,3431,2629, 283,2415,2275, 808,2887,3620,2112,2563,1353,3610,
955,1089,3103,1053, 96, 88,4097, 823,3808,1583, 399, 292,4091,3313, 421,1128,
642,4006, 903,2539,1877,2082, 596, 29,4066,1790, 722,2157, 130, 995,1569, 769,
1485, 464, 513,2213, 288,1923,1101,2453,4316, 133, 486,2445, 50, 625, 487,2207,
57, 423, 481,2962, 159,3729,1558, 491, 303, 482, 501, 240,2837, 112,3648,2392,
1783, 362, 8,3433,3422, 610,2793,3277,1390,1284,1654, 21,3823, 734, 367, 623,
193, 287, 374,1009,1483, 816, 476, 313,2255,2340,1262,2150,2899,1146,2581, 782,
2116,1659,2018,1880, 255,3586,3314,1110,2867,2137,2564, 986,2767,5185,2006, 650,
158, 926, 762, 881,3157,2717,2362,3587, 306,3690,3245,1542,3077,2427,1691,2478,
2118,2985,3490,2438, 539,2305, 983, 129,1754, 355,4201,2386, 827,2923, 104,1773,
2838,2771, 411,2905,3919, 376, 767, 122,1114, 828,2422,1817,3506, 266,3460,1007,
1609,4998, 945,2612,4429,2274, 726,1247,1964,2914,2199,2070,4002,4108, 657,3323,
1422, 579, 455,2764,4737,1222,2895,1670, 824,1223,1487,2525, 558, 861,3080, 598,
2659,2515,1967, 752,2583,2376,2214,4180, 977, 704,2464,4999,2622,4109,1210,2961,
819,1541, 142,2284, 44, 418, 457,1126,3730,4347,4626,1644,1876,3671,1864, 302,
1063,5694, 624, 723,1984,3745,1314,1676,2488,1610,1449,3558,3569,2166,2098, 409,
1011,2325,3704,2306, 818,1732,1383,1824,1844,3757, 999,2705,3497,1216,1423,2683,
2426,2954,2501,2726,2229,1475,2554,5064,1971,1794,1666,2014,1343, 783, 724, 191,
2434,1354,2220,5065,1763,2752,2472,4152, 131, 175,2885,3434, 92,1466,4920,2616,
3871,3872,3866, 128,1551,1632, 669,1854,3682,4691,4125,1230, 188,2973,3290,1302,
1213, 560,3266, 917, 763,3909,3249,1760, 868,1958, 764,1782,2097, 145,2277,3774,
4462, 64,1491,3062, 971,2132,3606,2442, 221,1226,1617, 218, 323,1185,3207,3147,
571, 619,1473,1005,1744,2281, 449,1887,2396,3685, 275, 375,3816,1743,3844,3731,
845,1983,2350,4210,1377, 773, 967,3499,3052,3743,2725,4007,1697,1022,3943,1464,
3264,2855,2722,1952,1029,2839,2467, 84,4383,2215, 820,1391,2015,2448,3672, 377,
1948,2168, 797,2545,3536,2578,2645, 94,2874,1678, 405,1259,3071, 771, 546,1315,
470,1243,3083, 895,2468, 981, 969,2037, 846,4181, 653,1276,2928, 14,2594, 557,
3007,2474, 156, 902,1338,1740,2574, 537,2518, 973,2282,2216,2433,1928, 138,2903,
1293,2631,1612, 646,3457, 839,2935, 111, 496,2191,2847, 589,3186, 149,3994,2060,
4031,2641,4067,3145,1870, 37,3597,2136,1025,2051,3009,3383,3549,1121,1016,3261,
1301, 251,2446,2599,2153, 872,3246, 637, 334,3705, 831, 884, 921,3065,3140,4092,
2198,1944, 246,2964, 108,2045,1152,1921,2308,1031, 203,3173,4170,1907,3890, 810,
1401,2003,1690, 506, 647,1242,2828,1761,1649,3208,2249,1589,3709,2931,5156,1708,
498, 666,2613, 834,3817,1231, 184,2851,1124, 883,3197,2261,3710,1765,1553,2658,
1178,2639,2351, 93,1193, 942,2538,2141,4402, 235,1821, 870,1591,2192,1709,1871,
3341,1618,4126,2595,2334, 603, 651, 69, 701, 268,2662,3411,2555,1380,1606, 503,
448, 254,2371,2646, 574,1187,2309,1770, 322,2235,1292,1801, 305, 566,1133, 229,
2067,2057, 706, 167, 483,2002,2672,3295,1820,3561,3067, 316, 378,2746,3452,1112,
136,1981, 507,1651,2917,1117, 285,4591, 182,2580,3522,1304, 335,3303,1835,2504,
1795,1792,2248, 674,1018,2106,2449,1857,2292,2845, 976,3047,1781,2600,2727,1389,
1281, 52,3152, 153, 265,3950, 672,3485,3951,4463, 430,1183, 365, 278,2169, 27,
1407,1336,2304, 209,1340,1730,2202,1852,2403,2883, 979,1737,1062, 631,2829,2542,
3876,2592, 825,2086,2226,3048,3625, 352,1417,3724, 542, 991, 431,1351,3938,1861,
2294, 826,1361,2927,3142,3503,1738, 463,2462,2723, 582,1916,1595,2808, 400,3845,
3891,2868,3621,2254, 58,2492,1123, 910,2160,2614,1372,1603,1196,1072,3385,1700,
3267,1980, 696, 480,2430, 920, 799,1570,2920,1951,2041,4047,2540,1321,4223,2469,
3562,2228,1271,2602, 401,2833,3351,2575,5157, 907,2312,1256, 410, 263,3507,1582,
996, 678,1849,2316,1480, 908,3545,2237, 703,2322, 667,1826,2849,1531,2604,2999,
2407,3146,2151,2630,1786,3711, 469,3542, 497,3899,2409, 858, 837,4446,3393,1274,
786, 620,1845,2001,3311, 484, 308,3367,1204,1815,3691,2332,1532,2557,1842,2020,
2724,1927,2333,4440, 567, 22,1673,2728,4475,1987,1858,1144,1597, 101,1832,3601,
12, 974,3783,4391, 951,1412, 1,3720, 453,4608,4041, 528,1041,1027,3230,2628,
1129, 875,1051,3291,1203,2262,1069,2860,2799,2149,2615,3278, 144,1758,3040, 31,
475,1680, 366,2685,3184, 311,1642,4008,2466,5036,1593,1493,2809, 216,1420,1668,
233, 304,2128,3284, 232,1429,1768,1040,2008,3407,2740,2967,2543, 242,2133, 778,
1565,2022,2620, 505,2189,2756,1098,2273, 372,1614, 708, 553,2846,2094,2278, 169,
3626,2835,4161, 228,2674,3165, 809,1454,1309, 466,1705,1095, 900,3423, 880,2667,
3751,5258,2317,3109,2571,4317,2766,1503,1342, 866,4447,1118, 63,2076, 314,1881,
1348,1061, 172, 978,3515,1747, 532, 511,3970, 6, 601, 905,2699,3300,1751, 276,
1467,3725,2668, 65,4239,2544,2779,2556,1604, 578,2451,1802, 992,2331,2624,1320,
3446, 713,1513,1013, 103,2786,2447,1661, 886,1702, 916, 654,3574,2031,1556, 751,
2178,2821,2179,1498,1538,2176, 271, 914,2251,2080,1325, 638,1953,2937,3877,2432,
2754, 95,3265,1716, 260,1227,4083, 775, 106,1357,3254, 426,1607, 555,2480, 772,
1985, 244,2546, 474, 495,1046,2611,1851,2061, 71,2089,1675,2590, 742,3758,2843,
3222,1433, 267,2180,2576,2826,2233,2092,3913,2435, 956,1745,3075, 856,2113,1116,
451, 3,1988,2896,1398, 993,2463,1878,2049,1341,2718,2721,2870,2108, 712,2904,
4363,2753,2324, 277,2872,2349,2649, 384, 987, 435, 691,3000, 922, 164,3939, 652,
1500,1184,4153,2482,3373,2165,4848,2335,3775,3508,3154,2806,2830,1554,2102,1664,
2530,1434,2408, 893,1547,2623,3447,2832,2242,2532,3169,2856,3223,2078, 49,3770,
3469, 462, 318, 656,2259,3250,3069, 679,1629,2758, 344,1138,1104,3120,1836,1283,
3115,2154,1437,4448, 934, 759,1999, 794,2862,1038, 533,2560,1722,2342, 855,2626,
1197,1663,4476,3127, 85,4240,2528, 25,1111,1181,3673, 407,3470,4561,2679,2713,
768,1925,2841,3986,1544,1165, 932, 373,1240,2146,1930,2673, 721,4766, 354,4333,
391,2963, 187, 61,3364,1442,1102, 330,1940,1767, 341,3809,4118, 393,2496,2062,
2211, 105, 331, 300, 439, 913,1332, 626, 379,3304,1557, 328, 689,3952, 309,1555,
931, 317,2517,3027, 325, 569, 686,2107,3084, 60,1042,1333,2794, 264,3177,4014,
1628, 258,3712, 7,4464,1176,1043,1778, 683, 114,1975, 78,1492, 383,1886, 510,
386, 645,5291,2891,2069,3305,4138,3867,2939,2603,2493,1935,1066,1848,3588,1015,
1282,1289,4609, 697,1453,3044,2666,3611,1856,2412, 54, 719,1330, 568,3778,2459,
1748, 788, 492, 551,1191,1000, 488,3394,3763, 282,1799, 348,2016,1523,3155,2390,
1049, 382,2019,1788,1170, 729,2968,3523, 897,3926,2785,2938,3292, 350,2319,3238,
1718,1717,2655,3453,3143,4465, 161,2889,2980,2009,1421, 56,1908,1640,2387,2232,
1917,1874,2477,4921, 148, 83,3438, 592,4245,2882,1822,1055, 741, 115,1496,1624,
381,1638,4592,1020, 516,3214, 458, 947,4575,1432, 211,1514,2926,1865,2142, 189,
852,1221,1400,1486, 882,2299,4036, 351, 28,1122, 700,6479,6480,6481,6482,6483, # last 512
#Everything below is of no interest for detection purpose
5508,6484,3900,3414,3974,4441,4024,3537,4037,5628,5099,3633,6485,3148,6486,3636,
5509,3257,5510,5973,5445,5872,4941,4403,3174,4627,5873,6276,2286,4230,5446,5874,
5122,6102,6103,4162,5447,5123,5323,4849,6277,3980,3851,5066,4246,5774,5067,6278,
3001,2807,5695,3346,5775,5974,5158,5448,6487,5975,5976,5776,3598,6279,5696,4806,
4211,4154,6280,6488,6489,6490,6281,4212,5037,3374,4171,6491,4562,4807,4722,4827,
5977,6104,4532,4079,5159,5324,5160,4404,3858,5359,5875,3975,4288,4610,3486,4512,
5325,3893,5360,6282,6283,5560,2522,4231,5978,5186,5449,2569,3878,6284,5401,3578,
4415,6285,4656,5124,5979,2506,4247,4449,3219,3417,4334,4969,4329,6492,4576,4828,
4172,4416,4829,5402,6286,3927,3852,5361,4369,4830,4477,4867,5876,4173,6493,6105,
4657,6287,6106,5877,5450,6494,4155,4868,5451,3700,5629,4384,6288,6289,5878,3189,
4881,6107,6290,6495,4513,6496,4692,4515,4723,5100,3356,6497,6291,3810,4080,5561,
3570,4430,5980,6498,4355,5697,6499,4724,6108,6109,3764,4050,5038,5879,4093,3226,
6292,5068,5217,4693,3342,5630,3504,4831,4377,4466,4309,5698,4431,5777,6293,5778,
4272,3706,6110,5326,3752,4676,5327,4273,5403,4767,5631,6500,5699,5880,3475,5039,
6294,5562,5125,4348,4301,4482,4068,5126,4593,5700,3380,3462,5981,5563,3824,5404,
4970,5511,3825,4738,6295,6501,5452,4516,6111,5881,5564,6502,6296,5982,6503,4213,
4163,3454,6504,6112,4009,4450,6113,4658,6297,6114,3035,6505,6115,3995,4904,4739,
4563,4942,4110,5040,3661,3928,5362,3674,6506,5292,3612,4791,5565,4149,5983,5328,
5259,5021,4725,4577,4564,4517,4364,6298,5405,4578,5260,4594,4156,4157,5453,3592,
3491,6507,5127,5512,4709,4922,5984,5701,4726,4289,6508,4015,6116,5128,4628,3424,
4241,5779,6299,4905,6509,6510,5454,5702,5780,6300,4365,4923,3971,6511,5161,3270,
3158,5985,4100, 867,5129,5703,6117,5363,3695,3301,5513,4467,6118,6512,5455,4232,
4242,4629,6513,3959,4478,6514,5514,5329,5986,4850,5162,5566,3846,4694,6119,5456,
4869,5781,3779,6301,5704,5987,5515,4710,6302,5882,6120,4392,5364,5705,6515,6121,
6516,6517,3736,5988,5457,5989,4695,2457,5883,4551,5782,6303,6304,6305,5130,4971,
6122,5163,6123,4870,3263,5365,3150,4871,6518,6306,5783,5069,5706,3513,3498,4409,
5330,5632,5366,5458,5459,3991,5990,4502,3324,5991,5784,3696,4518,5633,4119,6519,
4630,5634,4417,5707,4832,5992,3418,6124,5993,5567,4768,5218,6520,4595,3458,5367,
6125,5635,6126,4202,6521,4740,4924,6307,3981,4069,4385,6308,3883,2675,4051,3834,
4302,4483,5568,5994,4972,4101,5368,6309,5164,5884,3922,6127,6522,6523,5261,5460,
5187,4164,5219,3538,5516,4111,3524,5995,6310,6311,5369,3181,3386,2484,5188,3464,
5569,3627,5708,6524,5406,5165,4677,4492,6312,4872,4851,5885,4468,5996,6313,5709,
5710,6128,2470,5886,6314,5293,4882,5785,3325,5461,5101,6129,5711,5786,6525,4906,
6526,6527,4418,5887,5712,4808,2907,3701,5713,5888,6528,3765,5636,5331,6529,6530,
3593,5889,3637,4943,3692,5714,5787,4925,6315,6130,5462,4405,6131,6132,6316,5262,
6531,6532,5715,3859,5716,5070,4696,5102,3929,5788,3987,4792,5997,6533,6534,3920,
4809,5000,5998,6535,2974,5370,6317,5189,5263,5717,3826,6536,3953,5001,4883,3190,
5463,5890,4973,5999,4741,6133,6134,3607,5570,6000,4711,3362,3630,4552,5041,6318,
6001,2950,2953,5637,4646,5371,4944,6002,2044,4120,3429,6319,6537,5103,4833,6538,
6539,4884,4647,3884,6003,6004,4758,3835,5220,5789,4565,5407,6540,6135,5294,4697,
4852,6320,6321,3206,4907,6541,6322,4945,6542,6136,6543,6323,6005,4631,3519,6544,
5891,6545,5464,3784,5221,6546,5571,4659,6547,6324,6137,5190,6548,3853,6549,4016,
4834,3954,6138,5332,3827,4017,3210,3546,4469,5408,5718,3505,4648,5790,5131,5638,
5791,5465,4727,4318,6325,6326,5792,4553,4010,4698,3439,4974,3638,4335,3085,6006,
5104,5042,5166,5892,5572,6327,4356,4519,5222,5573,5333,5793,5043,6550,5639,5071,
4503,6328,6139,6551,6140,3914,3901,5372,6007,5640,4728,4793,3976,3836,4885,6552,
4127,6553,4451,4102,5002,6554,3686,5105,6555,5191,5072,5295,4611,5794,5296,6556,
5893,5264,5894,4975,5466,5265,4699,4976,4370,4056,3492,5044,4886,6557,5795,4432,
4769,4357,5467,3940,4660,4290,6141,4484,4770,4661,3992,6329,4025,4662,5022,4632,
4835,4070,5297,4663,4596,5574,5132,5409,5895,6142,4504,5192,4664,5796,5896,3885,
5575,5797,5023,4810,5798,3732,5223,4712,5298,4084,5334,5468,6143,4052,4053,4336,
4977,4794,6558,5335,4908,5576,5224,4233,5024,4128,5469,5225,4873,6008,5045,4729,
4742,4633,3675,4597,6559,5897,5133,5577,5003,5641,5719,6330,6560,3017,2382,3854,
4406,4811,6331,4393,3964,4946,6561,2420,3722,6562,4926,4378,3247,1736,4442,6332,
5134,6333,5226,3996,2918,5470,4319,4003,4598,4743,4744,4485,3785,3902,5167,5004,
5373,4394,5898,6144,4874,1793,3997,6334,4085,4214,5106,5642,4909,5799,6009,4419,
4189,3330,5899,4165,4420,5299,5720,5227,3347,6145,4081,6335,2876,3930,6146,3293,
3786,3910,3998,5900,5300,5578,2840,6563,5901,5579,6147,3531,5374,6564,6565,5580,
4759,5375,6566,6148,3559,5643,6336,6010,5517,6337,6338,5721,5902,3873,6011,6339,
6567,5518,3868,3649,5722,6568,4771,4947,6569,6149,4812,6570,2853,5471,6340,6341,
5644,4795,6342,6012,5723,6343,5724,6013,4349,6344,3160,6150,5193,4599,4514,4493,
5168,4320,6345,4927,3666,4745,5169,5903,5005,4928,6346,5725,6014,4730,4203,5046,
4948,3395,5170,6015,4150,6016,5726,5519,6347,5047,3550,6151,6348,4197,4310,5904,
6571,5581,2965,6152,4978,3960,4291,5135,6572,5301,5727,4129,4026,5905,4853,5728,
5472,6153,6349,4533,2700,4505,5336,4678,3583,5073,2994,4486,3043,4554,5520,6350,
6017,5800,4487,6351,3931,4103,5376,6352,4011,4321,4311,4190,5136,6018,3988,3233,
4350,5906,5645,4198,6573,5107,3432,4191,3435,5582,6574,4139,5410,6353,5411,3944,
5583,5074,3198,6575,6354,4358,6576,5302,4600,5584,5194,5412,6577,6578,5585,5413,
5303,4248,5414,3879,4433,6579,4479,5025,4854,5415,6355,4760,4772,3683,2978,4700,
3797,4452,3965,3932,3721,4910,5801,6580,5195,3551,5907,3221,3471,3029,6019,3999,
5908,5909,5266,5267,3444,3023,3828,3170,4796,5646,4979,4259,6356,5647,5337,3694,
6357,5648,5338,4520,4322,5802,3031,3759,4071,6020,5586,4836,4386,5048,6581,3571,
4679,4174,4949,6154,4813,3787,3402,3822,3958,3215,3552,5268,4387,3933,4950,4359,
6021,5910,5075,3579,6358,4234,4566,5521,6359,3613,5049,6022,5911,3375,3702,3178,
4911,5339,4521,6582,6583,4395,3087,3811,5377,6023,6360,6155,4027,5171,5649,4421,
4249,2804,6584,2270,6585,4000,4235,3045,6156,5137,5729,4140,4312,3886,6361,4330,
6157,4215,6158,3500,3676,4929,4331,3713,4930,5912,4265,3776,3368,5587,4470,4855,
3038,4980,3631,6159,6160,4132,4680,6161,6362,3923,4379,5588,4255,6586,4121,6587,
6363,4649,6364,3288,4773,4774,6162,6024,6365,3543,6588,4274,3107,3737,5050,5803,
4797,4522,5589,5051,5730,3714,4887,5378,4001,4523,6163,5026,5522,4701,4175,2791,
3760,6589,5473,4224,4133,3847,4814,4815,4775,3259,5416,6590,2738,6164,6025,5304,
3733,5076,5650,4816,5590,6591,6165,6592,3934,5269,6593,3396,5340,6594,5804,3445,
3602,4042,4488,5731,5732,3525,5591,4601,5196,6166,6026,5172,3642,4612,3202,4506,
4798,6366,3818,5108,4303,5138,5139,4776,3332,4304,2915,3415,4434,5077,5109,4856,
2879,5305,4817,6595,5913,3104,3144,3903,4634,5341,3133,5110,5651,5805,6167,4057,
5592,2945,4371,5593,6596,3474,4182,6367,6597,6168,4507,4279,6598,2822,6599,4777,
4713,5594,3829,6169,3887,5417,6170,3653,5474,6368,4216,2971,5228,3790,4579,6369,
5733,6600,6601,4951,4746,4555,6602,5418,5475,6027,3400,4665,5806,6171,4799,6028,
5052,6172,3343,4800,4747,5006,6370,4556,4217,5476,4396,5229,5379,5477,3839,5914,
5652,5807,4714,3068,4635,5808,6173,5342,4192,5078,5419,5523,5734,6174,4557,6175,
4602,6371,6176,6603,5809,6372,5735,4260,3869,5111,5230,6029,5112,6177,3126,4681,
5524,5915,2706,3563,4748,3130,6178,4018,5525,6604,6605,5478,4012,4837,6606,4534,
4193,5810,4857,3615,5479,6030,4082,3697,3539,4086,5270,3662,4508,4931,5916,4912,
5811,5027,3888,6607,4397,3527,3302,3798,2775,2921,2637,3966,4122,4388,4028,4054,
1633,4858,5079,3024,5007,3982,3412,5736,6608,3426,3236,5595,3030,6179,3427,3336,
3279,3110,6373,3874,3039,5080,5917,5140,4489,3119,6374,5812,3405,4494,6031,4666,
4141,6180,4166,6032,5813,4981,6609,5081,4422,4982,4112,3915,5653,3296,3983,6375,
4266,4410,5654,6610,6181,3436,5082,6611,5380,6033,3819,5596,4535,5231,5306,5113,
6612,4952,5918,4275,3113,6613,6376,6182,6183,5814,3073,4731,4838,5008,3831,6614,
4888,3090,3848,4280,5526,5232,3014,5655,5009,5737,5420,5527,6615,5815,5343,5173,
5381,4818,6616,3151,4953,6617,5738,2796,3204,4360,2989,4281,5739,5174,5421,5197,
3132,5141,3849,5142,5528,5083,3799,3904,4839,5480,2880,4495,3448,6377,6184,5271,
5919,3771,3193,6034,6035,5920,5010,6036,5597,6037,6378,6038,3106,5422,6618,5423,
5424,4142,6619,4889,5084,4890,4313,5740,6620,3437,5175,5307,5816,4199,5198,5529,
5817,5199,5656,4913,5028,5344,3850,6185,2955,5272,5011,5818,4567,4580,5029,5921,
3616,5233,6621,6622,6186,4176,6039,6379,6380,3352,5200,5273,2908,5598,5234,3837,
5308,6623,6624,5819,4496,4323,5309,5201,6625,6626,4983,3194,3838,4167,5530,5922,
5274,6381,6382,3860,3861,5599,3333,4292,4509,6383,3553,5481,5820,5531,4778,6187,
3955,3956,4324,4389,4218,3945,4325,3397,2681,5923,4779,5085,4019,5482,4891,5382,
5383,6040,4682,3425,5275,4094,6627,5310,3015,5483,5657,4398,5924,3168,4819,6628,
5925,6629,5532,4932,4613,6041,6630,4636,6384,4780,4204,5658,4423,5821,3989,4683,
5822,6385,4954,6631,5345,6188,5425,5012,5384,3894,6386,4490,4104,6632,5741,5053,
6633,5823,5926,5659,5660,5927,6634,5235,5742,5824,4840,4933,4820,6387,4859,5928,
4955,6388,4143,3584,5825,5346,5013,6635,5661,6389,5014,5484,5743,4337,5176,5662,
6390,2836,6391,3268,6392,6636,6042,5236,6637,4158,6638,5744,5663,4471,5347,3663,
4123,5143,4293,3895,6639,6640,5311,5929,5826,3800,6189,6393,6190,5664,5348,3554,
3594,4749,4603,6641,5385,4801,6043,5827,4183,6642,5312,5426,4761,6394,5665,6191,
4715,2669,6643,6644,5533,3185,5427,5086,5930,5931,5386,6192,6044,6645,4781,4013,
5745,4282,4435,5534,4390,4267,6045,5746,4984,6046,2743,6193,3501,4087,5485,5932,
5428,4184,4095,5747,4061,5054,3058,3862,5933,5600,6646,5144,3618,6395,3131,5055,
5313,6396,4650,4956,3855,6194,3896,5202,4985,4029,4225,6195,6647,5828,5486,5829,
3589,3002,6648,6397,4782,5276,6649,6196,6650,4105,3803,4043,5237,5830,6398,4096,
3643,6399,3528,6651,4453,3315,4637,6652,3984,6197,5535,3182,3339,6653,3096,2660,
6400,6654,3449,5934,4250,4236,6047,6401,5831,6655,5487,3753,4062,5832,6198,6199,
6656,3766,6657,3403,4667,6048,6658,4338,2897,5833,3880,2797,3780,4326,6659,5748,
5015,6660,5387,4351,5601,4411,6661,3654,4424,5935,4339,4072,5277,4568,5536,6402,
6662,5238,6663,5349,5203,6200,5204,6201,5145,4536,5016,5056,4762,5834,4399,4957,
6202,6403,5666,5749,6664,4340,6665,5936,5177,5667,6666,6667,3459,4668,6404,6668,
6669,4543,6203,6670,4276,6405,4480,5537,6671,4614,5205,5668,6672,3348,2193,4763,
6406,6204,5937,5602,4177,5669,3419,6673,4020,6205,4443,4569,5388,3715,3639,6407,
6049,4058,6206,6674,5938,4544,6050,4185,4294,4841,4651,4615,5488,6207,6408,6051,
5178,3241,3509,5835,6208,4958,5836,4341,5489,5278,6209,2823,5538,5350,5206,5429,
6675,4638,4875,4073,3516,4684,4914,4860,5939,5603,5389,6052,5057,3237,5490,3791,
6676,6409,6677,4821,4915,4106,5351,5058,4243,5539,4244,5604,4842,4916,5239,3028,
3716,5837,5114,5605,5390,5940,5430,6210,4332,6678,5540,4732,3667,3840,6053,4305,
3408,5670,5541,6410,2744,5240,5750,6679,3234,5606,6680,5607,5671,3608,4283,4159,
4400,5352,4783,6681,6411,6682,4491,4802,6211,6412,5941,6413,6414,5542,5751,6683,
4669,3734,5942,6684,6415,5943,5059,3328,4670,4144,4268,6685,6686,6687,6688,4372,
3603,6689,5944,5491,4373,3440,6416,5543,4784,4822,5608,3792,4616,5838,5672,3514,
5391,6417,4892,6690,4639,6691,6054,5673,5839,6055,6692,6056,5392,6212,4038,5544,
5674,4497,6057,6693,5840,4284,5675,4021,4545,5609,6418,4454,6419,6213,4113,4472,
5314,3738,5087,5279,4074,5610,4959,4063,3179,4750,6058,6420,6214,3476,4498,4716,
5431,4960,4685,6215,5241,6694,6421,6216,6695,5841,5945,6422,3748,5946,5179,3905,
5752,5545,5947,4374,6217,4455,6423,4412,6218,4803,5353,6696,3832,5280,6219,4327,
4702,6220,6221,6059,4652,5432,6424,3749,4751,6425,5753,4986,5393,4917,5948,5030,
5754,4861,4733,6426,4703,6697,6222,4671,5949,4546,4961,5180,6223,5031,3316,5281,
6698,4862,4295,4934,5207,3644,6427,5842,5950,6428,6429,4570,5843,5282,6430,6224,
5088,3239,6060,6699,5844,5755,6061,6431,2701,5546,6432,5115,5676,4039,3993,3327,
4752,4425,5315,6433,3941,6434,5677,4617,4604,3074,4581,6225,5433,6435,6226,6062,
4823,5756,5116,6227,3717,5678,4717,5845,6436,5679,5846,6063,5847,6064,3977,3354,
6437,3863,5117,6228,5547,5394,4499,4524,6229,4605,6230,4306,4500,6700,5951,6065,
3693,5952,5089,4366,4918,6701,6231,5548,6232,6702,6438,4704,5434,6703,6704,5953,
4168,6705,5680,3420,6706,5242,4407,6066,3812,5757,5090,5954,4672,4525,3481,5681,
4618,5395,5354,5316,5955,6439,4962,6707,4526,6440,3465,4673,6067,6441,5682,6708,
5435,5492,5758,5683,4619,4571,4674,4804,4893,4686,5493,4753,6233,6068,4269,6442,
6234,5032,4705,5146,5243,5208,5848,6235,6443,4963,5033,4640,4226,6236,5849,3387,
6444,6445,4436,4437,5850,4843,5494,4785,4894,6709,4361,6710,5091,5956,3331,6237,
4987,5549,6069,6711,4342,3517,4473,5317,6070,6712,6071,4706,6446,5017,5355,6713,
6714,4988,5436,6447,4734,5759,6715,4735,4547,4456,4754,6448,5851,6449,6450,3547,
5852,5318,6451,6452,5092,4205,6716,6238,4620,4219,5611,6239,6072,4481,5760,5957,
5958,4059,6240,6453,4227,4537,6241,5761,4030,4186,5244,5209,3761,4457,4876,3337,
5495,5181,6242,5959,5319,5612,5684,5853,3493,5854,6073,4169,5613,5147,4895,6074,
5210,6717,5182,6718,3830,6243,2798,3841,6075,6244,5855,5614,3604,4606,5496,5685,
5118,5356,6719,6454,5960,5357,5961,6720,4145,3935,4621,5119,5962,4261,6721,6455,
4786,5963,4375,4582,6245,6246,6247,6076,5437,4877,5856,3376,4380,6248,4160,6722,
5148,6456,5211,6457,6723,4718,6458,6724,6249,5358,4044,3297,6459,6250,5857,5615,
5497,5245,6460,5498,6725,6251,6252,5550,3793,5499,2959,5396,6461,6462,4572,5093,
5500,5964,3806,4146,6463,4426,5762,5858,6077,6253,4755,3967,4220,5965,6254,4989,
5501,6464,4352,6726,6078,4764,2290,5246,3906,5438,5283,3767,4964,2861,5763,5094,
6255,6256,4622,5616,5859,5860,4707,6727,4285,4708,4824,5617,6257,5551,4787,5212,
4965,4935,4687,6465,6728,6466,5686,6079,3494,4413,2995,5247,5966,5618,6729,5967,
5764,5765,5687,5502,6730,6731,6080,5397,6467,4990,6258,6732,4538,5060,5619,6733,
4719,5688,5439,5018,5149,5284,5503,6734,6081,4607,6259,5120,3645,5861,4583,6260,
4584,4675,5620,4098,5440,6261,4863,2379,3306,4585,5552,5689,4586,5285,6735,4864,
6736,5286,6082,6737,4623,3010,4788,4381,4558,5621,4587,4896,3698,3161,5248,4353,
4045,6262,3754,5183,4588,6738,6263,6739,6740,5622,3936,6741,6468,6742,6264,5095,
6469,4991,5968,6743,4992,6744,6083,4897,6745,4256,5766,4307,3108,3968,4444,5287,
3889,4343,6084,4510,6085,4559,6086,4898,5969,6746,5623,5061,4919,5249,5250,5504,
5441,6265,5320,4878,3242,5862,5251,3428,6087,6747,4237,5624,5442,6266,5553,4539,
6748,2585,3533,5398,4262,6088,5150,4736,4438,6089,6267,5505,4966,6749,6268,6750,
6269,5288,5554,3650,6090,6091,4624,6092,5690,6751,5863,4270,5691,4277,5555,5864,
6752,5692,4720,4865,6470,5151,4688,4825,6753,3094,6754,6471,3235,4653,6755,5213,
5399,6756,3201,4589,5865,4967,6472,5866,6473,5019,3016,6757,5321,4756,3957,4573,
6093,4993,5767,4721,6474,6758,5625,6759,4458,6475,6270,6760,5556,4994,5214,5252,
6271,3875,5768,6094,5034,5506,4376,5769,6761,2120,6476,5253,5770,6762,5771,5970,
3990,5971,5557,5558,5772,6477,6095,2787,4641,5972,5121,6096,6097,6272,6763,3703,
5867,5507,6273,4206,6274,4789,6098,6764,3619,3646,3833,3804,2394,3788,4936,3978,
4866,4899,6099,6100,5559,6478,6765,3599,5868,6101,5869,5870,6275,6766,4527,6767)
# flake8: noqa
| mit |
thatguystone/vault | vault/util.py | 1 | 1230 | import logging
import os
import sys
import subprocess
import time
_suffixes = [
[("k", "kb"), 0],
[("m", "mb"), 0],
[("g", "gb"), 0],
[("t", "tb"), 0],
]
log = logging.getLogger(__name__)
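# Fill in the byte multiplier for each suffix: 1024 for "k"/"kb", 1024**2 for
# "m"/"mb", and so on.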
for i, s in enumerate(_suffixes):
if i == 0:
s[1] = 1024
else:
s[1] = _suffixes[i-1][1] * 1024
def human_size(size):
if isinstance(size, str):
size = size.lower()
for s in _suffixes:
for e in s[0]:
if size.endswith(e):
size = size[:-len(e)]
return abs(int(size)) * s[1]
return int(size)
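# Illustrative usage: human_size("10M") == 10 * 1024**2 == 10485760,
# human_size("1kb") == 1024, human_size(2048) == 2048.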
def to_bytes(obj):
if obj and isinstance(obj, str):
obj = obj.encode("utf-8")
return obj
def _proc_name(args):
if len(args) == 0:
return "<unknown>"
return args[0]
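# Run a command with stderr merged into stdout, optionally feeding it stdin;
# raises RuntimeError on a non-zero exit status, otherwise returns the output.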
def run(*args, stdin=None, cwd=None):
start = time.monotonic()
try:
p = subprocess.Popen(args,
stdin=sys.stdin if not stdin else subprocess.PIPE,
stdout=subprocess.PIPE,
stderr=subprocess.STDOUT,
close_fds=True,
cwd=cwd)
out = p.communicate(input=to_bytes(stdin))[0].decode("utf-8")
finally:
log.debug("exec (took %fs): %s", time.monotonic() - start, args)
if p.returncode != 0:
raise RuntimeError("failed to execute {proc}, process said: {out}".format(
proc=_proc_name(args),
out=out.strip()))
return out
| mit |