repo_name | path | copies | size | content | license | hash | line_mean | line_max | alpha_frac | autogenerated
---|---|---|---|---|---|---|---|---|---|---|
edx/django-pyfs | djpyfs/djpyfs.py | 1 | 5932 | """
This is a thin veneer around a `pyfilesystem`. It adds a few bits of
functionality:
1) Django configuration. This can go to Amazon S3 or a static
filesystem.
2) The ability to get URLs for objects stored on the filesystem.
3) The ability to create objects with a limited lifetime. A
task can garbage-collect those objects.
"""
import os
import os.path
import types
from boto.s3.connection import S3Connection
from django.conf import settings
from fs.osfs import OSFS
from fs_s3fs import S3FS
from .models import FSExpirations
if hasattr(settings, 'DJFS'):
DJFS_SETTINGS = settings.DJFS # pragma: no cover
else:
DJFS_SETTINGS = {'type': 'osfs',
'directory_root': 'django-pyfs/static/django-pyfs',
'url_root': '/static/django-pyfs'}
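# Example of an S3 configuration (illustrative values only; each key below is
# read elsewhere in this module, and the AWS keys may be omitted because they
# are looked up with `.get(...)`):
#
#     DJFS = {
#         'type': 's3fs',
#         'bucket': 'my-bucket',
#         'prefix': 'djpyfs',              # optional key prefix inside the bucket
#         'aws_access_key_id': '...',
#         'aws_secret_access_key': '...',
#     }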
# Global to hold the active S3 connection. Prevents needing to reconnect
# several times in a request. Connections are set up below in `get_s3_url`.
S3CONN = None
def get_filesystem(namespace):
"""
Returns a patched pyfilesystem for static module storage based on
`DJFS_SETTINGS`. See `patch_fs` documentation for additional details.
The file system will have two additional properties:
1) get_url: A way to get a URL for a static file download
2) expire: A way to expire files (so they are automatically destroyed)
"""
if DJFS_SETTINGS['type'] == 'osfs':
return get_osfs(namespace)
elif DJFS_SETTINGS['type'] == 's3fs':
return get_s3fs(namespace)
else:
raise AttributeError("Bad filesystem: " + str(DJFS_SETTINGS['type']))
def expire_objects():
"""
Remove all obsolete objects from the file systems.
"""
objects = sorted(FSExpirations.expired(), key=lambda x: x.module)
fs = None
module = None
for o in objects:
if module != o.module:
module = o.module
fs = get_filesystem(module)
if fs.exists(o.filename):
fs.remove(o.filename)
o.delete()
def patch_fs(fs, namespace, url_method):
"""
Patch a filesystem instance to add the `get_url` and `expire` methods.
Arguments:
fs (obj): The pyfilesystem subclass instance to be patched.
namespace (str): Namespace of the filesystem, used in `expire`
url_method (func): Function to patch into the filesystem instance as
`get_url`. Allows a filesystem-independent implementation.
Returns:
obj: Patched filesystem instance
"""
def expire(self, filename, seconds, days=0, expires=True): # pylint: disable=unused-argument
"""
Set the lifespan of a file on the filesystem.
Arguments:
filename (str): Name of file
expires (bool): False means the file will never be removed; seconds
and days give the time to expiration.
seconds (int): (optional) how many seconds to keep the file around
days (int): (optional) how many days to keep the file around for.
If both days and seconds are given they will be added
together. So `seconds=86400, days=1` would expire the file
in 2 days.
Returns:
None
"""
FSExpirations.create_expiration(namespace, filename, seconds, days=days, expires=expires)
fs.expire = types.MethodType(expire, fs)
fs.get_url = types.MethodType(url_method, fs)
return fs
def get_osfs(namespace):
"""
Helper for `get_filesystem` that returns a file system on local disk (OSFS).
"""
full_path = os.path.join(DJFS_SETTINGS['directory_root'], namespace)
if not os.path.exists(full_path):
os.makedirs(full_path)
osfs = OSFS(full_path)
osfs = patch_fs(
osfs,
namespace,
# This is the OSFS implementation of `get_url`; note that it ignores
# the timeout param, so OSFS file URLs have no time limits.
lambda self, filename, timeout=0: os.path.join(DJFS_SETTINGS['url_root'], namespace, filename)
)
return osfs
def get_s3fs(namespace):
"""
Helper for `get_filesystem` that returns a file system backed by S3.
"""
key_id = DJFS_SETTINGS.get('aws_access_key_id', None)
key_secret = DJFS_SETTINGS.get('aws_secret_access_key', None)
fullpath = namespace
if 'prefix' in DJFS_SETTINGS:
fullpath = os.path.join(DJFS_SETTINGS['prefix'], fullpath)
s3fs = S3FS(DJFS_SETTINGS['bucket'], fullpath, aws_secret_access_key=key_secret, aws_access_key_id=key_id)
def get_s3_url(self, filename, timeout=60): # pylint: disable=unused-argument
"""
Patch method to return a signed S3 URL for the given filename.
Note that this will return a URL whether or not the requested file
exists.
Arguments:
self (obj): S3FS instance that this function has been patched onto
filename (str): The name of the file we are retrieving a url for
timeout (int): How long the url should be valid for; S3 enforces
this limit
Returns:
str: A signed url to the requested file in S3
"""
global S3CONN
try:
if not S3CONN:
S3CONN = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
return S3CONN.generate_url(
timeout, 'GET', bucket=DJFS_SETTINGS['bucket'], key=os.path.join(fullpath, filename)
)
except Exception: # pylint: disable=broad-except
# Retry on error; typically this happens when the connection has timed
# out, but the broad except covers all errors.
S3CONN = S3Connection(aws_access_key_id=key_id, aws_secret_access_key=key_secret)
return S3CONN.generate_url(
timeout, 'GET', bucket=DJFS_SETTINGS['bucket'], key=os.path.join(fullpath, filename)
)
s3fs = patch_fs(s3fs, namespace, get_s3_url)
return s3fs
| apache-2.0 | -5,370,170,221,802,633,000 | 34.100592 | 110 | 0.639751 | false |
lmregus/Portfolio | python/design_patterns/env/lib/python3.7/site-packages/more_itertools/tests/test_more.py | 1 | 83464 | from collections import OrderedDict
from decimal import Decimal
from doctest import DocTestSuite
from fractions import Fraction
from functools import partial, reduce
from heapq import merge
from io import StringIO
from itertools import (
accumulate,
chain,
count,
groupby,
islice,
permutations,
product,
repeat,
)
from operator import add, mul, itemgetter
from time import sleep
from unittest import TestCase
import more_itertools as mi
def load_tests(loader, tests, ignore):
# Add the doctests
tests.addTests(DocTestSuite('more_itertools.more'))
return tests
class CollateTests(TestCase):
"""Unit tests for ``collate()``"""
# Also accidentally tests peekable, though that could use its own tests
def test_default(self):
"""Test with the default `key` function."""
iterables = [range(4), range(7), range(3, 6)]
self.assertEqual(
sorted(reduce(list.__add__, [list(it) for it in iterables])),
list(mi.collate(*iterables))
)
def test_key(self):
"""Test using a custom `key` function."""
iterables = [range(5, 0, -1), range(4, 0, -1)]
actual = sorted(
reduce(list.__add__, [list(it) for it in iterables]), reverse=True
)
expected = list(mi.collate(*iterables, key=lambda x: -x))
self.assertEqual(actual, expected)
def test_empty(self):
"""Be nice if passed an empty list of iterables."""
self.assertEqual([], list(mi.collate()))
def test_one(self):
"""Work when only 1 iterable is passed."""
self.assertEqual([0, 1], list(mi.collate(range(2))))
def test_reverse(self):
"""Test the `reverse` kwarg."""
iterables = [range(4, 0, -1), range(7, 0, -1), range(3, 6, -1)]
actual = sorted(
reduce(list.__add__, [list(it) for it in iterables]), reverse=True
)
expected = list(mi.collate(*iterables, reverse=True))
self.assertEqual(actual, expected)
def test_alias(self):
self.assertNotEqual(merge.__doc__, mi.collate.__doc__)
self.assertNotEqual(partial.__doc__, mi.collate.__doc__)
class ChunkedTests(TestCase):
"""Tests for ``chunked()``"""
def test_even(self):
"""Test when ``n`` divides evenly into the length of the iterable."""
self.assertEqual(
list(mi.chunked('ABCDEF', 3)), [['A', 'B', 'C'], ['D', 'E', 'F']]
)
def test_odd(self):
"""Test when ``n`` does not divide evenly into the length of the
iterable.
"""
self.assertEqual(
list(mi.chunked('ABCDE', 3)), [['A', 'B', 'C'], ['D', 'E']]
)
class FirstTests(TestCase):
"""Tests for ``first()``"""
def test_many(self):
"""Test that it works on many-item iterables."""
# Also try it on a generator expression to make sure it works on
# whatever those return, across Python versions.
self.assertEqual(mi.first(x for x in range(4)), 0)
def test_one(self):
"""Test that it doesn't raise StopIteration prematurely."""
self.assertEqual(mi.first([3]), 3)
def test_empty_stop_iteration(self):
"""It should raise StopIteration for empty iterables."""
self.assertRaises(ValueError, lambda: mi.first([]))
def test_default(self):
"""It should return the provided default arg for empty iterables."""
self.assertEqual(mi.first([], 'boo'), 'boo')
class IterOnlyRange:
"""User-defined iterable class which only support __iter__.
>>> r = IterOnlyRange(5)
>>> r[0]
AttributeError: IterOnlyRange instance has no attribute '__getitem__'
Note: In Python 3, ``TypeError`` will be raised because ``object`` is
inherited implicitly by default.
>>> r[0]
TypeError: 'IterOnlyRange' object does not support indexing
"""
def __init__(self, n):
"""Set the length of the range."""
self.n = n
def __iter__(self):
"""Works same as range()."""
return iter(range(self.n))
class LastTests(TestCase):
"""Tests for ``last()``"""
def test_many_nonsliceable(self):
"""Test that it works on many-item non-slice-able iterables."""
# Also try it on a generator expression to make sure it works on
# whatever those return, across Python versions.
self.assertEqual(mi.last(x for x in range(4)), 3)
def test_one_nonsliceable(self):
"""Test that it doesn't raise StopIteration prematurely."""
self.assertEqual(mi.last(x for x in range(1)), 0)
def test_empty_stop_iteration_nonsliceable(self):
"""It should raise ValueError for empty non-slice-able iterables."""
self.assertRaises(ValueError, lambda: mi.last(x for x in range(0)))
def test_default_nonsliceable(self):
"""It should return the provided default arg for empty non-slice-able
iterables.
"""
self.assertEqual(mi.last((x for x in range(0)), 'boo'), 'boo')
def test_many_sliceable(self):
"""Test that it works on many-item slice-able iterables."""
self.assertEqual(mi.last([0, 1, 2, 3]), 3)
def test_one_sliceable(self):
"""Test that it doesn't raise StopIteration prematurely."""
self.assertEqual(mi.last([3]), 3)
def test_empty_stop_iteration_sliceable(self):
"""It should raise ValueError for empty slice-able iterables."""
self.assertRaises(ValueError, lambda: mi.last([]))
def test_default_sliceable(self):
"""It should return the provided default arg for empty slice-able
iterables.
"""
self.assertEqual(mi.last([], 'boo'), 'boo')
def test_dict(self):
"""last(dic) and last(dic.keys()) should return same result."""
dic = {'a': 1, 'b': 2, 'c': 3}
self.assertEqual(mi.last(dic), mi.last(dic.keys()))
def test_ordereddict(self):
"""last(dic) should return the last key."""
od = OrderedDict()
od['a'] = 1
od['b'] = 2
od['c'] = 3
self.assertEqual(mi.last(od), 'c')
def test_customrange(self):
"""It should work on custom class where [] raises AttributeError."""
self.assertEqual(mi.last(IterOnlyRange(5)), 4)
class PeekableTests(TestCase):
"""Tests for ``peekable()`` behavor not incidentally covered by testing
``collate()``
"""
def test_peek_default(self):
"""Make sure passing a default into ``peek()`` works."""
p = mi.peekable([])
self.assertEqual(p.peek(7), 7)
def test_truthiness(self):
"""Make sure a ``peekable`` tests true iff there are items remaining in
the iterable.
"""
p = mi.peekable([])
self.assertFalse(p)
p = mi.peekable(range(3))
self.assertTrue(p)
def test_simple_peeking(self):
"""Make sure ``next`` and ``peek`` advance and don't advance the
iterator, respectively.
"""
p = mi.peekable(range(10))
self.assertEqual(next(p), 0)
self.assertEqual(p.peek(), 1)
self.assertEqual(next(p), 1)
def test_indexing(self):
"""
Indexing into the peekable shouldn't advance the iterator.
"""
p = mi.peekable('abcdefghijkl')
# The 0th index is what ``next()`` will return
self.assertEqual(p[0], 'a')
self.assertEqual(next(p), 'a')
# Indexing further into the peekable shouldn't advance the iterator
self.assertEqual(p[2], 'd')
self.assertEqual(next(p), 'b')
# The 0th index moves up with the iterator; the last index follows
self.assertEqual(p[0], 'c')
self.assertEqual(p[9], 'l')
self.assertEqual(next(p), 'c')
self.assertEqual(p[8], 'l')
# Negative indexing should work too
self.assertEqual(p[-2], 'k')
self.assertEqual(p[-9], 'd')
self.assertRaises(IndexError, lambda: p[-10])
def test_slicing(self):
"""Slicing the peekable shouldn't advance the iterator."""
seq = list('abcdefghijkl')
p = mi.peekable(seq)
# Slicing the peekable should just be like slicing a re-iterable
self.assertEqual(p[1:4], seq[1:4])
# Advancing the iterator moves the slices up also
self.assertEqual(next(p), 'a')
self.assertEqual(p[1:4], seq[1:][1:4])
# Implicit start and stop should work
self.assertEqual(p[:5], seq[1:][:5])
self.assertEqual(p[:], seq[1:][:])
# Indexing past the end should work
self.assertEqual(p[:100], seq[1:][:100])
# Steps should work, including negative
self.assertEqual(p[::2], seq[1:][::2])
self.assertEqual(p[::-1], seq[1:][::-1])
def test_slicing_reset(self):
"""Test slicing on a fresh iterable each time"""
iterable = ['0', '1', '2', '3', '4', '5']
indexes = list(range(-4, len(iterable) + 4)) + [None]
steps = [1, 2, 3, 4, -1, -2, -3, 4]
for slice_args in product(indexes, indexes, steps):
it = iter(iterable)
p = mi.peekable(it)
next(p)
index = slice(*slice_args)
actual = p[index]
expected = iterable[1:][index]
self.assertEqual(actual, expected, slice_args)
def test_slicing_error(self):
iterable = '01234567'
p = mi.peekable(iter(iterable))
# Prime the cache
p.peek()
old_cache = list(p._cache)
# Illegal slice
with self.assertRaises(ValueError):
p[1:-1:0]
# Neither the cache nor the iteration should be affected
self.assertEqual(old_cache, list(p._cache))
self.assertEqual(list(p), list(iterable))
def test_passthrough(self):
"""Iterating a peekable without using ``peek()`` or ``prepend()``
should just give the underlying iterable's elements (a trivial test but
useful to set a baseline in case something goes wrong)"""
expected = [1, 2, 3, 4, 5]
actual = list(mi.peekable(expected))
self.assertEqual(actual, expected)
# prepend() behavior tests
def test_prepend(self):
"""Tests intersperesed ``prepend()`` and ``next()`` calls"""
it = mi.peekable(range(2))
actual = []
# Test prepend() before next()
it.prepend(10)
actual += [next(it), next(it)]
# Test prepend() between next()s
it.prepend(11)
actual += [next(it), next(it)]
# Test prepend() after source iterable is consumed
it.prepend(12)
actual += [next(it)]
expected = [10, 0, 11, 1, 12]
self.assertEqual(actual, expected)
def test_multi_prepend(self):
"""Tests prepending multiple items and getting them in proper order"""
it = mi.peekable(range(5))
actual = [next(it), next(it)]
it.prepend(10, 11, 12)
it.prepend(20, 21)
actual += list(it)
expected = [0, 1, 20, 21, 10, 11, 12, 2, 3, 4]
self.assertEqual(actual, expected)
def test_empty(self):
"""Tests prepending in front of an empty iterable"""
it = mi.peekable([])
it.prepend(10)
actual = list(it)
expected = [10]
self.assertEqual(actual, expected)
def test_prepend_truthiness(self):
"""Tests that ``__bool__()`` or ``__nonzero__()`` works properly
with ``prepend()``"""
it = mi.peekable(range(5))
self.assertTrue(it)
actual = list(it)
self.assertFalse(it)
it.prepend(10)
self.assertTrue(it)
actual += [next(it)]
self.assertFalse(it)
expected = [0, 1, 2, 3, 4, 10]
self.assertEqual(actual, expected)
def test_multi_prepend_peek(self):
"""Tests prepending multiple elements and getting them in reverse order
while peeking"""
it = mi.peekable(range(5))
actual = [next(it), next(it)]
self.assertEqual(it.peek(), 2)
it.prepend(10, 11, 12)
self.assertEqual(it.peek(), 10)
it.prepend(20, 21)
self.assertEqual(it.peek(), 20)
actual += list(it)
self.assertFalse(it)
expected = [0, 1, 20, 21, 10, 11, 12, 2, 3, 4]
self.assertEqual(actual, expected)
def test_prepend_after_stop(self):
"""Test resuming iteration after a previous exhaustion"""
it = mi.peekable(range(3))
self.assertEqual(list(it), [0, 1, 2])
self.assertRaises(StopIteration, lambda: next(it))
it.prepend(10)
self.assertEqual(next(it), 10)
self.assertRaises(StopIteration, lambda: next(it))
def test_prepend_slicing(self):
"""Tests interaction between prepending and slicing"""
seq = list(range(20))
p = mi.peekable(seq)
p.prepend(30, 40, 50)
pseq = [30, 40, 50] + seq # pseq for prepended_seq
# adapt the specific tests from test_slicing
self.assertEqual(p[0], 30)
self.assertEqual(p[1:8], pseq[1:8])
self.assertEqual(p[1:], pseq[1:])
self.assertEqual(p[:5], pseq[:5])
self.assertEqual(p[:], pseq[:])
self.assertEqual(p[:100], pseq[:100])
self.assertEqual(p[::2], pseq[::2])
self.assertEqual(p[::-1], pseq[::-1])
def test_prepend_indexing(self):
"""Tests interaction between prepending and indexing"""
seq = list(range(20))
p = mi.peekable(seq)
p.prepend(30, 40, 50)
self.assertEqual(p[0], 30)
self.assertEqual(next(p), 30)
self.assertEqual(p[2], 0)
self.assertEqual(next(p), 40)
self.assertEqual(p[0], 50)
self.assertEqual(p[9], 8)
self.assertEqual(next(p), 50)
self.assertEqual(p[8], 8)
self.assertEqual(p[-2], 18)
self.assertEqual(p[-9], 11)
self.assertRaises(IndexError, lambda: p[-21])
def test_prepend_iterable(self):
"""Tests prepending from an iterable"""
it = mi.peekable(range(5))
# Don't directly use the range() object to avoid any range-specific
# optimizations
it.prepend(*(x for x in range(5)))
actual = list(it)
expected = list(chain(range(5), range(5)))
self.assertEqual(actual, expected)
def test_prepend_many(self):
"""Tests that prepending a huge number of elements works"""
it = mi.peekable(range(5))
# Don't directly use the range() object to avoid any range-specific
# optimizations
it.prepend(*(x for x in range(20000)))
actual = list(it)
expected = list(chain(range(20000), range(5)))
self.assertEqual(actual, expected)
def test_prepend_reversed(self):
"""Tests prepending from a reversed iterable"""
it = mi.peekable(range(3))
it.prepend(*reversed((10, 11, 12)))
actual = list(it)
expected = [12, 11, 10, 0, 1, 2]
self.assertEqual(actual, expected)
class ConsumerTests(TestCase):
"""Tests for ``consumer()``"""
def test_consumer(self):
@mi.consumer
def eater():
while True:
x = yield # noqa
e = eater()
e.send('hi') # without @consumer, would raise TypeError
class DistinctPermutationsTests(TestCase):
def test_distinct_permutations(self):
"""Make sure the output for ``distinct_permutations()`` is the same as
set(permutations(it)).
"""
iterable = ['z', 'a', 'a', 'q', 'q', 'q', 'y']
test_output = sorted(mi.distinct_permutations(iterable))
ref_output = sorted(set(permutations(iterable)))
self.assertEqual(test_output, ref_output)
def test_other_iterables(self):
"""Make sure ``distinct_permutations()`` accepts a different type of
iterables.
"""
# a generator
iterable = (c for c in ['z', 'a', 'a', 'q', 'q', 'q', 'y'])
test_output = sorted(mi.distinct_permutations(iterable))
# "reload" it
iterable = (c for c in ['z', 'a', 'a', 'q', 'q', 'q', 'y'])
ref_output = sorted(set(permutations(iterable)))
self.assertEqual(test_output, ref_output)
# an iterator
iterable = iter(['z', 'a', 'a', 'q', 'q', 'q', 'y'])
test_output = sorted(mi.distinct_permutations(iterable))
# "reload" it
iterable = iter(['z', 'a', 'a', 'q', 'q', 'q', 'y'])
ref_output = sorted(set(permutations(iterable)))
self.assertEqual(test_output, ref_output)
class IlenTests(TestCase):
def test_ilen(self):
"""Sanity-checks for ``ilen()``."""
# Non-empty
self.assertEqual(
mi.ilen(filter(lambda x: x % 10 == 0, range(101))), 11
)
# Empty
self.assertEqual(mi.ilen((x for x in range(0))), 0)
# Iterable with __len__
self.assertEqual(mi.ilen(list(range(6))), 6)
class WithIterTests(TestCase):
def test_with_iter(self):
s = StringIO('One fish\nTwo fish')
initial_words = [line.split()[0] for line in mi.with_iter(s)]
# Iterable's items should be faithfully represented
self.assertEqual(initial_words, ['One', 'Two'])
# The file object should be closed
self.assertTrue(s.closed)
class OneTests(TestCase):
def test_basic(self):
it = iter(['item'])
self.assertEqual(mi.one(it), 'item')
def test_too_short(self):
it = iter([])
self.assertRaises(ValueError, lambda: mi.one(it))
self.assertRaises(IndexError, lambda: mi.one(it, too_short=IndexError))
def test_too_long(self):
it = count()
self.assertRaises(ValueError, lambda: mi.one(it)) # burn 0 and 1
self.assertEqual(next(it), 2)
self.assertRaises(
OverflowError, lambda: mi.one(it, too_long=OverflowError)
)
class IntersperseTest(TestCase):
""" Tests for intersperse() """
def test_even(self):
iterable = (x for x in '01')
self.assertEqual(
list(mi.intersperse(None, iterable)), ['0', None, '1']
)
def test_odd(self):
iterable = (x for x in '012')
self.assertEqual(
list(mi.intersperse(None, iterable)), ['0', None, '1', None, '2']
)
def test_nested(self):
element = ('a', 'b')
iterable = (x for x in '012')
actual = list(mi.intersperse(element, iterable))
expected = ['0', ('a', 'b'), '1', ('a', 'b'), '2']
self.assertEqual(actual, expected)
def test_not_iterable(self):
self.assertRaises(TypeError, lambda: mi.intersperse('x', 1))
def test_n(self):
for n, element, expected in [
(1, '_', ['0', '_', '1', '_', '2', '_', '3', '_', '4', '_', '5']),
(2, '_', ['0', '1', '_', '2', '3', '_', '4', '5']),
(3, '_', ['0', '1', '2', '_', '3', '4', '5']),
(4, '_', ['0', '1', '2', '3', '_', '4', '5']),
(5, '_', ['0', '1', '2', '3', '4', '_', '5']),
(6, '_', ['0', '1', '2', '3', '4', '5']),
(7, '_', ['0', '1', '2', '3', '4', '5']),
(3, ['a', 'b'], ['0', '1', '2', ['a', 'b'], '3', '4', '5']),
]:
iterable = (x for x in '012345')
actual = list(mi.intersperse(element, iterable, n=n))
self.assertEqual(actual, expected)
def test_n_zero(self):
self.assertRaises(
ValueError, lambda: list(mi.intersperse('x', '012', n=0))
)
class UniqueToEachTests(TestCase):
"""Tests for ``unique_to_each()``"""
def test_all_unique(self):
"""When all the input iterables are unique the output should match
the input."""
iterables = [[1, 2], [3, 4, 5], [6, 7, 8]]
self.assertEqual(mi.unique_to_each(*iterables), iterables)
def test_duplicates(self):
"""When there are duplicates in any of the input iterables that aren't
in the rest, those duplicates should be emitted."""
iterables = ["mississippi", "missouri"]
self.assertEqual(
mi.unique_to_each(*iterables), [['p', 'p'], ['o', 'u', 'r']]
)
def test_mixed(self):
"""When the input iterables contain different types the function should
still behave properly"""
iterables = ['x', (i for i in range(3)), [1, 2, 3], tuple()]
self.assertEqual(mi.unique_to_each(*iterables), [['x'], [0], [3], []])
class WindowedTests(TestCase):
"""Tests for ``windowed()``"""
def test_basic(self):
actual = list(mi.windowed([1, 2, 3, 4, 5], 3))
expected = [(1, 2, 3), (2, 3, 4), (3, 4, 5)]
self.assertEqual(actual, expected)
def test_large_size(self):
"""
When the window size is larger than the iterable, and no fill value is
given, ``None`` should be filled in.
"""
actual = list(mi.windowed([1, 2, 3, 4, 5], 6))
expected = [(1, 2, 3, 4, 5, None)]
self.assertEqual(actual, expected)
def test_fillvalue(self):
"""
When sizes don't match evenly, the given fill value should be used.
"""
iterable = [1, 2, 3, 4, 5]
for n, kwargs, expected in [
(6, {}, [(1, 2, 3, 4, 5, '!')]), # n > len(iterable)
(3, {'step': 3}, [(1, 2, 3), (4, 5, '!')]), # using ``step``
]:
actual = list(mi.windowed(iterable, n, fillvalue='!', **kwargs))
self.assertEqual(actual, expected)
def test_zero(self):
"""When the window size is zero, an empty tuple should be emitted."""
actual = list(mi.windowed([1, 2, 3, 4, 5], 0))
expected = [tuple()]
self.assertEqual(actual, expected)
def test_negative(self):
"""When the window size is negative, ValueError should be raised."""
with self.assertRaises(ValueError):
list(mi.windowed([1, 2, 3, 4, 5], -1))
def test_step(self):
"""The window should advance by the number of steps provided"""
iterable = [1, 2, 3, 4, 5, 6, 7]
for n, step, expected in [
(3, 2, [(1, 2, 3), (3, 4, 5), (5, 6, 7)]), # n > step
(3, 3, [(1, 2, 3), (4, 5, 6), (7, None, None)]), # n == step
(3, 4, [(1, 2, 3), (5, 6, 7)]), # line up nicely
(3, 5, [(1, 2, 3), (6, 7, None)]), # off by one
(3, 6, [(1, 2, 3), (7, None, None)]), # off by two
(3, 7, [(1, 2, 3)]), # step past the end
(7, 8, [(1, 2, 3, 4, 5, 6, 7)]), # step > len(iterable)
]:
actual = list(mi.windowed(iterable, n, step=step))
self.assertEqual(actual, expected)
# Step must be greater than or equal to 1
with self.assertRaises(ValueError):
list(mi.windowed(iterable, 3, step=0))
class SubstringsTests(TestCase):
def test_basic(self):
iterable = (x for x in range(4))
actual = list(mi.substrings(iterable))
expected = [
(0,),
(1,),
(2,),
(3,),
(0, 1),
(1, 2),
(2, 3),
(0, 1, 2),
(1, 2, 3),
(0, 1, 2, 3),
]
self.assertEqual(actual, expected)
def test_strings(self):
iterable = 'abc'
actual = list(mi.substrings(iterable))
expected = [
('a',), ('b',), ('c',), ('a', 'b'), ('b', 'c'), ('a', 'b', 'c')
]
self.assertEqual(actual, expected)
def test_empty(self):
iterable = iter([])
actual = list(mi.substrings(iterable))
expected = []
self.assertEqual(actual, expected)
def test_order(self):
iterable = [2, 0, 1]
actual = list(mi.substrings(iterable))
expected = [(2,), (0,), (1,), (2, 0), (0, 1), (2, 0, 1)]
self.assertEqual(actual, expected)
class SubstringsIndexesTests(TestCase):
def test_basic(self):
sequence = [x for x in range(4)]
actual = list(mi.substrings_indexes(sequence))
expected = [
([0], 0, 1),
([1], 1, 2),
([2], 2, 3),
([3], 3, 4),
([0, 1], 0, 2),
([1, 2], 1, 3),
([2, 3], 2, 4),
([0, 1, 2], 0, 3),
([1, 2, 3], 1, 4),
([0, 1, 2, 3], 0, 4),
]
self.assertEqual(actual, expected)
def test_strings(self):
sequence = 'abc'
actual = list(mi.substrings_indexes(sequence))
expected = [
('a', 0, 1),
('b', 1, 2),
('c', 2, 3),
('ab', 0, 2),
('bc', 1, 3),
('abc', 0, 3),
]
self.assertEqual(actual, expected)
def test_empty(self):
sequence = []
actual = list(mi.substrings_indexes(sequence))
expected = []
self.assertEqual(actual, expected)
def test_order(self):
sequence = [2, 0, 1]
actual = list(mi.substrings_indexes(sequence))
expected = [
([2], 0, 1),
([0], 1, 2),
([1], 2, 3),
([2, 0], 0, 2),
([0, 1], 1, 3),
([2, 0, 1], 0, 3),
]
self.assertEqual(actual, expected)
def test_reverse(self):
sequence = [2, 0, 1]
actual = list(mi.substrings_indexes(sequence, reverse=True))
expected = [
([2, 0, 1], 0, 3),
([2, 0], 0, 2),
([0, 1], 1, 3),
([2], 0, 1),
([0], 1, 2),
([1], 2, 3),
]
self.assertEqual(actual, expected)
class BucketTests(TestCase):
"""Tests for ``bucket()``"""
def test_basic(self):
iterable = [10, 20, 30, 11, 21, 31, 12, 22, 23, 33]
D = mi.bucket(iterable, key=lambda x: 10 * (x // 10))
# In-order access
self.assertEqual(list(D[10]), [10, 11, 12])
# Out of order access
self.assertEqual(list(D[30]), [30, 31, 33])
self.assertEqual(list(D[20]), [20, 21, 22, 23])
self.assertEqual(list(D[40]), []) # Nothing in here!
def test_in(self):
iterable = [10, 20, 30, 11, 21, 31, 12, 22, 23, 33]
D = mi.bucket(iterable, key=lambda x: 10 * (x // 10))
self.assertIn(10, D)
self.assertNotIn(40, D)
self.assertIn(20, D)
self.assertNotIn(21, D)
# Checking in-ness shouldn't advance the iterator
self.assertEqual(next(D[10]), 10)
def test_validator(self):
iterable = count(0)
key = lambda x: int(str(x)[0]) # First digit of each number
validator = lambda x: 0 < x < 10 # No leading zeros
D = mi.bucket(iterable, key, validator=validator)
self.assertEqual(mi.take(3, D[1]), [1, 10, 11])
self.assertNotIn(0, D) # Non-valid entries don't return True
self.assertNotIn(0, D._cache) # Don't store non-valid entries
self.assertEqual(list(D[0]), [])
class SpyTests(TestCase):
"""Tests for ``spy()``"""
def test_basic(self):
original_iterable = iter('abcdefg')
head, new_iterable = mi.spy(original_iterable)
self.assertEqual(head, ['a'])
self.assertEqual(
list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g']
)
def test_unpacking(self):
original_iterable = iter('abcdefg')
(first, second, third), new_iterable = mi.spy(original_iterable, 3)
self.assertEqual(first, 'a')
self.assertEqual(second, 'b')
self.assertEqual(third, 'c')
self.assertEqual(
list(new_iterable), ['a', 'b', 'c', 'd', 'e', 'f', 'g']
)
def test_too_many(self):
original_iterable = iter('abc')
head, new_iterable = mi.spy(original_iterable, 4)
self.assertEqual(head, ['a', 'b', 'c'])
self.assertEqual(list(new_iterable), ['a', 'b', 'c'])
def test_zero(self):
original_iterable = iter('abc')
head, new_iterable = mi.spy(original_iterable, 0)
self.assertEqual(head, [])
self.assertEqual(list(new_iterable), ['a', 'b', 'c'])
class InterleaveTests(TestCase):
def test_even(self):
actual = list(mi.interleave([1, 4, 7], [2, 5, 8], [3, 6, 9]))
expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertEqual(actual, expected)
def test_short(self):
actual = list(mi.interleave([1, 4], [2, 5, 7], [3, 6, 8]))
expected = [1, 2, 3, 4, 5, 6]
self.assertEqual(actual, expected)
def test_mixed_types(self):
it_list = ['a', 'b', 'c', 'd']
it_str = '12345'
it_inf = count()
actual = list(mi.interleave(it_list, it_str, it_inf))
expected = ['a', '1', 0, 'b', '2', 1, 'c', '3', 2, 'd', '4', 3]
self.assertEqual(actual, expected)
class InterleaveLongestTests(TestCase):
def test_even(self):
actual = list(mi.interleave_longest([1, 4, 7], [2, 5, 8], [3, 6, 9]))
expected = [1, 2, 3, 4, 5, 6, 7, 8, 9]
self.assertEqual(actual, expected)
def test_short(self):
actual = list(mi.interleave_longest([1, 4], [2, 5, 7], [3, 6, 8]))
expected = [1, 2, 3, 4, 5, 6, 7, 8]
self.assertEqual(actual, expected)
def test_mixed_types(self):
it_list = ['a', 'b', 'c', 'd']
it_str = '12345'
it_gen = (x for x in range(3))
actual = list(mi.interleave_longest(it_list, it_str, it_gen))
expected = ['a', '1', 0, 'b', '2', 1, 'c', '3', 2, 'd', '4', '5']
self.assertEqual(actual, expected)
class TestCollapse(TestCase):
"""Tests for ``collapse()``"""
def test_collapse(self):
l = [[1], 2, [[3], 4], [[[5]]]]
self.assertEqual(list(mi.collapse(l)), [1, 2, 3, 4, 5])
def test_collapse_to_string(self):
l = [["s1"], "s2", [["s3"], "s4"], [[["s5"]]]]
self.assertEqual(list(mi.collapse(l)), ["s1", "s2", "s3", "s4", "s5"])
def test_collapse_to_bytes(self):
l = [[b"s1"], b"s2", [[b"s3"], b"s4"], [[[b"s5"]]]]
self.assertEqual(
list(mi.collapse(l)), [b"s1", b"s2", b"s3", b"s4", b"s5"]
)
def test_collapse_flatten(self):
l = [[1], [2], [[3], 4], [[[5]]]]
self.assertEqual(list(mi.collapse(l, levels=1)), list(mi.flatten(l)))
def test_collapse_to_level(self):
l = [[1], 2, [[3], 4], [[[5]]]]
self.assertEqual(list(mi.collapse(l, levels=2)), [1, 2, 3, 4, [5]])
self.assertEqual(
list(mi.collapse(mi.collapse(l, levels=1), levels=1)),
list(mi.collapse(l, levels=2))
)
def test_collapse_to_list(self):
l = (1, [2], (3, [4, (5,)], 'ab'))
actual = list(mi.collapse(l, base_type=list))
expected = [1, [2], 3, [4, (5,)], 'ab']
self.assertEqual(actual, expected)
class SideEffectTests(TestCase):
"""Tests for ``side_effect()``"""
def test_individual(self):
# The function increments the counter for each call
counter = [0]
def func(arg):
counter[0] += 1
result = list(mi.side_effect(func, range(10)))
self.assertEqual(result, list(range(10)))
self.assertEqual(counter[0], 10)
def test_chunked(self):
# The function increments the counter for each call
counter = [0]
def func(arg):
counter[0] += 1
result = list(mi.side_effect(func, range(10), 2))
self.assertEqual(result, list(range(10)))
self.assertEqual(counter[0], 5)
def test_before_after(self):
f = StringIO()
collector = []
def func(item):
print(item, file=f)
collector.append(f.getvalue())
def it():
yield 'a'
yield 'b'
raise RuntimeError('kaboom')
before = lambda: print('HEADER', file=f)
after = f.close
try:
mi.consume(mi.side_effect(func, it(), before=before, after=after))
except RuntimeError:
pass
# The iterable should have been written to the file
self.assertEqual(collector, ['HEADER\na\n', 'HEADER\na\nb\n'])
# The file should be closed even though something bad happened
self.assertTrue(f.closed)
def test_before_fails(self):
f = StringIO()
func = lambda x: print(x, file=f)
def before():
raise RuntimeError('ouch')
try:
mi.consume(
mi.side_effect(func, 'abc', before=before, after=f.close)
)
except RuntimeError:
pass
# The file should be closed even though something bad happened in the
# before function
self.assertTrue(f.closed)
class SlicedTests(TestCase):
"""Tests for ``sliced()``"""
def test_even(self):
"""Test when the length of the sequence is divisible by *n*"""
seq = 'ABCDEFGHI'
self.assertEqual(list(mi.sliced(seq, 3)), ['ABC', 'DEF', 'GHI'])
def test_odd(self):
"""Test when the length of the sequence is not divisible by *n*"""
seq = 'ABCDEFGHI'
self.assertEqual(list(mi.sliced(seq, 4)), ['ABCD', 'EFGH', 'I'])
def test_not_sliceable(self):
seq = (x for x in 'ABCDEFGHI')
with self.assertRaises(TypeError):
list(mi.sliced(seq, 3))
class SplitAtTests(TestCase):
"""Tests for ``split()``"""
def comp_with_str_split(self, str_to_split, delim):
pred = lambda c: c == delim
actual = list(map(''.join, mi.split_at(str_to_split, pred)))
expected = str_to_split.split(delim)
self.assertEqual(actual, expected)
def test_separators(self):
test_strs = ['', 'abcba', 'aaabbbcccddd', 'e']
for s, delim in product(test_strs, 'abcd'):
self.comp_with_str_split(s, delim)
class SplitBeforeTest(TestCase):
"""Tests for ``split_before()``"""
def test_starts_with_sep(self):
actual = list(mi.split_before('xooxoo', lambda c: c == 'x'))
expected = [['x', 'o', 'o'], ['x', 'o', 'o']]
self.assertEqual(actual, expected)
def test_ends_with_sep(self):
actual = list(mi.split_before('ooxoox', lambda c: c == 'x'))
expected = [['o', 'o'], ['x', 'o', 'o'], ['x']]
self.assertEqual(actual, expected)
def test_no_sep(self):
actual = list(mi.split_before('ooo', lambda c: c == 'x'))
expected = [['o', 'o', 'o']]
self.assertEqual(actual, expected)
class SplitAfterTest(TestCase):
"""Tests for ``split_after()``"""
def test_starts_with_sep(self):
actual = list(mi.split_after('xooxoo', lambda c: c == 'x'))
expected = [['x'], ['o', 'o', 'x'], ['o', 'o']]
self.assertEqual(actual, expected)
def test_ends_with_sep(self):
actual = list(mi.split_after('ooxoox', lambda c: c == 'x'))
expected = [['o', 'o', 'x'], ['o', 'o', 'x']]
self.assertEqual(actual, expected)
def test_no_sep(self):
actual = list(mi.split_after('ooo', lambda c: c == 'x'))
expected = [['o', 'o', 'o']]
self.assertEqual(actual, expected)
class SplitIntoTests(TestCase):
"""Tests for ``split_into()``"""
def test_iterable_just_right(self):
"""Size of ``iterable`` equals the sum of ``sizes``."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [2, 3, 4]
expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_iterable_too_small(self):
"""Size of ``iterable`` is smaller than sum of ``sizes``. Last return
list is shorter as a result."""
iterable = [1, 2, 3, 4, 5, 6, 7]
sizes = [2, 3, 4]
expected = [[1, 2], [3, 4, 5], [6, 7]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_iterable_too_small_extra(self):
"""Size of ``iterable`` is smaller than sum of ``sizes``. Second last
return list is shorter and last return list is empty as a result."""
iterable = [1, 2, 3, 4, 5, 6, 7]
sizes = [2, 3, 4, 5]
expected = [[1, 2], [3, 4, 5], [6, 7], []]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_iterable_too_large(self):
"""Size of ``iterable`` is larger than sum of ``sizes``. Not all
items of iterable are returned."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [2, 3, 2]
expected = [[1, 2], [3, 4, 5], [6, 7]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_using_none_with_leftover(self):
"""Last item of ``sizes`` is None when items still remain in
``iterable``. Last list returned stretches to fit all remaining items
of ``iterable``."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [2, 3, None]
expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_using_none_without_leftover(self):
"""Last item of ``sizes`` is None when no items remain in
``iterable``. Last list returned is empty."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [2, 3, 4, None]
expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9], []]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_using_none_mid_sizes(self):
"""None is present in ``sizes`` but is not the last item. Last list
returned stretches to fit all remaining items of ``iterable`` but
all items in ``sizes`` after None are ignored."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [2, 3, None, 4]
expected = [[1, 2], [3, 4, 5], [6, 7, 8, 9]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_iterable_empty(self):
"""``iterable`` argument is empty but ``sizes`` is not. An empty
list is returned for each item in ``sizes``."""
iterable = []
sizes = [2, 4, 2]
expected = [[], [], []]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_iterable_empty_using_none(self):
"""``iterable`` argument is empty but ``sizes`` is not. An empty
list is returned for each item in ``sizes`` that is not after a
None item."""
iterable = []
sizes = [2, 4, None, 2]
expected = [[], [], []]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_sizes_empty(self):
"""``sizes`` argument is empty but ``iterable`` is not. An empty
generator is returned."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = []
expected = []
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_both_empty(self):
"""Both ``sizes`` and ``iterable`` arguments are empty. An empty
generator is returned."""
iterable = []
sizes = []
expected = []
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_bool_in_sizes(self):
"""A bool object is present in ``sizes`` is treated as a 1 or 0 for
``True`` or ``False`` due to bool being an instance of int."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [3, True, 2, False]
expected = [[1, 2, 3], [4], [5, 6], []]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_invalid_in_sizes(self):
"""A ValueError is raised if an object in ``sizes`` is neither ``None``
or an integer."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [1, [], 3]
with self.assertRaises(ValueError):
list(mi.split_into(iterable, sizes))
def test_invalid_in_sizes_after_none(self):
"""A item in ``sizes`` that is invalid will not raise a TypeError if it
comes after a ``None`` item."""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = [3, 4, None, []]
expected = [[1, 2, 3], [4, 5, 6, 7], [8, 9]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
def test_generator_iterable_integrity(self):
"""Check that if ``iterable`` is an iterator, it is consumed only by as
many items as the sum of ``sizes``."""
iterable = (i for i in range(10))
sizes = [2, 3]
expected = [[0, 1], [2, 3, 4]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
iterable_expected = [5, 6, 7, 8, 9]
iterable_actual = list(iterable)
self.assertEqual(iterable_actual, iterable_expected)
def test_generator_sizes_integrity(self):
"""Check that if ``sizes`` is an iterator, it is consumed only until a
``None`` item is reached"""
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9]
sizes = (i for i in [1, 2, None, 3, 4])
expected = [[1], [2, 3], [4, 5, 6, 7, 8, 9]]
actual = list(mi.split_into(iterable, sizes))
self.assertEqual(actual, expected)
sizes_expected = [3, 4]
sizes_actual = list(sizes)
self.assertEqual(sizes_actual, sizes_expected)
class PaddedTest(TestCase):
"""Tests for ``padded()``"""
def test_no_n(self):
seq = [1, 2, 3]
# No fillvalue
self.assertEqual(mi.take(5, mi.padded(seq)), [1, 2, 3, None, None])
# With fillvalue
self.assertEqual(
mi.take(5, mi.padded(seq, fillvalue='')), [1, 2, 3, '', '']
)
def test_invalid_n(self):
self.assertRaises(ValueError, lambda: list(mi.padded([1, 2, 3], n=-1)))
self.assertRaises(ValueError, lambda: list(mi.padded([1, 2, 3], n=0)))
def test_valid_n(self):
seq = [1, 2, 3, 4, 5]
# No need for padding: len(seq) <= n
self.assertEqual(list(mi.padded(seq, n=4)), [1, 2, 3, 4, 5])
self.assertEqual(list(mi.padded(seq, n=5)), [1, 2, 3, 4, 5])
# No fillvalue
self.assertEqual(
list(mi.padded(seq, n=7)), [1, 2, 3, 4, 5, None, None]
)
# With fillvalue
self.assertEqual(
list(mi.padded(seq, fillvalue='', n=7)), [1, 2, 3, 4, 5, '', '']
)
def test_next_multiple(self):
seq = [1, 2, 3, 4, 5, 6]
# No need for padding: len(seq) % n == 0
self.assertEqual(
list(mi.padded(seq, n=3, next_multiple=True)), [1, 2, 3, 4, 5, 6]
)
# Padding needed: len(seq) < n
self.assertEqual(
list(mi.padded(seq, n=8, next_multiple=True)),
[1, 2, 3, 4, 5, 6, None, None]
)
# No padding needed: len(seq) == n
self.assertEqual(
list(mi.padded(seq, n=6, next_multiple=True)), [1, 2, 3, 4, 5, 6]
)
# Padding needed: len(seq) > n
self.assertEqual(
list(mi.padded(seq, n=4, next_multiple=True)),
[1, 2, 3, 4, 5, 6, None, None]
)
# With fillvalue
self.assertEqual(
list(mi.padded(seq, fillvalue='', n=4, next_multiple=True)),
[1, 2, 3, 4, 5, 6, '', '']
)
class DistributeTest(TestCase):
"""Tests for distribute()"""
def test_invalid_n(self):
self.assertRaises(ValueError, lambda: mi.distribute(-1, [1, 2, 3]))
self.assertRaises(ValueError, lambda: mi.distribute(0, [1, 2, 3]))
def test_basic(self):
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for n, expected in [
(1, [iterable]),
(2, [[1, 3, 5, 7, 9], [2, 4, 6, 8, 10]]),
(3, [[1, 4, 7, 10], [2, 5, 8], [3, 6, 9]]),
(10, [[n] for n in range(1, 10 + 1)]),
]:
self.assertEqual(
[list(x) for x in mi.distribute(n, iterable)], expected
)
def test_large_n(self):
iterable = [1, 2, 3, 4]
self.assertEqual(
[list(x) for x in mi.distribute(6, iterable)],
[[1], [2], [3], [4], [], []]
)
class StaggerTest(TestCase):
"""Tests for ``stagger()``"""
def test_default(self):
iterable = [0, 1, 2, 3]
actual = list(mi.stagger(iterable))
expected = [(None, 0, 1), (0, 1, 2), (1, 2, 3)]
self.assertEqual(actual, expected)
def test_offsets(self):
iterable = [0, 1, 2, 3]
for offsets, expected in [
((-2, 0, 2), [('', 0, 2), ('', 1, 3)]),
((-2, -1), [('', ''), ('', 0), (0, 1), (1, 2), (2, 3)]),
((1, 2), [(1, 2), (2, 3)]),
]:
all_groups = mi.stagger(iterable, offsets=offsets, fillvalue='')
self.assertEqual(list(all_groups), expected)
def test_longest(self):
iterable = [0, 1, 2, 3]
for offsets, expected in [
(
(-1, 0, 1),
[('', 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, ''), (3, '', '')]
),
((-2, -1), [('', ''), ('', 0), (0, 1), (1, 2), (2, 3), (3, '')]),
((1, 2), [(1, 2), (2, 3), (3, '')]),
]:
all_groups = mi.stagger(
iterable, offsets=offsets, fillvalue='', longest=True
)
self.assertEqual(list(all_groups), expected)
class ZipOffsetTest(TestCase):
"""Tests for ``zip_offset()``"""
def test_shortest(self):
a_1 = [0, 1, 2, 3]
a_2 = [0, 1, 2, 3, 4, 5]
a_3 = [0, 1, 2, 3, 4, 5, 6, 7]
actual = list(
mi.zip_offset(a_1, a_2, a_3, offsets=(-1, 0, 1), fillvalue='')
)
expected = [('', 0, 1), (0, 1, 2), (1, 2, 3), (2, 3, 4), (3, 4, 5)]
self.assertEqual(actual, expected)
def test_longest(self):
a_1 = [0, 1, 2, 3]
a_2 = [0, 1, 2, 3, 4, 5]
a_3 = [0, 1, 2, 3, 4, 5, 6, 7]
actual = list(
mi.zip_offset(a_1, a_2, a_3, offsets=(-1, 0, 1), longest=True)
)
expected = [
(None, 0, 1),
(0, 1, 2),
(1, 2, 3),
(2, 3, 4),
(3, 4, 5),
(None, 5, 6),
(None, None, 7),
]
self.assertEqual(actual, expected)
def test_mismatch(self):
iterables = [0, 1, 2], [2, 3, 4]
offsets = (-1, 0, 1)
self.assertRaises(
ValueError,
lambda: list(mi.zip_offset(*iterables, offsets=offsets))
)
class UnzipTests(TestCase):
"""Tests for unzip()"""
def test_empty_iterable(self):
self.assertEqual(list(mi.unzip([])), [])
# in reality zip([], [], []) is equivalent to iter([])
# but it doesn't hurt to test both
self.assertEqual(list(mi.unzip(zip([], [], []))), [])
def test_length_one_iterable(self):
xs, ys, zs = mi.unzip(zip([1], [2], [3]))
self.assertEqual(list(xs), [1])
self.assertEqual(list(ys), [2])
self.assertEqual(list(zs), [3])
def test_normal_case(self):
xs, ys, zs = range(10), range(1, 11), range(2, 12)
zipped = zip(xs, ys, zs)
xs, ys, zs = mi.unzip(zipped)
self.assertEqual(list(xs), list(range(10)))
self.assertEqual(list(ys), list(range(1, 11)))
self.assertEqual(list(zs), list(range(2, 12)))
def test_improperly_zipped(self):
zipped = iter([(1, 2, 3), (4, 5), (6,)])
xs, ys, zs = mi.unzip(zipped)
self.assertEqual(list(xs), [1, 4, 6])
self.assertEqual(list(ys), [2, 5])
self.assertEqual(list(zs), [3])
def test_increasingly_zipped(self):
zipped = iter([(1, 2), (3, 4, 5), (6, 7, 8, 9)])
unzipped = mi.unzip(zipped)
# from the docstring:
# len(first tuple) is the number of iterables zipped
self.assertEqual(len(unzipped), 2)
xs, ys = unzipped
self.assertEqual(list(xs), [1, 3, 6])
self.assertEqual(list(ys), [2, 4, 7])
class SortTogetherTest(TestCase):
"""Tests for sort_together()"""
def test_key_list(self):
"""tests `key_list` including default, iterables include duplicates"""
iterables = [
['GA', 'GA', 'GA', 'CT', 'CT', 'CT'],
['May', 'Aug.', 'May', 'June', 'July', 'July'],
[97, 20, 100, 70, 100, 20]
]
self.assertEqual(
mi.sort_together(iterables),
[
('CT', 'CT', 'CT', 'GA', 'GA', 'GA'),
('June', 'July', 'July', 'May', 'Aug.', 'May'),
(70, 100, 20, 97, 20, 100)
]
)
self.assertEqual(
mi.sort_together(iterables, key_list=(0, 1)),
[
('CT', 'CT', 'CT', 'GA', 'GA', 'GA'),
('July', 'July', 'June', 'Aug.', 'May', 'May'),
(100, 20, 70, 20, 97, 100)
]
)
self.assertEqual(
mi.sort_together(iterables, key_list=(0, 1, 2)),
[
('CT', 'CT', 'CT', 'GA', 'GA', 'GA'),
('July', 'July', 'June', 'Aug.', 'May', 'May'),
(20, 100, 70, 20, 97, 100)
]
)
self.assertEqual(
mi.sort_together(iterables, key_list=(2,)),
[
('GA', 'CT', 'CT', 'GA', 'GA', 'CT'),
('Aug.', 'July', 'June', 'May', 'May', 'July'),
(20, 20, 70, 97, 100, 100)
]
)
def test_invalid_key_list(self):
"""tests `key_list` for indexes not available in `iterables`"""
iterables = [
['GA', 'GA', 'GA', 'CT', 'CT', 'CT'],
['May', 'Aug.', 'May', 'June', 'July', 'July'],
[97, 20, 100, 70, 100, 20]
]
self.assertRaises(
IndexError, lambda: mi.sort_together(iterables, key_list=(5,))
)
def test_reverse(self):
"""tests `reverse` to ensure a reverse sort for `key_list` iterables"""
iterables = [
['GA', 'GA', 'GA', 'CT', 'CT', 'CT'],
['May', 'Aug.', 'May', 'June', 'July', 'July'],
[97, 20, 100, 70, 100, 20]
]
self.assertEqual(
mi.sort_together(iterables, key_list=(0, 1, 2), reverse=True),
[('GA', 'GA', 'GA', 'CT', 'CT', 'CT'),
('May', 'May', 'Aug.', 'June', 'July', 'July'),
(100, 97, 20, 70, 100, 20)]
)
def test_uneven_iterables(self):
"""tests trimming of iterables to the shortest length before sorting"""
iterables = [['GA', 'GA', 'GA', 'CT', 'CT', 'CT', 'MA'],
['May', 'Aug.', 'May', 'June', 'July', 'July'],
[97, 20, 100, 70, 100, 20, 0]]
self.assertEqual(
mi.sort_together(iterables),
[
('CT', 'CT', 'CT', 'GA', 'GA', 'GA'),
('June', 'July', 'July', 'May', 'Aug.', 'May'),
(70, 100, 20, 97, 20, 100)
]
)
class DivideTest(TestCase):
"""Tests for divide()"""
def test_invalid_n(self):
self.assertRaises(ValueError, lambda: mi.divide(-1, [1, 2, 3]))
self.assertRaises(ValueError, lambda: mi.divide(0, [1, 2, 3]))
def test_basic(self):
iterable = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10]
for n, expected in [
(1, [iterable]),
(2, [[1, 2, 3, 4, 5], [6, 7, 8, 9, 10]]),
(3, [[1, 2, 3, 4], [5, 6, 7], [8, 9, 10]]),
(10, [[n] for n in range(1, 10 + 1)]),
]:
self.assertEqual(
[list(x) for x in mi.divide(n, iterable)], expected
)
def test_large_n(self):
iterable = [1, 2, 3, 4]
self.assertEqual(
[list(x) for x in mi.divide(6, iterable)],
[[1], [2], [3], [4], [], []]
)
class TestAlwaysIterable(TestCase):
"""Tests for always_iterable()"""
def test_single(self):
self.assertEqual(list(mi.always_iterable(1)), [1])
def test_strings(self):
for obj in ['foo', b'bar', 'baz']:
actual = list(mi.always_iterable(obj))
expected = [obj]
self.assertEqual(actual, expected)
def test_base_type(self):
dict_obj = {'a': 1, 'b': 2}
str_obj = '123'
# Default: dicts are iterable like they normally are
default_actual = list(mi.always_iterable(dict_obj))
default_expected = list(dict_obj)
self.assertEqual(default_actual, default_expected)
# Unitary types set: dicts are not iterable
custom_actual = list(mi.always_iterable(dict_obj, base_type=dict))
custom_expected = [dict_obj]
self.assertEqual(custom_actual, custom_expected)
# With unitary types set, strings are iterable
str_actual = list(mi.always_iterable(str_obj, base_type=None))
str_expected = list(str_obj)
self.assertEqual(str_actual, str_expected)
def test_iterables(self):
self.assertEqual(list(mi.always_iterable([0, 1])), [0, 1])
self.assertEqual(
list(mi.always_iterable([0, 1], base_type=list)), [[0, 1]]
)
self.assertEqual(
list(mi.always_iterable(iter('foo'))), ['f', 'o', 'o']
)
self.assertEqual(list(mi.always_iterable([])), [])
def test_none(self):
self.assertEqual(list(mi.always_iterable(None)), [])
def test_generator(self):
def _gen():
yield 0
yield 1
self.assertEqual(list(mi.always_iterable(_gen())), [0, 1])
class AdjacentTests(TestCase):
def test_typical(self):
actual = list(mi.adjacent(lambda x: x % 5 == 0, range(10)))
expected = [(True, 0), (True, 1), (False, 2), (False, 3), (True, 4),
(True, 5), (True, 6), (False, 7), (False, 8), (False, 9)]
self.assertEqual(actual, expected)
def test_empty_iterable(self):
actual = list(mi.adjacent(lambda x: x % 5 == 0, []))
expected = []
self.assertEqual(actual, expected)
def test_length_one(self):
actual = list(mi.adjacent(lambda x: x % 5 == 0, [0]))
expected = [(True, 0)]
self.assertEqual(actual, expected)
actual = list(mi.adjacent(lambda x: x % 5 == 0, [1]))
expected = [(False, 1)]
self.assertEqual(actual, expected)
def test_consecutive_true(self):
"""Test that when the predicate matches multiple consecutive elements
it doesn't repeat elements in the output"""
actual = list(mi.adjacent(lambda x: x % 5 < 2, range(10)))
expected = [(True, 0), (True, 1), (True, 2), (False, 3), (True, 4),
(True, 5), (True, 6), (True, 7), (False, 8), (False, 9)]
self.assertEqual(actual, expected)
def test_distance(self):
actual = list(mi.adjacent(lambda x: x % 5 == 0, range(10), distance=2))
expected = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4),
(True, 5), (True, 6), (True, 7), (False, 8), (False, 9)]
self.assertEqual(actual, expected)
actual = list(mi.adjacent(lambda x: x % 5 == 0, range(10), distance=3))
expected = [(True, 0), (True, 1), (True, 2), (True, 3), (True, 4),
(True, 5), (True, 6), (True, 7), (True, 8), (False, 9)]
self.assertEqual(actual, expected)
def test_large_distance(self):
"""Test distance larger than the length of the iterable"""
iterable = range(10)
actual = list(mi.adjacent(lambda x: x % 5 == 4, iterable, distance=20))
expected = list(zip(repeat(True), iterable))
self.assertEqual(actual, expected)
actual = list(mi.adjacent(lambda x: False, iterable, distance=20))
expected = list(zip(repeat(False), iterable))
self.assertEqual(actual, expected)
def test_zero_distance(self):
"""Test that adjacent() reduces to zip+map when distance is 0"""
iterable = range(1000)
predicate = lambda x: x % 4 == 2
actual = mi.adjacent(predicate, iterable, 0)
expected = zip(map(predicate, iterable), iterable)
self.assertTrue(all(a == e for a, e in zip(actual, expected)))
def test_negative_distance(self):
"""Test that adjacent() raises an error with negative distance"""
pred = lambda x: x
self.assertRaises(
ValueError, lambda: mi.adjacent(pred, range(1000), -1)
)
self.assertRaises(
ValueError, lambda: mi.adjacent(pred, range(10), -10)
)
def test_grouping(self):
"""Test interaction of adjacent() with groupby_transform()"""
iterable = mi.adjacent(lambda x: x % 5 == 0, range(10))
grouper = mi.groupby_transform(iterable, itemgetter(0), itemgetter(1))
actual = [(k, list(g)) for k, g in grouper]
expected = [
(True, [0, 1]),
(False, [2, 3]),
(True, [4, 5, 6]),
(False, [7, 8, 9]),
]
self.assertEqual(actual, expected)
def test_call_once(self):
"""Test that the predicate is only called once per item."""
already_seen = set()
iterable = range(10)
def predicate(item):
self.assertNotIn(item, already_seen)
already_seen.add(item)
return True
actual = list(mi.adjacent(predicate, iterable))
expected = [(True, x) for x in iterable]
self.assertEqual(actual, expected)
class GroupByTransformTests(TestCase):
def assertAllGroupsEqual(self, groupby1, groupby2):
"""Compare two groupby objects for equality, both keys and groups."""
for a, b in zip(groupby1, groupby2):
key1, group1 = a
key2, group2 = b
self.assertEqual(key1, key2)
self.assertListEqual(list(group1), list(group2))
self.assertRaises(StopIteration, lambda: next(groupby1))
self.assertRaises(StopIteration, lambda: next(groupby2))
def test_default_funcs(self):
"""Test that groupby_transform() with default args mimics groupby()"""
iterable = [(x // 5, x) for x in range(1000)]
actual = mi.groupby_transform(iterable)
expected = groupby(iterable)
self.assertAllGroupsEqual(actual, expected)
def test_valuefunc(self):
iterable = [(int(x / 5), int(x / 3), x) for x in range(10)]
# Test the standard usage of grouping one iterable using another's keys
grouper = mi.groupby_transform(
iterable, keyfunc=itemgetter(0), valuefunc=itemgetter(-1)
)
actual = [(k, list(g)) for k, g in grouper]
expected = [(0, [0, 1, 2, 3, 4]), (1, [5, 6, 7, 8, 9])]
self.assertEqual(actual, expected)
grouper = mi.groupby_transform(
iterable, keyfunc=itemgetter(1), valuefunc=itemgetter(-1)
)
actual = [(k, list(g)) for k, g in grouper]
expected = [(0, [0, 1, 2]), (1, [3, 4, 5]), (2, [6, 7, 8]), (3, [9])]
self.assertEqual(actual, expected)
# and now for something a little different
d = dict(zip(range(10), 'abcdefghij'))
grouper = mi.groupby_transform(
range(10), keyfunc=lambda x: x // 5, valuefunc=d.get
)
actual = [(k, ''.join(g)) for k, g in grouper]
expected = [(0, 'abcde'), (1, 'fghij')]
self.assertEqual(actual, expected)
def test_no_valuefunc(self):
iterable = range(1000)
def key(x):
return x // 5
actual = mi.groupby_transform(iterable, key, valuefunc=None)
expected = groupby(iterable, key)
self.assertAllGroupsEqual(actual, expected)
actual = mi.groupby_transform(iterable, key) # default valuefunc
expected = groupby(iterable, key)
self.assertAllGroupsEqual(actual, expected)
class NumericRangeTests(TestCase):
def test_basic(self):
for args, expected in [
((4,), [0, 1, 2, 3]),
((4.0,), [0.0, 1.0, 2.0, 3.0]),
((1.0, 4), [1.0, 2.0, 3.0]),
((1, 4.0), [1, 2, 3]),
((1.0, 5), [1.0, 2.0, 3.0, 4.0]),
((0, 20, 5), [0, 5, 10, 15]),
((0, 20, 5.0), [0.0, 5.0, 10.0, 15.0]),
((0, 10, 3), [0, 3, 6, 9]),
((0, 10, 3.0), [0.0, 3.0, 6.0, 9.0]),
((0, -5, -1), [0, -1, -2, -3, -4]),
((0.0, -5, -1), [0.0, -1.0, -2.0, -3.0, -4.0]),
((1, 2, Fraction(1, 2)), [Fraction(1, 1), Fraction(3, 2)]),
((0,), []),
((0.0,), []),
((1, 0), []),
((1.0, 0.0), []),
((Fraction(2, 1),), [Fraction(0, 1), Fraction(1, 1)]),
((Decimal('2.0'),), [Decimal('0.0'), Decimal('1.0')]),
]:
actual = list(mi.numeric_range(*args))
self.assertEqual(actual, expected)
self.assertTrue(
all(type(a) == type(e) for a, e in zip(actual, expected))
)
def test_arg_count(self):
self.assertRaises(TypeError, lambda: list(mi.numeric_range()))
self.assertRaises(
TypeError, lambda: list(mi.numeric_range(0, 1, 2, 3))
)
def test_zero_step(self):
self.assertRaises(
ValueError, lambda: list(mi.numeric_range(1, 2, 0))
)
class CountCycleTests(TestCase):
def test_basic(self):
expected = [
(0, 'a'), (0, 'b'), (0, 'c'),
(1, 'a'), (1, 'b'), (1, 'c'),
(2, 'a'), (2, 'b'), (2, 'c'),
]
for actual in [
mi.take(9, mi.count_cycle('abc')), # n=None
list(mi.count_cycle('abc', 3)), # n=3
]:
self.assertEqual(actual, expected)
def test_empty(self):
self.assertEqual(list(mi.count_cycle('')), [])
self.assertEqual(list(mi.count_cycle('', 2)), [])
def test_negative(self):
self.assertEqual(list(mi.count_cycle('abc', -3)), [])
class LocateTests(TestCase):
def test_default_pred(self):
iterable = [0, 1, 1, 0, 1, 0, 0]
actual = list(mi.locate(iterable))
expected = [1, 2, 4]
self.assertEqual(actual, expected)
def test_no_matches(self):
iterable = [0, 0, 0]
actual = list(mi.locate(iterable))
expected = []
self.assertEqual(actual, expected)
def test_custom_pred(self):
iterable = ['0', 1, 1, '0', 1, '0', '0']
pred = lambda x: x == '0'
actual = list(mi.locate(iterable, pred))
expected = [0, 3, 5, 6]
self.assertEqual(actual, expected)
def test_window_size(self):
iterable = ['0', 1, 1, '0', 1, '0', '0']
pred = lambda *args: args == ('0', 1)
actual = list(mi.locate(iterable, pred, window_size=2))
expected = [0, 3]
self.assertEqual(actual, expected)
def test_window_size_large(self):
iterable = [1, 2, 3, 4]
pred = lambda a, b, c, d, e: True
actual = list(mi.locate(iterable, pred, window_size=5))
expected = [0]
self.assertEqual(actual, expected)
def test_window_size_zero(self):
iterable = [1, 2, 3, 4]
pred = lambda: True
with self.assertRaises(ValueError):
list(mi.locate(iterable, pred, window_size=0))
class StripFunctionTests(TestCase):
def test_hashable(self):
iterable = list('www.example.com')
pred = lambda x: x in set('cmowz.')
self.assertEqual(list(mi.lstrip(iterable, pred)), list('example.com'))
self.assertEqual(list(mi.rstrip(iterable, pred)), list('www.example'))
self.assertEqual(list(mi.strip(iterable, pred)), list('example'))
def test_not_hashable(self):
iterable = [
list('http://'), list('www'), list('.example'), list('.com')
]
pred = lambda x: x in [list('http://'), list('www'), list('.com')]
self.assertEqual(list(mi.lstrip(iterable, pred)), iterable[2:])
self.assertEqual(list(mi.rstrip(iterable, pred)), iterable[:3])
self.assertEqual(list(mi.strip(iterable, pred)), iterable[2: 3])
def test_math(self):
iterable = [0, 1, 2, 3, 0, 1, 2, 3, 0, 1, 2]
pred = lambda x: x <= 2
self.assertEqual(list(mi.lstrip(iterable, pred)), iterable[3:])
self.assertEqual(list(mi.rstrip(iterable, pred)), iterable[:-3])
self.assertEqual(list(mi.strip(iterable, pred)), iterable[3:-3])
class IsliceExtendedTests(TestCase):
def test_all(self):
iterable = ['0', '1', '2', '3', '4', '5']
indexes = list(range(-4, len(iterable) + 4)) + [None]
steps = [1, 2, 3, 4, -1, -2, -3, 4]
for slice_args in product(indexes, indexes, steps):
try:
actual = list(mi.islice_extended(iterable, *slice_args))
except Exception as e:
self.fail((slice_args, e))
expected = iterable[slice(*slice_args)]
self.assertEqual(actual, expected, slice_args)
def test_zero_step(self):
with self.assertRaises(ValueError):
list(mi.islice_extended([1, 2, 3], 0, 1, 0))
class ConsecutiveGroupsTest(TestCase):
def test_numbers(self):
iterable = [-10, -8, -7, -6, 1, 2, 4, 5, -1, 7]
actual = [list(g) for g in mi.consecutive_groups(iterable)]
expected = [[-10], [-8, -7, -6], [1, 2], [4, 5], [-1], [7]]
self.assertEqual(actual, expected)
def test_custom_ordering(self):
iterable = ['1', '10', '11', '20', '21', '22', '30', '31']
ordering = lambda x: int(x)
actual = [list(g) for g in mi.consecutive_groups(iterable, ordering)]
expected = [['1'], ['10', '11'], ['20', '21', '22'], ['30', '31']]
self.assertEqual(actual, expected)
def test_exotic_ordering(self):
iterable = [
('a', 'b', 'c', 'd'),
('a', 'c', 'b', 'd'),
('a', 'c', 'd', 'b'),
('a', 'd', 'b', 'c'),
('d', 'b', 'c', 'a'),
('d', 'c', 'a', 'b'),
]
ordering = list(permutations('abcd')).index
actual = [list(g) for g in mi.consecutive_groups(iterable, ordering)]
expected = [
[('a', 'b', 'c', 'd')],
[('a', 'c', 'b', 'd'), ('a', 'c', 'd', 'b'), ('a', 'd', 'b', 'c')],
[('d', 'b', 'c', 'a'), ('d', 'c', 'a', 'b')],
]
self.assertEqual(actual, expected)
class DifferenceTest(TestCase):
def test_normal(self):
iterable = [10, 20, 30, 40, 50]
actual = list(mi.difference(iterable))
expected = [10, 10, 10, 10, 10]
self.assertEqual(actual, expected)
def test_custom(self):
iterable = [10, 20, 30, 40, 50]
actual = list(mi.difference(iterable, add))
expected = [10, 30, 50, 70, 90]
self.assertEqual(actual, expected)
def test_roundtrip(self):
original = list(range(100))
accumulated = accumulate(original)
actual = list(mi.difference(accumulated))
self.assertEqual(actual, original)
def test_one(self):
self.assertEqual(list(mi.difference([0])), [0])
def test_empty(self):
self.assertEqual(list(mi.difference([])), [])
class SeekableTest(TestCase):
def test_exhaustion_reset(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(list(s), iterable) # Normal iteration
self.assertEqual(list(s), []) # Iterable is exhausted
s.seek(0)
self.assertEqual(list(s), iterable) # Back in action
def test_partial_reset(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(mi.take(5, s), iterable[:5]) # Normal iteration
s.seek(1)
self.assertEqual(list(s), iterable[1:]) # Get the rest of the iterable
def test_forward(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(mi.take(1, s), iterable[:1]) # Normal iteration
s.seek(3) # Skip over index 2
self.assertEqual(list(s), iterable[3:]) # Result is similar to slicing
s.seek(0) # Back to 0
self.assertEqual(list(s), iterable) # No difference in result
def test_past_end(self):
iterable = [str(n) for n in range(10)]
s = mi.seekable(iterable)
self.assertEqual(mi.take(1, s), iterable[:1]) # Normal iteration
s.seek(20)
self.assertEqual(list(s), []) # Iterable is exhausted
s.seek(0) # Back to 0
self.assertEqual(list(s), iterable) # No difference in result
def test_elements(self):
iterable = map(str, count())
s = mi.seekable(iterable)
mi.take(10, s)
elements = s.elements()
self.assertEqual(
[elements[i] for i in range(10)], [str(n) for n in range(10)]
)
self.assertEqual(len(elements), 10)
mi.take(10, s)
self.assertEqual(list(elements), [str(n) for n in range(20)])
class SequenceViewTests(TestCase):
def test_init(self):
view = mi.SequenceView((1, 2, 3))
self.assertEqual(repr(view), "SequenceView((1, 2, 3))")
self.assertRaises(TypeError, lambda: mi.SequenceView({}))
def test_update(self):
seq = [1, 2, 3]
view = mi.SequenceView(seq)
self.assertEqual(len(view), 3)
self.assertEqual(repr(view), "SequenceView([1, 2, 3])")
seq.pop()
self.assertEqual(len(view), 2)
self.assertEqual(repr(view), "SequenceView([1, 2])")
def test_indexing(self):
seq = ('a', 'b', 'c', 'd', 'e', 'f')
view = mi.SequenceView(seq)
for i in range(-len(seq), len(seq)):
self.assertEqual(view[i], seq[i])
def test_slicing(self):
seq = ('a', 'b', 'c', 'd', 'e', 'f')
view = mi.SequenceView(seq)
n = len(seq)
indexes = list(range(-n - 1, n + 1)) + [None]
steps = list(range(-n, n + 1))
steps.remove(0)
for slice_args in product(indexes, indexes, steps):
i = slice(*slice_args)
self.assertEqual(view[i], seq[i])
def test_abc_methods(self):
# collections.Sequence should provide all of this functionality
seq = ('a', 'b', 'c', 'd', 'e', 'f', 'f')
view = mi.SequenceView(seq)
# __contains__
self.assertIn('b', view)
self.assertNotIn('g', view)
# __iter__
self.assertEqual(list(iter(view)), list(seq))
# __reversed__
self.assertEqual(list(reversed(view)), list(reversed(seq)))
# index
self.assertEqual(view.index('b'), 1)
# count
        self.assertEqual(view.count('f'), 2)
class RunLengthTest(TestCase):
def test_encode(self):
iterable = (int(str(n)[0]) for n in count(800))
actual = mi.take(4, mi.run_length.encode(iterable))
expected = [(8, 100), (9, 100), (1, 1000), (2, 1000)]
self.assertEqual(actual, expected)
def test_decode(self):
iterable = [('d', 4), ('c', 3), ('b', 2), ('a', 1)]
actual = ''.join(mi.run_length.decode(iterable))
expected = 'ddddcccbba'
self.assertEqual(actual, expected)
class ExactlyNTests(TestCase):
"""Tests for ``exactly_n()``"""
def test_true(self):
"""Iterable has ``n`` ``True`` elements"""
self.assertTrue(mi.exactly_n([True, False, True], 2))
self.assertTrue(mi.exactly_n([1, 1, 1, 0], 3))
self.assertTrue(mi.exactly_n([False, False], 0))
self.assertTrue(mi.exactly_n(range(100), 10, lambda x: x < 10))
def test_false(self):
"""Iterable does not have ``n`` ``True`` elements"""
self.assertFalse(mi.exactly_n([True, False, False], 2))
self.assertFalse(mi.exactly_n([True, True, False], 1))
self.assertFalse(mi.exactly_n([False], 1))
self.assertFalse(mi.exactly_n([True], -1))
self.assertFalse(mi.exactly_n(repeat(True), 100))
def test_empty(self):
"""Return ``True`` if the iterable is empty and ``n`` is 0"""
self.assertTrue(mi.exactly_n([], 0))
self.assertFalse(mi.exactly_n([], 1))
class AlwaysReversibleTests(TestCase):
"""Tests for ``always_reversible()``"""
def test_regular_reversed(self):
self.assertEqual(list(reversed(range(10))),
list(mi.always_reversible(range(10))))
self.assertEqual(list(reversed([1, 2, 3])),
list(mi.always_reversible([1, 2, 3])))
self.assertEqual(reversed([1, 2, 3]).__class__,
mi.always_reversible([1, 2, 3]).__class__)
def test_nonseq_reversed(self):
# Create a non-reversible generator from a sequence
with self.assertRaises(TypeError):
reversed(x for x in range(10))
self.assertEqual(list(reversed(range(10))),
list(mi.always_reversible(x for x in range(10))))
self.assertEqual(list(reversed([1, 2, 3])),
list(mi.always_reversible(x for x in [1, 2, 3])))
self.assertNotEqual(reversed((1, 2)).__class__,
mi.always_reversible(x for x in (1, 2)).__class__)
class CircularShiftsTests(TestCase):
def test_empty(self):
# empty iterable -> empty list
self.assertEqual(list(mi.circular_shifts([])), [])
def test_simple_circular_shifts(self):
        # test a simple iterator case
self.assertEqual(
mi.circular_shifts(range(4)),
[(0, 1, 2, 3), (1, 2, 3, 0), (2, 3, 0, 1), (3, 0, 1, 2)]
)
def test_duplicates(self):
# test non-distinct entries
self.assertEqual(
mi.circular_shifts([0, 1, 0, 1]),
[(0, 1, 0, 1), (1, 0, 1, 0), (0, 1, 0, 1), (1, 0, 1, 0)]
)
class MakeDecoratorTests(TestCase):
def test_basic(self):
slicer = mi.make_decorator(islice)
@slicer(1, 10, 2)
def user_function(arg_1, arg_2, kwarg_1=None):
self.assertEqual(arg_1, 'arg_1')
self.assertEqual(arg_2, 'arg_2')
self.assertEqual(kwarg_1, 'kwarg_1')
return map(str, count())
it = user_function('arg_1', 'arg_2', kwarg_1='kwarg_1')
actual = list(it)
expected = ['1', '3', '5', '7', '9']
self.assertEqual(actual, expected)
def test_result_index(self):
def stringify(*args, **kwargs):
self.assertEqual(args[0], 'arg_0')
iterable = args[1]
self.assertEqual(args[2], 'arg_2')
self.assertEqual(kwargs['kwarg_1'], 'kwarg_1')
return map(str, iterable)
stringifier = mi.make_decorator(stringify, result_index=1)
@stringifier('arg_0', 'arg_2', kwarg_1='kwarg_1')
def user_function(n):
return count(n)
it = user_function(1)
actual = mi.take(5, it)
expected = ['1', '2', '3', '4', '5']
self.assertEqual(actual, expected)
def test_wrap_class(self):
seeker = mi.make_decorator(mi.seekable)
@seeker()
def user_function(n):
return map(str, range(n))
it = user_function(5)
self.assertEqual(list(it), ['0', '1', '2', '3', '4'])
it.seek(0)
self.assertEqual(list(it), ['0', '1', '2', '3', '4'])
class MapReduceTests(TestCase):
def test_default(self):
iterable = (str(x) for x in range(5))
keyfunc = lambda x: int(x) // 2
actual = sorted(mi.map_reduce(iterable, keyfunc).items())
expected = [(0, ['0', '1']), (1, ['2', '3']), (2, ['4'])]
self.assertEqual(actual, expected)
def test_valuefunc(self):
iterable = (str(x) for x in range(5))
keyfunc = lambda x: int(x) // 2
valuefunc = int
actual = sorted(mi.map_reduce(iterable, keyfunc, valuefunc).items())
expected = [(0, [0, 1]), (1, [2, 3]), (2, [4])]
self.assertEqual(actual, expected)
def test_reducefunc(self):
iterable = (str(x) for x in range(5))
keyfunc = lambda x: int(x) // 2
valuefunc = int
reducefunc = lambda value_list: reduce(mul, value_list, 1)
actual = sorted(
mi.map_reduce(iterable, keyfunc, valuefunc, reducefunc).items()
)
expected = [(0, 0), (1, 6), (2, 4)]
self.assertEqual(actual, expected)
def test_ret(self):
d = mi.map_reduce([1, 0, 2, 0, 1, 0], bool)
self.assertEqual(d, {False: [0, 0, 0], True: [1, 2, 1]})
self.assertRaises(KeyError, lambda: d[None].append(1))
class RlocateTests(TestCase):
def test_default_pred(self):
iterable = [0, 1, 1, 0, 1, 0, 0]
for it in (iterable[:], iter(iterable)):
actual = list(mi.rlocate(it))
expected = [4, 2, 1]
self.assertEqual(actual, expected)
def test_no_matches(self):
iterable = [0, 0, 0]
for it in (iterable[:], iter(iterable)):
actual = list(mi.rlocate(it))
expected = []
self.assertEqual(actual, expected)
def test_custom_pred(self):
iterable = ['0', 1, 1, '0', 1, '0', '0']
pred = lambda x: x == '0'
for it in (iterable[:], iter(iterable)):
actual = list(mi.rlocate(it, pred))
expected = [6, 5, 3, 0]
self.assertEqual(actual, expected)
def test_efficient_reversal(self):
iterable = range(9 ** 9) # Is efficiently reversible
target = 9 ** 9 - 2
pred = lambda x: x == target # Find-able from the right
actual = next(mi.rlocate(iterable, pred))
self.assertEqual(actual, target)
def test_window_size(self):
iterable = ['0', 1, 1, '0', 1, '0', '0']
pred = lambda *args: args == ('0', 1)
for it in (iterable, iter(iterable)):
actual = list(mi.rlocate(it, pred, window_size=2))
expected = [3, 0]
self.assertEqual(actual, expected)
def test_window_size_large(self):
iterable = [1, 2, 3, 4]
pred = lambda a, b, c, d, e: True
for it in (iterable, iter(iterable)):
            actual = list(mi.rlocate(it, pred, window_size=5))
expected = [0]
self.assertEqual(actual, expected)
def test_window_size_zero(self):
iterable = [1, 2, 3, 4]
pred = lambda: True
for it in (iterable, iter(iterable)):
with self.assertRaises(ValueError):
                list(mi.rlocate(it, pred, window_size=0))
class ReplaceTests(TestCase):
def test_basic(self):
iterable = range(10)
pred = lambda x: x % 2 == 0
substitutes = []
actual = list(mi.replace(iterable, pred, substitutes))
expected = [1, 3, 5, 7, 9]
self.assertEqual(actual, expected)
def test_count(self):
iterable = range(10)
pred = lambda x: x % 2 == 0
substitutes = []
actual = list(mi.replace(iterable, pred, substitutes, count=4))
expected = [1, 3, 5, 7, 8, 9]
self.assertEqual(actual, expected)
def test_window_size(self):
iterable = range(10)
pred = lambda *args: args == (0, 1, 2)
substitutes = []
actual = list(mi.replace(iterable, pred, substitutes, window_size=3))
expected = [3, 4, 5, 6, 7, 8, 9]
self.assertEqual(actual, expected)
def test_window_size_end(self):
iterable = range(10)
pred = lambda *args: args == (7, 8, 9)
substitutes = []
actual = list(mi.replace(iterable, pred, substitutes, window_size=3))
expected = [0, 1, 2, 3, 4, 5, 6]
self.assertEqual(actual, expected)
def test_window_size_count(self):
iterable = range(10)
pred = lambda *args: (args == (0, 1, 2)) or (args == (7, 8, 9))
substitutes = []
actual = list(
mi.replace(iterable, pred, substitutes, count=1, window_size=3)
)
expected = [3, 4, 5, 6, 7, 8, 9]
self.assertEqual(actual, expected)
def test_window_size_large(self):
iterable = range(4)
pred = lambda a, b, c, d, e: True
substitutes = [5, 6, 7]
actual = list(mi.replace(iterable, pred, substitutes, window_size=5))
expected = [5, 6, 7]
self.assertEqual(actual, expected)
def test_window_size_zero(self):
iterable = range(10)
pred = lambda *args: True
substitutes = []
with self.assertRaises(ValueError):
list(mi.replace(iterable, pred, substitutes, window_size=0))
def test_iterable_substitutes(self):
iterable = range(5)
pred = lambda x: x % 2 == 0
substitutes = iter('__')
actual = list(mi.replace(iterable, pred, substitutes))
expected = ['_', '_', 1, '_', '_', 3, '_', '_']
self.assertEqual(actual, expected)
class PartitionsTest(TestCase):
def test_types(self):
for iterable in [
'abcd',
['a', 'b', 'c', 'd'],
('a', 'b', 'c', 'd'),
]:
with self.subTest(iterable=iterable):
actual = list(mi.partitions(iterable))
expected = [
[['a', 'b', 'c', 'd']],
[['a'], ['b', 'c', 'd']],
[['a', 'b'], ['c', 'd']],
[['a', 'b', 'c'], ['d']],
[['a'], ['b'], ['c', 'd']],
[['a'], ['b', 'c'], ['d']],
[['a', 'b'], ['c'], ['d']],
[['a'], ['b'], ['c'], ['d']]
]
self.assertEqual(actual, expected)
def test_empty(self):
iterable = []
actual = list(mi.partitions(iterable))
expected = [[[]]]
self.assertEqual(actual, expected)
def test_order(self):
iterable = iter([3, 2, 1])
actual = list(mi.partitions(iterable))
expected = [
[[3, 2, 1]],
[[3], [2, 1]],
[[3, 2], [1]],
[[3], [2], [1]],
]
self.assertEqual(actual, expected)
def test_duplicates(self):
iterable = [1, 1, 1]
actual = list(mi.partitions(iterable))
expected = [
[[1, 1, 1]],
[[1], [1, 1]],
[[1, 1], [1]],
[[1], [1], [1]],
]
self.assertEqual(actual, expected)
class TimeLimitedTests(TestCase):
def test_basic(self):
def generator():
yield 1
yield 2
sleep(0.2)
yield 3
iterable = generator()
actual = list(mi.time_limited(0.1, iterable))
expected = [1, 2]
self.assertEqual(actual, expected)
def test_zero_limit(self):
iterable = count()
actual = list(mi.time_limited(0, iterable))
expected = []
self.assertEqual(actual, expected)
def test_invalid_limit(self):
with self.assertRaises(ValueError):
list(mi.time_limited(-0.1, count()))
| mit | 4,473,934,777,082,506,000 | 33.039152 | 79 | 0.525328 | false |
chfw/pyramid-excel | myproject/views.py | 2 | 2146 | from pyramid.view import view_config
import pyexcel.ext.xls
import pyexcel.ext.xlsx
import pyexcel.ext.ods3 # noqa
import pyramid_excel as excel
from .models import (
DBSession,
Category,
Post
)
@view_config(route_name='home', renderer='templates/mytemplate.pt')
def my_view(request):
return {'project': 'MyProject'}
@view_config(route_name='switch')
def switch(request):
sheet = request.get_sheet(field_name='file')
return excel.make_response(sheet,
request.matchdict.get('file_type', 'csv'))
@view_config(route_name='upload', renderer='templates/upload_form.pt')
def upload_view(request):
if request.method == 'POST':
data = request.get_array(field_name='file')
return excel.make_response_from_array(data, 'xls')
@view_config(route_name='download', renderer='templates/upload_form.pt')
def download_attachment(request):
data = [[1, 2], [3, 4]]
return excel.make_response_from_array(
data,
request.matchdict.get('file_type', 'csv'),
file_name=request.matchdict.get('file_name', ''))
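# Illustrative sketch (comment added for clarity, not part of the original
# project): the views above assume routes registered elsewhere, e.g. in the
# application's __init__.py; the route names and URL patterns below are
# assumptions:
#
#   config.add_route('switch', '/switch/{file_type}')
#   config.add_route('download', '/download/{file_type}/{file_name}')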
@view_config(route_name='uploadall')
def upload_all(request):
def category_init_func(row):
c = Category(row['name'])
c.id = row['id']
return c
def post_init_func(row):
        # lesson learned: the relation needs an object, not a string
c = DBSession.query(Category).filter_by(name=row['category']).first()
p = Post(row['title'], row['body'], c, row['pub_date'])
return p
request.save_book_to_database(
field_name='file', session=DBSession,
tables=[Category, Post],
initializers=[category_init_func, post_init_func])
return excel.make_response_from_tables(DBSession, [Category, Post], "xls")
@view_config(route_name='upload_categories')
def upload_categories(request):
def table_init_func(row):
return Category(row['name'])
request.save_to_database(
field_name='file', session=DBSession,
table=Category, initializer=table_init_func)
return excel.make_response_from_a_table(DBSession, Category, "xls")
| bsd-3-clause | -3,850,079,075,678,964,000 | 29.225352 | 78 | 0.655638 | false |
NunaHealth/json_delta | src/json_delta/_diff.py | 1 | 13161 | # -*- encoding: utf-8 -*-
# json_delta: a library for computing deltas between JSON-serializable
# structures.
# json_delta/_diff.py
#
# Copyright 2012‒2015 Philip J. Roberts <[email protected]>.
# BSD License applies; see the LICENSE file, or
# http://opensource.org/licenses/BSD-2-Clause
'''Functions for computing JSON-format diffs.'''
from __future__ import print_function, unicode_literals
from ._util import compact_json_dumps, TERMINALS, NONTERMINALS, Basestring
import copy
import bisect
import sys
try:
xrange(0)
except NameError:
xrange = range
def diff(left_struc, right_struc, minimal=True, verbose=True, key=None):
'''Compose a sequence of diff stanzas sufficient to convert the
structure ``left_struc`` into the structure ``right_struc``. (The
goal is to add 'necessary and' to 'sufficient' above!).
Flags:
``verbose``: if this is set ``True`` (the default), a line of
compression statistics will be printed to stderr.
``minimal``: if ``True``, the function will try harder to find
the diff that encodes as the shortest possible JSON string, at
the expense of using more of both memory and processor time
(as alternatives are computed and compared).
The parameter ``key`` is present because this function is mutually
recursive with :py:func:`needle_diff` and :py:func:`keyset_diff`.
If set to a list, it will be prefixed to every keypath in the
output.
'''
if key is None:
key = []
if structure_worth_investigating(left_struc, right_struc):
common = commonality(left_struc, right_struc)
if minimal:
my_diff = needle_diff(left_struc, right_struc, key, minimal)
elif common < 0.5:
my_diff = this_level_diff(left_struc, right_struc, key, common)
else:
my_diff = keyset_diff(left_struc, right_struc, key, minimal)
else:
my_diff = this_level_diff(left_struc, right_struc, key, 0.0)
if minimal:
my_diff = min(my_diff, [[key[:], copy.copy(right_struc)]],
key=lambda x: len(compact_json_dumps(x)))
if key == []:
if len(my_diff) > 1:
my_diff = sort_stanzas(my_diff)
if verbose:
size = len(compact_json_dumps(right_struc))
csize = float(len(compact_json_dumps(my_diff)))
msg = ('Size of delta %.3f%% size of original '
'(original: %d chars, delta: %d chars)')
print(msg % (((csize / size) * 100),
size,
int(csize)),
file=sys.stderr)
return my_diff
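# Illustrative sketch (comment added for clarity, not part of the original
# module): a minimal example of the stanza format produced by diff(), where
# each stanza is [keypath] for a deletion or [keypath, new_value] otherwise:
#
#     >>> diff({'foo': 'bar'}, {'foo': 'baz'}, verbose=False)
#     [[['foo'], 'baz']]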
def needle_diff(left_struc, right_struc, key, minimal=True):
'''Returns a diff between ``left_struc`` and ``right_struc``.
If ``left_struc`` and ``right_struc`` are both serializable as
arrays, this function will use Needleman-Wunsch sequence alignment
to find a minimal diff between them. Otherwise, the inputs are
passed on to :func:`keyset_diff`.
This function probably shouldn't be called directly. Instead, use
:func:`diff`, which is mutually recursive with this function and
:func:`keyset_diff` anyway.
'''
if type(left_struc) not in (list, tuple):
return keyset_diff(left_struc, right_struc, key, minimal)
assert type(right_struc) in (list, tuple)
down_col = 0
lastrow = [
[[key + [sub_i]] for sub_i in range(i)]
for i in range(len(left_struc), -1, -1)
]
def modify_cand():
'''Build the candidate diff that involves (potentially) modifying an
element.'''
if col_i + 1 < len(lastrow):
return (lastrow[col_i+1] +
diff(left_elem, right_elem, key=key + [left_i],
minimal=minimal, verbose=False))
def delete_cand():
'''Build the candidate diff that involves deleting an element.'''
if row:
return row[0] + [[key + [left_i]]]
def append_cand():
'''Build the candidate diff that involves appending an element.'''
if col_i == down_col:
return (lastrow[col_i] +
[[key + [append_key(lastrow[col_i], left_struc, key)],
right_elem]])
for right_i, right_elem in enumerate(right_struc):
first_left_i = min(right_i, len(left_struc) - 1)
left_elems = left_struc[first_left_i:]
row = []
for left_i, left_elem in enumerate(left_elems, first_left_i):
col_i = len(left_struc) - left_i - 1
cands = [c for c in (modify_cand(), delete_cand(), append_cand())
if c is not None]
winner = min(cands, key=lambda d: len(compact_json_dumps(d)))
row.insert(0, winner)
lastrow = row
return winner
def append_key(stanzas, left_struc, keypath=None):
'''Get the appropriate key for appending to the sequence ``left_struc``.
``stanzas`` should be a diff, some of whose stanzas may modify a
sequence ``left_struc`` that appears at path ``keypath``. If any of
the stanzas append to ``left_struc``, the return value is the
largest index in ``left_struc`` they address, plus one.
Otherwise, the return value is ``len(left_struc)`` (i.e. the index
that a value would have if it was appended to ``left_struc``).
>>> append_key([], [])
0
>>> append_key([[[2], 'Baz']], ['Foo', 'Bar'])
3
>>> append_key([[[2], 'Baz'], [['Quux', 0], 'Foo']], [], ['Quux'])
1
'''
if keypath is None:
keypath = []
addition_key = len(left_struc)
for stanza in stanzas:
prior_key = stanza[0]
if (len(stanza) > 1
and len(prior_key) == len(keypath) + 1
and prior_key[-1] >= addition_key):
addition_key = prior_key[-1] + 1
return addition_key
def compute_keysets(left_seq, right_seq):
'''Compare the keys of ``left_seq`` vs. ``right_seq``.
Determines which keys ``left_seq`` and ``right_seq`` have in
common, and which are unique to each of the structures. Arguments
should be instances of the same basic type, which must be a
non-terminal: i.e. list or dict. If they are lists, the keys
compared will be integer indices.
Returns:
Return value is a 3-tuple of sets ``({overlap}, {left_only},
{right_only})``. As their names suggest, ``overlap`` is a set
of keys ``left_seq`` have in common, ``left_only`` represents
keys only found in ``left_seq``, and ``right_only`` holds keys
only found in ``right_seq``.
Raises:
AssertionError if ``left_seq`` is not an instance of
``type(right_seq)``, or if they are not of a non-terminal
type.
>>> compute_keysets({'foo': None}, {'bar': None}) == (set([]), {'foo'}, {'bar'})
True
>>> (compute_keysets({'foo': None, 'baz': None}, {'bar': None, 'baz': None})
... == ({'baz'}, {'foo'}, {'bar'}))
True
>>> compute_keysets(['foo', 'baz'], ['bar', 'baz']) == ({0, 1}, set([]), set([]))
True
>>> compute_keysets(['foo'], ['bar', 'baz']) == ({0}, set([]), {1})
True
>>> compute_keysets([], ['bar', 'baz']) == (set([]), set([]), {0, 1})
True
'''
assert isinstance(left_seq, type(right_seq)), (left_seq, right_seq)
assert type(left_seq) in NONTERMINALS, left_seq
if type(left_seq) is dict:
left_keyset = set(left_seq.keys())
right_keyset = set(right_seq.keys())
else:
left_keyset = set(range(len(left_seq)))
right_keyset = set(range(len(right_seq)))
overlap = left_keyset.intersection(right_keyset)
left_only = left_keyset - right_keyset
right_only = right_keyset - left_keyset
return (overlap, left_only, right_only)
def keyset_diff(left_struc, right_struc, key, minimal=True):
'''Return a diff between ``left_struc`` and ``right_struc``.
It is assumed that ``left_struc`` and ``right_struc`` are both
non-terminal types (serializable as arrays or objects). Sequences
are treated just like mappings by this function, so the diffs will
be correct but not necessarily minimal. For a minimal diff
between two sequences, use :func:`needle_diff`.
This function probably shouldn't be called directly. Instead, use
    :func:`diff`, which will call :func:`keyset_diff` if appropriate
anyway.
'''
out = []
(overlap, left_only, right_only) = compute_keysets(left_struc, right_struc)
out.extend([[key + [k]] for k in left_only])
out.extend([[key + [k], right_struc[k]] for k in right_only])
for k in overlap:
sub_key = key + [k]
out.extend(diff(left_struc[k], right_struc[k],
minimal, False, sub_key))
return out
def this_level_diff(left_struc, right_struc, key=None, common=None):
'''Return a sequence of diff stanzas between the structures
left_struc and right_struc, assuming that they are each at the
key-path ``key`` within the overall structure.
>>> (this_level_diff({'foo': 'bar', 'baz': 'quux'},
... {'foo': 'bar'})
... == [[['baz']]])
True
>>> (this_level_diff({'foo': 'bar', 'baz': 'quux'},
... {'foo': 'bar'}, ['quordle'])
... == [[['quordle', 'baz']]])
True
'''
out = []
if key is None:
key = []
if common is None:
common = commonality(left_struc, right_struc)
if common:
(overlap, left, right) = compute_keysets(left_struc, right_struc)
for okey in overlap:
if left_struc[okey] != right_struc[okey]:
out.append([key[:] + [okey], right_struc[okey]])
for okey in left:
out.append([key[:] + [okey]])
for okey in right:
out.append([key[:] + [okey], right_struc[okey]])
return out
elif left_struc != right_struc:
return [[key[:], right_struc]]
else:
return []
def structure_worth_investigating(left_struc, right_struc):
'''Test whether it is worth looking at the internal structure of
`left_struc` and `right_struc` to see if they can be efficiently
diffed.
'''
if type(left_struc) is not type(right_struc):
return False
if type(left_struc) in TERMINALS:
return False
if len(left_struc) == 0 or len(right_struc) == 0:
return False
return True
def commonality(left_struc, right_struc):
'''Return a float between 0.0 and 1.0 representing the amount
that the structures left_struc and right_struc have in common.
It is assumed (and ``assert``ed!) that ``left_struc`` and
``right_struc`` are of the same type, and non-empty (check this
using :func:`structure_worth_investigating`). Return value is
computed as the fraction (elements in common) / (total elements).
'''
assert type(left_struc) is type(right_struc), (left_struc, right_struc)
assert left_struc and right_struc, (left_struc, right_struc)
if type(left_struc) is dict:
(overlap, left, right) = compute_keysets(left_struc, right_struc)
com = float(len(overlap))
tot = len(overlap.union(left, right))
else:
assert type(left_struc) in (list, tuple), left_struc
com = 0.0
for elem in left_struc:
if elem in right_struc:
com += 1
tot = max(len(left_struc), len(right_struc))
return com / tot
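# Illustrative sketch (comment added for clarity, not part of the original
# module): commonality() is the fraction of shared keys, e.g.
#
#     >>> commonality({'a': 1, 'b': 2}, {'b': 2, 'c': 3})
#     0.3333333333333333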
def split_deletions(stanzas):
'''Split a diff into modifications and deletions.
Return value is a 3-tuple of lists: the first is a list of
stanzas from ``stanzas`` that modify JSON objects, the second is
a list of stanzas that add or change elements in JSON arrays, and
the second is a list of stanzas which delete elements from
arrays.
'''
objs = [x for x in stanzas if isinstance(x[0][-1], Basestring)]
seqs = [x for x in stanzas if isinstance(x[0][-1], int)]
assert len(objs) + len(seqs) == len(stanzas), stanzas
seqs.sort(key=len)
lengths = [len(x) for x in seqs]
point = bisect.bisect_left(lengths, 2)
return (objs, seqs[point:], seqs[:point])
def sort_stanzas(stanzas):
'''Sort the stanzas in ``diff``.
Object changes can occur in any order, but deletions from arrays
have to happen last node first: ['foo', 'bar', 'baz'] -> ['foo',
'bar'] -> ['foo'] -> []; and additions to arrays have to happen
leftmost-node-first: [] -> ['foo'] -> ['foo', 'bar'] -> ['foo',
'bar', 'baz'].
Note that this will also sort changes to objects (dicts) so that
they occur first of all, then modifications/additions on
arrays, followed by deletions from arrays.
'''
if len(stanzas) == 1:
return stanzas
# First we divide the stanzas using split_deletions():
(objs, mods, dels) = split_deletions(stanzas)
# Then we sort modifications of lists in ascending order of last key:
mods.sort(key=lambda x: x[0][-1])
# And deletions from lists in descending order of last key:
dels.sort(key=lambda x: x[0][-1], reverse=True)
# And recombine:
return objs + mods + dels
| bsd-2-clause | 5,005,126,007,948,716,000 | 36.597143 | 85 | 0.600958 | false |
has2k1/plotnine | plotnine/scales/scale_linetype.py | 1 | 1325 | from warnings import warn
from mizani.palettes import manual_pal
from ..doctools import document
from ..exceptions import PlotnineError, PlotnineWarning
from ..utils import alias
from .scale import scale_discrete, scale_continuous
linetypes = ['solid', 'dashed', 'dashdot', 'dotted']
@document
class scale_linetype(scale_discrete):
"""
Scale for line patterns
Parameters
----------
{superclass_parameters}
Notes
-----
The available linetypes are
``'solid', 'dashed', 'dashdot', 'dotted'``
If you need more custom linetypes, use
:class:`~plotnine.scales.scale_linetype_manual`
"""
_aesthetics = ['linetype']
palette = staticmethod(manual_pal(linetypes))
@document
class scale_linetype_ordinal(scale_linetype):
"""
Scale for line patterns
Parameters
----------
{superclass_parameters}
"""
_aesthetics = ['linetype']
def __init__(self, **kwargs):
warn(
"Using linetype for an ordinal variable is not advised.",
PlotnineWarning
)
super().__init__(**kwargs)
class scale_linetype_continuous(scale_continuous):
def __init__(self):
raise PlotnineError(
"A continuous variable can not be mapped to linetype")
alias('scale_linetype_discrete', scale_linetype)
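# Illustrative sketch (comment added for clarity, not part of the original
# module): mapping a discrete column to linetype, assuming the usual plotnine
# entry points are imported:
#
#     # from plotnine import ggplot, aes, geom_line
#     # p = (ggplot(df, aes('x', 'y', linetype='group'))
#     #      + geom_line()
#     #      + scale_linetype())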
| gpl-2.0 | 4,671,581,716,747,070,000 | 21.457627 | 69 | 0.645283 | false |
dodonator/area51 | Kryptotests/OneTimePad/Beta/v0.1(D)OneTimePad.py | 1 | 12920 | #coding: utf-8
# Version currently under development!
import os
import random
import getpass
import time
import datetime
import sys
os.system('clear')
class oneTimePad(object):
def __init__(self):
NCF = noCryptoFunctions()
self.alphabet = NCF.alphabetGenerierung()
def steganoKrypto(self,klartext,geheimtext):
'''
		Parameters:
			klartext (String) -- The plaintext that is to be encrypted with the key.
			geheimtext (String) -- The text that should appear as the ciphertext and is meant
			to conceal the existence of an encrypted message.
		Return value:
			key (String) -- The key with which the entered plaintext can be turned into
			the entered cover ciphertext.
		This function helps to hide the fact that an encrypted text exists at all. However,
		it restricts the number of plausible keys and therefore represents a potential
		security risk.
'''
klartextArray = list(klartext)
geheimtextArray = list(geheimtext)
numKlartextArray = []
numGeheimtextArray = []
key = ''
numKeyArray = []
if len(klartext) != len(geheimtext):
raise Exception('The code and the plain text must have the same length.')
for i in range(len(klartext)):
numKlartextArray.append(self.alphabet.index(klartextArray[i]))
numGeheimtextArray.append(self.alphabet.index(geheimtextArray[i]))
for i in range(len(klartext)):
tmpGStelle = numGeheimtextArray[i]
tmpKStelle = numKlartextArray[i]
if tmpGStelle < tmpKStelle:
tmpGStelle += len(self.alphabet)
numKeyArray.append(int((tmpGStelle-tmpKStelle)%len(self.alphabet)))
for i in range(len(klartext)):
key += str(self.alphabet[numKeyArray[i]])
return key
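	# Illustrative sketch (comment added for clarity): the returned key maps the
	# plaintext onto the chosen cover ciphertext, e.g. (both strings must have
	# the same length):
	#
	#   otp = oneTimePad()
	#   key = otp.steganoKrypto('Hello', 'World')
	#   otp.CodiererMitManuellerKeyEingabe('Hello', key)  # -> 'World'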
def keyGenerierung(self,laengeKlartext):
'''
		Parameters:
			laengeKlartext (Integer) -- Length of the key to be generated.
		Return value:
			result (Array) = [key (String), keyArray (Array), keyLaenge (Integer)]
			key -- The generated key as a string.
			keyArray -- The generated key as a list.
			keyLaenge -- The length of the generated key as an integer.
'''
keyArray = []
key = ''
keyLaenge = 0
for i in range(laengeKlartext):
tmp = random.choice(self.alphabet)
keyArray.append(tmp)
key += tmp
keyLaenge = len(keyArray)
result = [key,keyArray,keyLaenge]
return result
def CodiererMitManuellerKeyEingabe(self,klartext,key):
'''
		Parameters:
			klartext (String) -- Text to encode.
			key (String) -- Key needed for the encoding.
		Return value:
			geheimtext (String) -- Ciphertext produced by the encoding.
'''
laengeKlartext = len(klartext)
laengeKey = len(key)
keyArray = list(key)
klartextArray = list(klartext)
geheimtext = ''
if laengeKlartext != laengeKey:
raise Exception("Error! It's very important that the input and the key have the same length.")
		for i in range(laengeKlartext): # This for loop performs the encoding
tmpKlartextIndex = self.alphabet.index(klartextArray[i])
tmpKeyIndex = self.alphabet.index(keyArray[i])
tmpG = self.alphabet[(tmpKlartextIndex + tmpKeyIndex) % len(self.alphabet)]
geheimtext += tmpG
return geheimtext
	def encode(self,klartext): # Encode the given string
'''
Create a random One-Time-Pad and encode the input strings
'''
laengeKlartext = len(klartext)
keyFoo = self.keyGenerierung(laengeKlartext)
key = keyFoo[0]
keyArray = keyFoo[1]
klartextArray = list(klartext)
geheimtextArray = []
geheimtext = ''
		for i in range(laengeKlartext): # This for loop performs the encoding
tmpKlartextIndex = self.alphabet.index(klartextArray[i])
tmpKeyIndex = self.alphabet.index(keyArray[i])
tmpG = self.alphabet[(tmpKlartextIndex + tmpKeyIndex) % len(self.alphabet)]
geheimtextArray.append(tmpG)
		for element in geheimtextArray: # This for loop turns the list into a string
geheimtext += element
return [geheimtext,key]
def decode(self,geheimtext,key):
laengeGeheimtext = len(geheimtext)
keyArray = list(key)
geheimArray = list(geheimtext)
klartextArray = []
klartext = ''
for i in range(laengeGeheimtext):
tmpGeheimtextIndex = self.alphabet.index(geheimArray[i])
tmpKeyIndex = self.alphabet.index(keyArray[i])
tmpDifferenz = tmpGeheimtextIndex - tmpKeyIndex
if tmpDifferenz >= 0:
klartextArray.append(self.alphabet[tmpDifferenz])
else:
tmpDifferenz = tmpGeheimtextIndex + len(self.alphabet) - tmpKeyIndex
klartextArray.append(self.alphabet[tmpDifferenz])
for element in klartextArray:
klartext += element
return klartext
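	# Illustrative sketch (comment added for clarity): encode() returns the
	# ciphertext together with the freshly generated random pad, and decode()
	# reverses the operation:
	#
	#   otp = oneTimePad()
	#   geheimtext, key = otp.encode('secret')
	#   otp.decode(geheimtext, key)  # -> 'secret'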
def analyse(self,testString,rep):
OTP = oneTimePad()
trueCounter = 0
falseCounter = 0
for i in range(rep):
tmpEncode = OTP.encode(testString)
tmpGeheimtext = tmpEncode[0]
tmpKey = tmpEncode[1]
tmpDecode = OTP.decode(tmpGeheimtext,tmpKey)
if tmpDecode == testString:
trueCounter += 1
else:
falseCounter += 1
result = [trueCounter,falseCounter]
return result
class noCryptoFunctions(object):
def kompression(self,Liste):
resultListe = []
last = Liste[0]
for i in range(1,len(Liste)):
resultListe.append(Liste[i]-last)
last = Liste[i]
return [resultListe,Liste[0]]
def deKompression(self,compListe):
start = compListe[1]
resultListe = [start]
last = compListe[0][0] + start
resultListe.append(last)
Liste = compListe[0]
for i in range(1,len(Liste)):
resultListe.append(int(last + Liste[i]))
last = last + Liste[i]
return resultListe
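	# Illustrative sketch (comment added for clarity): kompression() stores the
	# differences between neighbouring values plus the first value, and
	# deKompression() reverses it:
	#
	#   NCF = noCryptoFunctions()
	#   NCF.kompression([1, 3, 6])      # -> [[2, 3], 1]
	#   NCF.deKompression([[2, 3], 1])  # -> [1, 3, 6]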
def datumsStempel(self):
'''
		Parameters:
			--- none ---
		Return value:
			result (Array) = [stempel, counter]
			stempel (String) -- A date stamp with '-' as separator.
			counter (Integer) -- A counter over the minutes of the day that serves as an ID.
'''
t = time.localtime()
year = t[0]
month = t[1]
day = t[2]
hour = t[3]
minute = t[4]
second = t[5]
counter = minute + hour*60
stempel = str(year) + '-' + str(month) + '-' + str(day)
return [stempel,counter]
def dateiZugriffSpeichern(self,filename,foldername,inhalt,typ):
'''
		filename: name of the file (without folder and extension)
		inhalt: the content to be written to the file
		typ: 'g' for ciphertext, 'k' for plaintext or 's' for key
'''
currentDirectory = os.getcwd()
filename = currentDirectory + '/' + foldername + '/' + typ.upper() + '--' + filename + '.' + typ
command = 'sudo touch ' + filename
os.system(command)
f1 = open(filename,'w')
file1 = f1.write(inhalt)
f1.close()
return filename
def speicherRoutine(self,inhalt,typ):
currentDirectory = os.getcwd()
datumsStempel1 = self.datumsStempel()
foldername = str(datumsStempel1[0])
ID = datumsStempel1[1]
if foldername not in os.listdir(os.getcwd()):
os.system('mkdir ' + foldername)
else:
pass
		# Only create the ID subfolder if it does not exist yet.
		if str(ID) not in os.listdir(os.getcwd() + '/' + str(foldername)):
			os.system('mkdir ' + currentDirectory + '/' + str(foldername) + '/' + str(ID))
		else:
			pass
foldername = str(foldername) + '/' + str(ID)
stempel = str(datumsStempel1[1])
filename = self.dateiZugriffSpeichern(stempel,foldername,inhalt,typ)
return ID
def dateiZugriffLaden(self,filename):
f1 = open(filename,'r')
result = f1.read()
f1.close()
return result
def LadeRoutine(self,ID,typ,year,month,day):
foldername = str(year) + '-' + str(month) + '-' + str(day) + '/' + str(ID)
currentDirectory = os.getcwd()
filename = currentDirectory + '/' + foldername + '/' + typ.upper() + '--' + ID + '.' + typ
result = self.dateiZugriffLaden(filename)
return result
def sonderzeichen(self):
sonderZ1 = range(32,65)
sonderZ2 = range(91,97)
sonderZ3 = range(123,126)
sonderZ = [sonderZ1,sonderZ2,sonderZ3]
sonderZeichenListe = []
for element in sonderZ:
for sonderZeichen in element:
sonderZeichenListe.append(str(chr(sonderZeichen)))
return sonderZeichenListe
def alphabetGenerierung(self):
alphabet = []
for i in range(26):
alphabet.append(chr(i+65))
for i in range(26):
alphabet.append(chr(i+97))
sonderZeichen = self.sonderzeichen()
for SZeichen in sonderZeichen:
alphabet.append(SZeichen)
return alphabet
def RandomDecodierer(): # Random decode evaluation
OTP = oneTimePad()
NCF = noCryptoFunctions()
alphabet = NCF.alphabetGenerierung()
rep = int(raw_input('Wiederholungszahl eingeben: \n'))
klartextLaenge = int(raw_input('Bitte die Länge des Teststrings eingeben: \n'))
trueCounter = 0
falseCounter = 0
for i in range(rep):
key = ''
geheimtext = ''
for i in range(klartextLaenge):
key += random.choice(alphabet)
for i in range(klartextLaenge):
geheimtext += random.choice(alphabet)
klartext = OTP.decode(geheimtext,key)
print geheimtext
YN = raw_input('Ergibt der zufällig erzeugte Klartext Sinn (Y/N): \n')
if YN == 'Y' or YN == 'y':
trueCounter += 1
elif YN == 'N' or YN == 'n' or YN == '':
falseCounter += 1
os.system('clear')
print 'Sinn: ' + str(trueCounter)
print 'Unsinn: ' + str(falseCounter)
def USE_keyGen(): # Generates a key
OTP = oneTimePad()
keyLen = int(raw_input('Keylänge eingeben: \n'))
result = OTP.keyGenerierung(keyLen)[0]
print result
def USE_analyse(): # Checks that encoding and decoding are correct and lossless
OTP = oneTimePad()
klartext = raw_input('Klartext eingeben: \n')
rep = int(raw_input('Wiederholungszahl eingeben: \n'))
Analyse = OTP.analyse(klartext,rep)
print 'True: ' + str(Analyse[0])
print 'False: ' + str(Analyse[1])
def CodiererMitSpeicherFunktion(): # Encoding; results are saved to files
OTP = oneTimePad()
NCF = noCryptoFunctions()
klartext = raw_input('Klartext eingeben: \n')
tmpResult = OTP.encode(klartext)
geheimtext = tmpResult[0]
key = tmpResult[1]
IDg = NCF.speicherRoutine(geheimtext,'g')
IDk = NCF.speicherRoutine(key,'s')
os.system('clear')
if IDg == IDk:
print'True! ' + str(IDg)
else:
print 'False! ' + str(IDg) + ' != ' + str(IDk)
def DecodiererMitLadeFunktion(): # Decoding; parameters are loaded from files
OTP = oneTimePad()
NCF = noCryptoFunctions()
ID = raw_input('ID: ')
year = raw_input('Jahr: ')
month = raw_input('Monat: ')
day = raw_input('Tag: ')
geheimtext = NCF.LadeRoutine(ID,'g',year,month,day)
key = NCF.LadeRoutine(ID,'s',year,month,day)
result = OTP.decode(geheimtext,key)
print result
def USE_decode():
OTP = oneTimePad()
geheimtext = raw_input('Geheimtext eingeben: \n')
key = raw_input('Key eingeben: \n')
tmpResult = OTP.decode(geheimtext,key)
print 'Klartext: '
print tmpResult
def USE_encode():
OTP = oneTimePad()
klartext = raw_input('Klartext eingeben: \n')
tmpResult = OTP.encode(klartext)
print (12+len(tmpResult[0]))*'#'
print 'Key: ' + tmpResult[1]
print (12+len(tmpResult[0]))*'#'
print 'Geheimtext: ' + tmpResult[0]
print (12+len(tmpResult[0]))*'#'
def USE_CodierungMitManuellerKeyEingabe():
OTP = oneTimePad()
klartext = raw_input('Klartext eingeben: \n')
key = raw_input('Schlüssel der Länge ' + str(len(klartext)) + ' eingeben: \n')
os.system('clear')
print (12 + len(klartext)) * '#'
print 'Klartext: ' + klartext
print 'Schlüssel: ' + key
print 'Geheimtext: ' + OTP.CodiererMitManuellerKeyEingabe(klartext,key)
print (12 + len(klartext)) * '#'
def USE_steganoKrypto():
OTP = oneTimePad()
klartext = raw_input('Klartext eingeben: ')
klartextLaenge = len(klartext)
text = 'Geheimtext der Länge ' + str(klartextLaenge) + ' eingeben: '
geheimtext = raw_input(text)
key = OTP.steganoKrypto(klartext,geheimtext)
print 'Schlüsselwort: ' + key
print 'Entschlüsselt: ' + OTP.decode(geheimtext,key)
def main():
modus = ''
while modus != 'q':
os.system('clear')
print 'Keygenerierung [0]'
print 'Codieren (Ergebenis wird abgespeichert) [1]'
print 'Decodieren (Parameter werden ausgelesen) [2]'
print 'Analyse der Funktionalität [3]'
print 'Decodierung mit zufälligen Parametern [4]'
print 'Codierung ohne Key Generierung [5]'
print 'Steganokryptographie [6]'
modus = raw_input(': ')
os.system('clear')
if modus == 'q':
nachricht = 'Auf Wiedersehen!'
abstand = int((columns - len(nachricht))/2)*' '
print 3*'\n'
print abstand + nachricht + abstand
print 3*'\n'
sys.exit()
if modus == '0':
USE_keyGen()
elif modus == '1':
CodiererMitSpeicherFunktion()
elif modus == '2':
DecodiererMitLadeFunktion()
elif modus == '3':
USE_analyse()
elif modus == '4':
RandomDecodierer()
elif modus == '5':
USE_CodierungMitManuellerKeyEingabe()
elif modus == '6':
USE_steganoKrypto()
warteAufEingabe = raw_input('Bitte zum Fortfahren "Enter" drücken')
columns = 112 # The number of characters that fit on one line of the terminal.
main() | gpl-3.0 | 4,753,217,025,097,092,000 | 25.73029 | 98 | 0.686098 | false |
teonlamont/mne-python | mne/fixes.py | 2 | 51461 | """Compatibility fixes for older version of python, numpy and scipy
If you add content to this file, please give the version of the package
at which the fix is no longer needed.
# XXX : originally copied from scikit-learn
"""
# Authors: Emmanuelle Gouillart <[email protected]>
# Gael Varoquaux <[email protected]>
# Fabian Pedregosa <[email protected]>
# Lars Buitinck <[email protected]>
# License: BSD
from __future__ import division
import inspect
from distutils.version import LooseVersion
import warnings
import numpy as np
from scipy import linalg, __version__ as sp_version
from .externals.six import string_types, iteritems
###############################################################################
# Misc
# helpers to get function arguments
if hasattr(inspect, 'signature'): # py35
def _get_args(function, varargs=False):
params = inspect.signature(function).parameters
args = [key for key, param in params.items()
if param.kind not in (param.VAR_POSITIONAL, param.VAR_KEYWORD)]
if varargs:
varargs = [param.name for param in params.values()
if param.kind == param.VAR_POSITIONAL]
if len(varargs) == 0:
varargs = None
return args, varargs
else:
return args
else:
def _get_args(function, varargs=False):
out = inspect.getargspec(function) # args, varargs, keywords, defaults
if varargs:
return out[:2]
else:
return out[0]
def _safe_svd(A, **kwargs):
"""Wrapper to get around the SVD did not converge error of death"""
# Intel has a bug with their GESVD driver:
# https://software.intel.com/en-us/forums/intel-distribution-for-python/topic/628049 # noqa: E501
# For SciPy 0.18 and up, we can work around it by using
# lapack_driver='gesvd' instead.
if kwargs.get('overwrite_a', False):
raise ValueError('Cannot set overwrite_a=True with this function')
try:
return linalg.svd(A, **kwargs)
except np.linalg.LinAlgError as exp:
from .utils import warn
if 'lapack_driver' in _get_args(linalg.svd):
warn('SVD error (%s), attempting to use GESVD instead of GESDD'
% (exp,))
return linalg.svd(A, lapack_driver='gesvd', **kwargs)
else:
raise
###############################################################################
# Backporting nibabel's read_geometry
def _get_read_geometry():
"""Get the geometry reading function."""
try:
import nibabel as nib
has_nibabel = True
except ImportError:
has_nibabel = False
if has_nibabel and LooseVersion(nib.__version__) > LooseVersion('2.1.0'):
from nibabel.freesurfer import read_geometry
else:
read_geometry = _read_geometry
return read_geometry
def _read_geometry(filepath, read_metadata=False, read_stamp=False):
"""Backport from nibabel."""
from .surface import _fread3, _fread3_many
volume_info = dict()
TRIANGLE_MAGIC = 16777214
QUAD_MAGIC = 16777215
NEW_QUAD_MAGIC = 16777213
with open(filepath, "rb") as fobj:
magic = _fread3(fobj)
if magic in (QUAD_MAGIC, NEW_QUAD_MAGIC): # Quad file
nvert = _fread3(fobj)
nquad = _fread3(fobj)
(fmt, div) = (">i2", 100.) if magic == QUAD_MAGIC else (">f4", 1.)
coords = np.fromfile(fobj, fmt, nvert * 3).astype(np.float) / div
coords = coords.reshape(-1, 3)
quads = _fread3_many(fobj, nquad * 4)
quads = quads.reshape(nquad, 4)
#
# Face splitting follows
#
faces = np.zeros((2 * nquad, 3), dtype=np.int)
nface = 0
for quad in quads:
if (quad[0] % 2) == 0:
faces[nface] = quad[0], quad[1], quad[3]
nface += 1
faces[nface] = quad[2], quad[3], quad[1]
nface += 1
else:
faces[nface] = quad[0], quad[1], quad[2]
nface += 1
faces[nface] = quad[0], quad[2], quad[3]
nface += 1
elif magic == TRIANGLE_MAGIC: # Triangle file
create_stamp = fobj.readline().rstrip(b'\n').decode('utf-8')
fobj.readline()
vnum = np.fromfile(fobj, ">i4", 1)[0]
fnum = np.fromfile(fobj, ">i4", 1)[0]
coords = np.fromfile(fobj, ">f4", vnum * 3).reshape(vnum, 3)
faces = np.fromfile(fobj, ">i4", fnum * 3).reshape(fnum, 3)
if read_metadata:
volume_info = _read_volume_info(fobj)
else:
raise ValueError("File does not appear to be a Freesurfer surface")
coords = coords.astype(np.float) # XXX: due to mayavi bug on mac 32bits
ret = (coords, faces)
if read_metadata:
if len(volume_info) == 0:
warnings.warn('No volume information contained in the file')
ret += (volume_info,)
if read_stamp:
ret += (create_stamp,)
return ret
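# Illustrative sketch (comment added for clarity, not part of the original
# module): the backport mirrors nibabel.freesurfer.read_geometry, e.g.
# (the surface path below is only a placeholder):
#
#     # read_geometry = _get_read_geometry()
#     # rr, tris = read_geometry('<subjects_dir>/sample/surf/lh.white')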
###############################################################################
# Backporting logsumexp from scipy which is imported from scipy.special (0.1.0.0)
# instead of scipy.misc
def _get_logsumexp():
try:
from scipy.special import logsumexp
except ImportError: # old SciPy
from scipy.misc import logsumexp
return logsumexp
###############################################################################
# Backporting scipy.signal.sosfilt (0.17) and sosfiltfilt (0.18)
def _sosfiltfilt(sos, x, axis=-1, padtype='odd', padlen=None):
"""copy of SciPy sosfiltfilt"""
sos, n_sections = _validate_sos(sos)
# `method` is "pad"...
ntaps = 2 * n_sections + 1
ntaps -= min((sos[:, 2] == 0).sum(), (sos[:, 5] == 0).sum())
edge, ext = _validate_pad(padtype, padlen, x, axis,
ntaps=ntaps)
# These steps follow the same form as filtfilt with modifications
zi = sosfilt_zi(sos) # shape (n_sections, 2) --> (n_sections, ..., 2, ...)
zi_shape = [1] * x.ndim
zi_shape[axis] = 2
zi.shape = [n_sections] + zi_shape
x_0 = axis_slice(ext, stop=1, axis=axis)
(y, zf) = sosfilt(sos, ext, axis=axis, zi=zi * x_0)
y_0 = axis_slice(y, start=-1, axis=axis)
(y, zf) = sosfilt(sos, axis_reverse(y, axis=axis), axis=axis, zi=zi * y_0)
y = axis_reverse(y, axis=axis)
if edge > 0:
y = axis_slice(y, start=edge, stop=-edge, axis=axis)
return y
def axis_slice(a, start=None, stop=None, step=None, axis=-1):
"""Take a slice along axis 'axis' from 'a'"""
a_slice = [slice(None)] * a.ndim
a_slice[axis] = slice(start, stop, step)
b = a[a_slice]
return b
def axis_reverse(a, axis=-1):
"""Reverse the 1-d slices of `a` along axis `axis`."""
return axis_slice(a, step=-1, axis=axis)
def _validate_pad(padtype, padlen, x, axis, ntaps):
"""Helper to validate padding for filtfilt"""
if padtype not in ['even', 'odd', 'constant', None]:
raise ValueError(("Unknown value '%s' given to padtype. padtype "
"must be 'even', 'odd', 'constant', or None.") %
padtype)
if padtype is None:
padlen = 0
if padlen is None:
# Original padding; preserved for backwards compatibility.
edge = ntaps * 3
else:
edge = padlen
# x's 'axis' dimension must be bigger than edge.
if x.shape[axis] <= edge:
raise ValueError("The length of the input vector x must be at least "
"padlen, which is %d." % edge)
if padtype is not None and edge > 0:
# Make an extension of length `edge` at each
# end of the input array.
if padtype == 'even':
ext = even_ext(x, edge, axis=axis)
elif padtype == 'odd':
ext = odd_ext(x, edge, axis=axis)
else:
ext = const_ext(x, edge, axis=axis)
else:
ext = x
return edge, ext
def _validate_sos(sos):
"""Helper to validate a SOS input"""
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
if not (sos[:, 3] == 1).all():
raise ValueError('sos[:, 3] should be all ones')
return sos, n_sections
def odd_ext(x, n, axis=-1):
"""Generate a new ndarray by making an odd extension of x along an axis."""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_end = axis_slice(x, start=0, stop=1, axis=axis)
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((2 * left_end - left_ext,
x,
2 * right_end - right_ext),
axis=axis)
return ext
def even_ext(x, n, axis=-1):
"""Create an ndarray that is an even extension of x along an axis."""
if n < 1:
return x
if n > x.shape[axis] - 1:
raise ValueError(("The extension length n (%d) is too big. " +
"It must not exceed x.shape[axis]-1, which is %d.")
% (n, x.shape[axis] - 1))
left_ext = axis_slice(x, start=n, stop=0, step=-1, axis=axis)
right_ext = axis_slice(x, start=-2, stop=-(n + 2), step=-1, axis=axis)
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def const_ext(x, n, axis=-1):
"""Create an ndarray that is a constant extension of x along an axis"""
if n < 1:
return x
left_end = axis_slice(x, start=0, stop=1, axis=axis)
ones_shape = [1] * x.ndim
ones_shape[axis] = n
ones = np.ones(ones_shape, dtype=x.dtype)
left_ext = ones * left_end
right_end = axis_slice(x, start=-1, axis=axis)
right_ext = ones * right_end
ext = np.concatenate((left_ext,
x,
right_ext),
axis=axis)
return ext
def sosfilt_zi(sos):
"""Compute an initial state `zi` for the sosfilt function"""
from scipy.signal import lfilter_zi
sos = np.asarray(sos)
if sos.ndim != 2 or sos.shape[1] != 6:
raise ValueError('sos must be shape (n_sections, 6)')
n_sections = sos.shape[0]
zi = np.empty((n_sections, 2))
scale = 1.0
for section in range(n_sections):
b = sos[section, :3]
a = sos[section, 3:]
zi[section] = scale * lfilter_zi(b, a)
# If H(z) = B(z)/A(z) is this section's transfer function, then
# b.sum()/a.sum() is H(1), the gain at omega=0. That's the steady
# state value of this section's step response.
scale *= b.sum() / a.sum()
return zi
def sosfilt(sos, x, axis=-1, zi=None):
"""Filter data along one dimension using cascaded second-order sections"""
from scipy.signal import lfilter
x = np.asarray(x)
sos = np.atleast_2d(sos)
if sos.ndim != 2:
raise ValueError('sos array must be 2D')
n_sections, m = sos.shape
if m != 6:
raise ValueError('sos array must be shape (n_sections, 6)')
use_zi = zi is not None
if use_zi:
zi = np.asarray(zi)
x_zi_shape = list(x.shape)
x_zi_shape[axis] = 2
x_zi_shape = tuple([n_sections] + x_zi_shape)
if zi.shape != x_zi_shape:
raise ValueError('Invalid zi shape. With axis=%r, an input with '
'shape %r, and an sos array with %d sections, zi '
'must have shape %r.' %
(axis, x.shape, n_sections, x_zi_shape))
zf = np.zeros_like(zi)
for section in range(n_sections):
if use_zi:
x, zf[section] = lfilter(sos[section, :3], sos[section, 3:],
x, axis, zi=zi[section])
else:
x = lfilter(sos[section, :3], sos[section, 3:], x, axis)
out = (x, zf) if use_zi else x
return out
def get_sosfiltfilt():
"""Helper to get sosfiltfilt from scipy"""
try:
from scipy.signal import sosfiltfilt
except ImportError:
sosfiltfilt = _sosfiltfilt
return sosfiltfilt
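# Illustrative sketch (comment added for clarity, not part of the original
# module): zero-phase IIR filtering with second-order sections, assuming
# SciPy's butter() is used to design the filter:
#
#     # from scipy.signal import butter
#     # sosfiltfilt = get_sosfiltfilt()
#     # sos = butter(4, 0.125, output='sos')  # 4th-order low-pass
#     # y = sosfiltfilt(sos, x)               # x: array of samples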
def minimum_phase(h):
"""Convert a linear-phase FIR filter to minimum phase.
Parameters
----------
h : array
Linear-phase FIR filter coefficients.
Returns
-------
h_minimum : array
The minimum-phase version of the filter, with length
``(length(h) + 1) // 2``.
"""
try:
from scipy.signal import minimum_phase
except Exception:
pass
else:
return minimum_phase(h)
from scipy.fftpack import fft, ifft
h = np.asarray(h)
if np.iscomplexobj(h):
raise ValueError('Complex filters not supported')
if h.ndim != 1 or h.size <= 2:
raise ValueError('h must be 1D and at least 2 samples long')
n_half = len(h) // 2
if not np.allclose(h[-n_half:][::-1], h[:n_half]):
warnings.warn('h does not appear to by symmetric, conversion may '
'fail', RuntimeWarning)
n_fft = 2 ** int(np.ceil(np.log2(2 * (len(h) - 1) / 0.01)))
# zero-pad; calculate the DFT
h_temp = np.abs(fft(h, n_fft))
# take 0.25*log(|H|**2) = 0.5*log(|H|)
h_temp += 1e-7 * h_temp[h_temp > 0].min() # don't let log blow up
np.log(h_temp, out=h_temp)
h_temp *= 0.5
# IDFT
h_temp = ifft(h_temp).real
# multiply pointwise by the homomorphic filter
# lmin[n] = 2u[n] - d[n]
win = np.zeros(n_fft)
win[0] = 1
stop = (len(h) + 1) // 2
win[1:stop] = 2
if len(h) % 2:
win[stop] = 1
h_temp *= win
h_temp = ifft(np.exp(fft(h_temp)))
h_minimum = h_temp.real
n_out = n_half + len(h) % 2
return h_minimum[:n_out]
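# Illustrative sketch (comment added for clarity, not part of the original
# module): converting a linear-phase FIR design to minimum phase roughly
# halves the number of taps while keeping a similar magnitude response:
#
#     # from scipy.signal import firwin
#     # h_lin = firwin(129, 0.1)      # linear-phase, 129 taps
#     # h_min = minimum_phase(h_lin)  # (129 + 1) // 2 = 65 taps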
###############################################################################
# scipy.special.sph_harm ()
def _sph_harm(order, degree, az, pol):
"""Evaluate point in specified multipolar moment.
When using, pay close attention to inputs. Spherical harmonic notation for
order/degree, and theta/phi are both reversed in original SSS work compared
to many other sources. See mathworld.wolfram.com/SphericalHarmonic.html for
more discussion.
Note that scipy has ``scipy.special.sph_harm``, but that function is
too slow on old versions (< 0.15) for heavy use.
Parameters
----------
order : int
Order of spherical harmonic. (Usually) corresponds to 'm'.
degree : int
Degree of spherical harmonic. (Usually) corresponds to 'l'.
az : float
Azimuthal (longitudinal) spherical coordinate [0, 2*pi]. 0 is aligned
with x-axis.
pol : float
Polar (or colatitudinal) spherical coordinate [0, pi]. 0 is aligned
with z-axis.
norm : bool
If True, include normalization factor.
Returns
-------
base : complex float
The spherical harmonic value.
"""
from scipy.special import lpmv
from .preprocessing.maxwell import _sph_harm_norm
# Error checks
if np.abs(order) > degree:
raise ValueError('Absolute value of order must be <= degree')
# Ensure that polar and azimuth angles are arrays
az = np.asarray(az)
pol = np.asarray(pol)
if (np.abs(az) > 2 * np.pi).any():
raise ValueError('Azimuth coords must lie in [-2*pi, 2*pi]')
    if (pol < 0).any() or (pol > np.pi).any():
raise ValueError('Polar coords must lie in [0, pi]')
# This is the "seismology" convention on Wikipedia, w/o Condon-Shortley
sph = lpmv(order, degree, np.cos(pol)) * np.exp(1j * order * az)
sph *= _sph_harm_norm(order, degree)
return sph
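# Illustrative sketch (comment added for clarity, not part of the original
# module): evaluating a single multipolar moment, here order m=1 and
# degree l=2 at azimuth 0.5 rad and polar angle 1.0 rad:
#
#     # val = _sph_harm(1, 2, az=0.5, pol=1.0)  # complex scalar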
def _get_sph_harm():
"""Helper to get a usable spherical harmonic function."""
if LooseVersion(sp_version) < LooseVersion('0.17.1'):
sph_harm = _sph_harm
else:
from scipy.special import sph_harm
return sph_harm
###############################################################################
# Scipy spectrogram (for mne.time_frequency.psd_welch) needed for scipy < 0.16
def _spectrogram(x, fs=1.0, window=('tukey', .25), nperseg=256, noverlap=None,
nfft=None, detrend='constant', return_onesided=True,
scaling='density', axis=-1, mode='psd'):
"""
Compute a spectrogram with consecutive Fourier transforms.
Spectrograms can be used as a way of visualizing the change of a
nonstationary signal's frequency content over time.
Parameters
----------
x : array_like
Time series of measurement values
fs : float, optional
Sampling frequency of the `x` time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to a Tukey window with shape parameter of 0.25.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 8``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the power spectral density ('density')
where `Pxx` has units of V**2/Hz and computing the power spectrum
('spectrum') where `Pxx` has units of V**2, if `x` is measured in V
and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the spectrogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
f : ndarray
Array of sample frequencies.
t : ndarray
Array of segment times.
Sxx : ndarray
Spectrogram of x. By default, the last axis of Sxx corresponds to the
segment times.
See Also
--------
periodogram: Simple, optionally modified periodogram
lombscargle: Lomb-Scargle periodogram for unevenly sampled data
welch: Power spectral density by Welch's method.
csd: Cross spectral density by Welch's method.
Notes
-----
An appropriate amount of overlap will depend on the choice of window
and on your requirements. In contrast to welch's method, where the entire
data stream is averaged over, one may wish to use a smaller overlap (or
perhaps none at all) when computing a spectrogram, to maintain some
statistical independence between individual segments.
.. versionadded:: 0.16.0
References
----------
.. [1] Oppenheim, Alan V., Ronald W. Schafer, John R. Buck "Discrete-Time
Signal Processing", Prentice Hall, 1999.
"""
# Less overlap than welch, so samples are more statisically independent
if noverlap is None:
noverlap = nperseg // 8
freqs, time, Pxy = _spectral_helper(x, x, fs, window, nperseg, noverlap,
nfft, detrend, return_onesided, scaling,
axis, mode=mode)
return freqs, time, Pxy
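# Illustrative usage sketch (not part of the original module); the sampling
# rate and array sizes below are made up:
#
#     rng = np.random.RandomState(0)
#     x = rng.randn(2048)
#     f, t, Sxx = _spectrogram(x, fs=256., nperseg=128)
#     # f.shape == (65,)   one-sided: nperseg // 2 + 1 frequency bins
#     # Sxx.shape == (65, n_segments), last axis indexes segment times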
def _spectral_helper(x, y, fs=1.0, window='hann', nperseg=256,
noverlap=None, nfft=None, detrend='constant',
return_onesided=True, scaling='spectrum', axis=-1,
mode='psd'):
"""
Calculate various forms of windowed FFTs for PSD, CSD, etc.
This is a helper function that implements the commonality between the
psd, csd, and spectrogram functions. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Parameters
    ----------
x : array_like
Array or sequence containing the data to be analyzed.
y : array_like
Array or sequence containing the data to be analyzed. If this is
        the same object in memory as x (i.e. _spectral_helper(x, x, ...)),
the extra computations are spared.
fs : float, optional
Sampling frequency of the time series. Defaults to 1.0.
window : str or tuple or array_like, optional
Desired window to use. See `get_window` for a list of windows and
required parameters. If `window` is array_like it will be used
directly as the window and its length will be used for nperseg.
Defaults to 'hann'.
nperseg : int, optional
Length of each segment. Defaults to 256.
noverlap : int, optional
Number of points to overlap between segments. If None,
``noverlap = nperseg // 2``. Defaults to None.
nfft : int, optional
Length of the FFT used, if a zero padded FFT is desired. If None,
the FFT length is `nperseg`. Defaults to None.
detrend : str or function or False, optional
Specifies how to detrend each segment. If `detrend` is a string,
it is passed as the ``type`` argument to `detrend`. If it is a
function, it takes a segment and returns a detrended segment.
If `detrend` is False, no detrending is done. Defaults to 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data. If False return
a two-sided spectrum. Note that for complex data, a two-sided
spectrum is always returned.
scaling : { 'density', 'spectrum' }, optional
Selects between computing the cross spectral density ('density')
where `Pxy` has units of V**2/Hz and computing the cross spectrum
('spectrum') where `Pxy` has units of V**2, if `x` and `y` are
measured in V and fs is measured in Hz. Defaults to 'density'
axis : int, optional
Axis along which the periodogram is computed; the default is over
the last axis (i.e. ``axis=-1``).
mode : str, optional
Defines what kind of return values are expected. Options are ['psd',
'complex', 'magnitude', 'angle', 'phase'].
Returns
-------
freqs : ndarray
Array of sample frequencies.
t : ndarray
Array of times corresponding to each data segment
result : ndarray
Array of output data, contents dependent on *mode* kwarg.
References
----------
.. [1] Stack Overflow, "Rolling window for 1D arrays in Numpy?",
http://stackoverflow.com/a/6811241
.. [2] Stack Overflow, "Using strides for an efficient moving average
filter", http://stackoverflow.com/a/4947453
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
from scipy import fftpack
from scipy.signal import signaltools
from scipy.signal.windows import get_window
if mode not in ['psd', 'complex', 'magnitude', 'angle', 'phase']:
raise ValueError("Unknown value for mode %s, must be one of: "
"'default', 'psd', 'complex', "
"'magnitude', 'angle', 'phase'" % mode)
# If x and y are the same object we can save ourselves some computation.
same_data = y is x
if not same_data and mode != 'psd':
raise ValueError("x and y must be equal if mode is not 'psd'")
axis = int(axis)
# Ensure we have np.arrays, get outdtype
x = np.asarray(x)
if not same_data:
y = np.asarray(y)
outdtype = np.result_type(x,y,np.complex64)
else:
outdtype = np.result_type(x,np.complex64)
if not same_data:
# Check if we can broadcast the outer axes together
xouter = list(x.shape)
youter = list(y.shape)
xouter.pop(axis)
youter.pop(axis)
try:
outershape = np.broadcast(np.empty(xouter), np.empty(youter)).shape
except ValueError:
raise ValueError('x and y cannot be broadcast together.')
if same_data:
if x.size == 0:
return np.empty(x.shape), np.empty(x.shape), np.empty(x.shape)
else:
if x.size == 0 or y.size == 0:
outshape = outershape + (min([x.shape[axis], y.shape[axis]]),)
emptyout = np.rollaxis(np.empty(outshape), -1, axis)
return emptyout, emptyout, emptyout
if x.ndim > 1:
if axis != -1:
x = np.rollaxis(x, axis, len(x.shape))
if not same_data and y.ndim > 1:
y = np.rollaxis(y, axis, len(y.shape))
# Check if x and y are the same length, zero-pad if necessary
if not same_data:
if x.shape[-1] != y.shape[-1]:
if x.shape[-1] < y.shape[-1]:
pad_shape = list(x.shape)
pad_shape[-1] = y.shape[-1] - x.shape[-1]
x = np.concatenate((x, np.zeros(pad_shape)), -1)
else:
pad_shape = list(y.shape)
pad_shape[-1] = x.shape[-1] - y.shape[-1]
y = np.concatenate((y, np.zeros(pad_shape)), -1)
# X and Y are same length now, can test nperseg with either
if x.shape[-1] < nperseg:
        warnings.warn('nperseg = {0:d} is greater than input length = {1:d}, '
'using nperseg = {1:d}'.format(nperseg, x.shape[-1]))
nperseg = x.shape[-1]
nperseg = int(nperseg)
if nperseg < 1:
raise ValueError('nperseg must be a positive integer')
if nfft is None:
nfft = nperseg
elif nfft < nperseg:
raise ValueError('nfft must be greater than or equal to nperseg.')
else:
nfft = int(nfft)
if noverlap is None:
noverlap = nperseg//2
elif noverlap >= nperseg:
raise ValueError('noverlap must be less than nperseg.')
else:
noverlap = int(noverlap)
# Handle detrending and window functions
if not detrend:
def detrend_func(d):
return d
elif not hasattr(detrend, '__call__'):
def detrend_func(d):
return signaltools.detrend(d, type=detrend, axis=-1)
elif axis != -1:
# Wrap this function so that it receives a shape that it could
# reasonably expect to receive.
def detrend_func(d):
d = np.rollaxis(d, -1, axis)
d = detrend(d)
return np.rollaxis(d, axis, len(d.shape))
else:
detrend_func = detrend
if isinstance(window, string_types) or type(window) is tuple:
win = get_window(window, nperseg)
else:
win = np.asarray(window)
if len(win.shape) != 1:
raise ValueError('window must be 1-D')
if win.shape[0] != nperseg:
raise ValueError('window must have length of nperseg')
if np.result_type(win,np.complex64) != outdtype:
win = win.astype(outdtype)
if mode == 'psd':
if scaling == 'density':
scale = 1.0 / (fs * (win*win).sum())
elif scaling == 'spectrum':
scale = 1.0 / win.sum()**2
else:
raise ValueError('Unknown scaling: %r' % scaling)
else:
scale = 1
if return_onesided is True:
if np.iscomplexobj(x):
sides = 'twosided'
else:
sides = 'onesided'
if not same_data:
if np.iscomplexobj(y):
sides = 'twosided'
else:
sides = 'twosided'
if sides == 'twosided':
num_freqs = nfft
elif sides == 'onesided':
if nfft % 2:
num_freqs = (nfft + 1)//2
else:
num_freqs = nfft//2 + 1
# Perform the windowed FFTs
result = _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft)
result = result[..., :num_freqs]
freqs = fftpack.fftfreq(nfft, 1/fs)[:num_freqs]
if not same_data:
# All the same operations on the y data
result_y = _fft_helper(y, win, detrend_func, nperseg, noverlap, nfft)
result_y = result_y[..., :num_freqs]
result = np.conjugate(result) * result_y
elif mode == 'psd':
result = np.conjugate(result) * result
elif mode == 'magnitude':
result = np.absolute(result)
elif mode == 'angle' or mode == 'phase':
result = np.angle(result)
elif mode == 'complex':
pass
result *= scale
if sides == 'onesided':
if nfft % 2:
result[...,1:] *= 2
else:
# Last point is unpaired Nyquist freq point, don't double
result[...,1:-1] *= 2
t = np.arange(nperseg/2, x.shape[-1] - nperseg/2 + 1, nperseg - noverlap)/float(fs)
if sides != 'twosided' and not nfft % 2:
# get the last value correctly, it is negative otherwise
freqs[-1] *= -1
# we unwrap the phase here to handle the onesided vs. twosided case
if mode == 'phase':
result = np.unwrap(result, axis=-1)
result = result.astype(outdtype)
# All imaginary parts are zero anyways
if same_data and mode != 'complex':
result = result.real
# Output is going to have new last axis for window index
if axis != -1:
# Specify as positive axis index
if axis < 0:
axis = len(result.shape)-1-axis
# Roll frequency axis back to axis where the data came from
result = np.rollaxis(result, -1, axis)
else:
# Make sure window/time index is last axis
result = np.rollaxis(result, -1, -2)
return freqs, t, result
def _fft_helper(x, win, detrend_func, nperseg, noverlap, nfft):
"""
Calculate windowed FFT, for internal use by scipy.signal._spectral_helper
This is a helper function that does the main FFT calculation for
    _spectral_helper. All input validation is performed there, and the data
axis is assumed to be the last axis of x. It is not designed to be called
externally. The windows are not averaged over; the result from each window
is returned.
Returns
-------
result : ndarray
Array of FFT data
References
----------
.. [1] Stack Overflow, "Repeat NumPy array without replicating data?",
http://stackoverflow.com/a/5568169
Notes
-----
Adapted from matplotlib.mlab
.. versionadded:: 0.16.0
"""
from scipy import fftpack
    # Create strided array of data segments
if nperseg == 1 and noverlap == 0:
result = x[..., np.newaxis]
else:
step = nperseg - noverlap
shape = x.shape[:-1]+((x.shape[-1]-noverlap)//step, nperseg)
strides = x.strides[:-1]+(step*x.strides[-1], x.strides[-1])
result = np.lib.stride_tricks.as_strided(x, shape=shape,
strides=strides)
# Detrend each data segment individually
result = detrend_func(result)
# Apply window by multiplication
result = win * result
# Perform the fft. Acts on last axis by default. Zero-pads automatically
result = fftpack.fft(result, n=nfft)
return result
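# Worked size example (illustrative, not part of the original module): with
# x.shape[-1] = 1024, nperseg = 256 and noverlap = 128, the step is 128, so
# the strided view built above has (1024 - 128) // 128 = 7 segments, i.e.
# shape (..., 7, 256); after the FFT with nfft = 256 the last axis holds the
# 256 frequency bins for each of those 7 windows.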
def get_spectrogram():
'''helper function to get relevant spectrogram'''
from .utils import check_version
if check_version('scipy', '0.16.0'):
from scipy.signal import spectrogram
else:
spectrogram = _spectrogram
return spectrogram
###############################################################################
# Misc utilities
def assert_true(expr, msg='False is not True'):
"""Fake assert_true without message"""
if not expr:
raise AssertionError(msg)
def assert_is(expr1, expr2, msg=None):
"""Fake assert_is without message"""
    assert_true(expr1 is expr2, msg)
def assert_is_not(expr1, expr2, msg=None):
"""Fake assert_is_not without message"""
assert_true(expr1 is not expr2, msg)
def _read_volume_info(fobj):
"""An implementation of nibabel.freesurfer.io._read_volume_info, since old
versions of nibabel (<=2.1.0) don't have it.
"""
volume_info = dict()
head = np.fromfile(fobj, '>i4', 1)
if not np.array_equal(head, [20]): # Read two bytes more
head = np.concatenate([head, np.fromfile(fobj, '>i4', 2)])
if not np.array_equal(head, [2, 0, 20]):
warnings.warn("Unknown extension code.")
return volume_info
volume_info['head'] = head
for key in ['valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']:
pair = fobj.readline().decode('utf-8').split('=')
if pair[0].strip() != key or len(pair) != 2:
raise IOError('Error parsing volume info.')
if key in ('valid', 'filename'):
volume_info[key] = pair[1].strip()
elif key == 'volume':
volume_info[key] = np.array(pair[1].split()).astype(int)
else:
volume_info[key] = np.array(pair[1].split()).astype(float)
# Ignore the rest
return volume_info
def _serialize_volume_info(volume_info):
"""An implementation of nibabel.freesurfer.io._serialize_volume_info, since
old versions of nibabel (<=2.1.0) don't have it."""
keys = ['head', 'valid', 'filename', 'volume', 'voxelsize', 'xras', 'yras',
'zras', 'cras']
diff = set(volume_info.keys()).difference(keys)
if len(diff) > 0:
raise ValueError('Invalid volume info: %s.' % diff.pop())
strings = list()
for key in keys:
if key == 'head':
if not (np.array_equal(volume_info[key], [20]) or np.array_equal(
volume_info[key], [2, 0, 20])):
warnings.warn("Unknown extension code.")
strings.append(np.array(volume_info[key], dtype='>i4').tostring())
elif key in ('valid', 'filename'):
val = volume_info[key]
strings.append('{0} = {1}\n'.format(key, val).encode('utf-8'))
elif key == 'volume':
val = volume_info[key]
strings.append('{0} = {1} {2} {3}\n'.format(
key, val[0], val[1], val[2]).encode('utf-8'))
else:
val = volume_info[key]
strings.append('{0} = {1:0.10g} {2:0.10g} {3:0.10g}\n'.format(
key.ljust(6), val[0], val[1], val[2]).encode('utf-8'))
return b''.join(strings)
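# Illustrative round-trip sketch (not part of the original module); the dict
# below mimics what _read_volume_info() returns for a FreeSurfer surface:
#
#     info = {'head': [2, 0, 20], 'valid': '1  # volume info valid',
#             'filename': 'vol.mgz', 'volume': [256, 256, 256],
#             'voxelsize': [1., 1., 1.], 'xras': [-1., 0., 0.],
#             'yras': [0., 0., -1.], 'zras': [0., 1., 0.],
#             'cras': [0., 0., 0.]}
#     blob = _serialize_volume_info(info)   # bytes ready to append to a file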
##############################################################################
# adapted from scikit-learn
def is_classifier(estimator):
"""Returns True if the given estimator is (probably) a classifier.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a classifier and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "classifier"
def is_regressor(estimator):
"""Returns True if the given estimator is (probably) a regressor.
Parameters
----------
estimator : object
Estimator object to test.
Returns
-------
out : bool
True if estimator is a regressor and False otherwise.
"""
return getattr(estimator, "_estimator_type", None) == "regressor"
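# Illustrative sketch (not part of the original module): any object carrying
# scikit-learn's ``_estimator_type`` marker is recognised by the two helpers:
#
#     class _DummyClassifier(object):
#         _estimator_type = "classifier"
#     # is_classifier(_DummyClassifier())  -> True
#     # is_regressor(_DummyClassifier())   -> False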
class BaseEstimator(object):
"""Base class for all estimators in scikit-learn
Notes
-----
All estimators should specify all the parameters that can be set
at the class level in their ``__init__`` as explicit keyword
arguments (no ``*args`` or ``**kwargs``).
"""
@classmethod
def _get_param_names(cls):
"""Get parameter names for the estimator"""
try:
from inspect import signature
except ImportError:
from .externals.funcsigs import signature
# fetch the constructor or the original constructor before
# deprecation wrapping if any
init = getattr(cls.__init__, 'deprecated_original', cls.__init__)
if init is object.__init__:
# No explicit constructor to introspect
return []
# introspect the constructor arguments to find the model parameters
# to represent
init_signature = signature(init)
# Consider the constructor parameters excluding 'self'
parameters = [p for p in init_signature.parameters.values()
if p.name != 'self' and p.kind != p.VAR_KEYWORD]
for p in parameters:
if p.kind == p.VAR_POSITIONAL:
raise RuntimeError("scikit-learn estimators should always "
"specify their parameters in the signature"
" of their __init__ (no varargs)."
" %s with constructor %s doesn't "
" follow this convention."
% (cls, init_signature))
# Extract and sort argument names excluding 'self'
return sorted([p.name for p in parameters])
def get_params(self, deep=True):
"""Get parameters for this estimator.
Parameters
----------
deep : boolean, optional
If True, will return the parameters for this estimator and
contained subobjects that are estimators.
Returns
-------
params : mapping of string to any
Parameter names mapped to their values.
"""
out = dict()
for key in self._get_param_names():
# We need deprecation warnings to always be on in order to
# catch deprecated param values.
# This is set in utils/__init__.py but it gets overwritten
# when running under python3 somehow.
warnings.simplefilter("always", DeprecationWarning)
try:
with warnings.catch_warnings(record=True) as w:
value = getattr(self, key, None)
if len(w) and w[0].category == DeprecationWarning:
# if the parameter is deprecated, don't show it
continue
finally:
warnings.filters.pop(0)
# XXX: should we rather test if instance of estimator?
if deep and hasattr(value, 'get_params'):
deep_items = value.get_params().items()
out.update((key + '__' + k, val) for k, val in deep_items)
out[key] = value
return out
def set_params(self, **params):
"""Set the parameters of this estimator.
The method works on simple estimators as well as on nested objects
(such as pipelines). The latter have parameters of the form
``<component>__<parameter>`` so that it's possible to update each
component of a nested object.
Returns
-------
self
"""
if not params:
# Simple optimisation to gain speed (inspect is slow)
return self
valid_params = self.get_params(deep=True)
for key, value in iteritems(params):
split = key.split('__', 1)
if len(split) > 1:
# nested objects case
name, sub_name = split
if name not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(name, self))
sub_object = valid_params[name]
sub_object.set_params(**{sub_name: value})
else:
# simple objects case
if key not in valid_params:
raise ValueError('Invalid parameter %s for estimator %s. '
'Check the list of available parameters '
'with `estimator.get_params().keys()`.' %
(key, self.__class__.__name__))
setattr(self, key, value)
return self
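    # Illustrative sketch (not part of the original class): nested parameters
    # use the double-underscore convention. For a hypothetical estimator
    # ``est`` exposing a sub-estimator under the ``base`` parameter:
    #
    #     est.set_params(alpha=0.1, base__max_iter=200)
    #     est.get_params(deep=True)   # includes the 'base__max_iter' key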
def __repr__(self):
from sklearn.base import _pprint
class_name = self.__class__.__name__
return '%s(%s)' % (class_name, _pprint(self.get_params(deep=False),
offset=len(class_name),),)
# __getstate__ and __setstate__ are omitted because they only contain
# conditionals that are not satisfied by our objects (e.g.,
# ``if type(self).__module__.startswith('sklearn.')``.
###############################################################################
# Copied from sklearn to simplify code paths
def empirical_covariance(X, assume_centered=False):
"""Computes the Maximum likelihood covariance estimator
Parameters
----------
X : ndarray, shape (n_samples, n_features)
Data from which to compute the covariance estimate
assume_centered : Boolean
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False, data are centered before computation.
Returns
-------
covariance : 2D ndarray, shape (n_features, n_features)
Empirical covariance (Maximum Likelihood Estimator).
"""
X = np.asarray(X)
if X.ndim == 1:
X = np.reshape(X, (1, -1))
if X.shape[0] == 1:
warnings.warn("Only one sample available. "
"You may want to reshape your data array")
if assume_centered:
covariance = np.dot(X.T, X) / X.shape[0]
else:
covariance = np.cov(X.T, bias=1)
if covariance.ndim == 0:
covariance = np.array([[covariance]])
return covariance
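# Worked example (illustrative, not part of the original module): for two
# centered samples of two features the MLE covariance is X.T.dot(X) / n:
#
#     X = np.array([[1., -1.], [-1., 1.]])
#     # empirical_covariance(X, assume_centered=True)
#     # -> array([[ 1., -1.],
#     #           [-1.,  1.]])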
class EmpiricalCovariance(BaseEstimator):
"""Maximum likelihood covariance estimator
Read more in the :ref:`User Guide <covariance>`.
Parameters
----------
store_precision : bool
Specifies if the estimated precision is stored.
assume_centered : bool
If True, data are not centered before computation.
Useful when working with data whose mean is almost, but not exactly
zero.
If False (default), data are centered before computation.
Attributes
----------
covariance_ : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix
precision_ : 2D ndarray, shape (n_features, n_features)
Estimated pseudo-inverse matrix.
(stored only if store_precision is True)
"""
def __init__(self, store_precision=True, assume_centered=False):
self.store_precision = store_precision
self.assume_centered = assume_centered
def _set_covariance(self, covariance):
"""Saves the covariance and precision estimates
Storage is done accordingly to `self.store_precision`.
Precision stored only if invertible.
Parameters
----------
covariance : 2D ndarray, shape (n_features, n_features)
Estimated covariance matrix to be stored, and from which precision
is computed.
"""
# covariance = check_array(covariance)
# set covariance
self.covariance_ = covariance
# set precision
if self.store_precision:
self.precision_ = linalg.pinvh(covariance)
else:
self.precision_ = None
def get_precision(self):
"""Getter for the precision matrix.
Returns
-------
precision_ : array-like,
The precision matrix associated to the current covariance object.
"""
if self.store_precision:
precision = self.precision_
else:
precision = linalg.pinvh(self.covariance_)
return precision
def fit(self, X, y=None):
"""Fits the Maximum Likelihood Estimator covariance model
according to the given training data and parameters.
Parameters
----------
X : array-like, shape = [n_samples, n_features]
Training data, where n_samples is the number of samples and
n_features is the number of features.
y : not used, present for API consistence purpose.
Returns
-------
self : object
Returns self.
"""
# X = check_array(X)
if self.assume_centered:
self.location_ = np.zeros(X.shape[1])
else:
self.location_ = X.mean(0)
covariance = empirical_covariance(
X, assume_centered=self.assume_centered)
self._set_covariance(covariance)
return self
def score(self, X_test, y=None):
"""Computes the log-likelihood of a Gaussian data set with
`self.covariance_` as an estimator of its covariance matrix.
Parameters
----------
X_test : array-like, shape = [n_samples, n_features]
Test data of which we compute the likelihood, where n_samples is
the number of samples and n_features is the number of features.
X_test is assumed to be drawn from the same distribution than
the data used in fit (including centering).
y : not used, present for API consistence purpose.
Returns
-------
res : float
The likelihood of the data set with `self.covariance_` as an
estimator of its covariance matrix.
"""
# compute empirical covariance of the test set
test_cov = empirical_covariance(
X_test - self.location_, assume_centered=True)
# compute log likelihood
res = log_likelihood(test_cov, self.get_precision())
return res
def error_norm(self, comp_cov, norm='frobenius', scaling=True,
squared=True):
"""Computes the Mean Squared Error between two covariance estimators.
(In the sense of the Frobenius norm).
Parameters
----------
comp_cov : array-like, shape = [n_features, n_features]
The covariance to compare with.
norm : str
The type of norm used to compute the error. Available error types:
- 'frobenius' (default): sqrt(tr(A^t.A))
- 'spectral': sqrt(max(eigenvalues(A^t.A))
where A is the error ``(comp_cov - self.covariance_)``.
scaling : bool
If True (default), the squared error norm is divided by n_features.
If False, the squared error norm is not rescaled.
squared : bool
Whether to compute the squared error norm or the error norm.
If True (default), the squared error norm is returned.
If False, the error norm is returned.
Returns
-------
The Mean Squared Error (in the sense of the Frobenius norm) between
`self` and `comp_cov` covariance estimators.
"""
# compute the error
error = comp_cov - self.covariance_
# compute the error norm
if norm == "frobenius":
squared_norm = np.sum(error ** 2)
elif norm == "spectral":
squared_norm = np.amax(linalg.svdvals(np.dot(error.T, error)))
else:
raise NotImplementedError(
"Only spectral and frobenius norms are implemented")
# optionally scale the error norm
if scaling:
squared_norm = squared_norm / error.shape[0]
# finally get either the squared norm or the norm
if squared:
result = squared_norm
else:
result = np.sqrt(squared_norm)
return result
def mahalanobis(self, observations):
"""Computes the squared Mahalanobis distances of given observations.
Parameters
----------
observations : array-like, shape = [n_observations, n_features]
The observations, the Mahalanobis distances of the which we
compute. Observations are assumed to be drawn from the same
distribution than the data used in fit.
Returns
-------
mahalanobis_distance : array, shape = [n_observations,]
Squared Mahalanobis distances of the observations.
"""
precision = self.get_precision()
# compute mahalanobis distances
centered_obs = observations - self.location_
mahalanobis_dist = np.sum(
np.dot(centered_obs, precision) * centered_obs, 1)
return mahalanobis_dist
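# Illustrative usage sketch (not part of the original module); the data are
# made up:
#
#     rng = np.random.RandomState(0)
#     X = rng.randn(100, 3)
#     cov = EmpiricalCovariance().fit(X)
#     # cov.covariance_.shape    == (3, 3)
#     # cov.mahalanobis(X).shape == (100,)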
def log_likelihood(emp_cov, precision):
"""Computes the sample mean of the log_likelihood under a covariance model
computes the empirical expected log-likelihood (accounting for the
normalization terms and scaling), allowing for universal comparison (beyond
this software package)
Parameters
----------
emp_cov : 2D ndarray (n_features, n_features)
Maximum Likelihood Estimator of covariance
precision : 2D ndarray (n_features, n_features)
The precision matrix of the covariance model to be tested
Returns
-------
sample mean of the log-likelihood
"""
p = precision.shape[0]
log_likelihood_ = - np.sum(emp_cov * precision) + _logdet(precision)
log_likelihood_ -= p * np.log(2 * np.pi)
log_likelihood_ /= 2.
return log_likelihood_
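# In formula form (added for clarity, not part of the original module): with
# p features, empirical covariance S and tested precision K, the value
# returned above is the per-sample Gaussian log-likelihood up to constants:
#     L = (-trace(S.dot(K)) + logdet(K) - p * log(2 * pi)) / 2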
# sklearn uses np.linalg for this, but ours is more robust to zero eigenvalues
def _logdet(A):
"""Compute the log det of a positive semidefinite matrix."""
vals = linalg.eigh(A)[0]
# avoid negative (numerical errors) or zero (semi-definite matrix) values
tol = vals.max() * vals.size * np.finfo(np.float64).eps
vals = np.where(vals > tol, vals, tol)
return np.sum(np.log(vals))
###############################################################################
# NumPy einsum backward compat (allow "optimize" arg and fix 1.14.0 bug)
# XXX eventually we should hand-tune our `einsum` calls given our array sizes!
_has_optimize = (LooseVersion(np.__version__) >= '1.12')
def einsum(*args, **kwargs):
if 'optimize' in kwargs:
if not _has_optimize:
kwargs.pop('optimize')
elif _has_optimize:
kwargs['optimize'] = False
return np.einsum(*args, **kwargs)
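# Illustrative sketch (not part of the original module): callers may always
# pass ``optimize``; on NumPy < 1.12 the wrapper above silently drops it:
#
#     a = np.arange(6.).reshape(2, 3)
#     # einsum('ij,ij->i', a, a, optimize=True)  -> array([ 5., 50.])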
| bsd-3-clause | -6,431,414,644,471,259,000 | 34.417068 | 106 | 0.574707 | false |
cujam/Pi_face | camera.py | 1 | 6241 | #-*- coding:utf-8 -*-
import cv2
import time
from tool.ConnectMysql import *
from tool.faceapi import *
from tool.log import *
from tool.board import *
from tool.email import *
import threading
class Camera:
'''
    Camera module: face and human-body recognition.
'''
def __init__(self):
self.mysql = Mysql()
self.logging = Log()
self.knife = Knife()
self.api = Api('', '')
self.screenfetch = './data/screenfetch/screenfetch.jpg'
self.log = './data/log/camera.log'
self.recording = './data/recording/recording.log'
self.temp = '/CAM_AI/data/temp/temp.pgm'
self.model_path = './data/cascades/haarcascade_frontalface_alt.xml'
self.frame = 0
def get_message(self, face_token):
'''
        Fetch the person's name from the database.
'''
name = self.mysql.search_name(face_token)
return name
def search_face(self, image):
'''
        Face detection and recognition.
'''
clf = cv2.CascadeClassifier(self.model_path)
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
faces = clf.detectMultiScale(
gray,
scaleFactor=1.1,
minNeighbors=5,
minSize=(30,30),
flags=cv2.CASCADE_SCALE_IMAGE
)
for (x,y,w,h) in faces:
img = cv2.rectangle(image, (x,y), (x+w, y+h), (0, 255, 0), 2)
nowtime = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
cv2.imwrite('./data/screenfetch/face/' + nowtime + '.jpg',img)
f = cv2.resize(gray[y:y+h, x:x+w],(200,200))
cv2.imwrite(self.temp, f)
result = self.api.face_search(self.temp,outerid ='face')
result = eval(result[1])
try:
if result["results"][0]["confidence"] >= 80:
face_token = result["results"][0]["face_token"]
name = self.get_message(face_token)[0]
                    msg = name + ' entered'
self.logging.accessinfo(msg)
time.sleep(10)
raise
else:
cv2.putText(img, "Unknow", (x, y-10), cv2.FONT_HERSHEY_COMPLEX_SMALL, 1,(0,0,255),2)
cv2.imwrite('/CAM_AI/data/unknow/unknow.jpg', img)
try:
send = sendmail()
                        msg = 'Warning email sent to {}'.format(send)
self.logging.accessinfo(msg)
except:
                        msg = 'Failed to send warning email'
self.logging.errorinfo(msg)
                    msg = 'Stranger detected'
self.logging.accessinfo(msg)
with open("./data/red/red.conf", 'w') as f:
f.write('1')
except:
                msg = 'Exception occurred, retrying'
self.logging.errorinfo(msg)
raise
return image
def search_body(self, image):
'''
        Human-body detection.
'''
cv2.imwrite('./data/temp/body.jpg' ,image)
image_file = './data/temp/body.jpg'
result = self.api.HumanBodyDetect(image_file, 'gender,cloth_color')
nowtime = time.strftime("%Y_%m_%d_%H_%M_%S", time.localtime())
data = eval(result[1])
try:
confidence = data['humanbodies'][0]['confidence']
cv2.imwrite('./data/screenfetch/body/' + nowtime + '.jpg', image)
if confidence > 0:
data_dict = data['humanbodies'][0]['attributes']
gender = [data_dict['gender']['value'], data_dict['gender']['confidence']]
upper_body_cloth_color = data_dict['upper_body_cloth_color']
lower_body_cloth_color = data_dict['lower_body_cloth_color']
                message = 'Detected a person: gender {gender} (confidence {confidence}), upper cloth color {upper}, lower cloth color {lower}'.format(\
gender=gender[0], confidence=gender[1], upper=upper_body_cloth_color, lower=lower_body_cloth_color)
self.logging.accessinfo(message)
return True
else:
self.logging.accessinfo("接受传感器反馈,但没有检测到人")
return False
except:
self.logging.errorinfo("进行人体检测时出现异常,重试")
raise
camera = Camera()
cap = cv2.VideoCapture(0)
k = Knife()
def led():
while True:
global signal
global red
signal = camera.knife.sensor()
if signal:
camera.knife.warning_led()
else:
camera.knife.offwarning_led()
with open('./data/red/red.conf') as f:
red = f.read()
if red == '1':
k.red()
camera.knife.offwarning_led()
else:
with open('./data/led/led.conf') as f:
led_status = f.read()
if led_status == '1':
k.show_time()
else:
k.show_temp()
camera.knife.working_led()
def Cam():
    msg = 'Camera started'
camera.logging.accessinfo(msg)
while True:
try:
if cap.isOpened():
ret ,frame = cap.read()
try:
if signal:
result = camera.search_body(frame)
if result:
camera.search_face(frame)
except RuntimeError:
continue
except IndexError:
continue
except KeyError:
msg = "服务器过载,重试.."
camera.logging.errorinfo(msg)
continue
else:
cap.release()
cv2.destroyAllWindows()
except:
cap.release()
cv2.destroyAllWindows()
raise
if __name__ == "__main__":
threads = []
t1 = threading.Thread(target=led)
threads.append(t1)
t2 = threading.Thread(target=Cam)
threads.append(t2)
for t in threads:
t.setDaemon(True)
t.start()
t.join()
| gpl-3.0 | 1,500,436,264,549,425,700 | 31.570652 | 123 | 0.482897 | false |
schleichdi2/OPENNFR-6.0-CORE | opennfr-openembedded-core/scripts/lib/devtool/runqemu.py | 1 | 3175 | # Development tool - runqemu command plugin
#
# Copyright (C) 2015 Intel Corporation
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
"""Devtool runqemu plugin"""
import os
import bb
import logging
import argparse
import glob
from devtool import exec_build_env_command, setup_tinfoil, DevtoolError
logger = logging.getLogger('devtool')
def runqemu(args, config, basepath, workspace):
"""Entry point for the devtool 'runqemu' subcommand"""
tinfoil = setup_tinfoil(config_only=True, basepath=basepath)
try:
machine = tinfoil.config_data.getVar('MACHINE')
bindir_native = tinfoil.config_data.getVar('STAGING_BINDIR_NATIVE')
finally:
tinfoil.shutdown()
if not glob.glob(os.path.join(bindir_native, 'qemu-system-*')):
raise DevtoolError('QEMU is not available within this SDK')
imagename = args.imagename
if not imagename:
sdk_targets = config.get('SDK', 'sdk_targets', '').split()
if sdk_targets:
imagename = sdk_targets[0]
if not imagename:
raise DevtoolError('Unable to determine image name to run, please specify one')
try:
# FIXME runqemu assumes that if OECORE_NATIVE_SYSROOT is set then it shouldn't
# run bitbake to find out the values of various environment variables, which
# isn't the case for the extensible SDK. Work around it for now.
newenv = dict(os.environ)
newenv.pop('OECORE_NATIVE_SYSROOT', '')
exec_build_env_command(config.init_path, basepath, 'runqemu %s %s %s' % (machine, imagename, " ".join(args.args)), watch=True, env=newenv)
except bb.process.ExecutionError as e:
# We've already seen the output since watch=True, so just ensure we return something to the user
return e.exitcode
return 0
def register_commands(subparsers, context):
"""Register devtool subcommands from this plugin"""
if context.fixed_setup:
parser_runqemu = subparsers.add_parser('runqemu', help='Run QEMU on the specified image',
description='Runs QEMU to boot the specified image',
group='testbuild', order=-20)
parser_runqemu.add_argument('imagename', help='Name of built image to boot within QEMU', nargs='?')
parser_runqemu.add_argument('args', help='Any remaining arguments are passed to the runqemu script (pass --help after imagename to see what these are)',
nargs=argparse.REMAINDER)
parser_runqemu.set_defaults(func=runqemu)
| gpl-2.0 | 652,830,116,797,236,500 | 43.097222 | 160 | 0.682835 | false |
Azure/azure-sdk-for-python | sdk/digitaltwins/azure-digitaltwins-core/azure/digitaltwins/core/_generated/aio/operations/_query_operations.py | 1 | 5829 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, Callable, Dict, Generic, Optional, TypeVar
import warnings
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from ... import models as _models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class QueryOperations:
"""QueryOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~azure.digitaltwins.core.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = _models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def query_twins(
self,
query_specification: "_models.QuerySpecification",
query_twins_options: Optional["_models.QueryTwinsOptions"] = None,
**kwargs
) -> "_models.QueryResult":
"""Executes a query that allows traversing relationships and filtering by property values.
Status codes:
* 200 OK
* 400 Bad Request
* BadRequest - The continuation token is invalid.
* SqlQueryError - The query contains some errors.
* 429 Too Many Requests
* QuotaReachedError - The maximum query rate limit has been reached.
:param query_specification: The query specification to execute.
:type query_specification: ~azure.digitaltwins.core.models.QuerySpecification
:param query_twins_options: Parameter group.
:type query_twins_options: ~azure.digitaltwins.core.models.QueryTwinsOptions
:keyword callable cls: A custom type or function that will be passed the direct response
:return: QueryResult, or the result of cls(response)
:rtype: ~azure.digitaltwins.core.models.QueryResult
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["_models.QueryResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
_traceparent = None
_tracestate = None
_max_items_per_page = None
if query_twins_options is not None:
_traceparent = query_twins_options.traceparent
_tracestate = query_twins_options.tracestate
_max_items_per_page = query_twins_options.max_items_per_page
api_version = "2020-10-31"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self.query_twins.metadata['url'] # type: ignore
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
if _traceparent is not None:
header_parameters['traceparent'] = self._serialize.header("traceparent", _traceparent, 'str')
if _tracestate is not None:
header_parameters['tracestate'] = self._serialize.header("tracestate", _tracestate, 'str')
if _max_items_per_page is not None:
header_parameters['max-items-per-page'] = self._serialize.header("max_items_per_page", _max_items_per_page, 'int')
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(query_specification, 'QuerySpecification')
body_content_kwargs['content'] = body_content
request = self._client.post(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
error = self._deserialize(_models.ErrorResponse, response)
raise HttpResponseError(response=response, model=error)
response_headers = {}
response_headers['query-charge']=self._deserialize('float', response.headers.get('query-charge'))
deserialized = self._deserialize('QueryResult', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, response_headers)
return deserialized
query_twins.metadata = {'url': '/query'} # type: ignore
| mit | -1,297,216,339,586,366,700 | 45.261905 | 133 | 0.660834 | false |
ah-anssi/SecuML | SecuML/experiments/ActiveLearning/UpdateModelExp.py | 1 | 2723 | # SecuML
# Copyright (C) 2017 ANSSI
#
# SecuML is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# SecuML is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with SecuML. If not, see <http://www.gnu.org/licenses/>.
import json
import os.path as path
from SecuML.core.ActiveLearning.UpdateModel import UpdateModel
from SecuML.experiments.Classification.ClassificationExperiment import ClassificationExperiment
from SecuML.experiments.Classification.RunClassifier import RunClassifier
class UpdateModelExp(UpdateModel):
def __init__(self, iteration):
UpdateModel.__init__(self, iteration)
self.experiment = self.iteration.experiment
def run(self):
models_conf = self.iteration.conf.models_conf
self.models_exp = {}
for k, conf in models_conf.items():
self.models_exp[k] = self.runModel(k, conf)
self.exportModelsExperiments()
def exportModelsExperiments(self):
export_models = {}
for k, exp in self.models_exp.items():
export_models[k] = exp.experiment_id
output_file = path.join(self.iteration.iteration_dir,
'models_experiments.json')
with open(output_file, 'w') as f:
json.dump(export_models, f, indent=2)
def runModel(self, kind, conf):
self.setDatasets(conf)
# Create the experiment
exp = self.experiment
name = 'AL' + str(exp.experiment_id) + '-Iter'
name += str(self.iteration.iteration_number) + '-' + kind
model_exp = ClassificationExperiment(exp.project, exp.dataset, exp.session,
experiment_name=name,
parent=exp.experiment_id)
model_exp.setConf(conf, exp.features_filename,
annotations_id=exp.annotations_id)
model_exp.export()
# Build the model
model = conf.model_class(model_exp.conf, cv_monitoring=True)
model_run = RunClassifier(model, self.datasets, model_exp)
model_run.run()
self.models[kind] = model
# Execution time monitoring
time = model.training_execution_time + model.testing_execution_time
self.times[kind] = time
return model_exp
| gpl-2.0 | -4,505,530,996,631,867,400 | 36.819444 | 95 | 0.652589 | false |
maremaremare/elisa_v | elisa_v/content/migrations/0001_initial.py | 1 | 2764 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'Page'
db.create_table(u'content_page', (
(u'id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('parent', self.gf('mptt.fields.TreeForeignKey')(blank=True, related_name='children', null=True, to=orm['content.Page'])),
('title', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('subtitle', self.gf('django.db.models.fields.CharField')(unique=True, max_length=255)),
('menu_name', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('slug', self.gf('django.db.models.fields.CharField')(unique=True, max_length=50)),
('text', self.gf('django.db.models.fields.TextField')()),
(u'lft', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
(u'rght', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
(u'tree_id', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
(u'level', self.gf('django.db.models.fields.PositiveIntegerField')(db_index=True)),
))
db.send_create_signal(u'content', ['Page'])
def backwards(self, orm):
# Deleting model 'Page'
db.delete_table(u'content_page')
models = {
u'content.page': {
'Meta': {'object_name': 'Page'},
u'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
u'level': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
u'lft': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'menu_name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'parent': ('mptt.fields.TreeForeignKey', [], {'blank': 'True', 'related_name': "'children'", 'null': 'True', 'to': u"orm['content.Page']"}),
u'rght': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'}),
'slug': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '50'}),
'subtitle': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
'text': ('django.db.models.fields.TextField', [], {}),
'title': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '255'}),
u'tree_id': ('django.db.models.fields.PositiveIntegerField', [], {'db_index': 'True'})
}
}
complete_apps = ['content'] | mit | 3,291,213,521,396,249,000 | 54.3 | 152 | 0.58864 | false |
smainand/scapy | scapy/layers/smb.py | 1 | 18582 | # This file is part of Scapy
# See http://www.secdev.org/projects/scapy for more information
# Copyright (C) Philippe Biondi <[email protected]>
# This program is published under a GPLv2 license
"""
SMB (Server Message Block), also known as CIFS.
"""
from scapy.packet import *
from scapy.fields import *
from scapy.layers.netbios import NBTSession
# SMB NetLogon Response Header
class SMBNetlogon_Protocol_Response_Header(Packet):
name = "SMBNetlogon Protocol Response Header"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x25, {0x25: "Trans"}),
ByteField("Error_Class", 0x02),
ByteField("Reserved", 0),
LEShortField("Error_code", 4),
ByteField("Flags", 0),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 0),
LEShortField("UID", 0),
LEShortField("MID", 0),
ByteField("WordCount", 17),
LEShortField("TotalParamCount", 0),
LEShortField("TotalDataCount", 112),
LEShortField("MaxParamCount", 0),
LEShortField("MaxDataCount", 0),
ByteField("MaxSetupCount", 0),
ByteField("unused2", 0),
LEShortField("Flags3", 0),
ByteField("TimeOut1", 0xe8),
ByteField("TimeOut2", 0x03),
LEShortField("unused3", 0),
LEShortField("unused4", 0),
LEShortField("ParamCount2", 0),
LEShortField("ParamOffset", 0),
LEShortField("DataCount", 112),
LEShortField("DataOffset", 92),
ByteField("SetupCount", 3),
ByteField("unused5", 0)]
# SMB MailSlot Protocol
class SMBMailSlot(Packet):
name = "SMB Mail Slot Protocol"
fields_desc = [LEShortField("opcode", 1),
LEShortField("priority", 1),
LEShortField("class", 2),
LEShortField("size", 135),
StrNullField("name", "\\MAILSLOT\\NET\\GETDC660")]
# SMB NetLogon Protocol Response Tail SAM
class SMBNetlogon_Protocol_Response_Tail_SAM(Packet):
name = "SMB Netlogon Protocol Response Tail SAM"
fields_desc = [ByteEnumField("Command", 0x17, {0x12: "SAM logon request", 0x17: "SAM Active directory Response"}), # noqa: E501
ByteField("unused", 0),
ShortField("Data1", 0),
ShortField("Data2", 0xfd01),
ShortField("Data3", 0),
ShortField("Data4", 0xacde),
ShortField("Data5", 0x0fe5),
ShortField("Data6", 0xd10a),
ShortField("Data7", 0x374c),
ShortField("Data8", 0x83e2),
ShortField("Data9", 0x7dd9),
ShortField("Data10", 0x3a16),
ShortField("Data11", 0x73ff),
ByteField("Data12", 0x04),
StrFixedLenField("Data13", "rmff", 4),
ByteField("Data14", 0x0),
ShortField("Data16", 0xc018),
ByteField("Data18", 0x0a),
StrFixedLenField("Data20", "rmff-win2k", 10),
ByteField("Data21", 0xc0),
ShortField("Data22", 0x18c0),
ShortField("Data23", 0x180a),
StrFixedLenField("Data24", "RMFF-WIN2K", 10),
ShortField("Data25", 0),
ByteField("Data26", 0x17),
StrFixedLenField("Data27", "Default-First-Site-Name", 23),
ShortField("Data28", 0x00c0),
ShortField("Data29", 0x3c10),
ShortField("Data30", 0x00c0),
ShortField("Data31", 0x0200),
ShortField("Data32", 0x0),
ShortField("Data33", 0xac14),
ShortField("Data34", 0x0064),
ShortField("Data35", 0x0),
ShortField("Data36", 0x0),
ShortField("Data37", 0x0),
ShortField("Data38", 0x0),
ShortField("Data39", 0x0d00),
ShortField("Data40", 0x0),
ShortField("Data41", 0xffff)]
# SMB NetLogon Protocol Response Tail LM2.0
class SMBNetlogon_Protocol_Response_Tail_LM20(Packet):
name = "SMB Netlogon Protocol Response Tail LM20"
fields_desc = [ByteEnumField("Command", 0x06, {0x06: "LM 2.0 Response to logon request"}), # noqa: E501
ByteField("unused", 0),
StrFixedLenField("DblSlash", "\\\\", 2),
StrNullField("ServerName", "WIN"),
LEShortField("LM20Token", 0xffff)]
# SMBNegociate Protocol Request Header
class SMBNegociate_Protocol_Request_Header(Packet):
name = "SMBNegociate Protocol Request Header"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_code", 0),
ByteField("Flags", 0x18),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 0),
LEShortField("ByteCount", 12)]
# SMB Negotiate Protocol Request Tail
class SMBNegociate_Protocol_Request_Tail(Packet):
name = "SMB Negotiate Protocol Request Tail"
fields_desc = [ByteField("BufferFormat", 0x02),
StrNullField("BufferData", "NT LM 0.12")]
# SMBNegociate Protocol Response Advanced Security
class SMBNegociate_Protocol_Response_Advanced_Security(Packet):
name = "SMBNegociate Protocol Response Advanced Security"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x98),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 17),
LEShortField("DialectIndex", 7),
ByteField("SecurityMode", 0x03),
LEShortField("MaxMpxCount", 50),
LEShortField("MaxNumberVC", 1),
LEIntField("MaxBufferSize", 16144),
LEIntField("MaxRawSize", 65536),
LEIntField("SessionKey", 0x0000),
LEShortField("ServerCapabilities", 0xf3f9),
BitField("UnixExtensions", 0, 1),
BitField("Reserved2", 0, 7),
BitField("ExtendedSecurity", 1, 1),
BitField("CompBulk", 0, 2),
BitField("Reserved3", 0, 5),
# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94. # noqa: E501
LEIntField("ServerTimeHigh", 0xD6228000),
LEIntField("ServerTimeLow", 0x1C4EF94),
LEShortField("ServerTimeZone", 0x3c),
ByteField("EncryptionKeyLength", 0),
LEFieldLenField("ByteCount", None, "SecurityBlob", adjust=lambda pkt, x: x - 16), # noqa: E501
BitField("GUID", 0, 128),
StrLenField("SecurityBlob", "", length_from=lambda x: x.ByteCount + 16)] # noqa: E501
# SMBNegociate Protocol Response No Security
# When using no security, with EncryptionKeyLength=8, you must have an EncryptionKey before the DomainName # noqa: E501
class SMBNegociate_Protocol_Response_No_Security(Packet):
name = "SMBNegociate Protocol Response No Security"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x98),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 17),
LEShortField("DialectIndex", 7),
ByteField("SecurityMode", 0x03),
LEShortField("MaxMpxCount", 50),
LEShortField("MaxNumberVC", 1),
LEIntField("MaxBufferSize", 16144),
LEIntField("MaxRawSize", 65536),
LEIntField("SessionKey", 0x0000),
LEShortField("ServerCapabilities", 0xf3f9),
BitField("UnixExtensions", 0, 1),
BitField("Reserved2", 0, 7),
BitField("ExtendedSecurity", 0, 1),
FlagsField("CompBulk", 0, 2, "CB"),
BitField("Reserved3", 0, 5),
# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94. # noqa: E501
LEIntField("ServerTimeHigh", 0xD6228000),
LEIntField("ServerTimeLow", 0x1C4EF94),
LEShortField("ServerTimeZone", 0x3c),
ByteField("EncryptionKeyLength", 8),
LEShortField("ByteCount", 24),
BitField("EncryptionKey", 0, 64),
StrNullField("DomainName", "WORKGROUP"),
StrNullField("ServerName", "RMFF1")]
# SMBNegociate Protocol Response No Security No Key
class SMBNegociate_Protocol_Response_No_Security_No_Key(Packet):
    name = "SMBNegociate Protocol Response No Security No Key"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x72, {0x72: "SMB_COM_NEGOTIATE"}),
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x98),
LEShortField("Flags2", 0x0000),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 17),
LEShortField("DialectIndex", 7),
ByteField("SecurityMode", 0x03),
LEShortField("MaxMpxCount", 50),
LEShortField("MaxNumberVC", 1),
LEIntField("MaxBufferSize", 16144),
LEIntField("MaxRawSize", 65536),
LEIntField("SessionKey", 0x0000),
LEShortField("ServerCapabilities", 0xf3f9),
BitField("UnixExtensions", 0, 1),
BitField("Reserved2", 0, 7),
BitField("ExtendedSecurity", 0, 1),
FlagsField("CompBulk", 0, 2, "CB"),
BitField("Reserved3", 0, 5),
# There have been 127490112000000000 tenths of micro-seconds between 1st january 1601 and 1st january 2005. 127490112000000000=0x1C4EF94D6228000, so ServerTimeHigh=0xD6228000 and ServerTimeLow=0x1C4EF94. # noqa: E501
LEIntField("ServerTimeHigh", 0xD6228000),
LEIntField("ServerTimeLow", 0x1C4EF94),
LEShortField("ServerTimeZone", 0x3c),
ByteField("EncryptionKeyLength", 0),
LEShortField("ByteCount", 16),
StrNullField("DomainName", "WORKGROUP"),
StrNullField("ServerName", "RMFF1")]
# Session Setup AndX Request
class SMBSession_Setup_AndX_Request(Packet):
name = "Session Setup AndX Request"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x73, {0x73: "SMB_COM_SESSION_SETUP_ANDX"}), # noqa: E501
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x18),
LEShortField("Flags2", 0x0001),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 13),
ByteEnumField("AndXCommand", 0x75, {0x75: "SMB_COM_TREE_CONNECT_ANDX"}), # noqa: E501
ByteField("Reserved2", 0),
LEShortField("AndXOffset", 96),
LEShortField("MaxBufferS", 2920),
LEShortField("MaxMPXCount", 50),
LEShortField("VCNumber", 0),
LEIntField("SessionKey", 0),
LEFieldLenField("ANSIPasswordLength", None, "ANSIPassword"),
LEShortField("UnicodePasswordLength", 0),
LEIntField("Reserved3", 0),
LEShortField("ServerCapabilities", 0x05),
BitField("UnixExtensions", 0, 1),
BitField("Reserved4", 0, 7),
BitField("ExtendedSecurity", 0, 1),
BitField("CompBulk", 0, 2),
BitField("Reserved5", 0, 5),
LEShortField("ByteCount", 35),
StrLenField("ANSIPassword", "Pass", length_from=lambda x: x.ANSIPasswordLength), # noqa: E501
StrNullField("Account", "GUEST"),
StrNullField("PrimaryDomain", ""),
StrNullField("NativeOS", "Windows 4.0"),
StrNullField("NativeLanManager", "Windows 4.0"),
ByteField("WordCount2", 4),
ByteEnumField("AndXCommand2", 0xFF, {0xFF: "SMB_COM_NONE"}),
ByteField("Reserved6", 0),
LEShortField("AndXOffset2", 0),
LEShortField("Flags3", 0x2),
LEShortField("PasswordLength", 0x1),
LEShortField("ByteCount2", 18),
ByteField("Password", 0),
StrNullField("Path", "\\\\WIN2K\\IPC$"),
StrNullField("Service", "IPC")]
# Session Setup AndX Response
class SMBSession_Setup_AndX_Response(Packet):
name = "Session Setup AndX Response"
fields_desc = [StrFixedLenField("Start", b"\xffSMB", 4),
ByteEnumField("Command", 0x73, {0x73: "SMB_COM_SESSION_SETUP_ANDX"}), # noqa: E501
ByteField("Error_Class", 0),
ByteField("Reserved", 0),
LEShortField("Error_Code", 0),
ByteField("Flags", 0x90),
LEShortField("Flags2", 0x1001),
LEShortField("PIDHigh", 0x0000),
LELongField("Signature", 0x0),
LEShortField("Unused", 0x0),
LEShortField("TID", 0),
LEShortField("PID", 1),
LEShortField("UID", 0),
LEShortField("MID", 2),
ByteField("WordCount", 3),
ByteEnumField("AndXCommand", 0x75, {0x75: "SMB_COM_TREE_CONNECT_ANDX"}), # noqa: E501
ByteField("Reserved2", 0),
LEShortField("AndXOffset", 66),
LEShortField("Action", 0),
LEShortField("ByteCount", 25),
StrNullField("NativeOS", "Windows 4.0"),
StrNullField("NativeLanManager", "Windows 4.0"),
StrNullField("PrimaryDomain", ""),
ByteField("WordCount2", 3),
ByteEnumField("AndXCommand2", 0xFF, {0xFF: "SMB_COM_NONE"}),
ByteField("Reserved3", 0),
LEShortField("AndXOffset2", 80),
LEShortField("OptionalSupport", 0x01),
LEShortField("ByteCount2", 5),
StrNullField("Service", "IPC"),
StrNullField("NativeFileSystem", "")]
bind_layers(NBTSession, SMBNegociate_Protocol_Request_Header, )
bind_layers(NBTSession, SMBNegociate_Protocol_Response_Advanced_Security, ExtendedSecurity=1) # noqa: E501
bind_layers(NBTSession, SMBNegociate_Protocol_Response_No_Security, ExtendedSecurity=0, EncryptionKeyLength=8) # noqa: E501
bind_layers(NBTSession, SMBNegociate_Protocol_Response_No_Security_No_Key, ExtendedSecurity=0, EncryptionKeyLength=0) # noqa: E501
bind_layers(NBTSession, SMBSession_Setup_AndX_Request, )
bind_layers(NBTSession, SMBSession_Setup_AndX_Response, )
bind_layers(SMBNegociate_Protocol_Request_Header, SMBNegociate_Protocol_Request_Tail, ) # noqa: E501
bind_layers(SMBNegociate_Protocol_Request_Tail, SMBNegociate_Protocol_Request_Tail, ) # noqa: E501
| gpl-2.0 | -3,241,717,524,491,622,000 | 48.552 | 236 | 0.528684 | false |
DinoTools/python-overpy | overpy/__init__.py | 1 | 55210 | from collections import OrderedDict
from datetime import datetime
from decimal import Decimal
from urllib.request import urlopen
from urllib.error import HTTPError
from xml.sax import handler, make_parser
import xml.etree.ElementTree
import json
import re
import time
from typing import Any, Callable, ClassVar, Dict, List, NoReturn, Optional, Tuple, Type, TypeVar, Union
from overpy import exception
# Ignore flake8 F401 warning for unused vars
from overpy.__about__ import ( # noqa: F401
__author__, __copyright__, __email__, __license__, __summary__, __title__,
__uri__, __version__
)
ElementTypeVar = TypeVar("ElementTypeVar", bound="Element")
XML_PARSER_DOM = 1
XML_PARSER_SAX = 2
# Try to convert some common attributes
# http://wiki.openstreetmap.org/wiki/Elements#Common_attributes
GLOBAL_ATTRIBUTE_MODIFIERS: Dict[str, Callable] = {
"changeset": int,
"timestamp": lambda ts: datetime.strptime(ts, "%Y-%m-%dT%H:%M:%SZ"),
"uid": int,
"version": int,
"visible": lambda v: v.lower() == "true"
}
def is_valid_type(
element: Union["Area", "Node", "Relation", "Way"],
cls: Type[Union["Area", "Element", "Node", "Relation", "Way"]]) -> bool:
"""
Test if an element is of a given type.
:param element: The element instance to test
:param cls: The element class to test
:return: False or True
"""
return isinstance(element, cls) and element.id is not None
class Overpass:
"""
Class to access the Overpass API
:cvar default_max_retry_count: Global max number of retries (Default: 0)
:cvar default_read_chunk_size: Max size of each chunk read from the server response
:cvar default_retry_timeout: Global time to wait between tries (Default: 1.0s)
:cvar default_url: Default URL of the Overpass server
"""
default_max_retry_count: ClassVar[int] = 0
default_read_chunk_size: ClassVar[int] = 4096
default_retry_timeout: ClassVar[float] = 1.0
default_url: ClassVar[str] = "http://overpass-api.de/api/interpreter"
def __init__(
self,
read_chunk_size: Optional[int] = None,
url: Optional[str] = None,
xml_parser: int = XML_PARSER_SAX,
            max_retry_count: Optional[int] = None,
            retry_timeout: Optional[float] = None):
"""
:param read_chunk_size: Max size of each chunk read from the server response
:param url: Optional URL of the Overpass server. Defaults to http://overpass-api.de/api/interpreter
:param xml_parser: The xml parser to use
:param max_retry_count: Max number of retries (Default: default_max_retry_count)
:param retry_timeout: Time to wait between tries (Default: default_retry_timeout)
"""
self.url = self.default_url
if url is not None:
self.url = url
self._regex_extract_error_msg = re.compile(br"\<p\>(?P<msg>\<strong\s.*?)\</p\>")
self._regex_remove_tag = re.compile(b"<[^>]*?>")
if read_chunk_size is None:
read_chunk_size = self.default_read_chunk_size
self.read_chunk_size = read_chunk_size
if max_retry_count is None:
max_retry_count = self.default_max_retry_count
self.max_retry_count = max_retry_count
if retry_timeout is None:
retry_timeout = self.default_retry_timeout
self.retry_timeout = retry_timeout
self.xml_parser = xml_parser
@staticmethod
def _handle_remark_msg(msg: str) -> NoReturn:
"""
Try to parse the message provided with the remark tag or element.
:param msg: The message
:raises overpy.exception.OverpassRuntimeError: If message starts with 'runtime error:'
:raises overpy.exception.OverpassRuntimeRemark: If message starts with 'runtime remark:'
:raises overpy.exception.OverpassUnknownError: If we are unable to identify the error
"""
msg = msg.strip()
if msg.startswith("runtime error:"):
raise exception.OverpassRuntimeError(msg=msg)
elif msg.startswith("runtime remark:"):
raise exception.OverpassRuntimeRemark(msg=msg)
raise exception.OverpassUnknownError(msg=msg)
def query(self, query: Union[bytes, str]) -> "Result":
"""
Query the Overpass API
:param query: The query string in Overpass QL
:return: The parsed result
"""
if not isinstance(query, bytes):
query = query.encode("utf-8")
retry_num: int = 0
retry_exceptions: List[exception.OverPyException] = []
        do_retry: bool = self.max_retry_count > 0
while retry_num <= self.max_retry_count:
if retry_num > 0:
time.sleep(self.retry_timeout)
retry_num += 1
try:
f = urlopen(self.url, query)
except HTTPError as e:
f = e
response = f.read(self.read_chunk_size)
while True:
data = f.read(self.read_chunk_size)
if len(data) == 0:
break
response = response + data
f.close()
current_exception: exception.OverPyException
if f.code == 200:
content_type = f.getheader("Content-Type")
if content_type == "application/json":
return self.parse_json(response)
if content_type == "application/osm3s+xml":
return self.parse_xml(response)
current_exception = exception.OverpassUnknownContentType(content_type)
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
if f.code == 400:
msgs: List[str] = []
for msg_raw in self._regex_extract_error_msg.finditer(response):
msg_clean_bytes = self._regex_remove_tag.sub(b"", msg_raw.group("msg"))
try:
msg = msg_clean_bytes.decode("utf-8")
except UnicodeDecodeError:
msg = repr(msg_clean_bytes)
msgs.append(msg)
current_exception = exception.OverpassBadRequest(
query,
msgs=msgs
)
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
if f.code == 429:
current_exception = exception.OverpassTooManyRequests()
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
if f.code == 504:
current_exception = exception.OverpassGatewayTimeout()
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
current_exception = exception.OverpassUnknownHTTPStatusCode(f.code)
if not do_retry:
raise current_exception
retry_exceptions.append(current_exception)
continue
raise exception.MaxRetriesReached(retry_count=retry_num, exceptions=retry_exceptions)
def parse_json(self, data: Union[bytes, str], encoding: str = "utf-8") -> "Result":
"""
Parse raw response from Overpass service.
:param data: Raw JSON Data
:param encoding: Encoding to decode byte string
:return: Result object
"""
if isinstance(data, bytes):
data = data.decode(encoding)
data_parsed: dict = json.loads(data, parse_float=Decimal)
if "remark" in data_parsed:
self._handle_remark_msg(msg=data_parsed.get("remark"))
return Result.from_json(data_parsed, api=self)
def parse_xml(self, data: Union[bytes, str], encoding: str = "utf-8", parser: Optional[int] = None):
"""
:param data: Raw XML Data
:param encoding: Encoding to decode byte string
:param parser: The XML parser to use
:return: Result object
"""
if parser is None:
parser = self.xml_parser
if isinstance(data, bytes):
data = data.decode(encoding)
m = re.compile("<remark>(?P<msg>[^<>]*)</remark>").search(data)
if m:
self._handle_remark_msg(m.group("msg"))
return Result.from_xml(data, api=self, parser=parser)
class Result:
"""
Class to handle the result.
"""
def __init__(
self,
elements: Optional[List[Union["Area", "Node", "Relation", "Way"]]] = None,
api: Optional[Overpass] = None):
"""
:param elements: List of elements to initialize the result with
:param api: The API object to load additional resources and elements
"""
if elements is None:
elements = []
self._areas: Dict[int, Union["Area", "Node", "Relation", "Way"]] = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Area)
)
self._nodes = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Node)
)
self._ways = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Way)
)
self._relations = OrderedDict(
(element.id, element) for element in elements if is_valid_type(element, Relation)
)
self._class_collection_map: Dict[Any, Any] = {
Node: self._nodes,
Way: self._ways,
Relation: self._relations,
Area: self._areas
}
self.api = api
def expand(self, other: "Result"):
"""
        Add all elements from another result to the list of elements of this result object.
It is used by the auto resolve feature.
:param other: Expand the result with the elements from this result.
:raises ValueError: If provided parameter is not instance of :class:`overpy.Result`
"""
if not isinstance(other, Result):
raise ValueError("Provided argument has to be instance of overpy:Result()")
other_collection_map: Dict[Type["Element"], List[Union["Area", "Node", "Relation", "Way"]]] = {
Area: other.areas,
Node: other.nodes,
Relation: other.relations,
Way: other.ways
}
for element_type, own_collection in self._class_collection_map.items():
for element in other_collection_map[element_type]:
if is_valid_type(element, element_type) and element.id not in own_collection:
own_collection[element.id] = element
def append(self, element: Union["Area", "Node", "Relation", "Way"]):
"""
Append a new element to the result.
:param element: The element to append
"""
if is_valid_type(element, Element):
self._class_collection_map[element.__class__].setdefault(element.id, element)
def get_elements(
self,
filter_cls: Type[ElementTypeVar],
elem_id: Optional[int] = None) -> List[ElementTypeVar]:
"""
Get a list of elements from the result and filter the element type by a class.
:param filter_cls:
:param elem_id: ID of the object
:return: List of available elements
"""
result: List[ElementTypeVar] = []
if elem_id is not None:
try:
result = [self._class_collection_map[filter_cls][elem_id]]
except KeyError:
result = []
else:
for e in self._class_collection_map[filter_cls].values():
result.append(e)
return result
def get_ids(
self,
filter_cls: Type[Union["Area", "Node", "Relation", "Way"]]) -> List[int]:
"""
Get all Element IDs
:param filter_cls: Only IDs of elements with this type
:return: List of IDs
"""
return list(self._class_collection_map[filter_cls].keys())
def get_node_ids(self) -> List[int]:
return self.get_ids(filter_cls=Node)
def get_way_ids(self) -> List[int]:
return self.get_ids(filter_cls=Way)
def get_relation_ids(self) -> List[int]:
return self.get_ids(filter_cls=Relation)
def get_area_ids(self) -> List[int]:
return self.get_ids(filter_cls=Area)
@classmethod
def from_json(cls, data: dict, api: Optional[Overpass] = None) -> "Result":
"""
Create a new instance and load data from json object.
:param data: JSON data returned by the Overpass API
:param api:
:return: New instance of Result object
"""
result = cls(api=api)
elem_cls: Type[Union["Area", "Node", "Relation", "Way"]]
for elem_cls in [Node, Way, Relation, Area]:
for element in data.get("elements", []):
e_type = element.get("type")
if hasattr(e_type, "lower") and e_type.lower() == elem_cls._type_value:
result.append(elem_cls.from_json(element, result=result))
return result
@classmethod
def from_xml(
cls,
data: Union[str, xml.etree.ElementTree.Element],
api: Optional[Overpass] = None,
parser: Optional[int] = None) -> "Result":
"""
Create a new instance and load data from xml data or object.
.. note::
            If parser is set to None, the function tries to find the best parser.
By default the SAX parser is chosen if a string is provided as data.
The parser is set to DOM if an xml.etree.ElementTree.Element is provided as data value.
:param data: Root element
:param api: The instance to query additional information if required.
:param parser: Specify the parser to use(DOM or SAX)(Default: None = autodetect, defaults to SAX)
:return: New instance of Result object
"""
if parser is None:
if isinstance(data, str):
parser = XML_PARSER_SAX
else:
parser = XML_PARSER_DOM
result = cls(api=api)
if parser == XML_PARSER_DOM:
import xml.etree.ElementTree as ET
if isinstance(data, str):
root = ET.fromstring(data)
elif isinstance(data, ET.Element):
root = data
else:
raise exception.OverPyException("Unable to detect data type.")
elem_cls: Type[Union["Area", "Node", "Relation", "Way"]]
for elem_cls in [Node, Way, Relation, Area]:
for child in root:
if child.tag.lower() == elem_cls._type_value:
result.append(elem_cls.from_xml(child, result=result))
elif parser == XML_PARSER_SAX:
from io import StringIO
if not isinstance(data, str):
raise ValueError("data must be of type str if using the SAX parser")
source = StringIO(data)
sax_handler = OSMSAXHandler(result)
sax_parser = make_parser()
sax_parser.setContentHandler(sax_handler)
sax_parser.parse(source)
else:
# ToDo: better exception
raise Exception("Unknown XML parser")
return result
def get_area(self, area_id: int, resolve_missing: bool = False) -> "Area":
"""
Get an area by its ID.
:param area_id: The area ID
:param resolve_missing: Query the Overpass API if the area is missing in the result set.
:return: The area
        :raises overpy.exception.DataIncomplete: The requested area is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the area can't be resolved.
"""
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing area is disabled")
query = ("\n"
"[out:json];\n"
"area({area_id});\n"
"out body;\n"
)
query = query.format(
area_id=area_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
areas = self.get_areas(area_id=area_id)
if len(areas) == 0:
raise exception.DataIncomplete("Unable to resolve requested areas")
return areas[0]
def get_areas(self, area_id: Optional[int] = None) -> List["Area"]:
"""
Alias for get_elements() but filter the result by Area
:param area_id: The Id of the area
:return: List of elements
"""
return self.get_elements(Area, elem_id=area_id)
def get_node(self, node_id: int, resolve_missing: bool = False) -> "Node":
"""
Get a node by its ID.
:param node_id: The node ID
:param resolve_missing: Query the Overpass API if the node is missing in the result set.
:return: The node
        :raises overpy.exception.DataIncomplete: The requested node is not available in the result cache.
        :raises overpy.exception.DataIncomplete: If resolve_missing is True and the node can't be resolved.
"""
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
query = ("\n"
"[out:json];\n"
"node({node_id});\n"
"out body;\n"
)
query = query.format(
node_id=node_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
nodes = self.get_nodes(node_id=node_id)
if len(nodes) == 0:
raise exception.DataIncomplete("Unable to resolve all nodes")
return nodes[0]
def get_nodes(self, node_id: Optional[int] = None) -> List["Node"]:
"""
Alias for get_elements() but filter the result by Node()
:param node_id: The Id of the node
:type node_id: Integer
:return: List of elements
"""
return self.get_elements(Node, elem_id=node_id)
def get_relation(self, rel_id: int, resolve_missing: bool = False) -> "Relation":
"""
Get a relation by its ID.
:param rel_id: The relation ID
:param resolve_missing: Query the Overpass API if the relation is missing in the result set.
:return: The relation
:raises overpy.exception.DataIncomplete: The requested relation is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the relation can't be resolved.
"""
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing relations is disabled")
query = ("\n"
"[out:json];\n"
"relation({relation_id});\n"
"out body;\n"
)
query = query.format(
relation_id=rel_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
relations = self.get_relations(rel_id=rel_id)
if len(relations) == 0:
raise exception.DataIncomplete("Unable to resolve requested reference")
return relations[0]
    def get_relations(self, rel_id: Optional[int] = None) -> List["Relation"]:
"""
Alias for get_elements() but filter the result by Relation
:param rel_id: Id of the relation
:return: List of elements
"""
return self.get_elements(Relation, elem_id=rel_id)
def get_way(self, way_id: int, resolve_missing: bool = False) -> "Way":
"""
Get a way by its ID.
:param way_id: The way ID
:param resolve_missing: Query the Overpass API if the way is missing in the result set.
:return: The way
:raises overpy.exception.DataIncomplete: The requested way is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and the way can't be resolved.
"""
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
if resolve_missing is False:
raise exception.DataIncomplete("Resolve missing way is disabled")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"out body;\n"
)
query = query.format(
way_id=way_id
)
tmp_result = self.api.query(query)
self.expand(tmp_result)
ways = self.get_ways(way_id=way_id)
if len(ways) == 0:
raise exception.DataIncomplete("Unable to resolve requested way")
return ways[0]
def get_ways(self, way_id: Optional[int] = None) -> List["Way"]:
"""
Alias for get_elements() but filter the result by Way
:param way_id: The Id of the way
:return: List of elements
"""
return self.get_elements(Way, elem_id=way_id)
area_ids = property(get_area_ids)
areas = property(get_areas)
node_ids = property(get_node_ids)
nodes = property(get_nodes)
relation_ids = property(get_relation_ids)
relations = property(get_relations)
way_ids = property(get_way_ids)
ways = property(get_ways)
class Element:
"""
Base element
"""
_type_value: str
def __init__(self, attributes: Optional[dict] = None, result: Optional[Result] = None, tags: Optional[Dict] = None):
"""
:param attributes: Additional attributes
:param result: The result object this element belongs to
:param tags: List of tags
"""
self._result = result
self.attributes = attributes
# ToDo: Add option to modify attribute modifiers
attribute_modifiers: Dict[str, Callable] = dict(GLOBAL_ATTRIBUTE_MODIFIERS.items())
for n, m in attribute_modifiers.items():
if n in self.attributes:
self.attributes[n] = m(self.attributes[n])
self.id: int
self.tags = tags
@classmethod
def get_center_from_json(cls, data: dict) -> Tuple[Decimal, Decimal]:
"""
Get center information from json data
:param data: json data
:return: tuple with two elements: lat and lon
"""
center_lat = None
center_lon = None
center = data.get("center")
if isinstance(center, dict):
center_lat = center.get("lat")
center_lon = center.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
center_lat = Decimal(center_lat)
center_lon = Decimal(center_lon)
return center_lat, center_lon
@classmethod
def get_center_from_xml_dom(cls, sub_child: xml.etree.ElementTree.Element) -> Tuple[Decimal, Decimal]:
center_lat_str: str = sub_child.attrib.get("lat")
center_lon_str: str = sub_child.attrib.get("lon")
if center_lat_str is None or center_lon_str is None:
raise ValueError("Unable to get lat or lon of way center.")
center_lat = Decimal(center_lat_str)
center_lon = Decimal(center_lon_str)
return center_lat, center_lon
@classmethod
def from_json(cls: Type[ElementTypeVar], data: dict, result: Optional[Result] = None) -> ElementTypeVar:
"""
Create new Element() from json data
:param data:
:param result:
:return:
"""
raise NotImplementedError
@classmethod
def from_xml(
cls: Type[ElementTypeVar],
child: xml.etree.ElementTree.Element,
result: Optional[Result] = None) -> ElementTypeVar:
"""
Create new Element() element from XML data
"""
raise NotImplementedError
class Area(Element):
"""
Class to represent an element of type area
"""
_type_value = "area"
def __init__(self, area_id: Optional[int] = None, **kwargs):
"""
:param area_id: Id of the area element
:param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
#: The id of the way
self.id = area_id
def __repr__(self) -> str:
return f"<overpy.Area id={self.id}>"
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Area":
"""
Create new Area element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
        :return: New instance of Area
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
area_id = data.get("id")
attributes = {}
ignore = ["id", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(area_id=area_id, attributes=attributes, tags=tags, result=result)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Area":
"""
        Create new area element from XML data
:param child: XML node to be parsed
:param result: The result this node belongs to
        :return: New Area object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
area_id_str: Optional[str] = child.attrib.get("id")
area_id: Optional[int] = None
if area_id_str is not None:
area_id = int(area_id_str)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(area_id=area_id, attributes=attributes, tags=tags, result=result)
class Node(Element):
"""
Class to represent an element of type node
"""
_type_value = "node"
def __init__(
self,
node_id: Optional[int] = None,
lat: Optional[Union[Decimal, float]] = None,
lon: Optional[Union[Decimal, float]] = None,
**kwargs):
"""
:param lat: Latitude
:param lon: Longitude
:param node_id: Id of the node element
:param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
self.id = node_id
self.lat = lat
self.lon = lon
def __repr__(self) -> str:
return f"<overpy.Node id={self.id} lat={self.lat} lon={self.lon}>"
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Node":
"""
Create new Node element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of Node
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
node_id = data.get("id")
lat = data.get("lat")
lon = data.get("lon")
attributes = {}
ignore = ["type", "id", "lat", "lon", "tags"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Node":
"""
        Create new node element from XML data
:param child: XML node to be parsed
:param result: The result this node belongs to
        :return: New Node object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
node_id: Optional[int] = None
node_id_str: Optional[str] = child.attrib.get("id")
if node_id_str is not None:
node_id = int(node_id_str)
lat: Optional[Decimal] = None
lat_str: Optional[str] = child.attrib.get("lat")
if lat_str is not None:
lat = Decimal(lat_str)
lon: Optional[Decimal] = None
lon_str: Optional[str] = child.attrib.get("lon")
if lon_str is not None:
lon = Decimal(lon_str)
attributes = {}
ignore = ["id", "lat", "lon"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(node_id=node_id, lat=lat, lon=lon, tags=tags, attributes=attributes, result=result)
class Way(Element):
"""
Class to represent an element of type way
"""
_type_value = "way"
def __init__(
self,
way_id: Optional[int] = None,
center_lat: Optional[Union[Decimal, float]] = None,
center_lon: Optional[Union[Decimal, float]] = None,
node_ids: Optional[Union[List[int], Tuple[int]]] = None,
**kwargs):
"""
:param node_ids: List of node IDs
:param way_id: Id of the way element
:param kwargs: Additional arguments are passed directly to the parent class
"""
Element.__init__(self, **kwargs)
#: The id of the way
self.id = way_id
#: List of Ids of the associated nodes
self._node_ids = node_ids
#: The lat/lon of the center of the way (optional depending on query)
self.center_lat = center_lat
self.center_lon = center_lon
def __repr__(self):
return f"<overpy.Way id={self.id} nodes={self._node_ids}>"
@property
def nodes(self) -> List[Node]:
"""
List of nodes associated with the way.
"""
return self.get_nodes()
def get_nodes(self, resolve_missing: bool = False) -> List[Node]:
"""
Get the nodes defining the geometry of the way
:param resolve_missing: Try to resolve missing nodes.
:return: List of nodes
:raises overpy.exception.DataIncomplete: At least one referenced node is not available in the result cache.
:raises overpy.exception.DataIncomplete: If resolve_missing is True and at least one node can't be resolved.
"""
result = []
resolved = False
for node_id in self._node_ids:
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is not None:
result.append(node)
continue
if not resolve_missing:
raise exception.DataIncomplete("Resolve missing nodes is disabled")
# We tried to resolve the data but some nodes are still missing
if resolved:
raise exception.DataIncomplete("Unable to resolve all nodes")
query = ("\n"
"[out:json];\n"
"way({way_id});\n"
"node(w);\n"
"out body;\n"
)
query = query.format(
way_id=self.id
)
tmp_result = self._result.api.query(query)
self._result.expand(tmp_result)
resolved = True
try:
node = self._result.get_node(node_id)
except exception.DataIncomplete:
node = None
if node is None:
raise exception.DataIncomplete("Unable to resolve all nodes")
result.append(node)
return result
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Way":
"""
Create new Way element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of Way
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
way_id = data.get("id")
node_ids = data.get("nodes")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
attributes = {}
ignore = ["center", "id", "nodes", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
node_ids=node_ids,
tags=tags,
result=result,
way_id=way_id
)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Way":
"""
Create new way element from XML data
:param child: XML node to be parsed
:param result: The result this node belongs to
        :return: New Way object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If the ref attribute of the xml node is not provided
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
node_ids = []
center_lat = None
center_lon = None
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "nd":
ref_id_str = sub_child.attrib.get("ref")
if ref_id_str is None:
raise ValueError("Unable to find required ref value.")
ref_id: int = int(ref_id_str)
node_ids.append(ref_id)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
way_id: Optional[int] = None
way_id_str: Optional[str] = child.attrib.get("id")
if way_id_str is not None:
way_id = int(way_id_str)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(way_id=way_id, center_lat=center_lat, center_lon=center_lon,
attributes=attributes, node_ids=node_ids, tags=tags, result=result)
class Relation(Element):
"""
Class to represent an element of type relation
"""
_type_value = "relation"
def __init__(
self,
rel_id: Optional[int] = None,
center_lat: Optional[Union[Decimal, float]] = None,
center_lon: Optional[Union[Decimal, float]] = None,
members: Optional[List["RelationMember"]] = None,
**kwargs):
"""
:param members:
:param rel_id: Id of the relation element
:param kwargs:
:return:
"""
Element.__init__(self, **kwargs)
self.id = rel_id
self.members = members
#: The lat/lon of the center of the way (optional depending on query)
self.center_lat = center_lat
self.center_lon = center_lon
def __repr__(self):
return f"<overpy.Relation id={self.id}>"
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "Relation":
"""
Create new Relation element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of Relation
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
tags = data.get("tags", {})
rel_id = data.get("id")
(center_lat, center_lon) = cls.get_center_from_json(data=data)
members = []
supported_members = [RelationNode, RelationWay, RelationRelation]
for member in data.get("members", []):
type_value = member.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_json(
member,
result=result
)
)
attributes = {}
ignore = ["id", "members", "tags", "type"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
)
@classmethod
def from_xml(cls, child: xml.etree.ElementTree.Element, result: Optional[Result] = None) -> "Relation":
"""
        Create new relation element from XML data
:param child: XML node to be parsed
:param result: The result this node belongs to
        :return: New Relation object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
:raises ValueError: If a tag doesn't have a name
"""
if child.tag.lower() != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
tags = {}
members = []
center_lat = None
center_lon = None
supported_members = [RelationNode, RelationWay, RelationRelation, RelationArea]
for sub_child in child:
if sub_child.tag.lower() == "tag":
name = sub_child.attrib.get("k")
if name is None:
raise ValueError("Tag without name/key.")
value = sub_child.attrib.get("v")
tags[name] = value
if sub_child.tag.lower() == "member":
type_value = sub_child.attrib.get("type")
for member_cls in supported_members:
if member_cls._type_value == type_value:
members.append(
member_cls.from_xml(
sub_child,
result=result
)
)
if sub_child.tag.lower() == "center":
(center_lat, center_lon) = cls.get_center_from_xml_dom(sub_child=sub_child)
rel_id: Optional[int] = None
rel_id_str: Optional[str] = child.attrib.get("id")
if rel_id_str is not None:
rel_id = int(rel_id_str)
attributes = {}
ignore = ["id"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
return cls(
rel_id=rel_id,
attributes=attributes,
center_lat=center_lat,
center_lon=center_lon,
members=members,
tags=tags,
result=result
)
class RelationMember:
"""
Base class to represent a member of a relation.
"""
_type_value: Optional[str] = None
def __init__(
self,
attributes: Optional[dict] = None,
geometry: Optional[List["RelationWayGeometryValue"]] = None,
ref: Optional[int] = None,
role: Optional[str] = None,
result: Optional[Result] = None):
"""
:param ref: Reference Id
:type ref: Integer
:param role: The role of the relation member
:type role: String
:param result:
"""
self.ref = ref
self._result = result
self.role = role
self.attributes = attributes
self.geometry = geometry
@classmethod
def from_json(cls, data: dict, result: Optional[Result] = None) -> "RelationMember":
"""
Create new RelationMember element from JSON data
:param data: Element data from JSON
:param result: The result this element belongs to
:return: New instance of RelationMember
:raises overpy.exception.ElementDataWrongType: If type value of the passed JSON data does not match.
"""
if data.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=data.get("type")
)
ref = data.get("ref")
role = data.get("role")
attributes = {}
ignore = ["geometry", "type", "ref", "role"]
for n, v in data.items():
if n in ignore:
continue
attributes[n] = v
geometry = data.get("geometry")
if isinstance(geometry, list):
geometry_orig = geometry
geometry = []
for v in geometry_orig:
geometry.append(
RelationWayGeometryValue(
lat=v.get("lat"),
lon=v.get("lon")
)
)
else:
geometry = None
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
)
@classmethod
def from_xml(
cls,
child: xml.etree.ElementTree.Element,
result: Optional[Result] = None) -> "RelationMember":
"""
Create new RelationMember from XML data
:param child: XML node to be parsed
:param result: The result this element belongs to
        :return: New relation member object
:raises overpy.exception.ElementDataWrongType: If name of the xml child node doesn't match
"""
if child.attrib.get("type") != cls._type_value:
raise exception.ElementDataWrongType(
type_expected=cls._type_value,
type_provided=child.tag.lower()
)
ref: Optional[int] = None
ref_str: Optional[str] = child.attrib.get("ref")
if ref_str is not None:
ref = int(ref_str)
role: Optional[str] = child.attrib.get("role")
attributes = {}
ignore = ["geometry", "ref", "role", "type"]
for n, v in child.attrib.items():
if n in ignore:
continue
attributes[n] = v
geometry = None
for sub_child in child:
if sub_child.tag.lower() == "nd":
if geometry is None:
geometry = []
geometry.append(
RelationWayGeometryValue(
lat=Decimal(sub_child.attrib["lat"]),
lon=Decimal(sub_child.attrib["lon"])
)
)
return cls(
attributes=attributes,
geometry=geometry,
ref=ref,
role=role,
result=result
)
class RelationNode(RelationMember):
_type_value = "node"
def resolve(self, resolve_missing: bool = False) -> Node:
return self._result.get_node(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationNode ref={self.ref} role={self.role}>"
class RelationWay(RelationMember):
_type_value = "way"
def resolve(self, resolve_missing: bool = False) -> Way:
return self._result.get_way(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationWay ref={self.ref} role={self.role}>"
class RelationWayGeometryValue:
def __init__(self, lat: Union[Decimal, float], lon: Union[Decimal, float]):
self.lat = lat
self.lon = lon
def __repr__(self):
return f"<overpy.RelationWayGeometryValue lat={self.lat} lon={self.lon}>"
class RelationRelation(RelationMember):
_type_value = "relation"
def resolve(self, resolve_missing: bool = False) -> Relation:
return self._result.get_relation(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationRelation ref={self.ref} role={self.role}>"
class RelationArea(RelationMember):
_type_value = "area"
def resolve(self, resolve_missing: bool = False) -> Area:
return self._result.get_area(self.ref, resolve_missing=resolve_missing)
def __repr__(self):
return f"<overpy.RelationArea ref={self.ref} role={self.role}>"
class OSMSAXHandler(handler.ContentHandler):
"""
SAX parser for Overpass XML response.
"""
#: Tuple of opening elements to ignore
ignore_start: ClassVar = ('osm', 'meta', 'note', 'bounds', 'remark')
#: Tuple of closing elements to ignore
ignore_end: ClassVar = ('osm', 'meta', 'note', 'bounds', 'remark', 'tag', 'nd', 'center')
def __init__(self, result: Result):
"""
:param result: Append results to this result set.
"""
handler.ContentHandler.__init__(self)
self._result = result
self._curr: Dict[str, Any] = {}
#: Current relation member object
self.cur_relation_member: Optional[RelationMember] = None
def startElement(self, name: str, attrs: dict):
"""
Handle opening elements.
:param name: Name of the element
:param attrs: Attributes of the element
"""
if name in self.ignore_start:
return
try:
handler = getattr(self, '_handle_start_%s' % name)
except AttributeError:
raise KeyError("Unknown element start '%s'" % name)
handler(attrs)
def endElement(self, name: str):
"""
Handle closing elements
:param name: Name of the element
"""
if name in self.ignore_end:
return
try:
handler = getattr(self, '_handle_end_%s' % name)
except AttributeError:
raise KeyError("Unknown element end '%s'" % name)
handler()
def _handle_start_center(self, attrs: dict):
"""
Handle opening center element
:param attrs: Attributes of the element
:type attrs: Dict
"""
center_lat = attrs.get("lat")
center_lon = attrs.get("lon")
if center_lat is None or center_lon is None:
raise ValueError("Unable to get lat or lon of way center.")
self._curr["center_lat"] = Decimal(center_lat)
self._curr["center_lon"] = Decimal(center_lon)
def _handle_start_tag(self, attrs: dict):
"""
Handle opening tag element
:param attrs: Attributes of the element
"""
try:
tag_key = attrs['k']
except KeyError:
raise ValueError("Tag without name/key.")
self._curr['tags'][tag_key] = attrs.get('v')
def _handle_start_node(self, attrs: dict):
"""
Handle opening node element
:param attrs: Attributes of the element
"""
self._curr = {
'attributes': dict(attrs),
'lat': None,
'lon': None,
'node_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['node_id'] = int(attrs['id'])
del self._curr['attributes']['id']
if attrs.get('lat', None) is not None:
self._curr['lat'] = Decimal(attrs['lat'])
del self._curr['attributes']['lat']
if attrs.get('lon', None) is not None:
self._curr['lon'] = Decimal(attrs['lon'])
del self._curr['attributes']['lon']
def _handle_end_node(self):
"""
Handle closing node element
"""
self._result.append(Node(result=self._result, **self._curr))
self._curr = {}
def _handle_start_way(self, attrs: dict):
"""
Handle opening way element
:param attrs: Attributes of the element
"""
self._curr = {
'center_lat': None,
'center_lon': None,
'attributes': dict(attrs),
'node_ids': [],
'tags': {},
'way_id': None
}
if attrs.get('id', None) is not None:
self._curr['way_id'] = int(attrs['id'])
del self._curr['attributes']['id']
def _handle_end_way(self):
"""
Handle closing way element
"""
self._result.append(Way(result=self._result, **self._curr))
self._curr = {}
def _handle_start_area(self, attrs: dict):
"""
Handle opening area element
:param attrs: Attributes of the element
"""
self._curr = {
'attributes': dict(attrs),
'tags': {},
'area_id': None
}
if attrs.get('id', None) is not None:
self._curr['area_id'] = int(attrs['id'])
del self._curr['attributes']['id']
def _handle_end_area(self):
"""
Handle closing area element
"""
self._result.append(Area(result=self._result, **self._curr))
self._curr = {}
def _handle_start_nd(self, attrs: dict):
"""
Handle opening nd element
:param attrs: Attributes of the element
"""
if isinstance(self.cur_relation_member, RelationWay):
if self.cur_relation_member.geometry is None:
self.cur_relation_member.geometry = []
self.cur_relation_member.geometry.append(
RelationWayGeometryValue(
lat=Decimal(attrs["lat"]),
lon=Decimal(attrs["lon"])
)
)
else:
try:
node_ref = attrs['ref']
except KeyError:
raise ValueError("Unable to find required ref value.")
self._curr['node_ids'].append(int(node_ref))
def _handle_start_relation(self, attrs: dict):
"""
Handle opening relation element
:param attrs: Attributes of the element
"""
self._curr = {
'attributes': dict(attrs),
'members': [],
'rel_id': None,
'tags': {}
}
if attrs.get('id', None) is not None:
self._curr['rel_id'] = int(attrs['id'])
del self._curr['attributes']['id']
def _handle_end_relation(self):
"""
Handle closing relation element
"""
self._result.append(Relation(result=self._result, **self._curr))
self._curr = {}
def _handle_start_member(self, attrs: dict):
"""
Handle opening member element
:param attrs: Attributes of the element
"""
params: Dict[str, Any] = {
# ToDo: Parse attributes
'attributes': {},
'ref': None,
'result': self._result,
'role': None
}
if attrs.get('ref', None):
params['ref'] = int(attrs['ref'])
if attrs.get('role', None):
params['role'] = attrs['role']
cls_map = {
"area": RelationArea,
"node": RelationNode,
"relation": RelationRelation,
"way": RelationWay
}
cls: Type[RelationMember] = cls_map.get(attrs["type"])
if cls is None:
raise ValueError("Undefined type for member: '%s'" % attrs['type'])
self.cur_relation_member = cls(**params)
self._curr['members'].append(self.cur_relation_member)
def _handle_end_member(self):
self.cur_relation_member = None
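# A minimal usage sketch, illustrative only: it assumes network access to the
# public endpoint configured in Overpass.default_url, and the Overpass QL query
# string below (a small bounding box near Bonn) is an example, not something
# defined by this module.
if __name__ == "__main__":  # pragma: no cover
    api = Overpass(max_retry_count=2, retry_timeout=5.0)
    # Fetch all drinking water fountains inside the bounding box.
    demo_result = api.query('node["amenity"="drinking_water"](50.74,7.17,50.75,7.18);out body;')
    for demo_node in demo_result.nodes:
        print(demo_node.id, demo_node.lat, demo_node.lon, demo_node.tags.get("amenity"))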
| mit | 2,301,919,963,291,948,800 | 32.95449 | 120 | 0.550498 | false |
techlib/wifinator | wifinator/aruba.py | 1 | 4679 | #!/usr/bin/python3 -tt
# -*- coding: utf-8 -*-
import re
from threading import Lock
from requests import Session, HTTPError
from time import time
from xml.etree.ElementTree import XML, ParseError
from requests.packages.urllib3.exceptions import InsecureRequestWarning
from requests.packages.urllib3 import disable_warnings
disable_warnings(InsecureRequestWarning)
class ArubaError(Exception):
"""Generic error related to communication with Aruba WiFi controllers."""
class Aruba(object):
# <url> ? command @@ timestamp & UIDARUBA=session-id
COMMAND_URL = 'https://{host}:4343/screens/cmnutil/execCommandReturnResult.xml'
# POST opcode, url, needxml, uid, passwd
LOGIN_URL = 'https://{host}:4343/screens/wms/wms.login'
def __init__(self, host, username, password):
"""Store address and credentials for later."""
self.host = host
self.username = username
self.password = password
self.session = Session()
self.login_url = self.LOGIN_URL.format(host=host)
self.command_url = self.COMMAND_URL.format(host=host)
def request(self, command):
s = self.session.cookies.get('SESSION', '')
p = '{0}@@{1}&UIDARUBA={2}'.format(command, int(time()), s)
r = self.session.get(self.command_url, verify=False, params=p)
# The controller shamelessly retains ASCII control characters and
# some users are able to inject them through their login names.
data = re.sub(b'[\x00-\x09\x11-\x12\x14-\x1f]',
lambda m: ('\\x%.2x' % m.group(0)[0]).encode('utf8'),
r.text.encode('utf8', 'xmlcharrefreplace'))
if data:
try:
return XML(data)
except ParseError:
raise ArubaError('Response is not a valid XML element')
def request_table(self, command):
r = self.request(command)
if r.find('t') is None:
raise ArubaError('Response does not contain a table')
return [[(c.text.strip() if c.text is not None else '') for c in row] \
for row in r.find('t')[1:]]
def request_dict(self, command):
return {row[0]: row[1] for row in self.request_table(command)}
def login(self):
if self.request('show roleinfo').find('data'):
return
r = self.session.post(self.login_url, verify=False, data={
'opcode': 'login',
'url': '/',
'needxml': '0',
'uid': self.username,
'passwd': self.password,
})
if 'Authentication complete' not in r.text:
raise ArubaError('Login failed')
def list_profiles(self):
"""List service profiles with SSID and Beacon settings."""
profiles = {}
for name in self.request_dict('show wlan ssid-profile'):
detail = self.request_dict('show wlan ssid-profile ' + name)
profiles[name] = {
'ssid': detail['ESSID'],
'active': detail['SSID enable'] == 'Enabled',
}
return profiles
def list_stations(self):
"""List client stations with MAC addresses and more."""
stations = {}
r = self.request_table('show station-table')
for mac, name, role, age, auth, ap, essid, phy, remote, profile in r:
stations[mac] = {
'mac': mac,
'name': name,
'role': role,
'age': age,
'auth': auth,
'ap': ap,
'essid': essid,
'phy': phy,
'remote': remote,
'profile': profile,
}
return stations
def essid_stats(self):
stats = {}
for station in self.list_stations().values():
essid = station['essid']
stats.setdefault(essid, 0)
stats[essid] += 1
return stats
def ap_stats(self):
stats = {}
for station in self.list_stations().values():
ap = station['ap']
stats.setdefault(ap, 0)
stats[ap] += 1
return stats
def edit_profile(self, profile, ssid, psk, active):
"""Adjust service profile. PSK is in plain text."""
self.request('wlan ssid-profile {0} essid {1}'.format(profile, ssid))
self.request('wlan ssid-profile {0} wpa-passphrase {1}'.format(profile, psk))
if active:
self.request('wlan ssid-profile {0} ssid-enable'.format(profile))
else:
self.request('wlan ssid-profile {0} no ssid-enable'.format(profile))
# vim:set sw=4 ts=4 et:
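# A minimal usage sketch, illustrative only: the controller address and the
# credentials are placeholder assumptions, and in practice this class is driven
# by the surrounding web application rather than run as a standalone script.
if __name__ == "__main__":
    controller = Aruba("aruba.example.org", "admin", "secret")
    controller.login()
    # Print each SSID profile together with how many stations currently use its ESSID.
    station_counts = controller.essid_stats()
    for profile_name, profile in controller.list_profiles().items():
        print("{0}: essid={1} active={2} stations={3}".format(
            profile_name, profile["ssid"], profile["active"],
            station_counts.get(profile["ssid"], 0)))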
| mit | 8,544,314,082,599,784,000 | 29.782895 | 85 | 0.563368 | false |
cdgriffith/Reusables | reusables/process_helpers.py | 1 | 4595 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Part of the Reusables package.
#
# Copyright (c) 2014-2020 - Chris Griffith - MIT License
import os
import sys
import subprocess
from multiprocessing import pool
from functools import partial
from reusables.shared_variables import *
__all__ = ["run", "run_in_pool"]
def run(
command, input=None, stdout=subprocess.PIPE, stderr=subprocess.PIPE, timeout=None, copy_local_env=False, **kwargs
):
"""
Cross platform compatible subprocess with CompletedProcess return.
No formatting or encoding is performed on the output of subprocess, so it's
output will appear the same on each version / interpreter as before.
.. code:: python
reusables.run('echo "hello world!', shell=True)
# CPython 3.6
# CompletedProcess(args='echo "hello world!', returncode=0,
# stdout=b'"hello world!\\r\\n', stderr=b'')
#
# PyPy 5.4 (Python 2.7.10)
# CompletedProcess(args='echo "hello world!', returncode=0L,
# stdout='"hello world!\\r\\n')
    Timeout is only usable in Python 3.x, as it was not implemented before then;
    a NotImplementedError will be raised if it is specified on a 2.x version of Python.
:param command: command to run, str if shell=True otherwise must be list
:param input: send something `communicate`
:param stdout: PIPE or None
:param stderr: PIPE or None
:param timeout: max time to wait for command to complete
:param copy_local_env: Use all current ENV vars in the subprocess as well
:param kwargs: additional arguments to pass to Popen
:return: CompletedProcess class
"""
if copy_local_env:
# Copy local env first and overwrite with anything manually specified
env = os.environ.copy()
env.update(kwargs.get("env", {}))
else:
env = kwargs.get("env")
if sys.version_info >= (3, 5):
return subprocess.run(command, input=input, stdout=stdout, stderr=stderr, timeout=timeout, env=env, **kwargs)
# Created here instead of root level as it should never need to be
# manually created or referenced
class CompletedProcess(object):
"""A backwards compatible near clone of subprocess.CompletedProcess"""
def __init__(self, args, returncode, stdout=None, stderr=None):
self.args = args
self.returncode = returncode
self.stdout = stdout
self.stderr = stderr
def __repr__(self):
args = [
"args={0!r}".format(self.args),
"returncode={0!r}".format(self.returncode),
"stdout={0!r}".format(self.stdout) if self.stdout else "",
"stderr={0!r}".format(self.stderr) if self.stderr else "",
]
return "{0}({1})".format(type(self).__name__, ", ".join(filter(None, args)))
def check_returncode(self):
if self.returncode:
if python_version < (2, 7):
raise subprocess.CalledProcessError(self.returncode, self.args)
raise subprocess.CalledProcessError(self.returncode, self.args, self.stdout)
proc = subprocess.Popen(command, stdout=stdout, stderr=stderr, env=env, **kwargs)
if PY3:
out, err = proc.communicate(input=input, timeout=timeout)
else:
if timeout:
raise NotImplementedError("Timeout is only available on Python 3")
out, err = proc.communicate(input=input)
return CompletedProcess(command, proc.returncode, out, err)
def run_in_pool(target, iterable, threaded=True, processes=4, asynchronous=False, target_kwargs=None):
""" Run a set of iterables to a function in a Threaded or MP Pool.
    .. code:: python
def func(a):
return a + a
reusables.run_in_pool(func, [1,2,3,4,5])
        # [2, 4, 6, 8, 10]
:param target: function to run
:param iterable: positional arg to pass to function
:param threaded: Threaded if True multiprocessed if False
:param processes: Number of workers
:param asynchronous: will do map_async if True
:param target_kwargs: Keyword arguments to set on the function as a partial
:return: pool results
"""
my_pool = pool.ThreadPool if threaded else pool.Pool
if target_kwargs:
        target = partial(target, **target_kwargs)
p = my_pool(processes)
try:
results = p.map_async(target, iterable) if asynchronous else p.map(target, iterable)
finally:
p.close()
p.join()
return results
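# A minimal usage sketch, guarded so importing the module stays side-effect free.
# The echoed command and the worker function are arbitrary examples and not part
# of the library itself.
if __name__ == "__main__":
    completed = run('echo "hello world"', shell=True, copy_local_env=True)
    completed.check_returncode()
    print(completed.stdout)
    def _double(value):
        # Trivial worker used only to demonstrate run_in_pool.
        return value + value
    print(run_in_pool(_double, [1, 2, 3, 4, 5], threaded=True, processes=2))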
| mit | 4,322,236,437,297,776,600 | 35.181102 | 117 | 0.639391 | false |
SkierPGP/Skier | init.py | 1 | 1602 | import threading
from flask import render_template
import cfg
def init(app):
from skier import frontend
from skier import pgpapi
from skier import pks
from cfg import API_VERSION
from skier import pgpactions
if not cfg.cfg.config.features.disable_frontend:
app.register_blueprint(frontend.frontend)
app.register_blueprint(frontend.frontend_keys, url_prefix="/keys")
app.register_blueprint(pgpapi.pgpapi, url_prefix="/api/v{}".format(API_VERSION))
app.register_blueprint(pks.legacypks, url_prefix="/pks")
app.config["SQLALCHEMY_DATABASE_URI"] = cfg.sqlalchemy_uri
app.jinja_env.globals.update(theme = cfg.cfg.config.theme)
@app.before_first_request
def f(*args, **kwargs):
if cfg.cfg.config.pool_enabled.autosync:
threading.Thread(target=pgpactions.synch_keys).start()
@app.errorhandler(404)
def four_oh_four(error):
if not cfg.cfg.config.features.disable_frontend:
return render_template("error/404.html"), 404
else:
return "Not Found", 404
@app.errorhandler(403)
def four_oh_three(error):
if not cfg.cfg.config.features.disable_frontend:
return render_template("error/403.html"), 403
else:
return "Forbidden", 403
@app.errorhandler(500)
def five_oh_oh(error):
if not cfg.cfg.config.features.disable_frontend:
return render_template("error/500.html"), 500
else:
return "Internal Server Error", 500
@app.route("/skier")
def skier():
return "", 200
| agpl-3.0 | -5,073,029,300,028,470,000 | 29.807692 | 84 | 0.654806 | false |
bliksemlabs/bliksemintegration | exporters/ridiff2.py | 1 | 12243 | import psycopg2
import psycopg2.extras
import sys
#usage $from_date_iso8601 $to_date_iso8601 $OPERATORS
def writeout(f, cur):
while True:
out = cur.fetchone()
if out is None:
break
f.write(','.join(out) + '\r\n')
conn = psycopg2.connect("dbname='ridprod'")
conn.set_client_encoding('WIN1252')
cur = conn.cursor(cursor_factory=psycopg2.extras.DictCursor)
from_date = sys.argv[1]
to_date = sys.argv[2]
# Helper tables: map stop points to IFF station codes and derive station-to-station transfers.
cur.execute("""
create temporary table iff_pointref_station as select DISTINCT pointref, coalesce(first_placecode, quaycode) as stationshortname from servicejourney JOIN pointinjourneypattern USING (journeypatternref) JOIN stoppoint ON (pointinjourneypattern.pointref = stoppoint.id) JOIN quays_wouter on (stoppoint.operator_id = dataownercode||':'||userstopcode);
""")
cur.execute("""
create temporary table iff_transfer_table as select from_stationshortname, to_stationshortname, distance, ST_SetSRID(ST_MakeLine(ST_MakePoint(u.xcoordinate::int4, u.ycoordinate::int4), ST_MakePoint(w.xcoordinate::int4, w.ycoordinate::int4)), 28992) AS line FROM (select x.stationshortname as from_stationshortname, y.stationshortname as to_stationshortname, distance from (select min(pointref) as pointref, stationshortname from iff_pointref_station group by stationshortname) as x, (select min(pointref) as pointref, stationshortname from iff_pointref_station group by stationshortname) as y, generated_transfers where from_stop_id = x.pointref and to_stop_id = y.pointref) as z JOIN iff_stations AS u ON (from_stationshortname = u.stationshortname) JOIN iff_stations AS w ON (to_stationshortname = w.stationshortname);
""")
cur.execute("""
create temporary table iff_servicecalendar as (
SELECT validfrom,validthru,bitcalendar,row_number() OVER () as service_id,unnest(array_agg(availabilityconditionref)) as availabilityconditionref
FROM (
SELECT availabilityconditionref,
bitcalendar(array_agg(validdate ORDER BY validdate)) as bitcalendar,
min(validdate) as validfrom,
max(validdate) as validthru
FROM availabilityconditionday as ad JOIN availabilitycondition AS ac ON (availabilityconditionref = ac.id)
JOIN version AS v ON (v.id = versionref)
JOIN datasource AS d ON (d.id = datasourceref)
WHERE ad.isavailable = true AND validdate between %s and %s and d.operator_id = any(%s)
GROUP by availabilityconditionref) as x
GROUP BY validfrom,validthru,bitcalendar
ORDER BY service_id);
""",[from_date,to_date,sys.argv[3:]]);
cur.execute("""
""")
cur.execute("""
SELECT
'@'||to_char(0, 'FM000'),
to_char(min(validfrom), 'DDMMYYYY'),
to_char(max(validthru), 'DDMMYYYY'),
to_char(1, 'FM0000'),
'openOV and Friends 2014 '
FROM iff_servicecalendar
""");
delivery = ','.join(cur.fetchone().values()) + '\r\n'
for x in ['delivery.dat', 'changes.dat', 'trnsaqst.dat', 'trnsattr.dat', 'trnsmode.dat']:
open('output/' + x, 'w').write(delivery)
cur.execute("""
SELECT DISTINCT ON (stationcode)
0 as stationoftrainchanges,
stationcode as stationshortname,
'00' as timenecessarytochangetrains,
'00' as maximumtimetochangetrains,
'NL ' as countrycode,
'0000' as timezone,
'00' as obsolete,
lpad(AVG(rd_x) OVER (PARTITION BY stationcode)::integer::text,6) as xcoordinate,
lpad(AVG(rd_y) OVER (PARTITION BY stationcode)::integer::text,6) as ycoordinate,
substring(rpad(translate(stationame, ',', ';'),50,' ') for 50) as stationname
FROM (SELECT DISTINCT ON (s_pt.id)
coalesce(coalesce(first_placecode,'opengeo.'||stoparearef),'opengeo.'||s_pt.id) as stationcode,
coalesce(stoparea.name,quays_wouter.name) as stationame,
coalesce(quays_wouter.rd_x,s_pt.rd_x) as rd_x,
coalesce(quays_wouter.rd_y,s_pt.rd_y) as rd_y
FROM servicejourney as j
JOIN iff_servicecalendar USING (availabilityconditionref) -- This just makes sure the trips are in the exported IFF
JOIN journeypattern as p on (j.journeypatternref = p.id)
JOIN pointinjourneypattern as p_pt on (p_pt.journeypatternref = p.id)
JOIN pointintimedemandgroup as t_pt on (j.timedemandgroupref = t_pt.timedemandgroupref AND p_pt.pointorder = t_pt.pointorder)
JOIN scheduledstoppoint as s_pt ON (pointref = s_pt.id)
LEFT JOIN stoparea on (stoparea.id = stoparearef)
LEFT JOIN quays_wouter on (s_pt.operator_id = dataownercode||':'||userstopcode)) AS quays;
""")
f = open('output/stations.dat', 'w')
f.write(delivery)
writeout(f, cur)
'''
We don't have junctions at this moment. Super groups will be added later.
cur.execute("""
select to_char(stoparearef, 'FM000000') as groupshortname, left(translate(stoparea.name, ',', ';'), 29) || repeat(' ', 29 -
length(left(stoparea.name, 29))) as groupname,
stationshortname from (select stoparearef, array_agg(to_char(id, 'FM000000')) as stationshortname from stoppoint group by stoparearef) as x left join
stoparea on (stoparearef = id) where stoparea.name is not null;
""")
f = open('output/group.dat', 'w')
f.write(delivery)
while True:
out = cur.fetchone()
if out is None:
break
f.write('#' + ','.join([out['groupshortname'], out['groupname']]) + '\r\n')
for x in out['stationshortname']:
f.write('-' + x + '\r\n')
'''
open('output/timezone.dat', 'w').write(delivery + "#0000\r\n+00,09122012,14122013\r\n#0001\r\n-01,09122012,14122013\r\n")
open('output/country.dat', 'w').write(delivery + "NL ,1,Nederland \r\n")
open('output/connmode.dat', 'w').write(delivery + "0002,2,Lopen \r\n")
cur.execute("""
SELECT to_char(from_stop_id, 'FM000000') as fromstationshortname,
to_char(to_stop_id, 'FM000000') as tostationshortname,
to_char(ceil(distance / 60.0), 'FM00') as connectiontime, '02' as connectionmodecode
FROM transfers;""")
f = open('output/contconn.dat', 'w')
f.write(delivery)
writeout(f, cur)
cur.execute("""
SELECT DISTINCT ON (o.id)
to_char(o.id, 'FM000') as companynumber,
left(o.privatecode, 9) || repeat(' ', 9 - length(left(o.privatecode, 9))) as companycode,
left(o.name, 9) || repeat(' ', 9 - length(left(o.name, 9))) as companyname,
'0400' as time
FROM servicejourney JOIN iff_servicecalendar USING (availabilityconditionref) -- This just makes sure the trips are in the exported IFF
JOIN journeypattern as jp ON (jp.id = journeypatternref)
JOIN route as r ON (r.id = routeref)
JOIN line as l ON (l.id = lineref)
JOIN operator as o ON (o.id = operatorref)
""")
f = open('output/company.dat', 'w')
f.write(delivery)
f.write('000,OPENGEO ,OpenGeo ,0000\r\n')
writeout(f, cur)
cur.execute("""
SELECT
to_char(row_number() over (), 'FM0000') as transportmodecode,
COALESCE(name, initcap(lower(transportmode))) as description
FROM (SELECT DISTINCT transportmode, pc.name FROM
servicejourney JOIN iff_servicecalendar USING (availabilityconditionref) -- This just makes sure the trips are in the exported IFF
JOIN journeypattern as jp ON (jp.id = journeypatternref)
JOIN route as r ON (r.id = routeref)
JOIN line as l ON (l.id = lineref)
LEFT JOIN productcategory as pc ON (pc.id = productcategoryref)) as x;
""")
f = open('output/trnsmode.dat', 'w')
f.write(delivery)
writeout(f, cur)
cur.execute("""
SELECT
to_char(row_number() over (), 'FM0000') as transportmodecode,
COALESCE(name, initcap(lower(transportmode))) as description
FROM (SELECT DISTINCT transportmode, pc.name FROM
servicejourney JOIN iff_servicecalendar USING (availabilityconditionref) -- This just makes sure the trips are in the exported IFF
JOIN journeypattern as jp ON (jp.id = journeypatternref)
JOIN route as r ON (r.id = routeref)
JOIN line as l ON (l.id = lineref)
LEFT JOIN productcategory as pc ON (pc.id = productcategoryref)) as x;
""")
trnsmode = {}
for x, y in cur.fetchall():
trnsmode[y.upper()] = x
cur.execute("""
SELECT '#'||to_char(row_number() OVER (), 'FM00000') as footnotenumber,
repeat('0', validfrom - date 'yesterday') || bitcalendar || repeat('0', (select max(validdate) FROM availabilityconditionday) - validthru) as vector
FROM iff_servicecalendar
GROUP BY validfrom, validthru, bitcalendar ORDER BY footnotenumber;
""")
f = open('output/footnote.dat', 'w')
f.write(delivery)
while True:
out = cur.fetchone()
if out is None:
break
f.write('%s\r\n%s\r\n' % (out[0], out[1]))
# COALESCE(CASE WHEN (blockref like 'IFF:%') THEN substr(blockref, 5) ELSE NULL END, to_char(j.id, 'FM00000000')) AS serviceidentification,
cur.execute("""
SELECT
to_char(j.id, 'FM00000000') AS serviceidentification,
to_char(operatorref, 'FM000') as companynumber,
COALESCE(j.name, '0000') as servicenumber,
' ' as variant,
left(d.name, 29) || repeat(' ', 29 - length(left(d.name, 29))) as servicename,
to_char(sc.service_id, 'FM00000') as footnotenumber,
COALESCE(cast(pc.name as text), transportmode) as trnsmode,
to32time(departuretime+totaldrivetime) as arrival_time,
to32time(departuretime+totaldrivetime+stopwaittime) as departure_time,
forboarding, foralighting,
stationshortname,
s_pt.platformcode as platformname
FROM servicejourney as j JOIN iff_servicecalendar as sc USING (availabilityconditionref)
JOIN journeypattern as p on (j.journeypatternref = p.id)
JOIN route as r on (p.routeref = r.id)
JOIN line as l on (r.lineref = l.id)
JOIN destinationdisplay as d ON (p.destinationdisplayref = d.id)
LEFT JOIN productcategory as pc on (j.productcategoryref = pc.id)
JOIN pointinjourneypattern as p_pt on (p_pt.journeypatternref = p.id)
JOIN pointintimedemandgroup as t_pt on (j.timedemandgroupref = t_pt.timedemandgroupref AND p_pt.pointorder =
t_pt.pointorder)
JOIN scheduledstoppoint as s_pt on (p_pt.pointref = s_pt.id)
JOIN iff_pointref_station ON (p_pt.pointref = iff_pointref_station.pointref)
ORDER BY companynumber, serviceidentification, servicenumber, p_pt.pointorder;
""");
f = open('output/timetbls.dat', 'w')
f.write(delivery)
trip = [dict(cur.fetchone())]
def render(f, trip):
total = '%03d' % (len(trip) - 1)
try:
f.write( '#' + trip[0]['serviceidentification'] + '\r\n' ) # Service identification Record
f.write( '%' + ','.join([trip[0]['companynumber'], trip[0]['servicenumber'], trip[0]['variant'], '001', total, trip[0]['servicename']]) +
'\r\n' ) # Service Record
f.write( '-' + ','.join([trip[0]['footnotenumber'], '000', '999']) + '\r\n') # Validity
f.write( '&' + ','.join([trnsmode[trip[0]['trnsmode'].upper()], '001', total]) + '\r\n') # Transport mode Record
f.write( '>' + ','.join([trip[0]['stationshortname'], trip[0]['departure_time'][0:-3].replace(':', '')]) + '\r\n') # Start Record
for i in range(1, len(trip) - 2):
if not trip[i]['forboarding'] and not trip[i]['foralighting']:
f.write( ';' + trip[i]['stationshortname'] + '\r\n') # Passing Record
elif trip[i]['arrival_time'][0:-3] == trip[i]['departure_time'][0:-3]:
f.write( '.' + ','.join([trip[i]['stationshortname'], trip[i]['arrival_time'][0:-3].replace(':', '')]) + '\r\n') # Continuation
else:
f.write( '+' + ','.join([trip[i]['stationshortname'], trip[i]['arrival_time'][0:-3].replace(':', ''),
trip[i]['departure_time'][0:-3].replace(':', '')]) + '\r\n') # Interval
f.write( '<' + ','.join([trip[-2]['stationshortname'], trip[-2]['arrival_time'][0:-3].replace(':', '')]) + '\r\n') # Final Record
except:
print trip
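# Sketch of the record sequence render() emits for one trip (record type
# prefixes taken from the writes above; concrete values are illustrative only):
# '#<service identification>', '%<company,number,variant,001,total,name>',
# '-<footnote,000,999>', '&<mode,001,total>', '><first stop,departure>', then
# ';' (passing), '.' (continuation) or '+' (interval) records for intermediate
# stops, closed by '<<last stop,arrival>'.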
while True:
current = cur.fetchone()
if current is None:
break
trip.append(current)
if trip[-2]['serviceidentification'] != trip[-1]['serviceidentification']:
render(f, trip)
trip = [current]
elif trip[-2]['servicenumber'] != trip[-1]['servicenumber']:
        print 'Warning'
render(f, trip)
| bsd-2-clause | 2,596,350,855,669,492,000 | 44.344444 | 820 | 0.65907 | false |
jwesheath/conference-central-api | conference.py | 1 | 27479 | from datetime import datetime
import endpoints
from protorpc import messages
from protorpc import message_types
from protorpc import remote
from google.appengine.ext import ndb
from google.appengine.api import memcache
from google.appengine.api import taskqueue
from models import BooleanMessage
from models import ConflictException
from models import StringMessage
from models import Conference
from models import ConferenceForm
from models import ConferenceForms
from models import ConferenceQueryForms
from models import Profile
from models import ProfileMiniForm
from models import ProfileForm
from models import TeeShirtSize
from models import Session
from models import SessionForm
from models import SessionByTypeForm
from models import SessionBySpeakerForm
from models import SessionForms
from utils import getUserId
from settings import WEB_CLIENT_ID
EMAIL_SCOPE = endpoints.EMAIL_SCOPE
API_EXPLORER_CLIENT_ID = endpoints.API_EXPLORER_CLIENT_ID
DEFAULTS = {"city": "Default City",
"maxAttendees": 0,
"seatsAvailable": 0,
"topics": ["Default", "Topic"]}
FIELDS = {'CITY': 'city',
'TOPIC': 'topics',
'MONTH': 'month',
'MAX_ATTENDEES': 'maxAttendees'}
OPERATORS = {'EQ': '=',
'GT': '>',
'GTEQ': '>=',
'LT': '<',
'LTEQ': '<=',
'NE': '!='}
CONF_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
websafeConferenceKey=messages.StringField(1),
)
SESSION_GET_REQUEST = endpoints.ResourceContainer(
message_types.VoidMessage,
SessionKey=messages.StringField(1),
)
MEMCACHE_ANNOUNCEMENTS_KEY = 'announcements'
MEMCACHE_FEATUREDSPEAKER_KEY = 'featuredSpeaker'
@endpoints.api(name='conference',
version='v1',
allowed_client_ids=[WEB_CLIENT_ID, API_EXPLORER_CLIENT_ID],
scopes=[EMAIL_SCOPE])
class ConferenceApi(remote.Service):
"""Conference API v0.1"""
# - - - Helper functions - - - - - - - - - - - - - - - - - - -
@staticmethod
def _cacheAnnouncement():
"""Create Announcement & assign to memcache; used by
memcache cron job & putAnnouncement().
"""
# Get conferences with 5 seats or less available
confs = Conference.query(ndb.AND(
Conference.seatsAvailable <= 5,
Conference.seatsAvailable > 0)
).fetch(projection=[Conference.name])
if confs:
# If there are almost sold out conferences,
# format announcement and set it in memcache
announcement = '%s %s' % (
'Last chance to attend! The following conferences '
'are nearly sold out:',
', '.join(conf.name for conf in confs))
memcache.set(MEMCACHE_ANNOUNCEMENTS_KEY, announcement)
else:
# If there are no almost sold out conferences,
# delete the memcache announcements entry
announcement = ""
memcache.delete(MEMCACHE_ANNOUNCEMENTS_KEY)
return announcement
@ndb.transactional(xg=True)
def _conferenceRegistration(self, request, reg=True):
"""Register or unregister user for selected conference."""
retval = None
prof = self._getProfileFromUser()
# Get conference, check that it exists
wsck = request.websafeConferenceKey
conf = ndb.Key(urlsafe=wsck).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' % wsck)
# Register
if reg:
# Check if user already registered otherwise add
if wsck in prof.conferenceKeysToAttend:
raise ConflictException(
"You have already registered for this conference")
# Check if seats available
if conf.seatsAvailable <= 0:
raise ConflictException(
"There are no seats available.")
# Register user, take away one seat
prof.conferenceKeysToAttend.append(wsck)
conf.seatsAvailable -= 1
retval = True
# Unregister
else:
# Check if user already registered
if wsck in prof.conferenceKeysToAttend:
# Unregister user, add back one seat
prof.conferenceKeysToAttend.remove(wsck)
conf.seatsAvailable += 1
retval = True
else:
retval = False
# Write things back to the datastore & return
prof.put()
conf.put()
return BooleanMessage(data=retval)
def _copyConferenceToForm(self, conf, displayName):
"""Copy relevant fields from Conference to ConferenceForm."""
cf = ConferenceForm()
for field in cf.all_fields():
if hasattr(conf, field.name):
# Convert Date to date string, just copy others
if field.name.endswith('Date'):
setattr(cf, field.name, str(getattr(conf, field.name)))
else:
setattr(cf, field.name, getattr(conf, field.name))
elif field.name == "websafeKey":
setattr(cf, field.name, conf.key.urlsafe())
if displayName:
setattr(cf, 'organizerDisplayName', displayName)
cf.check_initialized()
return cf
def _copyProfileToForm(self, prof):
"""Copy relevant fields from Profile to ProfileForm."""
pf = ProfileForm()
for field in pf.all_fields():
if hasattr(prof, field.name):
# Convert t-shirt string to Enum, just copy others
if field.name == 'teeShirtSize':
setattr(pf, field.name, getattr(TeeShirtSize,
getattr(prof, field.name)))
elif field.name == 'sessionKeysWishlist':
setattr(pf, field.name, str(getattr(prof, field.name)))
else:
setattr(pf, field.name, getattr(prof, field.name))
pf.check_initialized()
return pf
def _copySessionToForm(self, session):
"""Copy relevant fields from Conference to ConferenceForm."""
sf = SessionForm()
for field in sf.all_fields():
# Convert date and startTime to strings, just copy others
if field.name in ('date', 'startTime'):
setattr(sf, field.name, str(getattr(session, field.name)))
else:
setattr(sf, field.name, getattr(session, field.name))
sf.check_initialized()
return sf
def _createConferenceObject(self, request):
"""Create or update Conference object,
returning ConferenceForm/request."""
# Preload necessary data items
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
# Check that a conference name was provided
if not request.name:
raise endpoints.BadRequestException("Conference 'name'"
"field required")
# Copy ConferenceForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name)
for field in request.all_fields()}
del data['websafeKey']
del data['organizerDisplayName']
# Add default values for missing
for df in DEFAULTS:
if data[df] in (None, []):
data[df] = DEFAULTS[df]
setattr(request, df, DEFAULTS[df])
# Convert dates from strings to Date objects and
# set month based on start_date
if data['startDate']:
data['startDate'] = datetime.strptime(data['startDate'][:10],
"%Y-%m-%d").date()
data['month'] = data['startDate'].month
else:
data['month'] = 0
if data['endDate']:
data['endDate'] = datetime.strptime(data['endDate'][:10],
"%Y-%m-%d").date()
# Set seatsAvailable to be same as maxAttendees on creation
# both for data model & outbound Message
if data["maxAttendees"] > 0:
data["seatsAvailable"] = data["maxAttendees"]
setattr(request, "seatsAvailable", data["maxAttendees"])
# Make Profile Key from user ID
p_key = ndb.Key(Profile, user_id)
# Allocate new Conference ID with Profile key as parent
c_id = Conference.allocate_ids(size=1, parent=p_key)[0]
# Make Conference key from ID
c_key = ndb.Key(Conference, c_id, parent=p_key)
data['key'] = c_key
data['organizerUserId'] = request.organizerUserId = user_id
# Create Conference & return (modified) ConferenceForm
Conference(**data).put()
taskqueue.add(params={'email': user.email(),
'conferenceInfo': repr(request)},
url='/tasks/send_confirmation_email')
return request
def _createSessionObject(self, request):
# Check that user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# Check that conference and session names are provided
if not request.name or not request.conference:
raise endpoints.BadRequestException("Session 'name' and"
"'conference' fields required")
# Copy SessionForm/ProtoRPC Message into dict
data = {field.name: getattr(request, field.name)
for field in request.all_fields()}
# Convert dates from strings to Date objects
if data['date']:
data['date'] = datetime.strptime(data['date'], "%Y-%m-%d").date()
if data['startTime']:
data['startTime'] = datetime.strptime(data['startTime'],
"%H:%M").time()
# Make Key and get parent Conference
user_id = getUserId(user)
c_key = ndb.Key(Conference, request.conference)
conference = Conference.query(Conference.name == request.conference)
conference = conference.get()
# Check that conference exists
if not conference:
raise endpoints.BadRequestException("Conference does not exist.")
# Check that user created this conference
if conference.organizerUserId != user_id:
raise endpoints.BadRequestException("Only conference organizer"
"can create sessions.")
# Allocate new Session ID with Conference key as parent
s_id = Session.allocate_ids(size=1, parent=c_key)[0]
# Make Session key from Session ID
s_key = ndb.Key(Session, s_id, parent=c_key)
data['key'] = s_key
# create Conference & return (modified) ConferenceForm
Session(**data).put()
# Add task to check if this speaker should be featured
taskqueue.add(params={'speaker': request.speaker,
'conference': request.conference},
url='/tasks/check_featured_speaker')
return request
def _doProfile(self, save_request=None):
"""Get user Profile and return to user, possibly updating it first."""
# Get user Profile
prof = self._getProfileFromUser()
# Process user-modifyable fields if save_request
if save_request:
for field in ('displayName', 'teeShirtSize'):
if hasattr(save_request, field):
val = getattr(save_request, field)
if val:
setattr(prof, field, str(val))
# Write profile to datastore
prof.put()
return self._copyProfileToForm(prof)
def _formatFilters(self, filters):
"""Parse, check validity and format user supplied filters."""
formatted_filters = []
inequality_field = None
for f in filters:
filtr = {field.name: getattr(f, field.name)
for field in f.all_fields()}
try:
filtr["field"] = FIELDS[filtr["field"]]
filtr["operator"] = OPERATORS[filtr["operator"]]
except KeyError:
raise endpoints.BadRequestException("Filter contains invalid"
"field or operator.")
# Every operation except "=" is an inequality
if filtr["operator"] != "=":
# check if inequality operation has been used in previous
# filters and disallow the filter if inequality was performed
# on a different field before. track the field on which the
# inequality operation is performed
if inequality_field and inequality_field != filtr["field"]:
raise endpoints.BadRequestException("Inequality filter is"
"allowed on only one"
"field.")
else:
inequality_field = filtr["field"]
formatted_filters.append(filtr)
return (inequality_field, formatted_filters)
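    # Illustrative example (not from the original source): a submitted filter
    # such as {"field": "CITY", "operator": "EQ", "value": "London"} is
    # normalised by _formatFilters above into
    # {"field": "city", "operator": "=", "value": "London"} before _getQuery
    # below turns it into an ndb FilterNode.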
def _getProfileFromUser(self):
"""Return user Profile from datastore or create new one."""
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
user_id = getUserId(user)
p_key = ndb.Key(Profile, user_id)
profile = p_key.get()
if not profile:
profile = Profile(
key = p_key,
displayName = user.nickname(),
mainEmail= user.email(),
teeShirtSize = str(TeeShirtSize.NOT_SPECIFIED),
)
profile.put()
return profile
def _getQuery(self, request):
"""Return formatted query from the submitted filters."""
q = Conference.query()
inequality_filter, filters = self._formatFilters(request.filters)
# If exists, sort on inequality filter first
if not inequality_filter:
q = q.order(Conference.name)
else:
q = q.order(ndb.GenericProperty(inequality_filter))
q = q.order(Conference.name)
for filtr in filters:
if filtr["field"] in ["month", "maxAttendees"]:
filtr["value"] = int(filtr["value"])
formatted_query = ndb.query.FilterNode(filtr["field"],
filtr["operator"],
filtr["value"])
q = q.filter(formatted_query)
return q
@staticmethod
def _updateFeaturedSpeaker(speaker, conference):
"""Update featured speaker in cache."""
# If speaker of created session already has a session at this
# conference, make them the featured speaker
sessions = Session.query(ndb.AND(Session.speaker == speaker,
Session.conference == conference))
if sessions.count() > 1:
memcache.set(MEMCACHE_FEATUREDSPEAKER_KEY, speaker)
return speaker
# - - - Endpoints - - - - - - - - - - - - - - - - - - -
@endpoints.method(SESSION_GET_REQUEST, SessionForm,
path='conference/{SessionKey}/addSessionToWishlist',
http_method='POST', name='addSessionToWishlist')
def addSessionToWishlist(self, request):
"""Add session to user's wishlist."""
# Get Session from Key and check that it exists
s_key = request.SessionKey
session = ndb.Key(urlsafe=s_key).get()
if not session:
raise endpoints.NotFoundException(
'No session found with key: %s' % s_key)
# Append Key to Profile's session wishlist
prof = self._getProfileFromUser()
prof.sessionKeysWishlist.append(s_key)
prof.put()
return self._copySessionToForm(session)
@endpoints.method(ConferenceForm, ConferenceForm,
path='conference',
http_method='POST', name='createConference')
def createConference(self, request):
"""Create new conference."""
return self._createConferenceObject(request)
# open only to the organizer of the conference
@endpoints.method(SessionForm, SessionForm,
path='session',
http_method='POST', name='createSession')
def createSession(self, request):
"""Create new session."""
return self._createSessionObject(request)
@endpoints.method(message_types.VoidMessage, StringMessage,
path='conference/announcement/get',
http_method='GET', name='getAnnouncement')
def getAnnouncement(self, request):
"""Return Announcement from memcache."""
announcement = memcache.get(MEMCACHE_ANNOUNCEMENTS_KEY)
if not announcement:
announcement = ""
return StringMessage(data=announcement)
@endpoints.method(CONF_GET_REQUEST, ConferenceForm,
path='conference/{websafeConferenceKey}',
http_method='GET', name='getConference')
def getConference(self, request):
"""Return requested conference (by websafeConferenceKey)."""
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
prof = conf.key.parent().get()
return self._copyConferenceToForm(conf, getattr(prof, 'displayName'))
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='getConferencesCreated',
http_method='POST', name='getConferencesCreated')
def getConferencesCreated(self, request):
"""Return conferences created by user."""
# Make sure user is authed
user = endpoints.get_current_user()
if not user:
raise endpoints.UnauthorizedException('Authorization required')
# Make profile key
p_key = ndb.Key(Profile, getUserId(user))
# Create ancestor query for this user
conferences = Conference.query(ancestor=p_key)
# Get the user profile and display name
prof = self._getProfileFromUser()
displayName = getattr(prof, 'displayName')
return ConferenceForms(items=[self._copyConferenceToForm(conf,
displayName)
for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/sessions',
http_method='POST', name='getConferenceSessions')
def getConferenceSessions(self, request):
"""Return sessions by conference."""
# Check that conference name was provided
if not request.websafeConferenceKey:
raise endpoints.BadRequestException("Must specify conference!")
# Get conference and check that it exists
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
# Get all Sessions for this Conference and check that there are > 0
sessions = Session.query(Session.conference == conf.name)
if sessions.count() == 0:
raise endpoints.NotFoundException('No sessions for '
'this conference.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(SessionBySpeakerForm, SessionForms,
path='{speaker}/sessions',
http_method='POST',
name='getConferenceSessionsBySpeaker')
def getConferenceSessionsBySpeaker(self, request):
"""Return sessions by speaker."""
# Check that speaker was provided
if not request.speaker:
raise endpoints.BadRequestException("Must specify speaker!")
# Get all Sessions with this speaker and check that there are > 0
sessions = Session.query().filter(Session.speaker == request.speaker)
if sessions.count() == 0:
raise endpoints.NotFoundException('No sessions with this speaker.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(SessionByTypeForm, SessionForms,
path='conference/{websafeConferenceKey}/{typeOfSession}'
'/sessions',
http_method='POST', name='getConferenceSessionsByType')
def getConferenceSessionsByType(self, request):
"""Return sessions by session type."""
# Make sure Conference and typeOfSession were specified
if not request.websafeConferenceKey or not request.typeOfSession:
raise endpoints.BadRequestException("Must specify conference "
"and session type!")
# Get conference and check that it exists
conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
if not conf:
raise endpoints.NotFoundException(
'No conference found with key: %s' %
request.websafeConferenceKey)
# Get all Sessions of typeOfSession and check that there are > 0
sessions = Session.query(Session.conference == conf.name)\
.filter(Session.typeOfSession == request.typeOfSession)
if sessions.count() == 0:
raise endpoints.NotFoundException('No sessions of this type.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/attending',
http_method='GET', name='getConferencesToAttend')
def getConferencesToAttend(self, request):
"""Get list of conferences that user has registered for."""
prof = self._getProfileFromUser()
conferenceKeys = prof.conferenceKeysToAttend
c_keys = [ndb.Key(urlsafe=c) for c in conferenceKeys]
conferences = ndb.get_multi(c_keys)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
for conf in conferences])
@endpoints.method(message_types.VoidMessage, StringMessage,
path='featuredSpeaker/get',
http_method='GET', name='getFeaturedSpeaker')
def getFeaturedSpeaker(self, request):
"""Return featured speaker from memcache."""
featuredSpeaker = memcache.get(MEMCACHE_FEATUREDSPEAKER_KEY)
if not featuredSpeaker:
featuredSpeaker = ""
return StringMessage(data=featuredSpeaker)
@endpoints.method(message_types.VoidMessage, ProfileForm,
path='profile',
http_method='GET', name='getProfile')
def getProfile(self, request):
"""Return user profile."""
return self._doProfile()
@endpoints.method(message_types.VoidMessage, SessionForms,
path='session/wishlist',
http_method='POST', name='getSessionsInWishlist')
def getSessionsInWishlist(self, request):
"""Return all sessions in user's wishlist."""
prof = self._getProfileFromUser()
sessionKeys = prof.sessionKeysWishlist
s_keys = [ndb.Key(urlsafe=s) for s in sessionKeys]
sessions = ndb.get_multi(s_keys)
if len(sessions) == 0:
raise endpoints.NotFoundException('No sessions in wishlist.')
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(CONF_GET_REQUEST, SessionForms,
path='conference/{websafeConferenceKey}/morningsessions',
http_method='POST',
name='getMorningSessionsByConference')
def getMorningSessionsByConference(self, request):
"""Query for morning sessions at a given conference."""
        # Get conference and check that it exists
        conf = ndb.Key(urlsafe=request.websafeConferenceKey).get()
        if not conf:
            raise endpoints.NotFoundException(
                'No conference found with key: %s' %
                request.websafeConferenceKey)
        # Morning sessions are those at this conference starting before noon
        check_time = datetime.strptime('12:00', "%H:%M").time()
        sessions = Session.query(ndb.AND(Session.conference == conf.name,
                                         Session.startTime < check_time))
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(message_types.VoidMessage, SessionForms,
path='sessions/non_workshops_before_seven',
http_method='GET', name='getNonWorkshopsBeforeSeven')
def getNonWorkshopsBeforeSeven(self, request):
"""Query for non-workshop sessions before 7:00 P.M."""
check_time = datetime.strptime('19:00', "%H:%M").time()
sessions = Session.query(Session.typeOfSession != 'Workshop')
sessions = [session for session in sessions
if session.startTime < check_time]
return SessionForms(items=[self._copySessionToForm(session)
for session in sessions])
@endpoints.method(message_types.VoidMessage, ConferenceForms,
path='conferences/open',
http_method='POST', name='getOpenConferences')
def getOpenConferences(self, request):
"""Query for conferences with available seats remaining."""
conferences = Conference.query(Conference.seatsAvailable > 0)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
for conf in conferences])
@endpoints.method(ConferenceQueryForms, ConferenceForms,
path='queryConferences',
http_method='POST', name='queryConferences')
def queryConferences(self, request):
"""Query for all conferences."""
conferences = self._getQuery(request)
return ConferenceForms(items=[self._copyConferenceToForm(conf, "")
for conf in conferences])
@endpoints.method(CONF_GET_REQUEST, BooleanMessage,
path='conference/{websafeConferenceKey}',
http_method='POST', name='registerForConference')
def registerForConference(self, request):
"""Register user for selected conference."""
return self._conferenceRegistration(request)
@endpoints.method(ProfileMiniForm, ProfileForm,
path='profile',
http_method='POST', name='saveProfile')
def saveProfile(self, request):
"""Save user profile."""
return self._doProfile(request)
# registers API
api = endpoints.api_server([ConferenceApi])
| apache-2.0 | -3,043,745,122,574,124,500 | 44.047541 | 79 | 0.589359 | false |
rajul/tvb-library | tvb/basic/config/profile_settings.py | 1 | 10387 | # -*- coding: utf-8 -*-
#
#
# TheVirtualBrain-Scientific Package. This package holds all simulators, and
# analysers necessary to run brain-simulations. You can use it stand alone or
# in conjunction with TheVirtualBrain-Framework Package. See content of the
# documentation-folder for more details. See also http://www.thevirtualbrain.org
#
# (c) 2012-2013, Baycrest Centre for Geriatric Care ("Baycrest")
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License version 2 as published by the Free
# Software Foundation. This program is distributed in the hope that it will be
# useful, but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public
# License for more details. You should have received a copy of the GNU General
# Public License along with this program; if not, you can download it here
# http://www.gnu.org/licenses/old-licenses/gpl-2.0
#
#
# CITATION:
# When using The Virtual Brain for scientific publications, please cite it as follows:
#
# Paula Sanz Leon, Stuart A. Knock, M. Marmaduke Woodman, Lia Domide,
# Jochen Mersmann, Anthony R. McIntosh, Viktor Jirsa (2013)
# The Virtual Brain: a simulator of primate brain network dynamics.
# Frontiers in Neuroinformatics (7:10. doi: 10.3389/fninf.2013.00010)
#
#
"""
Prepare TVB settings to be grouped under various profile classes.
.. moduleauthor:: Lia Domide <[email protected]>
"""
import os
import sys
from tvb.basic.config import stored
from tvb.basic.config.environment import Environment
from tvb.basic.config.settings import ClusterSettings, DBSettings, VersionSettings, WebSettings
from tvb.basic.config.utils import EnhancedDictionary, LibraryModulesFinder, LibraryImportError
class BaseSettingsProfile(object):
TVB_CONFIG_FILE = os.path.expanduser(os.path.join("~", '.tvb.configuration'))
DEFAULT_STORAGE = os.path.expanduser(os.path.join('~', 'TVB' + os.sep))
FIRST_RUN_STORAGE = os.path.expanduser(os.path.join('~', '.tvb-temp'))
LOGGER_CONFIG_FILE_NAME = "logger_config.conf"
# Access rights for TVB generated files/folders.
ACCESS_MODE_TVB_FILES = 0744
## Number used for estimation of TVB used storage space
MAGIC_NUMBER = 9
def __init__(self, web_enabled=True):
self.manager = stored.SettingsManager(self.TVB_CONFIG_FILE)
## Actual storage of all TVB related files
self.TVB_STORAGE = self.manager.get_attribute(stored.KEY_STORAGE, self.FIRST_RUN_STORAGE, unicode)
self.TVB_LOG_FOLDER = os.path.join(self.TVB_STORAGE, "logs")
self.TVB_TEMP_FOLDER = os.path.join(self.TVB_STORAGE, "TEMP")
self.TVB_PATH = self.manager.get_attribute(stored.KEY_TVB_PATH, '')
self.EXTERNALS_FOLDER_PARENT = os.path.dirname(self.BIN_FOLDER)
self.env = Environment()
self.cluster = ClusterSettings(self.manager)
self.web = WebSettings(self.manager, web_enabled)
self.db = DBSettings(self.manager, self.DEFAULT_STORAGE, self.TVB_STORAGE)
self.version = VersionSettings(self.manager, self.BIN_FOLDER)
#The path to the matlab executable (if existent). Otherwise just return an empty string.
value = self.manager.get_attribute(stored.KEY_MATLAB_EXECUTABLE, '', str) or ''
if value == 'None':
value = ''
self.MATLAB_EXECUTABLE = value
# Maximum number of vertices acceptable o be part of a surface at import time.
self.MAX_SURFACE_VERTICES_NUMBER = self.manager.get_attribute(stored.KEY_MAX_NR_SURFACE_VERTEX, 300000, int)
# Max number of ops that can be scheduled from UI in a PSE. To be correlated with the oarsub limitations
self.MAX_RANGE_NUMBER = self.manager.get_attribute(stored.KEY_MAX_RANGE_NR, 2000, int)
# Max number of threads in the pool of ops running in parallel. TO be correlated with CPU cores
self.MAX_THREADS_NUMBER = self.manager.get_attribute(stored.KEY_MAX_THREAD_NR, 4, int)
#The maximum disk space that can be used by one single user, in KB.
self.MAX_DISK_SPACE = self.manager.get_attribute(stored.KEY_MAX_DISK_SPACE_USR, 5 * 1024 * 1024, int)
## Configure Traits
self.TRAITS_CONFIGURATION = EnhancedDictionary()
self.TRAITS_CONFIGURATION.interface_method_name = 'interface'
self.TRAITS_CONFIGURATION.use_storage = True
@property
def BIN_FOLDER(self):
"""
        Return the path towards the tvb_bin location. It is used in some environments to determine the starting point.
"""
try:
import tvb_bin
return os.path.dirname(os.path.abspath(tvb_bin.__file__))
except ImportError:
return "."
@property
def PYTHON_EXE_NAME(self):
"""
Returns the name of the python executable depending on the specific OS
"""
if self.env.is_windows():
return 'python.exe'
else:
return 'python'
@property
def PYTHON_PATH(self):
"""
Get Python path, based on current environment.
"""
exe_name = self.PYTHON_EXE_NAME
if self.env.is_development():
python_path = 'python'
elif self.env.is_windows_deployment() or self.env.is_linux_deployment():
python_path = os.path.join(os.path.dirname(self.BIN_FOLDER), 'exe', exe_name)
elif self.env.is_mac_deployment():
root_dir = os.path.dirname(os.path.dirname(os.path.dirname(os.path.dirname(self.BIN_FOLDER))))
python_path = os.path.join(root_dir, 'MacOS', exe_name)
else:
python_path = 'python'
try:
# check if file actually exists
os.stat(python_path)
return python_path
except:
# otherwise best guess is the current interpreter!
return sys.executable
def prepare_for_operation_mode(self):
"""
Overwrite PostgreSQL number of connections when executed in the context of a node.
"""
self.db.MAX_CONNECTIONS = self.db.MAX_ASYNC_CONNECTIONS
self.cluster.IN_OPERATION_EXECUTION_PROCESS = True
def initialize_profile(self):
"""
Make sure tvb folders are created.
"""
if not os.path.exists(self.TVB_LOG_FOLDER):
os.makedirs(self.TVB_LOG_FOLDER)
if not os.path.exists(self.TVB_TEMP_FOLDER):
os.makedirs(self.TVB_TEMP_FOLDER)
if not os.path.exists(self.TVB_STORAGE):
os.makedirs(self.TVB_STORAGE)
def initialize_for_deployment(self):
library_folder = self.env.get_library_folder(self.BIN_FOLDER)
if self.env.is_windows_deployment():
# Add self.TVB_PATH as first in PYTHONPATH so we can find TVB there in case of GIT contributors
self.env.setup_python_path(self.TVB_PATH, library_folder, os.path.join(library_folder, 'lib-tk'))
self.env.append_to_path(library_folder)
self.env.setup_tk_tcl_environ(library_folder)
if self.env.is_mac_deployment():
# MacOS package structure is in the form:
# Contents/Resorces/lib/python2.7/tvb . PYTHONPATH needs to be set
# at the level Contents/Resources/lib/python2.7/ and the root path
# from where to start looking for TK and TCL up to Contents/
tcl_root = os.path.dirname(os.path.dirname(os.path.dirname(library_folder)))
self.env.setup_tk_tcl_environ(tcl_root)
self.env.setup_python_path(self.TVB_PATH, library_folder, os.path.join(library_folder, 'site-packages.zip'),
os.path.join(library_folder, 'lib-dynload'))
if self.env.is_linux_deployment():
# Note that for the Linux package some environment variables like LD_LIBRARY_PATH,
# LD_RUN_PATH, PYTHONPATH and PYTHONHOME are set also in the startup scripts.
self.env.setup_python_path(self.TVB_PATH, library_folder, os.path.join(library_folder, 'lib-tk'))
self.env.setup_tk_tcl_environ(library_folder)
### Correctly set MatplotLib Path, before start.
mpl_data_path_maybe = os.path.join(library_folder, 'mpl-data')
try:
os.stat(mpl_data_path_maybe)
os.environ['MATPLOTLIBDATA'] = mpl_data_path_maybe
except:
pass
if self.TVB_PATH:
# In case of contributor setup, we want to make sure that all dev files are loaded first, so
# we need to reload all tvb related modules, since any call done with
# 'python -m ...' will consider the current folder as the first to search in.
sys.path = os.environ.get("PYTHONPATH", "").split(os.pathsep) + sys.path
for key in sys.modules.keys():
if (key.startswith("tvb") and sys.modules[key] and
not key.startswith("tvb.basic.profile") and not 'profile_settings' in key):
try:
reload(sys.modules[key])
except LibraryImportError:
pass
class LibrarySettingsProfile(BaseSettingsProfile):
"""
Profile used when scientific library is used without storage and without web UI.
"""
TVB_STORAGE = os.path.expanduser(os.path.join("~", "TVB" + os.sep))
LOGGER_CONFIG_FILE_NAME = "library_logger.conf"
def __init__(self):
super(LibrarySettingsProfile, self).__init__(False)
## Configure Traits
self.TRAITS_CONFIGURATION = EnhancedDictionary()
self.TRAITS_CONFIGURATION.interface_method_name = 'interface'
self.TRAITS_CONFIGURATION.use_storage = False
def initialize_profile(self):
"""
Make sure some warning are thrown when trying to import from framework.
"""
super(LibrarySettingsProfile, self).initialize_profile()
sys.meta_path.append(LibraryModulesFinder())
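# Usage sketch (hypothetical entry point; the real one depends on the TVB
# distribution scripts): library users typically instantiate a profile and make
# sure its folders exist before doing anything else, e.g.
#
#     profile = LibrarySettingsProfile()
#     profile.initialize_profile()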
class TestLibraryProfile(LibrarySettingsProfile):
"""
Profile for library unit-tests.
"""
LOGGER_CONFIG_FILE_NAME = "library_logger_test.conf"
def __init__(self):
super(TestLibraryProfile, self).__init__()
self.TVB_LOG_FOLDER = "TEST_OUTPUT"
| gpl-2.0 | 8,064,859,632,375,847,000 | 39.104247 | 120 | 0.652932 | false |
trevor/calendarserver | contrib/performance/_event_change.py | 1 | 3820 | ##
# Copyright (c) 2010-2014 Apple Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##
"""
Benchmark a server's handling of event summary changes.
"""
from itertools import count
from urllib2 import HTTPDigestAuthHandler
from twisted.internet import reactor
from twisted.internet.defer import inlineCallbacks, returnValue
from twisted.web.client import Agent
from twisted.web.http_headers import Headers
from twisted.web.http import NO_CONTENT
from httpauth import AuthHandlerAgent
from httpclient import StringProducer
from benchlib import initialize, sample
from _event_create import makeEvent
@inlineCallbacks
def measure(host, port, dtrace, attendeeCount, samples, fieldName,
replacer, eventPerSample=False):
user = password = "user01"
root = "/"
principal = "/"
calendar = "event-%s-benchmark" % (fieldName,)
authinfo = HTTPDigestAuthHandler()
authinfo.add_password(
realm="Test Realm",
uri="http://%s:%d/" % (host, port),
user=user,
passwd=password)
agent = AuthHandlerAgent(Agent(reactor), authinfo)
# Set up the calendar first
yield initialize(agent, host, port, user, password, root, principal, calendar)
if eventPerSample:
# Create an event for each sample that will be taken, so that no event
# is used for two different samples.
f = _selfish_sample
else:
# Just create one event and re-use it for all samples.
f = _generous_sample
data = yield f(
dtrace, replacer, agent, host, port, user, calendar, fieldName,
attendeeCount, samples)
returnValue(data)
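# Hypothetical invocation sketch (real values come from the benchmark harness,
# and this would run inside an inlineCallbacks-decorated function; the SUMMARY
# rewrite is only an illustration of a `replacer`):
#
#     data = yield measure('localhost', 8008, dtrace, attendeeCount=5,
#                          samples=100, fieldName='summary',
#                          replacer=lambda event, i: event.replace(
#                              'SUMMARY:', 'SUMMARY:change-%d ' % (i,)))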
@inlineCallbacks
def _selfish_sample(dtrace, replacer, agent, host, port, user, calendar, fieldName, attendeeCount, samples):
url = 'http://%s:%s/calendars/__uids__/%s/%s/%s-change-%%d.ics' % (
host, port, user, calendar, fieldName)
headers = Headers({"content-type": ["text/calendar"]})
events = [
# The organizerSequence here (1) may need to be a parameter.
# See also the makeEvent call below.
(makeEvent(i, 1, attendeeCount), url % (i,))
for i in range(samples)]
for (event, url) in events:
yield agent.request('PUT', url, headers, StringProducer(event))
# Sample changing the event according to the replacer.
samples = yield sample(
dtrace, samples,
agent, (('PUT', url, headers, StringProducer(replacer(event, i)))
for i, (event, url)
in enumerate(events)).next,
NO_CONTENT)
returnValue(samples)
@inlineCallbacks
def _generous_sample(dtrace, replacer, agent, host, port, user, calendar, fieldName, attendeeCount, samples):
url = 'http://%s:%s/calendars/__uids__/%s/%s/%s-change.ics' % (
host, port, user, calendar, fieldName)
headers = Headers({"content-type": ["text/calendar"]})
# See the makeEvent call above.
event = makeEvent(0, 1, attendeeCount)
yield agent.request('PUT', url, headers, StringProducer(event))
# Sample changing the event according to the replacer.
samples = yield sample(
dtrace, samples,
agent, (('PUT', url, headers, StringProducer(replacer(event, i)))
for i in count(1)).next,
NO_CONTENT)
returnValue(samples)
| apache-2.0 | -8,568,640,147,147,008,000 | 31.649573 | 109 | 0.674607 | false |
QudevETH/PycQED_py3 | pycqed/simulations/pauli_transfer_matrices.py | 1 | 5903 | import numpy as np
"""
This file contains pauli transfer matrices for all basic qubit operations.
"""
I = np.eye(4)
# Pauli group
X = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, -1]], dtype=int)
Y = np.array([[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, 1, 0],
[0, 0, 0, -1]], dtype=int)
Z = np.array([[1, 0, 0, 0],
[0, -1, 0, 0],
[0, 0, -1, 0],
[0, 0, 0, 1]], dtype=int)
# Exchange group
S = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 1, 0, 0],
[0, 0, 1, 0]], dtype=int)
S2 = np.dot(S, S)
# Hadamard group
H = np.array([[1, 0, 0, 0],
[0, 0, 0, 1],
[0, 0, -1, 0],
[0, 1, 0, 0]], dtype=int)
CZ = np.array([
[1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0],
[0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
[0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
dtype=int)
# CZ = np.array([[ 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0],
# [ 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, -1, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1, 0, 0, 0],
# [ 0, 1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, -1, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
# [ 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 1]],
# dtype=int)
def X_theta(theta:float, unit='deg'):
"""
PTM of rotation of theta degrees along the X axis
"""
if unit=='deg':
theta = np.deg2rad(theta)
X = np.array([[1, 0, 0, 0],
[0, 1, 0, 0],
[0, 0, np.cos(theta), -np.sin(theta)],
[0, 0, np.sin(theta), np.cos(theta)]], dtype=float)
return X
def Y_theta(theta:float, unit='deg'):
"""
    PTM of rotation of theta degrees along the Y axis
"""
if unit=='deg':
theta = np.deg2rad(theta)
Y = np.array([[1, 0, 0, 0],
[0, np.cos(theta), 0, np.sin(theta)],
[0, 0, 1, 0],
[0, -np.sin(theta), 0, np.cos(theta)]], dtype=float)
return Y
def Z_theta(theta:float, unit='deg'):
"""
    PTM of rotation of theta degrees along the Z axis
"""
if unit=='deg':
theta = np.deg2rad(theta)
Z = np.array([[1, 0, 0, 0],
[0, np.cos(theta), -np.sin(theta), 0],
[0, np.sin(theta), np.cos(theta), 0],
[0, 0, 0, 1]], dtype=float)
return Z
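# Sanity-check sketch: a 180 degree rotation reproduces the corresponding Pauli
# PTM up to floating point error, i.e. np.allclose(X_theta(180), X),
# np.allclose(Y_theta(180), Y) and np.allclose(Z_theta(180), Z) all hold.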
##############################################################################
#
##############################################################################
def process_fidelity(ptm_0, ptm_1, d: int=None):
"""
Calculates the average process fidelity between two pauli transfer matrices
Args:
ptm_0 (array) : n*n array specifying the first pauli transfer matrix
ptm_1 (array) : n*n array specifying the second pauli transfer matrix
d (int) : dimension of the Hilbert space
returns:
F (float) : Process fidelity
"""
    if d is None:
d = np.shape(ptm_0)[0]**0.5
return np.dot(ptm_0.T, ptm_1).trace()/(d**2)
def average_gate_fidelity(ptm_0, ptm_1, d: int=None):
"""
Calculates the average average gate fidelity between two pauli transfer
matrices
Args:
ptm_0 (array) : n*n array specifying the first pauli transfer matrix
ptm_1 (array) : n*n array specifying the second pauli transfer matrix
d (int) : dimension of the Hilbert space
returns:
F_gate (float): Average gate fidelity
"""
    if d is None:
d = np.shape(ptm_0)[0]**0.5
F_pro = process_fidelity(ptm_0, ptm_1, d)
F_avg_gate = process_fid_to_avg_gate_fid(F_pro, d)
return F_avg_gate
def process_fid_to_avg_gate_fid(F_pro: float, d:int):
"""
    Converts a process fidelity F_pro into the corresponding average gate
    fidelity for a d-dimensional Hilbert space.
"""
F_avg_gate = (d*F_pro+1)/(d+1)
return F_avg_gate
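# Minimal usage sketch (illustrative): PTMs compose by matrix multiplication,
# so conjugating X by Hadamards gives Z, and the fidelity helpers report a
# perfect match for identical channels.
if __name__ == '__main__':
    _composed = np.dot(H, np.dot(X, H))
    assert np.allclose(_composed, Z)
    assert np.isclose(average_gate_fidelity(_composed, Z), 1.0)
    assert np.isclose(process_fidelity(CZ, CZ), 1.0)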
| mit | -5,739,007,006,011,545,000 | 35.214724 | 83 | 0.360325 | false |
pasztorpisti/json-cfg | src/jsoncfg/config_classes.py | 1 | 17008 | import numbers
from collections import OrderedDict, namedtuple
from .compatibility import my_basestring
from .exceptions import JSONConfigException
_undefined = object()
class JSONConfigQueryError(JSONConfigException):
"""
The base class of every exceptions thrown by this library during config queries.
"""
def __init__(self, config_node, message):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
"""
self.config_node = config_node
self.line, self.column = node_location(config_node)
message += ' [line=%s;col=%s]' % (self.line, self.column)
super(JSONConfigQueryError, self).__init__(message)
class JSONConfigValueMapperError(JSONConfigQueryError):
"""
This is raised when someone fetches a value by specifying the "mapper" parameter
and the mapper function raises an exception. That exception is converted into this one.
"""
def __init__(self, config_node, mapper_exception):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
:param mapper_exception: The exception instance that was raised during conversion.
It can be anything...
"""
super(JSONConfigValueMapperError, self).__init__(config_node,
'Error converting json value: ' +
str(mapper_exception))
self.mapper_exception = mapper_exception
class JSONConfigValueNotFoundError(JSONConfigQueryError):
"""
Raised when the user tries to fetch a value that doesn't exist in the config.
"""
def __init__(self, value_not_found):
"""
:param value_not_found: A ValueNotFoundNode instance. Let's say that you query the
config.servers[1].ip_address() value from the config but the config.servers array
has only one item. In this case a JSONConfigValueNotFoundError is raised and
value_not_found._parent_config_node is set to config.servers (that is the last existing
component from our query path) and self.relative_path will be '[1].ip_address'.
This way the error location points to the config.servers node and the error message
says that you wanted to query it with the '[1].ip_address' relative_path that doesn't
exist.
:type value_not_found: ValueNotFoundNode
"""
self.value_not_found = value_not_found
path = []
for component in value_not_found._missing_query_path:
if isinstance(component, numbers.Integral):
path.append('[%s]' % component)
else:
path.append('.' + component)
self.relative_path = ''.join(path)
# TODO: improve the error message: it is possible to do so based on the info we have
message = 'Required config node not found. Missing query path: %s'\
' (relative to error location)' % self.relative_path
super(JSONConfigValueNotFoundError, self).__init__(value_not_found._parent_config_node,
message)
class JSONConfigNodeTypeError(JSONConfigQueryError):
"""
This error is raised when you try to handle a config node by assuming its type
to be something else than its actual type. For example you are trying to iterate
over the key-value pairs of a value that is not json object.
"""
def __init__(self, config_node, expected_type, error_message=None):
"""
:param config_node: An instance of one of the subclasses of _ConfigNode.
:param expected_type: The expected type or a tuple/list of expected types.
"""
found_type_name = config_node.__class__.__name__
if not isinstance(expected_type, (list, tuple)):
expected_type = (expected_type,)
expected_names = [t.__name__ for t in expected_type]
message = 'Expected a %s but found %s.' % (' or '.join(expected_names), found_type_name)
if error_message is not None:
message += ' %s' % (error_message,)
super(JSONConfigNodeTypeError, self).__init__(config_node, message)
class JSONConfigIndexError(JSONConfigQueryError):
"""
This is raised when you try to index into an array node and the index is out of range. Indexing
into a different kind of node (object, scalar) doesn't raise this.
"""
def __init__(self, config_json_array, index):
self.index = index
message = 'Index (%s) is out of range [0, %s)' % (index, len(config_json_array))
super(JSONConfigIndexError, self).__init__(config_json_array, message)
class JSONValueMapper(object):
def __call__(self, json_value):
raise NotImplementedError()
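# Sketch of a concrete mapper (illustrative; ToInt is not part of the shipped
# API):
#
#     class ToInt(JSONValueMapper):
#         def __call__(self, json_value):
#             return int(json_value)
#
# Instances of such mappers are passed to the value fetchers, e.g.
# config.port(8080, ToInt()), and are applied left to right.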
def _process_value_fetcher_call_args(args):
"""
This function processes the incoming varargs of ValueNotFoundNode.__call__() and
_ConfigNode.__call__().
:param args: A list or tuple containing positional function call arguments. The optional
arguments we expect are the following: An optional default value followed by zero or more
JSONValueMapper instances.
:return: (default_value, list_or_tuple_of_JSONValueMapper_instances)
The default_value is _undefined if it is not present and the second item of the tuple is
an empty tuple/list if there are not JSONValueMapper instances.
"""
if not args:
return _undefined, ()
if isinstance(args[0], JSONValueMapper):
default = _undefined
mappers = args
else:
default = args[0]
mappers = args[1:]
for mapper in mappers:
if not isinstance(mapper, JSONValueMapper):
raise TypeError('%r isn\'t a JSONValueMapper instance!' % (mapper,))
return default, mappers
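# Behaviour sketch (to_int and clamp stand for hypothetical JSONValueMapper
# instances): args == (8080, to_int, clamp) returns (8080, (to_int, clamp)),
# while args == (to_int,) leaves the default as _undefined and returns
# (_undefined, (to_int,)).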
class ValueNotFoundNode(object):
def __init__(self, parent_config_node, missing_query_path):
"""
If the user issues a config query like config.servers[2].ip_address but there is only
one server in the config (so config.servers[2] doesn't exist) then the existing part
of the query path is config.servers and the missing part is [2].ip_address. In this case
parent_config_node will be the last node of the existing part, in this case the servers
array, and the missing_query_path is [2].ip_address.
:param parent_config_node: The last existing config_node on the query path issued
by the user. missing_query_path is the non-existing part of the query path and it
is relative to the parent_config_node.
:param missing_query_path: The non-existing part (suffix) of the query path issued
by the user. This is relative to parent_config_node.
"""
self._parent_config_node = parent_config_node
self._missing_query_path = missing_query_path
def __call__(self, *args):
"""
This function expects the exact same parameters as _ConfigNode.__call__():
An optional default value followed by zero or more JSONValueMapper instances.
Since this is a not-found-node we know that this wrapper object doesn't contain any
json value so the mapper arguments are ignored.
If a default value is provided then we return it otherwise we raise an exception since
the user tries to fetch a required value that isn't in the config file.
"""
default, _ = _process_value_fetcher_call_args(args)
if default is _undefined:
raise JSONConfigValueNotFoundError(self)
return default
def __getattr__(self, item):
return self.__getitem__(item)
def __getitem__(self, item):
return ValueNotFoundNode(self._parent_config_node, self._missing_query_path + [item])
def __len__(self):
raise JSONConfigValueNotFoundError(self)
def __iter__(self):
raise JSONConfigValueNotFoundError(self)
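# Behaviour sketch: querying a missing path such as config.servers[2].ip_address
# yields a ValueNotFoundNode, so config.servers[2].ip_address('0.0.0.0') returns
# the default, while calling it without a default raises
# JSONConfigValueNotFoundError.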
class ConfigNode(object):
"""
Base class for the actual classes whose instances build up the config
object hierarchy wrapping the actual json objects/arrays/scalars.
Note that this class and its subclasses should have only private members
with names that start with '_' because the keys in the json config
can be accessed using the member operator (dot) and the members of the
config node class instances should not conflict with the keys in the
config files.
"""
def __init__(self, line, column):
"""
:param line: Zero based line number. (Add 1 for human readable error reporting).
:param column: Zero based column number. (Add 1 for human readable error reporting).
"""
super(ConfigNode, self).__init__()
self._line = line
self._column = column
def __call__(self, *args):
"""
This function will fetch the wrapped json value from this wrapper config node.
We expect the following optional arguments:
An optional default value followed by zero or more JSONValueMapper instances.
Since this is not a not-found-node we know that there is a wrapped json value so the
default value is ignored. If we have JSONValueMapper instances then we apply them to
the wrapped json value in left-to-right order before returning the json value.
"""
_, mappers = _process_value_fetcher_call_args(args)
value = self._fetch_unwrapped_value()
try:
for mapper in mappers:
value = mapper(value)
except Exception as e:
raise JSONConfigValueMapperError(self, e)
return value
def _fetch_unwrapped_value(self):
raise NotImplementedError()
class ConfigJSONScalar(ConfigNode):
def __init__(self, value, line, column):
super(ConfigJSONScalar, self).__init__(line, column)
self.value = value
def __getattr__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from a scalar as if it was an object. item=%s' % (item,)
)
def __getitem__(self, item):
if not isinstance(item, (my_basestring, numbers.Integral)):
raise TypeError('You are allowed to index only with string or integer.')
if isinstance(item, numbers.Integral):
raise JSONConfigNodeTypeError(
self,
ConfigJSONArray,
'You are trying to index into a scalar as if it was an array. index=%s' % (item,)
)
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from a scalar as if it was an object. item=%s' % (item,)
)
def __contains__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to access the __contains__ magic method of a scalar config object.'
)
def __len__(self):
raise JSONConfigNodeTypeError(
self,
(ConfigJSONObject, ConfigJSONArray),
'You are trying to access the __len__ of a scalar config object.'
)
def __iter__(self):
raise JSONConfigNodeTypeError(
self,
(ConfigJSONObject, ConfigJSONArray),
'You are trying to iterate a scalar value.'
)
def __repr__(self):
return '%s(value=%r, line=%r, column=%r)' % (self.__class__.__name__,
self.value, self._line, self._column)
def _fetch_unwrapped_value(self):
return self.value
class ConfigJSONObject(ConfigNode):
def __init__(self, line, column):
super(ConfigJSONObject, self).__init__(line, column)
self._dict = OrderedDict()
def __getattr__(self, item):
return self.__getitem__(item)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
raise JSONConfigNodeTypeError(
self,
ConfigJSONArray,
'You are trying to index into an object as if it was an array. index=%s' % (item,)
)
if not isinstance(item, my_basestring):
raise TypeError('You are allowed to index only with string or integer.')
if item in self._dict:
return self._dict[item]
return ValueNotFoundNode(self, [item])
def __contains__(self, item):
return item in self._dict
def __len__(self):
return len(self._dict)
def __iter__(self):
return iter(self._dict.items())
def __repr__(self):
return '%s(len=%r, line=%r, column=%r)' % (self.__class__.__name__,
len(self), self._line, self._column)
def _fetch_unwrapped_value(self):
return dict((key, node._fetch_unwrapped_value()) for key, node in self._dict.items())
def _insert(self, key, value):
self._dict[key] = value
class ConfigJSONArray(ConfigNode):
def __init__(self, line, column):
super(ConfigJSONArray, self).__init__(line, column)
self._list = []
def __getattr__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from an array as if it was an object. item=%s' % (item,)
)
def __getitem__(self, item):
if isinstance(item, numbers.Integral):
if item < 0:
item += len(self._list)
if 0 <= item < len(self._list):
return self._list[item]
raise JSONConfigIndexError(self, item)
if not isinstance(item, my_basestring):
raise TypeError('You are allowed to index only with string or integer.')
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to get an item from an array as if it was an object. item=%s' % (item,)
)
def __contains__(self, item):
raise JSONConfigNodeTypeError(
self,
ConfigJSONObject,
'You are trying to access the __contains__ magic method of an array.'
)
def __len__(self):
return len(self._list)
def __iter__(self):
return iter(self._list)
def __repr__(self):
return '%s(len=%r, line=%r, column=%r)' % (self.__class__.__name__,
len(self), self._line, self._column)
def _fetch_unwrapped_value(self):
return [node._fetch_unwrapped_value() for node in self._list]
def _append(self, item):
self._list.append(item)
_NodeLocation = namedtuple('NodeLocation', 'line column')
def node_location(config_node):
""" Returns the location of this node in the file as a tuple (line, column).
Both line and column are 1 based. """
if isinstance(config_node, ConfigNode):
return _NodeLocation(config_node._line, config_node._column)
if isinstance(config_node, ValueNotFoundNode):
raise JSONConfigValueNotFoundError(config_node)
raise TypeError('Expected a config node but received a %s instance.' %
type(config_node).__name__)
def node_exists(config_node):
""" Returns True if the specified config node
refers to an existing config entry. """
return isinstance(config_node, ConfigNode)
def node_is_object(config_node):
""" Returns True if the specified config node refers
to an existing config entry that is a json object (dict). """
return isinstance(config_node, ConfigJSONObject)
def node_is_array(config_node):
""" Returns True if the specified config node refers
to an existing config entry that is a json array (list). """
return isinstance(config_node, ConfigJSONArray)
def node_is_scalar(config_node):
""" Returns True if the specified config node refers to an existing config
entry that isn't a json object (dict) or array (list) but something else. """
return isinstance(config_node, ConfigJSONScalar)
def _guarantee_node_class(config_node, node_class):
if isinstance(config_node, node_class):
return config_node
if isinstance(config_node, ValueNotFoundNode):
raise JSONConfigValueNotFoundError(config_node)
if isinstance(config_node, ConfigNode):
raise JSONConfigNodeTypeError(config_node, node_class)
raise TypeError('Expected a %s or %s instance but received %s.' % (
ConfigNode.__name__, ValueNotFoundNode.__name__, config_node.__class__.__name__))
def ensure_exists(config_node):
return _guarantee_node_class(config_node, ConfigNode)
def expect_object(config_node):
return _guarantee_node_class(config_node, ConfigJSONObject)
def expect_array(config_node):
return _guarantee_node_class(config_node, ConfigJSONArray)
def expect_scalar(config_node):
return _guarantee_node_class(config_node, ConfigJSONScalar)
| mit | 5,494,451,623,099,229,000 | 38.18894 | 99 | 0.630821 | false |
ETegro/ETConf | partners/admin.py | 1 | 2086 | # ETConf -- web-based user-friendly computer hardware configurator
# Copyright (C) 2010-2011 ETegro Technologies, PLC <http://etegro.com/>
# Sergey Matveev <[email protected]>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU Affero General Public License as
# published by the Free Software Foundation, either version 3 of the
# License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Affero General Public License for more details.
#
# You should have received a copy of the GNU Affero General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
from django.contrib import admin
from configurator.partners.models import *
from django.contrib.auth.models import User
from django.utils.translation import ugettext as _
from django.core.urlresolvers import reverse
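# Admin customizations for the partners app: plain list displays for personal
# managers and cities, plus a PartnerProfileAdmin with two computed columns
# (generic_formula and link_to_user) defined further down.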
class PersonalManagerAdmin( admin.ModelAdmin ):
ordering = [ "name" ]
list_display = [ "name" ]
class CityAdmin( admin.ModelAdmin ):
ordering = [ "name" ]
list_display = [ "name" ]
class PartnerProfileAdmin( admin.ModelAdmin ):
list_display = [ "company_name",
"city",
"generic_formula",
"discount_action",
"personal_manager",
"link_to_user" ]
list_filter = [ "personal_manager" ]
ordering = [ "user" ]
def link_to_user( self, profile ):
return "<a href=\"%s\">%s</a>" % ( reverse( "admin:auth_user_change", args = [ profile.user.id ] ),
profile.user.username )
def generic_formula( self, profile ):
return profile.discount( None )[2]
link_to_user.allow_tags = True
link_to_user.short_description = _("User")
generic_formula.allow_tags = True
generic_formula.short_description = _("Formula")
admin.site.register( PartnerProfile, PartnerProfileAdmin )
admin.site.register( City, CityAdmin )
admin.site.register( PersonalManager, PersonalManagerAdmin )
| agpl-3.0 | -1,614,639,578,225,776,600 | 38.358491 | 101 | 0.722915 | false |
sphoebs/rockshell | pipeline/util.py | 1 | 6975 | #!/usr/bin/python2.5
#
# Copyright 2010 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Utility functions for use with the Google App Engine Pipeline API."""
__all__ = ["for_name",
"JsonEncoder",
"JsonDecoder"]
#pylint: disable=g-bad-name
import datetime
import inspect
import logging
import os
# Relative imports
import simplejson
# pylint: disable=protected-access
def _get_task_target():
"""Get the default target for a pipeline task.
Current version id format is: user_defined_version.minor_version_number
Current module id is just the module's name. It could be "default"
Returns:
A complete target name is of format version.module. If module is the
default module, just version. None if target can not be determined.
"""
# Break circular dependency.
# pylint: disable=g-import-not-at-top
import pipeline
if pipeline._TEST_MODE:
return None
  # Further protect against test cases that don't set env vars
  # properly.
if ("CURRENT_VERSION_ID" not in os.environ or
"CURRENT_MODULE_ID" not in os.environ):
logging.warning("Running Pipeline in non TEST_MODE but important "
"env vars are not set.")
return None
version = os.environ["CURRENT_VERSION_ID"].split(".")[0]
module = os.environ["CURRENT_MODULE_ID"]
if module == "default":
return version
return "%s.%s" % (version, module)
def for_name(fq_name, recursive=False):
"""Find class/function/method specified by its fully qualified name.
Fully qualified can be specified as:
* <module_name>.<class_name>
* <module_name>.<function_name>
* <module_name>.<class_name>.<method_name> (an unbound method will be
returned in this case).
for_name works by doing __import__ for <module_name>, and looks for
<class_name>/<function_name> in module's __dict__/attrs. If fully qualified
name doesn't contain '.', the current module will be used.
Args:
fq_name: fully qualified name of something to find
Returns:
class object.
Raises:
ImportError: when specified module could not be loaded or the class
was not found in the module.
"""
fq_name = str(fq_name)
module_name = __name__
short_name = fq_name
if fq_name.rfind(".") >= 0:
(module_name, short_name) = (fq_name[:fq_name.rfind(".")],
fq_name[fq_name.rfind(".") + 1:])
try:
result = __import__(module_name, None, None, [short_name])
return result.__dict__[short_name]
except KeyError:
# If we're recursively inside a for_name() chain, then we want to raise
# this error as a key error so we can report the actual source of the
# problem. If we're *not* recursively being called, that means the
# module was found and the specific item could not be loaded, and thus
# we want to raise an ImportError directly.
if recursive:
raise
else:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError, e:
# module_name is not actually a module. Try for_name for it to figure
# out what's this.
try:
module = for_name(module_name, recursive=True)
if hasattr(module, short_name):
return getattr(module, short_name)
else:
# The module was found, but the function component is missing.
raise KeyError()
except KeyError:
raise ImportError("Could not find '%s' on path '%s'" % (
short_name, module_name))
except ImportError:
# This means recursive import attempts failed, thus we will raise the
# first ImportError we encountered, since it's likely the most accurate.
pass
# Raise the original import error that caused all of this, since it is
# likely the real cause of the overall problem.
raise
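# Illustrative usage (standard library names used only as examples):
# for_name("os.path.join") returns the join function from os.path, and
# for_name("logging.Logger.debug") returns the unbound Logger.debug method;
# an unresolvable path raises ImportError.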
def is_generator_function(obj):
"""Return true if the object is a user-defined generator function.
Generator function objects provides same attributes as functions.
See isfunction.__doc__ for attributes listing.
Adapted from Python 2.6.
Args:
obj: an object to test.
Returns:
    True if the object is a generator function.
"""
CO_GENERATOR = 0x20
return bool(((inspect.isfunction(obj) or inspect.ismethod(obj)) and
obj.func_code.co_flags & CO_GENERATOR))
class JsonEncoder(simplejson.JSONEncoder):
"""Pipeline customized json encoder."""
TYPE_ID = "__pipeline_json_type"
def default(self, o):
"""Inherit docs."""
if type(o) in _TYPE_TO_ENCODER:
encoder = _TYPE_TO_ENCODER[type(o)]
json_struct = encoder(o)
json_struct[self.TYPE_ID] = type(o).__name__
return json_struct
return super(JsonEncoder, self).default(o)
class JsonDecoder(simplejson.JSONDecoder):
"""Pipeline customized json decoder."""
def __init__(self, **kwargs):
if "object_hook" not in kwargs:
kwargs["object_hook"] = self._dict_to_obj
super(JsonDecoder, self).__init__(**kwargs)
def _dict_to_obj(self, d):
"""Converts a dictionary of json object to a Python object."""
if JsonEncoder.TYPE_ID not in d:
return d
type_name = d.pop(JsonEncoder.TYPE_ID)
if type_name in _TYPE_NAME_TO_DECODER:
decoder = _TYPE_NAME_TO_DECODER[type_name]
return decoder(d)
else:
raise TypeError("Invalid type %s.", type_name)
_DATETIME_FORMAT = "%Y-%m-%d %H:%M:%S.%f"
def _json_encode_datetime(o):
"""Json encode a datetime object.
Args:
o: a datetime object.
Returns:
A dict of json primitives.
"""
return {"isostr": o.strftime(_DATETIME_FORMAT)}
def _json_decode_datetime(d):
"""Converts a dict of json primitives to a datetime object."""
return datetime.datetime.strptime(d["isostr"], _DATETIME_FORMAT)
def _register_json_primitive(object_type, encoder, decoder):
"""Extend what Pipeline can serialize.
Args:
object_type: type of the object.
encoder: a function that takes in an object and returns
a dict of json primitives.
decoder: inverse function of encoder.
"""
global _TYPE_TO_ENCODER
global _TYPE_NAME_TO_DECODER
if object_type not in _TYPE_TO_ENCODER:
_TYPE_TO_ENCODER[object_type] = encoder
_TYPE_NAME_TO_DECODER[object_type.__name__] = decoder
_TYPE_TO_ENCODER = {}
_TYPE_NAME_TO_DECODER = {}
_register_json_primitive(datetime.datetime,
_json_encode_datetime,
_json_decode_datetime)
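# A minimal round-trip sketch (illustrative, not part of the original module):
#
#   payload = {"when": datetime.datetime(2012, 1, 2, 3, 4, 5)}
#   text = simplejson.dumps(payload, cls=JsonEncoder)
#   restored = simplejson.loads(text, cls=JsonDecoder)
#   assert restored["when"] == payload["when"]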
| apache-2.0 | 1,058,982,152,402,288,800 | 29.458515 | 78 | 0.66724 | false |
alan-ra/gameTrade | gt/admin.py | 1 | 1330 | from django.contrib import admin
#from django.contrib.auth.admin import UserAdmin
from gt.models import *
#from gt.forms import *
# Register your models here.
class KindAdmin(admin.ModelAdmin):
model = Kind
list_display = ['group', 'description']
admin.site.register(Kind, KindAdmin)
class DevAdmin(admin.ModelAdmin):
model = Developers
list_display = ['name']
admin.site.register(Developers, DevAdmin)
class GamesAdmin(admin.ModelAdmin):
model = Games
list_display = ['title', 'year', 'description']
search_fields = ['title', 'year']
admin.site.register(Games, GamesAdmin)
class ConsolesAdmin(admin.ModelAdmin):
model = Consoles
list_display = ['name', 'year', 'picture', 'description']
search_fields = ['name', 'year']
admin.site.register(Consoles, ConsolesAdmin)
class GenresAdmin(admin.ModelAdmin):
model = Genres
list_display = ['description']
search_fields = ['description']
admin.site.register(Genres, GenresAdmin)
admin.site.register(Users)
class AddressAdmin(admin.ModelAdmin):
model = Address
list_display = ['street', 'number', 'complement', 'district', 'zip_code', 'city', 'uf', 'country']
admin.site.register(Address)
admin.site.register(Game_Console)
admin.site.register(User_Game)
admin.site.register(Game_Rating)
admin.site.register(Trades)
| gpl-3.0 | 7,569,309,597,738,633,000 | 25.6 | 102 | 0.717293 | false |
IA-MP/KnightTour | libs/graph/Graph.py | 1 | 11556 | from libs.graph.DLinkedList import Queue, DoubledLinkedList as List
from libs.graph.PriorityQueue import PriorityQueueBinary as PriorityQueue
from libs.graph.Tree import *
#it is better to use a DoubledLinkedList to operate with great efficiency on
#the lists that will be used in the graph representation
class Node:
def __init__(self, elem, index, weight = None):
"""
this class represents a graph node
:param elem: an object stored into the node
:param index: int, the index by which the node may be identified
:param weight: int, the weight of the node and of his object - may not be used
"""
self._elem = elem
self._index = index
self._weight = weight
self._token = None #used to mark each node during a generic visit
self._distance = 0 #used to set and retrieve the distance of the node in the visit
        self._knights = 0 #used to keep track of the knights in the node
self._knights_arrived = []
def get_elem(self):
"""
:return: object stored in the node
"""
return self._elem
def get_index(self):
"""
:return: int, the index of the node
"""
return self._index
def get_weight(self):
"""
:return: int, the weight of the node
"""
return self._weight
def get_token(self):
"""
:return: int, the token of the node
"""
return self._token
def set_token(self, token):
"""
:param token: int, the validation token
:return: int, the token of the node
"""
self._token = token
def get_node(self):
"""
:return: tuple, (index, elem, weight)
"""
return self.get_elem(), self.get_weight()
def set_distance(self, dist):
"""
        this function adds dist to the node's cumulative distance (and counts one more
        knight on the node) in order to provide a good interface for BFS and Dijkstra shortest-path algorithms
:param dist: int, distance
:return: None
"""
self._distance += dist
self._knights += 1
def get_distance(self):
"""
:return: int, the distance calculated for the node
"""
return self._distance
def get_count(self):
"""
:return: int, the number of knights
"""
return self._knights
#I'll use an adjacency-list Graph because all the arcs have unit weight
class GraphAdjacenceList:
def __init__(self):
"""
this class represents a graph using an adjacency list style
"""
self._nodes = dict() #to store the nodes
        self._adjacency = dict() #to link the nodes to their adjacency list
self._nextId = 0 #it will be used to store the nodes - id > 0
self._nodes_elems = dict() #it will be used to store the elems inserted
def getNodes(self):
"""
this function is used as an interface to retrieve graph's nodes
:return: (dictionary, dictionary) the nodes and their adjacency lists
"""
return self._nodes, self._adjacency
def insertNode(self, elem, weight = None):
"""
this function allows the user to insert a node into the graph
:param elem: the elem to be stored into the node
:param weight: the weight of the node
:return: Node, the node already inserted or just inserted
"""
if elem in self._nodes_elems:
            #if a node has already been inserted it will be returned
            #assuming the computational cost of this check, as it is implemented in python
            #(a dict lookup), to be O(1)
return self._nodes_elems[elem]
newNode = Node(elem, self._nextId, weight)
self._nodes[newNode.get_index()] = newNode
self._adjacency[newNode.get_index()] = List()
self._nextId += 1
#storing the elem just inserted
self._nodes_elems[elem] = newNode
return newNode
def linkNode(self, tail, head):
"""
this function links two nodes in a direct connection
:param tail: Node, the tail node
:param head: Node, the head node
:return: None
"""
adj = self._adjacency[tail.get_index()]
if head not in adj.getLastAddedList():
#assuming direct memory access... (see previous method)
adj.addAsLast(head)
def printGraph(self):
"""
        this function prints a well formatted visualization of the nodes
        :return: None, the adjacency list of each node is printed to stdout
"""
print("Adjacency Lists:")
for identifier in self._nodes:
print("node", self._nodes[identifier].get_elem(), self._nodes[identifier].get_weight())
self._adjacency[identifier].printList()
print("")
#The chessboard's graph has unit-weight arcs, so we can use a Breadth First Search to return the list of all the
#minimum-path trees, each starting from a knight
def validateNodes(self, token):
"""
        this function validates all nodes with a token value in order to accomplish the visit
:param token: int, the token value to validate the node. 0 if not visited, 21 if explored and 42 (for Douglas) if closed
:return: None
"""
nodes = self.getNodes()[0]
for node in nodes.itervalues():
node.set_token(token)
def visitBFS(self, node):
"""
        this is a Breadth First Search starting from a vertex. Please note that all the operations are done on the leaves
        to let the algorithm be more modular (it doesn't seem to affect the computational time, which remains proportional
        to the dimension of the graph)
:param node: Node, the starting vertex
:return: Tree, representing the visit path
"""
#initializing some useful constants (funny constants too)
unexplored = 0
explored = 21
closed = 42 #So long and thanks for all the fish!
#validating all the nodes as unexplored and starting from the vertex
self.validateNodes(unexplored)
node.set_token(explored)
#initializing the tree containing the only vertex
T_root = Leaf(node)
T_root.setDistance(0.0) #using the float - it is not a counter value
T = Tree(T_root)
#initializing the fringe of the visit
F = Queue()
F.enqueue(T_root)
while not F.isEmpty():
u = F.dequeue()
n = u.getElem()
n.set_token(closed)
for v in self._adjacency[n.get_index()].getLastAddedList():
if v.get_token() == unexplored:
v.set_token(explored)
l = Leaf(v)
F.enqueue(l)
T.insertLeaf(l, u)
return T
def visitNodesBFS(self, Nodes):
"""
this is a simple implementation of a Breadth First Search algorithm to visit the graph
starting from a selected group of nodes
:param Nodes: Node list containing the nodes from which start the visit
:return: list of Trees, the list of all the visits
"""
T_list = []
for node in Nodes:
tree = self.visitBFS(node)
T_list.append(tree)
return T_list
#it is interesting to achieve the same result using minimum path algorithm of Dijkstra
def Dijkstra(self, node):
"""
this is a Dijstra shortest path algorithm implementation starting from a vertex
:param node: Node, the starting vertex
:return: Tree, the shortest paths tree
"""
INF = float('inf')
self.validateNodes(INF)
#we will use the nodes' tokens to store the distance info!
node.set_token(0.0) #0-distance from itself!
#initializing the tree
T_root = Leaf(node)
T_root.setDistance(node.get_token())
T = Tree(T_root)
#initializing a dictionary to keep trace of the leaves
leaves = dict()
leaves[node] = T_root
        #initializing the priority queue to maintain the fringe
PQ = PriorityQueue()
PQ.insert(T_root, node.get_token())
while not PQ.isEmpty():
u = PQ.deleteMin() #retrieving the min node from the leaf
n = u.getElem()
for v in self._adjacency[n.get_index()].getLastAddedList():
if v.get_token() == INF:
l = Leaf(v)
leaves[v] = l #updating the leaves' dictionary
PQ.insert(l, n.get_token() + 1.0) #each edge will be unitary-cost
v.set_token(n.get_token() + 1.0)
T.insertLeaf(l, u)
elif n.get_token() + 1.0 < v.get_token():
relaxed = n.get_token() + 1.0
leaves[v].setDistance(relaxed)
#updating the tree... (we are now saving in the priority queue the leaves)
leaves[v].setFather(u)
leaves[n].addSon(leaves[v])
#updating the priority queue
PQ.decreaseKey(leaves[v], relaxed)
v.set_token(relaxed)
return T
def visitDijkstra(self, Nodes):
"""
this is an implementation of the Dijkstra algorithm to visit the graph
starting from a selected group of nodes
:param Nodes: Node list containing the nodes from which start the visit
:return: list of Trees, the list of all the visits
"""
T_list = []
for node in Nodes:
tree = self.Dijkstra(node)
T_list.append(tree)
return T_list
#Pay attention!
# -Bellman condition to decide a shortest path -> for each node it is O(k*n) where k is node's degree
    # -save all the available paths in a tree instead of a list of lists -> O(n) (if it is possible...)
    # -the chessboard graph is a directed graph with all the arcs costing a single unit
    #  (please note that it is necessary to consider each knight's own k-value in order to calculate
# the move number!!)
# -general purpose: in python2.7 the infinite is... INF = float('inf') -> comparisons using floats
def FloydWarshall(self):
"""
        this is a simple implementation of the Floyd-Warshall algorithm using O(n^2) space
        but O(n^3) computational complexity. Please note that in our case the chessboard graph
        has unit-weight arcs
:return: list of lists, matrix of the distances between two vertices
"""
INF = float('inf')
nodes, adjacency = self.getNodes() #getting the dictionaries
indexes = nodes.keys() #it is the same to access the two dictionaries
dim = len(indexes)
#initializing the matrix
dist = [[INF for m in range(dim)] for n in range(dim)]
for i in range(dim):
ind = indexes[i]
dist[ind][ind] = 0.0
adj_nodes = adjacency[ind].getLastAddedList()
for adj in adj_nodes:
to_ind = adj.get_index()
dist[ind][to_ind] = 1.0
#executing the dinamic programming algorithm
for k in range(dim):
for i in range(dim):
for j in range(dim):
if dist[i][k] != INF and dist[k][j] != INF and dist[i][k] + dist[k][j] < dist[i][j]:
dist[i][j] = dist[i][k] + dist[k][j]
return dist
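# A small usage sketch (illustrative only; element names are arbitrary):
#
#   g = GraphAdjacenceList()
#   a = g.insertNode("a1"); b = g.insertNode("b3"); c = g.insertNode("c2")
#   g.linkNode(a, b); g.linkNode(b, c)
#   bfs_tree = g.visitBFS(a)               # shortest-path tree from a single vertex
#   dijkstra_trees = g.visitDijkstra([a])  # one tree per starting node
#   distances = g.FloydWarshall()          # all-pairs distance matrix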
| mit | -5,495,444,753,568,681,000 | 37.013158 | 128 | 0.588958 | false |
beiko-lab/gengis | bin/Lib/site-packages/scipy/linalg/tests/test_fblas.py | 1 | 18114 | # Test interfaces to fortran blas.
#
# The tests are more of interface than they are of the underlying blas.
# Only very small matrices checked -- N=3 or so.
#
# !! Complex calculations really aren't checked that carefully.
# !! Only real valued complex numbers are used in tests.
from __future__ import division, print_function, absolute_import
from numpy import float32, float64, complex64, complex128, arange, array, \
zeros, shape, transpose, newaxis, common_type, conjugate
from scipy.linalg import _fblas as fblas
from scipy.lib.six.moves import xrange
from numpy.testing import TestCase, run_module_suite, assert_array_equal, \
assert_array_almost_equal, assert_
# decimal accuracy to require between Python and LAPACK/BLAS calculations
accuracy = 5
# Since numpy.dot likely uses the same blas, use this routine
# to check.
def matrixmultiply(a, b):
if len(b.shape) == 1:
b_is_vector = True
b = b[:,newaxis]
else:
b_is_vector = False
assert_(a.shape[1] == b.shape[0])
c = zeros((a.shape[0], b.shape[1]), common_type(a, b))
for i in xrange(a.shape[0]):
for j in xrange(b.shape[1]):
s = 0
for k in xrange(a.shape[1]):
s += a[i,k] * b[k, j]
c[i,j] = s
if b_is_vector:
c = c.reshape((a.shape[0],))
return c
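# Sanity example (illustrative): multiplying the 2x2 identity matrix by the
# vector [1., 2.] with matrixmultiply returns [1., 2.], matching numpy.dot.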
##################################################
### Test blas ?axpy
class BaseAxpy(object):
''' Mixin class for axpy tests '''
def test_default_a(self):
x = arange(3.,dtype=self.dtype)
y = arange(3.,dtype=x.dtype)
real_y = x*1.+y
self.blas_func(x,y)
assert_array_equal(real_y,y)
def test_simple(self):
x = arange(3.,dtype=self.dtype)
y = arange(3.,dtype=x.dtype)
real_y = x*3.+y
self.blas_func(x,y,a=3.)
assert_array_equal(real_y,y)
def test_x_stride(self):
x = arange(6.,dtype=self.dtype)
y = zeros(3,x.dtype)
y = arange(3.,dtype=x.dtype)
real_y = x[::2]*3.+y
self.blas_func(x,y,a=3.,n=3,incx=2)
assert_array_equal(real_y,y)
def test_y_stride(self):
x = arange(3.,dtype=self.dtype)
y = zeros(6,x.dtype)
real_y = x*3.+y[::2]
self.blas_func(x,y,a=3.,n=3,incy=2)
assert_array_equal(real_y,y[::2])
def test_x_and_y_stride(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
real_y = x[::4]*3.+y[::2]
self.blas_func(x,y,a=3.,n=3,incx=4,incy=2)
assert_array_equal(real_y,y[::2])
def test_x_bad_size(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
try:
self.blas_func(x,y,n=4,incx=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
def test_y_bad_size(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
try:
self.blas_func(x,y,n=3,incy=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
try:
class TestSaxpy(TestCase, BaseAxpy):
blas_func = fblas.saxpy
dtype = float32
except AttributeError:
class TestSaxpy:
pass
class TestDaxpy(TestCase, BaseAxpy):
blas_func = fblas.daxpy
dtype = float64
try:
class TestCaxpy(TestCase, BaseAxpy):
blas_func = fblas.caxpy
dtype = complex64
except AttributeError:
class TestCaxpy:
pass
class TestZaxpy(TestCase, BaseAxpy):
blas_func = fblas.zaxpy
dtype = complex128
##################################################
### Test blas ?scal
class BaseScal(object):
''' Mixin class for scal testing '''
def test_simple(self):
x = arange(3.,dtype=self.dtype)
real_x = x*3.
self.blas_func(3.,x)
assert_array_equal(real_x,x)
def test_x_stride(self):
x = arange(6.,dtype=self.dtype)
real_x = x.copy()
real_x[::2] = x[::2]*array(3.,self.dtype)
self.blas_func(3.,x,n=3,incx=2)
assert_array_equal(real_x,x)
def test_x_bad_size(self):
x = arange(12.,dtype=self.dtype)
try:
self.blas_func(2.,x,n=4,incx=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
try:
class TestSscal(TestCase, BaseScal):
blas_func = fblas.sscal
dtype = float32
except AttributeError:
class TestSscal:
pass
class TestDscal(TestCase, BaseScal):
blas_func = fblas.dscal
dtype = float64
try:
class TestCscal(TestCase, BaseScal):
blas_func = fblas.cscal
dtype = complex64
except AttributeError:
class TestCscal:
pass
class TestZscal(TestCase, BaseScal):
blas_func = fblas.zscal
dtype = complex128
##################################################
### Test blas ?copy
class BaseCopy(object):
''' Mixin class for copy testing '''
def test_simple(self):
x = arange(3.,dtype=self.dtype)
y = zeros(shape(x),x.dtype)
self.blas_func(x,y)
assert_array_equal(x,y)
def test_x_stride(self):
x = arange(6.,dtype=self.dtype)
y = zeros(3,x.dtype)
self.blas_func(x,y,n=3,incx=2)
assert_array_equal(x[::2],y)
def test_y_stride(self):
x = arange(3.,dtype=self.dtype)
y = zeros(6,x.dtype)
self.blas_func(x,y,n=3,incy=2)
assert_array_equal(x,y[::2])
def test_x_and_y_stride(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
self.blas_func(x,y,n=3,incx=4,incy=2)
assert_array_equal(x[::4],y[::2])
def test_x_bad_size(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
try:
self.blas_func(x,y,n=4,incx=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
def test_y_bad_size(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
try:
self.blas_func(x,y,n=3,incy=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
# def test_y_bad_type(self):
## Hmmm. Should this work? What should be the output.
# x = arange(3.,dtype=self.dtype)
# y = zeros(shape(x))
# self.blas_func(x,y)
# assert_array_equal(x,y)
try:
class TestScopy(TestCase, BaseCopy):
blas_func = fblas.scopy
dtype = float32
except AttributeError:
class TestScopy:
pass
class TestDcopy(TestCase, BaseCopy):
blas_func = fblas.dcopy
dtype = float64
try:
class TestCcopy(TestCase, BaseCopy):
blas_func = fblas.ccopy
dtype = complex64
except AttributeError:
class TestCcopy:
pass
class TestZcopy(TestCase, BaseCopy):
blas_func = fblas.zcopy
dtype = complex128
##################################################
### Test blas ?swap
class BaseSwap(object):
''' Mixin class for swap tests '''
def test_simple(self):
x = arange(3.,dtype=self.dtype)
y = zeros(shape(x),x.dtype)
desired_x = y.copy()
desired_y = x.copy()
self.blas_func(x,y)
assert_array_equal(desired_x,x)
assert_array_equal(desired_y,y)
def test_x_stride(self):
x = arange(6.,dtype=self.dtype)
y = zeros(3,x.dtype)
desired_x = y.copy()
desired_y = x.copy()[::2]
self.blas_func(x,y,n=3,incx=2)
assert_array_equal(desired_x,x[::2])
assert_array_equal(desired_y,y)
def test_y_stride(self):
x = arange(3.,dtype=self.dtype)
y = zeros(6,x.dtype)
desired_x = y.copy()[::2]
desired_y = x.copy()
self.blas_func(x,y,n=3,incy=2)
assert_array_equal(desired_x,x)
assert_array_equal(desired_y,y[::2])
def test_x_and_y_stride(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
desired_x = y.copy()[::2]
desired_y = x.copy()[::4]
self.blas_func(x,y,n=3,incx=4,incy=2)
assert_array_equal(desired_x,x[::4])
assert_array_equal(desired_y,y[::2])
def test_x_bad_size(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
try:
self.blas_func(x,y,n=4,incx=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
def test_y_bad_size(self):
x = arange(12.,dtype=self.dtype)
y = zeros(6,x.dtype)
try:
self.blas_func(x,y,n=3,incy=5)
except: # what kind of error should be caught?
return
# should catch error and never get here
assert_(0)
try:
class TestSswap(TestCase, BaseSwap):
blas_func = fblas.sswap
dtype = float32
except AttributeError:
class TestSswap:
pass
class TestDswap(TestCase, BaseSwap):
blas_func = fblas.dswap
dtype = float64
try:
class TestCswap(TestCase, BaseSwap):
blas_func = fblas.cswap
dtype = complex64
except AttributeError:
class TestCswap:
pass
class TestZswap(TestCase, BaseSwap):
blas_func = fblas.zswap
dtype = complex128
##################################################
### Test blas ?gemv
### This will be a mess to test all cases.
class BaseGemv(object):
''' Mixin class for gemv tests '''
def get_data(self,x_stride=1,y_stride=1):
mult = array(1, dtype=self.dtype)
if self.dtype in [complex64, complex128]:
mult = array(1+1j, dtype=self.dtype)
from numpy.random import normal, seed
seed(1234)
alpha = array(1., dtype=self.dtype) * mult
beta = array(1.,dtype=self.dtype) * mult
a = normal(0.,1.,(3,3)).astype(self.dtype) * mult
x = arange(shape(a)[0]*x_stride,dtype=self.dtype) * mult
y = arange(shape(a)[1]*y_stride,dtype=self.dtype) * mult
return alpha,beta,a,x,y
def test_simple(self):
alpha,beta,a,x,y = self.get_data()
desired_y = alpha*matrixmultiply(a,x)+beta*y
y = self.blas_func(alpha,a,x,beta,y)
assert_array_almost_equal(desired_y,y)
def test_default_beta_y(self):
alpha,beta,a,x,y = self.get_data()
desired_y = matrixmultiply(a,x)
y = self.blas_func(1,a,x)
assert_array_almost_equal(desired_y,y)
def test_simple_transpose(self):
alpha,beta,a,x,y = self.get_data()
desired_y = alpha*matrixmultiply(transpose(a),x)+beta*y
y = self.blas_func(alpha,a,x,beta,y,trans=1)
assert_array_almost_equal(desired_y,y)
def test_simple_transpose_conj(self):
alpha,beta,a,x,y = self.get_data()
desired_y = alpha*matrixmultiply(transpose(conjugate(a)),x)+beta*y
y = self.blas_func(alpha,a,x,beta,y,trans=2)
assert_array_almost_equal(desired_y,y)
def test_x_stride(self):
alpha,beta,a,x,y = self.get_data(x_stride=2)
desired_y = alpha*matrixmultiply(a,x[::2])+beta*y
y = self.blas_func(alpha,a,x,beta,y,incx=2)
assert_array_almost_equal(desired_y,y)
def test_x_stride_transpose(self):
alpha,beta,a,x,y = self.get_data(x_stride=2)
desired_y = alpha*matrixmultiply(transpose(a),x[::2])+beta*y
y = self.blas_func(alpha,a,x,beta,y,trans=1,incx=2)
assert_array_almost_equal(desired_y, y)
def test_x_stride_assert(self):
# What is the use of this test?
alpha,beta,a,x,y = self.get_data(x_stride=2)
try:
y = self.blas_func(1,a,x,1,y,trans=0,incx=3)
assert_(0)
except:
pass
try:
y = self.blas_func(1,a,x,1,y,trans=1,incx=3)
assert_(0)
except:
pass
def test_y_stride(self):
alpha,beta,a,x,y = self.get_data(y_stride=2)
desired_y = y.copy()
desired_y[::2] = alpha*matrixmultiply(a,x)+beta*y[::2]
y = self.blas_func(alpha,a,x,beta,y,incy=2)
assert_array_almost_equal(desired_y,y)
def test_y_stride_transpose(self):
alpha,beta,a,x,y = self.get_data(y_stride=2)
desired_y = y.copy()
desired_y[::2] = alpha*matrixmultiply(transpose(a),x)+beta*y[::2]
y = self.blas_func(alpha,a,x,beta,y,trans=1,incy=2)
assert_array_almost_equal(desired_y,y)
def test_y_stride_assert(self):
# What is the use of this test?
alpha,beta,a,x,y = self.get_data(y_stride=2)
try:
y = self.blas_func(1,a,x,1,y,trans=0,incy=3)
assert_(0)
except:
pass
try:
y = self.blas_func(1,a,x,1,y,trans=1,incy=3)
assert_(0)
except:
pass
try:
class TestSgemv(TestCase, BaseGemv):
blas_func = fblas.sgemv
dtype = float32
except AttributeError:
class TestSgemv:
pass
class TestDgemv(TestCase, BaseGemv):
blas_func = fblas.dgemv
dtype = float64
try:
class TestCgemv(TestCase, BaseGemv):
blas_func = fblas.cgemv
dtype = complex64
except AttributeError:
class TestCgemv:
pass
class TestZgemv(TestCase, BaseGemv):
blas_func = fblas.zgemv
dtype = complex128
"""
##################################################
### Test blas ?ger
### This will be a mess to test all cases.
class BaseGer(TestCase):
def get_data(self,x_stride=1,y_stride=1):
from numpy.random import normal, seed
seed(1234)
alpha = array(1., dtype = self.dtype)
a = normal(0.,1.,(3,3)).astype(self.dtype)
x = arange(shape(a)[0]*x_stride,dtype=self.dtype)
y = arange(shape(a)[1]*y_stride,dtype=self.dtype)
return alpha,a,x,y
def test_simple(self):
alpha,a,x,y = self.get_data()
        # transpose takes care of Fortran vs. C(and Python) memory layout
desired_a = alpha*transpose(x[:,newaxis]*y) + a
self.blas_func(x,y,a)
assert_array_almost_equal(desired_a,a)
def test_x_stride(self):
alpha,a,x,y = self.get_data(x_stride=2)
desired_a = alpha*transpose(x[::2,newaxis]*y) + a
self.blas_func(x,y,a,incx=2)
assert_array_almost_equal(desired_a,a)
def test_x_stride_assert(self):
alpha,a,x,y = self.get_data(x_stride=2)
try:
self.blas_func(x,y,a,incx=3)
assert(0)
except:
pass
def test_y_stride(self):
alpha,a,x,y = self.get_data(y_stride=2)
desired_a = alpha*transpose(x[:,newaxis]*y[::2]) + a
self.blas_func(x,y,a,incy=2)
assert_array_almost_equal(desired_a,a)
def test_y_stride_assert(self):
alpha,a,x,y = self.get_data(y_stride=2)
try:
self.blas_func(a,x,y,incy=3)
assert(0)
except:
pass
class TestSger(BaseGer):
blas_func = fblas.sger
dtype = float32
class TestDger(BaseGer):
blas_func = fblas.dger
dtype = float64
"""
##################################################
### Test blas ?gerc
### This will be a mess to test all cases.
"""
class BaseGerComplex(BaseGer):
def get_data(self,x_stride=1,y_stride=1):
from numpy.random import normal, seed
seed(1234)
alpha = array(1+1j, dtype = self.dtype)
a = normal(0.,1.,(3,3)).astype(self.dtype)
a = a + normal(0.,1.,(3,3)) * array(1j, dtype = self.dtype)
x = normal(0.,1.,shape(a)[0]*x_stride).astype(self.dtype)
x = x + x * array(1j, dtype = self.dtype)
y = normal(0.,1.,shape(a)[1]*y_stride).astype(self.dtype)
y = y + y * array(1j, dtype = self.dtype)
return alpha,a,x,y
def test_simple(self):
alpha,a,x,y = self.get_data()
        # transpose takes care of Fortran vs. C(and Python) memory layout
a = a * array(0.,dtype = self.dtype)
#desired_a = alpha*transpose(x[:,newaxis]*self.transform(y)) + a
desired_a = alpha*transpose(x[:,newaxis]*y) + a
#self.blas_func(x,y,a,alpha = alpha)
fblas.cgeru(x,y,a,alpha = alpha)
assert_array_almost_equal(desired_a,a)
#def test_x_stride(self):
# alpha,a,x,y = self.get_data(x_stride=2)
# desired_a = alpha*transpose(x[::2,newaxis]*self.transform(y)) + a
# self.blas_func(x,y,a,incx=2)
# assert_array_almost_equal(desired_a,a)
#def test_y_stride(self):
# alpha,a,x,y = self.get_data(y_stride=2)
# desired_a = alpha*transpose(x[:,newaxis]*self.transform(y[::2])) + a
# self.blas_func(x,y,a,incy=2)
# assert_array_almost_equal(desired_a,a)
class TestCgeru(BaseGerComplex):
blas_func = fblas.cgeru
dtype = complex64
def transform(self,x):
return x
class TestZgeru(BaseGerComplex):
blas_func = fblas.zgeru
dtype = complex128
def transform(self,x):
return x
class TestCgerc(BaseGerComplex):
blas_func = fblas.cgerc
dtype = complex64
def transform(self,x):
return conjugate(x)
class TestZgerc(BaseGerComplex):
blas_func = fblas.zgerc
dtype = complex128
def transform(self,x):
return conjugate(x)
"""
if __name__ == "__main__":
run_module_suite()
| gpl-3.0 | -1,383,043,062,920,820,000 | 27.990066 | 77 | 0.548471 | false |
jelmer/bzr-rewrite | pseudonyms.py | 1 | 6392 | # Copyright (C) 2009 by Jelmer Vernooij <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
"""Revision pseudonyms."""
from __future__ import absolute_import
from collections import defaultdict
import urllib
from bzrlib import (
errors,
foreign,
ui,
)
def parse_git_svn_id(text):
"""Parse a git svn id string.
:param text: git svn id
:return: URL, revision number, uuid
"""
(head, uuid) = text.rsplit(" ", 1)
(full_url, rev) = head.rsplit("@", 1)
return (full_url.encode("utf-8"), int(rev), uuid.encode("utf-8"))
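# Illustrative (hypothetical values): parsing
# "svn://svn.example.org/repo/trunk@1234 2d0d3a70-uuid" yields
# ("svn://svn.example.org/repo/trunk", 1234, "2d0d3a70-uuid").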
class SubversionBranchUrlFinder(object):
def __init__(self):
self._roots = defaultdict(set)
def find_root(self, uuid, url):
for root in self._roots[uuid]:
if url.startswith(root):
return root
try:
from subvertpy.ra import RemoteAccess
except ImportError:
return None
c = RemoteAccess(url)
root = c.get_repos_root()
self._roots[uuid].add(root)
return root
def find_branch_path(self, uuid, url):
root = self.find_root(uuid, url)
if root is None:
return None
assert url.startswith(root)
return url[len(root):].strip("/")
svn_branch_path_finder = SubversionBranchUrlFinder()
def _extract_converted_from_revid(rev):
if not "converted-from" in rev.properties:
return
for line in rev.properties.get("converted-from", "").splitlines():
(kind, serialized_foreign_revid) = line.split(" ", 1)
yield (kind, serialized_foreign_revid)
def _extract_cscvs(rev):
"""Older-style launchpad-cscvs import."""
if not "cscvs-svn-branch-path" in rev.properties:
return
yield ("svn", "%s:%s:%s" % (
rev.properties["cscvs-svn-repository-uuid"],
rev.properties["cscvs-svn-revision-number"],
urllib.quote(rev.properties["cscvs-svn-branch-path"].strip("/"))))
def _extract_git_svn_id(rev):
if not "git-svn-id" in rev.properties:
return
(full_url, revnum, uuid) = parse_git_svn_id(rev.properties['git-svn-id'])
branch_path = svn_branch_path_finder.find_branch_path(uuid, full_url)
if branch_path is not None:
yield ("svn", "%s:%d:%s" % (uuid, revnum, urllib.quote(branch_path)))
def _extract_foreign_revision(rev):
# Perhaps 'rev' is a foreign revision ?
if getattr(rev, "foreign_revid", None) is not None:
yield ("svn", rev.mapping.vcs.serialize_foreign_revid(rev.foreign_revid))
def _extract_foreign_revid(rev):
# Try parsing the revision id
try:
foreign_revid, mapping = \
foreign.foreign_vcs_registry.parse_revision_id(rev.revision_id)
except errors.InvalidRevisionId:
pass
else:
yield (mapping.vcs.abbreviation,
mapping.vcs.serialize_foreign_revid(foreign_revid))
def _extract_debian_md5sum(rev):
if 'deb-md5' in rev.properties:
yield ("debian-md5sum", rev.properties["deb-md5"])
_foreign_revid_extractors = [
_extract_converted_from_revid,
_extract_cscvs,
_extract_git_svn_id,
_extract_foreign_revision,
_extract_foreign_revid,
_extract_debian_md5sum,
]
def extract_foreign_revids(rev):
"""Find ids of semi-equivalent revisions in foreign VCS'es.
    :param rev: Bazaar revision object
:return: Set with semi-equivalent revisions.
"""
ret = set()
for extractor in _foreign_revid_extractors:
ret.update(extractor(rev))
return ret
def find_pseudonyms(repository, revids):
"""Find revisions that are pseudonyms of each other.
:param repository: Repository object
:param revids: Sequence of revision ids to check
:return: Iterable over sets of pseudonyms
"""
# Where have foreign revids ended up?
conversions = defaultdict(set)
# What are native revids conversions of?
conversion_of = defaultdict(set)
revs = repository.get_revisions(revids)
pb = ui.ui_factory.nested_progress_bar()
try:
for i, rev in enumerate(revs):
pb.update("finding pseudonyms", i, len(revs))
for foreign_revid in extract_foreign_revids(rev):
conversion_of[rev.revision_id].add(foreign_revid)
conversions[foreign_revid].add(rev.revision_id)
finally:
pb.finished()
done = set()
for foreign_revid in conversions.keys():
ret = set()
check = set(conversions[foreign_revid])
while check:
x = check.pop()
extra = set()
for frevid in conversion_of[x]:
extra.update(conversions[frevid])
del conversions[frevid]
del conversion_of[x]
check.update(extra)
ret.add(x)
if len(ret) > 1:
yield ret
def pseudonyms_as_dict(l):
"""Convert an iterable over pseudonyms to a dictionary.
:param l: Iterable over sets of pseudonyms
:return: Dictionary with pseudonyms for each revid.
"""
ret = {}
for pns in l:
for pn in pns:
ret[pn] = pns - set([pn])
return ret
def generate_rebase_map_from_pseudonyms(pseudonym_dict, existing, desired):
"""Create a rebase map from pseudonyms and existing/desired ancestry.
:param pseudonym_dict: Dictionary with pseudonym as returned by
pseudonyms_as_dict()
:param existing: Existing ancestry, might need to be rebased
:param desired: Desired ancestry
:return: rebase map, as dictionary
"""
rebase_map = {}
for revid in existing:
for pn in pseudonym_dict.get(revid, []):
if pn in desired:
rebase_map[revid] = pn
return rebase_map
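# A minimal wiring sketch (illustrative; repository and revid lists assumed to exist):
#
#   pseudonym_dict = pseudonyms_as_dict(find_pseudonyms(repository, all_revids))
#   rebase_map = generate_rebase_map_from_pseudonyms(
#       pseudonym_dict, existing_revids, desired_revids)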
| gpl-2.0 | 4,091,328,492,961,752,000 | 29.583732 | 81 | 0.638767 | false |
rupalkhilari/approval_frame | approval_polls/tests.py | 1 | 26820 | import datetime
from django.contrib.auth.models import User
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from django.utils import timezone
from approval_polls.models import Poll, Choice
def create_poll(question, username="user1", days=0, ballots=0,
vtype=2, close_date=None, is_private=False, is_suspended=False):
"""
Creates a poll with the given `question` published the given number of
`days` offset to now (negative for polls published in the past,
positive for polls that have yet to be published), and user as the
foreign key pointing to the user model. Defaults to vote type 2 for
this poll (1 - Does not require authentication to vote, 2 - Requires
authentication to vote, 3 - Email Invitation to vote).
"""
poll = Poll.objects.create(
question=question,
pub_date=timezone.now() + datetime.timedelta(days=days),
user=User.objects.create_user(username, ''.join([username, '@example.com']), 'test'),
vtype=vtype,
close_date=close_date,
is_private=is_private,
is_suspended=is_suspended
)
for _ in range(ballots):
create_ballot(poll)
return poll
def create_ballot(poll, timestamp=None, ip='127.0.0.1'):
    """
    Creates a ballot for the given `poll`, submitted at `timestamp` by `ip`.
    The timestamp defaults to the current time when the ballot is created.
    """
    if timestamp is None:
        timestamp = timezone.now()
    return poll.ballot_set.create(timestamp=timestamp)
def create_vote_invitation(poll, email):
"""
Creates a vote invitation for the given `poll`, with specified `email`
"""
return poll.voteinvitation_set.create(email=email, key='xxx')
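# The helpers above are combined throughout the test cases below (illustrative):
# a poll is built with create_poll(...), extra ballots are attached with
# create_ballot(poll), and email invitees with create_vote_invitation(poll, email).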
class PollIndexTests(TestCase):
def setUp(self):
self.client = Client()
def test_index_view_with_no_polls(self):
"""
If no polls exist, an appropriate message should be displayed.
"""
response = self.client.get(reverse('approval_polls:index'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "No polls are available.")
self.assertQuerysetEqual(response.context['latest_poll_list'], [])
def test_index_view_with_a_past_poll(self):
"""
Polls with a pub_date in the past should be displayed on the
index page.
"""
create_poll(question="Past poll.", days=-30)
response = self.client.get(reverse('approval_polls:index'))
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: Past poll.>']
)
def test_index_view_with_future_poll_and_past_poll(self):
"""
Even if both past and future polls exist, only past polls should be
displayed.
"""
create_poll(question="Past poll.", days=-30, vtype=1)
create_poll(question="Future poll.", username="user2", days=30,
vtype=1)
response = self.client.get(reverse('approval_polls:index'))
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: Past poll.>']
)
def test_index_view_with_two_past_polls(self):
"""
The polls index page may display multiple polls.
"""
create_poll(question="Past poll 1.", days=-30)
create_poll(question="Past poll 2.", username="user2", days=-5)
response = self.client.get(reverse('approval_polls:index'))
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: Past poll 2.>', '<Poll: Past poll 1.>']
)
def test_index_view_with_empty_page(self):
"""
If an empty page of polls is requested, then the last page of
polls is returned.
"""
create_poll(question="Empty page poll.")
response = self.client.get('/approval_polls/?page=2')
self.assertContains(response, '(page 1 of 1)', status_code=200)
class PollDetailTests(TestCase):
def test_detail_view_with_a_future_poll(self):
"""
The detail view of a poll with a pub_date in the future should
return a 404 not found.
"""
future_poll = create_poll(question='Future poll.', days=5)
response = self.client.get(reverse('approval_polls:detail',
args=(future_poll.id,)))
self.assertEqual(response.status_code, 404)
def test_detail_view_with_a_past_poll(self):
"""
The detail view of a poll with a pub_date in the past should display
the poll's question.
"""
past_poll = create_poll(question='Past Poll.', days=-5)
response = self.client.get(reverse('approval_polls:detail',
args=(past_poll.id,)))
self.assertContains(response, past_poll.question, status_code=200)
def test_detail_view_with_a_choice(self):
"""
The detail view of a poll with a choice should display the
choice's text.
"""
poll = create_poll(question='Choice poll.')
poll.choice_set.create(choice_text='Choice text.')
response = self.client.get(reverse('approval_polls:detail',
args=(poll.id,)))
self.assertContains(response, 'Choice text.', status_code=200)
class PollResultsTests(TestCase):
def test_results_view_with_no_ballots(self):
"""
Results page of a poll with a choice shows 0 votes (0%),
0 votes on 0 ballots.
"""
poll = create_poll(question='Choice poll.')
poll.choice_set.create(choice_text='Choice text.')
response = self.client.get(reverse('approval_polls:results',
args=(poll.id,)))
self.assertContains(response, '0 votes (0%)', status_code=200)
self.assertContains(response, '0 votes on 0\n ballots', status_code=200)
def test_results_view_with_ballots(self):
"""
Results page of a poll with a choice and ballots shows the correct
percentage, total vote count, and total ballot count.
"""
poll = create_poll(question='Choice poll.', ballots=1)
choice = poll.choice_set.create(choice_text='Choice text.')
create_ballot(poll).vote_set.create(choice=choice)
response = self.client.get(reverse('approval_polls:results',
args=(poll.id,)))
self.assertContains(response, '1 vote (50%)', status_code=200)
self.assertContains(response, '1 vote on 2\n ballots', status_code=200)
class PollVoteTests(TestCase):
def setUp(self):
self.client = Client()
def test_vote_view_counts_increase(self):
"""
Voting in a poll increases the count for selected choices,
but not for unselected ones, and also increases the ballot count.
"""
poll = create_poll(question='Vote poll.', ballots=80, vtype=1)
choice1 = poll.choice_set.create(choice_text='Choice 1.')
choice2 = poll.choice_set.create(choice_text='Choice 2.')
for _ in range(10):
create_ballot(poll).vote_set.create(choice=choice1)
for _ in range(10):
create_ballot(poll).vote_set.create(choice=choice2)
response = self.client.post('/approval_polls/' + str(poll.id) + '/vote/',
data={'choice2': ''},
follow=True)
self.assertContains(response, '10 votes')
self.assertContains(response, '21 votes')
self.assertContains(response, '101\n ballots', status_code=200)
class MyPollTests(TestCase):
def setUp(self):
self.client = Client()
create_poll(question="question1", days=-5, vtype=1)
create_poll(question="question2", username="user2", days=-5, vtype=1)
def test_redirect_when_not_logged_in(self):
"""
If the user is not logged in then redirect to the login page
"""
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertRedirects(
response,
'/accounts/login/?next=/approval_polls/my-polls/',
status_code=302,
target_status_code=200
)
def test_display_only_user_polls(self):
"""
Only polls created by the logged in user should be displayed.
"""
self.client.login(username='user1', password='test')
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: question1>']
)
class PollCreateTests(TestCase):
def setUp(self):
self.client = Client()
User.objects.create_user('test', '[email protected]', 'test')
self.client.login(username='test', password='test')
def test_create_page_exists(self):
"""
The create a poll form exists.
"""
response = self.client.post('/approval_polls/create/')
self.assertEquals(response.status_code, 200)
def test_create_shows_iframe_code(self):
"""
Creating a new poll shows a HTML snippet to embed the new poll
with an iframe.
"""
poll_data = {
'question': 'Create poll.',
'choice1': 'Choice 1.',
'radio-poll-type': '1',
'token-tags': ''
}
response = self.client.post(
'/approval_polls/create/',
poll_data,
follow=True
)
self.assertEquals(response.status_code, 200)
self.assertTemplateUsed(
response,
'approval_polls/embed_instructions.html'
)
self.assertTrue('/approval_polls/1' in response.context['link'])
def test_create_with_no_question(self):
"""
No question should return an error message.
"""
response = self.client.post(
'/approval_polls/create/',
{'choice1': 'Choice 1.'},
follow=True
)
self.assertContains(
response,
'The question is missing',
status_code=200
)
def test_create_with_blank_question(self):
"""
Blank question should return an error message.
"""
response = self.client.post(
'/approval_polls/create/',
{'question': '', 'choice1': 'Choice 1.'},
follow=True
)
self.assertContains(
response,
'The question is missing',
status_code=200
)
def test_create_skips_blank_choices(self):
"""
A blank choice doesn't appear in the poll (but later ones do)
"""
poll_data = {
'question': 'Create poll.',
'choice1': '',
'choice2': 'Choice 2.',
'radio-poll-type': '1',
'token-tags': ''
}
self.client.post('/approval_polls/create/', poll_data, follow=True)
response = self.client.get('/approval_polls/1/', follow=True)
self.assertContains(response, 'Create poll.', status_code=200)
self.assertNotContains(response, 'Choice 1.')
self.assertContains(response, 'Choice 2.')
self.assertContains(response, 'See Results')
class UserProfileTests(TestCase):
def setUp(self):
self.client = Client()
User.objects.create_user(
'user1',
'[email protected]',
'password123'
)
self.client.login(username='user1', password='password123')
def test_user_profile_show_username(self):
"""
The User Profile page should show the following text:
My User Profile (user1)
"""
response = self.client.get(reverse('approval_polls:my_info'))
self.assertContains(response, "My User Profile (user1)")
def test_user_profile_member_since(self):
response = self.client.get(reverse('approval_polls:my_info'))
stored_date = User.objects.get(username="user1").date_joined
desired_date = timezone.localtime(stored_date)
test_user_date_joined = desired_date.strftime('%B %d, %Y').replace(' 0', ' ')
self.assertContains(
response,
"Member since: " + str(test_user_date_joined)
)
def test_user_profile_last_login(self):
response = self.client.get(reverse('approval_polls:my_info'))
stored_date = User.objects.get(username="user1").last_login
desired_date = timezone.localtime(stored_date)
test_user_last_login = desired_date.strftime('%B %d, %Y').replace(' 0', ' ')
self.assertContains(
response,
"Last Login: " + str(test_user_last_login)
)
def test_show_polls_created_no_polls(self):
response = self.client.get(reverse('approval_polls:my_info'))
html_string = '<p><a href="/approval_polls/my-polls/">Polls I created</a>: 0</p>'
self.assertContains(
response,
html_string,
html=True
)
def test_show_polls_created_one_poll(self):
poll = Poll.objects.create(
question='Which is your favorite color?',
pub_date=timezone.now() + datetime.timedelta(days=0),
user=User.objects.get(username="user1"),
vtype=2
)
for _ in range(0):
create_ballot(poll)
response = self.client.get(reverse('approval_polls:my_info'))
html_string = '<p><a href="/approval_polls/my-polls/">Polls I created</a>: 1</p>'
self.assertContains(
response,
html_string,
html=True
)
class UpdatePollTests(TestCase):
def setUp(self):
self.client = Client()
poll = create_poll(
question='Create Sample Poll.',
close_date=timezone.now() + datetime.timedelta(days=10),
)
poll.choice_set.create(choice_text='Choice 1.')
self.client.login(username='user1', password='test')
choice_data = {
'choice1': 'on',
}
self.client.post(
'/approval_polls/1/vote/',
choice_data,
follow=True
)
def test_poll_details_show_update_button(self):
response = self.client.get('/approval_polls/1/')
self.assertContains(response, 'Update Vote', status_code=200)
def test_poll_details_show_checked_choices(self):
response = self.client.get('/approval_polls/1/')
self.assertQuerysetEqual(
response.context['checked_choices'],
['<Choice: Choice 1.>']
)
def test_poll_details_logout_current_user(self):
self.client.logout()
response = self.client.get('/approval_polls/1/')
self.assertContains(response, 'Login to Vote', status_code=200)
self.assertQuerysetEqual(
response.context['checked_choices'],
[]
)
def test_poll_details_different_user(self):
self.client.logout()
User.objects.create_user(
'user2',
'[email protected]',
'password123'
)
self.client.login(username='user2', password='password123')
response = self.client.get('/approval_polls/1/')
self.assertContains(response, 'Vote', status_code=200)
self.assertQuerysetEqual(
response.context['checked_choices'],
[]
)
def test_poll_details_unselect_checked_choice(self):
self.client.login(username='user1', password='test')
choice_data = {}
self.client.post(
'/approval_polls/1/vote/',
choice_data,
follow=True
)
response = self.client.get('/approval_polls/1/')
self.assertContains(response, 'Vote', status_code=200)
self.assertQuerysetEqual(
response.context['checked_choices'],
[]
)
def test_poll_details_closed_poll(self):
poll_closed = create_poll(
question='Create Closed Poll.',
username='user2',
close_date=timezone.now() + datetime.timedelta(days=-10),
)
self.client.login(username='user2', password='password123')
response = self.client.get(reverse('approval_polls:detail',
args=(poll_closed.id,)))
self.assertContains(
response,
'Sorry! This poll is closed.',
status_code=200
)
class DeletePollTests(TestCase):
def setUp(self):
self.client = Client()
create_poll(question="question1", days=-5, vtype=1)
poll = Poll.objects.create(
question="question2",
pub_date=timezone.now() + datetime.timedelta(days=-10),
user=User.objects.get(username="user1"),
vtype=2,
close_date=None,
)
for _ in range(2):
create_ballot(poll)
self.client.login(username='user1', password='test')
def test_delete_one_poll(self):
self.client.delete(
'/approval_polls/1/',
follow=True
)
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: question2>']
)
response = self.client.get('/approval_polls/1/')
self.assertEqual(response.status_code, 404)
def test_delete_all_polls(self):
self.client.delete(
'/approval_polls/1/',
follow=True
)
self.client.delete(
'/approval_polls/2/',
follow=True
)
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['latest_poll_list'],
[]
)
response = self.client.get('/approval_polls/1/')
self.assertEqual(response.status_code, 404)
response = self.client.get('/approval_polls/2/')
self.assertEqual(response.status_code, 404)
class PollVisibilityTests(TestCase):
def setUp(self):
self.client = Client()
self.client.login(username='user1', password='test')
create_poll(question="public poll", days=-10, vtype=3, ballots=2, is_private=False)
poll = Poll.objects.create(
question="private poll",
pub_date=timezone.now() + datetime.timedelta(days=-10),
user=User.objects.get(username="user1"),
vtype=3,
is_private=True,
)
for _ in range(2):
create_ballot(poll)
User.objects.create_user('user2', '[email protected]', 'test')
def test_public_poll(self):
"""
A poll that is marked public should appear on the home page, and
a private one should not.
"""
response = self.client.get(reverse('approval_polls:index'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: public poll>'],
)
def test_private_poll(self):
"""
A poll that is marked private is visible to its owner, along with
his/her public polls.
"""
self.client.login(username='user1', password='test')
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['latest_poll_list'],
['<Poll: private poll>', '<Poll: public poll>'],
)
def test_private_poll_different_user(self):
"""
A poll that is marked private should not be visible to another user.
"""
self.client.logout()
self.client.login(username='user2', password='test')
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertEqual(response.status_code, 200)
self.assertQuerysetEqual(
response.context['latest_poll_list'],
[],
)
class PollEditTests(TestCase):
def setUp(self):
self.client = Client()
self.poll = create_poll(
question='Create Sample Poll.',
close_date=timezone.now() + datetime.timedelta(days=3),
vtype=3
)
create_vote_invitation(self.poll, email='[email protected]')
self.poll.choice_set.create(choice_text='Choice 1.')
self.choice = Choice.objects.get(poll_id=self.poll.id)
self.client.login(username='user1', password='test')
def test_edit_view_with_invalid_poll(self):
"""
Requesting the edit page of a non-existent poll should
return a 404 not found error.
"""
response = self.client.get(reverse('approval_polls:edit',
args=(10000,)))
self.assertEqual(response.status_code, 404)
def test_edit_view_visible_to_other_user(self):
"""
The edit page of a poll belonging to one user should not be
visible to another user. It should return a permission denied (403) error.
"""
User.objects.create_user('user2', '[email protected]', 'test')
self.client.logout()
self.client.login(username='user2', password='test')
response = self.client.get(reverse('approval_polls:edit',
args=(1,)))
self.assertEqual(response.status_code, 403)
def test_email_invitees_are_returned(self):
"""
The poll's edit page should list email invitees if poll.vtype is 3
"""
response = self.client.get(reverse('approval_polls:edit',
args=(1,)))
self.assertEqual(
response.context['invited_emails'], "[email protected]"
)
def test_new_choices_are_added(self):
'''
New choices should be added in the poll and
existing should be updated
'''
self.client.post(reverse('approval_polls:edit', args=(1,)), {
'choice1': 'xxx',
'linkurl-choice1': 'xxx',
'choice1000': u'BBBBB',
'linkurl-choice1000': u'BBBBBBBB',
'close-datetime': 'bb',
'question': 'q',
'token-tags': ''
})
self.assertEqual(Poll.objects.get(id=self.poll.id).choice_set.count(), 2)
self.assertEqual(Choice.objects.get(id=self.choice.id).choice_text, 'xxx')
response = self.client.get(reverse('approval_polls:edit',
args=(1,)))
self.assertContains(response, "<a href='#' class='add-link' id='link-choice1' \
title='Add link' data-toggle='tooltip' data-placement='bottom'> \
<span class='glyphicon glyphicon-link text-success' ></span> </a>", None, 200, '', True)
def test_can_not_edit_poll(self):
'''
If ballots are on the poll, editing should not happen
'''
create_ballot(self.poll)
response = self.client.get(reverse('approval_polls:edit',
args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertEqual(
response.context['can_edit_poll'], False
)
self.assertContains(response, 'You cannot edit the questions and choices as this poll has got ballots on it!')
self.client.post(reverse('approval_polls:edit', args=(1,)), {
'choice1': 'xxx',
'linkurl-choice1': 'xxx',
'choice1000': u'BBBBB',
'linkurl-choice1000': u'BBBBBBBB',
'close-datetime': 'bb',
'question': 'q',
'token-tags': ''
})
self.assertEqual(Poll.objects.get(id=self.poll.id).choice_set.count(), 1)
self.assertEqual(Choice.objects.get(id=self.choice.id).choice_text, 'Choice 1.')
class SuspendPollTests(TestCase):
def setUp(self):
self.client = Client()
self.poll = create_poll(
question='Create Sample Poll.',
close_date=timezone.now() + datetime.timedelta(days=3),
vtype=3,
is_suspended=True
)
self.poll.choice_set.create(choice_text='Choice 1.')
self.choice = Choice.objects.get(poll_id=self.poll.id)
self.client.login(username='user1', password='test')
def test_suspend_tests(self):
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "id='unsuspend-poll-1'> unsuspend </a>")
self.poll.is_suspended = False
self.poll.save()
response = self.client.get(reverse('approval_polls:my_polls'))
self.assertContains(response, "id='suspend-poll-1'> suspend </a>")
def test_suspended_tests_cannot_vote(self):
response = self.client.get(reverse('approval_polls:detail',
args=(1,)))
self.assertContains(response, "Sorry! This poll has been temporarily suspended.")
self.assertContains(response, "<button class='btn btn-success' type='submit' disabled >Vote</button>")
class TagCloudTests(TestCase):
def setUp(self):
self.client = Client()
self.poll = create_poll(
question='Create Sample Poll.',
close_date=timezone.now() + datetime.timedelta(days=3),
vtype=1
)
self.poll.choice_set.create(choice_text='Choice 1.')
self.poll.add_tags(['New York'])
self.choice = Choice.objects.get(poll_id=self.poll.id)
self.client.login(username='user1', password='test')
def test_poll_tag_exists(self):
response = self.client.get(reverse('approval_polls:detail',
args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, "<a href='/approval_polls/tag/new%20york/'>new york</a>")
def test_poll_tags_index(self):
        print([pt.tag_text for pt in self.poll.polltag_set.all()])
response = self.client.get(reverse('approval_polls:tagged_polls',
args=('New York',)))
self.assertEqual(response.status_code, 200)
self.assertContains(response, '<a href="/approval_polls/1/">Create Sample Poll.</a>')
def test_poll_delete(self):
self.poll.polltag_set.clear()
response = self.client.get(reverse('approval_polls:detail',
args=(1,)))
self.assertEqual(response.status_code, 200)
self.assertNotContains(response, "<a href='/approval_polls/tag/new%20york/'>new york</a>")
| gpl-3.0 | -1,953,277,846,750,225,000 | 35.341463 | 118 | 0.58613 | false |
yahoo/bossmashup | yos/boss/ysearch.py | 1 | 2217 | #Copyright (c) 2011 Yahoo! Inc. All rights reserved. Licensed under the BSD License.
# See accompanying LICENSE file or http://www.opensource.org/licenses/BSD-3-Clause for the specific language governing permissions and limitations under the License.
"""
This is the Boss search API.
search is the main function.
Examples:
web_results = search("britney spears")
news_20_results = search("tiger woods", bucket="news", count=20)
"""
__author__ = "BOSS Team"
from urllib import quote_plus
import oauth2 as oauth
import time
import urllib, urllib2
import simplejson
from yos.crawl import rest
CONFIG = simplejson.load(open("config.json", "r"))
SEARCH_API_URL_V1 = CONFIG["uri_v1"].rstrip("/") + "/%s/v%d/%s?start=%d&count=%d&lang=%s®ion=%s" + "&appid=" + CONFIG["appid"]
SEARCH_API_URL_V2 = CONFIG["uri_v2"]
CC_KEY = CONFIG['cc_key']
CC_SECRET = CONFIG['cc_secret']
SOURCE_TAG = CONFIG['source_tag']
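# Illustrative note (editorial addition, not part of the original source): config.json
# is expected to provide the keys read above. The shape below is an assumption with
# placeholder values only -- real endpoint URLs and credentials depend on your BOSS account.
#
#     {
#         "uri_v1": "<BOSS v1 endpoint>",
#         "uri_v2": "<BOSS v2 endpoint>",
#         "appid": "<application id>",
#         "cc_key": "<consumer key>",
#         "cc_secret": "<consumer secret>",
#         "source_tag": "<source tag>"
#     }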
def params(d):
""" Takes a dictionary of key, value pairs and generates a cgi parameter/argument string """
p = ""
for k, v in d.iteritems():
p += "&%s=%s" % (quote_plus(k), quote_plus(v))
return p
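# Example (illustrative): params({"q": "a b"}) returns "&q=a+b", since both keys
# and values are escaped with quote_plus.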
def search(command, bucket="web", count=10, start=0, more={}):
params = {
'oauth_version': "1.0",
'oauth_nonce': oauth.generate_nonce(),
'oauth_timestamp': int(time.time()),
'q': quote_plus(command),
'count': count,
'start': start,
'format': 'json',
'ads.recentSource': SOURCE_TAG
}
params.update(more)
url = SEARCH_API_URL_V2 + bucket
consumer = oauth.Consumer(CC_KEY,CC_SECRET)
req = oauth.Request(method="GET", url=url, parameters=params)
signature_method = oauth.SignatureMethod_HMAC_SHA1()
req.sign_request(signature_method, consumer, None)
return rest.load_json(req.to_url())
def search_v1(command, vertical="web", version=1, start=0, count=10, lang="en", region="us", more={}):
"""
command is the query (not escaped)
vertical can be web, news, spelling, images
lang/region default to en/us - take a look at the the YDN Boss documentation for the supported lang/region values
"""
url = SEARCH_API_URL_V1 % (vertical, version, quote_plus(command), start, count, lang, region) + params(more)
return rest.load_json(url)
| bsd-3-clause | 1,023,666,650,626,874,400 | 33.640625 | 165 | 0.683807 | false |
nigelsmall/shortwave | test/test_uri.py | 1 | 29472 | #!/usr/bin/env python
# coding: utf-8
# Copyright 2011-2016, Nigel Small
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
from unittest import TestCase
from shortwave.compat import ustr
from shortwave.uri import percent_encode, percent_decode, parse_uri, resolve_uri, \
build_uri, parse_authority, remove_dot_segments, parse_parameters, parse_path, expand_uri
class PercentEncodeTestCase(TestCase):
def test_can_percent_encode_none(self):
encoded = percent_encode(None)
assert encoded is None
def test_can_percent_encode_empty_string(self):
encoded = percent_encode("")
assert encoded == b""
def test_can_percent_encode_number(self):
encoded = percent_encode(12)
assert encoded == b"12"
def test_can_percent_encode_string(self):
encoded = percent_encode("foo")
assert encoded == b"foo"
def test_can_percent_encode_bytes(self):
encoded = percent_encode(b"foo")
assert encoded == b"foo"
def test_can_percent_encode_unicode(self):
encoded = percent_encode(ustr("foo"))
assert encoded == b"foo"
def test_can_percent_encode_list(self):
encoded = percent_encode(["knife&fork", "spoon"])
assert encoded == b"knife%26fork&spoon"
def test_can_percent_encode_dictionary(self):
encoded = percent_encode(OrderedDict([("one", 1), ("two", 2)]))
assert encoded == b"one=1&two=2"
def test_can_percent_encode_reserved_chars(self):
encoded = percent_encode("20% of $100 = $20")
assert encoded == b"20%25%20of%20%24100%20%3D%20%2420"
def test_can_percent_encode_extended_chars(self):
encoded = percent_encode("/El Niño/")
assert encoded == b'%2FEl%20Ni%F1o%2F'
def test_can_percent_encode_with_safe_chars(self):
encoded = percent_encode("/El Niño/", safe="/|\\")
assert encoded == b'/El%20Ni%F1o/'
class PercentDecodeTestCase(TestCase):
def test_can_percent_decode_none(self):
decoded = percent_decode(None)
assert decoded is None
def test_can_percent_decode_empty_string(self):
decoded = percent_decode("")
assert decoded == ""
def test_can_percent_decode_number(self):
decoded = percent_decode(12)
assert decoded == "12"
def test_can_percent_decode_string(self):
decoded = percent_decode("foo")
assert decoded == "foo"
def test_can_percent_decode_bytes(self):
decoded = percent_decode(b"foo")
assert decoded == "foo"
def test_can_percent_decode_unicode(self):
decoded = percent_decode(ustr("foo"))
assert decoded == "foo"
def test_can_percent_decode_plus_to_space(self):
decoded = percent_decode("one+two%20three+four")
assert decoded == "one two three four"
def test_can_percent_decode_reserved_chars(self):
decoded = percent_decode("20%25%20of%20%24100%20%3D%20%2420")
assert decoded == "20% of $100 = $20"
def test_can_percent_decode_extended_chars(self):
decoded = percent_decode("El%20Ni%C3%B1o")
assert decoded == "El Niño"
def test_partially_decoded_chars_use_replacement_char(self):
decoded = percent_decode("El%20Ni%C3")
assert decoded == "El Ni�"
class ParseURITestCase(TestCase):
def test_can_parse_none_uri(self):
scheme, authority, path, query, fragment = parse_uri(None)
assert scheme is None
assert authority is None
assert path is None
assert query is None
assert fragment is None
def test_can_parse_empty_string_uri(self):
scheme, authority, path, query, fragment = parse_uri(b"")
assert scheme is None
assert authority is None
assert path == b""
assert query is None
assert fragment is None
def test_can_parse_absolute_path_uri(self):
scheme, authority, path, query, fragment = parse_uri(b"/foo/bar")
assert scheme is None
assert authority is None
assert path == b"/foo/bar"
assert query is None
assert fragment is None
def test_can_parse_relative_path_uri(self):
scheme, authority, path, query, fragment = parse_uri(b"foo/bar")
assert scheme is None
assert authority is None
assert path == b"foo/bar"
assert query is None
assert fragment is None
def test_can_parse_only_query(self):
scheme, authority, path, query, fragment = parse_uri(b"?foo=bar")
assert scheme is None
assert authority is None
assert path == b""
assert query == b"foo=bar"
assert fragment is None
def test_can_parse_only_fragment(self):
scheme, authority, path, query, fragment = parse_uri(b"#foo")
assert scheme is None
assert authority is None
assert path == b""
assert query is None
assert fragment == b"foo"
def test_can_parse_uri_without_scheme(self):
scheme, authority, path, query, fragment = parse_uri(b"//example.com")
assert scheme is None
assert authority == b"example.com"
assert path == b""
assert query is None
assert fragment is None
def test_can_parse_uri_without_scheme_but_with_port(self):
scheme, authority, path, query, fragment = parse_uri(b"//example.com:8080")
assert scheme is None
assert authority == b"example.com:8080"
assert path == b""
assert query is None
assert fragment is None
def test_can_parse_simple_uri(self):
scheme, authority, path, query, fragment = parse_uri(b"foo://example.com")
assert scheme == b"foo"
assert authority == b"example.com"
assert path == b""
assert query is None
assert fragment is None
def test_can_parse_uri_with_root_path(self):
scheme, authority, path, query, fragment = parse_uri(b"foo://example.com/")
assert scheme == b"foo"
assert authority == b"example.com"
assert path == b"/"
assert query is None
assert fragment is None
def test_can_parse_full_uri(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
scheme, authority, path, query, fragment = parse_uri(uri)
assert scheme == b"foo"
assert authority == b"bob@[email protected]:8042"
assert path == b"/over/there"
assert query == b"name=ferret"
assert fragment == b"nose"
def test_cannot_parse_into_6_parts(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
with self.assertRaises(ValueError):
parse_uri(uri, 6)
def test_can_parse_into_5_parts(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
scheme, authority, path, query, fragment = parse_uri(uri, 5)
assert scheme == b"foo"
assert authority == b"bob@[email protected]:8042"
assert path == b"/over/there"
assert query == b"name=ferret"
assert fragment == b"nose"
def test_can_parse_into_4_parts(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
scheme, authority, path_query, fragment = parse_uri(uri, 4)
assert scheme == b"foo"
assert authority == b"bob@[email protected]:8042"
assert path_query == b"/over/there?name=ferret"
assert fragment == b"nose"
def test_can_parse_into_3_parts(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
scheme, authority_path_query, fragment = parse_uri(uri, 3)
assert scheme == b"foo"
assert authority_path_query == b"//bob@[email protected]:8042/over/there?name=ferret"
assert fragment == b"nose"
def test_can_parse_into_2_parts(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
core_uri, fragment = parse_uri(uri, 2)
assert core_uri == b"foo://bob@[email protected]:8042/over/there?name=ferret"
assert fragment == b"nose"
def test_cannot_parse_into_1_part(self):
uri = b"foo://bob@[email protected]:8042/over/there?name=ferret#nose"
with self.assertRaises(ValueError):
parse_uri(uri, 1)
class BuildURITestCase(TestCase):
"""
"""
def test_can_build_empty_uri(self):
built = build_uri()
assert built is b""
def test_can_build_uri_from_string(self):
built = build_uri(uri=b"foo://example.com/")
assert built == b"foo://example.com/"
def test_can_build_uri_from_hierarchical_part(self):
built = build_uri(hierarchical_part=b"//example.com/")
assert built == b"//example.com/"
def test_can_build_uri_from_scheme_and_hierarchical_part(self):
built = build_uri(scheme=b"foo", hierarchical_part=b"//example.com/")
assert built == b"foo://example.com/"
def test_can_build_uri_from_scheme_hierarchical_part_and_query(self):
built = build_uri(scheme=b"foo", hierarchical_part=b"//example.com/", query=b"spam=eggs")
assert built == b"foo://example.com/?spam=eggs"
def test_can_build_uri_from_scheme_hierarchical_part_query_and_fragment(self):
built = build_uri(scheme=b"foo", hierarchical_part=b"//example.com/", query=b"spam=eggs",
fragment=b"mustard")
assert built == b"foo://example.com/?spam=eggs#mustard"
def test_can_build_uri_from_absolute_path_reference(self):
built = build_uri(absolute_path_reference=b"/foo/bar?spam=eggs#mustard")
assert built == b"/foo/bar?spam=eggs#mustard"
def test_can_build_uri_from_authority_and_absolute_path_reference(self):
built = build_uri(authority=b"[email protected]:9999",
absolute_path_reference=b"/foo/bar?spam=eggs#mustard")
assert built == b"//[email protected]:9999/foo/bar?spam=eggs#mustard"
def test_can_build_uri_from_scheme_host_and_path(self):
built = build_uri(scheme=b"http", host=b"example.com", path=b"/foo/bar")
assert built == b"http://example.com/foo/bar"
def test_can_build_uri_from_scheme_and_host_port(self):
built = build_uri(scheme=b"http", host_port=b"example.com:3456")
assert built == b"http://example.com:3456"
def test_can_build_uri_from_scheme_authority_and_host_port(self):
built = build_uri(scheme=b"http", authority=b"[email protected]:4567",
host_port=b"example.com:3456")
assert built == b"http://[email protected]:3456"
def test_can_build_uri_from_scheme_user_info_and_host_port(self):
built = build_uri(scheme=b"http", user_info=b"bob", host_port=b"example.com:3456")
assert built == b"http://[email protected]:3456"
def test_can_build_uri_from_scheme_user_info_and_path(self):
built = build_uri(scheme=b"http", user_info=b"bob", path=b"/foo")
assert built == b"http://bob@/foo"
def test_can_build_uri_from_scheme_authority_and_host(self):
built = build_uri(scheme=b"http", authority=b"[email protected]", host=b"example.com")
assert built == b"http://[email protected]"
def test_can_build_uri_from_scheme_authority_and_port(self):
built = build_uri(scheme=b"http", authority=b"[email protected]", port=3456)
assert built == b"http://[email protected]:3456"
def test_can_build_uri_from_scheme_port_and_path(self):
built = build_uri(scheme=b"http", port=3456, path=b"/foo")
assert built == b"http://:3456/foo"
class ResolveURITestCase(TestCase):
""" RFC 3986, section 5.4.
"""
base_uri = b"http://a/b/c/d;p?q"
def resolve_references(self, references, strict=True):
for reference, target in references.items():
print(reference, "->", target)
resolved = resolve_uri(self.base_uri, reference, strict)
assert resolved == target
def test_normal_examples(self):
""" 5.4.1. Normal Examples
"""
self.resolve_references({
b"g:h": b"g:h",
b"g": b"http://a/b/c/g",
b"./g": b"http://a/b/c/g",
b"g/": b"http://a/b/c/g/",
b"/g": b"http://a/g",
b"//g": b"http://g",
b"?y": b"http://a/b/c/d;p?y",
b"g?y": b"http://a/b/c/g?y",
b"#s": b"http://a/b/c/d;p?q#s",
b"g#s": b"http://a/b/c/g#s",
b"g?y#s": b"http://a/b/c/g?y#s",
b";x": b"http://a/b/c/;x",
b"g;x": b"http://a/b/c/g;x",
b"g;x?y#s": b"http://a/b/c/g;x?y#s",
b"": b"http://a/b/c/d;p?q",
b".": b"http://a/b/c/",
b"./": b"http://a/b/c/",
b"..": b"http://a/b/",
b"../": b"http://a/b/",
b"../g": b"http://a/b/g",
b"../..": b"http://a/",
b"../../": b"http://a/",
b"../../g": b"http://a/g",
})
def test_abnormal_examples(self):
""" 5.4.2. Abnormal Examples
"""
# Although the following abnormal examples are unlikely to occur in
# normal practice, all URI parsers should be capable of resolving them
# consistently. Each example uses the same base as that above.
#
# Parsers must be careful in handling cases where there are more ".."
# segments in a relative-path reference than there are hierarchical
# levels in the base URI's path. Note that the ".." syntax cannot be
# used to change the authority component of a URI.
self.resolve_references({
b"../../../g": b"http://a/g",
b"../../../../g": b"http://a/g",
})
# Similarly, parsers must remove the dot-segments "." and ".." when
# they are complete components of a path, but not when they are only
# part of a segment.
self.resolve_references({
b"/./g": b"http://a/g",
b"/../g": b"http://a/g",
b"g.": b"http://a/b/c/g.",
b".g": b"http://a/b/c/.g",
b"g..": b"http://a/b/c/g..",
b"..g": b"http://a/b/c/..g",
})
# Less likely are cases where the relative reference uses unnecessary
# or nonsensical forms of the "." and ".." complete path segments.
self.resolve_references({
b"./../g": b"http://a/b/g",
b"./g/.": b"http://a/b/c/g/",
b"g/./h": b"http://a/b/c/g/h",
b"g/../h": b"http://a/b/c/h",
b"g;x=1/./y": b"http://a/b/c/g;x=1/y",
b"g;x=1/../y": b"http://a/b/c/y",
})
# Some applications fail to separate the reference's query and/or
# fragment components from the path component before merging it with
# the base path and removing dot-segments. This error is rarely
# noticed, as typical usage of a fragment never includes the hierarchy
# ("/") character and the query component is not normally used within
# relative references.
self.resolve_references({
b"g?y/./x": b"http://a/b/c/g?y/./x",
b"g?y/../x": b"http://a/b/c/g?y/../x",
b"g#s/./x": b"http://a/b/c/g#s/./x",
b"g#s/../x": b"http://a/b/c/g#s/../x",
})
# Some parsers allow the scheme name to be present in a relative
# reference if it is the same as the base URI scheme. This is
# considered to be a loophole in prior specifications of partial URI
# [RFC1630]. Its use should be avoided but is allowed for backward
# compatibility.
#
# for strict parsers:
self.resolve_references({b"http:g": b"http:g"}, strict=True)
#
# for backward compatibility:
self.resolve_references({b"http:g": b"http://a/b/c/g"}, strict=False)
def test_can_resolve_from_empty_path(self):
base = b"http://example.com"
uri = resolve_uri(base, b"foo")
assert uri == b"http://example.com/foo"
def test_can_resolve_from_empty_uri(self):
base = b""
uri = resolve_uri(base, b"foo")
assert uri == b"foo"
def test_resolving_when_reference_is_none_returns_none(self):
base = b"http://example.com"
uri = resolve_uri(base, None)
assert uri is None
class ParseAuthorityTestCase(TestCase):
"""
"""
def test_can_parse_none_authority(self):
user_info, host, port = parse_authority(None)
assert user_info is None
assert host is None
assert port is None
def test_can_parse_empty_authority(self):
user_info, host, port = parse_authority(b"")
assert user_info is None
assert host == b""
assert port is None
def test_can_parse_host_authority(self):
user_info, host, port = parse_authority(b"example.com")
assert user_info is None
assert host == b"example.com"
assert port is None
def test_can_parse_host_port_authority(self):
user_info, host, port = parse_authority(b"example.com:6789")
assert user_info is None
assert host == b"example.com"
assert port == 6789
def test_can_parse_user_host_authority(self):
user_info, host, port = parse_authority(b"[email protected]")
assert user_info == b"bob"
assert host == b"example.com"
assert port is None
def test_can_parse_email_user_host_authority(self):
user_info, host, port = parse_authority(b"[email protected]@example.com")
assert user_info == b"[email protected]"
assert host == b"example.com"
assert port is None
def test_can_parse_full_authority(self):
user_info, host, port = parse_authority(b"[email protected]:6789")
assert user_info == b"bob"
assert host == b"example.com"
assert port == 6789
def test_cannot_parse_into_4_parts(self):
with self.assertRaises(ValueError):
parse_authority(b"[email protected]:6789", 4)
def test_can_parse_into_3_parts(self):
user_info, host, port = parse_authority(b"[email protected]:6789", 3)
assert user_info == b"bob"
assert host == b"example.com"
assert port == 6789
def test_can_parse_into_2_parts(self):
user_info, address = parse_authority(b"[email protected]:6789", 2)
assert user_info == b"bob"
assert address == b"example.com:6789"
def test_cannot_parse_into_1_part(self):
with self.assertRaises(ValueError):
parse_authority(b"[email protected]:6789", 1)
class BuildAuthorityTestCase(TestCase):
pass # TODO
class ParsePathTestCase(TestCase):
def test_can_parse_none_path(self):
path = parse_path(None)
assert path is None
def test_can_parse_empty_path(self):
path = parse_path(b"")
assert path == [""]
def test_can_parse_absolute_path(self):
path = parse_path(b"/foo/bar")
assert path == ["", "foo", "bar"]
def test_can_parse_relative_path(self):
path = parse_path(b"foo/bar")
assert path == ["foo", "bar"]
def test_can_parse_path_with_encoded_slash(self):
path = parse_path(b"/foo/bar%2Fbaz")
assert path == ["", "foo", "bar/baz"]
class BuildPathTestCase(TestCase):
pass # TODO
class RemoveDotSegmentsTestCase(TestCase):
def test_can_remove_dot_segments_pattern_1(self):
path_in = b"/a/b/c/./../../g"
path_out = remove_dot_segments(path_in)
assert path_out == b"/a/g"
def test_can_remove_dot_segments_pattern_2(self):
path_in = b"mid/content=5/../6"
path_out = remove_dot_segments(path_in)
assert path_out == b"mid/6"
def test_can_remove_dot_segments_when_single_dot(self):
path_in = b"."
path_out = remove_dot_segments(path_in)
assert path_out == b""
def test_can_remove_dot_segments_when_double_dot(self):
path_in = b".."
path_out = remove_dot_segments(path_in)
assert path_out == b""
def test_can_remove_dot_segments_when_starts_with_single_dot(self):
path_in = b"./a"
path_out = remove_dot_segments(path_in)
assert path_out == b"a"
def test_can_remove_dot_segments_when_starts_with_double_dot(self):
path_in = b"../a"
path_out = remove_dot_segments(path_in)
assert path_out == b"a"
class ParseParametersTestCase(TestCase):
def test_can_parse_none_query(self):
parsed = parse_parameters(None)
assert parsed is None
def test_can_parse_empty_query(self):
parsed = parse_parameters(b"")
assert parsed == []
def test_can_parse_value_only_query(self):
parsed = parse_parameters(b"foo")
assert parsed == [(None, "foo")]
def test_can_parse_key_value_query(self):
parsed = parse_parameters(b"foo=bar")
assert parsed == [("foo", "bar")]
def test_can_parse_multi_key_value_query(self):
parsed = parse_parameters(b"foo=bar&spam=eggs")
assert parsed == [("foo", "bar"), ("spam", "eggs")]
def test_can_parse_mixed_query(self):
parsed = parse_parameters(b"foo&spam=eggs")
assert parsed == [(None, "foo"), ("spam", "eggs")]
def test_can_parse_repeated_keys(self):
parsed = parse_parameters(b"foo=bar&foo=baz&spam=eggs")
assert parsed == [("foo", "bar"), ("foo", "baz"), ("spam", "eggs")]
def test_can_handle_percent_decoding_while_parsing(self):
parsed = parse_parameters(b"ampersand=%26&equals=%3D")
assert parsed == [("ampersand", "&"), ("equals", "=")]
def test_can_handle_alternative_separators(self):
parsed = parse_parameters(b"3:%33;S:%53", item_separator=b";", key_separator=b":")
assert parsed == [("3", "3"), ("S", "S")]
def test_can_parse_path_segment_with_parameters(self):
path_segment = b"name;version=1.2"
parameters = parse_parameters(path_segment, item_separator=b";")
assert parameters == [(None, "name"), ("version", "1.2")]
class ExpandURITestCase(TestCase):
def test_expansion_with_no_variables(self):
template = b"{}"
uri = expand_uri(template, {})
assert uri == b""
def assert_expansions(self, expansions):
variables = {
b"count": (b"one", b"two", b"three"),
b"dom": (b"example", b"com"),
b"dub": b"me/too",
b"hello": b"Hello World!",
b"half": b"50%",
b"var": b"value",
b"who": b"fred",
b"base": b"http://example.com/home/",
b"path": b"/foo/bar",
b"list": (b"red", b"green", b"blue"),
b"keys": OrderedDict([(b"semi", b";"), (b"dot", b"."), (b"comma", b",")]),
b"v": b"6",
b"x": b"1024",
b"y": b"768",
b"empty": b"",
b"empty_keys": dict([]),
b"undef": None,
}
for template, expansion in expansions.items():
print(template, "->", expansion)
uri = expand_uri(template, variables)
assert uri == expansion
def test_empty_expansion(self):
self.assert_expansions({
None: None,
b"": b"",
})
def test_can_expand_simple_strings(self):
self.assert_expansions({
b"{var}": b"value",
b"{hello}": b"Hello%20World%21",
b"{half}": b"50%25",
b"O{empty}X": b"OX",
b"O{undef}X": b"OX",
b"{x,y}": b"1024,768",
b"{x,hello,y}": b"1024,Hello%20World%21,768",
b"?{x,empty}": b"?1024,",
b"?{x,undef}": b"?1024",
b"?{undef,y}": b"?768",
b"{var:3}": b"val",
b"{var:30}": b"value",
b"{list}": b"red,green,blue",
b"{list*}": b"red,green,blue",
b"{keys}": b"semi,%3B,dot,.,comma,%2C",
b"{keys*}": b"semi=%3B,dot=.,comma=%2C",
})
def test_can_expand_reserved_strings(self):
self.assert_expansions({
b"{+var}": b"value",
b"{+hello}": b"Hello%20World!",
b"{+half}": b"50%25",
b"{base}index": b"http%3A%2F%2Fexample.com%2Fhome%2Findex",
b"{+base}index": b"http://example.com/home/index",
b"O{+empty}X": b"OX",
b"O{+undef}X": b"OX",
b"{+path}/here": b"/foo/bar/here",
b"here?ref={+path}": b"here?ref=/foo/bar",
b"up{+path}{var}/here": b"up/foo/barvalue/here",
b"{+x,hello,y}": b"1024,Hello%20World!,768",
b"{+path,x}/here": b"/foo/bar,1024/here",
b"{+path:6}/here": b"/foo/b/here",
b"{+list}": b"red,green,blue",
b"{+list*}": b"red,green,blue",
b"{+keys}": b"semi,;,dot,.,comma,,",
b"{+keys*}": b"semi=;,dot=.,comma=,",
})
def test_can_expand_fragments(self):
self.assert_expansions({
b"{#var}": b"#value",
b"{#hello}": b"#Hello%20World!",
b"{#half}": b"#50%25",
b"foo{#empty}": b"foo#",
b"foo{#undef}": b"foo",
b"{#x,hello,y}": b"#1024,Hello%20World!,768",
b"{#path,x}/here": b"#/foo/bar,1024/here",
b"{#path:6}/here": b"#/foo/b/here",
b"{#list}": b"#red,green,blue",
b"{#list*}": b"#red,green,blue",
b"{#keys}": b"#semi,;,dot,.,comma,,",
b"{#keys*}": b"#semi=;,dot=.,comma=,",
})
def test_can_expand_labels(self):
self.assert_expansions({
b"{.who}": b".fred",
b"{.who,who}": b".fred.fred",
b"{.half,who}": b".50%25.fred",
b"www{.dom*}": b"www.example.com",
b"X{.var}": b"X.value",
b"X{.empty}": b"X.",
b"X{.undef}": b"X",
b"X{.var:3}": b"X.val",
b"X{.list}": b"X.red,green,blue",
b"X{.list*}": b"X.red.green.blue",
b"X{.keys}": b"X.semi,%3B,dot,.,comma,%2C",
b"X{.keys*}": b"X.semi=%3B.dot=..comma=%2C",
b"X{.empty_keys}": b"X",
b"X{.empty_keys*}": b"X",
})
def test_can_expand_path_segments(self):
self.assert_expansions({
b"{/who}": b"/fred",
b"{/who,who}": b"/fred/fred",
b"{/half,who}": b"/50%25/fred",
b"{/who,dub}": b"/fred/me%2Ftoo",
b"{/var}": b"/value",
b"{/var,empty}": b"/value/",
b"{/var,undef}": b"/value",
b"{/var,x}/here": b"/value/1024/here",
b"{/var:1,var}": b"/v/value",
b"{/list}": b"/red,green,blue",
b"{/list*}": b"/red/green/blue",
b"{/list*,path:4}": b"/red/green/blue/%2Ffoo",
b"{/keys}": b"/semi,%3B,dot,.,comma,%2C",
b"{/keys*}": b"/semi=%3B/dot=./comma=%2C",
})
def test_can_expand_path_parameters(self):
self.assert_expansions({
b"{;who}": b";who=fred",
b"{;half}": b";half=50%25",
b"{;empty}": b";empty",
b"{;v,empty,who}": b";v=6;empty;who=fred",
b"{;v,bar,who}": b";v=6;who=fred",
b"{;x,y}": b";x=1024;y=768",
b"{;x,y,empty}": b";x=1024;y=768;empty",
b"{;x,y,undef}": b";x=1024;y=768",
b"{;hello:5}": b";hello=Hello",
b"{;list}": b";list=red,green,blue",
b"{;list*}": b";list=red;list=green;list=blue",
b"{;keys}": b";keys=semi,%3B,dot,.,comma,%2C",
b"{;keys*}": b";semi=%3B;dot=.;comma=%2C",
})
def test_can_expand_form_queries(self):
self.assert_expansions({
b"{?who}": b"?who=fred",
b"{?half}": b"?half=50%25",
b"{?x,y}": b"?x=1024&y=768",
b"{?x,y,empty}": b"?x=1024&y=768&empty=",
b"{?x,y,undef}": b"?x=1024&y=768",
b"{?var:3}": b"?var=val",
b"{?list}": b"?list=red,green,blue",
b"{?list*}": b"?list=red&list=green&list=blue",
b"{?keys}": b"?keys=semi,%3B,dot,.,comma,%2C",
b"{?keys*}": b"?semi=%3B&dot=.&comma=%2C",
})
def test_can_expand_form_query_continuations(self):
self.assert_expansions({
b"{&who}": b"&who=fred",
b"{&half}": b"&half=50%25",
b"?fixed=yes{&x}": b"?fixed=yes&x=1024",
b"{&x,y,empty}": b"&x=1024&y=768&empty=",
b"{&x,y,undef}": b"&x=1024&y=768",
b"{&var:3}": b"&var=val",
b"{&list}": b"&list=red,green,blue",
b"{&list*}": b"&list=red&list=green&list=blue",
b"{&keys}": b"&keys=semi,%3B,dot,.,comma,%2C",
b"{&keys*}": b"&semi=%3B&dot=.&comma=%2C",
})
| apache-2.0 | 3,378,061,809,454,768,600 | 36.3 | 97 | 0.560763 | false |
crowdhackathon-apodeiksi/tax-o-drome | tax-o-drome-api/app/app/settings.py | 1 | 4037 | """
Django settings for app project.
Generated by 'django-admin startproject' using Django 1.8.
For more information on this file, see
https://docs.djangoproject.com/en/1.8/topics/settings/
For the full list of settings and their values, see
https://docs.djangoproject.com/en/1.8/ref/settings/
"""
# Build paths inside the project like this: os.path.join(BASE_DIR, ...)
import os
import datetime
BASE_DIR = os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
# Quick-start development settings - unsuitable for production
# See https://docs.djangoproject.com/en/1.8/howto/deployment/checklist/
# SECURITY WARNING: keep the secret key used in production secret!
SECRET_KEY = 'q4$+jsqoeu9dh&emj@+u_(rr0xpo#jqj=xtw$oe%o*8^tp1of('
# SECURITY WARNING: don't run with debug turned on in production!
DEBUG = True
ALLOWED_HOSTS = []
# Application definition
INSTALLED_APPS = (
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.messages',
'django.contrib.staticfiles',
'rest_framework',
'corsheaders',
'rest_framework.authtoken',
'djangotoolbox',
'permission_backend_nonrel',
)
MIDDLEWARE_CLASSES = (
'django.contrib.sessions.middleware.SessionMiddleware',
'django.middleware.common.CommonMiddleware',
'django.middleware.csrf.CsrfViewMiddleware',
'django.contrib.auth.middleware.AuthenticationMiddleware',
'django.contrib.messages.middleware.MessageMiddleware',
'django.middleware.clickjacking.XFrameOptionsMiddleware',
'corsheaders.middleware.CorsMiddleware',
)
AUTHENTICATION_BACKENDS = (
'permission_backend_nonrel.backends.NonrelPermissionBackend',
)
ROOT_URLCONF = 'app.urls'
WSGI_APPLICATION = 'app.wsgi.application'
# Database
# https://docs.djangoproject.com/en/1.8/ref/settings/#databases
DATABASES = {
'default': {
'ENGINE': 'django_mongodb_engine',
'NAME': 'save_crop'
}
}
# Internationalization
# https://docs.djangoproject.com/en/1.8/topics/i18n/
LANGUAGE_CODE = 'en-us'
TIME_ZONE = 'UTC'
USE_I18N = True
USE_L10N = True
USE_TZ = True
CORS_ORIGIN_ALLOW_ALL = True
# Static files (CSS, JavaScript, Images)
# https://docs.djangoproject.com/en/1.8/howto/static-files/
STATIC_URL = '/static/'
TEMPLATE_DIRS = (
os.path.join(BASE_DIR, 'templates'),
)
REST_FRAMEWORK = {
'DEFAULT_PERMISSION_CLASSES': [
# 'rest_framework.permissions.IsAuthenticated',
],
'DEFAULT_AUTHENTICATION_CLASSES': [
# 'rest_framework.authentication.SessionAuthentication',
# 'rest_framework.authentication.TokenAuthentication',
# 'rest_framework_jwt.authentication.JSONWebTokenAuthentication',
],
'DEFAULT_RENDERER_CLASSES': (
'rest_framework.renderers.BrowsableAPIRenderer',
'rest_framework.renderers.JSONRenderer',
'rest_framework_xml.renderers.XMLRenderer',
),
'DEFAULT_FILTER_BACKENDS': (
'rest_framework.filters.DjangoFilterBackend',
),
'PAGINATE_BY': 10,
'DEFAULT_THROTTLE_CLASSES': (
'rest_framework.throttling.AnonRateThrottle',
'rest_framework.throttling.UserRateThrottle'
),
'DEFAULT_THROTTLE_RATES': {
'anon': '1000/hour',
'user': '10000/hour'
}
}
JWT_AUTH = {
'JWT_ENCODE_HANDLER': 'rest_framework_jwt.utils.jwt_encode_handler',
'JWT_DECODE_HANDLER': 'rest_framework_jwt.utils.jwt_decode_handler',
'JWT_PAYLOAD_HANDLER': 'rest_framework_jwt.utils.jwt_payload_handler',
'JWT_PAYLOAD_GET_USER_ID_HANDLER': 'rest_framework_jwt.utils.jwt_get_user_id_from_payload_handler',
'JWT_SECRET_KEY': SECRET_KEY,
'JWT_ALGORITHM': 'HS256',
'JWT_VERIFY': True,
'JWT_VERIFY_EXPIRATION': True,
'JWT_LEEWAY': 0,
'JWT_EXPIRATION_DELTA': datetime.timedelta(days=1),
'JWT_ALLOW_REFRESH': False,
'JWT_REFRESH_EXPIRATION_DELTA': datetime.timedelta(days=7),
'JWT_AUTH_HEADER_PREFIX': 'JWT',
} | gpl-2.0 | -7,872,202,097,630,832,000 | 26.469388 | 103 | 0.697052 | false |
fhoring/autorest | src/generator/AutoRest.Python.Azure.Tests/Expected/AcceptanceTests/Paging/autorestpagingtestservice/models/__init__.py | 1 | 1275 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for
# license information.
#
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is
# regenerated.
# --------------------------------------------------------------------------
from .product import Product
from .product_properties import ProductProperties
from .operation_result import OperationResult
from .paging_get_multiple_pages_options import PagingGetMultiplePagesOptions
from .paging_get_odata_multiple_pages_options import PagingGetOdataMultiplePagesOptions
from .paging_get_multiple_pages_with_offset_options import PagingGetMultiplePagesWithOffsetOptions
from .custom_parameter_group import CustomParameterGroup
from .product_paged import ProductPaged
from .product_paged1 import ProductPaged1
__all__ = [
'Product',
'ProductProperties',
'OperationResult',
'PagingGetMultiplePagesOptions',
'PagingGetOdataMultiplePagesOptions',
'PagingGetMultiplePagesWithOffsetOptions',
'CustomParameterGroup',
'ProductPaged',
'ProductPaged1',
]
| mit | 6,541,422,675,272,307,000 | 38.84375 | 98 | 0.703529 | false |
formalmethods/intrepyd | tests/traffic_light.py | 1 | 4451 | """
Copyright (C) 2017 Roberto Bruttomesso <[email protected]>
This file is distributed under the terms of the 3-clause BSD License.
A copy of the license can be found in the root directory or at
https://opensource.org/licenses/BSD-3-Clause.
Author: Roberto Bruttomesso <[email protected]>
Date: 27/03/2017
"""
import intrepyd as ip
import intrepyd.scr
import intrepyd.circuit
import collections
class SimulinkCircuit(ip.circuit.Circuit):
def __init__(self, ctx, name):
ip.circuit.Circuit.__init__(self, ctx, name)
def _mk_naked_circuit_impl(self, inputs):
input_keys = list(inputs)
# OnOff -> n1
n1 = inputs[input_keys[0]]
# OnGreen -> n2
n2 = inputs[input_keys[1]]
# OnYellow -> n3
n3 = inputs[input_keys[2]]
# OnRed -> n4
n4 = inputs[input_keys[3]]
# Daytime -> n5
n5 = inputs[input_keys[4]]
n6 = self.context.mk_latch('traffic_light/Past(OnOff)', self.context.mk_boolean_type())
n7 = self.context.mk_latch('traffic_light/Past(OnRed)', self.context.mk_boolean_type())
n8 = self.context.mk_latch('traffic_light/Past(OnYellow)', self.context.mk_boolean_type())
n9 = self.context.mk_latch('traffic_light/Past(OnGreen)', self.context.mk_boolean_type())
n10 = self.context.mk_latch('traffic_light/Past(Daytime)', self.context.mk_boolean_type())
n11 = self.context.mk_latch('traffic_light/Init', self.context.mk_boolean_type())
n12 = self.context.mk_latch('traffic_light/Past(Mode)', self.context.mk_int8_type())
# traffic_light/false
n13 = self.context.mk_false()
self.nets['traffic_light/false'] = n13
# traffic_light/Off
n14 = self.context.mk_number('0', self.context.mk_int8_type())
self.nets['traffic_light/Off'] = n14
# Bus Creator
n15 = [n1, n2, n3, n4, n5]
# Bus Creator1
n16 = [n6, n9, n8, n7, n10]
# traffic_light/Green
n17 = self.context.mk_number('1', self.context.mk_int8_type())
self.nets['traffic_light/Green'] = n17
# traffic_light/Yellow
n18 = self.context.mk_number('2', self.context.mk_int8_type())
self.nets['traffic_light/Yellow'] = n18
# traffic_light/Red
n19 = self.context.mk_number('3', self.context.mk_int8_type())
self.nets['traffic_light/Red'] = n19
# Bus Creator2
n20 = [n14, n17, n18, n19]
# traffic_light/Mode1
n21 = self.context.mk_ite(n11, n14, n12)
self.nets['traffic_light/Mode1'] = n21
n22_1 = ip.scr.mk_scr(self.context, 'tests/traffic_light', n15, n16, n20, n21)
# traffic_light/Mode
n23 = self.context.mk_ite(n11, n14, n22_1)
self.nets['traffic_light/Mode'] = n23
in6 = self.context.mk_true()
self.context.set_latch_init_next(n6, in6, n1)
in7 = self.context.mk_true()
self.context.set_latch_init_next(n7, in7, n4)
in8 = self.context.mk_true()
self.context.set_latch_init_next(n8, in8, n3)
in9 = self.context.mk_true()
self.context.set_latch_init_next(n9, in9, n2)
in10 = self.context.mk_true()
self.context.set_latch_init_next(n10, in10, n5)
in11 = self.context.mk_true()
self.context.set_latch_init_next(n11, in11, n13)
in12 = self.context.mk_number('1', self.context.mk_int8_type())
self.context.set_latch_init_next(n12, in12, n23)
# n23 -> out
outputs = collections.OrderedDict()
outputs['traffic_light/out'] = n23
self.nets['traffic_light/out'] = n23
return outputs
def _mk_inputs(self):
# traffic_light/OnOff -> n1
n1 = self.context.mk_input('OnOff', self.context.mk_boolean_type())
self.inputs['OnOff'] = n1
# traffic_light/OnGreen -> n2
n2 = self.context.mk_input('OnGreen', self.context.mk_boolean_type())
self.inputs['OnGreen'] = n2
# traffic_light/OnYellow -> n3
n3 = self.context.mk_input('OnYellow', self.context.mk_boolean_type())
self.inputs['OnYellow'] = n3
# traffic_light/OnRed -> n4
n4 = self.context.mk_input('OnRed', self.context.mk_boolean_type())
self.inputs['OnRed'] = n4
# traffic_light/Daytime -> n5
n5 = self.context.mk_input('Daytime', self.context.mk_boolean_type())
self.inputs['Daytime'] = n5
| bsd-3-clause | 4,430,705,504,138,592,000 | 42.213592 | 98 | 0.610874 | false |
box/box-python-sdk | boxsdk/auth/cooperatively_managed_oauth2.py | 1 | 1125 | # coding: utf-8
from __future__ import unicode_literals
from .oauth2 import OAuth2
class CooperativelyManagedOAuth2Mixin(OAuth2):
"""
Box SDK OAuth2 mixin.
Allows for sharing auth tokens between multiple clients.
"""
def __init__(self, retrieve_tokens=None, *args, **kwargs):
"""
:param retrieve_tokens:
Callback to get the current access/refresh token pair.
:type retrieve_tokens:
`callable` of () => (`unicode`, `unicode`)
"""
# pylint:disable=keyword-arg-before-vararg
self._retrieve_tokens = retrieve_tokens
super(CooperativelyManagedOAuth2Mixin, self).__init__(*args, **kwargs)
def _get_tokens(self):
"""
Base class override. Get the tokens from the user-specified callback.
"""
return self._retrieve_tokens()
class CooperativelyManagedOAuth2(CooperativelyManagedOAuth2Mixin):
"""
Box SDK OAuth2 subclass.
Allows for sharing auth tokens between multiple clients. The retrieve_tokens callback should
return the current access/refresh token pair.
"""
pass
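# Illustrative usage sketch (editorial addition, not part of the SDK source):
# `retrieve_tokens` is any zero-argument callable returning the shared
# (access_token, refresh_token) pair. The extra constructor arguments shown are
# assumptions about the OAuth2 base class and may differ between SDK versions.
#
#     def retrieve_tokens():
#         return token_store.access_token, token_store.refresh_token
#
#     auth = CooperativelyManagedOAuth2(
#         retrieve_tokens=retrieve_tokens,
#         client_id='YOUR_CLIENT_ID',
#         client_secret='YOUR_CLIENT_SECRET',
#     )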
| apache-2.0 | -1,451,916,053,912,874,200 | 30.25 | 96 | 0.654222 | false |
bburan/psiexperiment | psi/controller/calibration/acquire.py | 1 | 1263 | import time
class Acquire(object):
def __init__(self, engine, queue, epoch_size):
# Setup output
self.ao_queue = queue
self.ai_queue = queue.create_connection()
self.ai_epochs = []
self.engine = engine
self.complete = False
self.engine.register_ao_callback(self.ao_callback)
# Setup input
fs = engine.hw_ai_channels[0].fs
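        # NOTE (editorial, hedged): `extract_epochs` is used below but is not imported
        # in this module as shown; it is assumed to be provided elsewhere in the
        # psiexperiment package as a generator-style epoch extractor driven via .send().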
ai_cb = extract_epochs(fs, self.ai_queue, epoch_size, epoch_size*100,
self.ai_callback)
self.engine.register_ai_callback(ai_cb.send)
def ao_callback(self, event):
samples = event.engine.get_space_available(event.channel_name)
waveform, empty = self.ao_queue.pop_buffer(samples)
event.engine.append_hw_ao(waveform)
def ai_callback(self, event):
self.ai_epochs.append(event)
if self.ao_queue.count_trials() == 0:
if len(self.ai_queue) == 0:
self.complete = True
def start(self):
self.engine.start()
def join(self):
while not self.complete:
time.sleep(0.1)
def acquire(engine, queue, epoch_size):
acq = Acquire(engine, queue, epoch_size)
acq.start()
acq.join()
return acq.ai_epochs
| mit | -5,059,281,541,524,644,000 | 27.066667 | 77 | 0.589865 | false |
maxive/erp | addons/l10n_be_hr_payroll_fleet/models/fleet.py | 2 | 7573 | # -*- coding:utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
import babel.dates
from odoo import api, fields, models
from odoo.fields import Datetime
class FleetVehicle(models.Model):
_inherit = 'fleet.vehicle'
co2_fee = fields.Float(compute='_compute_co2_fee', string="CO2 Fee", store=True)
total_depreciated_cost = fields.Float(compute='_compute_total_depreciated_cost', store=True,
string="Total Cost (Depreciated)", track_visibility="onchange",
help="This includes all the depreciated costs and the CO2 fee")
    total_cost = fields.Float(compute='_compute_total_cost', string="Total Cost", help="This includes all the costs and the CO2 fee")
fuel_type = fields.Selection(required=True, default='diesel')
atn = fields.Float(compute='_compute_car_atn', string="ATN")
acquisition_date = fields.Date(required=True)
@api.depends('co2_fee', 'log_contracts', 'log_contracts.state', 'log_contracts.recurring_cost_amount_depreciated')
def _compute_total_depreciated_cost(self):
for car in self:
car.total_depreciated_cost = car.co2_fee + \
sum(car.log_contracts.filtered(
lambda contract: contract.state == 'open'
).mapped('recurring_cost_amount_depreciated'))
@api.depends('co2_fee', 'log_contracts', 'log_contracts.state', 'log_contracts.cost_generated')
def _compute_total_cost(self):
for car in self:
car.total_cost = car.co2_fee
contracts = car.log_contracts.filtered(
lambda contract: contract.state == 'open' and contract.cost_frequency != 'no'
)
for contract in contracts:
if contract.cost_frequency == "daily":
car.total_cost += contract.cost_generated * 30.0
elif contract.cost_frequency == "weekly":
car.total_cost += contract.cost_generated * 4.0
elif contract.cost_frequency == "monthly":
car.total_cost += contract.cost_generated
elif contract.cost_frequency == "yearly":
car.total_cost += contract.cost_generated / 12.0
def _get_co2_fee(self, co2):
return max((((co2 * 9.0) - 600.0) * 1.2488) / 12.0, 0.0)
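        # Worked example (illustrative): for co2 = 120 g/km the formula above gives
        # ((120 * 9.0) - 600.0) * 1.2488 = 599.424 per year, i.e. about 49.95 per month.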
@api.depends('co2')
def _compute_co2_fee(self):
for car in self:
car.co2_fee = self._get_co2_fee(car.co2)
@api.depends('fuel_type', 'car_value', 'acquisition_date')
def _compute_car_atn(self):
for car in self:
car.atn = car._get_car_atn(car.acquisition_date, car.car_value, car.fuel_type, car.co2)
@api.depends('model_id', 'license_plate', 'log_contracts', 'acquisition_date',
'co2_fee', 'log_contracts', 'log_contracts.state', 'log_contracts.recurring_cost_amount_depreciated')
def _compute_vehicle_name(self):
super(FleetVehicle, self)._compute_vehicle_name()
for vehicle in self:
acquisition_date = vehicle._get_acquisition_date()
vehicle.name += u" \u2022 " + str(round(vehicle.total_depreciated_cost, 2)) + u" \u2022 " + acquisition_date
@api.model
def create(self, vals):
res = super(FleetVehicle, self).create(vals)
if not res.log_contracts:
self.env['fleet.vehicle.log.contract'].create({
'vehicle_id': res.id,
'recurring_cost_amount_depreciated': res.model_id.default_recurring_cost_amount_depreciated,
'purchaser_id': res.driver_id.id,
})
return res
def _get_acquisition_date(self):
self.ensure_one()
return babel.dates.format_date(
date=Datetime.from_string(self.acquisition_date),
format='MMMM y',
locale=self._context.get('lang') or 'en_US'
)
def _get_car_atn(self, acquisition_date, car_value, fuel_type, co2):
# Compute the correction coefficient from the age of the car
now = Datetime.from_string(Datetime.now())
start = Datetime.from_string(acquisition_date)
if start:
number_of_month = (now.year - start.year) * 12.0 + now.month - start.month + int(bool(now.day - start.day + 1))
if number_of_month <= 12:
age_coefficient = 1.00
elif number_of_month <= 24:
age_coefficient = 0.94
elif number_of_month <= 36:
age_coefficient = 0.88
elif number_of_month <= 48:
age_coefficient = 0.82
elif number_of_month <= 60:
age_coefficient = 0.76
else:
age_coefficient = 0.70
car_value = car_value * age_coefficient
# Compute atn value from corrected car_value
magic_coeff = 6.0 / 7.0 # Don't ask me why
if fuel_type == 'electric':
atn = 0.0
else:
if fuel_type in ['diesel', 'hybrid']:
reference = 87.0
else:
reference = 105.0
if co2 <= reference:
atn = car_value * max(0.04, (0.055 - 0.001 * (reference - co2))) * magic_coeff
else:
atn = car_value * min(0.18, (0.055 + 0.001 * (co2 - reference))) * magic_coeff
return max(1280, atn) / 12.0
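        # Worked example (illustrative, using only the formula above): a diesel car
        # less than 12 months old with car_value = 30000 and co2 = 120 gives
        # 30000 * min(0.18, 0.055 + 0.001 * (120 - 87)) * 6/7 = 2262.86 per year,
        # which is above the 1280 floor, so the monthly ATN is 2262.86 / 12 = 188.57.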
@api.onchange('model_id')
def _onchange_model_id(self):
self.car_value = self.model_id.default_car_value
self.co2 = self.model_id.default_co2
self.fuel_type = self.model_id.default_fuel_type
class FleetVehicleLogContract(models.Model):
_inherit = 'fleet.vehicle.log.contract'
recurring_cost_amount_depreciated = fields.Float("Recurring Cost Amount (depreciated)", track_visibility="onchange")
class FleetVehicleModel(models.Model):
_inherit = 'fleet.vehicle.model'
default_recurring_cost_amount_depreciated = fields.Float(string="Cost (Depreciated)",
help="Default recurring cost amount that should be applied to a new car from this model")
default_co2 = fields.Float(string="CO2 emissions")
default_fuel_type = fields.Selection([('gasoline', 'Gasoline'), ('diesel', 'Diesel'), ('electric', 'Electric'), ('hybrid', 'Hybrid')], 'Fuel Type', help='Fuel Used by the vehicle')
default_car_value = fields.Float(string="Catalog Value (VAT Incl.)")
can_be_requested = fields.Boolean(string="Can be requested", help="Can be requested on a contract as a new car")
default_atn = fields.Float(compute='_compute_atn', string="ATN")
default_total_depreciated_cost = fields.Float(compute='_compute_default_total_depreciated_cost', string="Total Cost (Depreciated)")
co2_fee = fields.Float(compute='_compute_co2_fee', string="CO2 fee")
@api.depends('default_car_value', 'default_co2', 'default_fuel_type')
def _compute_atn(self):
now = Datetime.now()
for model in self:
model.default_atn = self.env['fleet.vehicle']._get_car_atn(now, model.default_car_value, model.default_fuel_type, model.default_co2)
@api.depends('co2_fee', 'default_recurring_cost_amount_depreciated')
def _compute_default_total_depreciated_cost(self):
for model in self:
model.default_total_depreciated_cost = model.co2_fee + model.default_recurring_cost_amount_depreciated
@api.depends('default_co2')
def _compute_co2_fee(self):
for model in self:
model.co2_fee = self.env['fleet.vehicle']._get_co2_fee(model.default_co2)
| agpl-3.0 | -8,124,838,845,275,498,000 | 45.460123 | 184 | 0.609402 | false |
lgarren/spack | lib/spack/spack/cmd/blame.py | 1 | 4596 | ##############################################################################
# Copyright (c) 2013-2017, Lawrence Livermore National Security, LLC.
# Produced at the Lawrence Livermore National Laboratory.
#
# This file is part of Spack.
# Created by Todd Gamblin, [email protected], All rights reserved.
# LLNL-CODE-647188
#
# For details, see https://github.com/llnl/spack
# Please also see the NOTICE and LICENSE files for our notice and the LGPL.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License (as
# published by the Free Software Foundation) version 2.1, February 1999.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the IMPLIED WARRANTY OF
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the terms and
# conditions of the GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the Free Software
# Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA
##############################################################################
import os
import re
import llnl.util.tty as tty
from llnl.util.lang import pretty_date
from llnl.util.filesystem import working_dir
from llnl.util.tty.colify import colify_table
import spack
from spack.util.executable import which
from spack.cmd import spack_is_git_repo
description = "show contributors to packages"
section = "developer"
level = "long"
def setup_parser(subparser):
view_group = subparser.add_mutually_exclusive_group()
view_group.add_argument(
'-t', '--time', dest='view', action='store_const', const='time',
default='time', help='sort by last modification date (default)')
view_group.add_argument(
'-p', '--percent', dest='view', action='store_const', const='percent',
help='sort by percent of code')
view_group.add_argument(
'-g', '--git', dest='view', action='store_const', const='git',
help='show git blame output instead of summary')
subparser.add_argument(
'package_name', help='name of package to show contributions for, '
'or path to a file in the spack repo')
def blame(parser, args):
# make sure this is a git repo
if not spack_is_git_repo():
tty.die("This spack is not a git clone. Can't use 'spack blame'")
git = which('git', required=True)
# Get name of file to blame
blame_file = None
if os.path.isfile(args.package_name):
path = os.path.realpath(args.package_name)
if path.startswith(spack.prefix):
blame_file = path
if not blame_file:
pkg = spack.repo.get(args.package_name)
blame_file = pkg.module.__file__.rstrip('c') # .pyc -> .py
# get git blame for the package
with working_dir(spack.prefix):
if args.view == 'git':
git('blame', blame_file)
return
else:
output = git('blame', '--line-porcelain', blame_file, output=str)
lines = output.split('\n')
# Histogram authors
counts = {}
emails = {}
last_mod = {}
total_lines = 0
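    # Format note (descriptive): `git blame --line-porcelain` repeats the full header
    # for every source line, including "author <name>", "author-mail <email>" and
    # "author-time <unix timestamp>" entries, with the actual line content prefixed
    # by a TAB -- which is what the regular expressions below match against.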
for line in lines:
match = re.match(r'^author (.*)', line)
if match:
author = match.group(1)
match = re.match(r'^author-mail (.*)', line)
if match:
email = match.group(1)
match = re.match(r'^author-time (.*)', line)
if match:
mod = int(match.group(1))
last_mod[author] = max(last_mod.setdefault(author, 0), mod)
# ignore comments
if re.match(r'^\t[^#]', line):
counts[author] = counts.setdefault(author, 0) + 1
emails.setdefault(author, email)
total_lines += 1
if args.view == 'time':
rows = sorted(
counts.items(), key=lambda t: last_mod[t[0]], reverse=True)
else: # args.view == 'percent'
rows = sorted(counts.items(), key=lambda t: t[1], reverse=True)
# Print a nice table with authors and emails
table = [['LAST_COMMIT', 'LINES', '%', 'AUTHOR', 'EMAIL']]
for author, nlines in rows:
table += [[
pretty_date(last_mod[author]),
nlines,
round(nlines / float(total_lines) * 100, 1),
author,
emails[author]]]
table += [[''] * 5]
table += [[pretty_date(max(last_mod.values())), total_lines, '100.0'] +
[''] * 3]
colify_table(table)
| lgpl-2.1 | -9,175,143,795,897,328,000 | 34.083969 | 78 | 0.603786 | false |
fujiwarat/ibus-xkb | ui/gtk2/panel.py | 1 | 20712 | # vim:set et sts=4 sw=4:
#
# ibus-xkb - IBus XKB
#
# Copyright(c) 2012 Takao Fujiwara <[email protected]>
# Copyright(c) 2007-2010 Peng Huang <[email protected]>
# Copyright(c) 2007-2012 Red Hat, Inc.
#
# This library is free software; you can redistribute it and/or
# modify it under the terms of the GNU Lesser General Public
# License as published by the Free Software Foundation; either
# version 2 of the License, or(at your option) any later version.
#
# This library is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public
# License along with this program; if not, write to the
# Free Software Foundation, Inc., 59 Temple Place, Suite 330,
# Boston, MA 02111-1307 USA
import gtk
import gtk.gdk as gdk
import glib
import gobject
import ibus
import icon as _icon
import os
import sys
import signal
from os import path
from ibus import interface
from languagebar import LanguageBar
from candidatepanel import CandidatePanel
from engineabout import EngineAbout
from i18n import _, N_
ICON_KEYBOARD = ibus.get_ICON_KEYBOARD()
ICON_ENGINE = "ibus-engine"
def show_uri(screen, link):
try:
gtk.show_uri(screen, link, 0)
except:
        print >> sys.stderr, "pygtk does not support show_uri"
def url_hook(about, link, user_data):
show_uri(about.get_screen(), link)
def email_hook(about, email, user_data):
show_uri(about.get_screen(), "mailto:%s" % email)
gtk.about_dialog_set_url_hook(url_hook, None)
gtk.about_dialog_set_email_hook(email_hook, None)
class Panel(ibus.PanelBase):
__gtype_name__ = "IBusPanel"
def __init__(self, bus):
super(Panel, self).__init__(bus)
self.__bus = bus
self.__config = self.__bus.get_config()
self.__focus_ic = None
self.__setup_pid = None
self.__prefix = os.getenv("IBUS_PREFIX")
self.__data_dir = path.join(self.__prefix, "share", "ibus")
# self.__icons_dir = path.join(self.__data_dir, "icons")
self.__setup_cmd = path.join(self.__prefix, "bin", "ibus-setup")
# connect bus signal
self.__config.connect("value-changed", self.__config_value_changed_cb)
self.__config.connect("reloaded", self.__config_reloaded_cb)
# self.__bus.config_add_watch("panel")
# add icon search path
# icon_theme = gtk.icon_theme_get_default()
# icon_theme.prepend_search_path(self.__icons_dir)
self.__language_bar = LanguageBar()
self.__language_bar.connect("property-activate",
lambda widget, prop_name, prop_state: self.property_activate(prop_name, prop_state))
self.__language_bar.connect("get-im-menu",
self.__get_im_menu_cb)
self.__language_bar.connect("show-engine-about",
self.__show_engine_about_cb)
self.__language_bar.connect("position-changed",
self.__position_changed_cb)
self.__language_bar.focus_out()
self.__language_bar.show_all()
self.__candidate_panel = CandidatePanel()
self.__candidate_panel.connect("cursor-up",
lambda widget: self.cursor_up())
self.__candidate_panel.connect("cursor-down",
lambda widget: self.cursor_down())
self.__candidate_panel.connect("page-up",
lambda widget: self.page_up())
self.__candidate_panel.connect("page-down",
lambda widget: self.page_down())
self.__candidate_panel.connect("candidate-clicked",
lambda widget, index, button, state: self.candidate_clicked(index, button, state))
self.__status_icon = gtk.StatusIcon()
# gnome-shell checks XClassHint.res_class with ShellTrayIcon.
# gtk_status_icon_set_name() can set XClassHint.res_class .
# However gtk_status_icon_new() also calls gtk_window_realize() so
# gtk_status_icon_set_visible() needs to be called to set WM_CLASS
# so that gtk_window_realize() is called later again.
# set_title is for gnome-shell notificationDaemon in bottom right.
self.__status_icon.set_visible(False)
# gtk_status_icon_set_name() is not available in pygtk2 2.17
if hasattr(self.__status_icon, 'set_name'):
self.__status_icon.set_name('ibus-ui-gtk')
self.__status_icon.set_title(_("IBus Panel"))
# Hide icon until bus get the name owner.
#self.__status_icon.set_visible(True)
self.__status_icon.connect("popup-menu", self.__status_icon_popup_menu_cb)
self.__status_icon.connect("activate", self.__status_icon_activate_cb)
self.__status_icon.set_from_icon_name(ICON_KEYBOARD)
self.__status_icon.set_tooltip(_("IBus input method framework"))
# Hide icon until bus get the name owner.
#self.__status_icon.set_visible(True)
self.__config_load_lookup_table_orientation()
self.__config_load_show()
self.__config_load_position()
self.__config_load_custom_font()
# Hide icon until bus get the name owner.
#self.__config_load_show_icon_on_systray()
self.__config_load_show_im_name()
# self.__bus.request_name(ibus.panel.IBUS_SERVICE_PANEL, 0)
def set_cursor_location(self, x, y, w, h):
self.__candidate_panel.set_cursor_location(x, y, w, h)
def update_preedit_text(self, text, cursor_pos, visible):
self.__candidate_panel.update_preedit_text(text, cursor_pos, visible)
def show_preedit_text(self):
self.__candidate_panel.show_preedit_text()
def hide_preedit_text(self):
self.__candidate_panel.hide_preedit_text()
def update_auxiliary_text(self, text, visible):
self.__candidate_panel.update_auxiliary_text(text, visible)
def show_auxiliary_text(self):
self.__candidate_panel.show_auxiliary_text()
def hide_auxiliary_text(self):
self.__candidate_panel.hide_auxiliary_text()
def update_lookup_table(self, lookup_table, visible):
self.__candidate_panel.update_lookup_table(lookup_table, visible)
def show_lookup_table(self):
self.__candidate_panel.show_lookup_table()
def hide_lookup_table(self):
self.__candidate_panel.hide_lookup_table()
def page_up_lookup_table(self):
self.__candidate_panel.page_up_lookup_table()
def page_down_lookup_table(self):
self.__candidate_panel.page_down_lookup_table()
def cursor_up_lookup_table(self):
self.__candidate_panel.cursor_up_lookup_table()
def cursor_down_lookup_table(self):
self.__candidate_panel.cursor_down_lookup_table()
def show_candidate_window(self):
self.__candidate_panel.show_all()
def hide_candidate_window(self):
self.__candidate_panel.hide_all()
def show_language_bar(self):
self.__language_bar.show_all()
def hide_language_bar(self):
self.__language_bar.hide_all()
def register_properties(self, props):
self.__language_bar.register_properties(props)
def update_property(self, prop):
self.__language_bar.update_property(prop)
def get_status_icon(self):
return self.__status_icon
def hide(self):
if self.__status_icon == None:
return
self.__status_icon.set_visible(False)
def show(self):
if self.__status_icon == None:
return
self.__config_load_show_icon_on_systray()
def __set_im_icon(self, icon_name):
if not icon_name:
icon_name = ICON_ENGINE
self.__language_bar.set_im_icon(icon_name)
if icon_name.startswith("/"):
self.__status_icon.set_from_file(icon_name)
else:
self.__status_icon.set_from_icon_name(icon_name)
def __set_im_name(self, name):
self.__language_bar.set_im_name(name)
def focus_in(self, ic):
self.reset()
self.__focus_ic = ibus.InputContext(self.__bus, ic)
enabled = True or self.__focus_ic.is_enabled()
self.__language_bar.set_enabled(enabled)
if not enabled:
self.__set_im_icon(ICON_KEYBOARD)
self.__set_im_name(None)
else:
engine = self.__focus_ic.get_engine()
if engine:
self.__set_im_icon(engine.icon)
self.__set_im_name(engine.longname)
else:
self.__set_im_icon(ICON_KEYBOARD)
self.__set_im_name(None)
self.__language_bar.focus_in()
def focus_out(self, ic):
self.reset()
self.__focus_ic = None
self.__language_bar.set_enabled(False)
self.__language_bar.focus_out()
self.__set_im_icon(ICON_KEYBOARD)
self.__set_im_name(None)
def state_changed(self):
if not self.__focus_ic:
return
enabled = self.__focus_ic.is_enabled()
self.__language_bar.set_enabled(enabled)
if enabled == False:
self.reset()
self.__set_im_icon(ICON_KEYBOARD)
self.__set_im_name(None)
else:
engine = self.__focus_ic.get_engine()
if engine:
self.__set_im_icon(engine.icon)
self.__set_im_name(engine.longname)
else:
self.__set_im_icon(ICON_KEYBOARD)
self.__set_im_name(None)
def reset(self):
self.__candidate_panel.reset()
self.__language_bar.reset()
def start_setup(self):
self.__start_setup()
def do_destroy(self):
gtk.main_quit()
def __config_load_lookup_table_orientation(self):
value = self.__config.get_value("panel", "lookup_table_orientation", 0)
if value in (ibus.ORIENTATION_HORIZONTAL, ibus.ORIENTATION_VERTICAL):
orientation = value
else:
orientation = ibus.ORIENTATION_HORIZONTAL
self.__candidate_panel.set_orientation(orientation)
def __config_load_show(self):
show = self.__config.get_value("panel", "show", 0)
self.__language_bar.set_show(show)
def __config_load_position(self):
x = self.__config.get_value("panel", "x", -1)
y = self.__config.get_value("panel", "y", -1)
self.__language_bar.set_position(x, y)
def __config_load_custom_font(self):
use_custom_font = self.__config.get_value("panel", "use_custom_font", False)
font_name = gtk.settings_get_default().get_property("gtk-font-name")
font_name = unicode(font_name, "utf-8")
custom_font = self.__config.get_value("panel", "custom_font", font_name)
style_string = 'style "custom-font" { font_name="%s" }\n' \
'class "IBusCandidateLabel" style "custom-font"\n'
if use_custom_font:
style_string = style_string % custom_font
gtk.rc_parse_string(style_string)
else:
style_string = style_string % ""
gtk.rc_parse_string(style_string)
settings = gtk.settings_get_default()
gtk.rc_reset_styles(settings)
def __config_load_show_icon_on_systray(self):
value = self.__config.get_value("panel", "show_icon_on_systray", True)
self.__status_icon.set_visible(True if value else False)
def __config_load_show_im_name(self):
value = self.__config.get_value("panel", "show_im_name", False)
self.__language_bar.set_show_im_name(value)
def __config_value_changed_cb(self, bus, section, name, value):
if section != "panel":
return
if name == "lookup_table_orientation":
self.__config_load_lookup_table_orientation()
elif name == "show":
self.__config_load_show()
elif name == "use_custom_font" or name == "custom_font":
self.__config_load_custom_font()
elif name == "show_icon_on_systray":
self.__config_load_show_icon_on_systray()
elif name == "show_im_name":
self.__config_load_show_im_name()
elif name == "x" or name == "y":
pass
else:
print >> sys.stderr, "Unknown config item [%s]" % name
def __config_reloaded_cb(self, bus):
pass
def __create_sys_menu(self):
menu = gtk.Menu()
item = gtk.ImageMenuItem(gtk.STOCK_PREFERENCES)
item.connect("activate",
self.__sys_menu_item_activate_cb, gtk.STOCK_PREFERENCES)
menu.add(item)
item = gtk.ImageMenuItem(gtk.STOCK_ABOUT)
item.connect("activate",
self.__sys_menu_item_activate_cb, gtk.STOCK_ABOUT)
menu.add(item)
menu.add(gtk.SeparatorMenuItem())
item = gtk.MenuItem(_("Restart"))
item.connect("activate",
self.__sys_menu_item_activate_cb, "Restart")
menu.add(item)
item = gtk.ImageMenuItem(gtk.STOCK_QUIT)
item.connect("activate",
self.__sys_menu_item_activate_cb, gtk.STOCK_QUIT)
menu.add(item)
menu.show_all()
menu.set_take_focus(False)
return menu
# def __create_im_menu(self):
# menu = gtk.Menu()
# engines = self.__bus.list_active_engines()
# tmp = {}
# for engine in engines:
# lang = ibus.get_language_name(engine.language)
# if lang not in tmp:
# tmp[lang] = []
# tmp[lang].append(engine)
# langs = tmp.keys()
# other = tmp.get(_("Other"), [])
# if _("Other") in tmp:
# langs.remove(_("Other"))
# langs.append(_("Other"))
# size = gtk.icon_size_lookup(gtk.ICON_SIZE_MENU)
# for lang in langs:
# if len(tmp[lang]) == 1:
# engine = tmp[lang][0]
# item = gtk.ImageMenuItem("%s - %s" % (lang, engine.longname))
# if engine.icon:
# item.set_image(_icon.IconWidget(engine.icon, size[0]))
# else:
# item.set_image(_icon.IconWidget(ICON_ENGINE, size[0]))
# item.connect("activate", self.__im_menu_item_activate_cb, engine)
# menu.add(item)
# else:
# item = gtk.MenuItem(lang)
# menu.add(item)
# submenu = gtk.Menu()
# item.set_submenu(submenu)
# for engine in tmp[lang]:
# item = gtk.ImageMenuItem(engine.longname)
# if engine.icon:
# item.set_image(_icon.IconWidget(engine.icon, size[0]))
# else:
# item.set_image(_icon.IconWidget(ICON_ENGINE, size[0]))
# item.connect("activate", self.__im_menu_item_activate_cb, engine)
# submenu.add(item)
# item = gtk.ImageMenuItem(_("Turn off input method"))
# item.set_image(_icon.IconWidget("gtk-close", size[0]))
# item.connect("activate", self.__im_menu_item_activate_cb, None)
# menu.add(item)
# menu.show_all()
# menu.set_take_focus(False)
# return menu
def __create_im_menu(self):
# FIXME
# engines = self.__bus.list_engines()
names = self.__config.get_value("general", "preload_engines",
["xkb:us::eng", "xkb:us:intl:eng", "pinyin"])
engines = self.__bus.get_engines_by_names(names)
current_engine = \
(self.__focus_ic != None and self.__focus_ic.get_engine()) or \
(engines and engines[0]) or \
None
size = gtk.icon_size_lookup(gtk.ICON_SIZE_MENU)
menu = gtk.Menu()
for i, engine in enumerate(engines):
lang = ibus.get_language_name(engine.language)
item = gtk.ImageMenuItem("%s - %s" % (lang, engine.longname))
if current_engine and current_engine.name == engine.name:
for widget in item.get_children():
if isinstance(widget, gtk.Label):
widget.set_markup("<b>%s</b>" % widget.get_text())
if engine.icon:
item.set_image(_icon.IconWidget(engine.icon, size[0]))
else:
item.set_image(_icon.IconWidget(ICON_ENGINE, size[0]))
item.connect("activate", self.__im_menu_item_activate_cb, engine)
menu.add(item)
item = gtk.ImageMenuItem(_("Turn off input method"))
item.set_image(_icon.IconWidget("gtk-close", size[0]))
item.connect("activate", self.__im_menu_item_activate_cb, None)
if self.__focus_ic == None:
item.set_sensitive(False)
menu.add(item)
menu.show_all()
menu.set_take_focus(False)
return menu
def __get_im_menu_cb(self, languagebar):
menu = self.__create_im_menu()
return menu
def __show_engine_about_cb(self, langagebar):
try:
engine = self.__focus_ic.get_engine()
dlg = EngineAbout(engine)
dlg.run()
dlg.destroy()
except:
pass
def __position_changed_cb(self, langagebar, x, y):
self.__config.set_value("panel", "x", x)
self.__config.set_value("panel", "y", y)
def __status_icon_popup_menu_cb(self, status_icon, button, active_time):
menu = self.__create_sys_menu()
menu.popup(None, None,
gtk.status_icon_position_menu,
button,
active_time,
self.__status_icon)
def __status_icon_activate_cb(self, status_icon):
if not self.__focus_ic:
menu = gtk.Menu()
item = gtk.ImageMenuItem(_("No input window"))
size = gtk.icon_size_lookup(gtk.ICON_SIZE_MENU)
item.set_image(_icon.IconWidget("gtk-dialog-info", size[0]))
menu.add(item)
menu.show_all()
else:
menu = self.__create_im_menu()
self.__language_bar.create_im_menu(menu)
menu.popup(None, None,
gtk.status_icon_position_menu,
0,
gtk.get_current_event_time(),
self.__status_icon)
def __im_menu_item_activate_cb(self, item, engine):
if not self.__focus_ic:
return
if engine:
self.__focus_ic.set_engine(engine)
else:
self.__focus_ic.disable()
def __sys_menu_item_activate_cb(self, item, command):
if command == gtk.STOCK_PREFERENCES:
self.__start_setup()
elif command == gtk.STOCK_ABOUT:
about_dialog = gtk.AboutDialog()
about_dialog.set_program_name("IBus")
about_dialog.set_version(ibus.get_version())
about_dialog.set_copyright(ibus.get_copyright())
about_dialog.set_license(ibus.get_license())
about_dialog.set_comments(_("IBus is an intelligent input bus for Linux/Unix."))
about_dialog.set_website("http://code.google.com/p/ibus")
about_dialog.set_authors(["Peng Huang <[email protected]>"])
about_dialog.set_documenters(["Peng Huang <[email protected]>"])
about_dialog.set_translator_credits(_("translator-credits"))
about_dialog.set_logo_icon_name("ibus")
about_dialog.set_icon_name("ibus")
about_dialog.run()
about_dialog.destroy()
elif command == gtk.STOCK_QUIT:
self.__bus.exit(False)
elif command == "Restart":
self.__bus.exit(True)
else:
print >> sys.stderr, "Unknown command %s" % command
def __child_watch_cb(self, pid, status):
if self.__setup_pid == pid:
self.__setup_pid.close()
self.__setup_pid = None
def __start_setup(self):
if self.__setup_pid != None:
try:
# if setup dialog is running, bring the dialog to front by SIGUSR1
os.kill(self.__setup_pid, signal.SIGUSR1)
return
except OSError:
# seems the setup dialog is not running anymore
self.__setup_pid.close()
self.__setup_pid = None
pid = glib.spawn_async(argv=[self.__setup_cmd, "ibus-setup"],
flags=glib.SPAWN_DO_NOT_REAP_CHILD)[0]
self.__setup_pid = pid
glib.child_watch_add(self.__setup_pid, self.__child_watch_cb)
| lgpl-2.1 | -4,664,742,584,949,670,000 | 36.726776 | 108 | 0.576815 | false |
Yelp/kafka-utils | tests/acceptance/steps/config_update.py | 1 | 2155 | # -*- coding: utf-8 -*-
# Copyright 2016 Yelp Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import absolute_import
import time
from behave import then
from behave import when
from kafka.errors import MessageSizeTooLargeError
from steps.util import get_cluster_config
from steps.util import produce_example_msg
from steps.util import update_topic_config
from kafka_utils.util.zookeeper import ZK
@when(u'we set the configuration of the topic to 0 bytes')
def step_impl1(context):
context.output = update_topic_config(
context.topic,
'max.message.bytes=0'
)
@then(u'we produce to a kafka topic it should fail')
def step_impl2(context):
try:
produce_example_msg(context.topic, num_messages=1)
assert False, "Exception should not be raised"
except MessageSizeTooLargeError as e:
assert isinstance(e, MessageSizeTooLargeError)
@when(u'we change the topic config in zk to 10000 bytes for kafka 10')
def step_impl3(context):
cluster_config = get_cluster_config()
with ZK(cluster_config) as zk:
current_config = zk.get_topic_config(context.topic)
current_config['config']['max.message.bytes'] = '1000'
zk.set_topic_config(context.topic, value=current_config)
time.sleep(2) # sleeping for 2 seconds to ensure config is actually picked up
@then(u'we produce to a kafka topic it should succeed')
def step_impl5(context):
try:
produce_example_msg(context.topic, num_messages=1)
    except MessageSizeTooLargeError:
        assert False, "Exception should not be raised"
| apache-2.0 | -7,625,282,322,247,222,000 | 33.758065 | 82 | 0.72993 | false |
joannne/aggregator | config/wsgi.py | 1 | 1620 | """
WSGI config for aggregator project.
This module contains the WSGI application used by Django's development server
and any production WSGI deployments. It should expose a module-level variable
named ``application``. Django's ``runserver`` and ``runfcgi`` commands discover
this application via the ``WSGI_APPLICATION`` setting.
Usually you will have the standard Django WSGI application here, but it also
might make sense to replace the whole Django WSGI application with a custom one
that later delegates to the Django one. For example, you could introduce WSGI
middleware here, or combine a Django application with an application of another
framework.
"""
import os
from django.core.wsgi import get_wsgi_application
from whitenoise.django import DjangoWhiteNoise
# We defer to a DJANGO_SETTINGS_MODULE already in the environment. This breaks
# if running multiple sites in the same mod_wsgi process. To fix this, use
# mod_wsgi daemon mode with each site in its own daemon process, or use
# os.environ["DJANGO_SETTINGS_MODULE"] = "config.settings.production"
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "config.settings.production")
# This application object is used by any WSGI server configured to use this
# file. This includes Django's development server, if the WSGI_APPLICATION
# setting points here.
application = get_wsgi_application()
# Use Whitenoise to serve static files
# See: https://whitenoise.readthedocs.org/
application = DjangoWhiteNoise(application)
# Apply WSGI middleware here.
# from helloworld.wsgi import HelloWorldApplication
# application = HelloWorldApplication(application)
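# Illustrative sketch (not part of the original project): a minimal WSGI
# middleware wrapper, shown only to make the docstring's suggestion concrete.
# The class name and the response header it adds are assumptions for the
# example; a real deployment would pick its own.
#
# class HeaderMiddleware(object):
#     def __init__(self, app):
#         self.app = app
#
#     def __call__(self, environ, start_response):
#         def _start_response(status, headers, exc_info=None):
#             # Append one extra response header before delegating.
#             headers = list(headers) + [("X-Aggregator", "1")]
#             if exc_info is None:
#                 return start_response(status, headers)
#             return start_response(status, headers, exc_info)
#         return self.app(environ, _start_response)
#
# application = HeaderMiddleware(application)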
| bsd-3-clause | -3,334,377,678,736,894,500 | 41.631579 | 79 | 0.797531 | false |
ellonweb/merlin | Hooks/user/__init__.py | 1 | 1360 | # This file is part of Merlin.
# Merlin is the Copyright (C)2008,2009,2010 of Robin K. Hansen, Elliot Rosemarine, Andreas Jacobsen.
# Individual portions may be copyright by individual contributors, and
# are included in this collective work with permission of the copyright
# owners.
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# List of package modules
__all__ = [
"adduser",
"galmate",
"edituser",
"getanewdaddy",
"remuser",
"whois",
"aids",
"pref",
"phone",
"quitter",
"quits",
"addchan",
"galchan",
"remchan",
"alias",
]
| gpl-2.0 | -6,343,790,764,005,444,000 | 33.871795 | 100 | 0.649265 | false |
eustislab/horton | tools/codecleaner.py | 1 | 1724 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# HORTON: Helpful Open-source Research TOol for N-fermion systems.
# Copyright (C) 2011-2015 The HORTON Development Team
#
# This file is part of HORTON.
#
# HORTON is free software; you can redistribute it and/or
# modify it under the terms of the GNU General Public License
# as published by the Free Software Foundation; either version 3
# of the License, or (at your option) any later version.
#
# HORTON is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, see <http://www.gnu.org/licenses/>
#
#--
'''Tool to remove whitespace and tab cruft from source code.'''
import sys
def clean_code(fn):
print 'Cleaning', fn
# read lines
with open(fn) as f:
lines = f.readlines()
# this will be set to true if something really changes. if not, the file
# is not rewritten.
changed = False
# line-by-line stripping of rubish
for i in xrange(len(lines)):
line = lines[i].replace('\t', ' ')
line = line.rstrip() + '\n'
changed |= (line != lines[i])
lines[i] = line
# remove empty lines from end of file
while lines[-1] == '\n':
changed = True
lines.pop(-1)
if changed:
# write lines
with open(fn, 'w') as f:
f.writelines(lines)
if __name__ == '__main__':
# just process all files given as command-line arguments
for fn in sys.argv[1:]:
clean_code(fn)
| gpl-3.0 | -1,398,137,947,129,084,000 | 28.220339 | 76 | 0.649652 | false |
tonybaloney/st2 | st2api/st2api/controllers/v1/aliasexecution.py | 1 | 7905 | # Licensed to the StackStorm, Inc ('StackStorm') under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import jsonschema
from jinja2.exceptions import UndefinedError
from oslo_config import cfg
import six
from st2api.controllers.base import BaseRestControllerMixin
from st2common import log as logging
from st2common.exceptions.db import StackStormDBObjectNotFoundError
from st2common.models.api.action import ActionAliasAPI
from st2common.models.api.auth import get_system_username
from st2common.models.api.execution import ActionExecutionAPI
from st2common.models.db.auth import UserDB
from st2common.models.db.liveaction import LiveActionDB
from st2common.models.db.notification import NotificationSchema, NotificationSubSchema
from st2common.models.utils import action_param_utils
from st2common.models.utils.action_alias_utils import extract_parameters_for_action_alias_db
from st2common.persistence.actionalias import ActionAlias
from st2common.services import action as action_service
from st2common.util import action_db as action_utils
from st2common.util import reference
from st2common.util.jinja import render_values as render
from st2common.rbac.types import PermissionType
from st2common.rbac.utils import assert_user_has_resource_db_permission
from st2common.router import abort
from st2common.router import Response
http_client = six.moves.http_client
LOG = logging.getLogger(__name__)
CAST_OVERRIDES = {
'array': (lambda cs_x: [v.strip() for v in cs_x.split(',')])
}
class ActionAliasExecutionController(BaseRestControllerMixin):
def post(self, payload, requester_user, show_secrets=False):
action_alias_name = payload.name if payload else None
if not action_alias_name:
abort(http_client.BAD_REQUEST, 'Alias execution "name" is required')
return
if not requester_user:
requester_user = UserDB(cfg.CONF.system_user.user)
format_str = payload.format or ''
command = payload.command or ''
try:
action_alias_db = ActionAlias.get_by_name(action_alias_name)
except ValueError:
action_alias_db = None
if not action_alias_db:
msg = 'Unable to identify action alias with name "%s".' % (action_alias_name)
abort(http_client.NOT_FOUND, msg)
return
if not action_alias_db.enabled:
msg = 'Action alias with name "%s" is disabled.' % (action_alias_name)
abort(http_client.BAD_REQUEST, msg)
return
execution_parameters = extract_parameters_for_action_alias_db(
action_alias_db=action_alias_db,
format_str=format_str,
param_stream=command)
notify = self._get_notify_field(payload)
context = {
'action_alias_ref': reference.get_ref_from_model(action_alias_db),
'api_user': payload.user,
'user': requester_user.name,
'source_channel': payload.source_channel
}
execution = self._schedule_execution(action_alias_db=action_alias_db,
params=execution_parameters,
notify=notify,
context=context,
show_secrets=show_secrets,
requester_user=requester_user)
result = {
'execution': execution,
'actionalias': ActionAliasAPI.from_model(action_alias_db)
}
if action_alias_db.ack:
try:
if 'format' in action_alias_db.ack:
result.update({
'message': render({'alias': action_alias_db.ack['format']}, result)['alias']
})
except UndefinedError as e:
result.update({
'message': 'Cannot render "format" in field "ack" for alias. ' + e.message
})
try:
if 'extra' in action_alias_db.ack:
result.update({
'extra': render(action_alias_db.ack['extra'], result)
})
except UndefinedError as e:
result.update({
'extra': 'Cannot render "extra" in field "ack" for alias. ' + e.message
})
return Response(json=result, status=http_client.CREATED)
def _tokenize_alias_execution(self, alias_execution):
tokens = alias_execution.strip().split(' ', 1)
return (tokens[0], tokens[1] if len(tokens) > 1 else None)
def _get_notify_field(self, payload):
on_complete = NotificationSubSchema()
route = (getattr(payload, 'notification_route', None) or
getattr(payload, 'notification_channel', None))
on_complete.routes = [route]
on_complete.data = {
'user': payload.user,
'source_channel': payload.source_channel
}
notify = NotificationSchema()
notify.on_complete = on_complete
return notify
def _schedule_execution(self, action_alias_db, params, notify, context, requester_user,
show_secrets):
action_ref = action_alias_db.action_ref
action_db = action_utils.get_action_by_ref(action_ref)
if not action_db:
raise StackStormDBObjectNotFoundError('Action with ref "%s" not found ' % (action_ref))
assert_user_has_resource_db_permission(user_db=requester_user, resource_db=action_db,
permission_type=PermissionType.ACTION_EXECUTE)
try:
# prior to shipping off the params cast them to the right type.
params = action_param_utils.cast_params(action_ref=action_alias_db.action_ref,
params=params,
cast_overrides=CAST_OVERRIDES)
if not context:
context = {
'action_alias_ref': reference.get_ref_from_model(action_alias_db),
'user': get_system_username()
}
liveaction = LiveActionDB(action=action_alias_db.action_ref, context=context,
parameters=params, notify=notify)
_, action_execution_db = action_service.request(liveaction)
mask_secrets = self._get_mask_secrets(requester_user, show_secrets=show_secrets)
return ActionExecutionAPI.from_model(action_execution_db, mask_secrets=mask_secrets)
except ValueError as e:
LOG.exception('Unable to execute action.')
abort(http_client.BAD_REQUEST, str(e))
except jsonschema.ValidationError as e:
LOG.exception('Unable to execute action. Parameter validation failed.')
abort(http_client.BAD_REQUEST, str(e))
except Exception as e:
LOG.exception('Unable to execute action. Unexpected error encountered.')
abort(http_client.INTERNAL_SERVER_ERROR, str(e))
action_alias_execution_controller = ActionAliasExecutionController()
| apache-2.0 | 2,460,295,195,834,012,700 | 42.196721 | 100 | 0.624415 | false |
vadzimt/python-nss | doc/examples/verify_cert.py | 1 | 11707 | from __future__ import absolute_import
from __future__ import print_function
import argparse
import sys
import nss.nss as nss
import nss.error as nss_error
'''
This example illustrates how one can use NSS to verify (validate) a
certificate. Certificate validation starts with an intended usage for
the certificate and returns a set of flags indicating which usages the
certificate is actually valid for. When a cert fails validation it can be
useful to obtain diagnostic information as to why. One of the
verification methods includes returning the diagnostic information in
what is called a log. A cert can also be checked to see if it
qualifies as a CA cert.
The actual code to verify the cert is simple and straightforward. The
complexity in this example derives mainly from handling all the
options necessary to make the example flexible.
* The certificate may either be read from a file or loaded by nickname
from a NSS database.
* You can optionally print the details of the cert.
* You can specify a set of intended cert usages (each -u option adds
another usage to the set).
* You can enable/disable checking the cert signature.
* You can enable/disable using the log variant.
* You can enable/disable verifying the cert's CA status.
* The results are pretty printed.
'''
#-------------------------------------------------------------------------------
cert_usage_map = {
'CheckAllUsages' : nss.certificateUsageCheckAllUsages,
'SSLClient' : nss.certificateUsageSSLClient,
'SSLServer' : nss.certificateUsageSSLServer,
'SSLServerWithStepUp' : nss.certificateUsageSSLServerWithStepUp,
'SSLCA' : nss.certificateUsageSSLCA,
'EmailSigner' : nss.certificateUsageEmailSigner,
'EmailRecipient' : nss.certificateUsageEmailRecipient,
'ObjectSigner' : nss.certificateUsageObjectSigner,
'UserCertImport' : nss.certificateUsageUserCertImport,
'VerifyCA' : nss.certificateUsageVerifyCA,
'ProtectedObjectSigner' : nss.certificateUsageProtectedObjectSigner,
'StatusResponder' : nss.certificateUsageStatusResponder,
'AnyCA' : nss.certificateUsageAnyCA,
}
#-------------------------------------------------------------------------------
def password_callback(slot, retry, password):
return options.db_passwd
def indented_output(msg, l, level=0):
msg = '%s:' % msg
lines = []
if not l:
l = ['--']
lines.extend(nss.make_line_fmt_tuples(level, msg))
lines.extend(nss.make_line_fmt_tuples(level+1, l))
return nss.indented_format(lines)
def indented_obj(msg, obj, level=0):
msg = '%s:' % msg
lines = []
lines.extend(nss.make_line_fmt_tuples(level, msg))
lines.extend(obj.format_lines(level+1))
return nss.indented_format(lines)
#-------------------------------------------------------------------------------
def main():
global options
parser = argparse.ArgumentParser(description='certificate validation example')
# === NSS Database Group ===
group = parser.add_argument_group('NSS Database',
'Specify & control the NSS Database')
group.add_argument('-d', '--db-name',
help='NSS database name (e.g. "sql:pki")')
group.add_argument('-P', '--db-passwd',
help='NSS database password')
# === Certificate Group ===
group = parser.add_argument_group('Certificate',
'Specify how the certificate is loaded')
group.add_argument('-f', '--file', dest='cert_filename',
help='read cert from file')
group.add_argument('-F', '--input-format', choices=['pem', 'der'],
help='format of input cert')
group.add_argument('-n', '--nickname', dest='cert_nickname',
help='load cert from NSS database by looking it up under this nickname')
# === Validation Group ===
group = parser.add_argument_group('Validation',
'Control the validation')
group.add_argument('-u', '--usage', dest='cert_usage', action='append', choices=list(cert_usage_map.keys()),
help='certificate usage flags, may be specified multiple times')
group.add_argument('-c', '--check-sig', action='store_true', dest='check_sig',
help='check signature')
group.add_argument('-C', '--no-check-sig', action='store_false', dest='check_sig',
help='do not check signature')
group.add_argument('-l', '--log', action='store_true', dest='with_log',
help='use verify log')
group.add_argument('-L', '--no-log', action='store_false', dest='with_log',
help='do not use verify log')
group.add_argument('-a', '--check-ca', action='store_true', dest='check_ca',
help='check if cert is CA')
group.add_argument('-A', '--no-check-ca', action='store_false', dest='check_ca',
help='do not check if cert is CA')
# === Miscellaneous Group ===
group = parser.add_argument_group('Miscellaneous',
'Miscellaneous options')
group.add_argument('-p', '--print-cert', action='store_true', dest='print_cert',
help='print the certificate in a friendly fashion')
parser.set_defaults(db_name = 'sql:pki',
db_passwd = 'db_passwd',
input_format = 'pem',
check_sig = True,
with_log = True,
check_ca = True,
print_cert = False,
)
options = parser.parse_args()
# Process the command line arguments
# Get usage bitmask
if options.cert_usage:
intended_usage = 0
for usage in options.cert_usage:
try:
flag = cert_usage_map[usage]
except KeyError:
print("Unknown usage '%s', valid values: %s" % (usage, ', '.join(sorted(cert_usage_map.keys()))))
return 1
else:
intended_usage |= flag
else:
# We can't use nss.certificateUsageCheckAllUsages here because
# it's a special value of zero instead of being the bitwise OR
# of all the certificateUsage* flags (go figure!)
intended_usage = 0
for usage in list(cert_usage_map.values()):
intended_usage |= usage
if options.cert_filename and options.cert_nickname:
print("You may not specify both a cert filename and a nickname, only one or the other", file=sys.stderr)
return 1
if not options.cert_filename and not options.cert_nickname:
print("You must specify either a cert filename or a nickname to load", file=sys.stderr)
return 1
# Initialize NSS.
print(indented_output('NSS Database', options.db_name))
print()
nss.nss_init(options.db_name)
certdb = nss.get_default_certdb()
nss.set_password_callback(password_callback)
# Load the cert
if options.cert_filename:
# Read the certificate as DER encoded data then initialize a Certificate from the DER data
filename = options.cert_filename
si = nss.read_der_from_file(filename, options.input_format.lower() == 'pem')
# Parse the DER encoded data returning a Certificate object
cert = nss.Certificate(si)
else:
try:
cert = nss.find_cert_from_nickname(options.cert_nickname)
except Exception as e:
print(e)
print('Unable to load cert nickname "%s" from database "%s"' % \
(options.cert_nickname, options.db_name), file=sys.stderr)
return 1
# Dump the cert if the user wants to see it
if options.print_cert:
print(cert)
else:
print(indented_output('cert subject', cert.subject))
print()
# Dump the usages attached to the cert
print(indented_output('cert has these usages', nss.cert_type_flags(cert.cert_type)))
# Should we check if the cert is a CA cert?
if options.check_ca:
# CA Cert?
is_ca, cert_type = cert.is_ca_cert(True)
print()
print(indented_output('is CA cert boolean', is_ca))
print(indented_output('is CA cert returned usages', nss.cert_type_flags(cert_type)))
print()
print(indented_output('verifying usages for', nss.cert_usage_flags(intended_usage)))
print()
# Use the log or non-log variant to verify the cert
#
# Note: Anytime a NSPR or NSS function returns an error in python-nss it
# raises a NSPRError exception. When an exception is raised the normal
# return values are discarded because the flow of control continues at
# the first except block prepared to catch the exception. Normally this
# is what is desired because the return values would be invalid due to
# the error. However the certificate verification functions are an
# exception (no pun intended). An error might be returned indicating the
# cert failed verification but you may still need access to the returned
# usage bitmask and the log (if using the log variant). To handle this a
# special error exception `CertVerifyError` (derived from `NSPRError`)
# is defined which in addition to the normal NSPRError fields will also
# contain the returned usages and optionally the CertVerifyLog
# object. If no exception is raised these are returned as normal return
# values.
approved_usage = 0
if options.with_log:
try:
approved_usage, log = cert.verify_with_log(certdb, options.check_sig, intended_usage, None)
except nss_error.CertVerifyError as e:
# approved_usage and log available in CertVerifyError exception on failure.
print(e)
print()
print(indented_obj('log', e.log))
print()
print(indented_output('approved usages from exception', nss.cert_usage_flags(e.usages)))
approved_usage = e.usages # Get the returned usage bitmask from the exception
except Exception as e:
print(e)
else:
print(indented_output('approved usages', nss.cert_usage_flags(approved_usage)))
if log.count:
print()
print(indented_obj('log', log))
else:
try:
approved_usage = cert.verify(certdb, options.check_sig, intended_usage, None)
except nss_error.CertVerifyError as e:
# approved_usage available in CertVerifyError exception on failure.
print(e)
print(indented_output('approved usages from exception', nss.cert_usage_flags(e.usages)))
approved_usage = e.usages # Get the returned usage bitmask from the exception
except Exception as e:
print(e)
else:
print(indented_output('approved usages', nss.cert_usage_flags(approved_usage)))
# The cert is valid if all the intended usages are in the approved usages
valid = (intended_usage & approved_usage) == intended_usage
print()
if valid:
print(indented_output('SUCCESS: cert is approved for', nss.cert_usage_flags(intended_usage)))
return 0
else:
print(indented_output('FAIL: cert not approved for', nss.cert_usage_flags(intended_usage ^ approved_usage)))
return 1
#-------------------------------------------------------------------------------
if __name__ == "__main__":
sys.exit(main())
| gpl-2.0 | 5,058,057,332,866,849,000 | 40.221831 | 116 | 0.608183 | false |
intel-analytics/analytics-zoo | pyzoo/zoo/common/utils.py | 1 | 5425 | #
# Copyright 2018 Analytics Zoo Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from bigdl.util.common import Sample as BSample, JTensor as BJTensor,\
JavaCreator, _get_gateway, _py2java, _java2py
import numpy as np
import os
import tempfile
import uuid
from urllib.parse import urlparse
def convert_to_safe_path(input_path, follow_symlinks=True):
# resolves symbolic links
if follow_symlinks:
return os.path.realpath(input_path)
# covert to abs path
return os.path.abspath(input_path)
def to_list_of_numpy(elements):
if isinstance(elements, np.ndarray):
return [elements]
elif np.isscalar(elements):
return [np.array(elements)]
elif not isinstance(elements, list):
raise ValueError("Wrong type: %s" % type(elements))
results = []
for element in elements:
if np.isscalar(element):
results.append(np.array(element))
elif isinstance(element, np.ndarray):
results.append(element)
else:
raise ValueError("Wrong type: %s" % type(element))
return results
def get_file_list(path, recursive=False):
return callZooFunc("float", "listPaths", path, recursive)
def is_local_path(path):
parse_result = urlparse(path)
return len(parse_result.scheme.lower()) == 0 or parse_result.scheme.lower() == "file"
def append_suffix(prefix, path):
# append suffix
splits = path.split(".")
if len(splits) > 0:
file_name = prefix + "." + splits[-1]
else:
file_name = prefix
return file_name
def save_file(save_func, path, **kwargs):
if is_local_path(path):
save_func(path, **kwargs)
else:
file_name = str(uuid.uuid1())
file_name = append_suffix(file_name, path)
temp_path = os.path.join(tempfile.gettempdir(), file_name)
try:
save_func(temp_path, **kwargs)
if "overwrite" in kwargs:
put_local_file_to_remote(temp_path, path, over_write=kwargs['overwrite'])
else:
put_local_file_to_remote(temp_path, path)
finally:
os.remove(temp_path)
def load_from_file(load_func, path):
if is_local_path(path):
return load_func(path)
else:
file_name = str(uuid.uuid1())
file_name = append_suffix(file_name, path)
temp_path = os.path.join(tempfile.gettempdir(), file_name)
get_remote_file_to_local(path, temp_path)
try:
return load_func(temp_path)
finally:
os.remove(temp_path)
def get_remote_file_to_local(remote_path, local_path, over_write=False):
callZooFunc("float", "getRemoteFileToLocal", remote_path, local_path, over_write)
def put_local_file_to_remote(local_path, remote_path, over_write=False):
callZooFunc("float", "putLocalFileToRemote", local_path, remote_path, over_write)
def set_core_number(num):
callZooFunc("float", "setCoreNumber", num)
def callZooFunc(bigdl_type, name, *args):
""" Call API in PythonBigDL """
gateway = _get_gateway()
args = [_py2java(gateway, a) for a in args]
error = Exception("Cannot find function: %s" % name)
for jinvoker in JavaCreator.instance(bigdl_type, gateway).value:
        # hasattr(jinvoker, name) always returns true here,
        # so you need to invoke the method to check whether it exists or not
try:
api = getattr(jinvoker, name)
java_result = api(*args)
result = _java2py(gateway, java_result)
except Exception as e:
error = e
if not ("does not exist" in str(e)
and "Method {}".format(name) in str(e)):
raise e
else:
return result
raise error
class JTensor(BJTensor):
def __init__(self, storage, shape, bigdl_type="float", indices=None):
super(JTensor, self).__init__(storage, shape, bigdl_type, indices)
@classmethod
def from_ndarray(cls, a_ndarray, bigdl_type="float"):
"""
Convert a ndarray to a DenseTensor which would be used in Java side.
"""
if a_ndarray is None:
return None
assert isinstance(a_ndarray, np.ndarray), \
"input should be a np.ndarray, not %s" % type(a_ndarray)
return cls(a_ndarray,
a_ndarray.shape,
bigdl_type)
class Sample(BSample):
def __init__(self, features, labels, bigdl_type="float"):
super(Sample, self).__init__(features, labels, bigdl_type)
@classmethod
def from_ndarray(cls, features, labels, bigdl_type="float"):
features = to_list_of_numpy(features)
labels = to_list_of_numpy(labels)
return cls(
features=[JTensor(feature, feature.shape) for feature in features],
labels=[JTensor(label, label.shape) for label in labels],
bigdl_type=bigdl_type)
| apache-2.0 | -3,218,170,877,710,068,000 | 30.540698 | 89 | 0.629493 | false |
projectmallard/pintail | pintail/site.py | 1 | 64668 | # pintail - Build static sites from collections of Mallard documents
# Copyright (c) 2015-2020 Shaun McCance <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 2 of the License, or
# (at your option) any later version.
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
import codecs
import configparser
import copy
import datetime
import glob
import importlib
import logging
import os
import shutil
import subprocess
import sys
from lxml import etree
MAL_NS = '{http://projectmallard.org/1.0/}'
CACHE_NS = '{http://projectmallard.org/cache/1.0/}'
SITE_NS = '{http://projectmallard.org/site/1.0/}'
XML_NS = '{http://www.w3.org/XML/1998/namespace}'
NS_MAP = {
'mal': 'http://projectmallard.org/1.0/',
'cache': 'http://projectmallard.org/cache/1.0/'
}
class DuplicatePageException(Exception):
def __init__(self, directory, message):
self.message = message
self.parser = directory
class Extendable:
"""
The base class for all plugins in Pintail.
"""
@classmethod
def iter_subclasses(cls, filter=None):
"""
Iterate over all subclasses, recursing to any depth.
This is a convenient way to iterate all plugins of a certain type.
The optional `filter` parameter lets you provide a name of a function.
A class will only be yielded if it defines that function explicitly,
rather than inherits it from a parent class.
"""
for cls in cls.__subclasses__():
if filter is None or filter in cls.__dict__:
yield cls
yield from cls.iter_subclasses(filter)
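    # Illustrative usage sketch (not part of the original module): this is the
    # pattern the rest of Pintail uses to consume this API, e.g. asking every
    # Page subclass that defines its own create_pages for pages. The `pages`
    # list is an assumption for the example.
    #
    #     for cls in Page.iter_subclasses('create_pages'):
    #         pages.extend(cls.create_pages(source))
    #
    # Passing a filter name avoids calling an inherited implementation twice.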
class ToolsProvider(Extendable):
"""
Extension point to provide extra tools to be used during the build.
    This extension point allows extensions to create any tools or other files they need
during the build process. This is most frequently used to create XSLT files
that are either used directly in `build_html`, or are inserted as customizations
using `XslProvider.get_xsl`.
"""
@classmethod
def build_tools(cls, site):
"""
Build tools to be used during the build.
Extensions should override this method to create any tools they need
later in the build process.
"""
pass
class CssProvider(Extendable):
"""
Extension point to provide CSS files in the built site.
"""
@classmethod
def build_css(cls, site):
"""
Build CSS for the site.
        Extensions should override this method to create any CSS files that
are referenced from built HTML files.
"""
pass
class XslProvider(Extendable):
"""
Extension point to provide XSLT or XSLT params.
This extension point allows you to provide extra XSLT files with `get_xsl`,
as well as provide XSLT params with `get_xsl_params`. You can implement
only one or both as needed.
"""
@classmethod
def get_xsl(cls, site):
"""
Get a list of additional XSLT files to include.
Extensions should implement this method if they have additional XSLT files
that should get included into the main transformations. It is called by
`Site.get_custom_xsl` when generating XSLT to build files. If you need to
generate the XSLT files as well, implement `ToolsProvider`.
"""
return []
@classmethod
def get_xsl_params(cls, output, obj, lang=None):
"""
Get a list of XSLT params provided by an extension.
Implementations of this method are called by `get_all_xsl_params`.
Extensions should use this to provide additional XSLT params.
The return value is a list of tuples, where each tuple is a pair with
the param name and the param value. The param value is always a string.
The `output` parameter is a string specifying the output format. It is
usually `"html"`, but extensions could create other output formats.
The `obj` parameter is an object that a transform will be applied to.
It is usually an instance of a `Page` subclass, but it could be something
else. Always check `obj` before making assumptions.
"""
return []
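    # Illustrative sketch (not part of the original module): a provider that
    # adds one string param for HTML output only. The class and param names
    # are assumptions for the example; real extensions pick their own.
    #
    #     class ExampleXslProvider(XslProvider):
    #         @classmethod
    #         def get_xsl_params(cls, output, obj, lang=None):
    #             if output != 'html':
    #                 return []
    #             return [('example.colour', 'blue')]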
@classmethod
def get_all_xsl_params(cls, output, obj, lang=None):
"""
Get all XSLT params for a transform target.
This method should not be overridden. It calls `get_xsl_params` on all
subclasses of `XslProvider`, and it adds various common params that are
used across all of Pintail. The return value is a list of tuples, where
each tuple is a pair with the param name and the param value. The param
value is always a string.
The `output` parameter is a string specifying the output format. It is
usually `"html"`, but extensions could create other output formats.
The `obj` parameter is an object that a transform will be applied to.
It is usually an instance of a `Page` subclass, but it could be something
else. Always check `obj` before making assumptions.
"""
ret = []
if output == 'html' and hasattr(obj, 'site'):
html_extension = obj.site.config.get('html_extension') or '.html'
if lang is None:
ret.append(('html.extension', '.html'))
else:
ret.append(('html.extension', '.html.' + lang))
link_extension = obj.site.config.get('link_extension') or html_extension
ret.append(('pintail.extension.link', link_extension))
if hasattr(obj, 'site'):
ret.append(('mal.cache.file', obj.site.get_cache_path(lang)))
if hasattr(obj, 'directory'):
ret.append(('pintail.site.root', obj.site.config.get_site_root(obj.directory.path)))
elif isinstance(obj, Directory):
ret.append(('pintail.site.root', obj.site.config.get_site_root(obj.path)))
else:
ret.append(('pintail.site.root', obj.site.config.get_site_root()))
if hasattr(obj, 'directory'):
ret.append(('pintail.site.dir', obj.directory.path))
if output == 'html':
ret.append(('html.output.prefix', obj.directory.get_target_path(lang)))
if hasattr(obj, 'source_file'):
ret.append(('pintail.source.file', obj.source_file))
now = datetime.datetime.now()
ret.append(('pintail.date', now.strftime('%Y-%m-%d')))
ret.append(('pintail.time', now.strftime('%T')))
for c in XslProvider.iter_subclasses('get_xsl_params'):
ret.extend(c.get_xsl_params(output, obj, lang))
return ret
@classmethod
def get_xsltproc_args(cls, output, obj, lang=None):
# Drop this function in the future if we decide to keep DocBook using
# lxml.etree.XSLT instead of calling xsltproc.
ret = []
for pair in cls.get_all_xsl_params(output, obj, lang=lang):
ret.extend(['--stringparam', pair[0], pair[1]])
return ret
class Page(Extendable):
"""
An individual page in a directory.
Each page belongs to one directory and comes from one source.
It is uniquely identified in the directory with the `page_id` parameter,
and it is uniquely identified in the site with the `site_id` parameter.
The page is the smallest addressable unit in a Pintail site.
There should be a `Page` object for each output page that you may
want to link to, translate, or have in the search index.
In some cases, a single source file creates multiple output pages.
In those cases, there should be a `Page` object for each output page,
even though all pages might be built in a single pass.
A `Page` object is responsible for building output, getting media files,
and extracting search data. It does this both for the original document
and for all translations.
"""
def __init__(self, source, filename):
self.source = source
self.directory = source.directory
self.site = source.site
self._source_file = filename
self._search_domains = None
@property
def page_id(self):
"""
The simple id of the page.
This usually comes from either an id attribute or a base filename,
and it usually serves as the base filename of the target file.
Two pages in the same directory cannot have the same id.
"""
return None
@property
def site_id(self):
"""
The fully qualified site id of the page.
The site id of a page is the path of the containing directory and the page id.
It must be unique across the entire site.
"""
return self.directory.path + self.page_id
@property
def site_path(self):
"""
The full absolute path to the file in the site.
This is suitable for linking.
It includes the directory path as well as the site root.
It also includes the link extension.
"""
root = self.site.config.get_site_root(self.directory.path)
ext = self.site.config.get('link_extension')
if ext is None:
ext = self.site.config.get('html_extension') or '.html'
return root + self.site_id[1:] + ext
@property
def source_file(self):
"""
The name of the source file for this page.
"""
return self._source_file
def get_source_path(self):
"""
The absolute path to the source file for this page.
"""
return os.path.join(self.source.get_source_path(), self.source_file)
@property
def stage_file(self):
"""
The name of the staged file for this page.
"""
return self.source_file
def get_stage_path(self, lang=None):
"""
The absolute path to the staged file for this page.
"""
return os.path.join(self.directory.get_stage_path(lang), self.stage_file)
@property
def target_file(self):
"""
The name of the target file for this page.
"""
return self.page_id + self.target_extension
def get_target_path(self, lang=None):
"""
The absolute path to the target file for this page.
This will often just be the directory's target path plus the target file name.
However, translation providers may modify the path in various ways.
"""
return self.site.get_page_target_path(self, lang)
@property
def target_extension(self):
"""
The file extension for output files.
"""
return self.site.config.get('html_extension') or '.html'
@property
def searchable(self):
"""
Whether the page should be added to the search index.
This is False by default for the base class,
but most page extensions should set this to True.
"""
return False
def get_cache_data(self, lang=None):
"""
Get XML data to add to the cache, as an lxml.etree.Element object.
For most page types, each page should provide information for the cache.
For formats that use a Mallard-like toolchain, this is usually a `page` element
containing only certain metadata and child elements.
For other formats, a `pintail:external` element can be used instead.
For information on Mallard cache files, see http://projectmallard.org/cache/1.1/
"""
return None
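    # Illustrative sketch (not part of the original module), assuming a
    # Mallard-like page: build a minimal cache entry as an lxml element using
    # the namespace constants defined at the top of this module. Exactly which
    # attributes and children a real implementation emits depends on the
    # source format; this only shows the general shape.
    #
    #     page = etree.Element(MAL_NS + 'page')
    #     page.set('id', self.site_id)
    #     page.set(CACHE_NS + 'href', self.get_stage_path(lang))
    #     info = etree.SubElement(page, MAL_NS + 'info')
    #     title = etree.SubElement(info, MAL_NS + 'title')
    #     title.text = self.get_title(lang=lang)
    #     return page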
def get_media(self):
"""
Get a list of referenced media files.
Pages can return a list of images, videos, and other referenced media
so that it can be copied into the built site automatically. The return
value is a list of strings, where each string is a relative path. Media
files should exist in either the page's source or in the stage.
"""
return []
def get_title(self, hint=None, lang=None):
"""
Get the title of the page.
If the `lang` parameter is not `None`, get a translated title.
Otherwise, get the title in the source language.
The `hint` parameter is a string indicating where this title will be used.
For example, the `"search"` hint is used when the title is used in a search index.
"""
return ''
def get_desc(self, hint=None, lang=None):
"""
Get the desc of the page.
If the `lang` parameter is not `None`, get a translated desc.
Otherwise, get the desc in the source language.
The `hint` parameter is a string indicating where this desc will be used.
For example, the `"search"` hint is used when the desc is used in a search index.
"""
return ''
def get_keywords(self, hint=None, lang=None):
"""
Get the keywords of the page.
The return value should be a comma-separated list of keywords.
If the `lang` parameter is not `None`, get translated keywords.
Otherwise, get the keywords in the source language.
The `hint` parameter is a string indicating where these keywords will be used.
For example, the `"search"` hint is used when the keywords is used in a search index.
"""
return ''
def get_content(self, hint=None, lang=None):
"""
Get the full content of the page.
This is not expected to be formatted in a way that is pleasant to read.
It is mostly used for full-text search.
If the `lang` parameter is not `None`, get translated content.
Otherwise, get the content in the source language.
The `hint` parameter is a string indicating where this content will be used.
For example, the `"search"` hint is used when the content is used in a search index.
"""
return ''
def build_html(self, lang=None):
"""
Build the HTML file for this page, possibly translated.
Extensions should override this method to create the HTML output
from source or stage files. If the `lang` parameter is not `None`,
HTML should be built from the appropriate translated file.
"""
return
def get_search_domains(self):
"""
Get a list of search domains for the page.
Search domains allow you to restrict where search results come from.
Each page has its data added to each search domain in its list.
When a user starts a search from a page, it defaults to searching
in the page's first domain.
See the docstring on `Directory.get_search_domains` for more information
on how search domains work.
This method looks at the search domains returned by calling
`get_search_domains` on the containing `Directory` object.
It includes any domains in that list. For any page-domain mapping,
it includes just the domain, and only if the page ID matches.
The return value of this method is a list of strings only.
"""
if self._search_domains is not None:
return self._search_domains
dms = self.directory.get_search_domains()
if dms[0] == 'none':
return ['none']
ret = []
for dm in dms:
if isinstance(dm, list):
if dm[0] == self.page_id:
if dm[1] == 'none':
return ['none']
else:
ret.append(dm[1])
else:
ret.append(dm)
return ret
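    # Worked example (not part of the original module), following the logic
    # above: if the containing directory's domains are
    #     ['site', ['index', 'frontpage']]
    # then a page whose page_id is 'index' gets ['site', 'frontpage'], while
    # every other page in the directory gets just ['site'].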
@classmethod
def create_pages(cls, source):
"""
Create a list of `Page` objects for each page in a source.
This method should be overridden by extensions.
If this page extension recognizes any files in the source directory,
or can otherwise create pages for the directory, then it should return
a list of `Page` objects, one for each page it can provide. Note that
some formats might create multiple output pages from a single source
        document. In these cases, one `Page` object should be created for
each output page, even if it shares a source file with other pages.
"""
return []
class Directory(Extendable):
"""
A directory in the built output.
Each directory contains one or more sources as `Source` objects.
For many simple sites, each directory will have one source.
However, Pintail can merge files from multiple sources into a directory.
Each directory also has a list of subdirectories and a list of pages.
The path of a directory represents where it goes in the build output,
as well as the portion of the URL after the site root.
We always start and end paths with a slash in Pintail.
The path also serves as the config key.
For simple sources, it's also where pages can be found in the source.
"""
def __init__(self, site, path, *, parent=None):
self.site = site
self.path = path
self.parent = parent
self.pages = []
self.subdirs = []
self.sources = []
self._search_domains = None
self.scan_directory()
@property
def translation_provider(self):
"""
Get the translation provider for the directory.
Currently, this is just the translation provider for the entire site.
To allow per-directory translation providers in the future, any code
using translation providers should use this directory property
whenever possible.
"""
return self.site.translation_provider
def get_stage_path(self, lang=None):
"""
The absolute path to the directory for staged files in this directory.
"""
return os.path.join(self.site.get_stage_path(lang), self.path[1:])
def get_target_path(self, lang=None):
"""
The absolute path to the target directory.
This will often just be the site's target path plus the directory path.
However, translation providers may modify the path in various ways.
"""
return self.site.get_directory_target_path(self, lang)
def scan_directory(self):
"""
Scan the directory for sources, subdirectories, and pages.
This method is responsible for locating all sources for the directory,
checking those sources for subdirectories, asking all `Page` implementations
to provide pages for each source, and recursing into subdirectories.
It is called automatically by __init__.
"""
# If we've scanned and found sources before, just exit
if len(self.sources) != 0:
return
# If the path corresponds to an actual on-disk directory,
# make a plain old source from that.
if os.path.isdir(os.path.join(self.site.srcdir, self.path[1:])):
self.sources.append(Source(self, self.path))
# Give each Source extension a chance to provide sources
# for this directory with this path.
for cls in Source.iter_subclasses('create_sources'):
self.sources.extend(cls.create_sources(self, self.path))
# Finally, if there are additional sources listed in the config,
# give each Source extension a chance to provide sources for
# each of those sources.
for source in (self.site.config.get('sources', self.path) or '').split():
for cls in Source.iter_subclasses('create_sources'):
self.sources.extend(cls.create_sources(self, source))
# Now that we have our sources, look for subdirectories of this
# directory, using all sources.
for source in self.sources:
try:
for name in os.listdir(source.get_source_path()):
if os.path.isdir(os.path.join(source.get_source_path(), name)):
subpath = self.path + name + '/'
if self.site.get_ignore_directory(subpath):
continue
self.subdirs.append(Directory(self.site, subpath, parent=self))
except:
self.site.fail('Failed to list files in ' + source.get_source_path())
# Finally, ask each Page extension to provide a list of pages for each source
by_page_id = {}
for source in self.sources:
for cls in Page.iter_subclasses('create_pages'):
for page in cls.create_pages(source):
if page.page_id in by_page_id:
raise DuplicatePageException(self,
'Duplicate page id ' + page.page_id)
by_page_id[page.page_id] = page
self.pages.append(page)
source.pages.append(page)
def iter_directories(self):
"""
Iterate over this directory and all subdirectories at any depth.
"""
yield self
for subdir in self.subdirs:
yield from subdir.iter_directories()
def iter_pages(self):
"""
Iterate over all pages in this directory and all subdirectories at any depth.
"""
for page in self.pages:
yield page
for subdir in self.subdirs:
yield from subdir.iter_pages()
def get_search_domains(self):
"""
Get a list of search domains for the directory.
Search domains allow you to restrict where search results come from.
Each page has its data added to each search domain in its list.
When a user starts a search from a page, it defaults to searching
in the page's first domain.
This method looks at the `search_domains` config option and returns
a list of search domains or page mappings for search domains.
Each component in the space-separated list could be a search domain,
a keyword for a search domain, or a mapping from a page ID to a domain.
Search domains look like directory paths. They always start with a slash.
For many directories, the search domain should just be that directory.
There's even a special keyword for that, `self`. There are four keywords:
* `self` - The current directory path.
* `parent` - The primary search domain of the parent directory.
* `global` - The top directory path, `/`.
* `none` - No search domain. Pages will not be indexed.
Components in the domain list can also be page mappings.
These are of the form `page_id:search_domain`. In these cases,
the value in the return list will be a list with the page ID and the domain.
The `get_search_domains` method on `Page` will only include the domains
that apply to that page.
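        For example, a hypothetical value of `self index:/` would index pages
        under this directory's own path, except for the page with ID `index`,
        which would be indexed under the site root instead.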
"""
if self._search_domains is not None:
return self._search_domains
domains = self.site.config.get('search_domain', self.path)
if domains is None:
domains = 'parent'
domains = domains.split()
def _resolve(domain):
if domain.startswith('/'):
return domain
elif domain == 'self':
return self.path
elif domain == 'global':
return '/'
elif domain == 'none':
return 'none'
elif self.parent is None:
return '/'
else:
return self.parent.get_search_domains()[0]
for i in range(len(domains)):
if ':' in domains[i]:
domains[i] = domains[i].split(':', 1)
domains[i][1] = _resolve(domains[i][1])
else:
domains[i] = _resolve(domains[i])
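        # If the first entry is a page mapping rather than a plain domain,
        # fall back to the parent's primary domain as the directory default.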
if isinstance(domains[0], list):
            domains.insert(0, self.parent.get_search_domains()[0])
self._search_domains = domains
return self._search_domains
def _maketargetdirs(self):
Site._makedirs(self.get_target_path())
if self.translation_provider is not None:
for lc in self.translation_provider.get_directory_langs(self):
Site._makedirs(self.get_target_path(lc))
def build_html(self):
"""
Build HTML files for pages in this directory and subdirectories.
This method calls `build_html` on each subdirectory and each page it contains.
It also queries the translation provider for translations,
and calls `build_html` on each page with those languages.
"""
for subdir in self.subdirs:
subdir.build_html()
if not self.site.get_filter(self):
return
self._maketargetdirs()
for page in self.pages:
if not self.site.get_filter(page):
continue
page.build_html()
if self.translation_provider is not None:
for lc in self.translation_provider.get_directory_langs(self):
page.build_html(lc)
def build_media(self):
"""
Copy media files into the build directory.
Each page is expected to be able to provide a list of media files it references.
Media files could be images or videos, but they could also be any additional files.
This method looks at all pages in the directory for media files,
then attempts to copy each of those files into the target directory.
It looks in both the source trees and the stage,
so built media files in the stage will be handled here.
This method also recurses into subdirectories.
"""
for subdir in self.subdirs:
subdir.build_media()
if not self.site.get_filter(self):
return
self._maketargetdirs()
media = {}
for page in self.pages:
if not self.site.get_filter(page):
continue
# If two pages from different sources provide the file,
# right now it's completely random which one will win.
for filename in page.get_media():
media[filename] = page.source
for fname in media:
source = media[fname]
langs = [None]
if self.translation_provider is not None:
langs += self.translation_provider.get_directory_langs(self)
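            # Copy the file once per language: None stands for the source
            # language, and translated copies go through the translation provider.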
for lc in langs:
if lc is not None:
tr = self.translation_provider.translate_media(source, fname, lc)
if not tr:
continue
if fname.startswith('/'):
# These have to be managed with extra_files for now
continue
mediasrc = os.path.join(self.get_stage_path(lc), fname)
self.site.log('MEDIA', lc + ' ' + self.path + fname)
else:
if fname.startswith('/'):
mediasrc = os.path.join(self.site.topdir, fname[1:])
else:
# The file might be generated, in which case it's in the
# stage directory. But we don't stage static media files,
# so those are just in the source directory.
mediasrc = os.path.join(self.get_stage_path(), fname)
if not os.path.exists(mediasrc):
mediasrc = os.path.join(source.get_source_path(), fname)
self.site.log('MEDIA', self.path + fname)
target = self.site.get_media_target_path(self, fname, lc)
Site._makedirs(os.path.dirname(target))
try:
shutil.copyfile(mediasrc, target)
except:
self.site.logger.warn('Could not copy file %s' % fname)
def build_files(self):
"""
Copy extra files into the build directory.
This method looks at the `extra_files` config option for
additional files that can't be found automatically.
It treats `extra_files` as a space-separated list of globs.
Each glob is checked against each source in the directory.
This method also recurses into subdirectories.
"""
for subdir in self.subdirs:
subdir.build_files()
if not self.site.get_filter(self):
return
Site._makedirs(self.get_stage_path())
globs = self.site.config.get('extra_files', self.path)
if globs is not None:
for glb in globs.split():
for source in self.sources:
# This won't do what it should if the path has anything
# glob-like in it. Would be nice if glob() could take
# a base path that isn't glob-interpreted.
files = glob.glob(os.path.join(source.get_source_path(), glb))
for fname in files:
self.site.log('FILE', self.path + os.path.basename(fname))
shutil.copyfile(fname,
os.path.join(self.get_target_path(),
os.path.basename(fname)))
def build_feeds(self):
"""
Build Atom feeds for this directory.
If the directory lists a file name in the `feed_atom` config option,
then this method creates an Atom feed from the pages in the directory.
This method also recurses into subdirectories.
"""
for subdir in self.subdirs:
subdir.build_feeds()
if not self.site.get_filter(self):
return
atomfile = self.site.config.get('feed_atom', self.path)
if atomfile is not None:
self.site.log('ATOM', self.path + atomfile)
Site._makedirs(self.site.tools_path)
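            # Make sure the XSLT helpers shipped with Pintail are unpacked
            # into the tools directory before running the transform.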
for xsltfile in ('pintail-html.xsl', 'pintail-atom.xsl'):
xsltpath = os.path.join(self.site.tools_path, xsltfile)
if not os.path.exists(xsltpath):
from pkg_resources import resource_string
xsltcont = resource_string(__name__, xsltfile)
fd = open(xsltpath, 'w')
fd.write(codecs.decode(xsltcont, 'utf-8'))
fd.close()
mal2xhtml = os.path.join(self.site.yelp_xsl_path,
'xslt', 'mallard', 'html', 'mal2xhtml.xsl')
atomxsl = os.path.join(self.site.tools_path, 'pintail-atom-local.xsl')
fd = open(atomxsl, 'w')
fd.write('<xsl:stylesheet' +
' xmlns:xsl="http://www.w3.org/1999/XSL/Transform"' +
' version="1.0">\n')
fd.write('<xsl:import href="' + mal2xhtml + '"/>\n')
fd.write('<xsl:import href="pintail-atom.xsl"/>\n')
html_extension = self.site.config.get('html_extension') or '.html'
fd.write('<xsl:param name="html.extension" select="' +
"'" + html_extension + "'" + '"/>\n')
link_extension = self.site.config.get('link_extension')
if link_extension is not None:
fd.write('<xsl:param name="mal.link.extension" select="' +
"'" + link_extension + "'" + '"/>\n')
fd.write('<xsl:param name="pintail.extension.link" select="' +
"'" + link_extension + "'" + '"/>\n')
for xsl in self.site.get_custom_xsl():
fd.write('<xsl:include href="%s"/>\n' % xsl)
fd.write('</xsl:stylesheet>')
fd.close()
root = self.site.config.get('feed_root', self.path)
if root is None:
root = self.site.config.get_site_root(self.path)
subprocess.call(['xsltproc',
'-o', os.path.join(self.get_target_path(), atomfile),
'--stringparam', 'pintail.site.dir', self.path,
'--stringparam', 'pintail.site.root', root,
'--stringparam', 'feed.exclude_styles',
self.site.config.get('feed_exclude_styles', self.path) or '',
atomxsl, self.site.get_cache_path()])
class Source(Extendable):
"""
A directory in the source.
A source represents a source of pages or other files.
It could be in the actual source tree, in another repository, or entirely virtual.
Each source belongs to exactly one `Directory` object for the output directory.
    It's possible that some on-disk location provides pages for multiple output directories,
    but in that case there would be multiple `Source` objects for that location.
The name of a source is the config key group that defines it.
For simple sources, this will be the same as the path of the directory it belongs to.
For other sources, it could be an identifier that doesn't look like a path.
"""
def __init__(self, directory, name):
self.directory = directory
self.name = name
self.pages = []
self.site = self.directory.site
def get_source_path(self):
"""
The absolute path to the source directory for this source.
"""
return os.path.join(self.site.topdir, self.directory.path[1:])
@classmethod
def create_sources(cls, directory, name):
"""
Return a list of source objects for a directory and source name.
This method should be overridden by extensions.
If this source extension recognizes something special in the directory,
or in the config keys under the group specified by the name parameter,
then it should return a (probably singleton) list of sources.
"""
return []
class Site:
"""
Base class for an entire Pintail site.
"""
def __init__(self, configfile,
local=False,
search=True,
translation=True,
update=True,
verbose=False):
self._configfile = configfile
self.topdir = os.path.dirname(configfile)
self.srcdir = self.topdir
self.pindir = os.path.join(self.topdir, '__pintail__')
self.target_path = os.path.join(self.pindir, 'build')
self.tools_path = os.path.join(self.pindir, 'tools')
self.logger = logging.getLogger('pintail')
self.logger.addHandler(logging.StreamHandler())
if verbose:
self.logger.setLevel(logging.INFO)
self._verbose = verbose
self._local = local
self._update = update
self._search = search
self._translation = translation
self._command = None
self._filter = []
self.config = None # set by prep_site
self.root = None # set by scan_site
@classmethod
def init_site(cls, directory):
"""
Initialize a new site with `pintail init`.
This is the method called by `pintail init` to create a new site.
FIXME: Want to contribute with some low-hanging fruit?
Make this ship sample XSLT and CSS files, and put `custom_xsl` and
`custom_css` options in the sample `pintail.cfg`.
"""
cfgfile = os.path.join(directory, 'pintail.cfg')
if os.path.exists(cfgfile):
sys.stderr.write('pintail.cfg file already exists\n')
sys.exit(1)
from pkg_resources import resource_string
sample = resource_string(__name__, 'sample.cfg')
fd = open(cfgfile, 'w')
fd.write(codecs.decode(sample, 'utf-8'))
fd.close()
def get_script_env(self, config=None):
"""
Get environment variables to pass to external scripts.
Pintail can call external scripts with the `before_script`, `config_script`,
and `after_script` config options. When it calls these scripts, it passes
some environment variables with information on the command being run. This
method returns these environment variables as a dict.
This method accepts `config` as an optional keyword argument, because it
may be called during the initialization of a `Config` object, before that
object has been assigned to the `config` property of the `Site` object.
Note that some environment variables may be affected by options set in the
config file, such as `PINTAIL_SITE_ROOT`. If these options are set in the
config data output by `config_script`, the value will be different when
passed to `after_script` than it was for `before_script` and `config_script`.
This method may return the following environment variables:
`PINTAIL_ARGS`
The positional arguments passed to `pintail` after the command. This
does not include options like `--local` or `-v`.
`PINTAIL_COMMAND`:
The command passed to `pintail`, such as `build` or `css`.
`PINTAIL_LOCAL`:
Whether Pintail is performing a local build with the `--local` option.
`PINTAIL_NO_SEARCH`:
Whether Pintail is skipping search with the `--no-search` option.
`PINTAIL_NO_TRANSLATION`:
Whether Pintail is skipping translation with the `--no-translation` option.
`PINTAIL_NO_UPDATE`:
Whether Pintail is skipping git updates with the `--no-update` option.
`PINTAIL_OUTPUT`:
The root directory where files will be output.
`PINTAIL_SITE_ROOT`:
The root URL for the site. By default, this is just `/`, but it can be set
with the `site_root` option in the config file. Note that local builds change
the site root internally for each directory, but this variable is always the
same global value and is not affected by `--local`.
`PINTAIL_VERBOSE`:
Whether Pintail is printing progress with the `--verbose` option.
"""
if config is None:
config = self.config
env = {}
env['PINTAIL_ARGS'] = ' '.join(self._filter)
env['PINTAIL_COMMAND'] = self._command or ''
if self._local:
env['PINTAIL_LOCAL'] = '1'
if not self._search:
env['PINTAIL_NO_SEARCH'] = '1'
if not self._translation:
env['PINTAIL_NO_TRANSLATION'] = '1'
if not self._update:
env['PINTAIL_NO_UPDATE'] = '1'
env['PINTAIL_OUTPUT'] = self.target_path
env['PINTAIL_SITE_ROOT'] = config.get_site_root()
if self._verbose:
env['PINTAIL_VERBOSE'] = '1'
return env
def set_filter(self, dirs):
"""
Set a filter for which pages will be built.
This can be passed to `pintail build` on the command line to build a partial site.
If the filter ends with a slash, it is a directory. Otherwise, it is a page.
"""
self._filter = []
if dirs is None:
return
for fdir in dirs:
if not(fdir.startswith('/')):
fdir = '/' + fdir
self._filter.append(fdir)
def get_filter(self, obj):
"""
Get whether or not an object meets the filter.
The object `obj` could be a page or a directory.
"""
if len(self._filter) == 0:
return True
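        # Directories match a directory filter when they sit at or below the
        # filtered path, and match a page filter when the filtered page lives
        # somewhere beneath them.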
if isinstance(obj, Directory):
for f in self._filter:
if f.endswith('/'):
if obj.path.startswith(f):
return True
else:
if f.startswith(obj.path):
return True
elif isinstance(obj, Page):
for f in self._filter:
if f.endswith('/'):
if obj.site_id.startswith(f):
return True
else:
if obj.site_id == f:
return True
return False
def get_custom_xsl(self):
"""
Get all custom XSLT files.
This returns a list of custom XSLT files that should be included in any
top-level XSLT files. It includes any files specified in the `custom_xsl`
config option, as well as any files provided by any loaded `XslProvider`.
"""
ret = []
custom_xsl = self.config.get('custom_xsl') or ''
for x in custom_xsl.split():
ret.append(os.path.join(self.topdir, x))
for cls in XslProvider.iter_subclasses('get_xsl'):
ret.extend(cls.get_xsl(self))
return ret
def get_langs(self):
"""
Get all languages used throughout the site.
If there is a translation provider, this method calls `get_site_langs`
on that provider. Otherwise, it returns an empty list.
"""
if self.translation_provider is not None:
return self.translation_provider.get_site_langs()
return []
def get_source_lang(self):
"""
Get the language code for the original source language of the site.
If there is a translation provider, this method calls `get_source_lang`
on that provider. Otherwise, it returns `en` as a default.
"""
if self.translation_provider is not None:
return self.translation_provider.get_source_lang()
return 'en'
def get_stage_path(self, lang=None):
"""
The absolute path to the directory for staged files for this site.
"""
if lang is not None:
return os.path.join(self.pindir, 'stage-' + lang)
else:
return os.path.join(self.pindir, 'stage')
def get_cache_path(self, lang=None):
"""
The absolute path to the Mallard cache file for the site in the language.
"""
if lang is not None:
return os.path.join(self.tools_path, 'pintail-' + lang + '.cache')
else:
return os.path.join(self.tools_path, 'pintail.cache')
def get_directory_target_path(self, directory, lang=None):
"""
The absolute path to where the built files for a directory should go.
"""
return os.path.join(self.target_path, directory.path[1:])
def get_page_target_path(self, page, lang=None):
"""
The absolute path to where the built file for a page should go.
"""
dirpath = self.get_directory_target_path(page.directory)
if lang is None:
return os.path.join(dirpath, page.target_file)
else:
return os.path.join(dirpath, page.target_file + '.' + lang)
def get_media_target_path(self, directory, mediafile, lang=None):
"""
The absolute path to where a media file should go in the built directory.
"""
if lang is not None:
langext = '.' + lang
else:
langext = ''
if mediafile.startswith('/'):
return os.path.join(self.target_path, mediafile[1:] + langext)
else:
return os.path.join(directory.get_target_path(), mediafile + langext)
def translate_page(self, page, lang):
"""
Translate a page into a language and return whether it was translated.
If there is no translation provider, this method just returns `False`.
Otherwise, it first checks to see if the translated file already exists,
and if it doesn't, it calls `translate_page` on the translation provider.
"""
if self.translation_provider is not None:
if not self.get_filter(page):
if os.path.exists(page.get_stage_path(lang)):
return True
return page.directory.translation_provider.translate_page(page, lang)
return False
def prep_site(self):
"""
Prepare the site and configuration data.
This method reads the configuration data and sets up transform data and
plugins, as necessary. It needs to be called separately from object
initialization so that `Config` initialization has access to various
bits of data that are not set on `Site` initialization.
This method is not called automatically. Ensure you call it before any build methods.
It is safe to call this method multiple times.
"""
if self.config is not None:
return
self.config = Config(self, self._configfile)
self.yelp_xsl_branch = self.config.get('yelp_xsl_branch') or 'master'
self.yelp_xsl_dir = 'yelp-xsl@' + self.yelp_xsl_branch.replace('/', '@')
self.yelp_xsl_path = os.path.join(self.tools_path, self.yelp_xsl_dir)
for plugin in (self.config.get('plugins') or '').split():
importlib.import_module(plugin)
self.search_provider = None
search = self.config.get('search_provider')
if search is not None:
dot = search.rindex('.')
searchmod = importlib.import_module(search[:dot])
searchcls = getattr(searchmod, search[dot+1:])
self.search_provider = searchcls(self)
self.translation_provider = None
if self._translation:
trans = self.config.get('translation_provider')
if trans is not None:
dot = trans.rindex('.')
transmod = importlib.import_module(trans[:dot])
transcls = getattr(transmod, trans[dot+1:])
self.translation_provider = transcls(self)
def scan_site(self):
"""
Scan the entire site for directories, sources, and pages.
This method is responsible for finding all directories, sources, and pages
throughout the entire site. Most of the work is done by `Directory.scan_directory`.
This method starts by creating a root directory, which is able to find subdirectories.
It then looks at special directories defined in the config file, and creates
directories and parents as necessary for those.
This method is not called automatically. Ensure you call it before any build methods.
It is safe to call this method multiple times.
"""
if self.root is not None:
return
self.log('SCAN', '/')
if os.path.exists(self.get_stage_path()):
shutil.rmtree(self.get_stage_path())
self.root = Directory(self, '/')
directories = {'/': self.root}
for directory in self.root.iter_directories():
directories[directory.path] = directory
configdirs = set([d for d in self.config._config.sections()
if d.startswith('/') and d.endswith('/')] +
[d for d in self.config._configscr.sections()
if d.startswith('/') and d.endswith('/')] )
for path in configdirs:
if path not in directories:
parent = directories['/']
curpath = '/'
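                # Walk the config path one segment at a time, creating any
                # intermediate Directory objects that scanning did not find.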
for curpart in path[1:-1].split('/'):
curpath = curpath + curpart + '/'
if curpath in directories:
parent = directories[curpath]
else:
curdir = Directory(self, curpath, parent=parent)
parent.subdirs.append(curdir)
directories[curpath] = curdir
parent = curdir
def build(self, command='build'):
"""
        Call the appropriate build methods, based on the `command` parameter.
"""
self._command = command
if command == 'build':
self.build_all()
elif command == 'css':
self.build_cache()
self.build_css()
elif command == 'js':
self.build_cache()
self.build_js()
elif command == 'files':
self.build_files()
elif command == 'feeds':
self.build_cache()
self.build_feeds()
script = self.config.get('after_script')
if script is not None:
self.log('SCRIPT', script)
ret = subprocess.call([os.path.join(self.topdir, script)],
env=self.get_script_env())
if ret != 0:
sys.stderr.write('after_script failed\n')
sys.exit(1)
def build_all(self, command='build'):
"""
Build the entire site, including all pages and additional files.
"""
self.prep_site()
self.scan_site()
self.build_cache()
self.build_tools()
self.build_html()
self.build_media()
self.build_files()
self.build_feeds()
self.build_search()
if len(self._filter) == 0:
self.build_css()
self.build_js()
def build_cache(self):
"""
Build the Mallard cache files for this site.
"""
self.prep_site()
self.scan_site()
self.log('CACHE', self.get_cache_path())
cache = etree.Element(CACHE_NS + 'cache', nsmap={
None: 'http://projectmallard.org/1.0/',
'cache': 'http://projectmallard.org/cache/1.0/',
'site': 'http://projectmallard.org/site/1.0/',
'pintail': 'http://pintail.io/'
})
for page in self.root.iter_pages():
cdata = page.get_cache_data()
if cdata is not None:
cache.append(cdata)
Site._makedirs(self.tools_path)
cache.getroottree().write(self.get_cache_path(),
pretty_print=True)
for lang in self.get_langs():
self.log('CACHE', self.get_cache_path(lang))
cache = etree.Element(CACHE_NS + 'cache', nsmap={
None: 'http://projectmallard.org/1.0/',
'cache': 'http://projectmallard.org/cache/1.0/',
'site': 'http://projectmallard.org/site/1.0/',
'pintail': 'http://pintail.io/'
})
for page in self.root.iter_pages():
cdata = page.get_cache_data(lang)
if cdata is not None:
cache.append(cdata)
cache.getroottree().write(self.get_cache_path(lang),
pretty_print=True)
def build_tools(self):
"""
Build all the tools necessary to build the site.
This method grabs and builds the latest version of yelp-xsl,
then copies its customizations into `pintail-html.xsl`,
and finally calls `get_tools` on each `ToolsProvider`.
"""
self.prep_site()
Site._makedirs(self.tools_path)
if os.path.exists(self.yelp_xsl_path):
if self._update:
self.log('UPDATE', 'https://gitlab.gnome.org/GNOME/yelp-xsl@' + self.yelp_xsl_branch)
p = subprocess.Popen(['git', 'pull', '-q', '-r', 'origin', self.yelp_xsl_branch],
cwd=os.path.join(self.tools_path,
'yelp-xsl@' + self.yelp_xsl_branch))
p.communicate()
else:
self.log('CLONE', 'https://gitlab.gnome.org/GNOME/yelp-xsl@' + self.yelp_xsl_branch)
p = subprocess.Popen(['git', 'clone', '-q',
'-b', self.yelp_xsl_branch, '--single-branch',
'https://gitlab.gnome.org/GNOME/yelp-xsl.git',
self.yelp_xsl_dir],
cwd=self.tools_path)
p.communicate()
self.log('BUILD', 'https://gitlab.gnome.org/GNOME/yelp-xsl@' + self.yelp_xsl_branch)
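        # Prefer the lightweight localbuild.sh script when the yelp-xsl
        # checkout provides one; otherwise fall back to a full autogen and make.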
if os.path.exists(os.path.join(self.yelp_xsl_path, 'localbuild.sh')):
p = subprocess.Popen([os.path.join(self.yelp_xsl_path, 'localbuild.sh')],
cwd=self.yelp_xsl_path,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.communicate()
else:
p = subprocess.Popen([os.path.join(self.yelp_xsl_path, 'autogen.sh')],
cwd=self.yelp_xsl_path,
stdout=subprocess.DEVNULL,
stderr=subprocess.DEVNULL)
p.communicate()
p = subprocess.Popen(['make'], cwd=self.yelp_xsl_path, stdout=subprocess.DEVNULL)
p.communicate()
from pkg_resources import resource_string
site2html = resource_string(__name__, 'pintail-html.xsl')
fd = open(os.path.join(self.tools_path, 'pintail-html.xsl'),
'w', encoding='utf-8')
fd.write(codecs.decode(site2html, 'utf-8'))
fd.close()
for cls in ToolsProvider.iter_subclasses('build_tools'):
cls.build_tools(self)
def build_html(self):
"""
Build all HTML files for this site.
"""
self.prep_site()
self.scan_site()
self.root.build_html()
def build_media(self):
"""
Copy media files for the entire site.
"""
self.prep_site()
self.scan_site()
self.root.build_media()
def build_css(self):
"""
Build all of the CSS for the site.
This function iterates over all `CssProvider` subclasses and asks them to build CSS.
"""
self.prep_site()
self.scan_site()
for cls in CssProvider.iter_subclasses('build_css'):
cls.build_css(self)
def build_js(self):
"""
Build all JavaScript files for the site.
"""
self.prep_site()
self.scan_site()
jspath = os.path.join(self.yelp_xsl_path, 'js')
if os.path.exists(os.path.join(jspath, 'jquery.js')):
self.log('JS', '/jquery.js')
shutil.copyfile(os.path.join(jspath, 'jquery.js'),
os.path.join(self.target_path, 'jquery.js'))
xslpath = os.path.join(self.yelp_xsl_path, 'xslt')
Site._makedirs(self.tools_path)
jsxsl = os.path.join(self.tools_path, 'pintail-js.xsl')
fd = open(jsxsl, 'w')
fd.writelines([
'<xsl:stylesheet',
' xmlns:xsl="http://www.w3.org/1999/XSL/Transform"',
' xmlns:exsl="http://exslt.org/common"',
' xmlns:cache="http://projectmallard.org/cache/1.0/"',
' xmlns:mal="http://projectmallard.org/1.0/"',
' extension-element-prefixes="exsl"',
' version="1.0">\n'
'<xsl:import href="', xslpath, '/mallard/html/mal2xhtml.xsl"/>\n'
])
fd.write('<xsl:import href="%s"/>\n' % 'pintail-html.xsl')
for xsl in self.get_custom_xsl():
fd.write('<xsl:include href="%s"/>\n' % xsl)
fd.writelines([
'<xsl:output method="text"/>\n',
'<xsl:template match="/">\n',
' <xsl:call-template name="html.js.content"/>\n',
'</xsl:template>\n',
'</xsl:stylesheet>\n'
])
fd.close()
self.log('JS', '/yelp.js')
subprocess.call(['xsltproc',
'-o', os.path.join(self.target_path, 'yelp.js'),
jsxsl, self.get_cache_path()])
if os.path.exists(os.path.join(jspath, 'highlight.pack.js')):
self.log('JS', '/highlight.pack.js')
shutil.copyfile(os.path.join(jspath, 'highlight.pack.js'),
os.path.join(self.target_path, 'highlight.pack.js'))
if os.path.exists(os.path.join(jspath, 'jquery.syntax.js')):
for js in ['jquery.syntax.js', 'jquery.syntax.core.js',
'jquery.syntax.layout.yelp.js']:
self.log('JS', '/' + js)
shutil.copyfile(os.path.join(jspath, js),
os.path.join(self.target_path, js))
jsxsl = os.path.join(self.tools_path, 'pintail-js-brushes.xsl')
fd = open(jsxsl, 'w')
fd.writelines([
'<xsl:stylesheet',
' xmlns:xsl="http://www.w3.org/1999/XSL/Transform"',
' xmlns:mal="http://projectmallard.org/1.0/"',
' xmlns:cache="http://projectmallard.org/cache/1.0/"',
' xmlns:exsl="http://exslt.org/common"',
' xmlns:html="http://www.w3.org/1999/xhtml"',
' extension-element-prefixes="exsl"',
' version="1.0">\n',
'<xsl:import href="', xslpath, '/mallard/html/mal2xhtml.xsl"/>\n'
])
for xsl in self.get_custom_xsl():
fd.write('<xsl:include href="%s"/>\n' % xsl)
fd.writelines([
'<xsl:output method="text"/>\n',
'<xsl:template match="/">\n',
'<xsl:for-each select="/cache:cache/mal:page">\n',
'<xsl:for-each select="document(@cache:href)//mal:code[@mime]">\n',
' <xsl:variable name="out">\n',
' <xsl:call-template name="mal2html.pre"/>\n',
' </xsl:variable>\n',
' <xsl:variable name="class">\n',
' <xsl:value-of select="exsl:node-set($out)/*/html:pre[last()]/@class"/>\n',
' </xsl:variable>\n',
' <xsl:if test="starts-with($class, ',
"'contents syntax brush-'", ')">\n',
' <xsl:text>jquery.syntax.brush.</xsl:text>\n',
' <xsl:value-of select="substring-after($class, ',
"'contents syntax brush-'", ')"/>\n',
                ' <xsl:text>.js&#10;</xsl:text>\n',
' </xsl:if>\n',
'</xsl:for-each>\n',
'</xsl:for-each>\n',
'</xsl:template>\n',
'</xsl:stylesheet>'
])
fd.close()
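            # Run the generated stylesheet over the site cache to find out which
            # syntax brushes the site's code blocks need, then copy only those files.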
brushes = subprocess.check_output(['xsltproc',
jsxsl, self.get_cache_path()],
universal_newlines=True)
for brush in brushes.split():
self.log('JS', '/' + brush)
shutil.copyfile(os.path.join(jspath, brush),
os.path.join(self.target_path, brush))
def build_files(self):
"""
Copy all extra files for this site.
"""
self.prep_site()
self.scan_site()
self.root.build_files()
def build_feeds(self):
"""
Build all Atom feeds for this site.
"""
self.prep_site()
self.scan_site()
self.root.build_feeds()
def build_search(self):
"""
Build all search data for the site.
If there is a search provider, this method calls `index_site` on it.
Otherwise, this method does nothing.
"""
self.prep_site()
if self._search:
self.scan_site()
if self.search_provider is not None:
self.search_provider.index_site()
def get_ignore_directory(self, path):
"""
Get whether or not to ignore a directory path when scanning a site.
The `path` argument is a path as used by `Directory`.
If it should be ignored, this method returns `True`.
Currently, we ignore Pintail's built directory and git's hidden directory.
We should be smarter in the future, and perhaps allow a config option.
"""
if path == '/__pintail__/':
return True
# FIXME: use an ignore key in config
if path == '/.git/':
return True
return False
def log(self, tag, data):
"""
Write something to the log.
Pintail uses a tag to indicate what kind of thing is happening,
followed by a data string to show what that thing is happening to.
"""
if data.startswith(self.pindir + '/'):
data = data[len(os.path.dirname(self.pindir))+1:]
self.logger.info('%(tag)-6s %(data)s' % {'tag': tag, 'data': data})
def warn(self, message):
"""
Write a warning message to the log.
"""
# FIXME I'd like to have a fatal warnings switch
self.log('WARN', message)
def fail(self, message):
"""
Write a failure message to the log and exit.
"""
self.log('FAIL', message)
sys.exit(1)
@classmethod
def _makedirs(cls, path):
# Python's os.makedirs complains if directory modes don't
# match just so. I don't care if they match, as long as I
# can write.
if os.path.exists(path):
return
Site._makedirs(os.path.dirname(path))
if not os.path.exists(path):
os.mkdir(path)
class Config:
"""
The configuration for a site.
This class wraps Python's `ConfigParser` with various utility methods
to ensure consistent access across Pintail.
"""
def __init__(self, site, filename):
self._site = site
self._config = configparser.ConfigParser()
self._config.read(filename)
self._configscr = configparser.ConfigParser()
script = self._config.get('pintail', 'before_script', fallback=None)
if script is not None:
site.log('SCRIPT', script)
ret = subprocess.call([os.path.join(os.path.dirname(filename), script)],
env=site.get_script_env(self))
if ret != 0:
sys.stderr.write('before_script failed\n')
sys.exit(1)
script = self._config.get('pintail', 'config_script', fallback=None)
if script is not None:
site.log('SCRIPT', script)
p = subprocess.Popen([os.path.join(os.path.dirname(filename), script)],
stdout=subprocess.PIPE,
env=site.get_script_env(self),
universal_newlines=True)
self._configscr.readfp(p.stdout)
def get(self, key, path=None):
"""
Get the value for a key, possibly in a path.
If `path` is omitted, it's assumed to be `pintail`, which is the group
in the config file where site-level options are defined. If the path is
`pintail` and `--local` has been passed to the `pintail` command, this
method will also look in the `local` config group for overrides.
"""
if path is None:
path = 'pintail'
if self._site._local and path == 'pintail':
ret = self._configscr.get('local', key, fallback=None)
if ret is not None:
return ret
ret = self._config.get('local', key, fallback=None)
if ret is not None:
return ret
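        # extra_files is additive: merge the values from the config script
        # and the config file instead of letting one override the other.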
if path.startswith('/') and key == 'extra_files':
return (self._configscr.get(path, key, fallback='') + ' ' +
self._config.get(path, key, fallback=''))
ret = self._configscr.get(path, key, fallback=None)
if ret is not None:
return ret
return self._config.get(path, key, fallback=None)
def get_site_root(self, path=None):
"""
Get the root path for the site.
For normal builds, this is either `"/"` or the value of the `site_root` config option.
For local builds, this method creates a relative path from the `path` argument.
"""
if self._site._local and path is not None:
if path == '/':
return './'
ret = ''
for i in range(path.count('/') - 1):
ret = '../' + ret
return ret
else:
return self.get('site_root') or '/'
| gpl-2.0 | -9,073,771,629,364,491,000 | 37.219858 | 101 | 0.573669 | false |
braedon/prometheus-kafka-consumer-group-exporter | setup.py | 1 | 1498 | from setuptools import setup, find_packages
setup(
name='prometheus-kafka-consumer-group-exporter',
version='0.5.5',
description='Kafka consumer group Prometheus exporter',
url='https://github.com/Braedon/prometheus-kafka-consumer-group-exporter',
author='Braedon Vickers',
author_email='[email protected]',
license='MIT',
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: Developers',
'Intended Audience :: System Administrators',
'Topic :: System :: Monitoring',
'License :: OSI Approved :: MIT License',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
],
keywords='monitoring prometheus exporter kafka consumer group',
packages=find_packages(),
install_requires=[
# kafka-python 1.4.5 included a number of bugs and a severe drop
# in consumer performance. 1.4.6 fixed the bugs, but the performance
# issues remained. 1.4.7 fixed the performance issues.
'kafka-python >= 1.3, != 1.4.5, != 1.4.6',
'jog',
'prometheus-client >= 0.6.0',
'javaproperties'
],
entry_points={
'console_scripts': [
'prometheus-kafka-consumer-group-exporter=prometheus_kafka_consumer_group_exporter:main',
],
},
)
| mit | -144,719,244,381,282,460 | 37.410256 | 101 | 0.62016 | false |
bmya/tkobr-addons | tko_point_of_sale_discount_on_order/point_of_sale.py | 1 | 2757 | from openerp.osv import osv, fields
import logging
_logger = logging.getLogger(__name__)
import openerp.addons.decimal_precision as dp
class pos_order(osv.osv):
_inherit = 'pos.order'
def _amount_all(self, cr, uid, ids, name, args, context=None):
cur_obj = self.pool.get('res.currency')
res = {}
for order in self.browse(cr, uid, ids, context=context):
res[order.id] = {
'amount_paid': 0.0,
'amount_return': 0.0,
'amount_tax': 0.0,
}
val1 = val2 = 0.0
cur = order.pricelist_id.currency_id
for payment in order.statement_ids:
res[order.id]['amount_paid'] += payment.amount
res[order.id][
'amount_return'] += (payment.amount < 0 and payment.amount or 0)
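            # val1 accumulates tax-inclusive line totals and val2 tax-exclusive
            # ones, so their difference gives the tax amount for the order.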
for line in order.lines:
val1 += line.price_subtotal_incl
val2 += line.price_subtotal
res[order.id]['amount_tax'] = cur_obj.round(
cr, uid, cur, val1 - val2)
res[order.id]['amount_total'] = cur_obj.round(
cr, uid, cur, val1 - order.discount_on_order)
return res
_columns = {
'discount_on_order': fields.float('Discount on Order'),
'amount_tax': fields.function(
_amount_all,
string='Taxes',
digits_compute=dp.get_precision('Account'),
multi='all'),
'amount_total': fields.function(
_amount_all,
string='Total',
digits_compute=dp.get_precision('Account'),
multi='all'),
'amount_paid': fields.function(
_amount_all,
string='Paid',
states={
'draft': [
('readonly',
False)]},
readonly=True,
digits_compute=dp.get_precision('Account'),
multi='all'),
'amount_return': fields.function(
_amount_all,
'Returned',
digits_compute=dp.get_precision('Account'),
multi='all'),
}
# pass value of discount_on_order to invoice from POS order
def action_invoice(self, cr, uid, ids, context=None):
res = super(
pos_order,
self).action_invoice(
cr,
uid,
ids,
context=context)
res_id = res.get('res_id', False)
if res_id:
for order in self.pool.get('pos.order').browse(
cr, uid, ids, context=context):
self.pool.get('account.invoice').write(cr, uid, [res_id], {
'discount_on_order': order.discount_on_order})
return res
| agpl-3.0 | -4,863,822,144,640,374,000 | 33.898734 | 84 | 0.500544 | false |
ekansa/open-context-py | opencontext_py/apps/edit/inputs/views.py | 1 | 33457 | import json
from django.views.decorators.cache import never_cache
from django.http import HttpResponse, HttpResponseForbidden, Http404
from opencontext_py.libs.rootpath import RootPath
from django.template import RequestContext, loader
from django.views.decorators.csrf import ensure_csrf_cookie
from django.views.decorators.cache import cache_control
from opencontext_py.apps.entities.entity.models import Entity
from opencontext_py.apps.edit.items.itembasic import ItemBasicEdit
from opencontext_py.apps.edit.inputs.projectinputs import ProjectInputs
from opencontext_py.apps.edit.inputs.labeling import InputLabeling
from opencontext_py.apps.edit.inputs.profiles.models import InputProfile
from opencontext_py.apps.edit.inputs.profiles.manage import ManageInputProfile
from opencontext_py.apps.edit.inputs.profiles.templating import InputProfileTemplating
from opencontext_py.apps.edit.inputs.profiles.use import InputProfileUse
from opencontext_py.apps.edit.inputs.fieldgroups.models import InputFieldGroup
from opencontext_py.apps.edit.inputs.fieldgroups.manage import ManageInputFieldGroup
from opencontext_py.apps.edit.inputs.inputfields.manage import ManageInputField
from opencontext_py.apps.ocitems.manifest.models import Manifest
@ensure_csrf_cookie
@cache_control(no_cache=True)
@never_cache
def profile_use(request, profile_uuid, edit_uuid):
""" Handle requests to use a profile to create
or edit a record
"""
ipt = InputProfileTemplating()
exists = ipt.check_exists(profile_uuid)
if exists:
        # now check to see if we have edit permissions
proj_inp = ProjectInputs(ipt.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
if edit_uuid != 'new':
try:
edit_man = Manifest.objects.get(uuid=edit_uuid)
except Manifest.DoesNotExist:
# trying to use this profile to edit something that
# does not exist
edit_man = False
raise Http404
else:
edit_uuid = proj_inp.mint_new_uuid()
edit_man = False
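            # Optional GET parameters: a label prefix and id length for generating
            # labels, plus class and context entities that get dereferenced for display.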
if 'prefix' in request.GET:
prefix = request.GET['prefix']
else:
prefix = ''
class_uri = False
class_label = False
if 'class_uri' in request.GET:
class_uri = request.GET['class_uri']
ent = Entity()
found = ent.dereference(class_uri)
if found:
class_label = ent.label
else:
class_uri = False
context_uuid = False
context_label = False
if 'context_uuid' in request.GET:
context_uuid = request.GET['context_uuid']
ent = Entity()
found = ent.dereference(context_uuid)
if found:
context_label = ent.label
else:
context_uuid = ''
if 'id_len' in request.GET:
try:
id_len = int(float(request.GET['id_len']))
except:
id_len = False
else:
id_len = False
rp = RootPath()
base_url = rp.get_baseurl()
temp_item = {'uuid': ipt.uuid,
'label': ipt.inp_prof.label,
'project_uuid': ipt.project_uuid,
'project': ipt.project,
'edit_man': edit_man,
'edit_uuid': edit_uuid,
'label_prefix': prefix,
'label_id_len': id_len,
'class_uri': class_uri,
'class_label': class_label,
'context_uuid': context_uuid,
'context_label': context_label,
'context': False,
'act_nav': 'profiles'}
template = loader.get_template('edit/profiles/profile-use.html')
context = {
'item': temp_item,
'super_user': request.user.is_superuser,
'icons': ItemBasicEdit.UI_ICONS,
'field_group_vis': InputFieldGroup.GROUP_VIS,
'base_url': base_url
}
return HttpResponse(template.render(context, request))
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
raise Http404
@ensure_csrf_cookie
@cache_control(no_cache=True)
def profile_edit(request, profile_uuid):
""" Handles JSON requests for a profile
"""
ipt = InputProfileTemplating()
exists = ipt.check_exists(profile_uuid)
if exists:
        # now check to see if we have edit permissions
proj_inp = ProjectInputs(ipt.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
rp = RootPath()
base_url = rp.get_baseurl()
temp_item = {'uuid': ipt.uuid,
'label': ipt.inp_prof.label,
'project_uuid': ipt.project_uuid,
'project': ipt.project,
'context': False,
'act_nav': 'profiles'}
template = loader.get_template('edit/profiles/profile-edit.html')
context = {
'item': temp_item,
'super_user': request.user.is_superuser,
'icons': ItemBasicEdit.UI_ICONS,
'field_group_vis': InputFieldGroup.GROUP_VIS,
'base_url': base_url
}
return HttpResponse(template.render(context, request))
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
raise Http404
@cache_control(no_cache=True)
@never_cache
def json_view(request, profile_uuid):
""" Handles JSON requests for a profile
"""
ipt = InputProfileTemplating()
exists = ipt.check_exists(profile_uuid)
if exists:
        # now check to see if we have edit permissions
proj_inp = ProjectInputs(ipt.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
result = ipt.make_json(profile_uuid)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
raise Http404
@cache_control(no_cache=True)
@never_cache
def profile_item_list(request, profile_uuid):
""" Handles JSON requests for a profile
"""
ipt = InputProfileTemplating()
exists = ipt.check_exists(profile_uuid)
if exists:
rp = RootPath()
ipt.base_url = rp.get_baseurl()
        # now check to see if we have edit permissions
proj_inp = ProjectInputs(ipt.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
start = 0
rows = 10
sort = False
last = False
if 'start' in request.GET:
start = request.GET['start']
if 'rows' in request.GET:
rows = request.GET['rows']
if 'sort' in request.GET:
sort = request.GET['sort']
if 'last' in request.GET:
last = True
result = ipt.get_item_list(profile_uuid,
start,
rows,
sort,
last)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
raise Http404
# ------------------------------------------------
# BELOW HANDLE AJAX REQUESTS
# TO get a JSON Index of
# InputProfiles for a project
# ------------------------------------------------
@cache_control(no_cache=True)
@never_cache
def index_json(request, project_uuid):
""" handles get requests to make
a JSON index of input profiles for a project
"""
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False:
if proj_inp.edit_permitted or request.user.is_superuser:
result = proj_inp.get_profiles()
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
raise Http404
@cache_control(no_cache=True)
@never_cache
def label_check(request, project_uuid):
""" handles get requests to check on the
validity of a proposed item label
"""
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False or request.user.is_superuser:
if proj_inp.edit_permitted or request.user.is_superuser:
ilab = InputLabeling()
ilab.project_uuid = project_uuid
in_error = False
error = {'error': ''}
if 'item_type' in request.GET:
ilab.item_type = request.GET['item_type']
else:
in_error = True
error['error'] += 'Need an "item_type" parameter in request. '
ilab.item_type = False
if 'context_uuid' in request.GET:
ilab.context_uuid = request.GET['context_uuid']
if 'prefix' in request.GET:
prefix = request.GET['prefix']
else:
prefix = ''
if 'id_len' in request.GET:
try:
id_len = int(float(request.GET['id_len']))
                except:
                    id_len = False
                    error['error'] += 'Need an integer value for the "id_len" parameter. '
else:
id_len = False
if 'label' in request.GET:
label = request.GET['label']
else:
label = False
if 'uuid' in request.GET:
ilab.uuid = request.GET['uuid']
else:
ilab.uuid = False
if in_error is False:
result = ilab.check_make_valid_label(label,
prefix,
id_len)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps(error,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=400)
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
raise Http404
@cache_control(no_cache=True)
def create_update_profle_item(request, profile_uuid, edit_uuid):
""" handles POST requests to make
or update an item with a given profile
"""
try:
inp_prof = InputProfile.objects.get(uuid=profile_uuid)
project_uuid = inp_prof.project_uuid
except InputProfile.DoesNotExist:
inp_prof = False
project_uuid = False
raise Http404
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False:
if request.method == 'POST':
field_data = False
if 'field_data' in request.POST:
field_data_json = request.POST['field_data']
try:
field_data = json.loads(field_data_json)
except:
field_data = False
if field_data is False:
json_output = json.dumps({'error': 'Need to POST "field_data" with JSON encoded text.'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=400)
if proj_inp.edit_permitted or request.user.is_superuser:
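                # Build the profile template, then hand the posted field data to
                # InputProfileUse to create or update the item it describes.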
ipt = InputProfileTemplating()
profile_obj = ipt.make_json(profile_uuid)
ipu = InputProfileUse()
ipu.edit_uuid = edit_uuid
ipu.item_type = profile_obj['item_type']
ipu.profile_uuid = profile_uuid
ipu.profile_obj = profile_obj
ipu.project_uuid = project_uuid
result = ipu.create_update(field_data)
# result = ipu.test(field_data)
result['errors'] = ipu.errors
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
# ------------------------------------------------
# BELOW HANDLE AJAX REQUESTS
# TO CREATE, UPDATE, DELETE, and DUPLICATE
# InputProfiles
# ------------------------------------------------
@cache_control(no_cache=True)
def create(request, project_uuid):
""" Handles POST requests to create a new input profile """
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False:
if request.method == 'POST':
if proj_inp.edit_permitted or request.user.is_superuser:
m_inp_prof = ManageInputProfile()
m_inp_prof.creator_uuid = str(request.user.id)
m_inp_prof.project_uuid = project_uuid
result = m_inp_prof.create_update_from_post(request.POST)
result['errors'] = m_inp_prof.errors
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def update(request, profile_uuid):
""" Handles POST requests to update an existing profile """
try:
inp_prof = InputProfile.objects.get(uuid=profile_uuid)
project_uuid = inp_prof.project_uuid
except InputProfile.DoesNotExist:
inp_prof = False
project_uuid = False
raise Http404
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False:
if request.method == 'POST':
if proj_inp.edit_permitted or request.user.is_superuser:
m_inp_prof = ManageInputProfile()
m_inp_prof.creator_uuid = str(request.user.id)
m_inp_prof.project_uuid = project_uuid
result = m_inp_prof.create_update_from_post(request.POST, profile_uuid)
result['errors'] = m_inp_prof.errors
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def delete(request, profile_uuid):
""" Handles POST requests to delete an existing profile """
try:
inp_prof = InputProfile.objects.get(uuid=profile_uuid)
project_uuid = inp_prof.project_uuid
except InputProfile.DoesNotExist:
inp_prof = False
project_uuid = False
raise Http404
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False:
if request.method == 'POST':
if proj_inp.edit_permitted or request.user.is_superuser:
m_inp_prof = ManageInputProfile()
m_inp_prof.creator_uuid = str(request.user.id)
m_inp_prof.project_uuid = project_uuid
result = m_inp_prof.delete(profile_uuid)
result['errors'] = m_inp_prof.errors
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def duplicate(request, profile_uuid):
""" Handles POST requests to duplicate an existing profile """
try:
inp_prof = InputProfile.objects.get(uuid=profile_uuid)
project_uuid = inp_prof.project_uuid
except InputProfile.DoesNotExist:
inp_prof = False
project_uuid = False
raise Http404
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.manifest is not False:
if request.method == 'POST':
if proj_inp.edit_permitted or request.user.is_superuser:
m_inp_prof = ManageInputProfile()
m_inp_prof.creator_uuid = str(request.user.id)
m_inp_prof.project_uuid = project_uuid
result = m_inp_prof.duplicate(request.POST,
profile_uuid)
result['errors'] = m_inp_prof.errors
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
# ------------------------------------------------
# BELOW HANDLE AJAX REQUESTS
# TO CREATE, UPDATE, DELETE, and DUPLICATE
# InputFieldGroups AND InputFields
# ------------------------------------------------
@cache_control(no_cache=True)
def reorder_item(request, uuid):
""" handles a request to reorder an item """
found = False
fieldgroup_obj = False
field_obj = False
mifg = ManageInputFieldGroup()
fieldgroup_obj = mifg.get_field_group(uuid)
if fieldgroup_obj is not False:
found = True
project_uuid = fieldgroup_obj.project_uuid
item_type = 'field-groups'
else:
mif = ManageInputField()
field_obj = mif.get_field(uuid)
if field_obj is not False:
project_uuid = field_obj.project_uuid
found = True
item_type = 'fields'
if found:
if request.method == 'POST':
proj_inp = ProjectInputs(project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
# ok to reorder the item
if 'sort_change' in request.POST:
sort_change = request.POST['sort_change']
else:
sort_change = 0
result = mifg.update_sort_field_group_or_field(sort_change, uuid, item_type)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
# ------------------------------------------------
# BELOW HANDLE AJAX REQUESTS
# TO CREATE, UPDATE, DELETE, and DUPLICATE
# InputFieldGroups
# ------------------------------------------------
@cache_control(no_cache=True)
def create_field_group(request, profile_uuid):
""" Creates a field group for a given InputProfile
"""
ipt = InputProfileTemplating()
exists = ipt.check_exists(profile_uuid)
if exists:
if request.method == 'POST':
            # now check to see if we have edit permissions
proj_inp = ProjectInputs(ipt.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
mifg = ManageInputFieldGroup()
mifg.project_uuid = ipt.project_uuid
mifg.profile_uuid = profile_uuid
result = mifg.create_update_from_post(request.POST)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def update_field_group(request, fgroup_uuid):
""" Updates a field group for a given InputProfile
"""
mifg = ManageInputFieldGroup()
inp_obj = mifg.get_field_group(fgroup_uuid)
if inp_obj is not False:
if request.method == 'POST':
            # now check to see if we have edit permissions
proj_inp = ProjectInputs(inp_obj.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
result = mifg.create_update_from_post(request.POST,
fgroup_uuid)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def delete_field_group(request, fgroup_uuid):
""" Delete a field group for a given InputProfile
"""
mifg = ManageInputFieldGroup()
inp_obj = mifg.get_field_group(fgroup_uuid)
if inp_obj is not False:
if request.method == 'POST':
            # now check to see if we have edit permissions
proj_inp = ProjectInputs(inp_obj.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
result = mifg.delete(fgroup_uuid)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
# ------------------------------------------------
# BELOW HANDLE AJAX REQUESTS
# TO CREATE, UPDATE, DELETE, and DUPLICATE
# InputFields
# ------------------------------------------------
@cache_control(no_cache=True)
def create_field(request, fgroup_uuid):
""" Creates a field group for a given InputProfile
"""
mifg = ManageInputFieldGroup()
inp_obj = mifg.get_field_group(fgroup_uuid)
if inp_obj is not False:
mif = ManageInputField()
mif.fgroup_uuid = fgroup_uuid
mif.profile_uuid = inp_obj.profile_uuid
mif.project_uuid = inp_obj.project_uuid
if request.method == 'POST':
            # now check to see if we have edit permissions
proj_inp = ProjectInputs(inp_obj.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
# now finally try to create the Field
result = mif.create_update_from_post(request.POST)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def update_field(request, field_uuid):
""" Updates a field group for a given InputProfile
"""
mifg = ManageInputFieldGroup()
inp_obj = mifg.get_field_group(fgroup_uuid)
if inp_obj is not False:
if request.method == 'POST':
# now check to see if the we have edit permissions
proj_inp = ProjectInputs(inp_obj.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
result = mifg.create_update_from_post(request.POST,
fgroup_uuid)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
@cache_control(no_cache=True)
def delete_field(request, field_uuid):
""" Delete a field group for a given InputProfile
"""
mif = ManageInputField()
inp_obj = mif.get_field(field_uuid)
if inp_obj is not False:
if request.method == 'POST':
            # now check to see if we have edit permissions
proj_inp = ProjectInputs(inp_obj.project_uuid, request)
if proj_inp.edit_permitted or request.user.is_superuser:
result = mif.delete(field_uuid)
json_output = json.dumps(result,
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8')
else:
json_output = json.dumps({'error': 'edit permission required'},
indent=4,
ensure_ascii=False)
return HttpResponse(json_output,
content_type='application/json; charset=utf8',
status=401)
else:
            return HttpResponseForbidden()
else:
raise Http404
| gpl-3.0 | 1,059,389,435,304,288,300 | 41.948652 | 104 | 0.498281 | false |
andrewdodd/drf-timeordered-pagination | setup.py | 1 | 2571 | import codecs
import os
import re
from setuptools import setup, find_packages
###############################################################################
NAME = "drf_timeordered_pagination"
PACKAGES = find_packages(where="src")
META_PATH = os.path.join("src", "timeordered_pagination", "__init__.py")
KEYWORDS = ["Django Rest Framework", "Django", "Pagination"]
CLASSIFIERS = [
"Development Status :: 3 - Alpha",
"Intended Audience :: Developers",
"Natural Language :: English",
"License :: Other/Proprietary License",
"Operating System :: OS Independent",
'Framework :: Django',
'Framework :: Django :: 1.8',
'Framework :: Django :: 1.9',
'Intended Audience :: Developers',
"Programming Language :: Python",
"Programming Language :: Python :: 2",
"Programming Language :: Python :: 2.7",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.4",
"Programming Language :: Python :: 3.5",
"Programming Language :: Python :: 3.6",
"Topic :: Software Development :: Libraries :: Python Modules",
]
INSTALL_REQUIRES = [
'django>=1.8',
'djangorestframework>=3.1',
]
###############################################################################
HERE = os.path.abspath(os.path.dirname(__file__))
def read(*parts):
"""
    Build an absolute path from *parts* and return the contents of the
resulting file. Assume UTF-8 encoding.
"""
with codecs.open(os.path.join(HERE, *parts), "rb", "utf-8") as f:
return f.read()
META_FILE = read(META_PATH)
def find_meta(meta):
"""
Extract __*meta*__ from META_FILE.
"""
meta_match = re.search(
r"^__{meta}__ = ['\"]([^'\"]*)['\"]".format(meta=meta),
META_FILE, re.M
)
if meta_match:
return meta_match.group(1)
raise RuntimeError("Unable to find __{meta}__ string.".format(meta=meta))
VERSION = find_meta("version")
URI = find_meta("uri")
LONG = (
read("README.rst")
)
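# Note (not part of the original file): find_meta() above reads dunder strings
# from src/timeordered_pagination/__init__.py, so that module is expected to
# define lines of the following shape -- values here are illustrative only:
#
#   __version__ = "0.1.0"
#   __uri__ = "https://github.com/andrewdodd/drf-timeordered-pagination"
#   __description__ = "Time-ordered pagination for Django REST Framework"
#   __license__ = "MIT"
#   __author__ = "..."
#   __email__ = "..."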
if __name__ == "__main__":
setup(
name=NAME,
description=find_meta("description"),
license=find_meta("license"),
url=URI,
version=VERSION,
author=find_meta("author"),
author_email=find_meta("email"),
maintainer=find_meta("author"),
maintainer_email=find_meta("email"),
keywords=KEYWORDS,
long_description=LONG,
packages=PACKAGES,
package_dir={"": "src"},
zip_safe=False,
classifiers=CLASSIFIERS,
install_requires=INSTALL_REQUIRES,
)
| mit | 3,072,226,323,324,657,700 | 26.645161 | 79 | 0.563983 | false |
digitalocean/netbox | netbox/extras/migrations/0022_custom_links.py | 1 | 1959 | from django.db import migrations, models
import django.db.models.deletion
import extras.models
class Migration(migrations.Migration):
dependencies = [
('contenttypes', '0002_remove_content_type_name'),
('extras', '0021_add_color_comments_changelog_to_tag'),
]
operations = [
migrations.CreateModel(
name='CustomLink',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False)),
('name', models.CharField(max_length=100, unique=True)),
('text', models.CharField(max_length=500)),
('url', models.CharField(max_length=500)),
('weight', models.PositiveSmallIntegerField(default=100)),
('group_name', models.CharField(blank=True, max_length=50)),
('button_class', models.CharField(default='default', max_length=30)),
('new_window', models.BooleanField()),
('content_type', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType')),
],
options={
'ordering': ['group_name', 'weight', 'name'],
},
),
# Update limit_choices_to for CustomFields, ExportTemplates, and Webhooks
migrations.AlterField(
model_name='customfield',
name='obj_type',
field=models.ManyToManyField(related_name='custom_fields', to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='exporttemplate',
name='content_type',
field=models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='contenttypes.ContentType'),
),
migrations.AlterField(
model_name='webhook',
name='obj_type',
field=models.ManyToManyField(related_name='webhooks', to='contenttypes.ContentType'),
),
]
| apache-2.0 | -4,466,386,778,810,660,400 | 39.8125 | 128 | 0.587545 | false |
r4fek/django-cassandra-engine | django_cassandra_engine/utils.py | 1 | 4624 | import inspect
from django.conf import settings
from django.db import DEFAULT_DB_ALIAS
import django
from .compat import cqlengine
class CursorWrapper(object):
"""
Simple CursorWrapper implementation based on django.db.utils.CursorWrapper
"""
def __init__(self, cursor, db):
self.cursor = cursor
self.db = db
WRAP_ERROR_ATTRS = frozenset(
['fetchone', 'fetchmany', 'fetchall', 'nextset']
)
def __getattr__(self, attr):
cursor_attr = getattr(self.cursor, attr)
if attr in CursorWrapper.WRAP_ERROR_ATTRS:
return self.db.wrap_database_errors(cursor_attr)
else:
return cursor_attr
def __iter__(self):
return iter(self.cursor)
def __enter__(self):
return self
def __exit__(self, type, value, traceback):
pass
def callproc(self, procname, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.callproc(procname)
else:
return self.cursor.callproc(procname, params)
def execute(self, sql, params=None):
with self.db.wrap_database_errors:
if params is None:
return self.cursor.execute(sql)
else:
return self.cursor.execute(sql, params)
def executemany(self, sql, param_list):
with self.db.wrap_database_errors:
return self.cursor.executemany(sql, param_list)
def get_installed_apps():
"""
Return list of all installed apps
"""
from django.apps import apps
return [
a.models_module
for a in apps.get_app_configs()
if a.models_module is not None
]
def get_cql_models(app, connection=None, keyspace=None):
"""
:param app: django models module
:param connection: connection name
:param keyspace: keyspace
:return: list of all cassandra.cqlengine.Model within app that should be
synced to keyspace.
"""
from .models import DjangoCassandraModel
models = []
single_cassandra_connection = len(list(get_cassandra_connections())) == 1
is_default_connection = (
connection == DEFAULT_DB_ALIAS or single_cassandra_connection
)
for name, obj in inspect.getmembers(app):
cql_model_types = (cqlengine.models.Model, DjangoCassandraModel)
if (
inspect.isclass(obj)
and issubclass(obj, cql_model_types)
and not obj.__abstract__
):
if (
obj.__connection__ == connection
or (obj.__connection__ is None and is_default_connection)
or obj.__connection__ is None
and obj.__keyspace__ is not None
and obj.__keyspace__ == keyspace
):
models.append(obj)
return models
def get_cassandra_connections():
"""
:return: List of tuples (db_alias, connection) for all cassandra
connections in DATABASES dict.
"""
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias, connections[alias]
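# For orientation only (not part of the original module): a DATABASES entry
# that get_cassandra_connections() would yield looks roughly like the sketch
# below; keyspace name and host are illustrative values.
#
#   DATABASES = {
#       'cassandra': {
#           'ENGINE': 'django_cassandra_engine',
#           'NAME': 'my_keyspace',
#           'HOST': '127.0.0.1',
#       }
#   }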
def get_default_cassandra_connection():
"""
Return first default cassandra connection
:return:
"""
for alias, conn in get_cassandra_connections():
if conn.connection.default:
return alias, conn
return list(get_cassandra_connections())[0]
def get_cassandra_connection(alias=None, name=None):
"""
:return: cassandra connection matching alias or name or just first found.
"""
for _alias, connection in get_cassandra_connections():
if alias is not None:
if alias == _alias:
return connection
elif name is not None:
if name == connection.settings_dict['NAME']:
return connection
else:
return connection
def get_cassandra_db_aliases():
from django.db import connections
for alias in connections:
engine = connections[alias].settings_dict.get('ENGINE', '')
if engine == 'django_cassandra_engine':
yield alias
def get_cassandra_db_alias():
return get_cassandra_db_aliases().__next__()
def get_engine_from_db_alias(db_alias):
"""
:param db_alias: database alias
:return: database engine from DATABASES dict corresponding to db_alias
or None if db_alias was not found
"""
return settings.DATABASES.get(db_alias, {}).get('ENGINE', None)
| bsd-2-clause | 8,429,201,329,871,066,000 | 26.52381 | 78 | 0.609645 | false |
T2DREAM/t2dream-portal | src/encoded/tests/test_post_put_patch.py | 1 | 8558 | import pytest
targets = [
{'name': 'one', 'uuid': '775795d3-4410-4114-836b-8eeecf1d0c2f'},
{'name': 'two', 'uuid': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377'},
]
item = {
'required': 'required value',
}
simple1 = {
'required': 'required value',
'simple1': 'supplied simple1',
}
simple2 = {
'required': 'required value',
'simple2': 'supplied simple2',
}
item_with_uuid = [
{
'uuid': '0f13ff76-c559-4e70-9497-a6130841df9f',
'required': 'required value 1',
},
{
'uuid': '6c3e444b-f290-43c4-bfb9-d20135377770',
'required': 'required value 2',
},
]
item_with_link = [
{
'required': 'required value 1',
'protected_link': '775795d3-4410-4114-836b-8eeecf1d0c2f',
},
{
'required': 'required value 2',
'protected_link': 'd6784f5e-48a1-4b40-9b11-c8aefb6e1377',
},
]
COLLECTION_URL = '/testing-post-put-patch/'
@pytest.fixture
def link_targets(testapp):
url = '/testing-link-targets/'
for item in targets:
testapp.post_json(url, item, status=201)
@pytest.fixture
def content(testapp):
res = testapp.post_json(COLLECTION_URL, item_with_uuid[0], status=201)
return {'@id': res.location}
@pytest.fixture
def content_with_child(testapp):
parent_res = testapp.post_json('/testing-link-targets/', {}, status=201)
parent_id = parent_res.json['@graph'][0]['@id']
child_res = testapp.post_json('/testing-link-sources/', {'target': parent_id})
child_id = child_res.json['@graph'][0]['@id']
return {'@id': parent_id, 'child': child_id}
def test_admin_post(testapp):
testapp.post_json(COLLECTION_URL, item, status=201)
testapp.post_json(COLLECTION_URL, item_with_uuid[0], status=201)
def test_submitter_post(submitter_testapp):
testapp = submitter_testapp
testapp.post_json(COLLECTION_URL, item, status=201)
res = testapp.post_json(COLLECTION_URL, item_with_uuid[0], status=422)
assert any(error.get('name') == ['uuid'] for error in res.json['errors'])
def test_admin_put_uuid(content, testapp):
url = content['@id']
# so long as the same uuid is supplied, PUTing the uuid is fine
testapp.put_json(url, item_with_uuid[0], status=200)
# but the uuid may not be changed on PUT;
testapp.put_json(url, item_with_uuid[1], status=422)
def test_submitter_put_uuid(content, submitter_testapp):
testapp = submitter_testapp
url = content['@id']
# so long as the same uuid is supplied, PUTing the uuid is fine
testapp.put_json(url, item_with_uuid[0], status=200)
# but the uuid may not be changed on PUT;
testapp.put_json(url, item_with_uuid[1], status=422)
def test_defaults_on_put(content, testapp):
url = content['@id']
res = testapp.get(url)
assert res.json['simple1'] == 'simple1 default'
assert res.json['simple2'] == 'simple2 default'
res = testapp.put_json(url, simple1, status=200)
assert res.json['@graph'][0]['simple1'] == 'supplied simple1'
assert res.json['@graph'][0]['simple2'] == 'simple2 default'
res = testapp.put_json(url, simple2, status=200)
assert res.json['@graph'][0]['simple1'] == 'simple1 default'
assert res.json['@graph'][0]['simple2'] == 'supplied simple2'
def test_patch(content, testapp):
url = content['@id']
res = testapp.get(url)
assert res.json['simple1'] == 'simple1 default'
assert res.json['simple2'] == 'simple2 default'
res = testapp.patch_json(url, {}, status=200)
assert res.json['@graph'][0]['simple1'] == 'simple1 default'
assert res.json['@graph'][0]['simple2'] == 'simple2 default'
res = testapp.patch_json(url, {'simple1': 'supplied simple1'}, status=200)
assert res.json['@graph'][0]['simple1'] == 'supplied simple1'
assert res.json['@graph'][0]['simple2'] == 'simple2 default'
res = testapp.patch_json(url, {'simple2': 'supplied simple2'}, status=200)
assert res.json['@graph'][0]['simple1'] == 'supplied simple1'
assert res.json['@graph'][0]['simple2'] == 'supplied simple2'
def test_admin_put_protected_link(link_targets, testapp):
res = testapp.post_json(COLLECTION_URL, item_with_link[0], status=201)
url = res.location
testapp.put_json(url, item_with_link[0], status=200)
testapp.put_json(url, item_with_link[1], status=200)
def test_submitter_put_protected_link(link_targets, testapp, submitter_testapp):
res = testapp.post_json(COLLECTION_URL, item_with_link[0], status=201)
url = res.location
submitter_testapp.put_json(url, item_with_link[0], status=200)
submitter_testapp.put_json(url, item_with_link[1], status=422)
def test_put_object_not_touching_children(content_with_child, testapp):
url = content_with_child['@id']
res = testapp.put_json(url, {}, status=200)
assert content_with_child['child'] in res.json['@graph'][0]['reverse']
def test_put_object_editing_child(content_with_child, testapp):
edit = {
'reverse': [{
'@id': content_with_child['child'],
'status': 'released',
}]
}
testapp.put_json(content_with_child['@id'], edit, status=200)
res = testapp.get(content_with_child['child'] + '?frame=embedded')
assert res.json['status'] == 'released'
def test_put_object_adding_child(content_with_child, testapp):
edit = {
'reverse': [
content_with_child['child'],
{
'status': 'released',
}
]
}
testapp.put_json(content_with_child['@id'], edit, status=200)
res = testapp.get(content_with_child['@id'])
assert len(res.json['reverse']) == 2
def test_submitter_put_object_adding_disallowed_child(
root, monkeypatch, content_with_child, submitter_testapp):
from pyramid.security import Allow
monkeypatch.setattr(root['testing-link-sources'], '__acl__', (), raising=False)
monkeypatch.setattr(
root['testing-link-targets'], '__acl__',
((Allow, 'group.submitter', 'edit'),), raising=False)
edit = {
'reverse': [
content_with_child['child'],
{
'status': 'released',
}
]
}
res = submitter_testapp.put_json(content_with_child['@id'], edit, status=422)
assert res.json['errors'][0]['description'].startswith(
'add forbidden to /testing-link-sources/')
def test_put_object_removing_child(content_with_child, testapp):
edit = {
'reverse': [],
}
testapp.put_json(content_with_child['@id'], edit, status=200)
res = testapp.get(content_with_child['@id'] + '?frame=embedded')
assert len(res.json['reverse']) == 0
res = testapp.get(content_with_child['child'])
assert res.json['status'] == 'deleted'
def test_put_object_child_validation(content_with_child, testapp):
edit = {
'reverse': [{
'@id': content_with_child['child'],
'target': 'BOGUS',
}]
}
res = testapp.put_json(content_with_child['@id'], edit, status=422)
assert res.json['errors'][0]['name'] == [u'reverse', 0, u'target']
def test_put_object_validates_child_references(content_with_child, testapp):
# Try a child that doesn't exist
edit = {
'reverse': [
content_with_child['child'],
'/asdf',
]
}
testapp.put_json(content_with_child['@id'], edit, status=422)
# Try a child that exists but is the wrong type
edit = {
'reverse': [
content_with_child['child'],
content_with_child['@id'],
]
}
testapp.put_json(content_with_child['@id'], edit, status=422)
def test_post_object_with_child(testapp):
edit = {
'reverse': [{
'status': 'released',
}]
}
res = testapp.post_json('/testing-link-targets', edit, status=201)
parent_id = res.json['@graph'][0]['@id']
source = res.json['@graph'][0]['reverse'][0]
res = testapp.get(source)
assert res.json['target'] == parent_id
def test_etag_if_match_tid(testapp, organism):
res = testapp.get(organism['@id'] + '?frame=edit', status=200)
etag = res.etag
testapp.patch_json(organism['@id'], {}, headers={'If-Match': etag}, status=200)
testapp.patch_json(organism['@id'], {}, headers={'If-Match': etag}, status=412)
def test_retry(testapp):
res = testapp.post_json('/testing-post-put-patch/', {'required': ''})
url = res.location
    res = testapp.get(url + '/@@testing-retry?datastore=database')
assert res.json['attempt'] == 2
assert not res.json['detached']
| mit | -6,423,251,719,205,006,000 | 30.932836 | 83 | 0.618953 | false |
namboy94/kudubot | kudubot/helper.py | 1 | 3631 | """LICENSE
Copyright 2015 Hermann Krumrey <[email protected]>
This file is part of kudubot.
kudubot is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
kudubot is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with kudubot. If not, see <http://www.gnu.org/licenses/>.
LICENSE"""
import os
import argparse
import logging
import traceback
from typing import Type, Optional
from sentry_sdk import init as init_sentry
from sentry_sdk.integrations.logging import ignore_logger
from bokkichat.connection.Connection import Connection
from kudubot.Bot import Bot
from kudubot.exceptions import ConfigurationError
def cli_bot_start(
bot_cls: Type[Bot],
connection_cls: Type[Connection],
sentry_dsn: Optional[str] = None
):
"""
Implements a standard CLI interface for kudubot implementations
:param bot_cls: The class of the bot to start
:param connection_cls: The connection to use with the bot
:param sentry_dsn: Optional sentry DSN for exception logging
:return: None
"""
if sentry_dsn is not None:
init_sentry(sentry_dsn)
default_config_path = os.path.join(
os.path.expanduser("~"),
".config/{}".format(bot_cls.name())
)
parser = argparse.ArgumentParser()
parser.add_argument("--initialize", action="store_true",
help="Initializes the {} bot".format(bot_cls.name()))
parser.add_argument("--verbose", "-v", action="store_true",
help="Shows more output (INFO level)")
parser.add_argument("--debug", "-d", action="store_true",
help="Shows even more output (DEBUG level)")
parser.add_argument("--custom-dir", default=default_config_path,
help="Specifies a custom configuration directory")
args = parser.parse_args()
config_path = args.custom_dir
if args.verbose:
logging.basicConfig(level=logging.INFO)
if args.debug:
logging.basicConfig(level=logging.DEBUG)
if args.initialize:
if not os.path.isdir(config_path):
os.makedirs(config_path)
bot_cls.create_config(connection_cls, config_path)
print("Successfully generated configuration in " + config_path)
elif not os.path.isdir(config_path):
print("Missing Configuration directory " + config_path)
else:
try:
bot = bot_cls.load(connection_cls, config_path)
except ConfigurationError as e:
print("Invalid Configuration: {}".format(e))
return
# Disable sentry notifications for error-level logging messages
# in kudubot, those will be sent another way
if sentry_dsn is not None:
init_sentry(sentry_dsn, release="{}-{}".format(
bot.name(), bot.version()
))
ignore_logger(bot.logger.name)
try:
bot.start()
except KeyboardInterrupt:
print("Execution aborted")
except BaseException as e:
bot.logger.error(
"Fatal Exception: {}\n{}".format(
e,
"\n".join(traceback.format_tb(e.__traceback__))
)
)
| gpl-3.0 | 2,426,591,795,977,430,500 | 33.580952 | 77 | 0.646654 | false |
IronLanguages/ironpython2 | Src/StdLib/Lib/textwrap.py | 1 | 17280 | """Text wrapping and filling.
"""
# Copyright (C) 1999-2001 Gregory P. Ward.
# Copyright (C) 2002, 2003 Python Software Foundation.
# Written by Greg Ward <[email protected]>
__revision__ = "$Id$"
import string, re
try:
_unicode = unicode
except NameError:
# If Python is built without Unicode support, the unicode type
# will not exist. Fake one.
class _unicode(object):
pass
# Do the right thing with boolean values for all known Python versions
# (so this module can be copied to projects that don't depend on Python
# 2.3, e.g. Optik and Docutils) by uncommenting the block of code below.
#try:
# True, False
#except NameError:
# (True, False) = (1, 0)
__all__ = ['TextWrapper', 'wrap', 'fill', 'dedent']
# Hardcode the recognized whitespace characters to the US-ASCII
# whitespace characters. The main reason for doing this is that in
# ISO-8859-1, 0xa0 is non-breaking whitespace, so in certain locales
# that character winds up in string.whitespace. Respecting
# string.whitespace in those cases would 1) make textwrap treat 0xa0 the
# same as any other whitespace char, which is clearly wrong (it's a
# *non-breaking* space), 2) possibly cause problems with Unicode,
# since 0xa0 is not in range(128).
_whitespace = '\t\n\x0b\x0c\r '
class TextWrapper:
"""
Object for wrapping/filling text. The public interface consists of
the wrap() and fill() methods; the other methods are just there for
subclasses to override in order to tweak the default behaviour.
If you want to completely replace the main wrapping algorithm,
you'll probably have to override _wrap_chunks().
Several instance attributes control various aspects of wrapping:
width (default: 70)
the maximum width of wrapped lines (unless break_long_words
is false)
initial_indent (default: "")
string that will be prepended to the first line of wrapped
output. Counts towards the line's width.
subsequent_indent (default: "")
string that will be prepended to all lines save the first
of wrapped output; also counts towards each line's width.
expand_tabs (default: true)
Expand tabs in input text to spaces before further processing.
Each tab will become 1 .. 8 spaces, depending on its position in
its line. If false, each tab is treated as a single character.
replace_whitespace (default: true)
Replace all whitespace characters in the input text by spaces
after tab expansion. Note that if expand_tabs is false and
replace_whitespace is true, every tab will be converted to a
single space!
fix_sentence_endings (default: false)
Ensure that sentence-ending punctuation is always followed
by two spaces. Off by default because the algorithm is
(unavoidably) imperfect.
break_long_words (default: true)
Break words longer than 'width'. If false, those words will not
be broken, and some lines might be longer than 'width'.
break_on_hyphens (default: true)
Allow breaking hyphenated words. If true, wrapping will occur
preferably on whitespaces and right after hyphens part of
compound words.
drop_whitespace (default: true)
Drop leading and trailing whitespace from lines.
"""
whitespace_trans = string.maketrans(_whitespace, ' ' * len(_whitespace))
unicode_whitespace_trans = {}
uspace = ord(u' ')
for x in map(ord, _whitespace):
unicode_whitespace_trans[x] = uspace
# This funky little regex is just the trick for splitting
# text up into word-wrappable chunks. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-/ball,/ /use/ /the/ /-b/ /option!
# (after stripping out empty strings).
wordsep_re = re.compile(
r'(\s+|' # any whitespace
r'[^\s\w]*\w+[^0-9\W]-(?=\w+[^0-9\W])|' # hyphenated words
r'(?<=[\w\!\"\'\&\.\,\?])-{2,}(?=\w))') # em-dash
# This less funky little regex just split on recognized spaces. E.g.
# "Hello there -- you goof-ball, use the -b option!"
# splits into
# Hello/ /there/ /--/ /you/ /goof-ball,/ /use/ /the/ /-b/ /option!/
wordsep_simple_re = re.compile(r'(\s+)')
# XXX this is not locale- or charset-aware -- string.lowercase
# is US-ASCII only (and therefore English-only)
sentence_end_re = re.compile(r'[%s]' # lowercase letter
r'[\.\!\?]' # sentence-ending punct.
r'[\"\']?' # optional end-of-quote
r'\Z' # end of chunk
% string.lowercase)
def __init__(self,
width=70,
initial_indent="",
subsequent_indent="",
expand_tabs=True,
replace_whitespace=True,
fix_sentence_endings=False,
break_long_words=True,
drop_whitespace=True,
break_on_hyphens=True):
self.width = width
self.initial_indent = initial_indent
self.subsequent_indent = subsequent_indent
self.expand_tabs = expand_tabs
self.replace_whitespace = replace_whitespace
self.fix_sentence_endings = fix_sentence_endings
self.break_long_words = break_long_words
self.drop_whitespace = drop_whitespace
self.break_on_hyphens = break_on_hyphens
# recompile the regexes for Unicode mode -- done in this clumsy way for
# backwards compatibility because it's rather common to monkey-patch
# the TextWrapper class' wordsep_re attribute.
self.wordsep_re_uni = re.compile(self.wordsep_re.pattern, re.U)
self.wordsep_simple_re_uni = re.compile(
self.wordsep_simple_re.pattern, re.U)
# -- Private methods -----------------------------------------------
# (possibly useful for subclasses to override)
def _munge_whitespace(self, text):
"""_munge_whitespace(text : string) -> string
Munge whitespace in text: expand tabs and convert all other
whitespace characters to spaces. Eg. " foo\\tbar\\n\\nbaz"
becomes " foo bar baz".
"""
if self.expand_tabs:
text = text.expandtabs()
if self.replace_whitespace:
if isinstance(text, str):
text = text.translate(self.whitespace_trans)
elif isinstance(text, _unicode):
text = text.translate(self.unicode_whitespace_trans)
return text
def _split(self, text):
"""_split(text : string) -> [string]
Split the text to wrap into indivisible chunks. Chunks are
not quite the same as words; see _wrap_chunks() for full
details. As an example, the text
Look, goof-ball -- use the -b option!
breaks into the following chunks:
'Look,', ' ', 'goof-', 'ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', 'option!'
if break_on_hyphens is True, or in:
'Look,', ' ', 'goof-ball', ' ', '--', ' ',
'use', ' ', 'the', ' ', '-b', ' ', option!'
otherwise.
"""
if isinstance(text, _unicode):
if self.break_on_hyphens:
pat = self.wordsep_re_uni
else:
pat = self.wordsep_simple_re_uni
else:
if self.break_on_hyphens:
pat = self.wordsep_re
else:
pat = self.wordsep_simple_re
chunks = pat.split(text)
chunks = filter(None, chunks) # remove empty chunks
return chunks
def _fix_sentence_endings(self, chunks):
"""_fix_sentence_endings(chunks : [string])
Correct for sentence endings buried in 'chunks'. Eg. when the
original text contains "... foo.\\nBar ...", munge_whitespace()
and split() will convert that to [..., "foo.", " ", "Bar", ...]
which has one too few spaces; this method simply changes the one
space to two.
"""
i = 0
patsearch = self.sentence_end_re.search
while i < len(chunks)-1:
if chunks[i+1] == " " and patsearch(chunks[i]):
chunks[i+1] = " "
i += 2
else:
i += 1
def _handle_long_word(self, reversed_chunks, cur_line, cur_len, width):
"""_handle_long_word(chunks : [string],
cur_line : [string],
cur_len : int, width : int)
Handle a chunk of text (most likely a word, not whitespace) that
is too long to fit in any line.
"""
# Figure out when indent is larger than the specified width, and make
# sure at least one character is stripped off on every pass
if width < 1:
space_left = 1
else:
space_left = width - cur_len
# If we're allowed to break long words, then do so: put as much
# of the next chunk onto the current line as will fit.
if self.break_long_words:
cur_line.append(reversed_chunks[-1][:space_left])
reversed_chunks[-1] = reversed_chunks[-1][space_left:]
# Otherwise, we have to preserve the long word intact. Only add
# it to the current line if there's nothing already there --
# that minimizes how much we violate the width constraint.
elif not cur_line:
cur_line.append(reversed_chunks.pop())
# If we're not allowed to break long words, and there's already
# text on the current line, do nothing. Next time through the
# main loop of _wrap_chunks(), we'll wind up here again, but
# cur_len will be zero, so the next line will be entirely
# devoted to the long word that we can't handle right now.
def _wrap_chunks(self, chunks):
"""_wrap_chunks(chunks : [string]) -> [string]
Wrap a sequence of text chunks and return a list of lines of
length 'self.width' or less. (If 'break_long_words' is false,
some lines may be longer than this.) Chunks correspond roughly
to words and the whitespace between them: each chunk is
indivisible (modulo 'break_long_words'), but a line break can
come between any two chunks. Chunks should not have internal
whitespace; ie. a chunk is either all whitespace or a "word".
Whitespace chunks will be removed from the beginning and end of
lines, but apart from that whitespace is preserved.
"""
lines = []
if self.width <= 0:
raise ValueError("invalid width %r (must be > 0)" % self.width)
# Arrange in reverse order so items can be efficiently popped
# from a stack of chucks.
chunks.reverse()
while chunks:
# Start the list of chunks that will make up the current line.
# cur_len is just the length of all the chunks in cur_line.
cur_line = []
cur_len = 0
# Figure out which static string will prefix this line.
if lines:
indent = self.subsequent_indent
else:
indent = self.initial_indent
# Maximum width for this line.
width = self.width - len(indent)
# First chunk on line is whitespace -- drop it, unless this
# is the very beginning of the text (ie. no lines started yet).
if self.drop_whitespace and chunks[-1].strip() == '' and lines:
del chunks[-1]
while chunks:
l = len(chunks[-1])
# Can at least squeeze this chunk onto the current line.
if cur_len + l <= width:
cur_line.append(chunks.pop())
cur_len += l
# Nope, this line is full.
else:
break
# The current line is full, and the next chunk is too big to
# fit on *any* line (not just this one).
if chunks and len(chunks[-1]) > width:
self._handle_long_word(chunks, cur_line, cur_len, width)
# If the last chunk on this line is all whitespace, drop it.
if self.drop_whitespace and cur_line and cur_line[-1].strip() == '':
del cur_line[-1]
# Convert current line back to a string and store it in list
# of all lines (return value).
if cur_line:
lines.append(indent + ''.join(cur_line))
return lines
# -- Public interface ----------------------------------------------
def wrap(self, text):
"""wrap(text : string) -> [string]
Reformat the single paragraph in 'text' so it fits in lines of
no more than 'self.width' columns, and return a list of wrapped
lines. Tabs in 'text' are expanded with string.expandtabs(),
and all other whitespace characters (including newline) are
converted to space.
"""
text = self._munge_whitespace(text)
chunks = self._split(text)
if self.fix_sentence_endings:
self._fix_sentence_endings(chunks)
return self._wrap_chunks(chunks)
def fill(self, text):
"""fill(text : string) -> string
Reformat the single paragraph in 'text' to fit in lines of no
more than 'self.width' columns, and return a new string
containing the entire wrapped paragraph.
"""
return "\n".join(self.wrap(text))
# -- Convenience interface ---------------------------------------------
def wrap(text, width=70, **kwargs):
"""Wrap a single paragraph of text, returning a list of wrapped lines.
Reformat the single paragraph in 'text' so it fits in lines of no
more than 'width' columns, and return a list of wrapped lines. By
default, tabs in 'text' are expanded with string.expandtabs(), and
all other whitespace characters (including newline) are converted to
space. See TextWrapper class for available keyword args to customize
wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.wrap(text)
def fill(text, width=70, **kwargs):
"""Fill a single paragraph of text, returning a new string.
Reformat the single paragraph in 'text' to fit in lines of no more
than 'width' columns, and return a new string containing the entire
wrapped paragraph. As with wrap(), tabs are expanded and other
whitespace characters converted to space. See TextWrapper class for
available keyword args to customize wrapping behaviour.
"""
w = TextWrapper(width=width, **kwargs)
return w.fill(text)
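# Minimal usage sketch (editorial example, not part of the module's docs):
# wrap() returns a list of lines, fill() a single newline-joined string.
#
#   >>> wrap("The quick brown fox jumps over the lazy dog", width=20)
#   ['The quick brown fox', 'jumps over the lazy', 'dog']
#   >>> print fill("The quick brown fox jumps over the lazy dog", width=20)
#   The quick brown fox
#   jumps over the lazy
#   dog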
# -- Loosely related functionality -------------------------------------
_whitespace_only_re = re.compile('^[ \t]+$', re.MULTILINE)
_leading_whitespace_re = re.compile('(^[ \t]*)(?:[^ \t\n])', re.MULTILINE)
def dedent(text):
"""Remove any common leading whitespace from every line in `text`.
This can be used to make triple-quoted strings line up with the left
edge of the display, while still presenting them in the source code
in indented form.
Note that tabs and spaces are both treated as whitespace, but they
are not equal: the lines " hello" and "\\thello" are
considered to have no common leading whitespace. (This behaviour is
new in Python 2.5; older versions of this module incorrectly
expanded tabs before searching for common leading whitespace.)
Entirely blank lines are normalized to a newline character.
"""
# Look for the longest leading string of spaces and tabs common to
# all lines.
margin = None
text = _whitespace_only_re.sub('', text)
indents = _leading_whitespace_re.findall(text)
for indent in indents:
if margin is None:
margin = indent
# Current line more deeply indented than previous winner:
# no change (previous winner is still on top).
elif indent.startswith(margin):
pass
# Current line consistent with and no deeper than previous winner:
# it's the new winner.
elif margin.startswith(indent):
margin = indent
# Find the largest common whitespace between current line and previous
# winner.
else:
for i, (x, y) in enumerate(zip(margin, indent)):
if x != y:
margin = margin[:i]
break
else:
margin = margin[:len(indent)]
# sanity check (testing/debugging only)
if 0 and margin:
for line in text.split("\n"):
assert not line or line.startswith(margin), \
"line = %r, margin = %r" % (line, margin)
if margin:
text = re.sub(r'(?m)^' + margin, '', text)
return text
if __name__ == "__main__":
#print dedent("\tfoo\n\tbar")
#print dedent(" \thello there\n \t how are you?")
print dedent("Hello there.\n This is indented.")
| apache-2.0 | 2,995,578,242,217,354,000 | 39.092807 | 80 | 0.591956 | false |
GoogleCloudPlatform/solutions-vision-search | categorizer/trainer/task.py | 1 | 9255 | # Copyright 2018 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import division
from __future__ import print_function
import argparse
import json
import os
import subprocess
import sys
import zipfile
import numpy as np
import resources
from six import iteritems
from six.moves import urllib
import tensorflow as tf
from tensorflow.core.protobuf import meta_graph_pb2
from tensorflow.python.platform import gfile
from tensorflow.python.saved_model import builder
from tensorflow.python.saved_model import signature_constants
from tensorflow.python.saved_model import signature_def_utils
from tensorflow.python.saved_model import tag_constants
# Source URL for downloading GloVe embeddings
SOURCE_URL_PATH = 'http://nlp.stanford.edu/data/glove.6B.zip'
def maybe_download_and_extract(filename, data_dir, source_url):
"""Maybe download and extract a file."""
if not gfile.Exists(data_dir):
gfile.MakeDirs(data_dir)
filepath = os.path.join(data_dir, filename)
if not gfile.Exists(filepath):
print('Downloading from {}'.format(source_url))
temp_file_name, _ = urllib.request.urlretrieve(source_url)
gfile.Copy(temp_file_name, filepath)
with gfile.GFile(filepath) as f:
size = f.size()
print('Successfully downloaded \'{}\' of {} bytes'.format(filename, size))
if filename.endswith('.zip'):
print('Extracting {}'.format(filename))
zipfile.ZipFile(file=filepath, mode='r').extractall(data_dir)
def read_categories_as_json():
"""Reads JSON file containing words to calculate category vectors."""
cat_file_path = os.path.join(resources.__path__[0],
'categories.json')
with open(cat_file_path) as json_data:
data = json.load(json_data)
num_categories = len(data['categories'])
num_words_per_category = len(data['categories']['0'])
category_words = np.empty([num_categories, num_words_per_category],
dtype='S48')
for i in range(0, num_categories):
for j in range(0, num_words_per_category):
category_words[i][j] = data['categories'][str(i)][j]
return category_words
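# The categories.json resource is expected (an assumption inferred from the
# indexing above, not a copy of the real file) to map category ids to equally
# sized lists of label words, for example:
#
#   {
#     "categories": {
#       "0": ["food", "meal", "dish"],
#       "1": ["animal", "pet", "wildlife"]
#     }
#   }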
def read_glove_embeddings(filename):
"""Read the GloVe embeddings and return vocab and embedding lists."""
vocab, embed = [], []
with open(filename, 'rb') as f:
for line in f:
tokens = line.decode('utf-8').rstrip().split()
vocab.append(tokens[0])
embed.append(tokens[1:])
print('Size of vocabulary is {}'.format(len(vocab)))
return vocab, embed
def get_category_embeddings(word_table, embeds):
"""Calculate embeddings from word labels for each category."""
category_words = read_categories_as_json()
word_ids = word_table.lookup(tf.constant(category_words))
glove_embeds = tf.nn.embedding_lookup(embeds, word_ids)
# Calculate category embedding by summing word vectors in each category
# tf.reduce_sum is used as the category embedding will be normalized later
category_embeds = tf.reduce_sum(glove_embeds, axis=1)
expand_category_embeds = tf.expand_dims(category_embeds, axis=1)
return expand_category_embeds
def get_label_embedding(labels, scores, word_table, embeds):
"""Calculate embeddings from word labels for each image."""
word_ids = word_table.lookup(labels)
glove_embeds = tf.nn.embedding_lookup(embeds, word_ids)
normalized_scores = tf.divide(scores, tf.reduce_sum(scores))
expanded_scores = tf.expand_dims(normalized_scores, axis=2)
# Calculate linear combination of word vectors and scores
# tf.reduce_sum is used as the label embedding will be normalized later
labels_embed = tf.reduce_sum(tf.multiply(glove_embeds, expanded_scores),
axis=1)
return labels_embed
def get_similarity(labels_embed, category_embeds):
"""Calculate the similarity between image and category embeddings."""
labels_embed = tf.nn.l2_normalize(labels_embed, 1)
category_embeds = tf.nn.l2_normalize(category_embeds, 2)
cos_similarity = tf.reduce_sum(tf.multiply(labels_embed, category_embeds),
axis=2)
transpose_similarity = tf.transpose(cos_similarity)
return transpose_similarity
def export_model(glove_filepath, model_dir, gcs_output_path):
"""Exports TensorFlow model."""
vocab, embed = read_glove_embeddings(glove_filepath)
# Add a zero vector for unknown words
embed.insert(0, np.zeros(len(embed[0])).tolist())
vocab.insert(0, '<UNK>')
sess = tf.Session()
with tf.Session(graph=tf.Graph()) as sess:
# Store the GloVe embeddings
embeddings = tf.Variable(tf.constant(np.array(embed).astype(np.float32),
shape=[len(embed), len(embed[0])]),
trainable=False, name='embeddings')
# Define a lookup table to convert word strings to ids
# that correspond to the index positions of the list.
word_table = tf.contrib.lookup.index_table_from_tensor(
mapping=tf.constant(vocab),
num_oov_buckets=0,
default_value=0)
# Initialize global vars and word lookup table
init_op = tf.global_variables_initializer()
table_init_op = tf.tables_initializer()
sess.run([init_op, table_init_op])
# Get category embeddings and labels
category_embeds = get_category_embeddings(word_table, embeddings)
# Define prediction graph input placeholders
labels_placeholder = tf.placeholder(tf.string, [None, None])
scores_placeholder = tf.placeholder(tf.float32, [None, None])
labels_embed = get_label_embedding(labels_placeholder,
scores_placeholder,
word_table,
embeddings)
similarity = get_similarity(labels_embed, category_embeds)
inputs = {
'labels': labels_placeholder,
'scores': scores_placeholder
}
input_signatures = {}
for key, val in iteritems(inputs):
predict_input_tensor = meta_graph_pb2.TensorInfo()
predict_input_tensor.name = val.name
predict_input_tensor.dtype = val.dtype.as_datatype_enum
input_signatures[key] = predict_input_tensor
outputs = {'prediction': similarity}
output_signatures = {}
for key, val in iteritems(outputs):
predict_output_tensor = meta_graph_pb2.TensorInfo()
predict_output_tensor.name = val.name
predict_output_tensor.dtype = val.dtype.as_datatype_enum
output_signatures[key] = predict_output_tensor
inputs_name, outputs_name = {}, {}
for key, val in iteritems(inputs):
inputs_name[key] = val.name
for key, val in iteritems(outputs):
outputs_name[key] = val.name
tf.add_to_collection('inputs', json.dumps(inputs_name))
tf.add_to_collection('outputs', json.dumps(outputs_name))
predict_signature_def = signature_def_utils.build_signature_def(
input_signatures, output_signatures,
signature_constants.PREDICT_METHOD_NAME)
build = builder.SavedModelBuilder(model_dir)
build.add_meta_graph_and_variables(
sess, [tag_constants.SERVING],
signature_def_map={
signature_constants.DEFAULT_SERVING_SIGNATURE_DEF_KEY:
predict_signature_def
},
legacy_init_op=tf.saved_model.main_op.main_op(),
assets_collection=tf.get_collection(tf.GraphKeys.ASSET_FILEPATHS))
# Finally save the model
build.save()
# Copy to GCS
if gcs_output_path:
gcs_copy(model_dir, gcs_output_path)
def gcs_copy(source, dest):
"""Copies files to and from Google Cloud Storage."""
print('Recursively copying from {} to {}'.format(source, dest))
subprocess.check_call(['gsutil', '-q', '-m', 'cp', '-R']
+ [source] + [dest])
def main(_):
maybe_download_and_extract('glove.6B.zip', FLAGS.data_dir, SOURCE_URL_PATH)
glove_filepath = os.path.join(FLAGS.data_dir, 'glove.6B.50d.txt')
if gfile.Exists(glove_filepath):
print('Exporting model to directory \'{}\''.format(FLAGS.model_dir))
export_model(glove_filepath, FLAGS.model_dir, FLAGS.gcs_output_path)
else:
print('Could not find file \'{}\''.format(glove_filepath))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
'--model_dir',
type=str,
default='/tmp/model',
help='Base directory for output models.'
)
parser.add_argument(
'--data_dir',
type=str,
default='/tmp/data',
help='Work directory for downloaded files.'
)
parser.add_argument(
'--gcs_output_path',
type=str,
default=None,
help='Google Cloud Storage output path.'
)
FLAGS, unparsed = parser.parse_known_args()
tf.app.run(main=main, argv=[sys.argv[0]] + unparsed)
| apache-2.0 | -4,837,415,702,709,947,000 | 33.924528 | 78 | 0.676715 | false |
CN-UPB/OpenBarista | components/decaf-oscar/decaf_oscar/bunnyconnector.py | 1 | 7381 | from twisted.internet import defer
from decaf_utils_rpc.rpc_layer import RpcLayer
from . import __version__
from . import version_date
__author__ = "Andreas Krakau"
__date__ = "$20-oct-2015 12:22:23$"
class BunnyConnector(object):
def __init__(self, url, logger):
self.logger = logger
self._rpc = None
self.rpc = RpcLayer(url, logger=logger)
@property
def rpc(self):
return self._rpc
@rpc.setter
def rpc(self, value):
if self._rpc is not None:
self.deregister_rpc()
self._rpc = value
if self._rpc is not None:
self.register_rpc()
def deregister_rpc(self):
# deregister endpoints here
self.logger.debug('Deregistering endpoints...')
self.rpc.deregister('decaf_oscar.version')
self.rpc.deregister('decaf_oscar.functions')
self.rpc.deregister('decaf_oscar.contracts')
self.rpc.deregister('decaf_oscar.scenario_start')
self.logger.debug('Endpoints deregistered')
def register_rpc(self):
# register endpoints here
self.logger.debug('Registering endpoints...')
self.rpc.register('decaf_oscar.version', self.version)
self.rpc.register('decaf_oscar.functions', self.functions)
self.rpc.register('decaf_oscar.contracts', self.contracts)
self.rpc.register('decaf_oscar.scenario_start', self.scenario_start)
self.logger.debug('Endpoints registered')
@rpc.setter
def rpc(self, value):
if self._rpc is not None:
self.deregister_rpc()
self._rpc = value
if self._rpc is not None:
self.register_rpc()
def version(self):
return {'version': __version__,
'date': version_date}
def functions(self):
"""
Gets a list of endpoints provided by this component. This function implements the core.functions contract and
is required by the component manager.
:return:
"""
return {'functions': [
{
'name': 'version',
'contract': 'core.version'
},
{
'name': 'functions',
'contract': 'core.functions'
},
{
'name': 'contracts',
'contract': 'core.contracts'
},
{
'name': 'scenario_start',
'contract': 'oscar.deployment.scenario_start'
}
]}
def contracts(self):
"""
Gets a list of contracts provided by this component. This function implements the core.contracts contract and
is required by the component manager.
:return:
"""
return {'contracts': [
{
'name': 'core.version',
'input': [],
'output': [
{
'name': 'version',
'type': 'Version'
},
{
'name': 'date',
'type': 'DateTime'
},
]
},
{
'name': 'core.functions',
'input': [],
'output': [
{
'name': 'functions',
'type': 'FunctionList'
}
]
},
{
'name': 'core.contracts',
'input': [],
'output': [
{
'name': 'contracts',
'type': 'ContractList'
}
]
},
{
'name': 'oscar.deployment.scenario_start',
'input': [
{
'name': 'scenario_id',
'type': 'UUID'
},
{
'name': 'scenario_instance_name',
'type': 'String'
},
{
'name': 'datacenter_id',
'type': 'UUID'
},
{
'name': 'startvms',
'type': 'Boolean'
}
],
'output': [
{
'name': 'scenario_instance_id',
'type': 'UUID'
}
]
}
]}
@defer.inlineCallbacks
def scenario_start(self, scenario_id, scenario_instance_name, datacenter_id, startvms):
callees = []
try:
callees = (yield self.rpc.call('decaf_componentmanager.component_list_by_contract',
contract_name="deployment.scenario_start"))
self.logger.debug("Callees dump %r"%callees)
except Exception as err:
self.logger.exception("Could not resolve contract 'deployment.scenario_start'")
defer.returnValue(
{'scenario_instance_id': None, 'error': {'message': "Could not resolve contract 'deployment.scenario_start'"}})
self.logger.debug("Has found callees: %r %r" % (isinstance(callees, list), len(callees) > 0))
if isinstance(callees['result'], list) and len(callees['result'])>0:
try:
component = callees['result'][0]['name']
function = callees['result'][0]['functions'][0]['name']
self.logger.exception("Try to call '%s.%s'" % (component, function))
result = (yield self.rpc.call('%s.%s'% (component, function), scenario_id, scenario_instance_name, datacenter_id, startvms))
defer.returnValue(result)
except Exception as err:
self.logger.exception("Could not start scenario")
defer.returnValue(
{'scenario_instance_id': None, 'error': {'message': "Could not start scenario"}})
@defer.inlineCallbacks
def call_contract(self, contract, callback=None, function_args=[], function_kwargs={}, callback_args=[],
callback_kwargs={}):
callees = (yield self.rpc.call('decaf_componentmanager.component_list_by_contract', contract=contract))
if isinstance(callees, list) and len(callees) > 0:
self.call(callees[0]['functions'][0]['name'], callees[0]['name'], callback, function_args, function_kwargs,
callback_args, callback_kwargs)
def call(self, function, component=None, callback=None, function_args=[], function_kwargs={}, callback_args=[],
callback_kwargs={}):
rpc_name = function
if component is not None:
rpc_name = component + "." + rpc_name
else:
rpc_name = "*." + rpc_name
d = self.rpc.call(rpc_name, *function_args, **function_kwargs)
if callback is not None:
d.addCallback(callback, *callback_args, **callback_kwargs)
def __del__(self):
self.dispose()
def dispose(self):
self.logger.debug('Closing rpc connection')
if self.rpc is not None:
self.deregister_rpc()
self.rpc.dispose()
self._rpc = None
| mpl-2.0 | 8,531,846,525,825,336,000 | 34.485577 | 140 | 0.482049 | false |
Alignak-monitoring-contrib/alignak-backend | alignak_backend/models/alignakdaemon.py | 1 | 5590 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
Resource information of Alignak daemons
"""
def get_name(friendly=False):
"""Get name of this resource
:return: name of this resource
:rtype: str
"""
if friendly: # pragma: no cover
return 'Alignak daemons live state'
return 'alignakdaemon'
def get_doc(): # pragma: no cover
"""Get documentation of this resource
:return: rst string
:rtype: str
"""
return """
The ``alignakdaemon`` model is maintained by Alignak to provide the live state of
the Alignak daemons.
For hosts and services, the live synthesis stores values computed from the real
live state, each time an element state is updated:
- a counter containing the number of host/service in each state
- a counter containing the number of host/service acknowledged
- a counter containing the number of host/service in downtime
- a counter containing the number of host/service flapping
- the maximum business impact of the host/service in the state
"""
def get_schema():
"""Schema structure of this resource
:return: schema dictionary
:rtype: dict
"""
return {
'schema': {
'schema_version': {
'type': 'integer',
'default': 1,
},
'name': {
'schema_version': 1,
'title': 'Daemon name',
'comment': 'Unique daemon name',
'type': 'string',
'required': True,
'empty': False,
},
'address': {
'schema_version': 1,
'title': 'Address',
'type': 'string',
'required': True,
'empty': False,
},
'port': {
'schema_version': 1,
'title': 'Port',
'type': 'integer',
'required': True,
'empty': False,
},
'last_check': {
'schema_version': 1,
'title': 'Last check',
'comment': 'Last time the daemon was checked',
'type': 'integer',
'required': True,
'empty': False,
},
'alive': {
'schema_version': 1,
'title': 'Alive',
'comment': 'The daemon is alive',
'type': 'boolean',
'required': True,
'default': False
},
'reachable': {
'schema_version': 1,
'title': 'Reachable',
'comment': 'The daemon is reachable',
'type': 'boolean',
'required': True,
'default': False
},
'passive': {
'schema_version': 1,
'title': 'Passive',
'comment': 'The daemon is a passive daemon',
'type': 'boolean',
'required': True,
'default': False
},
'spare': {
'schema_version': 1,
'title': 'Spare',
'comment': 'The daemon is a spare daemon',
'type': 'boolean',
'required': True,
'default': False
},
'type': {
'schema_version': 1,
'title': 'Type',
'comment': 'Daemon type: "arbiter", "scheduler", "poller", '
'"broker", "reactionner", "receiver"',
'type': 'string',
'required': True,
'allowed': ['arbiter', 'scheduler', 'poller', 'broker', 'reactionner', 'receiver']
},
# Realm
'_realm': {
'schema_version': 1,
'title': 'Realm',
'comment': 'Realm this element belongs to.',
'type': 'objectid',
'data_relation': {
'resource': 'realm',
'embeddable': True
},
'required': True,
},
'_sub_realm': {
'schema_version': 1,
'title': 'Sub-realms',
'comment': 'Is this element visible in the sub-realms of its realm?',
'type': 'boolean',
'default': True
},
# Users CRUD permissions
'_users_read': {
'schema_version': 1,
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True,
}
},
},
'_users_update': {
'schema_version': 1,
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True,
}
},
},
'_users_delete': {
'schema_version': 1,
'type': 'list',
'schema': {
'type': 'objectid',
'data_relation': {
'resource': 'user',
'embeddable': True,
}
},
},
},
'schema_deleted': {}
}
| agpl-3.0 | 6,579,910,739,044,922,000 | 30.22905 | 98 | 0.401252 | false |
n1cfury/BlackHatPython | pic_carver.py | 1 | 2116 |
import re
import zlib
import cv2

from scapy.all import *

# Assumed example locations; adjust these to your environment.
pictures_directory = "/tmp/pic_carver/pictures"
faces_directory = "/tmp/pic_carver/faces"
pcap_file = "bhp.pcap"

def banner():
	print "[***] pCAP processing p56 [***]"

def http_assembler(pcap_file):
	carved_images = 0
	faces_detected = 0
	a = rdpcap(pcap_file)
	sessions = a.sessions()
	for session in sessions:
		http_payload = ""
		for packet in sessions[session]:
			try:
				if packet[TCP].dport == 80 or packet[TCP].sport == 80:
					# reassemble the TCP stream into one buffer
					http_payload += str(packet[TCP].payload)
			except:
				pass
		headers = get_http_headers(http_payload)
		if headers is None:
			continue
		image, image_type = extract_image(headers, http_payload)
		if image is not None and image_type is not None:
			# store the carved image to disk
			file_name = "%s-pic_carver_%d.%s" % (pcap_file, carved_images, image_type)
			fd = open("%s/%s" % (pictures_directory, file_name), "wb")
			fd.write(image)
			fd.close()
			carved_images += 1
			# now attempt face detection on the carved image
			try:
				result = face_detect("%s/%s" % (pictures_directory, file_name), file_name)
				if result is True:
					faces_detected += 1
			except:
				pass
	return carved_images, faces_detected

def main():
	# The original module-level driver code is wrapped in main() so that the
	# helper functions below are defined before they are called.
	banner()
	carved_images, faces_detected = http_assembler(pcap_file)
	print "Extracted: %d images" % carved_images
	print "Detected: %d faces" % faces_detected

def get_http_headers(http_payload):
	try:
		# split the headers off the front of the HTTP payload
		headers_raw = http_payload[:http_payload.index("\r\n\r\n") + 2]
		headers = dict(re.findall(r"(?P<name>.*?): (?P<value>.*?)\r\n", headers_raw))
	except:
		return None
	if "Content-Type" not in headers:
		return None
	return headers

def extract_image(headers, http_payload):
	image = None
	image_type = None
	try:
		if "image" in headers['Content-Type']:
			# grab the image type and the image body
			image_type = headers['Content-Type'].split("/")[1]
			image = http_payload[http_payload.index("\r\n\r\n") + 4:]
			# if the body is compressed, decompress it before carving
			try:
				if "Content-Encoding" in headers.keys():
					if headers['Content-Encoding'] == "gzip":
						image = zlib.decompress(image, 16 + zlib.MAX_WBITS)
					elif headers['Content-Encoding'] == "deflate":
						image = zlib.decompress(image)
			except:
				pass
	except:
		return None, None
	return image, image_type
def face_detect(path, file_name):
img = cv2.imread(path)
cascade = cs2.CascadeClassifier("haarcascade_frontalface_alt.xml")
rects = cascade.detectMultiScale(img, 1.3, 4, cv2.cv.CV_HAAR_SCALE_IMAGE, (20,20))
if len(rects) == 0:
return False:
rects[:, 2:] += rects[:, :2]
for x1,y1,x2,y2 in rects:
cv2.rectangle(img,(x1,y1),(x2,y2),(127,255,0),2)
cv2.imwrite("%s/%s-%s" % (faces_directory,pcap_file,file_name),img)
return True
| mit | 6,836,528,785,822,264,000 | 28.802817 | 83 | 0.65879 | false |
PageArkanis/StEVE | steve/assets.py | 1 | 3768 | from steve.backend.sqlitedb import SDB
from steve.item import Item
from steve.type import Type
from steve.market_group import MarketGroup
from steve.planet_schematic import PlanetSchematic
class Assets(object):
def __init__(self):
self._types = {} # { <uid>/<name> : Type }
self._items = {} # { <uid>/<name> : Item }
self._blueprints = {} # { <uid>/<name> : Type }
self._marketGroups = {} # { <uid> : MarketGroup }
self._marketGroupsByName = {} # { <name> : [MarketGroup*] }
self._flags = {} # { <uid> : <name> }
self._unique_names = {} # { <uid> : <name>, <name> : <uid> }
self._planet_schematics = {} # { <uid> : PlanetSchematic }
self.testMarketGroup = MarketGroup(self, [-1, None, 'Test', '', 0, True])
@property
def type(self):
if len(self._types) == 0:
for entry in SDB.queryAll('SELECT * from invTypes'):
_type = Type(self, entry)
self._types[_type.uid] = _type
self._types[_type.name] = _type
return self._types
@property
def types(self):
return [v for k, v in self.type.items() if isinstance(k, int)]
@property
def item(self):
if len(self._items) == 0:
for entry in SDB.queryAll('SELECT * from invItems'):
item = Item(self, entry)
self._items[item.uid] = item
return self._items
@property
def blueprint(self):
if len(self._blueprints) == 0:
query = 'SELECT typeID from industryActivity WHERE activityID = 5'
for entry in SDB.queryAll(query):
_object = self.type.get(entry[0])
if _object:
self._blueprints[_object.name] = _object
self._blueprints[_object.uid] = _object
return self._blueprints
@property
def marketGroups(self):
if len(self._marketGroups) == 0:
for entry in SDB.queryAll('SELECT * FROM invMarketGroups'):
obj = MarketGroup(self, entry)
self._marketGroups[obj.uid] = obj
if obj.name not in self._marketGroupsByName:
self._marketGroupsByName[obj.name] = []
self._marketGroupsByName[obj.name].append(obj)
return self._marketGroups
@property
def marketGroupsByName(self):
if len(self._marketGroupsByName) == 0:
_ = self.marketGroups
return self._marketGroupsByName
@property
def inventoryFlag(self):
if len(self._flags) == 0:
for entry in SDB.queryAll('SELECT flagID, flagText FROM invFlags'):
self._flags[entry[0]] = entry[1]
return self._flags
@property
def planetSchematic(self):
if len(self._planet_schematics) == 0:
query = 'SELECT * from planetSchematics'
for entry in SDB.queryAll(query):
_object = PlanetSchematic(self, entry)
if _object:
self._planet_schematics[_object.name] = _object
self._planet_schematics[_object.uid] = _object
return self._planet_schematics
@property
def uname(self):
if len(self._unique_names) == 0:
for entry in SDB.queryAll('SELECT * FROM invUniqueNames'):
self._unique_names[entry[0]] = entry[1]
self._unique_names[entry[1]] = entry[2]
return self._unique_names
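# Minimal usage sketch (illustrative; assumes the SQLite dump behind
# steve.backend.sqlitedb.SDB is present and populated -- the type name below
# is only an example):
#
#   assets = Assets()
#   trit = assets.type['Tritanium']      # lookup by name or by typeID
#   same = assets.type[trit.uid]         # both keys resolve to the same Type
#   print(len(assets.types))             # Type objects keyed by uid only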
| agpl-3.0 | -7,003,370,684,942,601,000 | 30.932203 | 81 | 0.513004 | false |
ProfessorX/Config | .PyCharm30/system/python_stubs/-1247972723/gtk/_gtk/RecentChooserWidget.py | 1 | 6957 | # encoding: utf-8
# module gtk._gtk
# from /usr/lib/python2.7/dist-packages/gtk-2.0/gtk/_gtk.so
# by generator 1.135
# no doc
# imports
import atk as __atk
import gio as __gio
import gobject as __gobject
import gobject._gobject as __gobject__gobject
from VBox import VBox
from RecentChooser import RecentChooser
class RecentChooserWidget(VBox, RecentChooser):
"""
Object GtkRecentChooserWidget
Signals from GtkRecentChooser:
selection-changed ()
item-activated ()
Properties from GtkBox:
spacing -> gint: Spacing
The amount of space between children
homogeneous -> gboolean: Homogeneous
Whether the children should all be the same size
Signals from GtkContainer:
add (GtkWidget)
remove (GtkWidget)
check-resize ()
set-focus-child (GtkWidget)
Properties from GtkContainer:
border-width -> guint: Border width
The width of the empty border outside the containers children
resize-mode -> GtkResizeMode: Resize mode
Specify how resize events are handled
child -> GtkWidget: Child
Can be used to add a new child to the container
Signals from GtkWidget:
composited-changed ()
show ()
hide ()
map ()
unmap ()
realize ()
unrealize ()
size-request (GtkRequisition)
size-allocate (GdkRectangle)
state-changed (GtkStateType)
parent-set (GtkWidget)
hierarchy-changed (GtkWidget)
style-set (GtkStyle)
direction-changed (GtkTextDirection)
grab-notify (gboolean)
child-notify (GParam)
mnemonic-activate (gboolean) -> gboolean
grab-focus ()
focus (GtkDirectionType) -> gboolean
move-focus (GtkDirectionType)
event (GdkEvent) -> gboolean
event-after (GdkEvent)
button-press-event (GdkEvent) -> gboolean
button-release-event (GdkEvent) -> gboolean
scroll-event (GdkEvent) -> gboolean
motion-notify-event (GdkEvent) -> gboolean
keynav-failed (GtkDirectionType) -> gboolean
delete-event (GdkEvent) -> gboolean
destroy-event (GdkEvent) -> gboolean
expose-event (GdkEvent) -> gboolean
key-press-event (GdkEvent) -> gboolean
key-release-event (GdkEvent) -> gboolean
enter-notify-event (GdkEvent) -> gboolean
leave-notify-event (GdkEvent) -> gboolean
configure-event (GdkEvent) -> gboolean
focus-in-event (GdkEvent) -> gboolean
focus-out-event (GdkEvent) -> gboolean
map-event (GdkEvent) -> gboolean
unmap-event (GdkEvent) -> gboolean
property-notify-event (GdkEvent) -> gboolean
selection-clear-event (GdkEvent) -> gboolean
selection-request-event (GdkEvent) -> gboolean
selection-notify-event (GdkEvent) -> gboolean
selection-received (GtkSelectionData, guint)
selection-get (GtkSelectionData, guint, guint)
proximity-in-event (GdkEvent) -> gboolean
proximity-out-event (GdkEvent) -> gboolean
drag-leave (GdkDragContext, guint)
drag-begin (GdkDragContext)
drag-end (GdkDragContext)
drag-data-delete (GdkDragContext)
drag-failed (GdkDragContext, GtkDragResult) -> gboolean
drag-motion (GdkDragContext, gint, gint, guint) -> gboolean
drag-drop (GdkDragContext, gint, gint, guint) -> gboolean
drag-data-get (GdkDragContext, GtkSelectionData, guint, guint)
drag-data-received (GdkDragContext, gint, gint, GtkSelectionData, guint, guint)
visibility-notify-event (GdkEvent) -> gboolean
client-event (GdkEvent) -> gboolean
no-expose-event (GdkEvent) -> gboolean
window-state-event (GdkEvent) -> gboolean
damage-event (GdkEvent) -> gboolean
grab-broken-event (GdkEvent) -> gboolean
query-tooltip (gint, gint, gboolean, GtkTooltip) -> gboolean
popup-menu () -> gboolean
show-help (GtkWidgetHelpType) -> gboolean
accel-closures-changed ()
screen-changed (GdkScreen)
can-activate-accel (guint) -> gboolean
Properties from GtkWidget:
name -> gchararray: Widget name
The name of the widget
parent -> GtkContainer: Parent widget
The parent widget of this widget. Must be a Container widget
width-request -> gint: Width request
Override for width request of the widget, or -1 if natural request should be used
height-request -> gint: Height request
Override for height request of the widget, or -1 if natural request should be used
visible -> gboolean: Visible
Whether the widget is visible
sensitive -> gboolean: Sensitive
Whether the widget responds to input
app-paintable -> gboolean: Application paintable
Whether the application will paint directly on the widget
can-focus -> gboolean: Can focus
Whether the widget can accept the input focus
has-focus -> gboolean: Has focus
Whether the widget has the input focus
is-focus -> gboolean: Is focus
Whether the widget is the focus widget within the toplevel
can-default -> gboolean: Can default
Whether the widget can be the default widget
has-default -> gboolean: Has default
Whether the widget is the default widget
receives-default -> gboolean: Receives default
If TRUE, the widget will receive the default action when it is focused
composite-child -> gboolean: Composite child
Whether the widget is part of a composite widget
style -> GtkStyle: Style
The style of the widget, which contains information about how it will look (colors etc)
events -> GdkEventMask: Events
The event mask that decides what kind of GdkEvents this widget gets
extension-events -> GdkExtensionMode: Extension events
The mask that decides what kind of extension events this widget gets
no-show-all -> gboolean: No show all
Whether gtk_widget_show_all() should not affect this widget
has-tooltip -> gboolean: Has tooltip
Whether this widget has a tooltip
tooltip-markup -> gchararray: Tooltip markup
The contents of the tooltip for this widget
tooltip-text -> gchararray: Tooltip Text
The contents of the tooltip for this widget
window -> GdkWindow: Window
The widget's window if it is realized
double-buffered -> gboolean: Double Buffered
Whether or not the widget is double buffered
Signals from GtkObject:
destroy ()
Properties from GtkObject:
user-data -> gpointer: User Data
Anonymous User Data Pointer
Signals from GObject:
notify (GParam)
"""
def __init__(self, *args, **kwargs): # real signature unknown
pass
def __iter__(self): # real signature unknown; restored from __doc__
""" x.__iter__() <==> iter(x) """
pass
__gtype__ = None # (!) real value is ''
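# Illustrative PyGTK 2 usage sketch (not part of the generated stub; the
# callback name and containing box are assumptions):
#
#   chooser = gtk.RecentChooserWidget()
#   chooser.connect("item-activated",
#                   lambda widget: on_recent_item(widget.get_current_uri()))
#   some_vbox.pack_start(chooser, True, True, 0)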
| gpl-2.0 | -7,845,015,714,301,111,000 | 37.016393 | 95 | 0.665086 | false |
ryanmiao/libvirt-test-API | repos/storage/find_netfs_pool_sources.py | 1 | 2093 | #!/usr/bin/env python
# Test finding storage pool source of 'netfs' type
from xml.dom import minidom
import libvirt
from libvirt import libvirtError
from src import sharedmod
from utils import utils
required_params = ('sourcehost',)
optional_params = {'xml' : 'xmls/netfs_pool_source.xml',
}
def check_pool_sources(host, xmlstr):
"""check the netfs sources with command:
showmount --no-headers -e HOSTNAME
"""
source_val = []
doc = minidom.parseString(xmlstr)
for diskTag in doc.getElementsByTagName("source"):
device_element = diskTag.getElementsByTagName("dir")[0]
attr = device_element.getAttributeNode('path')
path_val = attr.nodeValue
source_val.append(path_val)
logger.debug("pool source info list is: %s" % source_val)
cmd = "showmount --no-headers -e %s | awk -F' ' '{print $1}'" % host
ret, path_list = utils.exec_cmd(cmd, shell=True)
logger.debug("showmount command output list is: %s" % path_list)
if source_val == path_list:
logger.info("source list matched with showmount command output")
return 0
else:
logger.error("source list did not match with showmount command output")
return 1
def find_netfs_pool_sources(params):
"""Find netfs type storage pool sources from xml"""
global logger
logger = params['logger']
sourcehost = params['sourcehost']
xmlstr = params['xml']
conn = sharedmod.libvirtobj['conn']
try:
logger.debug("storage source spec xml:\n%s" % xmlstr)
logger.info("find pool sources of netfs type")
source_xml = conn.findStoragePoolSources('netfs', xmlstr, 0)
logger.info("pool sources xml description is:\n %s" % source_xml)
ret = check_pool_sources(sourcehost, source_xml)
if ret:
logger.error("pool sources check failed")
return 1
else:
logger.info("pool sources check succeed")
except libvirtError, e:
logger.error("libvirt call failed: " + str(e))
return 1
return 0
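# Illustrative call (not part of the test framework): the harness normally
# supplies the params dict; the host name, logger and source spec below are
# placeholders.
#
#   import logging
#   find_netfs_pool_sources({'logger': logging.getLogger('netfs-test'),
#                            'sourcehost': 'nfs.example.com',
#                            'xml': "<source><host name='nfs.example.com'/></source>"})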
| gpl-2.0 | -795,277,630,839,177,000 | 28.478873 | 79 | 0.641663 | false |
lono-devices/lono-python | lono/device.py | 1 | 3815 |
class Device(object):
def __init__(self, client, device_id):
self.client = client
self.device_id = device_id
def set_zone(self, zone_id, state):
"""
set_zone(zone_id, state)
        Turn on or off a zone. When specifying a zone, keep in
mind they are zero based, so to turn on zone 1 you'd want
to specify 0 for the first parameter.
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
        > lc.get_device("device id").set_zone(0, True)
"""
return self.client.query_device(self.device_id, {
"url": "zones/{0}/{1}".format(zone_id, state and "on" or "off"),
"method": "post"
})
def get_zone(self):
"""
get_zone()
        Get the current zone that is enabled on Lono (None if no zone is on).
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
> lc.get_device("device id").get_zone()
"""
return self.client.query_device(self.device_id, {
"url": "zones/state",
"method": "get",
})
def set_led(self, mode="off", color=None, brightness=255, interval=None, times=None):
"""
        set_led(mode, color, brightness, interval, times)
Set the Lono's internal LED Color.
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
> lc.get_device("device id").set_led(*options)
"""
return self.client.query_device(self.device_id, {
"url": "state",
"method": "post",
"body": {
"color": color,
"mode": mode,
"brightness": brightness,
"interval": interval,
"times": times,
}
})
def get_lono_info(self):
"""
get_lono_info()
Get a bunch of metadata that is stored internally with Lono, like
        hardware revision information and basic scheduling options.
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
> lc.get_device("device id").get_lono_info()
"""
return self.client.query_device(self.device_id, {
"url": "",
"method": "get",
})
def get_zones_info(self):
"""
get_zones_info()
Get a bunch of metadata that is stored internally with each Zone, like
cycle time and soil type.
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
        > lc.get_device("device id").get_zones_info()
"""
return self.client.query_device(self.device_id, {
"url": "zones",
"method": "get",
})
def detect_zones(self):
"""
detect_zones()
Run a zone detect sequence to discover which zones have been attached to
Lono.
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
> lc.get_device("device id").detect_zones()
"""
return self.client.query_device(self.device_id, {
"url": "zones/detect",
"method": "post",
})
def put_zones(self, zones):
"""
put_zones(zones)
        Update the zones connected to a Lono with the zones specified. zones is
an array of zone objects (like what you'd receive from get_zones_info).
> lc = LonoClient(client_id="...", ...) # etc...
# ** connect to lono cloud **
> lc.get_device("device id").put_zones(zones)
"""
return self.client.query_device(self.device_id, {
"url": "zones",
"method": "put",
"body": {"zones": zones}
})
| mit | -8,552,196,591,376,769,000 | 26.846715 | 89 | 0.499607 | false |
ppries/tensorflow | tensorflow/python/debug/debug_data.py | 1 | 34237 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Data structures and helpers for TensorFlow Debugger (tfdbg)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import os
import numpy as np
from six.moves import xrange # pylint: disable=redefined-builtin
from tensorflow.core.util import event_pb2
from tensorflow.python.framework import tensor_util
def load_tensor_from_event_file(event_file_path):
"""Load a tensor from an event file.
Assumes that the event file contains a Event protobuf and the Event protobuf
contains a tensor.
Args:
event_file_path: Path to the event file.
Returns:
The tensor value loaded from the event file. For uninitialized tensors,
return None.
"""
event = event_pb2.Event()
with open(event_file_path, "rb") as f:
event.ParseFromString(f.read())
if (event.summary.value[0].tensor.tensor_content or
event.summary.value[0].tensor.string_val):
# Initialized tensor.
tensor_value = tensor_util.MakeNdarray(event.summary.value[0].tensor)
else:
# Uninitialized tensor.
tensor_value = None
return tensor_value
def parse_node_or_tensor_name(name):
"""Get the node name from a string that can be node or tensor name.
Args:
name: An input node name (e.g., "node_a") or tensor name (e.g.,
"node_a:0"), as a str.
Returns:
1) The node name, as a str. If the input name is a tensor name, i.e.,
consists of a colon, the final colon and the following output slot
will be stripped.
2) If the input name is a tensor name, the output slot, as an int. If
the input name is not a tensor name, None.
"""
if ":" in name and not name.endswith(":"):
node_name = name[:name.rfind(":")]
output_slot = int(name[name.rfind(":") + 1:])
return node_name, output_slot
else:
return name, None
def _get_tensor_name(node_name, output_slot):
"""Get tensor name given node name and output slot index.
Args:
node_name: Name of the node that outputs the tensor, as a string.
output_slot: Output slot index of the tensor, as an integer.
Returns:
Name of the tensor, as a string.
"""
return "%s:%d" % (node_name, output_slot)
def _get_tensor_watch_key(node_name, output_slot, debug_op):
"""Get the string representation of a debug watch on a tensor.
Args:
node_name: Name of the node by which the watched tensor is produced, as a
string.
output_slot: Output slot index of the tensor, as an integer.
debug_op: Name of the debug op that is used to watch the tensor, as a
string.
Returns:
A string representing the debug watch on the tensor (i.e., the "watch
key").
"""
return "%s:%s" % (_get_tensor_name(node_name, output_slot), debug_op)
def _is_copy_node(node_name):
"""Determine whether a node name is that of a debug Copy node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug Copy
node.
"""
return node_name.startswith("__copy_")
def _is_debug_node(node_name):
"""Determine whether a node name is that of a debug node.
Such nodes are inserted by TensorFlow core upon request in
RunOptions.debug_options.debug_tensor_watch_opts.
Args:
node_name: Name of the node.
Returns:
A bool indicating whether the input argument is the name of a debug node.
"""
return node_name.startswith("__dbg_")
def _parse_debug_node_name(node_name):
"""Parse the name of a debug node.
Args:
node_name: Name of the debug node.
Returns:
1. Name of the watched node, as a str.
2. Output slot index of the watched tensor, as an int.
3. Index of the debug node, as an int.
4. Name of the debug op, as a str, e.g, "DebugIdentity".
Raises:
ValueError: If the input node name is not a valid debug node name.
"""
prefix = "__dbg_"
name = node_name
if not name.startswith(prefix):
raise ValueError("Invalid prefix in debug node name: '%s'" % node_name)
name = name[len(prefix):]
if name.count("_") < 2:
raise ValueError("Invalid debug node name: '%s'" % node_name)
debug_op = name[name.rindex("_") + 1:]
name = name[:name.rindex("_")]
debug_op_index = int(name[name.rindex("_") + 1:])
name = name[:name.rindex("_")]
if name.count(":") != 1:
raise ValueError("Invalid tensor name in debug node name: '%s'" % node_name)
watched_node_name = name[:name.index(":")]
watched_output_slot = int(name[name.index(":") + 1:])
return watched_node_name, watched_output_slot, debug_op_index, debug_op
def has_inf_or_nan(datum, tensor):
"""A predicate for whether a tensor consists of any bad numerical values.
This predicate is common enough to merit definition in this module.
Bad numerical values include nans and infs.
  The signature of this function follows the requirement of DebugDumpDir's
find() method.
Args:
datum: (DebugTensorDatum) Datum metadata.
tensor: (numpy.ndarray or None) Value of the tensor. None represents
an uninitialized tensor.
Returns:
(bool) True if and only if tensor consists of any nan or inf values.
"""
  _ = datum  # Datum metadata is unused in this predicate.
if tensor is None:
# Uninitialized tensor doesn't have bad numerical values.
return False
else:
return np.any(np.isnan(tensor)) or np.any(np.isinf(tensor))
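# Example (illustrative, not part of the original module): the predicate can
# be probed directly, or passed to DebugDumpDir.find() defined further below.
#
#   has_inf_or_nan(None, np.array([1.0, np.inf]))  # -> True
#   has_inf_or_nan(None, np.array([1.0, 2.0]))     # -> False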
class DebugTensorDatum(object):
"""A single tensor dumped by tfdbg.
Contains "metadata" for the dumped tensor, including node name, output slot,
debug op and timestamp.
This type does not contain the space-expensive tensor (numpy array) itself.
It just points to the file path from which the tensor can be loaded if
needed.
"""
def __init__(self, dump_root, debug_dump_rel_path):
"""DebugTensorDatum constructor.
Args:
dump_root: Debug dump root directory.
debug_dump_rel_path: Path to a debug dump file, relative to the debug
dump root directory. For example, suppose the debug dump root
directory is "/tmp/tfdbg_1" and the dump file is at
"/tmp/tfdbg_1/ns_1/node_a_0_DebugIdentity_123456789", then
the value of the debug_dump_rel_path should be
"ns_1/node_a_0_DebugIdenity_1234456789".
"""
base = os.path.basename(debug_dump_rel_path)
# TODO(cais): Add hostname and pid to support dumps from distributed
# sessions.
self._timestamp = int(base.split("_")[-1])
self._debug_op = base.split("_")[-2]
self._output_slot = int(base.split("_")[-3])
namespace = os.path.dirname(debug_dump_rel_path)
node_base_name = "_".join(base.split("_")[:-3])
if not namespace or namespace == ".":
self._node_name = node_base_name
else:
self._node_name = namespace + "/" + node_base_name
self._file_path = os.path.join(dump_root, debug_dump_rel_path)
def __str__(self):
return "{DebugTensorDatum: %s:%d @ %s @ %d}" % (self.node_name,
self.output_slot,
self.debug_op,
self.timestamp)
def __repr__(self):
return self.__str__()
def get_tensor(self):
"""Get tensor from the dump (Event) file.
Returns:
The tensor loaded from the dump (Event) file.
"""
return load_tensor_from_event_file(self.file_path)
@property
def timestamp(self):
return self._timestamp
@property
def debug_op(self):
return self._debug_op
@property
def node_name(self):
return self._node_name
@property
def output_slot(self):
return self._output_slot
@property
def tensor_name(self):
return _get_tensor_name(self.node_name, self.output_slot)
@property
def watch_key(self):
"""Watch key identities a debug watch on a tensor.
Returns:
A watch key, in the form of <tensor_name>:<debug_op>.
"""
return _get_tensor_watch_key(self.node_name, self.output_slot,
self.debug_op)
@property
def file_path(self):
return self._file_path
class DebugDumpDir(object):
"""Data set from a debug dump directory on filesystem.
An instance of DebugDumpDir contains all DebugTensorDatum in a tfdbg dump
  root directory. This is an immutable object, whose constituent tensor dump
  files and partition graphs are all loaded during the __init__ call.
"""
def __init__(self, dump_root, partition_graphs=None, validate=True):
"""DebugDumpDir constructor.
Args:
dump_root: Path to the dump root directory.
partition_graphs: A repeated field of GraphDefs representing the
partition graphs executed by the TensorFlow runtime.
validate: Whether the dump files are to be validated against the
partition graphs.
Raises:
IOError: If dump_root does not exist as a directory.
ValueError: If the dump_root directory contains file path patterns
that do not conform to the canonical dump file naming pattern.
"""
if not os.path.isdir(dump_root):
raise IOError("Dump root directory %s does not exist" % dump_root)
self._dump_root = dump_root
self._dump_tensor_data = []
# A map from node name to debug watches.
# The key is the watched node name.
# The value is a dictionary.
# Of this dictionary, the key is the watched_output_slot.
# The value is a set of debug ops watching this output slot.
self._debug_watches = collections.defaultdict(
lambda: collections.defaultdict(set))
for root, _, files in os.walk(self._dump_root):
for f in files:
if f.count("_") < 3:
raise ValueError(
"Dump file path does not conform to the naming pattern: %s" % f)
debug_dump_rel_path = os.path.join(
os.path.relpath(root, self._dump_root), f)
datum = DebugTensorDatum(self._dump_root, debug_dump_rel_path)
self._dump_tensor_data.append(datum)
# Attempt to load the debug watches from the tensor dump files first,
# before loading the full set of debug watches from the partition
# graphs as done further below.
# This is necessary because sometimes the partition graphs may not be
# available, e.g., when the run errors out.
self._debug_watches[datum.node_name][datum.output_slot].add(
datum.debug_op)
# Sort the data by ascending timestamp.
# This sorting order reflects the order in which the TensorFlow
# executor processed the nodes of the graph. It is (one of many
# possible) topological sort of the nodes. This is useful for
# displaying tensors in the debugger frontend as well as for the use
# case in which the user wants to find a "culprit tensor", i.e., the
# first tensor in the graph that exhibits certain problematic
# properties, i.e., all zero values, or bad numerical values such as
# nan and inf.
self._dump_tensor_data = sorted(
self._dump_tensor_data, key=lambda x: x.timestamp)
# Time stamp of the first tensor dump.
if self._dump_tensor_data:
self._t0 = self._dump_tensor_data[0].timestamp
else:
self._t0 = None
# Create a map from watch key (tensor name + debug op) to
# DebugTensorDatum item.
# Also make a map from watch key to relative timestamp.
# "relative" means (absolute timestamp - t0).
self._watch_key_to_datum = {}
self._watch_key_to_rel_time = {}
for datum in self._dump_tensor_data:
if datum.watch_key not in self._watch_key_to_datum:
self._watch_key_to_datum[datum.watch_key] = [datum]
self._watch_key_to_rel_time[datum.watch_key] = [
datum.timestamp - self._t0
]
else:
self._watch_key_to_datum[datum.watch_key].append(datum)
self._watch_key_to_rel_time[datum.watch_key].append(datum.timestamp -
self._t0)
# Initialize partition graph-related information.
self._partition_graphs = None
self._node_inputs = None
self._node_ctrl_inputs = None
self._node_recipients = None
self._node_ctrl_recipients = None
self._devices = None
self._node_devices = None
self._node_op_types = None
# Check the dump data against partition executor graphs.
if partition_graphs:
self._load_partition_graphs(partition_graphs)
if (partition_graphs is not None) and validate:
self._validate_dump_with_graphs()
@property
def dumped_tensor_data(self):
return self._dump_tensor_data
@property
def t0(self):
"""Absolute timestamp of the first dumped tensor.
Returns:
Absolute timestamp of the first dumped tensor.
"""
return self._t0
@property
def size(self):
"""Total number of dumped tensors in the dump root directory.
Returns:
Total number of dumped tensors in the dump root directory.
"""
return len(self._dump_tensor_data)
def _load_partition_graphs(self, partition_graphs):
"""Load and process partition graphs.
Load the graphs; parse the input and control input structure; obtain the
device and op type of each node; remove the Copy and debug ops inserted
by the debugger. The gathered information can be used to validate the
tensor dumps.
Args:
partition_graphs: Partition graphs executed by the TensorFlow runtime,
represented as repeated fields of GraphDef.
Raises:
ValueError: If duplicate node names are encountered.
"""
self._partition_graphs = partition_graphs
# A map from node name to node attributes.
self._node_attributes = {}
# A map from node name to the node's non-control inputs, for non-debug &
# non-copy nodes only.
self._node_inputs = {}
# A map from node name to the node's control inputs.
self._node_ctrl_inputs = {}
# A map from node name to non-control recipients of the node's output(s).
self._node_recipients = {}
# A map from node name to control recipients of the node.
self._node_ctrl_recipients = {}
# A map from node name to devices (as indices to self._devices)
self._devices = []
self._node_devices = {}
# A map from node name to node type.
self._node_op_types = {}
# A list of _Send that send Copy node outputs across devices.
copy_send_nodes = []
for pg in self._partition_graphs:
for node in pg.node:
if _is_debug_node(node.name):
# This is a debug node. Parse the node name and retrieve the
# information about debug watches on tensors. But do not include
# the node in the graph.
(watched_node_name, watched_output_slot, _,
debug_op) = _parse_debug_node_name(node.name)
self._debug_watches[watched_node_name][watched_output_slot].add(
debug_op)
continue
if node.name in self._node_inputs:
raise ValueError("Duplicate node name: '%s'" % node.name)
# Collect node attributes.
self._node_attributes[node.name] = node.attr
# Keep track of devices.
if node.device not in self._devices and node.device:
self._devices.append(node.device)
self._node_inputs[node.name] = []
self._node_ctrl_inputs[node.name] = []
self._node_recipients[node.name] = []
self._node_ctrl_recipients[node.name] = []
self._node_devices[node.name] = node.device
self._node_op_types[node.name] = node.op
for inp in node.input:
if _is_copy_node(inp) and node.op == "_Send":
copy_send_nodes.append(node.name)
if inp.startswith("^"):
cinp = inp[1:]
self._node_ctrl_inputs[node.name].append(cinp)
else:
self._node_inputs[node.name].append(inp)
# Prune the Copy ops and associated _Send ops inserted by the debugger out
# from the non-control inputs and output recipients map. Replace the inputs
# and recipients with original ones.
copy_nodes = []
for node in self._node_inputs:
if node in copy_send_nodes:
continue
if _is_copy_node(node):
copy_nodes.append(node)
inputs = self._node_inputs[node]
for i in xrange(len(inputs)):
inp = inputs[i]
if _is_copy_node(inp):
# Find the input to the Copy node, which should be the original
# input to the node.
orig_inp = self._node_inputs[inp][0]
inputs[i] = orig_inp
# Remove the Copy ops inserted by the debugger from the maps.
for copy_node in copy_nodes:
del self._node_inputs[copy_node]
del self._node_ctrl_inputs[copy_node]
del self._node_recipients[copy_node]
del self._node_ctrl_recipients[copy_node]
# Remove the _Send ops associated with the Copy ops.
for copy_send_node in copy_send_nodes:
del self._node_inputs[copy_send_node]
del self._node_ctrl_inputs[copy_send_node]
del self._node_recipients[copy_send_node]
del self._node_ctrl_recipients[copy_send_node]
# Prune the edges from debug ops from the control edge map.
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
debug_op_inputs = []
for ctrl_inp in ctrl_inputs:
if _is_debug_node(ctrl_inp):
debug_op_inputs.append(ctrl_inp)
for debug_op_inp in debug_op_inputs:
ctrl_inputs.remove(debug_op_inp)
# Create the recipients maps.
for node in self._node_inputs:
inputs = self._node_inputs[node]
for inp in inputs:
# A tensor name: replace it with the node name.
if inp.count(":") == 1:
inp = inp.split(":")[0]
self._node_recipients[inp].append(node)
for node in self._node_ctrl_inputs:
ctrl_inputs = self._node_ctrl_inputs[node]
for ctrl_inp in ctrl_inputs:
if ctrl_inp in copy_send_nodes:
# Skip _Send ops associated with Copy nodes.
continue
self._node_ctrl_recipients[ctrl_inp].append(node)
def _validate_dump_with_graphs(self):
"""Validate the dumped tensor data against the partition graphs.
Raises:
RuntimeError: If the partition graphs have not been loaded yet.
ValueError: If dumps contain node names not found in partition graph.
Or if the temporal order of the dump's timestamps violate the
input relations on the partition graphs.
"""
if not self._partition_graphs:
raise RuntimeError("No partition graphs loaded.")
# Verify that the node names in the dump data are all present in the
    # partition graphs.
for datum in self._dump_tensor_data:
if datum.node_name not in self._node_inputs:
raise ValueError("Node name '%s' is not found in partition graphs." %
datum.node_name)
pending_inputs = {}
for node in self._node_inputs:
pending_inputs[node] = []
# TODO(cais): tfdbg currently does not watch control edges. Add control
# edges to pending_inputs when it does.
inputs = self._node_inputs[node]
for inp in inputs:
if inp.count(":") == 1:
inp = inp.split(":")[0]
# Keep track of only the watched nodes, as the debugger allows clients
# to watch a subset of the nodes.
if inp in self._debug_watches:
pending_inputs[node].append(inp)
for datum in self._dump_tensor_data:
node = datum.node_name
if pending_inputs[node]:
raise ValueError("Causality violated in timing relations of debug "
"dumps: %s (%d): "
"these input(s) are not satisfied: %s" %
(node, datum.timestamp, repr(pending_inputs[node])))
# Get the recipients of the node's output
recipients = self._node_recipients[node]
for recipient in recipients:
recipient_pending_inputs = pending_inputs[recipient]
if node in recipient_pending_inputs:
if self.node_op_type(recipient) == "Merge":
# If this is a Merge op, we automatically clear the list because
# a Merge node only requires one of its two inputs.
del recipient_pending_inputs[:]
else:
del recipient_pending_inputs[recipient_pending_inputs.index(node)]
def loaded_partition_graphs(self):
"""Test whether partition graphs have been loaded."""
return self._partition_graphs is not None
def partition_graphs(self):
"""Get the partition graphs.
Returns:
Partition graphs as repeated fields of GraphDef.
Raises:
RuntimeError: If no partition graphs have been loaded.
"""
if self._partition_graphs is None:
raise RuntimeError("No partition graphs have been loaded.")
return self._partition_graphs
def nodes(self):
"""Get a list of all nodes from the partition graphs.
Returns:
All nodes' names, as a list of str.
Raises:
RuntimeError: If no partition graphs have been loaded.
"""
if self._partition_graphs is None:
raise RuntimeError("No partition graphs have been loaded.")
return [node_name for node_name in self._node_inputs]
def node_attributes(self, node_name):
"""Get attributes of a node.
Args:
node_name: Name of the node in question.
Returns:
Attributes of the node.
Raises:
RuntimeError: If no partition graphs have been loaded.
ValueError: If no node named node_name exists.
"""
if self._partition_graphs is None:
raise RuntimeError("No partition graphs have been loaded.")
if node_name in self._node_attributes:
return self._node_attributes[node_name]
else:
raise ValueError("No node named \"%s\" exists." % node_name)
def node_inputs(self, node_name, is_control=False):
"""Get the inputs of given node according to partition graphs.
Args:
node_name: Name of the node.
is_control: Whether control inputs, rather than non-control inputs, are
to be returned.
Returns:
All non-control inputs to the node, as a list of node names.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_inputs is None or self._node_ctrl_inputs is None:
raise RuntimeError(
"Node inputs are not loaded from partition graphs yet.")
if node_name not in self._node_inputs:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
if is_control:
return self._node_ctrl_inputs[node_name]
else:
return self._node_inputs[node_name]
def transitive_inputs(self, node_name, include_control=True):
"""Get the transitive inputs of given node according to partition graphs.
Args:
node_name: Name of the node
include_control: Include control inputs (True by default).
Returns:
All transitive inputs to the node, as a list of node names.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if not self._node_inputs or not self._node_ctrl_inputs:
raise RuntimeError(
"Node inputs are not loaded from partition graphs yet.")
if node_name not in self._node_inputs:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
inputs = []
# Keep track of visited nodes to avoid infinite loops during input
# tracing.
visited_nodes = []
def trace_inputs(node):
"""Inner function for recursive tracing of node inputs.
      The transitive input names are appended to the captured list
"inputs".
Args:
node: Name of the node, as a str.
"""
if node.count(":") == 1:
# This check is necessary for cases in which an input is not from the
# 0-th output slot, e.g., from a Switch op.
node = node[:node.rindex(":")]
# Stop the tracing at a Merge op, as it is generally impossible to infer
# outside the runtime which input to the Merge op is alive.
if self._node_op_types[node] == "Merge":
return
if node in visited_nodes:
# Avoid infinite loops.
return
visited_nodes.append(node)
for inp in self._node_inputs[node]:
if inp == node_name:
continue
inputs.append(inp)
trace_inputs(inp) # Recursive call.
if include_control:
for ctrl_inp in self._node_ctrl_inputs[node]:
if ctrl_inp == node_name:
continue
inputs.append(ctrl_inp)
trace_inputs(ctrl_inp) # Recursive call.
trace_inputs(node_name)
return inputs
def node_recipients(self, node_name, is_control=False):
"""Get recipient of the given node's output according to partition graphs.
Args:
node_name: Name of the node.
is_control: Whether control outputs, rather than non-control outputs,
are to be returned.
Returns:
      All recipients of the node's output, as a list of node names.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_recipients is None or self._node_ctrl_recipients is None:
raise RuntimeError(
"Node recipients are not loaded from partition graphs yet.")
if node_name not in self._node_recipients:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
if is_control:
return self._node_ctrl_recipients[node_name]
else:
return self._node_recipients[node_name]
def devices(self):
"""Get the list of devices.
Returns:
      Names of all devices, as a list of str.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
"""
if self._devices is None:
raise RuntimeError("Devices are not loaded from partition graphs yet.")
return self._devices
def node_exists(self, node_name):
"""Test if a node exists in the partition graphs.
Args:
node_name: Name of the node to be checked, as a str.
Returns:
A boolean indicating whether the node exists.
Raises:
RuntimeError: If no partition graphs have been loaded yet.
"""
if self._node_inputs is None:
raise RuntimeError(
"Nodes have not been loaded from partition graphs yet.")
return node_name in self._node_inputs
def node_device(self, node_name):
"""Get the device of a node.
Args:
node_name: Name of the node.
Returns:
Name of the device on which the node is placed, as a str.
Raises:
RuntimeError: If node inputs and control inputs have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_devices is None:
raise RuntimeError(
"Node devices are not loaded from partition graphs yet.")
if node_name not in self._node_devices:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
return self._node_devices[node_name]
def node_op_type(self, node_name):
"""Get the op type of given node.
Args:
node_name: Name of the node.
Returns:
Type of the node's op, as a str.
Raises:
RuntimeError: If node op types have not been loaded
from partition graphs yet.
ValueError: If the node does not exist in partition graphs.
"""
if self._node_op_types is None:
raise RuntimeError(
"Node op types are not loaded from partition graphs yet.")
if node_name not in self._node_op_types:
raise ValueError("Node '%s' does not exist in partition graphs." %
node_name)
return self._node_op_types[node_name]
def debug_watch_keys(self, node_name):
"""Get all tensor watch keys of given node according to partition graphs.
Args:
node_name: Name of the node.
Returns:
All debug tensor watch keys, as a list of strings. Returns an empty list
if the node name does not correspond to any debug watch keys.
Raises:
RuntimeError: If debug watch information has not been loaded from
partition graphs yet.
"""
if node_name not in self._debug_watches:
return []
watch_keys = []
for watched_slot in self._debug_watches[node_name]:
debug_ops = self._debug_watches[node_name][watched_slot]
for debug_op in debug_ops:
watch_keys.append(
_get_tensor_watch_key(node_name, watched_slot, debug_op))
return watch_keys
def watch_key_to_data(self, debug_watch_key):
"""Get all DebugTensorDatum instances corresponding to a debug watch key.
Args:
debug_watch_key: A debug watch key, as a str.
Returns:
      A list of DebugTensorDatum instances that correspond to the debug watch
      key. If the watch key does not exist, returns an empty list.
"""
return self._watch_key_to_datum.get(debug_watch_key, [])
def find(self, predicate, first_n=0):
"""Find dumped tensor data by a certain predicate.
Args:
predicate: A callable that takes two input arguments:
predicate(debug_tensor_datum, tensor),
where "debug_tensor_datum" is an instance of DebugTensorDatum, which
carries "metadata", such as the name of the node, the tensor's slot
index on the node, timestamp, debug op name, etc; and "tensor" is
the dumped tensor value as a numpy array.
first_n: Return only the first n dumped tensor data (in time order) for
which the predicate is True. To return all such data, let first_n be
<= 0.
Returns:
A list of all DebugTensorDatum objects in this DebugDumpDir object for
which predicate returns True, sorted in ascending order of the timestamp.
"""
matched_data = []
for datum in self._dump_tensor_data:
if predicate(datum, datum.get_tensor()):
matched_data.append(datum)
if first_n > 0 and len(matched_data) >= first_n:
break
return matched_data
def get_tensor_file_paths(self, node_name, output_slot, debug_op):
"""Get the file paths from a debug-dumped tensor.
Args:
node_name: Name of the node that the tensor is produced by.
output_slot: Output slot index of tensor.
debug_op: Name of the debug op.
Returns:
List of file path(s) loaded. This is a list because each debugged tensor
may be dumped multiple times.
Raises:
      ValueError: If the tensor does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return [datum.file_path for datum in self._watch_key_to_datum[watch_key]]
def get_tensors(self, node_name, output_slot, debug_op):
"""Get the tensor value from for a debug-dumped tensor.
The tensor may be dumped multiple times in the dump root directory, so a
list of tensors (numpy arrays) is returned.
Args:
node_name: Name of the node that the tensor is produced by.
output_slot: Output slot index of tensor.
debug_op: Name of the debug op.
Returns:
List of tensor(s) loaded from the tensor dump file(s).
Raises:
      ValueError: If the tensor does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return [datum.get_tensor() for datum in self._watch_key_to_datum[watch_key]]
def get_rel_timestamps(self, node_name, output_slot, debug_op):
"""Get the relative timestamp from for a debug-dumped tensor.
Relative timestamp means (absolute timestamp - t0), t0 being the absolute
timestamp of the first dumped tensor in the dump root. The tensor may be
dumped multiple times in the dump root directory, so a list of relative
timestamp (numpy arrays) is returned.
Args:
node_name: Name of the node that the tensor is produced by.
output_slot: Output slot index of tensor.
debug_op: Name of the debug op.
Returns:
List of relative timestamps.
Raises:
      ValueError: If the tensor does not exist in the debug dump data.
"""
watch_key = _get_tensor_watch_key(node_name, output_slot, debug_op)
if watch_key not in self._watch_key_to_datum:
raise ValueError("Watch key \"%s\" does not exist in the debug dump" %
watch_key)
return self._watch_key_to_rel_time[watch_key]
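# Minimal usage sketch (illustrative; the dump directory path is an assumed
# placeholder):
#
#   dump = DebugDumpDir("/tmp/tfdbg_dumps/run_1")
#   for datum in dump.find(has_inf_or_nan):
#       print(datum.node_name, datum.output_slot, datum.debug_op,
#             datum.timestamp - dump.t0)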
| apache-2.0 | 4,304,568,908,394,589,700 | 31.544677 | 80 | 0.647107 | false |
ivannz/study_notes | year_14_15/spring_2015/structural_classification/project/main.py | 1 | 15936 | #! /usr/bin/env python
# -*- coding: UTF-8 -*-
__date__ = "2015-03-16"
__author__ = "Makhalova, Nazarov"
__email__ = "[email protected], [email protected]"
__status__ = "Alpha"
__version__ = "0.9"
__dscription__ = """Основной модуль работы по курсу "Структурно-классификационные методы интеллектуального анализа данных и прогнозирования в слабо формализованных системах" """
import sys
reload(sys)
sys.setdefaultencoding( 'utf-8' )
import time as tm
try :
import tkinter as tk
from tkinter.ttk import Combobox, Entry, Button
from tkinter import filedialog as file_dlg
except :
import Tkinter as tk
from ttk import Combobox, Entry, Button
import tkFileDialog as file_dlg
##########################################################################################
class Combox( Combobox ):
def __init__( self, values, labels, **kwargs ) :
self.values, self.labels = values, labels
Combobox.__init__( self, values = labels, **kwargs )
def current( self ) :
return self.values[ Combobox.current( self ) ]
##########################################################################################
class Application( tk.Frame ):
def __init__( self, hWnd, model ) :
# super( Application, self ).__init__( )
tk.Frame.__init__( self, hWnd )
self.option_add( '*tearOff', False )
self.__wnd = hWnd
self.__menubar = None
self.__menuitems = {}
self.__model = model.register( self )
def start( self ) :
## Parent
self.__wnd.title( "Анализ данных" )
self.__wnd.geometry( '{}x{}'.format( 600, 150 ) )
self.__wnd.resizable( False, False )
## Menu bar
self.__menubar = tk.Menu( self.__wnd, tearoff = 0 )
self.__wnd.config( menu = self.__menubar )
## Exit
# self.__menubar.add_command( label = "Выход", underline = 0, command = self.__cmd_menu_exit )
## Data
self.__menuitems[ 'data' ] = m_data = tk.Menu( self.__menubar )
self.__menubar.add_cascade( label = "Данные", underline = 0, menu = m_data )
m_data.add_command( label = "Загрузить данные", command = self.__cmd_menu_data_open )
m_data.add_command( label = "Сохранить данные", command = self.__cmd_menu_data_save )
m_data.entryconfig( 1, state = tk.DISABLED )
## Show
self.__menuitems[ 'show' ] = m_show = tk.Menu( self.__menubar )
self.__menubar.add_cascade( label = "Просмотр", underline = 0, menu = m_show )
# self.__menubar.entryconfig( 2, state = tk.DISABLED )
m_show.add_command( label = "Сырые данные", command = self.__cmd_menu_show_view_original )
m_show.add_command( label = "Данные без пропусков", command = self.__cmd_menu_show_view_processed )
m_show.add_separator( )
m_show.add_command( label = "Результаты", command = self.__cmd_menu_show_results )
# m_show.entryconfig( 0, state = tk.DISABLED )
# m_show.entryconfig( 1, state = tk.DISABLED )
# m_show.entryconfig( 3, state = tk.DISABLED )
## clustering : Add to the main bar
self.__menuitems[ 'clustering' ] = m_clust = tk.Menu( self.__menubar )
self.__menubar.add_cascade( label = "Кластеризация", underline = 0, menu = m_clust )
# self.__menubar.entryconfig( 3, state = tk.DISABLED )
m_clust.add_command( label = "Запуск", command = self.__cmd_menu_cluster_run )
# m_clust.entryconfig( 0, state = tk.DISABLED )
## About : Add to the main bar
self.__menuitems[ 'about' ] = m_about = tk.Menu( self.__menubar )
self.__menubar.add_cascade( label = "Помощь", underline = 0, menu = m_about )
m_about.add_command( label = "о программе", command = self.__show_about_dialog )
m_about.add_command( label = "руководство пользователя", command = self.__show_manual )
## Initialize the controller
self.__model.start( )
## Invoke the dispatcher
self.__wnd.mainloop( )
## View -- window command routines
def __cmd_menu_exit( self ) :
self.quit( )
def __cmd_menu_data_open( self ) :
fin = self.__show_open_dialog( )
self.__model.load_datafile( fin )
self.__display_show_datafile( )
fin.close( )
self.__menuitems[ 'data' ].entryconfig( 1, state = tk.ACTIVE )
def __cmd_menu_data_save( self ) :
fin = self.__show_save_dialog( )
self.__model.export_clustering_report( fin )
def __cmd_menu_show_view_original( self ) :
plt = figure_window( tk.Toplevel( self ), title = u"Исходные данные", modal = True )
fig = plt.figure( figsize = ( 8, 6 ), facecolor = 'w', dpi = 90 )
self.__model.show_data( fig, original = True )
def __cmd_menu_show_view_processed( self ) :
plt = figure_window( tk.Toplevel( self ), title = u"Данные без пропусков", modal = True )
fig = plt.figure( figsize = ( 8, 6 ), facecolor = 'w', dpi = 90 )
self.__model.show_data( fig, original = False )
def __cmd_menu_show_results( self ) :
result_window( tk.Toplevel( self ), self.__model ).start( )
def __cmd_menu_cluster_run( self ) :
clustering_window( tk.Toplevel( self ), self.__model ).start( )
def __display_show_datafile( self ) :
if not self.__model.has_data( ) : return
## Show basic info on the loaded datafile
filename, n, attr = self.__model.get_data_info( )
tk.Label( self.__wnd, text = u"Загружены данные из файла %s" % filename ).grid( row = 0, sticky = tk.W )
tk.Label( self.__wnd, text = u"Количество объектов: %d" % n ).grid( row = 1, sticky = tk.W )
tk.Label( self.__wnd, text = u"Количество признаков: %d" % attr ).grid( row = 2, sticky = tk.W )
## Enable menu options
# self.__menuitems[ 'show' ].entryconfig( 0, state = tk.ACTIVE )
# self.__menuitems[ 'show' ].entryconfig( 1, state = tk.ACTIVE )
# self.__menubar.entryconfig( 2, state = tk.ACTIVE )
# self.__menubar.entryconfig( 3, state = tk.ACTIVE )
def __display_error( self, error ) :
err_wnd = tk.Toplevel( self )
err_wnd.geometry( '{}x{}'.format( 300, 40 ) )
err_wnd.resizable( False, False )
tk.Label( err_wnd, text = error ).grid( row = 0, sticky = tk.W )
def __show_open_dialog( self ) :
return file_dlg.askopenfile(
filetypes = ( ( "CSV", "*.csv" ), ( "All files", "*.*" ) ) )
def __show_save_dialog( self ) :
return file_dlg.asksaveasfile( mode = 'w',
filetypes = ( ( u"Книга Excel 97-2003", "*.xls" ), ( "All files", "*.*" ) ) )
def __show_about_dialog( self ) :
about = about_window( tk.Toplevel( self ), title = u"о программе", modal = True )
about.show( )
def __show_manual( self ) :
import os
import win32com.client as win32
if os.name == 'nt' :
word = win32.gencache.EnsureDispatch( 'Word.Application' )
word.Documents.Open( os.path.join( os.path.realpath( '.' ),
u"Руководство пользователя программы анализа данных.docx" ) )
word.Visible = True
##########################################################################################
##########################################################################################
class about_window( tk.Frame ):
def __init__(self, hWnd, title, modal = False ):
tk.Frame.__init__( self, hWnd )
hWnd.title( title )
hWnd.geometry( '{}x{}+50+50'.format( 363, 120 ) )
hWnd.resizable( False, False )
if modal:
hWnd.grab_set( )
hWnd.bind( '<Escape>', self.__close )
self.__wnd = hWnd
def show( self, **kwargs ) :
## The number of classes
tk.Label( self.__wnd, text = "Авторы:" ).grid( row = 1, column = 0, sticky = tk.W )
tk.Label( self.__wnd, text = __email__ ).grid( row = 1, column = 1, sticky = tk.W )
T = tk.Text( self.__wnd, height = 9, width = 45 )
T.grid( row = 2, column = 0, columnspan = 2, sticky = tk.W )
T.insert( tk.END, __dscription__ + u"\nАвторы: " + __email__ )
def __close( self, event ) :
self.__wnd.destroy( )
##########################################################################################
##########################################################################################
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg
from matplotlib.figure import Figure
from mpl_toolkits.mplot3d import Axes3D
class figure_window( tk.Frame ):
def __init__(self, hWnd, title, modal = False ):
tk.Frame.__init__( self, hWnd )
hWnd.title( title )
hWnd.resizable( False, False )
if modal:
hWnd.grab_set( )
hWnd.bind( '<Escape>', self.__close )
self.__wnd = hWnd
self.__figure = None
def figure( self, **kwargs ) :
self.__figure = Figure( **kwargs )
canvas = FigureCanvasTkAgg( self.__figure, master = self.__wnd )
canvas._tkcanvas.pack( side = tk.TOP, fill = tk.BOTH, expand = 1 )
canvas.show( )
ax = Axes3D( self.__figure )
ax.mouse_init( )
return self.__figure
def __close( self, event ) :
self.__wnd.destroy( )
##########################################################################################
##########################################################################################
class result_window( tk.Frame ):
def __init__(self, hWnd, model, callback = None ):
tk.Frame.__init__( self, hWnd )
self.__wnd = hWnd
self.__callback = None
self.__model = model
self.start( )
def start( self ) :
if not self.__model.has_data( ) :
self.__wnd.destroy( )
return
self.__model.setup_begin()
self.__wnd.title( u"Результаты кластеризации" )
self.__wnd.geometry( '{}x{}+50+50'.format( 380, 120 ) )
self.__wnd.resizable( False, False )
self.__wnd.grab_set( )
self.__wnd.bind( '<Escape>', self.__close )
## The number of classes
cls_num = self.__model.read_number_of_classes( )
classes = range( cls_num ) ; labels = [ str( c + 1 ) for c in classes ]
self.cur_class = Combox( classes, labels, master = self.__wnd, width = 5, state = 'readonly' )
tk.Label( self.__wnd, text = "Класс:" ).grid( row = 1, column = 0, sticky = tk.W )
self.cur_class.grid( row = 1, column = 1, sticky = tk.W )
self.cur_class.set( labels[-1] )
self.cur_class.bind( '<<ComboboxSelected>>', self.__onChoice )
def __close( self, event ) :
self.__wnd.destroy( )
def __onChoice( self, event ) :
title = u"Класс #{}".format( self.cur_class.current( ) + 1 )
size, miss, cent = self.__model.get_cluster_info( self.cur_class.current( ) )
tk.Label( self.__wnd, text = u"Точек в классе %d" % size ).grid( row = 2, column = 0, sticky = tk.W )
tk.Label( self.__wnd, text = u"Реконструировано точек %d" % miss ).grid( row = 3, column = 0, sticky = tk.W )
plt = figure_window( tk.Toplevel( self ), title = title, modal = False )
fig = plt.figure( figsize = ( 8, 6 ), facecolor = 'w', dpi = 90 )
self.__model.show_cluster( fig, self.cur_class.current( ) )
##########################################################################################
##########################################################################################
class clustering_window( tk.Frame ):
def __init__(self, hWnd, model, callback = None ):
tk.Frame.__init__( self, hWnd )
self.__wnd = hWnd
self.__callback = None
self.__model = model
self.start( )
def start( self ) :
if not self.__model.has_data( ) :
self.__wnd.destroy( )
return
self.__model.setup_begin()
self.__wnd.title( u"Кластеризации" )
self.__wnd.geometry( '{}x{}+50+50'.format( 380, 150 ) )
self.__wnd.resizable( False, False )
self.__wnd.bind( '<Escape>', lambda e: self.onClose( ) )
self.__wnd.protocol( "WM_DELETE_WINDOW", self.onClose )
self.__wnd.grab_set( )
## The number of classes
cls_id, cls_label = self.__model.get_avaliable_classes( )
self.num_classes = Combox( cls_id, cls_label , master = self.__wnd, width = 5, state = 'readonly' )
tk.Label( self.__wnd, text = "Количество классов:"
).grid( row = 1, column = 0, sticky = tk.W )
self.num_classes.grid( row = 1, column = 1, sticky = tk.W )
cls_num = self.__model.read_number_of_classes( )-min(cls_id)
if cls_num < 1 :
cls_num = -1
self.num_classes.set( cls_label[ cls_num ] )
## The target criterion
crit_id, crit_label = self.__model.get_avaliable_criteria( )
self.crit_fun = Combox( crit_id, crit_label, master = self.__wnd, width = 12, state = 'readonly' )
tk.Label( self.__wnd, text = "Критерий качества:"
).grid( row = 3, column = 0, sticky = tk.W )
self.crit_fun.grid( row = 3, column = 1, sticky = tk.W )
self.crit_fun.set( crit_label[-1] )
## The similarity matrix parameters
self.alpha_box = Entry( master = self.__wnd, width = 5 )
tk.Label( self.__wnd, text = "Параметр Альфа:"
).grid( row = 4, column = 0, sticky = tk.W )
self.alpha_box.grid( row = 4, column = 1, sticky = tk.W )
alpha = self.__model.read_alpha( )
self.alpha_box.insert( 0, ".5" if alpha is None else str( alpha ) )
self.p_box = Entry( master = self.__wnd, width = 5 )
tk.Label( self.__wnd, text = "Параметр P:"
).grid( row = 5, column = 0, sticky = tk.W )
self.p_box.grid( row = 5, column = 1, sticky = tk.W )
p = self.__model.read_p( )
self.p_box.insert( 0, "8" if p is None else str( p ) )
## The optimisation parameter
m_par_id, m_par_label = self.__model.get_m_param_values( )
self.m_param = Combox( m_par_id, m_par_label , master = self.__wnd, width = 5, state = 'normal' )
tk.Label( self.__wnd, text = "Параметр m-локальной оптимизации:"
).grid( row = 6, column = 0, sticky = tk.W )
self.m_param.grid( row = 6, column = 1, sticky = tk.W )
m_par = self.__model.read_m_param( )
self.m_param.set( m_par_label[-1] if m_par is None else str( m_par ) )
## Add a button to start
self.submit = Button( master = self.__wnd, width = 12,
state = tk.ACTIVE, text = u"Запуск", command = self.onClose )
self.submit.grid( row = 7, column = 1, sticky = tk.W )
def onClose( self ) :
self.__model.select_number_of_classes( self.num_classes.current( ) )
self.__model.select_criterion( self.crit_fun.current( ) )
self.__model.set_alpha( self.alpha_box.get( ) )
self.__model.set_p( self.p_box.get( ) )
self.__model.set_m_param( self.m_param.current( ) )
self.__model.setup_end( )
self.__model.run_cluster( )
self.__wnd.destroy( )
##########################################################################################
if __name__ == '__main__' :
from model import model as mdl
Application( tk.Tk( ), mdl( ) ).start( )
sys.exit( 0 )
| mit | -4,398,704,505,450,902,500 | 48.879479 | 182 | 0.533076 | false |
sharpton-lab/claatu | arc/travtree.py | 1 | 12850 | #!/usr/bin/python
# Claatu::trav_tree -first iteration of Claatu
#Copyright (C) 2015 Christopher A. Gaulke
#author contact: [email protected]
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#This program is distributed in the hope that it will be useful,
#but WITHOUT ANY WARRANTY; without even the implied warranty of
#MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
#GNU General Public License for more details.
#
#You should have received a copy of the GNU General Public License
#along with this program (see LICENSE.txt). If not, see
#<http://www.gnu.org/licenses/>
####################
# ___________ #
# | | #
# | |==(*)==| | #
# | | #
# |_______| #
# #
####################
#################
# #
# travtree.py #
# #
#################
import dendropy
import pickle
import re
import os
#get working directory. In the future we would like to be able to specify this explicitly
wd = os.getcwd()
####
#Prep Tree
####
#eventually taken from command line or config file
#if you use new.tre you must reset node1 to root bc it is already named.
tree_fp = '/Users/gaulkec/Desktop/Claatu_test_files/new.tre'
#eventually taken from command line or config file
tree_type = "newick"
#might want to save bootstraps for later
#this labels tips as tip taxon (i.e., OTU or species name)
def PrepTree(tree_fp, tree_type):
#import tree object
tree1 = dendropy.Tree.get_from_path("{0}".format(tree_fp), schema="{0}".format(tree_type))
#name nodes
node_it = tree1.preorder_node_iter()
k = 1
for i in node_it:
if i.label == None:
if hasattr(i, 'taxon') and i.taxon != None: # (i.e., a tip)
i.label = i.taxon.label
else:
#continue
i.label = "root"
else:
j = str(k)
i.label = "{0}{1}".format("node", j)
k = k + 1
	#write tree so it can be used by other programs
	#need to test that this can be used by other programs
	tree1.write_to_path(
		'new_prepped_tree.tre',
		'newick',
		taxon_set=None,
		suppress_leaf_taxon_labels=False,
		suppress_leaf_node_labels=True,
		suppress_internal_taxon_labels=False,
		suppress_internal_node_labels=False,
		suppress_rooting=False,
		suppress_edge_lengths=False,
		unquoted_underscores=False,
		preserve_spaces=False,
		store_tree_weights=False,
		suppress_annotations=True,
		annotations_as_nhx=False,
		suppress_item_comments=True,
		node_label_element_separator=' ',
		node_label_compose_func=None)
	return tree1
####
#Make node ancestor lookup table
####
def AncestorLookup(tree):
"This function makes a dictionary of the ancestors of each node"
node_it = tree.preorder_node_iter()
tip_ancestors = {}
#make an iterator for each node and append each ancestor to a list(vals)
for node in node_it:
ancest_it = node.ancestor_iter(inclusive=False) #get iter for all ancestors
vals = []
for ancestor in ancest_it:
vals.append(str(ancestor.label))
tip_ancestors[str(node.label)] = vals
return tip_ancestors
def PickleMeTimbers(prepped_tree_obj, ancestor_lookup_table):
	"This function will make a directory for pickled files and pickle the tree and lookup table for later use"
#dir name Claatu_pickles
pd = '{0}{1}'.format(wd, '/Claatu_pickles')
try:
os.stat(pd)
except:
os.makedirs('{0}{1}'.format(wd, '/Claatu_pickles'))
#pickle the tree
pickled_tree = open('{0}/prepped_tre.pkl'.format(pd), 'wb')
pickle.dump(prepped_tree_obj, pickled_tree)
pickled_tree.close()
#pickle lookup table
pickled_lookup = open('{0}/ancestor_lookup.pkl'.format(pd), 'wb')
pickle.dump(ancestor_lookup_table, pickled_lookup)
pickled_lookup.close()
return None
#need to create a projects directory for storage of files
tree1 = PrepTree(tree_fp, tree_type)
#this should be in a different module.
ancestor_lookup_dict = AncestorLookup(tree1)
PickleMeTimbers(tree1, ancestor_lookup_dict)
####
#External calls
####
'''
At this point we will need to make external calls to several programs, including pplacer, taxit, FastTree, and infernal.
Should check out 'subprocess.call(['command', '-options'])'.
'''
#Might be best to put the tree pickler, pplacer and .jplace file parser into separate modules.
#after the query reads are aligned with each other and with the tree reads, we do some post-processing of the data
####
#Parse .jplace file
####
# make into a dictionary
#in future this will be read in from previous step automatically
jplace_file = "/Users/gaulkec/Desktop/Claatu_test_files/test2.jplace"
#tested 4/8/15
def PlaceParse(jplace_file):
parse_dict = {}
with open (jplace_file, "r") as jfile:
data=jfile.read().replace('\n', '')
place_elem = data.split('"placements":')
place_tre = place_elem[0]
place_else = place_elem[1]
place_elem = place_else.split('"metadata":')
place_placements = place_elem[0]
place_extra = place_elem[1]
place_elem = place_extra.split('"fields":')
place_fields = place_elem[1]
parse_dict['tree'] = place_tre
parse_dict['placements'] = place_placements
parse_dict['metadata'] = place_elem
parse_dict['fields'] = place_fields
return parse_dict
#call
parse_dict = PlaceParse(jplace_file)
####
#Parse tre string
####
#tree edge lookup dictionary. Edge IDs are the keys, node names are the values
#tested 4/8/15
def EdgetoTail(jplace_tree):
"this function will map all edge labels to their corresponding tail nodes"
tree_dict = {}
#parse the tree structure. The edge IDs are contained in a set of {}
	t1 = jplace_tree.strip('":tree{}; ') #strip wrapper characters from both ends of the tree string
t2 = t1.strip(',";') #clean up additional characters on the ends of strings
l = t2.split("}")#split on the close curly brace which results in exactly 1 edge id /node as the ids always follow immediately after a node
#remove the last element of the list which will be empty
del l[-1] #should be a ';' as this is the last element of the tree string which follows the last }
for word in l:
s1 = word.split("{")
edge_id = s1[1]
s2 = s1[0]
s3 = s2.split(":")
#node name
name = s3[0]
name = name.strip(')(,')
if name == 'node1':
name = 'root'
tree_dict[edge_id] = name
else:
tree_dict[edge_id] = name
return tree_dict
#call
tree_dict = EdgetoTail(parse_dict['tree'])
####
#Parse Placements
####
#tested 4/8/15
def Stripper(listx):
"strips whitespace from each element in a list"
nlist = []
for y in listx:
val = y.replace(' ', '')
nlist.append(val)
return nlist
#tested 4/8/15
def PlaceParser (placement_string):
"This function will parse all the placements found in the placements slot of the .jplace dictionary"
#This will initialize a regex pattern for finding items enclosed in []
pattern = re.compile('\[.*?]')
place_dict = {}
place_placements = placement_string
place_placements = place_placements.strip(' [,]')
placements = place_placements.split('}')
placements = filter(None, placements)
for placement in placements:
place_count = 0
placement = placement.strip('{ "p:,')
placement_stats = placement.split('"nm":')[0].strip(' ,')
placement_nm = placement.split('"nm":')[1].strip(' ,')
placement_nm = placement_nm[1:-1]
placement_stats = placement_stats[1:-1]
stats = []
place = pattern.findall(placement_stats)
place = filter(None, place)
place = Stripper(place)
for p in place:
stats_list = p.strip('[] ').split(',')
stats.append(stats_list)
place_count += 1
#make a dictionary of all placements where the key is the read Id
nm = pattern.findall(placement_nm)
nm = filter(None, nm)
for name_mass in nm:
name = name_mass.split(',')[0].strip('][" ')
mass = name_mass.split(',')[1].strip('][" ')
place_dict[name] = [mass, stats]
return place_dict
#call
place_dict = PlaceParser(parse_dict['placements'])
####
#Parse fields
####
#tested 4/8/15
def ParseFields(place_fields):
"This will parse the fields string from the .jplace file and will return a list of field names in order"
place_fields = place_fields.strip('{} []')
stats_fields = place_fields.replace('"', '').split(',')
fields = []
for word in stats_fields:
word = word.strip()
fields.append(word)
return fields
#call
fields = ParseFields(parse_dict['fields'])
####
#Parse Metadata
####
#Not sure if we really even need this; might push it to a log file in the future.
''' I am just going to store this as a string until I figure out what to do with it. '''
####
#End .jplace file parser
####
####
#Integration of jplace output and tree data
####
#tested 4/8/15
def MakeTailtoHead(dictx):
"this is a function to make a lookup table of tail nodes to head nodes"
tail_to_head = {}
for key, value in dictx.items():
tail_node = tree1.find_node_with_label(value)
head_node = tail_node.parent_node
if hasattr(head_node, 'label'):
tail_to_head[value] = head_node.label
else:
continue
return tail_to_head
#call
tail_to_head_dict = MakeTailtoHead(tree_dict)
''' format of place_dict:
place_dict[name] = [mass, [[stats_0],[stats_1], [stats_n]]]
name: Read/fragment name
mass: mass
stats:["edge_num", "likelihood", "like_weight_ratio", "distal_length", "pendant_length", etc.]
'''
#Need to make a dataset to test this
def GetBest(list):
	"This function gets the best hit for each read and reports it. There are no tie breakers (i.e., the first largest number is counted as the best hit)."
#need to eventually incorporate a nearest common ancestor function to replace this function
hit = 0
hit_name = ''
for i in list:
var1 = i[1]
var2 = i[0]
if float(var1) > hit:
hit = float(var1)
hit_name = var2
else:
continue
return hit_name
#tested 4/8/15
#samples need to be defined by the user or parsed from a file elsewhere but for now...
samples = ['Monkey', 'Human', 'Mouse']
def MultiplexCountPlaceHits(samples, tree_dict, place_dict):
"This will do edge -> samps instead of samples -> edges"
edge_samp = {}
for edge in tree_dict:
edge_samp[edge] = {}
for sample in samples:
for edge in tree_dict:
edge_samp[edge][sample] = 0
for p, s in place_dict.items():
		if len(s[1]) > 1: #more than one placement for this read: keep only the best hit
for stat in s[1]:
best_hit = GetBest(s[1]) #retain only the best hit
name = p.split('_')[0] #sample ID
if stat[0] == best_hit and stat[0] in edge_samp:
edge_samp[stat[0]][name] += 1
else:
					print('error {0} not found'.format(stat[0]))
else:
stat = s[1][0]
name = p.split('_')[0]
if stat[0] in edge_samp:
edge_samp[stat[0]][name] += 1
else:
				print('error {0} not found'.format(stat[0]))
return edge_samp
#Call
edge_samps = MultiplexCountPlaceHits(samples, tree_dict, place_dict)
####
#Get Node lookup
####
#tested 4/9/15
def MultiplexEdgeHeadMapper(edge_samps, tail_to_head_dict, tree_dict):
"This function will map the edge counts to head nodes"
head_count_dict = {}
for edge, sdict in edge_samps.items():
try:
key = tail_to_head_dict[tree_dict[edge]]
except KeyError:
if tree_dict[edge] == 'root':
key = 'root'
else:
				print("Hmmm. There seems to be a problem with your keys")
if key in head_count_dict:
#sum up all values for all samples
for entry in sdict:
my_sum = sdict[entry] + head_count_dict[key][entry]
head_count_dict[key][entry] = my_sum #test
else:
head_count_dict[key] = sdict
return head_count_dict
#Call
head_count_dict = MultiplexEdgeHeadMapper(edge_samps, tail_to_head_dict, tree_dict)
####
#Should start thinking about what I can destroy or pickle at this point
####
#need to collapse this into a lookup table of cumulative
#tested 4/16/15
def CumulativeNodeCounts(head_count_dict, samples):
	"This function will collapse the node counts at each level (i.e., it will sum the counts of a node and its children and return the summed sample counts)"
sum_temp = {}
for node, sdict in head_count_dict.items():
nodex = tree1.find_node_with_label(node)
lnodex = nodex.label #label for node
n_iter = nodex.preorder_internal_node_iter() #make a iterator for the children of each node
sum_temp[lnodex] = {} #set up a new dictionary
for sample in samples:
sum_temp[lnodex][sample] = 0
for n in n_iter:
if n.label in head_count_dict:
for sample in head_count_dict[n.label]:
sum_temp[lnodex][sample] += head_count_dict[n.label][sample]
else:
continue
return sum_temp
#call
collapsed_node_counts = CumulativeNodeCounts(head_count_dict, samples)
'''loose ends:
1) need to figure out how to deal with name files to
a) get sample names
b) deal with single sequences that represent multiple seqs
'''
| gpl-3.0 | -2,768,405,143,906,391,000 | 26.398721 | 159 | 0.67284 | false |
reimandlab/ActiveDriverDB | website/stats/plots/enrichment.py | 1 | 8915 | from collections import defaultdict
from pathlib import Path
from helpers.plots import grouped_box_plot, p_value_annotations, box_plot
from helpers.cache import cache_decorator, Cache
from analyses.enrichment import ptm_on_random
from models import InheritedMutation, MC3Mutation, lru_cache
from ..store import cases
from .common import site_types_with_any
muts_cases = cases(site_type=site_types_with_any, mode=['occurrences', 'distinct']).set_mode('product')
@lru_cache()
def ptm_muts_enrichment_frequency(site_type, mode):
groups = defaultdict(dict)
significances = {}
sources_and_filters = {
MC3Mutation: ('TCGA', None),
InheritedMutation: (
'ClinVar (pathogenic, likely pathogenic, drug response)',
InheritedMutation.significance_filter('strict')
)
}
repeats = 100000
for source, (name, filters) in sources_and_filters.items():
observed, expected, region, p = ptm_on_random(
source=source, site_type=site_type.name,
mode=mode, mutation_filter=filters,
repeats=repeats
)
groups['Randomly drawn mutations (expected #)'][name] = [e / region for e in expected]
groups[f'{site_type.name.title()} mutations (observed #)'][name] = [o / region for o in observed]
if p == 0:
p = f'< 10^-{len(str(repeats)) - 1}'
significances[name] = p
return groups, significances
@muts_cases
@grouped_box_plot
def ptm_muts_frequency(site_type, mode):
groups, significances = ptm_muts_enrichment_frequency(site_type, mode)
return groups
@muts_cases
def ptm_muts_frequency_significance(site_type, mode):
groups, significances = ptm_muts_enrichment_frequency(site_type, mode)
return p_value_annotations(groups, significances)
MODES = ['occurrences', 'distinct']
muts_cases = cases(source=[MC3Mutation, InheritedMutation], site_type=site_types_with_any, mode=MODES).set_mode('product')
cached = cache_decorator(Cache('.enrichment_plot_cache'))
@cached
def ptm_muts_enrichment(source, site_type, mode, repeats=1000000):
boxes = {}
sources_and_filters = {
MC3Mutation: ('TCGA', None),
InheritedMutation: (
'ClinVar (pathogenic)',
InheritedMutation.significance_set_filter('pathogenic')
)
}
name, filters = sources_and_filters[source]
observed, expected, region, p = ptm_on_random(
source=source, site_type=site_type.name,
mode=mode, mutation_filter=filters,
repeats=repeats
)
boxes['Randomly drawn mutations (expected #)'] = expected
boxes[f'{site_type.name.title()} mutations (observed #)'] = observed
if p == 0:
p = f'< 10^-{len(str(repeats)) - 1}'
return boxes, p, region
def calc_ptm_muts_all_together(site_type):
groups = defaultdict(dict)
significances = {}
for source in [MC3Mutation, InheritedMutation]:
for mode in MODES:
boxes, p, region = ptm_muts_enrichment(source, site_type, mode)
name = f'{source.name}, {mode}'
significances[name] = p
for key, box in boxes.items():
groups[key][name] = box
return groups, significances
def ggplot2_plot(func, width=1400, height=900, dpi=72):
dummy_cases = cases()(lambda x: x)
def wrapper(*args, **kwargs):
ggplot2 = func(*args, **kwargs)
print(func.__name__)
path = Path('static') / f'{func.__name__}_{dummy_cases.full_case_name(kwargs)}.png'
path = str(path)
ggplot2.ggsave(path, width=width / dpi, height=height / dpi, dpi=dpi, units='in', bg='transparent')
return {'path': path}
return wrapper
@cached
def ptms_enrichment_for_ggplot(site_type):
from pandas import DataFrame
rows = []
significances = []
source_labels = {
'MC3': 'TCGA',
'ClinVar': 'ClinVar (clinically significant)'
}
for source in [MC3Mutation, InheritedMutation]:
for mode in MODES:
boxes, p, region = ptm_muts_enrichment(source, site_type, mode)
name = f'{source_labels[source.name]}, {mode}'
for key, box in boxes.items():
for value in box:
rows.append({
                        'observed_or_expected': key,
'group': name,
'mode': mode,
'count': value,
'source': source_labels[source.name]
})
significances.append({
'pvalue': str(p),
'group': name,
'max': max(max(box) for box in boxes.values()),
# note: the key is random (meaningless for visualisation)
                'observed_or_expected': key,
'source': source_labels[source.name],
'mode': mode
})
df = DataFrame(rows)
d = DataFrame(significances)
return d, df
ggplot_cases = cases(site_type=site_types_with_any, with_facets=[True, False, 'wrap']).set_mode('product')
@ggplot_cases
@ggplot2_plot
def ptm_muts_all_together(site_type, with_facets=True):
from helpers.ggplot2 import GG
from rpy2.robjects.packages import importr
from rpy2.robjects import StrVector
d, df = ptms_enrichment_for_ggplot(site_type)
ggplot2 = importr('ggplot2')
ggsignif = importr('ggsignif')
theme_options = {
'axis.text.x': ggplot2.element_text(angle=0, hjust=0.5),
'axis.text': ggplot2.element_text(size=15),
'text': ggplot2.element_text(size=15),
'legend.text': ggplot2.element_text(size=15),
'legend.position': 'bottom',
'strip.text': ggplot2.element_text(size=16),
}
fill = 'observed_or_expected'
levels = ','.join([repr(a) for a in sorted(set(df['observed_or_expected']), reverse=True)])
fill = f'factor({fill}, levels = c({levels}))'
if with_facets:
x = 'observed_or_expected'
x = f'factor({x}, levels = c({levels}))'
d['max'] *= 1.1
x_label = ''
if with_facets == 'wrap':
# theme_options['axis.text.x'] = ggplot2.element_blank()
pass
else:
x = 'group'
xmin = xmax = x
x_label = 'Mutation source, counting mode'
plot = (
GG(ggplot2.ggplot(df, ggplot2.aes_string(x=x, y='count', fill=fill))) +
ggplot2.geom_boxplot(notch=True, **{'outlier.alpha': 0.1}) +
ggplot2.theme(**theme_options) +
ggplot2.labs(x=x_label, y=r'Mutations count', fill='Mutations group') +
ggplot2.scale_fill_manual(values=StrVector(["#f1a340", '#cccccc'][::-1]))
# ggplot2.geom_jitter(width=0.1)
)
if with_facets:
plot += ggsignif.geom_signif(
data=d, mapping=ggplot2.aes_string(xmin=1, xmax=2, annotations='pvalue', y_position='max'),
manual=True, tip_length=0.03, textsize=5.5
)
# labels = {'distinct': 'Distinct mutations', 'occurrences': 'Occurrences'}
def get_facet_label(factors):
# TODO?
return factors
if with_facets == 'wrap':
plot += ggplot2.facet_wrap('group', scale='free_y', labeller=get_facet_label, nrow=1)
plot += ggplot2.scale_x_discrete(labels=StrVector(["expected #", "observed #"]))
else:
plot += ggplot2.facet_grid('source~mode', scale='free_y', labeller=get_facet_label)
else:
plot += ggsignif.geom_signif(
data=d, mapping=ggplot2.aes_string(xmin=xmin, xmax=xmax, annotations='pvalue', y_position='max'),
manual=True, tip_length=0, textsize=5.5
)
return ggplot2
@cases(site_type=site_types_with_any)
@grouped_box_plot
def ptm_muts_all_together_2(site_type):
groups, significances = calc_ptm_muts_all_together(site_type)
return groups
@cases(site_type=site_types_with_any)
def ptm_muts_all_together_significance(site_type):
groups, significances = calc_ptm_muts_all_together(site_type)
return p_value_annotations(groups, significances)
@muts_cases
@box_plot
def ptm_muts(source, site_type, mode):
boxes, significance, region = ptm_muts_enrichment(source, site_type, mode)
return boxes
@muts_cases
def ptm_muts_significance(source, site_type, mode):
boxes, significance, region = ptm_muts_enrichment(source, site_type, mode)
from numpy import percentile
return [
{
'x': 1,
'y': max(
percentile(
[float(x) for x in box],
75
) for box in boxes.values()
) * 1.1,
'xref': 'x',
'yref': 'y',
'text': 'p-value: ' + (
f'{significance:.2e}'
if isinstance(significance, float) else
f'{significance}'
),
'showarrow': False
}
]
| lgpl-2.1 | 6,855,819,001,079,686,000 | 33.026718 | 122 | 0.595064 | false |
SecurityInnovation/PGPy | tests/conftest.py | 1 | 2373 | """PGPy conftest"""
import pytest
import glob
try:
import gpg
except ImportError:
gpg = None
import os
import sys
from distutils.version import LooseVersion
from cryptography.hazmat.backends import openssl
openssl_ver = LooseVersion(openssl.backend.openssl_version_text().split(' ')[1])
gpg_ver = LooseVersion('0')
gnupghome = os.path.join(os.path.dirname(__file__), 'gnupghome')
if gpg:
gpgme_ver = gpg.core.check_version()
# ensure external commands we need to run exist
# set the CWD and add to sys.path if we need to
os.chdir(os.path.join(os.path.abspath(os.path.dirname(__file__)), os.pardir))
if os.getcwd() not in sys.path:
sys.path.insert(0, os.getcwd())
else:
sys.path.insert(0, sys.path.pop(sys.path.index(os.getcwd())))
if os.path.join(os.getcwd(), 'tests') not in sys.path:
sys.path.insert(1, os.path.join(os.getcwd(), 'tests'))
# pytest hooks
# pytest_configure
# called after command line options have been parsed and all plugins and initial conftest files been loaded.
def pytest_configure(config):
print("== PGPy Test Suite ==")
if gpg:
# clear out gnupghome
clear_globs = [os.path.join(gnupghome, 'private-keys-v1.d', '*.key'),
os.path.join(gnupghome, '*.kbx*'),
os.path.join(gnupghome, '*.gpg*'),
os.path.join(gnupghome, '.*'),
os.path.join(gnupghome, 'random_seed')]
for fpath in iter(f for cg in clear_globs for f in glob.glob(cg)):
os.unlink(fpath)
# get the GnuPG version
gpg_ver.parse(gpg.core.get_engine_info()[0].version)
# check that there are no keys loaded, now
with gpg.Context(offline=True) as c:
c.set_engine_info(gpg.constants.PROTOCOL_OpenPGP, home_dir=gnupghome)
assert len(list(c.keylist())) == 0
assert len(list(c.keylist(secret=True))) == 0
else:
# we're not running integration tests
print("running without integration tests")
# if we're on Travis, this is an error
if os.getenv('TRAVIS_PYTHON_VERSION'):
sys.exit(1)
# display the working directory and the OpenSSL/GPG/pgpdump versions
print("Working Directory: " + os.getcwd())
print("Using OpenSSL " + str(openssl_ver))
print("Using GnuPG " + str(gpg_ver))
print("")
| bsd-3-clause | -7,778,866,631,928,858,000 | 30.64 | 108 | 0.632533 | false |
Percona-QA/package-testing | molecule/psmdb40-upgrade/molecule/default/tests/test_psmdb40_upgrade.py | 1 | 5171 | import os
import pytest
import testinfra.utils.ansible_runner
testinfra_hosts = testinfra.utils.ansible_runner.AnsibleRunner(
os.environ['MOLECULE_INVENTORY_FILE']).get_hosts('all')
DEB_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-server', 'percona-server-mongodb-mongos',
'percona-server-mongodb-shell', 'percona-server-mongodb-tools', 'percona-server-mongodb-dbg']
RPM_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-server', 'percona-server-mongodb-mongos',
'percona-server-mongodb-shell', 'percona-server-mongodb-tools', 'percona-server-mongodb-debuginfo']
RPM_NEW_CENTOS_PACKAGES = ['percona-server-mongodb', 'percona-server-mongodb-mongos-debuginfo',
'percona-server-mongodb-server-debuginfo', 'percona-server-mongodb-shell-debuginfo',
'percona-server-mongodb-tools-debuginfo', 'percona-server-mongodb-debugsource']
BINARIES = ['mongo', 'mongod', 'mongos', 'bsondump', 'mongoexport',
'mongofiles', 'mongoimport', 'mongorestore', 'mongotop', 'mongostat']
PSMDB40_VER = "4.0"
def start_service(host):
os = host.system_info.distribution
cmd = "service mongod start"
if os == 'debian':
if host.system_info.codename == 'trusty':
cmd = "/etc/init.d/mongod start"
with host.sudo():
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
if result.rc == 1:
print(host.run("systemctl status mongod.service").stdout)
assert result.rc == 0, result.stdout
return result
def test_package_script(host):
with host.sudo():
result = host.run("/package-testing/package_check.sh psmdb40")
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stderr
def test_version_script(host):
with host.sudo():
result = host.run("/package-testing/version_check.sh psmdb40")
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stderr
@pytest.mark.parametrize("package", DEB_PACKAGES)
def test_deb_packages(host, package):
os = host.system_info.distribution
if os.lower() in ["redhat", "centos", 'rhel']:
pytest.skip("This test only for Debian based platforms")
pkg = host.package(package)
assert pkg.is_installed
assert PSMDB40_VER in pkg.version
# TODO add check that minor version is correct
@pytest.mark.parametrize("package", RPM_PACKAGES)
def test_rpm_packages(host, package):
os = host.system_info.distribution
if os in ["debian", "ubuntu"]:
pytest.skip("This test only for RHEL based platforms")
if float(host.system_info.release) >= 8.0:
pytest.skip("Only for centos7 tests")
pkg = host.package(package)
assert pkg.is_installed
assert PSMDB40_VER in pkg.version
@pytest.mark.parametrize("package", RPM_NEW_CENTOS_PACKAGES)
def test_rpm8_packages(host, package):
os = host.system_info.distribution
if os in ["debian", "ubuntu"]:
pytest.skip("This test only for RHEL based platforms")
if float(host.system_info.release) < 8.0:
pytest.skip("Only for centos7 tests")
pkg = host.package(package)
assert pkg.is_installed
assert PSMDB40_VER in pkg.version
@pytest.mark.parametrize("binary", BINARIES)
def test_binary_version(host, binary):
cmd = '{} --version|head -n1|grep -c "{}"'.format(binary, PSMDB40_VER)
result = host.run(cmd)
assert result.rc == 0, result.stdout
def test_functional(host):
with host.sudo():
result = host.run("/package-testing/scripts/psmdb_test.sh 4.0")
assert result.rc == 0, result.stderr
@pytest.mark.parametrize("encryption", ['keyfile', 'vault'])
def test_encryption(host, encryption):
with host.sudo():
result = host.run("/package-testing/scripts/psmdb_encryption/psmdb-encryption-test.sh {}".format(encryption))
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stderr
def test_enable_auth(host):
cmd = "/package-testing/scripts/psmdb_set_auth.sh"
with host.sudo():
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
def test_bats(host):
cmd = "/usr/local/bin/bats /package-testing/bats/mongo-init-scripts.bats"
with host.sudo():
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
def test_bats_with_numactl(host):
with host.sudo():
os = host.system_info.distribution
cmd = 'apt-get install numactl -y'
if os.lower() in ["redhat", "centos", 'rhel']:
cmd = 'yum install numactl -y'
result = host.run(cmd)
assert result.rc == 0, result.stdout
cmd = "/usr/local/bin/bats /package-testing/bats/mongo-init-scripts.bats"
result = host.run(cmd)
print(result.stdout)
print(result.stderr)
assert result.rc == 0, result.stdout
def test_service(host):
with host.sudo():
start_service(host)
assert host.service("mongod").is_running
| gpl-2.0 | 8,070,351,903,910,461,000 | 33.245033 | 117 | 0.658287 | false |
jr-garcia/Engendro3D | Demos/light_demo.py | 1 | 7944 | from math import sin
from random import randint, random
from cycgkit.cgtypes import vec3
from _base._BaseDemo import _Demo_Base, runDemo, tubeMODEL, logLevelsEnum
class Demo(_Demo_Base):
def __init__(self):
super(Demo, self).__init__()
self.texturesToLoad = [['e3dlogo.png', 'logo'], ['./textures/n_deep.png', 'defND', True],
['./textures/n_irr.png', 'defNI', True], ['./textures/nmap_test.png', 'testN', True]]
# TODO: credit textures or replace them
self.bumpymats = []
self.texmats = []
self.spots = []
self.spotAngles = {}
def createLightSphere(self, ltype, pos, color):
nlight = self.scene1.addLight(ltype, pos, vec3(0, 0, 0))
nlight.color = color
nlight.spotIntensity = random() # .1
nlight.spotRange = .9
nlight.attenuation = randint(150, 300)
if ltype == 2:
self.spotAngles[nlight] = (randint(1, 30) - randint(10, 50)), (randint(1, 30) - randint(10, 50))
lmod = self.scene1.addModel('conemodel', nlight.ID + 'sph', pos, [0, 0, 0], 1)
self.spots.append((nlight, lmod))
else:
lmod = self.scene1.addModel('spheremodel', nlight.ID + 'sph', pos, [0, 0, 0], 1)
mat = lmod._materials[0]
mat.emissiveColor = color
mat.isLightAffected = False
def loadModels(self):
engine = self.engine
self.camera.rotateX(40)
self.camera.position = vec3(0, 340, 350)
engine.models.loadSphere("mainspheremodel", 32)
self.sphere1 = self.scene1.addModel('mainspheremodel', 'sphere1', [0, 10, 0], [0, 0, 0], 4, mass=8)
# self.sphere1.physicsBody.isDynamic = True
mats = self.sphere1.getMaterialByIndex(0)
mats.specularPower = 50
mats.useDiffuseTexture = True
mats.useNormalMapTexture = True
mats.normalMapTextureID = 'defND'
mats.textureRepeat = 4
self.bumpymats.append(mats)
self.texmats.append(mats)
engine.models.loadSphere("spheremodel", 12)
engine.models.loadCone("conemodel", 20, 10, radialSegments=20)
engine.models.loadBox("boxmodel", [6], 1)
self.box1 = self.scene1.addModel('boxmodel', 'box1', [0, 90, 0], [0, 90, 0], 5, mass=7)
mt = self.box1._materials[0]
mt.specularPower = 40
mt.useDiffuseTexture = True
mt.useNormalMapTexture = True
mt.normalMapTextureID = 'defNI'
self.bumpymats.append(mt)
self.texmats.append(mt)
engine.models.loadPlane("floorplane", 600, 600, 50)
# engine.models.loadPlane("planemodelback", 600, 300, 10)
engine.models.loadPlane("planemodelWalls", 600, 300, 50)
# IMPORTANT!: High number of segments (tesselation) is needed for large objects. See:
# https://www.opengl.org/archives/resources/features/KilgardTechniques/oglpitfall/
# 2. Poor Tessellation Hurts Lighting
self.floor = self.scene1.addModel('floorplane', 'floor', [0, 0, 0], [0, 0, 0], 1)
mt = self.floor._materials[0]
mt.specularPower = 50
mt.useDiffuseTexture = True
mt.useNormalMapTexture = True
mt.normalMapTextureID = 'defNI'
mt.textureRepeat = 10
self.bumpymats.append(mt)
self.texmats.append(mt)
self.planer = self.scene1.addModel('planemodelWalls', 'planer', [300, 150, 0], [90, 0, 0], 1)
self.planer.rotateY(-90)
mt = self.planer._materials[0]
mt.useNormalMapTexture = True
mt.normalMapTextureID = 'testN'
mt.textureRepeat = 10
self.bumpymats.append(mt)
self.planel = self.scene1.addModel('planemodelWalls', 'planel', [-300, 150, 0], [90, 0, 0], 1)
self.planel.rotateY(90)
self.planel._materials[0] = mt
self.planef = self.scene1.addModel('planemodelWalls', 'planef', [0, 150, -300], [90, 0, 0], 1)
self.planef.moveUp(self.planer.getSize().y)
self.planef._materials[0] = mt
engine.models.loadModel(tubeMODEL, "tubemodel")
self.tube = self.scene1.addModel('tubemodel', 'tube1', [-150, 0, 0], [0, 0, 0], 9)
self.tube.setAnimation(self.tube.getAnimationsList()[0], True)
self.tube2 = self.scene1.addModel('tubemodel', 'tube2', [150, 0, 0], [0, 0, 0], 9)
self.tube2.setAnimation(self.tube2.getAnimationsList()[1], True)
def addLights(self):
print('Adding Lights')
super(Demo, self).addLights()
self.dlight.enabled = False
self.createLightSphere(2, vec3(-259.0, 120.0, 0.0), vec3(1.0, 0.0, 0.0))
self.createLightSphere(2, vec3(0.0, 270.0, -190.0), vec3(1.0, 1.0, 0.0))
self.createLightSphere(1, vec3(-50.0, 30.0, 290.0), vec3(0.0, 1.0, 0.0))
self.createLightSphere(2, vec3(0.0, 150.0, 0.0), vec3(.50, .0, 1.0))
self.createLightSphere(1, vec3(280.0, 30.0, 10.0), vec3(0.0, .0, 1.0))
def mouseMove(self, ev):
if ev.eventName == 'motion':
if self.window.hasFocus():
r = 1.0 / 10 if self.window.mouseLock else 1
self.camera.rotateY(-ev.xRel * r)
self.camera.rotateX(ev.yRel * r)
def keydown(self, e):
if e.eventName == 'keyUp':
return
keyName = e.keyName
if 'shift' in keyName:
self.window.mouseLock = not self.window.mouseLock
if keyName == 'escape': # ESC
self.close()
if keyName == 'f8':
self.window.backend.debugModeActive = not self.window.backend.debugModeActive
if keyName == 'f4':
self.window.backend.showAsWireframe = not self.window.backend.showAsWireframe
if keyName == 'space':
self.window.setFullScreen(not self.window.isFullScreen())
if keyName.__contains__('ctrl'):
self.dorot = not self.dorot
if keyName == 'f1':
np = [round(d, 3) for d in self.camera.position]
engine = self.engine
engine.log('Camera pos:{0}'.format(str(np)), logLevelsEnum.info)
engine.log('Poligons drawn:{}'.format(self.window.backend.poligonsDrawnThisUpdate), logLevelsEnum.info)
if keyName == 'g':
val = self.window.gamma
print('old gamma:' + str(val))
if val <= 1.8:
self.window.gamma = 2.5
else:
self.window.gamma = 1.7
print('new gamma:' + str(self.window.gamma))
if keyName == 'l':
self.dlight.enabled = not self.dlight.enabled
if keyName == 'n':
for mat in self.bumpymats:
mat.useNormalMapTexture = not mat.useNormalMapTexture
if keyName == 't':
for mat in self.texmats:
mat.useDiffuseTexture = not mat.useDiffuseTexture
def scene1Update(self, ev):
ft = ev[0] + .01
movespeed = ft / 10.0
self.scene1.ambientColor = vec3(.004, .006, .009)
self.scene1.bgColor = vec3(.04, .06, .09)
for s, m in self.spots:
rotVec = vec3(self.spotAngles[s][0] * sin(ev[1] / 1000.0), 0, self.spotAngles[s][1] * sin(ev[1] / 500.0))
s.rotation = rotVec
m.rotation = rotVec
if self.dorot:
self.sphere1.rotateY(-.07 * ft)
if self.window.events.isKeyPressed('w'):
self.camera.moveForward(movespeed)
elif self.window.events.isKeyPressed('s'):
self.camera.moveBackward(movespeed)
if self.window.events.isKeyPressed('a'):
self.camera.moveLeft(movespeed)
elif self.window.events.isKeyPressed('d'):
self.camera.moveRight(movespeed)
if self.window.events.isKeyPressed('up'):
self.camera.moveUp(movespeed)
elif self.window.events.isKeyPressed('down'):
self.camera.moveDown(movespeed)
if __name__ == '__main__':
runDemo(Demo(), 'Light Demo')
| mit | -1,060,894,738,424,444,500 | 40.160622 | 117 | 0.586984 | false |
PressLabs/gitfs | tests/views/test_commit.py | 1 | 10207 | # Copyright 2014-2016 Presslabs SRL
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from stat import S_IFDIR, S_IFREG
import pytest
from mock import MagicMock, patch
from pygit2 import GIT_FILEMODE_TREE
from fuse import FuseOSError
from gitfs.views.commit import CommitView
class TestCommitView(object):
def test_readdir_without_tree_name(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_entry.name = "entry"
mocked_commit.tree = [mocked_entry]
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
with patch('gitfs.views.commit.os') as mocked_os:
mocked_os.path.split.return_value = [None, None]
dirs = [entry for entry in view.readdir("/path", 0)]
assert dirs == [".", "..", "entry"]
mocked_os.path.split.assert_called_once_with("/path")
def test_readdir_with_tree_name(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_entry.name = "entry"
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object.return_value = [mocked_entry]
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
with patch('gitfs.views.commit.os') as mocked_os:
mocked_os.path.split.return_value = [None, True]
dirs = [entry for entry in view.readdir("/path", 0)]
assert dirs == [".", "..", "entry"]
mocked_os.path.split.assert_called_once_with("/path")
mocked_repo.get_git_object.assert_called_once_with("tree", "/path")
def test_access_with_missing_relative_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
assert view.access("path", "mode") == 0
def test_access_with_invalid_relative_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
view.relative_path = "/"
assert view.access("path", "mode") == 0
def test_access_with_invalid_path(self):
mocked_repo = MagicMock()
mocked_validation = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_validation.return_value = False
with patch("gitfs.views.commit.split_path_into_components") as split:
split.return_value = "elements"
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
view._validate_commit_path = mocked_validation
view.relative_path = "relative_path"
with pytest.raises(FuseOSError):
view.access("path", "mode")
split.assert_called_once_with("relative_path")
mocked_validation.assert_called_once_with("tree", "elements")
def test_getattr_with_no_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1")
assert view.getattr(False, 1) is None
def test_getattr_with_simple_path(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
stats = {
'st_mode': S_IFDIR | 0o555,
'st_nlink': 2
}
mocked_commit.tree = "tree"
mocked_commit.commit_time = "now+1"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object_default_stats.return_value = stats
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view.getattr("/", 1)
asserted_result = {
'st_uid': 1,
'st_gid': 1,
'st_mtime': "now+1",
'st_ctime': "now+1",
'st_mode': S_IFDIR | 0o555,
'st_nlink': 2
}
assert result == asserted_result
def test_getattr_with_invalid_object_type(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_commit.commit_time = "now+1"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object_default_stats.return_value = None
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
with pytest.raises(FuseOSError):
view.getattr("/path", 1)
args = ("tree", "/path")
mocked_repo.get_git_object_default_stats.assert_called_once_with(*args)
def test_getattr_for_a_valid_file(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_commit.commit_time = "now+1"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_git_object_default_stats.return_value = {
'st_mode': S_IFREG | 0o444,
'st_size': 10
}
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view.getattr("/path", 1)
asserted_result = {
'st_uid': 1,
'st_gid': 1,
'st_mtime': "now+1",
'st_ctime': "now+1",
'st_mode': S_IFREG | 0o444,
'st_size': 10
}
assert result == asserted_result
args = ("tree", "/path")
mocked_repo.get_git_object_default_stats.assert_called_once_with(*args)
def test_readlink(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_blob_data.return_value = "link value"
with patch('gitfs.views.commit.os') as mocked_os:
mocked_os.path.split.return_value = ["name", "another_name"]
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
assert view.readlink("/path") == "link value"
mocked_os.path.split.assert_called_once_with("/path")
mocked_repo.get_blob_data.assert_called_once_with("tree",
"another_name")
def test_read(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_repo.get_blob_data.return_value = [1, 1, 1]
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
assert view.read("/path", 1, 1, 0) == [1]
mocked_repo.get_blob_data.assert_called_once_with("tree", "/path")
def test_validate_commit_path_with_no_entries(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
assert view._validate_commit_path([], "") is False
def test_validate_commit_path_with_trees(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_entry.name = "simple_entry"
mocked_entry.filemode = GIT_FILEMODE_TREE
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view._validate_commit_path([mocked_entry], ["simple_entry"])
assert result is True
def test_validate_commit_path_with_more_than_one_entry(self):
mocked_repo = MagicMock()
mocked_commit = MagicMock()
mocked_entry = MagicMock()
mocked_second_entry = MagicMock()
mocked_commit.tree = "tree"
mocked_repo.revparse_single.return_value = mocked_commit
mocked_second_entry.id = 1
mocked_second_entry.name = "complex_entry"
mocked_second_entry.filemode = GIT_FILEMODE_TREE
mocked_entry.name = "simple_entry"
mocked_entry.filemode = GIT_FILEMODE_TREE
mocked_repo.__getitem__.return_value = [mocked_entry]
view = CommitView(repo=mocked_repo, commit_sha1="sha1",
mount_time="now", uid=1, gid=1)
result = view._validate_commit_path([mocked_second_entry,
mocked_entry],
["complex_entry",
"simple_entry"])
assert result is True
mocked_repo.__getitem__.assert_called_once_with(1)
def test_init_with_invalid_commit_sha1(self):
mocked_repo = MagicMock()
mocked_repo.revparse_single.side_effect = KeyError
with pytest.raises(FuseOSError):
CommitView(repo=mocked_repo, commit_sha1="sha1")
mocked_repo.revparse_single.assert_called_once_with("sha1")
| apache-2.0 | -7,080,370,445,022,447,000 | 35.067138 | 79 | 0.593416 | false |
googleapis/python-aiplatform | google/cloud/aiplatform_v1/services/job_service/client.py | 1 | 95207 | # -*- coding: utf-8 -*-
# Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from collections import OrderedDict
from distutils import util
import os
import re
from typing import Callable, Dict, Optional, Sequence, Tuple, Type, Union
import pkg_resources
from google.api_core import client_options as client_options_lib # type: ignore
from google.api_core import exceptions as core_exceptions # type: ignore
from google.api_core import gapic_v1 # type: ignore
from google.api_core import retry as retries # type: ignore
from google.auth import credentials as ga_credentials # type: ignore
from google.auth.transport import mtls # type: ignore
from google.auth.transport.grpc import SslCredentials # type: ignore
from google.auth.exceptions import MutualTLSChannelError # type: ignore
from google.oauth2 import service_account # type: ignore
from google.api_core import operation as gac_operation # type: ignore
from google.api_core import operation_async # type: ignore
from google.cloud.aiplatform_v1.services.job_service import pagers
from google.cloud.aiplatform_v1.types import batch_prediction_job
from google.cloud.aiplatform_v1.types import (
batch_prediction_job as gca_batch_prediction_job,
)
from google.cloud.aiplatform_v1.types import completion_stats
from google.cloud.aiplatform_v1.types import custom_job
from google.cloud.aiplatform_v1.types import custom_job as gca_custom_job
from google.cloud.aiplatform_v1.types import data_labeling_job
from google.cloud.aiplatform_v1.types import data_labeling_job as gca_data_labeling_job
from google.cloud.aiplatform_v1.types import encryption_spec
from google.cloud.aiplatform_v1.types import hyperparameter_tuning_job
from google.cloud.aiplatform_v1.types import (
hyperparameter_tuning_job as gca_hyperparameter_tuning_job,
)
from google.cloud.aiplatform_v1.types import job_service
from google.cloud.aiplatform_v1.types import job_state
from google.cloud.aiplatform_v1.types import machine_resources
from google.cloud.aiplatform_v1.types import manual_batch_tuning_parameters
from google.cloud.aiplatform_v1.types import operation as gca_operation
from google.cloud.aiplatform_v1.types import study
from google.protobuf import empty_pb2 # type: ignore
from google.protobuf import struct_pb2 # type: ignore
from google.protobuf import timestamp_pb2 # type: ignore
from google.rpc import status_pb2 # type: ignore
from google.type import money_pb2 # type: ignore
from .transports.base import JobServiceTransport, DEFAULT_CLIENT_INFO
from .transports.grpc import JobServiceGrpcTransport
from .transports.grpc_asyncio import JobServiceGrpcAsyncIOTransport
class JobServiceClientMeta(type):
"""Metaclass for the JobService client.
This provides class-level methods for building and retrieving
support objects (e.g. transport) without polluting the client instance
objects.
"""
_transport_registry = OrderedDict() # type: Dict[str, Type[JobServiceTransport]]
_transport_registry["grpc"] = JobServiceGrpcTransport
_transport_registry["grpc_asyncio"] = JobServiceGrpcAsyncIOTransport
def get_transport_class(cls, label: str = None,) -> Type[JobServiceTransport]:
"""Returns an appropriate transport class.
Args:
label: The name of the desired transport. If none is
provided, then the first transport in the registry is used.
Returns:
The transport class to use.
"""
# If a specific transport is requested, return that one.
if label:
return cls._transport_registry[label]
# No transport is requested; return the default (that is, the first one
# in the dictionary).
return next(iter(cls._transport_registry.values()))
class JobServiceClient(metaclass=JobServiceClientMeta):
"""A service for creating and managing Vertex AI's jobs."""
@staticmethod
def _get_default_mtls_endpoint(api_endpoint):
"""Converts api endpoint to mTLS endpoint.
Convert "*.sandbox.googleapis.com" and "*.googleapis.com" to
"*.mtls.sandbox.googleapis.com" and "*.mtls.googleapis.com" respectively.
Args:
api_endpoint (Optional[str]): the api endpoint to convert.
Returns:
str: converted mTLS api endpoint.
"""
if not api_endpoint:
return api_endpoint
mtls_endpoint_re = re.compile(
r"(?P<name>[^.]+)(?P<mtls>\.mtls)?(?P<sandbox>\.sandbox)?(?P<googledomain>\.googleapis\.com)?"
)
m = mtls_endpoint_re.match(api_endpoint)
name, mtls, sandbox, googledomain = m.groups()
if mtls or not googledomain:
return api_endpoint
if sandbox:
return api_endpoint.replace(
"sandbox.googleapis.com", "mtls.sandbox.googleapis.com"
)
return api_endpoint.replace(".googleapis.com", ".mtls.googleapis.com")
DEFAULT_ENDPOINT = "aiplatform.googleapis.com"
DEFAULT_MTLS_ENDPOINT = _get_default_mtls_endpoint.__func__( # type: ignore
DEFAULT_ENDPOINT
)
@classmethod
def from_service_account_info(cls, info: dict, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
info.
Args:
info (dict): The service account private key info.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_info(info)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
@classmethod
def from_service_account_file(cls, filename: str, *args, **kwargs):
"""Creates an instance of this client using the provided credentials
file.
Args:
filename (str): The path to the service account private key json
file.
args: Additional arguments to pass to the constructor.
kwargs: Additional arguments to pass to the constructor.
Returns:
JobServiceClient: The constructed client.
"""
credentials = service_account.Credentials.from_service_account_file(filename)
kwargs["credentials"] = credentials
return cls(*args, **kwargs)
from_service_account_json = from_service_account_file
@property
def transport(self) -> JobServiceTransport:
"""Returns the transport used by the client instance.
Returns:
JobServiceTransport: The transport used by the client
instance.
"""
return self._transport
@staticmethod
def batch_prediction_job_path(
project: str, location: str, batch_prediction_job: str,
) -> str:
"""Returns a fully-qualified batch_prediction_job string."""
return "projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}".format(
project=project,
location=location,
batch_prediction_job=batch_prediction_job,
)
@staticmethod
def parse_batch_prediction_job_path(path: str) -> Dict[str, str]:
"""Parses a batch_prediction_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/batchPredictionJobs/(?P<batch_prediction_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def custom_job_path(project: str, location: str, custom_job: str,) -> str:
"""Returns a fully-qualified custom_job string."""
return "projects/{project}/locations/{location}/customJobs/{custom_job}".format(
project=project, location=location, custom_job=custom_job,
)
@staticmethod
def parse_custom_job_path(path: str) -> Dict[str, str]:
"""Parses a custom_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/customJobs/(?P<custom_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def data_labeling_job_path(
project: str, location: str, data_labeling_job: str,
) -> str:
"""Returns a fully-qualified data_labeling_job string."""
return "projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}".format(
project=project, location=location, data_labeling_job=data_labeling_job,
)
@staticmethod
def parse_data_labeling_job_path(path: str) -> Dict[str, str]:
"""Parses a data_labeling_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/dataLabelingJobs/(?P<data_labeling_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def dataset_path(project: str, location: str, dataset: str,) -> str:
"""Returns a fully-qualified dataset string."""
return "projects/{project}/locations/{location}/datasets/{dataset}".format(
project=project, location=location, dataset=dataset,
)
@staticmethod
def parse_dataset_path(path: str) -> Dict[str, str]:
"""Parses a dataset path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/datasets/(?P<dataset>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def hyperparameter_tuning_job_path(
project: str, location: str, hyperparameter_tuning_job: str,
) -> str:
"""Returns a fully-qualified hyperparameter_tuning_job string."""
return "projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}".format(
project=project,
location=location,
hyperparameter_tuning_job=hyperparameter_tuning_job,
)
@staticmethod
def parse_hyperparameter_tuning_job_path(path: str) -> Dict[str, str]:
"""Parses a hyperparameter_tuning_job path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/hyperparameterTuningJobs/(?P<hyperparameter_tuning_job>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def model_path(project: str, location: str, model: str,) -> str:
"""Returns a fully-qualified model string."""
return "projects/{project}/locations/{location}/models/{model}".format(
project=project, location=location, model=model,
)
@staticmethod
def parse_model_path(path: str) -> Dict[str, str]:
"""Parses a model path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/models/(?P<model>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def trial_path(project: str, location: str, study: str, trial: str,) -> str:
"""Returns a fully-qualified trial string."""
return "projects/{project}/locations/{location}/studies/{study}/trials/{trial}".format(
project=project, location=location, study=study, trial=trial,
)
@staticmethod
def parse_trial_path(path: str) -> Dict[str, str]:
"""Parses a trial path into its component segments."""
m = re.match(
r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)/studies/(?P<study>.+?)/trials/(?P<trial>.+?)$",
path,
)
return m.groupdict() if m else {}
@staticmethod
def common_billing_account_path(billing_account: str,) -> str:
"""Returns a fully-qualified billing_account string."""
return "billingAccounts/{billing_account}".format(
billing_account=billing_account,
)
@staticmethod
def parse_common_billing_account_path(path: str) -> Dict[str, str]:
"""Parse a billing_account path into its component segments."""
m = re.match(r"^billingAccounts/(?P<billing_account>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_folder_path(folder: str,) -> str:
"""Returns a fully-qualified folder string."""
return "folders/{folder}".format(folder=folder,)
@staticmethod
def parse_common_folder_path(path: str) -> Dict[str, str]:
"""Parse a folder path into its component segments."""
m = re.match(r"^folders/(?P<folder>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_organization_path(organization: str,) -> str:
"""Returns a fully-qualified organization string."""
return "organizations/{organization}".format(organization=organization,)
@staticmethod
def parse_common_organization_path(path: str) -> Dict[str, str]:
"""Parse a organization path into its component segments."""
m = re.match(r"^organizations/(?P<organization>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_project_path(project: str,) -> str:
"""Returns a fully-qualified project string."""
return "projects/{project}".format(project=project,)
@staticmethod
def parse_common_project_path(path: str) -> Dict[str, str]:
"""Parse a project path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)$", path)
return m.groupdict() if m else {}
@staticmethod
def common_location_path(project: str, location: str,) -> str:
"""Returns a fully-qualified location string."""
return "projects/{project}/locations/{location}".format(
project=project, location=location,
)
@staticmethod
def parse_common_location_path(path: str) -> Dict[str, str]:
"""Parse a location path into its component segments."""
m = re.match(r"^projects/(?P<project>.+?)/locations/(?P<location>.+?)$", path)
return m.groupdict() if m else {}
def __init__(
self,
*,
credentials: Optional[ga_credentials.Credentials] = None,
transport: Union[str, JobServiceTransport, None] = None,
client_options: Optional[client_options_lib.ClientOptions] = None,
client_info: gapic_v1.client_info.ClientInfo = DEFAULT_CLIENT_INFO,
) -> None:
"""Instantiates the job service client.
Args:
credentials (Optional[google.auth.credentials.Credentials]): The
authorization credentials to attach to requests. These
credentials identify the application to the service; if none
are specified, the client will attempt to ascertain the
credentials from the environment.
transport (Union[str, JobServiceTransport]): The
transport to use. If set to None, a transport is chosen
automatically.
client_options (google.api_core.client_options.ClientOptions): Custom options for the
client. It won't take effect if a ``transport`` instance is provided.
(1) The ``api_endpoint`` property can be used to override the
default endpoint provided by the client. GOOGLE_API_USE_MTLS_ENDPOINT
environment variable can also be used to override the endpoint:
"always" (always use the default mTLS endpoint), "never" (always
use the default regular endpoint) and "auto" (auto switch to the
default mTLS endpoint if client certificate is present, this is
the default value). However, the ``api_endpoint`` property takes
precedence if provided.
(2) If GOOGLE_API_USE_CLIENT_CERTIFICATE environment variable
is "true", then the ``client_cert_source`` property can be used
to provide client certificate for mutual TLS transport. If
not provided, the default SSL client certificate will be used if
present. If GOOGLE_API_USE_CLIENT_CERTIFICATE is "false" or not
set, no client certificate will be used.
client_info (google.api_core.gapic_v1.client_info.ClientInfo):
The client info used to send a user-agent string along with
API requests. If ``None``, then default info will be used.
Generally, you only need to set this if you're developing
your own client library.
Raises:
google.auth.exceptions.MutualTLSChannelError: If mutual TLS transport
creation failed for any reason.
"""
if isinstance(client_options, dict):
client_options = client_options_lib.from_dict(client_options)
if client_options is None:
client_options = client_options_lib.ClientOptions()
# Create SSL credentials for mutual TLS if needed.
use_client_cert = bool(
util.strtobool(os.getenv("GOOGLE_API_USE_CLIENT_CERTIFICATE", "false"))
)
client_cert_source_func = None
is_mtls = False
if use_client_cert:
if client_options.client_cert_source:
is_mtls = True
client_cert_source_func = client_options.client_cert_source
else:
is_mtls = mtls.has_default_client_cert_source()
if is_mtls:
client_cert_source_func = mtls.default_client_cert_source()
else:
client_cert_source_func = None
# Figure out which api endpoint to use.
if client_options.api_endpoint is not None:
api_endpoint = client_options.api_endpoint
else:
use_mtls_env = os.getenv("GOOGLE_API_USE_MTLS_ENDPOINT", "auto")
if use_mtls_env == "never":
api_endpoint = self.DEFAULT_ENDPOINT
elif use_mtls_env == "always":
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
elif use_mtls_env == "auto":
if is_mtls:
api_endpoint = self.DEFAULT_MTLS_ENDPOINT
else:
api_endpoint = self.DEFAULT_ENDPOINT
else:
raise MutualTLSChannelError(
"Unsupported GOOGLE_API_USE_MTLS_ENDPOINT value. Accepted "
"values: never, auto, always"
)
# Save or instantiate the transport.
# Ordinarily, we provide the transport, but allowing a custom transport
# instance provides an extensibility point for unusual situations.
if isinstance(transport, JobServiceTransport):
# transport is a JobServiceTransport instance.
if credentials or client_options.credentials_file:
raise ValueError(
"When providing a transport instance, "
"provide its credentials directly."
)
if client_options.scopes:
raise ValueError(
"When providing a transport instance, provide its scopes "
"directly."
)
self._transport = transport
else:
Transport = type(self).get_transport_class(transport)
self._transport = Transport(
credentials=credentials,
credentials_file=client_options.credentials_file,
host=api_endpoint,
scopes=client_options.scopes,
client_cert_source_for_mtls=client_cert_source_func,
quota_project_id=client_options.quota_project_id,
client_info=client_info,
)
def create_custom_job(
self,
request: job_service.CreateCustomJobRequest = None,
*,
parent: str = None,
custom_job: gca_custom_job.CustomJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_custom_job.CustomJob:
r"""Creates a CustomJob. A created CustomJob right away
will be attempted to be run.
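
        Example (a minimal sketch; ``my-project`` and ``us-central1`` are
        placeholders, default credentials are assumed, and the ``CustomJob``
        below omits the worker pool spec a real job needs)::

            from google.cloud import aiplatform_v1

            client = aiplatform_v1.JobServiceClient()
            parent = client.common_location_path("my-project", "us-central1")
            custom_job = aiplatform_v1.CustomJob(display_name="example-job")
            response = client.create_custom_job(
                parent=parent, custom_job=custom_job,
            )
            print(response.name)
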
Args:
request (google.cloud.aiplatform_v1.types.CreateCustomJobRequest):
The request object. Request message for
[JobService.CreateCustomJob][google.cloud.aiplatform.v1.JobService.CreateCustomJob].
parent (str):
Required. The resource name of the Location to create
the CustomJob in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
custom_job (google.cloud.aiplatform_v1.types.CustomJob):
Required. The CustomJob to create.
This corresponds to the ``custom_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.CustomJob:
Represents a job that runs custom
workloads such as a Docker container or
a Python package. A CustomJob can have
multiple worker pools and each worker
pool can have its own machine and input
spec. A CustomJob will be cleaned up
once the job enters terminal state
(failed or succeeded).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, custom_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateCustomJobRequest):
request = job_service.CreateCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if custom_job is not None:
request.custom_job = custom_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_custom_job(
self,
request: job_service.GetCustomJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> custom_job.CustomJob:
r"""Gets a CustomJob.
Args:
request (google.cloud.aiplatform_v1.types.GetCustomJobRequest):
The request object. Request message for
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob].
name (str):
Required. The name of the CustomJob resource. Format:
``projects/{project}/locations/{location}/customJobs/{custom_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.CustomJob:
Represents a job that runs custom
workloads such as a Docker container or
a Python package. A CustomJob can have
multiple worker pools and each worker
pool can have its own machine and input
spec. A CustomJob will be cleaned up
once the job enters terminal state
(failed or succeeded).
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetCustomJobRequest):
request = job_service.GetCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_custom_jobs(
self,
request: job_service.ListCustomJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListCustomJobsPager:
r"""Lists CustomJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListCustomJobsRequest):
The request object. Request message for
[JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs].
parent (str):
Required. The resource name of the Location to list the
CustomJobs from. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListCustomJobsPager:
Response message for
[JobService.ListCustomJobs][google.cloud.aiplatform.v1.JobService.ListCustomJobs]
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListCustomJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListCustomJobsRequest):
request = job_service.ListCustomJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_custom_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListCustomJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_custom_job(
self,
request: job_service.DeleteCustomJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a CustomJob.
Args:
request (google.cloud.aiplatform_v1.types.DeleteCustomJobRequest):
The request object. Request message for
[JobService.DeleteCustomJob][google.cloud.aiplatform.v1.JobService.DeleteCustomJob].
name (str):
Required. The name of the CustomJob resource to be
deleted. Format:
``projects/{project}/locations/{location}/customJobs/{custom_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteCustomJobRequest):
request = job_service.DeleteCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_custom_job(
self,
request: job_service.CancelCustomJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a CustomJob. Starts asynchronous cancellation on the
CustomJob. The server makes a best effort to cancel the job, but
success is not guaranteed. Clients can use
[JobService.GetCustomJob][google.cloud.aiplatform.v1.JobService.GetCustomJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the CustomJob is not deleted; instead it becomes a
job with a
[CustomJob.error][google.cloud.aiplatform.v1.CustomJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[CustomJob.state][google.cloud.aiplatform.v1.CustomJob.state] is
set to ``CANCELLED``.
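
        Example (a minimal sketch; the resource name is a placeholder)::

            from google.cloud import aiplatform_v1

            client = aiplatform_v1.JobServiceClient()
            name = "projects/my-project/locations/us-central1/customJobs/123"
            client.cancel_custom_job(name=name)
            # Cancellation is asynchronous and best-effort; poll the job to
            # see whether it reaches the CANCELLED state.
            print(client.get_custom_job(name=name).state)
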
Args:
request (google.cloud.aiplatform_v1.types.CancelCustomJobRequest):
The request object. Request message for
[JobService.CancelCustomJob][google.cloud.aiplatform.v1.JobService.CancelCustomJob].
name (str):
Required. The name of the CustomJob to cancel. Format:
``projects/{project}/locations/{location}/customJobs/{custom_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelCustomJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelCustomJobRequest):
request = job_service.CancelCustomJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_custom_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_data_labeling_job(
self,
request: job_service.CreateDataLabelingJobRequest = None,
*,
parent: str = None,
data_labeling_job: gca_data_labeling_job.DataLabelingJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_data_labeling_job.DataLabelingJob:
r"""Creates a DataLabelingJob.
Args:
request (google.cloud.aiplatform_v1.types.CreateDataLabelingJobRequest):
The request object. Request message for
[JobService.CreateDataLabelingJob][google.cloud.aiplatform.v1.JobService.CreateDataLabelingJob].
parent (str):
Required. The parent of the DataLabelingJob. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
data_labeling_job (google.cloud.aiplatform_v1.types.DataLabelingJob):
Required. The DataLabelingJob to
create.
This corresponds to the ``data_labeling_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.DataLabelingJob:
DataLabelingJob is used to trigger a
human labeling job on unlabeled data
from the following Dataset:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, data_labeling_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateDataLabelingJobRequest):
request = job_service.CreateDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if data_labeling_job is not None:
request.data_labeling_job = data_labeling_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.create_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_data_labeling_job(
self,
request: job_service.GetDataLabelingJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> data_labeling_job.DataLabelingJob:
r"""Gets a DataLabelingJob.
Args:
request (google.cloud.aiplatform_v1.types.GetDataLabelingJobRequest):
The request object. Request message for
[JobService.GetDataLabelingJob][google.cloud.aiplatform.v1.JobService.GetDataLabelingJob].
name (str):
Required. The name of the DataLabelingJob. Format:
``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.DataLabelingJob:
DataLabelingJob is used to trigger a
human labeling job on unlabeled data
from the following Dataset:
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetDataLabelingJobRequest):
request = job_service.GetDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_data_labeling_jobs(
self,
request: job_service.ListDataLabelingJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListDataLabelingJobsPager:
r"""Lists DataLabelingJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListDataLabelingJobsRequest):
The request object. Request message for
[JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs].
parent (str):
Required. The parent of the DataLabelingJob. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListDataLabelingJobsPager:
Response message for
[JobService.ListDataLabelingJobs][google.cloud.aiplatform.v1.JobService.ListDataLabelingJobs].
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListDataLabelingJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListDataLabelingJobsRequest):
request = job_service.ListDataLabelingJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.list_data_labeling_jobs]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListDataLabelingJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_data_labeling_job(
self,
request: job_service.DeleteDataLabelingJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a DataLabelingJob.
Args:
request (google.cloud.aiplatform_v1.types.DeleteDataLabelingJobRequest):
The request object. Request message for
[JobService.DeleteDataLabelingJob][google.cloud.aiplatform.v1.JobService.DeleteDataLabelingJob].
name (str):
Required. The name of the DataLabelingJob to be deleted.
Format:
``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteDataLabelingJobRequest):
request = job_service.DeleteDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.delete_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_data_labeling_job(
self,
request: job_service.CancelDataLabelingJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a DataLabelingJob. Success of cancellation is
not guaranteed.
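
        Example (a minimal sketch; the resource name is a placeholder)::

            from google.cloud import aiplatform_v1

            client = aiplatform_v1.JobServiceClient()
            name = (
                "projects/my-project/locations/us-central1"
                "/dataLabelingJobs/123"
            )
            client.cancel_data_labeling_job(name=name)
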
Args:
request (google.cloud.aiplatform_v1.types.CancelDataLabelingJobRequest):
The request object. Request message for
[JobService.CancelDataLabelingJob][google.cloud.aiplatform.v1.JobService.CancelDataLabelingJob].
name (str):
Required. The name of the DataLabelingJob. Format:
``projects/{project}/locations/{location}/dataLabelingJobs/{data_labeling_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelDataLabelingJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelDataLabelingJobRequest):
request = job_service.CancelDataLabelingJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.cancel_data_labeling_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_hyperparameter_tuning_job(
self,
request: job_service.CreateHyperparameterTuningJobRequest = None,
*,
parent: str = None,
hyperparameter_tuning_job: gca_hyperparameter_tuning_job.HyperparameterTuningJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Creates a HyperparameterTuningJob
Args:
request (google.cloud.aiplatform_v1.types.CreateHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.CreateHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CreateHyperparameterTuningJob].
parent (str):
Required. The resource name of the Location to create
the HyperparameterTuningJob in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
hyperparameter_tuning_job (google.cloud.aiplatform_v1.types.HyperparameterTuningJob):
Required. The HyperparameterTuningJob
to create.
This corresponds to the ``hyperparameter_tuning_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
Represents a HyperparameterTuningJob.
A HyperparameterTuningJob has a Study
specification and multiple CustomJobs
with identical CustomJob specification.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, hyperparameter_tuning_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateHyperparameterTuningJobRequest):
request = job_service.CreateHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if hyperparameter_tuning_job is not None:
request.hyperparameter_tuning_job = hyperparameter_tuning_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_hyperparameter_tuning_job(
self,
request: job_service.GetHyperparameterTuningJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> hyperparameter_tuning_job.HyperparameterTuningJob:
r"""Gets a HyperparameterTuningJob
Args:
request (google.cloud.aiplatform_v1.types.GetHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob].
name (str):
Required. The name of the HyperparameterTuningJob
resource. Format:
``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.HyperparameterTuningJob:
Represents a HyperparameterTuningJob.
A HyperparameterTuningJob has a Study
specification and multiple CustomJobs
with identical CustomJob specification.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetHyperparameterTuningJobRequest):
request = job_service.GetHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.get_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_hyperparameter_tuning_jobs(
self,
request: job_service.ListHyperparameterTuningJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListHyperparameterTuningJobsPager:
r"""Lists HyperparameterTuningJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListHyperparameterTuningJobsRequest):
The request object. Request message for
[JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs].
parent (str):
Required. The resource name of the Location to list the
HyperparameterTuningJobs from. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListHyperparameterTuningJobsPager:
Response message for
[JobService.ListHyperparameterTuningJobs][google.cloud.aiplatform.v1.JobService.ListHyperparameterTuningJobs]
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListHyperparameterTuningJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListHyperparameterTuningJobsRequest):
request = job_service.ListHyperparameterTuningJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_hyperparameter_tuning_jobs
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListHyperparameterTuningJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_hyperparameter_tuning_job(
self,
request: job_service.DeleteHyperparameterTuningJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a HyperparameterTuningJob.
Args:
request (google.cloud.aiplatform_v1.types.DeleteHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.DeleteHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.DeleteHyperparameterTuningJob].
name (str):
Required. The name of the HyperparameterTuningJob
resource to be deleted. Format:
``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteHyperparameterTuningJobRequest):
request = job_service.DeleteHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_hyperparameter_tuning_job(
self,
request: job_service.CancelHyperparameterTuningJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a HyperparameterTuningJob. Starts asynchronous
cancellation on the HyperparameterTuningJob. The server makes a
best effort to cancel the job, but success is not guaranteed.
Clients can use
[JobService.GetHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.GetHyperparameterTuningJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On successful
cancellation, the HyperparameterTuningJob is not deleted;
instead it becomes a job with a
[HyperparameterTuningJob.error][google.cloud.aiplatform.v1.HyperparameterTuningJob.error]
value with a [google.rpc.Status.code][google.rpc.Status.code] of
1, corresponding to ``Code.CANCELLED``, and
[HyperparameterTuningJob.state][google.cloud.aiplatform.v1.HyperparameterTuningJob.state]
is set to ``CANCELLED``.
Args:
request (google.cloud.aiplatform_v1.types.CancelHyperparameterTuningJobRequest):
The request object. Request message for
[JobService.CancelHyperparameterTuningJob][google.cloud.aiplatform.v1.JobService.CancelHyperparameterTuningJob].
name (str):
Required. The name of the HyperparameterTuningJob to
cancel. Format:
``projects/{project}/locations/{location}/hyperparameterTuningJobs/{hyperparameter_tuning_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelHyperparameterTuningJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelHyperparameterTuningJobRequest):
request = job_service.CancelHyperparameterTuningJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.cancel_hyperparameter_tuning_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
def create_batch_prediction_job(
self,
request: job_service.CreateBatchPredictionJobRequest = None,
*,
parent: str = None,
batch_prediction_job: gca_batch_prediction_job.BatchPredictionJob = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gca_batch_prediction_job.BatchPredictionJob:
r"""Creates a BatchPredictionJob. A BatchPredictionJob
once created will right away be attempted to start.
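
        Example (a minimal sketch; identifiers are placeholders and the job
        omits the model, input and output configuration a real job needs)::

            from google.cloud import aiplatform_v1

            client = aiplatform_v1.JobServiceClient()
            parent = client.common_location_path("my-project", "us-central1")
            batch_job = aiplatform_v1.BatchPredictionJob(
                display_name="example-batch-prediction-job",
            )
            response = client.create_batch_prediction_job(
                parent=parent, batch_prediction_job=batch_job,
            )
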
Args:
request (google.cloud.aiplatform_v1.types.CreateBatchPredictionJobRequest):
The request object. Request message for
[JobService.CreateBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CreateBatchPredictionJob].
parent (str):
Required. The resource name of the Location to create
the BatchPredictionJob in. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
batch_prediction_job (google.cloud.aiplatform_v1.types.BatchPredictionJob):
Required. The BatchPredictionJob to
create.
This corresponds to the ``batch_prediction_job`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchPredictionJob:
A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
on multiple [input
instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
                If predictions for a significant portion of the
instances fail, the job may finish without attempting
predictions for all remaining instances.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent, batch_prediction_job])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CreateBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CreateBatchPredictionJobRequest):
request = job_service.CreateBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
if batch_prediction_job is not None:
request.batch_prediction_job = batch_prediction_job
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.create_batch_prediction_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def get_batch_prediction_job(
self,
request: job_service.GetBatchPredictionJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> batch_prediction_job.BatchPredictionJob:
r"""Gets a BatchPredictionJob
Args:
request (google.cloud.aiplatform_v1.types.GetBatchPredictionJobRequest):
The request object. Request message for
[JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob].
name (str):
Required. The name of the BatchPredictionJob resource.
Format:
``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.types.BatchPredictionJob:
A job that uses a [Model][google.cloud.aiplatform.v1.BatchPredictionJob.model] to produce predictions
on multiple [input
instances][google.cloud.aiplatform.v1.BatchPredictionJob.input_config].
                If predictions for a significant portion of the
instances fail, the job may finish without attempting
predictions for all remaining instances.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.GetBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.GetBatchPredictionJobRequest):
request = job_service.GetBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[self._transport.get_batch_prediction_job]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Done; return the response.
return response
def list_batch_prediction_jobs(
self,
request: job_service.ListBatchPredictionJobsRequest = None,
*,
parent: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> pagers.ListBatchPredictionJobsPager:
r"""Lists BatchPredictionJobs in a Location.
Args:
request (google.cloud.aiplatform_v1.types.ListBatchPredictionJobsRequest):
The request object. Request message for
[JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs].
parent (str):
Required. The resource name of the Location to list the
BatchPredictionJobs from. Format:
``projects/{project}/locations/{location}``
This corresponds to the ``parent`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.cloud.aiplatform_v1.services.job_service.pagers.ListBatchPredictionJobsPager:
Response message for
[JobService.ListBatchPredictionJobs][google.cloud.aiplatform.v1.JobService.ListBatchPredictionJobs]
Iterating over this object will yield results and
resolve additional pages automatically.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([parent])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.ListBatchPredictionJobsRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.ListBatchPredictionJobsRequest):
request = job_service.ListBatchPredictionJobsRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if parent is not None:
request.parent = parent
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.list_batch_prediction_jobs
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("parent", request.parent),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# This method is paged; wrap the response in a pager, which provides
# an `__iter__` convenience method.
response = pagers.ListBatchPredictionJobsPager(
method=rpc, request=request, response=response, metadata=metadata,
)
# Done; return the response.
return response
def delete_batch_prediction_job(
self,
request: job_service.DeleteBatchPredictionJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> gac_operation.Operation:
r"""Deletes a BatchPredictionJob. Can only be called on
jobs that already finished.
Args:
request (google.cloud.aiplatform_v1.types.DeleteBatchPredictionJobRequest):
The request object. Request message for
[JobService.DeleteBatchPredictionJob][google.cloud.aiplatform.v1.JobService.DeleteBatchPredictionJob].
name (str):
Required. The name of the BatchPredictionJob resource to
be deleted. Format:
``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
google.api_core.operation.Operation:
An object representing a long-running operation.
                The result type for the operation will be :class:`google.protobuf.empty_pb2.Empty`, a generic empty message that you can re-use to avoid defining duplicated
empty messages in your APIs. A typical example is to
use it as the request or the response type of an API
method. For instance:
service Foo {
rpc Bar(google.protobuf.Empty) returns
(google.protobuf.Empty);
}
                The JSON representation for Empty is an empty JSON
                object {}.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.DeleteBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.DeleteBatchPredictionJobRequest):
request = job_service.DeleteBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.delete_batch_prediction_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
response = rpc(request, retry=retry, timeout=timeout, metadata=metadata,)
# Wrap the response in an operation future.
response = gac_operation.from_gapic(
response,
self._transport.operations_client,
empty_pb2.Empty,
metadata_type=gca_operation.DeleteOperationMetadata,
)
# Done; return the response.
return response
def cancel_batch_prediction_job(
self,
request: job_service.CancelBatchPredictionJobRequest = None,
*,
name: str = None,
retry: retries.Retry = gapic_v1.method.DEFAULT,
timeout: float = None,
metadata: Sequence[Tuple[str, str]] = (),
) -> None:
r"""Cancels a BatchPredictionJob.
Starts asynchronous cancellation on the BatchPredictionJob. The
server makes the best effort to cancel the job, but success is
not guaranteed. Clients can use
[JobService.GetBatchPredictionJob][google.cloud.aiplatform.v1.JobService.GetBatchPredictionJob]
or other methods to check whether the cancellation succeeded or
whether the job completed despite cancellation. On a successful
        cancellation, the BatchPredictionJob is not deleted; instead its
[BatchPredictionJob.state][google.cloud.aiplatform.v1.BatchPredictionJob.state]
is set to ``CANCELLED``. Any files already outputted by the job
are not deleted.
Args:
request (google.cloud.aiplatform_v1.types.CancelBatchPredictionJobRequest):
The request object. Request message for
[JobService.CancelBatchPredictionJob][google.cloud.aiplatform.v1.JobService.CancelBatchPredictionJob].
name (str):
Required. The name of the BatchPredictionJob to cancel.
Format:
``projects/{project}/locations/{location}/batchPredictionJobs/{batch_prediction_job}``
This corresponds to the ``name`` field
on the ``request`` instance; if ``request`` is provided, this
should not be set.
retry (google.api_core.retry.Retry): Designation of what errors, if any,
should be retried.
timeout (float): The timeout for this request.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
"""
# Create or coerce a protobuf request object.
# Sanity check: If we got a request object, we should *not* have
# gotten any keyword arguments that map to the request.
has_flattened_params = any([name])
if request is not None and has_flattened_params:
raise ValueError(
"If the `request` argument is set, then none of "
"the individual field arguments should be set."
)
# Minor optimization to avoid making a copy if the user passes
# in a job_service.CancelBatchPredictionJobRequest.
# There's no risk of modifying the input as we've already verified
# there are no flattened fields.
if not isinstance(request, job_service.CancelBatchPredictionJobRequest):
request = job_service.CancelBatchPredictionJobRequest(request)
# If we have keyword arguments corresponding to fields on the
# request, apply these.
if name is not None:
request.name = name
# Wrap the RPC method; this adds retry and timeout information,
# and friendly error handling.
rpc = self._transport._wrapped_methods[
self._transport.cancel_batch_prediction_job
]
# Certain fields should be provided within the metadata header;
# add these here.
metadata = tuple(metadata) + (
gapic_v1.routing_header.to_grpc_metadata((("name", request.name),)),
)
# Send the request.
rpc(
request, retry=retry, timeout=timeout, metadata=metadata,
)
try:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo(
gapic_version=pkg_resources.get_distribution(
"google-cloud-aiplatform",
).version,
)
except pkg_resources.DistributionNotFound:
DEFAULT_CLIENT_INFO = gapic_v1.client_info.ClientInfo()
__all__ = ("JobServiceClient",)
| apache-2.0 | 988,844,452,061,504,900 | 43.53087 | 171 | 0.619902 | false |
sserrot/champion_relationships | venv/Lib/site-packages/prompt_toolkit/layout/menus.py | 1 | 25401 | import math
from itertools import zip_longest
from typing import (
TYPE_CHECKING,
Callable,
Dict,
Iterable,
List,
Optional,
Tuple,
TypeVar,
Union,
cast,
)
from prompt_toolkit.application.current import get_app
from prompt_toolkit.buffer import CompletionState
from prompt_toolkit.completion import Completion
from prompt_toolkit.data_structures import Point
from prompt_toolkit.filters import (
Condition,
FilterOrBool,
has_completions,
is_done,
to_filter,
)
from prompt_toolkit.formatted_text import (
StyleAndTextTuples,
fragment_list_width,
to_formatted_text,
)
from prompt_toolkit.key_binding.key_processor import KeyPressEvent
from prompt_toolkit.layout.utils import explode_text_fragments
from prompt_toolkit.mouse_events import MouseEvent, MouseEventType
from prompt_toolkit.utils import get_cwidth
from .containers import ConditionalContainer, HSplit, ScrollOffsets, Window
from .controls import GetLinePrefixCallable, UIContent, UIControl
from .dimension import Dimension
from .margins import ScrollbarMargin
if TYPE_CHECKING:
from prompt_toolkit.key_binding.key_bindings import KeyBindings
NotImplementedOrNone = object
__all__ = [
"CompletionsMenu",
"MultiColumnCompletionsMenu",
]
E = KeyPressEvent
class CompletionsMenuControl(UIControl):
"""
Helper for drawing the complete menu to the screen.
:param scroll_offset: Number (integer) representing the preferred amount of
completions to be displayed before and after the current one. When this
is a very high number, the current completion will be shown in the
middle most of the time.
"""
# Preferred minimum size of the menu control.
# The CompletionsMenu class defines a width of 8, and there is a scrollbar
    # of 1.
MIN_WIDTH = 7
def has_focus(self) -> bool:
return False
def preferred_width(self, max_available_width: int) -> Optional[int]:
complete_state = get_app().current_buffer.complete_state
if complete_state:
menu_width = self._get_menu_width(500, complete_state)
menu_meta_width = self._get_menu_meta_width(500, complete_state)
return menu_width + menu_meta_width
else:
return 0
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
complete_state = get_app().current_buffer.complete_state
if complete_state:
return len(complete_state.completions)
else:
return 0
def create_content(self, width: int, height: int) -> UIContent:
"""
Create a UIContent object for this control.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state:
completions = complete_state.completions
index = complete_state.complete_index # Can be None!
# Calculate width of completions menu.
menu_width = self._get_menu_width(width, complete_state)
menu_meta_width = self._get_menu_meta_width(
width - menu_width, complete_state
)
show_meta = self._show_meta(complete_state)
def get_line(i: int) -> StyleAndTextTuples:
c = completions[i]
is_current_completion = i == index
result = _get_menu_item_fragments(
c, is_current_completion, menu_width, space_after=True
)
if show_meta:
result += self._get_menu_item_meta_fragments(
c, is_current_completion, menu_meta_width
)
return result
return UIContent(
get_line=get_line,
cursor_position=Point(x=0, y=index or 0),
line_count=len(completions),
)
return UIContent()
def _show_meta(self, complete_state: CompletionState) -> bool:
"""
Return ``True`` if we need to show a column with meta information.
"""
return any(c.display_meta_text for c in complete_state.completions)
def _get_menu_width(self, max_width: int, complete_state: CompletionState) -> int:
"""
Return the width of the main column.
"""
return min(
max_width,
max(
self.MIN_WIDTH,
max(get_cwidth(c.display_text) for c in complete_state.completions) + 2,
),
)
def _get_menu_meta_width(
self, max_width: int, complete_state: CompletionState
) -> int:
"""
Return the width of the meta column.
"""
def meta_width(completion: Completion) -> int:
return get_cwidth(completion.display_meta_text)
if self._show_meta(complete_state):
return min(
max_width, max(meta_width(c) for c in complete_state.completions) + 2
)
else:
return 0
def _get_menu_item_meta_fragments(
self, completion: Completion, is_current_completion: bool, width: int
) -> StyleAndTextTuples:
if is_current_completion:
style_str = "class:completion-menu.meta.completion.current"
else:
style_str = "class:completion-menu.meta.completion"
text, tw = _trim_formatted_text(completion.display_meta, width - 2)
padding = " " * (width - 1 - tw)
return to_formatted_text(
cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)],
style=style_str,
)
def mouse_handler(self, mouse_event: MouseEvent) -> "NotImplementedOrNone":
"""
Handle mouse events: clicking and scrolling.
"""
b = get_app().current_buffer
if mouse_event.event_type == MouseEventType.MOUSE_UP:
# Select completion.
b.go_to_completion(mouse_event.position.y)
b.complete_state = None
elif mouse_event.event_type == MouseEventType.SCROLL_DOWN:
            # Wheel scrolled down: move to the next completion.
b.complete_next(count=3, disable_wrap_around=True)
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
            # Wheel scrolled up: move to the previous completion.
b.complete_previous(count=3, disable_wrap_around=True)
return None
def _get_menu_item_fragments(
completion: Completion,
is_current_completion: bool,
width: int,
space_after: bool = False,
) -> StyleAndTextTuples:
"""
Get the style/text tuples for a menu item, styled and trimmed to the given
width.
"""
if is_current_completion:
style_str = "class:completion-menu.completion.current %s %s" % (
completion.style,
completion.selected_style,
)
else:
style_str = "class:completion-menu.completion " + completion.style
text, tw = _trim_formatted_text(
completion.display, (width - 2 if space_after else width - 1)
)
padding = " " * (width - 1 - tw)
return to_formatted_text(
cast(StyleAndTextTuples, []) + [("", " ")] + text + [("", padding)],
style=style_str,
)
def _trim_formatted_text(
formatted_text: StyleAndTextTuples, max_width: int
) -> Tuple[StyleAndTextTuples, int]:
"""
Trim the text to `max_width`, append dots when the text is too long.
Returns (text, width) tuple.
"""
width = fragment_list_width(formatted_text)
# When the text is too wide, trim it.
if width > max_width:
result = [] # Text fragments.
remaining_width = max_width - 3
for style_and_ch in explode_text_fragments(formatted_text):
ch_width = get_cwidth(style_and_ch[1])
if ch_width <= remaining_width:
result.append(style_and_ch)
remaining_width -= ch_width
else:
break
result.append(("", "..."))
return result, max_width - remaining_width
else:
return formatted_text, width
class CompletionsMenu(ConditionalContainer):
# NOTE: We use a pretty big z_index by default. Menus are supposed to be
# above anything else. We also want to make sure that the content is
# visible at the point where we draw this menu.
def __init__(
self,
max_height: Optional[int] = None,
scroll_offset: Union[int, Callable[[], int]] = 0,
extra_filter: FilterOrBool = True,
display_arrows: FilterOrBool = False,
z_index: int = 10 ** 8,
) -> None:
extra_filter = to_filter(extra_filter)
display_arrows = to_filter(display_arrows)
super().__init__(
content=Window(
content=CompletionsMenuControl(),
width=Dimension(min=8),
height=Dimension(min=1, max=max_height),
scroll_offsets=ScrollOffsets(top=scroll_offset, bottom=scroll_offset),
right_margins=[ScrollbarMargin(display_arrows=display_arrows)],
dont_extend_width=True,
style="class:completion-menu",
z_index=z_index,
),
# Show when there are completions but not at the point we are
# returning the input.
filter=has_completions & ~is_done & extra_filter,
)
class MultiColumnCompletionMenuControl(UIControl):
"""
Completion menu that displays all the completions in several columns.
When there are more completions than space for them to be displayed, an
arrow is shown on the left or right side.
`min_rows` indicates how many rows will be available in any possible case.
When this is larger than one, it will try to use less columns and more
rows until this value is reached.
Be careful passing in a too big value, if less than the given amount of
rows are available, more columns would have been required, but
`preferred_width` doesn't know about that and reports a too small value.
This results in less completions displayed and additional scrolling.
(It's a limitation of how the layout engine currently works: first the
widths are calculated, then the heights.)
:param suggested_max_column_width: The suggested max width of a column.
The column can still be bigger than this, but if there is place for two
columns of this width, we will display two columns. This to avoid that
if there is one very wide completion, that it doesn't significantly
reduce the amount of columns.
"""
_required_margin = 3 # One extra padding on the right + space for arrows.
def __init__(self, min_rows: int = 3, suggested_max_column_width: int = 30) -> None:
assert min_rows >= 1
self.min_rows = min_rows
self.suggested_max_column_width = suggested_max_column_width
self.scroll = 0
# Info of last rendering.
self._rendered_rows = 0
self._rendered_columns = 0
self._total_columns = 0
self._render_pos_to_completion: Dict[Tuple[int, int], Completion] = {}
self._render_left_arrow = False
self._render_right_arrow = False
self._render_width = 0
def reset(self) -> None:
self.scroll = 0
def has_focus(self) -> bool:
return False
def preferred_width(self, max_available_width: int) -> Optional[int]:
"""
Preferred width: prefer to use at least min_rows, but otherwise as much
as possible horizontally.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state is None:
return 0
column_width = self._get_column_width(complete_state)
result = int(
column_width
* math.ceil(len(complete_state.completions) / float(self.min_rows))
)
# When the desired width is still more than the maximum available,
# reduce by removing columns until we are less than the available
# width.
while (
result > column_width
and result > max_available_width - self._required_margin
):
result -= column_width
return result + self._required_margin
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
"""
Preferred height: as much as needed in order to display all the completions.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state is None:
return 0
column_width = self._get_column_width(complete_state)
column_count = max(1, (width - self._required_margin) // column_width)
return int(math.ceil(len(complete_state.completions) / float(column_count)))
def create_content(self, width: int, height: int) -> UIContent:
"""
Create a UIContent object for this menu.
"""
complete_state = get_app().current_buffer.complete_state
if complete_state is None:
return UIContent()
column_width = self._get_column_width(complete_state)
self._render_pos_to_completion = {}
_T = TypeVar("_T")
def grouper(
n: int, iterable: Iterable[_T], fillvalue: Optional[_T] = None
) -> Iterable[List[_T]]:
" grouper(3, 'ABCDEFG', 'x') --> ABC DEF Gxx "
args = [iter(iterable)] * n
return zip_longest(fillvalue=fillvalue, *args)
def is_current_completion(completion: Completion) -> bool:
" Returns True when this completion is the currently selected one. "
return (
complete_state is not None
and complete_state.complete_index is not None
and c == complete_state.current_completion
)
# Space required outside of the regular columns, for displaying the
# left and right arrow.
HORIZONTAL_MARGIN_REQUIRED = 3
# There should be at least one column, but it cannot be wider than
# the available width.
column_width = min(width - HORIZONTAL_MARGIN_REQUIRED, column_width)
# However, when the columns tend to be very wide, because there are
# some very wide entries, shrink it anyway.
if column_width > self.suggested_max_column_width:
# `column_width` can still be bigger that `suggested_max_column_width`,
# but if there is place for two columns, we divide by two.
column_width //= column_width // self.suggested_max_column_width
visible_columns = max(1, (width - self._required_margin) // column_width)
columns_ = list(grouper(height, complete_state.completions))
rows_ = list(zip(*columns_))
# Make sure the current completion is always visible: update scroll offset.
selected_column = (complete_state.complete_index or 0) // height
self.scroll = min(
selected_column, max(self.scroll, selected_column - visible_columns + 1)
)
render_left_arrow = self.scroll > 0
render_right_arrow = self.scroll < len(rows_[0]) - visible_columns
# Write completions to screen.
fragments_for_line = []
for row_index, row in enumerate(rows_):
fragments: StyleAndTextTuples = []
middle_row = row_index == len(rows_) // 2
# Draw left arrow if we have hidden completions on the left.
if render_left_arrow:
fragments.append(("class:scrollbar", "<" if middle_row else " "))
elif render_right_arrow:
# Reserve one column empty space. (If there is a right
# arrow right now, there can be a left arrow as well.)
fragments.append(("", " "))
# Draw row content.
for column_index, c in enumerate(row[self.scroll :][:visible_columns]):
if c is not None:
fragments += _get_menu_item_fragments(
c, is_current_completion(c), column_width, space_after=False
)
# Remember render position for mouse click handler.
for x in range(column_width):
self._render_pos_to_completion[
(column_index * column_width + x, row_index)
] = c
else:
fragments.append(("class:completion", " " * column_width))
# Draw trailing padding for this row.
# (_get_menu_item_fragments only returns padding on the left.)
if render_left_arrow or render_right_arrow:
fragments.append(("class:completion", " "))
# Draw right arrow if we have hidden completions on the right.
if render_right_arrow:
fragments.append(("class:scrollbar", ">" if middle_row else " "))
elif render_left_arrow:
fragments.append(("class:completion", " "))
# Add line.
fragments_for_line.append(
to_formatted_text(fragments, style="class:completion-menu")
)
self._rendered_rows = height
self._rendered_columns = visible_columns
self._total_columns = len(columns_)
self._render_left_arrow = render_left_arrow
self._render_right_arrow = render_right_arrow
self._render_width = (
column_width * visible_columns + render_left_arrow + render_right_arrow + 1
)
def get_line(i: int) -> StyleAndTextTuples:
return fragments_for_line[i]
return UIContent(get_line=get_line, line_count=len(rows_))
def _get_column_width(self, complete_state: CompletionState) -> int:
"""
Return the width of each column.
"""
return max(get_cwidth(c.display_text) for c in complete_state.completions) + 1
def mouse_handler(self, mouse_event: MouseEvent) -> Optional["NotImplemented"]:
"""
Handle scroll and click events.
"""
b = get_app().current_buffer
def scroll_left() -> None:
b.complete_previous(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = max(0, self.scroll - 1)
def scroll_right() -> None:
b.complete_next(count=self._rendered_rows, disable_wrap_around=True)
self.scroll = min(
self._total_columns - self._rendered_columns, self.scroll + 1
)
if mouse_event.event_type == MouseEventType.SCROLL_DOWN:
scroll_right()
elif mouse_event.event_type == MouseEventType.SCROLL_UP:
scroll_left()
elif mouse_event.event_type == MouseEventType.MOUSE_UP:
x = mouse_event.position.x
y = mouse_event.position.y
# Mouse click on left arrow.
if x == 0:
if self._render_left_arrow:
scroll_left()
# Mouse click on right arrow.
elif x == self._render_width - 1:
if self._render_right_arrow:
scroll_right()
# Mouse click on completion.
else:
completion = self._render_pos_to_completion.get((x, y))
if completion:
b.apply_completion(completion)
return None
def get_key_bindings(self) -> "KeyBindings":
"""
Expose key bindings that handle the left/right arrow keys when the menu
is displayed.
"""
from prompt_toolkit.key_binding.key_bindings import KeyBindings
kb = KeyBindings()
@Condition
def filter() -> bool:
" Only handle key bindings if this menu is visible. "
app = get_app()
complete_state = app.current_buffer.complete_state
# There need to be completions, and one needs to be selected.
if complete_state is None or complete_state.complete_index is None:
return False
# This menu needs to be visible.
return any(window.content == self for window in app.layout.visible_windows)
def move(right: bool = False) -> None:
buff = get_app().current_buffer
complete_state = buff.complete_state
if complete_state is not None and complete_state.complete_index is not None:
# Calculate new complete index.
new_index = complete_state.complete_index
if right:
new_index += self._rendered_rows
else:
new_index -= self._rendered_rows
if 0 <= new_index < len(complete_state.completions):
buff.go_to_completion(new_index)
# NOTE: the is_global is required because the completion menu will
# never be focussed.
@kb.add("left", is_global=True, filter=filter)
def _left(event: E) -> None:
move()
@kb.add("right", is_global=True, filter=filter)
def _right(event: E) -> None:
move(True)
return kb
class MultiColumnCompletionsMenu(HSplit):
"""
Container that displays the completions in several columns.
When `show_meta` (a :class:`~prompt_toolkit.filters.Filter`) evaluates
to True, it shows the meta information at the bottom.
"""
def __init__(
self,
min_rows: int = 3,
suggested_max_column_width: int = 30,
show_meta: FilterOrBool = True,
extra_filter: FilterOrBool = True,
z_index: int = 10 ** 8,
) -> None:
show_meta = to_filter(show_meta)
extra_filter = to_filter(extra_filter)
# Display filter: show when there are completions but not at the point
# we are returning the input.
full_filter = has_completions & ~is_done & extra_filter
@Condition
def any_completion_has_meta() -> bool:
complete_state = get_app().current_buffer.complete_state
return complete_state is not None and any(
c.display_meta for c in complete_state.completions
)
# Create child windows.
# NOTE: We don't set style='class:completion-menu' to the
# `MultiColumnCompletionMenuControl`, because this is used in a
# Float that is made transparent, and the size of the control
# doesn't always correspond exactly with the size of the
# generated content.
completions_window = ConditionalContainer(
content=Window(
content=MultiColumnCompletionMenuControl(
min_rows=min_rows,
suggested_max_column_width=suggested_max_column_width,
),
width=Dimension(min=8),
height=Dimension(min=1),
),
filter=full_filter,
)
meta_window = ConditionalContainer(
content=Window(content=_SelectedCompletionMetaControl()),
filter=show_meta & full_filter & any_completion_has_meta,
)
# Initialise split.
super().__init__([completions_window, meta_window], z_index=z_index)
class _SelectedCompletionMetaControl(UIControl):
"""
Control that shows the meta information of the selected completion.
"""
def preferred_width(self, max_available_width: int) -> Optional[int]:
"""
Report the width of the longest meta text as the preferred width of this control.
It could be that we use less width, but this way, we're sure that the
layout doesn't change when we select another completion (E.g. that
completions are suddenly shown in more or fewer columns.)
"""
app = get_app()
if app.current_buffer.complete_state:
state = app.current_buffer.complete_state
return 2 + max(get_cwidth(c.display_meta_text) for c in state.completions)
else:
return 0
def preferred_height(
self,
width: int,
max_available_height: int,
wrap_lines: bool,
get_line_prefix: Optional[GetLinePrefixCallable],
) -> Optional[int]:
return 1
def create_content(self, width: int, height: int) -> UIContent:
fragments = self._get_text_fragments()
def get_line(i: int) -> StyleAndTextTuples:
return fragments
return UIContent(get_line=get_line, line_count=1 if fragments else 0)
def _get_text_fragments(self) -> StyleAndTextTuples:
style = "class:completion-menu.multi-column-meta"
state = get_app().current_buffer.complete_state
if (
state
and state.current_completion
and state.current_completion.display_meta_text
):
return to_formatted_text(
cast(StyleAndTextTuples, [("", " ")])
+ state.current_completion.display_meta
+ [("", " ")],
style=style,
)
return []
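

# A hedged usage sketch (not part of prompt_toolkit itself): the menus above
# are normally attached to a layout as a float anchored at the cursor, so the
# completion list pops up next to the text being edited. ``Float`` and
# ``FloatContainer`` are assumed to be importable from ``.containers`` like
# the other container classes used above; ``body`` is any window holding the
# editable text. ``MultiColumnCompletionsMenu(min_rows=3)`` could be swapped
# in for the single-column menu.
def _example_completions_float(body: Window) -> "FloatContainer":  # pragma: no cover
    from .containers import Float, FloatContainer  # assumed available

    return FloatContainer(
        content=body,
        floats=[
            Float(
                xcursor=True,
                ycursor=True,
                content=CompletionsMenu(max_height=16, scroll_offset=1),
            )
        ],
    )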
| mit | -9,167,235,929,098,738,000 | 34.279167 | 89 | 0.592063 | false |
puttarajubr/commcare-hq | corehq/apps/userreports/tests/test_data_source_repeats.py | 1 | 4835 | import json
import os
import datetime
from django.test import SimpleTestCase, TestCase
from corehq.apps.userreports.models import DataSourceConfiguration
from corehq.apps.userreports.sql import IndicatorSqlAdapter, create_engine
from corehq.apps.userreports.sql.connection import connection_manager
DOC_ID = 'repeat-id'
DAY_OF_WEEK = 'monday'
class RepeatDataSourceTestMixin(object):
def setUp(self):
folder = os.path.join(os.path.dirname(__file__), 'data', 'configs')
sample_file = os.path.join(folder, 'data_source_with_repeat.json')
with open(sample_file) as f:
self.config = DataSourceConfiguration.wrap(json.loads(f.read()))
class RepeatDataSourceConfigurationTest(RepeatDataSourceTestMixin, SimpleTestCase):
def test_test_doc_matches(self):
self.assertTrue(self.config.filter(_test_doc()))
def test_empty_doc_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc()))
def test_missing_property_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc(form={})))
def test_null_property_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc(form={"time_logs": None})))
def test_empty_list_property_no_rows(self):
self.assertEqual([], self.config.get_all_values(_test_doc(form={"time_logs": []})))
def test_dict_property(self):
start = datetime.datetime.utcnow()
end = start + datetime.timedelta(minutes=30)
rows = self.config.get_all_values(_test_doc(form={"time_logs": {
"start_time": start, "end_time": end, "person": "al"
}}))
self.assertEqual(1, len(rows))
doc_id_ind, inserted_at, repeat_iteration, start_ind, end_ind, person_ind, created_base_ind = rows[0]
self.assertEqual(DOC_ID, doc_id_ind.value)
self.assertEqual(0, repeat_iteration.value)
self.assertEqual(start, start_ind.value)
self.assertEqual(end, end_ind.value)
self.assertEqual('al', person_ind.value)
self.assertEqual(DAY_OF_WEEK, created_base_ind.value)
def test_list_property(self):
now = datetime.datetime.utcnow()
one_hour = datetime.timedelta(hours=1)
logs = [
{"start_time": now, "end_time": now + one_hour, "person": "al"},
{"start_time": now + one_hour, "end_time": now + (one_hour * 2), "person": "chris"},
{"start_time": now + (one_hour * 2), "end_time": now + (one_hour * 3), "person": "katie"},
]
rows = self.config.get_all_values(_test_doc(form={"time_logs": logs}))
self.assertEqual(len(logs), len(rows))
for i, row in enumerate(rows):
doc_id_ind, inserted_at, repeat_iteration, start_ind, end_ind, person_ind, created_base_ind = row
self.assertEqual(DOC_ID, doc_id_ind.value)
self.assertEqual(logs[i]['start_time'], start_ind.value)
self.assertEqual(i, repeat_iteration.value)
self.assertEqual(logs[i]['end_time'], end_ind.value)
self.assertEqual(logs[i]['person'], person_ind.value)
self.assertEqual(DAY_OF_WEEK, created_base_ind.value)
class RepeatDataSourceBuildTest(RepeatDataSourceTestMixin, TestCase):
def tearDown(self):
connection_manager.dispose_all()
def test_table_population(self):
engine = create_engine()
adapter = IndicatorSqlAdapter(self.config)
# Delete and create table
adapter.rebuild_table()
# Create a doc
now = datetime.datetime.now()
one_hour = datetime.timedelta(hours=1)
logs = [
{"start_time": now, "end_time": now + one_hour, "person": "al"},
{"start_time": now + one_hour, "end_time": now + (one_hour * 2), "person": "chris"},
{"start_time": now + (one_hour * 2), "end_time": now + (one_hour * 3), "person": "katie"},
]
doc = _test_doc(form={'time_logs': logs})
# Save this document into the table
adapter.save(doc)
# Get rows from the table
with engine.connect() as connection:
rows = connection.execute(adapter.get_table().select())
retrieved_logs = [
{
'start_time': r[3],
'end_time': r[4],
'person': r[5],
} for r in rows
]
# Check those rows against the expected result
self.assertItemsEqual(
retrieved_logs,
logs,
"The repeat data saved in the data source table did not match the expected data!"
)
def _test_doc(**extras):
test_doc = {
"_id": DOC_ID,
"domain": "user-reports",
"doc_type": "XFormInstance",
"created": DAY_OF_WEEK
}
test_doc.update(extras)
return test_doc
| bsd-3-clause | -7,507,453,100,842,215,000 | 36.773438 | 109 | 0.608273 | false |
USCSoftwareEngineeringClub/pyceratOpsRecs | src/segmentation.py | 1 | 1716 | import sys
import numpy as np
import cv2
"""
Authors: Lawton Mizell, Tyler Hall
Routine for image segmentation and
detection of numbers and arithmetic operations.
Requires OpenCV 3.0+.
"""
def segment(im):
"""
:param im:
Image to detect digits and operations in
"""
gray = cv2.cvtColor(im,cv2.COLOR_BGR2GRAY) #grayscale
blur = cv2.GaussianBlur(gray,(5,5),0) #smooth image to reduce noise
#adaptive thresholding for different lighting conditions
thresh = cv2.adaptiveThreshold(blur,255,1,1,11,2)
################# Now finding Contours ###################
image,contours, hierarchy = cv2.findContours(thresh,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
samples = np.empty((0,100))
keys = [i for i in range(48,58)]
for cnt in contours:
if cv2.contourArea(cnt) > 20:
[x,y,w,h] = cv2.boundingRect(cnt)
#Draw bounding box for it, then resize to 10x10, and store its pixel values in an array
if h>1:
cv2.rectangle(im,(x,y),(x+w,y+h),(0,0,255),2)
roi = thresh[y:y+h,x:x+w]
roismall = cv2.resize(roi,(10,10))
cv2.imshow('detecting',im)
key = cv2.waitKey(0)
if key == 27: # (escape to quit)
sys.exit()
else: #press any key to continue
sample = roismall.reshape((1,100))
samples = np.append(samples,sample,0)
print "segmentation complete"
cv2.imwrite('data/seg_result.png',im)
np.savetxt('data/generalsamples.data',samples)
def main():
im = cv2.imread('data/img.png')
segment(im)
if __name__ == "__main__":
main()
| mit | -2,607,811,824,055,019,000 | 26.677419 | 99 | 0.572844 | false |
schristakidis/p2ner | p2ner/components/plugin/upnp/upnp/mhelper.py | 1 | 12028 | from twisted.internet.protocol import DatagramProtocol
# Copyright 2012 Loris Corazza, Sakis Christakidis
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from twisted.internet import reactor,defer
from twisted.application.internet import MulticastServer
import socket
import base64
import mupnp
class upnpUI(object):
def __init__(self, ip,gateway,proto):
self.hp = mupnp.upnp(False,False,None);
self.hp.UNIQ = True
self.hp.VERBOSE = False
self.hostIP=ip
self.gateway=gateway
self.proto=proto
self.listenerMulticast=Listener(self.hp.ip,self.hp.port,'MULTICAST',self,gateway)
#Actively search for UPNP devices
def msearch(self,argc,argv):
defaultST = "upnp:rootdevice"
st = "schemas-upnp-org"
myip = ''
lport = self.hp.port
if argc >= 3:
if argc == 4:
st = argv[1]
searchType = argv[2]
searchName = argv[3]
else:
searchType = argv[1]
searchName = argv[2]
st = "urn:%s:%s:%s:%s" % (st,searchType,searchName,self.hp.UPNP_VERSION.split('.')[0])
else:
st = defaultST
#Build the request
request = "M-SEARCH * HTTP/1.1\r\n"\
"HOST:%s:%d\r\n"\
"ST:%s\r\n" % (self.hp.ip,self.hp.port,st)
for header,value in self.hp.msearchHeaders.iteritems():
request += header + ':' + value + "\r\n"
request += "\r\n"
print "Entering discovery mode for '%s', Ctl+C to stop..." % st
print ''
self.listenerMulticast.send(request)
def host(self,argc,argv):
hp=self.hp
indexList = []
indexError = "Host index out of range. Try the 'host list' command to get a list of known hosts"
if argc >= 2:
action = argv[1]
if action == 'list':
ret={}
if len(hp.ENUM_HOSTS) == 0:
print "No known hosts - try running the 'msearch' or 'pcap' commands"
return
for index,hostInfo in hp.ENUM_HOSTS.iteritems():
print "\t[%d] %s" % (index,hostInfo['name'])
ip=hostInfo['name'].split(':')[0]
ret[ip]=index
return ret
elif action == 'details':
hostInfo = False
if argc == 3:
try:
index = int(argv[2])
except Exception, e:
print indexError
return
if index < 0 or index >= len(hp.ENUM_HOSTS):
print indexError
return
hostInfo = hp.ENUM_HOSTS[index]
try:
#If this host data is already complete, just display it
if hostInfo['dataComplete'] == True:
hp.showCompleteHostInfo(index,False)
else:
print "Can't show host info because I don't have it. Please run 'host get %d'" % index
except KeyboardInterrupt, e:
pass
return
elif action == 'summary':
if argc == 3:
try:
index = int(argv[2])
hostInfo = hp.ENUM_HOSTS[index]
except:
print indexError
return
print 'Host:',hostInfo['name']
print 'XML File:',hostInfo['xmlFile']
for deviceName,deviceData in hostInfo['deviceList'].iteritems():
print deviceName
for k,v in deviceData.iteritems():
try:
v.has_key(False)
except:
print "\t%s: %s" % (k,v)
print ''
return
elif action == 'info':
output = hp.ENUM_HOSTS
dataStructs = []
for arg in argv[2:]:
try:
arg = int(arg)
except:
pass
output = output[arg]
try:
for k,v in output.iteritems():
try:
v.has_key(False)
dataStructs.append(k)
except:
print k,':',v
continue
except:
print output
ret=[]
for struct in dataStructs:
print struct,': {}'
ret.append(struct)
return ret
elif action == 'get':
hostInfo = False
if argc == 3:
try:
index = int(argv[2])
except:
print indexError
return
if index < 0 or index >= len(hp.ENUM_HOSTS):
print "Host index out of range. Try the 'host list' command to get a list of known hosts"
return
else:
hostInfo = hp.ENUM_HOSTS[index]
#If this host data is already complete, just display it
if hostInfo['dataComplete'] == True:
print 'Data for this host has already been enumerated!'
return
try:
#Get extended device and service information
if hostInfo != False:
print "Requesting device and service info for %s (this could take a few seconds)..." % hostInfo['name']
print ''
if hostInfo['dataComplete'] == False:
(xmlHeaders,xmlData) = hp.getXML(hostInfo['xmlFile'])
if xmlData == False:
print 'Failed to request host XML file:',hostInfo['xmlFile']
return
if hp.getHostInfo(xmlData,xmlHeaders,index) == False:
print "Failed to get device/service info for %s..." % hostInfo['name']
return
print 'Host data enumeration complete!'
return
except KeyboardInterrupt, e:
return
elif action == 'send':
#Send SOAP requests
index = False
inArgCounter = 0
numReqArgs = 6
extraArgs = argc-numReqArgs
try:
index = int(argv[2])
except:
print indexError
return
deviceName = argv[3]
serviceName = argv[4]
actionName = argv[5]
hostInfo = hp.ENUM_HOSTS[index]
actionArgs = False
sendArgs = {}
retTags = []
controlURL = False
fullServiceName = False
#Get the service control URL and full service name
try:
controlURL = hostInfo['proto'] + hostInfo['name']
controlURL2 = hostInfo['deviceList'][deviceName]['services'][serviceName]['controlURL']
if not controlURL.endswith('/') and not controlURL2.startswith('/'):
controlURL += '/'
controlURL += controlURL2
except Exception,e:
print 'Caught exception:',e
print "Are you sure you've run 'host get %d' and specified the correct service name?" % index
return 2
#Get action info
try:
actionArgs = hostInfo['deviceList'][deviceName]['services'][serviceName]['actions'][actionName]['arguments']
fullServiceName = hostInfo['deviceList'][deviceName]['services'][serviceName]['fullName']
except Exception,e:
print 'Caught exception:',e
print "Are you sure you've specified the correct action?"
return 2
extraArgsUsed = 0
for argName,argVals in actionArgs.iteritems():
actionStateVar = argVals['relatedStateVariable']
stateVar = hostInfo['deviceList'][deviceName]['services'][serviceName]['serviceStateVariables'][actionStateVar]
if argVals['direction'].lower() == 'in':
if extraArgs-extraArgsUsed > 0:
arg = argv[numReqArgs+extraArgsUsed]
print "Using ", arg, " for ", argName
sendArgs[argName] = (arg,stateVar['dataType'])
extraArgsUsed += 1
else:
retTags.append((argName,stateVar['dataType']))
#print 'Requesting',controlURL
soapResponse = hp.sendSOAP(hostInfo['name'],fullServiceName,controlURL,actionName,sendArgs)
if soapResponse != False:
#It's easier to just parse this ourselves...
ret={0:0}
for (tag,dataType) in retTags:
tagValue = hp.extractSingleTag(soapResponse,tag)
if dataType == 'bin.base64' and tagValue != None:
tagValue = base64.decodestring(tagValue)
print tag,':',tagValue
ret[tag]=tagValue
return ret
else:
return False
return
return
def discover(self):
self.d=defer.Deferred()
self.listenerMulticast.startListeningMulticast()
reactor.callLater(0.1,self.msearch,0,[])
return self.d
def gotResponse(self,data):
f=self.hp.parseSSDPInfo(data,False,False)
if f:
self.listenerMulticast.stopListening()
ret=self.host(2,['host','list'])
self.hostnum=ret[self.gateway[0]]
self.host(3,['host','get','0'])
try:
ret=self.host(6 ,['host', 'info', '0', 'deviceList','WANConnectionDevice', 'services'])
except:
self.d.errback('upnp device not compatible')
return
self.type='WANPPPConnection'
if self.type not in ret:
self.type='WANIPConnection'
self.d.callback(True)
else:
#self.d.errback("couldn't parse response")
pass
#print 'problemmmmmmmmmmmmmmmmmmmmmmmmmmmmmmm'
"""
self.host(3,['host','get','0'])
self.host(8,['host', 'info' ,'0', 'deviceList' ,'WANConnectionDevice'])
# self.host(8,['host', 'info' ,'0', 'deviceList' ,'WANConnectionDevice' ,'services' ,'WANPPPConnection' ,'actions' ])
# self.host(6, ['host', 'send', '0', 'WANConnectionDevice', 'WANPPPConnection', 'GetUserName'])
self.host(14,['host', 'send', '0', 'WANConnectionDevice', 'WANPPPConnection', 'AddPortMapping', 'P2NER UDP for control messages', '0', self.hostIP, '1', self.control_port,'', 'UDP', self.control_port])
self.host(14,['host', 'send', '0', 'WANConnectionDevice', 'WANPPPConnection', 'AddPortMapping', 'P2NER UDP for data transfer', '0', self.hostIP, '1', self.data_port,'', 'UDP', self.data_port])
# self.host(9 ,['host', 'send', '0', 'WANConnectionDevice', 'WANPPPConnection', 'GetSpecificPortMappingEntry', '9100', '', 'UDP'])
# self.host(9 ,['host', 'send', '0', 'WANConnectionDevice', 'WANPPPConnection', 'DeletePortMapping', 'UDP', '9100', ''])
# self.host(9 ,['host', 'send', '0', 'WANConnectionDevice', 'WANPPPConnection', 'GetSpecificPortMappingEntry', '9100', '', 'UDP'])
# self.d.callback('ok')
"""
def upnpFailed(self):
self.d.errback("no upnp device found after 15 seconds")
def getSpecificPortMapping(self,port,exPort):
#self.host(3,['host','get','0'])
ret=self.host(9 ,['host', 'send', self.hostnum, 'WANConnectionDevice', self.type, 'GetSpecificPortMappingEntry', exPort, '', self.proto])
if ret==2:
return (2,)
elif ret:
if str(self.hostIP)!=str(ret['NewInternalClient']):
print 'port is forwarded for another peer'
return (3,str(ret['NewInternalClient']))
if int(ret['NewInternalPort'])!=port:
print 'port is mapped to another port'
return (4,int(ret['NewInternalPort']))
else:
return (0,)
else:
return (1,)
def addPortMapping(self,port,exPort):
#self.host(8,['host', 'info' ,'0', 'deviceList' ,'WANConnectionDevice'])
ret=self.host(14,['host', 'send', self.hostnum, 'WANConnectionDevice', self.type, 'AddPortMapping', str(exPort), '0', self.hostIP, '1',exPort,'', self.proto, port])
if ret and ret!=2:
return (True,exPort)
else:
return (False,exPort)
class Listener(DatagramProtocol):
def __init__(self,ip,port,interface,controler,gateway):
self.ip=ip
self.port=port
self.controler=controler
self.interface=interface
self.upnpFound=False
self.failed=False
self.gateway=gateway
def datagramReceived(self, data, (host, port)):
#print "received to interface %s from %s:%d\n %r" % (self.interface, host, port, data)
if host in self.gateway:
#print "received to interface %s from %s:%d\n %r" % (self.interface, host, port, data)
#print "gateway found ",host
self.upnpFound=True
self.controler.gotResponse(data)
def send(self,data):
reactor.callLater(15,self.check)
self.sendData(data)
def sendData(self,data):
if not self.failed and not self.upnpFound:
#print "sending data"
#print data
self.transport.write(data, (self.ip, self.port))
reactor.callLater(0.5,self.sendData,data)
def check(self):
if not self.upnpFound:
self.controler.upnpFailed()
self.failed=True
def startListeningMulticast(self):
self.sock=reactor.listenMulticast(1910,self)
self.sock.joinGroup(self.ip,socket.INADDR_ANY)
def stopListening(self):
self.sock.stopListening()
pass
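
# Hedged usage sketch (not part of the original module): how the pieces above
# are typically driven from Twisted. The local IP, gateway address, ports and
# protocol below are placeholders; only upnpUI's constructor signature and the
# discover()/getSpecificPortMapping()/addPortMapping() calls come from the
# code above.
def _example_map_port():
    upnp = upnpUI('192.168.1.10', ['192.168.1.1'], 'UDP')
    def onDiscovered(result):
        # Ask the gateway whether external port 50000 is already mapped for
        # this host, and request a mapping if it is not.
        status = upnp.getSpecificPortMapping(50000, 50000)
        if status[0] != 0:
            print upnp.addPortMapping(50000, 50000)
        reactor.stop()
    def onFailure(failure):
        print failure
        reactor.stop()
    upnp.discover().addCallbacks(onDiscovered, onFailure)
    reactor.run()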
| apache-2.0 | 3,044,898,464,426,626,600 | 29.221106 | 216 | 0.639924 | false |
EderSantana/seya | tests/test_apply.py | 1 | 1111 | # encoding: utf-8
"""Test seya.layers.recurrent module"""
from __future__ import print_function
import unittest
import numpy as np
import theano
from numpy.testing import assert_allclose
from keras import backend as K
from keras.layers.core import Dense
from keras.models import Sequential
from seya.utils import apply_model
floatX = K.common._FLOATX
class TestApply(unittest.TestCase):
"""Test apply methods"""
def test_apply_model(self):
"""Test keras.models.Sequential.__call__"""
nb_samples, input_dim, output_dim = 3, 10, 5
model = Sequential()
model.add(Dense(output_dim=output_dim, input_dim=input_dim))
model.compile('sgd', 'mse')
X = K.placeholder(ndim=2)
Y = apply_model(model, X)
F = theano.function([X], Y)
x = np.random.randn(nb_samples, input_dim).astype(floatX)
y1 = F(x)
y2 = model.predict(x)
# results of __call__ should match model.predict
assert_allclose(y1, y2)
if __name__ == '__main__':
theano.config.exception_verbosity = 'high'
unittest.main(verbosity=2)
| bsd-3-clause | 8,042,624,922,846,563,000 | 24.837209 | 68 | 0.648965 | false |
tiagoprn/devops | rofi/shutdown.py | 1 | 1278 | #!/usr/bin/env python3
import glob
import logging
import os
import sys
from rofi import Rofi
CURRENT_SCRIPT_NAME = os.path.splitext(os.path.basename(__file__))[0]
LOG_FORMAT = (
'[%(asctime)s PID %(process)s '
'%(filename)s:%(lineno)s - %(funcName)s()] '
'%(levelname)s -> \n'
'%(message)s\n'
)
# Configure the logging to console. Works from python 3.3+
logging.basicConfig(
format=LOG_FORMAT,
level=logging.INFO,
handlers=[logging.StreamHandler(sys.stdout)],
)
ACTIONS = [
('lock screen', 'lock_with_comic.sh',),
('switch user', 'dm-tool switch-to-greeter',),
('logoff', 'logoff.sh',),
('shutdown', 'sudo /sbin/shutdown -h now',),
('restart', 'sudo /sbin/shutdown -r now',),
]
if __name__ == "__main__":
actions_list = [element[0] for element in ACTIONS]
rofi_client = Rofi()
selected, keyboard_key = rofi_client.select(
'CHOOSE YOUR DESTINY', actions_list
)
logging.info(f'keyboard_key pressed={keyboard_key}')
if keyboard_key == -1:
logging.info('cancelled')
rofi_client.exit_with_error('Cancelled, nothing to be done.')
logging.info(f'selected={selected}')
command = ACTIONS[selected][1]
logging.info(f'Running command: {command}')
os.system(command)
| mit | -6,539,320,683,599,238,000 | 24.058824 | 69 | 0.630673 | false |
ennoborg/gramps | gramps/gui/configure.py | 1 | 68770 | #
# Gramps - a GTK+/GNOME based genealogy program
#
# Copyright (C) 2000-2007 Donald N. Allingham
# Copyright (C) 2008 Raphael Ackermann
# Copyright (C) 2010 Benny Malengier
# Copyright (C) 2010 Nick Hall
# Copyright (C) 2012 Doug Blank <[email protected]>
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
#-------------------------------------------------------------------------
#
# Standard python modules
#
#-------------------------------------------------------------------------
import random
import os
from xml.sax.saxutils import escape
import collections
#-------------------------------------------------------------------------
#
# GTK/Gnome modules
#
#-------------------------------------------------------------------------
from gi.repository import GObject
from gi.repository import Gdk
from gi.repository import Gtk
from gi.repository import Pango
#-------------------------------------------------------------------------
#
# gramps modules
#
#-------------------------------------------------------------------------
from gramps.gen.config import config
from gramps.gen.const import GRAMPS_LOCALE as glocale
from gramps.gen.const import HOME_DIR, URL_WIKISTRING
from gramps.gen.datehandler import get_date_formats
from gramps.gen.display.name import displayer as _nd
from gramps.gen.display.name import NameDisplayError
from gramps.gen.utils.alive import update_constants
from gramps.gen.utils.file import media_path
from gramps.gen.utils.keyword import (get_keywords, get_translation_from_keyword,
get_translations, get_keyword_from_translation)
from gramps.gen.lib import Date, FamilyRelType
from gramps.gen.lib import Name, Surname, NameOriginType
from .managedwindow import ManagedWindow
from .widgets import MarkupLabel, BasicLabel
from .dialog import ErrorDialog, QuestionDialog2, OkDialog
from .glade import Glade
from gramps.gen.plug.utils import available_updates
from .plug import PluginWindows
from gramps.gen.errors import WindowActiveError
from .spell import HAVE_GTKSPELL
from gramps.gen.constfunc import win
_ = glocale.translation.gettext
#-------------------------------------------------------------------------
#
# Constants
#
#-------------------------------------------------------------------------
_surname_styles = [
_("Father's surname"),
_("None"),
_("Combination of mother's and father's surname"),
_("Icelandic style"),
]
# column numbers for the 'name format' model
COL_NUM = 0
COL_NAME = 1
COL_FMT = 2
COL_EXPL = 3
#-------------------------------------------------------------------------
#
#
#
#-------------------------------------------------------------------------
class DisplayNameEditor(ManagedWindow):
def __init__(self, uistate, dbstate, track, dialog):
# Assumes that there are two methods: dialog.name_changed_check(),
# and dialog._build_custom_name_ui()
ManagedWindow.__init__(self, uistate, track, DisplayNameEditor)
self.dialog = dialog
self.dbstate = dbstate
self.set_window(
Gtk.Dialog(title=_('Display Name Editor')),
None, _('Display Name Editor'), None)
self.window.add_button(_('_Close'), Gtk.ResponseType.CLOSE)
self.setup_configs('interface.displaynameeditor', 820, 550)
grid = self.dialog._build_custom_name_ui()
label = Gtk.Label(label=_("""The following keywords are replaced with the appropriate name parts:<tt>
<b>Given</b> - given name (first name) <b>Surname</b> - surnames (with prefix and connectors)
<b>Title</b> - title (Dr., Mrs.) <b>Suffix</b> - suffix (Jr., Sr.)
<b>Call</b> - call name <b>Nickname</b> - nick name
<b>Initials</b>- first letters of given <b>Common</b> - nick name, call, or first of given
<b>Prefix</b> - all prefixes (von, de)
Surnames:
<b>Rest</b> - non primary surnames <b>Notpatronymic</b>- all surnames, except pa/matronymic & primary
<b>Familynick</b>- family nick name <b>Rawsurnames</b> - surnames (no prefixes and connectors)
<b>Primary, Primary[pre] or [sur] or [con]</b>- full primary surname, prefix, surname only, connector
<b>Patronymic, or [pre] or [sur] or [con]</b> - full pa/matronymic surname, prefix, surname only, connector
</tt>
UPPERCASE keyword forces uppercase. Extra parentheses, commas are removed. Other text appears literally.
<b>Example</b>: Dr. Edwin Jose von der Smith and Weston Wilson Sr ("Ed") - Underhills
<i>Edwin Jose</i>: Given, <i>von der</i>: Prefix, <i>Smith</i> and <i>Weston</i>: Primary, <i>and</i>: [con], <i>Wilson</i>: Patronymic,
<i>Dr.</i>: Title, <i>Sr</i>: Suffix, <i>Ed</i>: Nickname, <i>Underhills</i>: Familynick, <i>Jose</i>: Call.
"""))
label.set_use_markup(True)
self.window.vbox.pack_start(label, False, True, 0)
self.window.vbox.pack_start(grid, True, True, 0)
self.window.connect('response', self.close)
self.show()
def close(self, *obj):
self.dialog.name_changed_check()
ManagedWindow.close(self, *obj)
def build_menu_names(self, obj):
return (_(" Name Editor"), None)
#-------------------------------------------------------------------------
#
# ConfigureDialog
#
#-------------------------------------------------------------------------
class ConfigureDialog(ManagedWindow):
"""
Base class for configuration dialogs. They provide a Notebook, to which
    pages are added with configuration options, and a Close button.
    On close, the config file on which the dialog works is saved to disk, and
    a callback is called.
"""
def __init__(self, uistate, dbstate, configure_page_funcs, configobj,
configmanager,
dialogtitle=_("Preferences"), on_close=None):
"""
Set up a configuration dialog
:param uistate: a DisplayState instance
:param dbstate: a DbState instance
:param configure_page_funcs: a list of function that return a tuple
(str, Gtk.Widget). The string is used as label for the
configuration page, and the widget as the content of the
configuration page
:param configobj: the unique object that is configured, it must be
identifiable (id(configobj)). If the configure dialog of the
configobj is already open, a WindowActiveError will be
raised. Grab this exception in the calling method
:param configmanager: a configmanager object. Several convenience
methods are present in ConfigureDialog to set up widgets that
write changes directly via this configmanager.
:param dialogtitle: the title of the configuration dialog
:param on_close: callback that is called on close
"""
self.dbstate = dbstate
self.__config = configmanager
ManagedWindow.__init__(self, uistate, [], configobj)
self.set_window(
Gtk.Dialog(title=dialogtitle),
None, dialogtitle, None)
self.window.add_button(_('_Close'), Gtk.ResponseType.CLOSE)
self.panel = Gtk.Notebook()
self.panel.set_scrollable(True)
self.window.vbox.pack_start(self.panel, True, True, 0)
self.__on_close = on_close
self.window.connect('response', self.done)
self.__setup_pages(configure_page_funcs)
self.show()
def __setup_pages(self, configure_page_funcs):
"""
This method builds the notebookpages in the panel
"""
if isinstance(configure_page_funcs, collections.Callable):
pages = configure_page_funcs()
else:
pages = configure_page_funcs
for func in pages:
labeltitle, widget = func(self)
self.panel.append_page(widget, MarkupLabel(labeltitle))
def done(self, obj, value):
if self.__on_close:
self.__on_close()
self.close()
def update_int_entry(self, obj, constant):
"""
:param obj: an object with get_text method that should contain an
integer
:param constant: the config setting to which the integer value must be
saved
"""
try:
self.__config.set(constant, int(obj.get_text()))
except:
print("WARNING: ignoring invalid value for '%s'" % constant)
def update_markup_entry(self, obj, constant):
"""
:param obj: an object with get_text method
:param constant: the config setting to which the text value must be
saved
"""
try:
obj.get_text() % 'test_markup'
except TypeError:
print("WARNING: ignoring invalid value for '%s'" % constant)
ErrorDialog(
_("Invalid or incomplete format definition."),
obj.get_text(), parent=self.window)
obj.set_text('<b>%s</b>')
except ValueError:
print("WARNING: ignoring invalid value for '%s'" % constant)
ErrorDialog(
_("Invalid or incomplete format definition."),
obj.get_text(), parent=self.window)
obj.set_text('<b>%s</b>')
self.__config.set(constant, obj.get_text())
def update_entry(self, obj, constant):
"""
:param obj: an object with get_text method
:param constant: the config setting to which the text value must be
saved
"""
self.__config.set(constant, obj.get_text())
def update_color(self, obj, constant, color_hex_label):
rgba = obj.get_rgba()
hexval = "#%02x%02x%02x" % (int(rgba.red * 255),
int(rgba.green * 255),
int(rgba.blue * 255))
color_hex_label.set_text(hexval)
self.__config.set(constant, hexval)
def update_checkbox(self, obj, constant, config=None):
if not config:
config = self.__config
config.set(constant, obj.get_active())
def update_radiobox(self, obj, constant):
self.__config.set(constant, obj.get_active())
def update_combo(self, obj, constant):
"""
:param obj: the ComboBox object
:param constant: the config setting to which the value must be saved
"""
self.__config.set(constant, obj.get_active())
def update_slider(self, obj, constant):
"""
:param obj: the HScale object
:param constant: the config setting to which the value must be saved
"""
self.__config.set(constant, int(obj.get_value()))
def update_spinner(self, obj, constant):
"""
:param obj: the SpinButton object
:param constant: the config setting to which the value must be saved
"""
self.__config.set(constant, int(obj.get_value()))
def add_checkbox(self, grid, label, index, constant, start=1, stop=9,
config=None, extra_callback=None):
if not config:
config = self.__config
checkbox = Gtk.CheckButton(label=label)
checkbox.set_active(config.get(constant))
checkbox.connect('toggled', self.update_checkbox, constant, config)
if extra_callback:
checkbox.connect('toggled', extra_callback)
grid.attach(checkbox, start, index, stop - start, 1)
return checkbox
def add_radiobox(self, grid, label, index, constant, group, column,
config=None):
if not config:
config = self.__config
radiobox = Gtk.RadioButton.new_with_mnemonic_from_widget(group, label)
if config.get(constant) == True:
radiobox.set_active(True)
radiobox.connect('toggled', self.update_radiobox, constant)
grid.attach(radiobox, column, index, 1, 1)
return radiobox
def add_text(self, grid, label, index, config=None, line_wrap=True):
if not config:
config = self.__config
text = Gtk.Label()
text.set_line_wrap(line_wrap)
text.set_halign(Gtk.Align.START)
text.set_text(label)
grid.attach(text, 1, index, 8, 1)
def add_path_box(self, grid, label, index, entry, path, callback_label,
callback_sel, config=None):
""" Add an entry to give in path and a select button to open a
dialog.
Changing entry calls callback_label
Clicking open button call callback_sel
"""
if not config:
config = self.__config
lwidget = BasicLabel(_("%s: ") % label) # needed for French, else ignore
hbox = Gtk.Box()
if path:
entry.set_text(path)
entry.connect('changed', callback_label)
btn = Gtk.Button()
btn.connect('clicked', callback_sel)
image = Gtk.Image()
image.set_from_icon_name('document-open', Gtk.IconSize.BUTTON)
image.show()
btn.add(image)
hbox.pack_start(entry, True, True, 0)
hbox.pack_start(btn, False, False, 0)
hbox.set_hexpand(True)
grid.attach(lwidget, 1, index, 1, 1)
grid.attach(hbox, 2, index, 1, 1)
def add_entry(self, grid, label, index, constant, callback=None,
config=None, col_attach=0, localized_config=True):
if not config:
config = self.__config
if not callback:
callback = self.update_entry
if label:
lwidget = BasicLabel(_("%s: ") % label) # translators: for French
entry = Gtk.Entry()
if localized_config:
entry.set_text(config.get(constant))
else: # it needs localizing
entry.set_text(_(config.get(constant)))
entry.connect('changed', callback, constant)
entry.set_hexpand(True)
if label:
grid.attach(lwidget, col_attach, index, 1, 1)
grid.attach(entry, col_attach+1, index, 1, 1)
else:
grid.attach(entry, col_attach, index, 1, 1)
return entry
def add_pos_int_entry(self, grid, label, index, constant, callback=None,
config=None, col_attach=1, helptext=''):
""" entry field for positive integers
"""
if not config:
config = self.__config
lwidget = BasicLabel(_("%s: ") % label) # needed for French, else ignore
entry = Gtk.Entry()
entry.set_text(str(config.get(constant)))
entry.set_tooltip_markup(helptext)
entry.set_hexpand(True)
if callback:
entry.connect('changed', callback, constant)
grid.attach(lwidget, col_attach, index, 1, 1)
grid.attach(entry, col_attach+1, index, 1, 1)
def add_color(self, grid, label, index, constant, config=None, col=0):
if not config:
config = self.__config
lwidget = BasicLabel(_("%s: ") % label) # needed for French, else ignore
hexval = config.get(constant)
color = Gdk.color_parse(hexval)
entry = Gtk.ColorButton(color=color)
color_hex_label = BasicLabel(hexval)
color_hex_label.set_hexpand(True)
entry.connect('color-set', self.update_color, constant, color_hex_label)
grid.attach(lwidget, col, index, 1, 1)
grid.attach(entry, col+1, index, 1, 1)
grid.attach(color_hex_label, col+2, index, 1, 1)
return entry
def add_combo(self, grid, label, index, constant, opts, callback=None,
config=None, valueactive=False, setactive=None):
"""
A drop-down list allowing selection from a number of fixed options.
:param opts: A list of options. Each option is a tuple containing an
integer code and a textual description.
If valueactive = True, the constant stores the value, not the position
in the list
"""
if not config:
config = self.__config
if not callback:
callback = self.update_combo
lwidget = BasicLabel(_("%s: ") % label) # needed for French, else ignore
store = Gtk.ListStore(int, str)
for item in opts:
store.append(item)
combo = Gtk.ComboBox(model=store)
cell = Gtk.CellRendererText()
combo.pack_start(cell, True)
combo.add_attribute(cell, 'text', 1)
if valueactive:
val = config.get(constant)
pos = 0
for nr, item in enumerate(opts):
if item[-1] == val:
pos = nr
break
combo.set_active(pos)
else:
if setactive is None:
combo.set_active(config.get(constant))
else:
combo.set_active(setactive)
combo.connect('changed', callback, constant)
combo.set_hexpand(True)
grid.attach(lwidget, 1, index, 1, 1)
grid.attach(combo, 2, index, 1, 1)
return combo
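    # Illustrative sketch of a typical add_combo call; the grid, row index,
    # config key 'preferences.example-choice' and option codes below are
    # hypothetical placeholders, not real settings:
    #
    #   opts = [(0, _("Never")), (1, _("Always"))]
    #   combo = self.add_combo(grid, _('Example choice'), row,
    #                          'preferences.example-choice', opts)
    #
    # With valueactive=True the stored config value is compared against each
    # option's last element instead of being used as a position in the list.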
def add_slider(self, grid, label, index, constant, range, callback=None,
config=None, width=1):
"""
A slider allowing the selection of an integer within a specified range.
:param range: A tuple containing the minimum and maximum allowed values.
"""
if not config:
config = self.__config
if not callback:
callback = self.update_slider
lwidget = BasicLabel(_("%s: ") % label) # needed for French, else ignore
adj = Gtk.Adjustment(value=config.get(constant), lower=range[0],
upper=range[1], step_increment=1,
page_increment=0, page_size=0)
slider = Gtk.Scale(adjustment=adj)
slider.set_digits(0)
slider.set_value_pos(Gtk.PositionType.BOTTOM)
slider.connect('value-changed', callback, constant)
grid.attach(lwidget, 1, index, 1, 1)
grid.attach(slider, 2, index, width, 1)
return slider
def add_spinner(self, grid, label, index, constant, range, callback=None,
config=None):
"""
A spinner allowing the selection of an integer within a specified range.
:param range: A tuple containing the minimum and maximum allowed values.
"""
if not config:
config = self.__config
if not callback:
callback = self.update_spinner
lwidget = BasicLabel(_("%s: ") % label) # needed for French, else ignore
adj = Gtk.Adjustment(value=config.get(constant), lower=range[0],
upper=range[1], step_increment=1,
page_increment=0, page_size=0)
spinner = Gtk.SpinButton(adjustment=adj, climb_rate=0.0, digits=0)
spinner.connect('value-changed', callback, constant)
spinner.set_hexpand(True)
grid.attach(lwidget, 1, index, 1, 1)
grid.attach(spinner, 2, index, 1, 1)
return spinner
#-------------------------------------------------------------------------
#
# GrampsPreferences
#
#-------------------------------------------------------------------------
class GrampsPreferences(ConfigureDialog):
def __init__(self, uistate, dbstate):
page_funcs = (
self.add_behavior_panel,
self.add_famtree_panel,
self.add_formats_panel,
self.add_places_panel,
self.add_text_panel,
self.add_prefix_panel,
self.add_date_panel,
self.add_researcher_panel,
self.add_advanced_panel,
self.add_color_panel
)
ConfigureDialog.__init__(self, uistate, dbstate, page_funcs,
GrampsPreferences, config,
on_close=update_constants)
self.setup_configs('interface.grampspreferences', 700, 450)
def add_researcher_panel(self, configdialog):
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
self.add_text(grid, _('Enter your information so people can contact '
'you when you distribute your Family Tree'),
0, line_wrap=True)
self.add_entry(grid, _('Name'), 1, 'researcher.researcher-name')
self.add_entry(grid, _('Address'), 2, 'researcher.researcher-addr')
self.add_entry(grid, _('Locality'), 3, 'researcher.researcher-locality')
self.add_entry(grid, _('City'), 4, 'researcher.researcher-city')
self.add_entry(grid, _('State/County'), 5, 'researcher.researcher-state')
self.add_entry(grid, _('Country'), 6, 'researcher.researcher-country')
self.add_entry(grid, _('ZIP/Postal Code'), 7, 'researcher.researcher-postal')
self.add_entry(grid, _('Phone'), 8, 'researcher.researcher-phone')
self.add_entry(grid, _('Email'), 9, 'researcher.researcher-email')
return _('Researcher'), grid
def add_prefix_panel(self, configdialog):
"""
Add the ID prefix tab to the preferences.
"""
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
self.add_entry(grid, _('Person'), 0, 'preferences.iprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Family'), 1, 'preferences.fprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Place'), 2, 'preferences.pprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Source'), 3, 'preferences.sprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Citation'), 4, 'preferences.cprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Media Object'), 5, 'preferences.oprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Event'), 6, 'preferences.eprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Repository'), 7, 'preferences.rprefix',
self.update_idformat_entry)
self.add_entry(grid, _('Note'), 8, 'preferences.nprefix',
self.update_idformat_entry)
return _('ID Formats'), grid
def add_color_panel(self, configdialog):
"""
        Add the tab to set default colors for graph boxes
"""
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
self.add_text(grid, _('Set the colors used for boxes in the graphical views'),
0, line_wrap=False)
self.add_color(grid, _('Gender Male Alive'), 1,
'preferences.color-gender-male-alive')
self.add_color(grid, _('Border Male Alive'), 2,
'preferences.bordercolor-gender-male-alive')
self.add_color(grid, _('Gender Male Death'), 3,
'preferences.color-gender-male-death')
self.add_color(grid, _('Border Male Death'), 4,
'preferences.bordercolor-gender-male-death')
self.add_color(grid, _('Gender Female Alive'), 1,
'preferences.color-gender-female-alive', col=4)
self.add_color(grid, _('Border Female Alive'), 2,
'preferences.bordercolor-gender-female-alive', col=4)
self.add_color(grid, _('Gender Female Death'), 3,
'preferences.color-gender-female-death', col=4)
self.add_color(grid, _('Border Female Death'), 4,
'preferences.bordercolor-gender-female-death', col=4)
## self.add_color(grid, _('Gender Other Alive'), 5,
## 'preferences.color-gender-other-alive')
## self.add_color(grid, _('Border Other Alive'), 6,
## 'preferences.bordercolor-gender-other-alive')
## self.add_color(grid, _('Gender Other Death'), 7,
## 'preferences.color-gender-other-death')
## self.add_color(grid, _('Border Other Death'), 8,
## 'preferences.bordercolor-gender-other-death')
self.add_color(grid, _('Gender Unknown Alive'), 5,
'preferences.color-gender-unknown-alive', col=4)
self.add_color(grid, _('Border Unknown Alive'), 6,
'preferences.bordercolor-gender-unknown-alive', col=4)
self.add_color(grid, _('Gender Unknown Death'), 7,
'preferences.color-gender-unknown-death', col=4)
self.add_color(grid, _('Border Unknown Death'), 8,
'preferences.bordercolor-gender-unknown-death', col=4)
return _('Colors'), grid
def add_advanced_panel(self, configdialog):
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
self.add_checkbox(
grid, _('Suppress warning when adding parents to a child.'),
0, 'preferences.family-warn')
self.add_checkbox(
grid, _('Suppress warning when canceling with changed data.'),
1, 'interface.dont-ask')
self.add_checkbox(
grid, _('Suppress warning about missing researcher when'
' exporting to GEDCOM.'),
2, 'behavior.owner-warn')
self.add_checkbox(
grid, _('Show plugin status dialog on plugin load error.'),
3, 'behavior.pop-plugin-status')
return _('Warnings'), grid
def _build_name_format_model(self, active):
"""
Create a common model for ComboBox and TreeView
"""
name_format_model = Gtk.ListStore(GObject.TYPE_INT,
GObject.TYPE_STRING,
GObject.TYPE_STRING,
GObject.TYPE_STRING)
index = 0
the_index = 0
for num, name, fmt_str, act in _nd.get_name_format():
translation = fmt_str
for key in get_keywords():
if key in translation:
translation = translation.replace(key, get_translation_from_keyword(key))
self.examplename.set_display_as(num)
name_format_model.append(
row=[num, translation, fmt_str, _nd.display_name(self.examplename)])
if num == active: the_index = index
index += 1
return name_format_model, the_index
def __new_name(self, obj):
lyst = ["%s, %s %s (%s)" % (_("Surname"), _("Given"), _("Suffix"),
_("Common")),
"%s, %s %s (%s)" % (_("Surname"), _("Given"), _("Suffix"),
_("Nickname")),
"%s, %s %s (%s)" % (_("Surname"), _("Name|Common"), _("Suffix"),
_("Nickname")),
"%s, %s %s" % (_("Surname"), _("Name|Common"), _("Suffix")),
"%s, %s %s (%s)" % (_("SURNAME"), _("Given"), _("Suffix"),
_("Call")),
"%s, %s (%s)" % (_("Surname"), _("Given"), _("Name|Common")),
"%s, %s (%s)" % (_("Surname"), _("Name|Common"), _("Nickname")),
"%s %s" % (_("Given"), _("Surname")),
"%s %s, %s" % (_("Given"), _("Surname"), _("Suffix")),
"%s %s %s" % (_("Given"), _("NotPatronymic"), _("Patronymic")),
"%s, %s %s (%s)" % (_("SURNAME"), _("Given"), _("Suffix"),
_("Common")),
"%s, %s (%s)" % (_("SURNAME"), _("Given"), _("Name|Common")),
"%s, %s (%s)" % (_("SURNAME"), _("Given"), _("Nickname")),
"%s %s" % (_("Given"), _("SURNAME")),
"%s %s, %s" % (_("Given"), _("SURNAME"), _("Suffix")),
"%s /%s/" % (_("Given"), _("SURNAME")),
"%s %s, %s" % (_("Given"), _("Rawsurnames"), _("Suffix")),
]
#repeat above list, but not translated.
fmtlyst = ["%s, %s %s (%s)" % (("Surname"), ("Given"), ("Suffix"),
("Common")),
"%s, %s %s (%s)" % (("Surname"), ("Given"), ("Suffix"),
("Nickname")),
"%s, %s %s (%s)" % (("Surname"), ("Name|Common"), ("Suffix"),
("Nickname")),
"%s, %s %s" % (("Surname"), ("Name|Common"), ("Suffix")),
"%s, %s %s (%s)" % (("SURNAME"), ("Given"), ("Suffix"),
("Call")),
"%s, %s (%s)" % (("Surname"), ("Given"), ("Name|Common")),
"%s, %s (%s)" % (("Surname"), ("Name|Common"), ("Nickname")),
"%s %s" % (("Given"), ("Surname")),
"%s %s, %s" % (("Given"), ("Surname"), ("Suffix")),
"%s %s %s" % (("Given"), ("NotPatronymic"), ("Patronymic")),
"%s, %s %s (%s)" % (("SURNAME"), ("Given"), ("Suffix"),
("Common")),
"%s, %s (%s)" % (("SURNAME"), ("Given"), ("Name|Common")),
"%s, %s (%s)" % (("SURNAME"), ("Given"), ("Nickname")),
"%s %s" % (("Given"), ("SURNAME")),
"%s %s, %s" % (("Given"), ("SURNAME"), ("Suffix")),
"%s /%s/" % (("Given"), ("SURNAME")),
"%s %s, %s" % (("Given"), ("Rawsurnames"), ("Suffix")),
]
rand = int(random.random() * len(lyst))
f = lyst[rand]
fmt = fmtlyst[rand]
i = _nd.add_name_format(f, fmt)
node = self.fmt_model.append(row=[i, f, fmt,
_nd.format_str(self.examplename, fmt)])
path = self.fmt_model.get_path(node)
self.format_list.set_cursor(path, self.name_column, True)
self.edit_button.set_sensitive(False)
self.remove_button.set_sensitive(False)
self.insert_button.set_sensitive(False)
def __edit_name(self, obj):
store, node = self.format_list.get_selection().get_selected()
path = self.fmt_model.get_path(node)
self.edit_button.set_sensitive(False)
self.remove_button.set_sensitive(False)
self.insert_button.set_sensitive(False)
self.format_list.set_cursor(path, self.name_column, True)
def __check_for_name(self, name, oldnode):
"""
Check to see if there is another name the same as name
in the format list. Don't compare with self (oldnode).
"""
model = self.fmt_obox.get_model()
iter = model.get_iter_first()
while iter is not None:
othernum = model.get_value(iter, COL_NUM)
oldnum = model.get_value(oldnode, COL_NUM)
if othernum == oldnum:
                pass  # skip comparison with self
else:
othername = model.get_value(iter, COL_NAME)
if othername == name:
return True
iter = model.iter_next(iter)
return False
def __start_name_editing(self, dummy_renderer, dummy_editable, dummy_path):
"""
Method called at the start of editing a name format.
"""
self.format_list.set_tooltip_text(_("Enter to save, Esc to cancel "
"editing"))
def __cancel_change(self, dummy_renderer):
"""
Break off the editing of a name format.
"""
self.format_list.set_tooltip_text('')
num = self.selected_fmt[COL_NUM]
if any(fmt[COL_NUM] == num for fmt in self.dbstate.db.name_formats):
return
else: # editing a new format not yet in db, cleanup is needed
self.fmt_model.remove(self.iter)
_nd.del_name_format(num)
self.insert_button.set_sensitive(True)
def __change_name(self, text, path, new_text):
"""
Called when a name format changed and needs to be stored in the db.
"""
self.format_list.set_tooltip_text('')
if len(new_text) > 0 and text != new_text:
# build a pattern from translated pattern:
pattern = new_text
if (len(new_text) > 2 and
new_text[0] == '"' and
new_text[-1] == '"'):
pass
else:
for key in get_translations():
if key in pattern:
pattern = pattern.replace(key, get_keyword_from_translation(key))
# now build up a proper translation:
translation = pattern
if (len(new_text) > 2 and
new_text[0] == '"' and
new_text[-1] == '"'):
pass
else:
for key in get_keywords():
if key in translation:
translation = translation.replace(key, get_translation_from_keyword(key))
num, name, fmt = self.selected_fmt[COL_NUM:COL_EXPL]
node = self.fmt_model.get_iter(path)
oldname = self.fmt_model.get_value(node, COL_NAME)
# check to see if this pattern already exists
if self.__check_for_name(translation, node):
ErrorDialog(_("This format exists already."),
translation, parent=self.window)
self.edit_button.emit('clicked')
return
# else, change the name
self.edit_button.set_sensitive(True)
self.remove_button.set_sensitive(True)
self.insert_button.set_sensitive(True)
exmpl = _nd.format_str(self.examplename, pattern)
self.fmt_model.set(self.iter, COL_NAME, translation,
COL_FMT, pattern,
COL_EXPL, exmpl)
self.selected_fmt = (num, translation, pattern, exmpl)
_nd.edit_name_format(num, translation, pattern)
self.dbstate.db.name_formats = _nd.get_name_format(only_custom=True,
only_active=False)
def __format_change(self, obj):
try:
t = (_nd.format_str(self.name, escape(obj.get_text())))
self.valid = True
except NameDisplayError:
t = _("Invalid or incomplete format definition.")
self.valid = False
self.fmt_model.set(self.iter, COL_EXPL, t)
def _build_custom_name_ui(self):
"""
UI to manage the custom name formats
"""
grid = Gtk.Grid()
grid.set_border_width(6)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
# make a treeview for listing all the name formats
format_tree = Gtk.TreeView(model=self.fmt_model)
name_renderer = Gtk.CellRendererText()
name_column = Gtk.TreeViewColumn(_('Format'),
name_renderer,
text=COL_NAME)
name_renderer.set_property('editable', False)
name_renderer.connect('editing-started', self.__start_name_editing)
name_renderer.connect('edited', self.__change_name)
name_renderer.connect('editing-canceled', self.__cancel_change)
self.name_renderer = name_renderer
format_tree.append_column(name_column)
example_renderer = Gtk.CellRendererText()
example_column = Gtk.TreeViewColumn(_('Example'),
example_renderer,
text=COL_EXPL)
format_tree.append_column(example_column)
format_tree.get_selection().connect('changed',
self.cb_format_tree_select)
# ... and put it into a scrolled win
format_sw = Gtk.ScrolledWindow()
format_sw.set_policy(Gtk.PolicyType.AUTOMATIC, Gtk.PolicyType.AUTOMATIC)
format_sw.add(format_tree)
format_sw.set_shadow_type(Gtk.ShadowType.IN)
format_sw.set_hexpand(True)
format_sw.set_vexpand(True)
grid.attach(format_sw, 0, 0, 3, 1)
# to hold the values of the selected row of the tree and the iter
self.selected_fmt = ()
self.iter = None
self.insert_button = Gtk.Button.new_with_mnemonic(_('_Add'))
self.insert_button.connect('clicked', self.__new_name)
self.edit_button = Gtk.Button.new_with_mnemonic(_('_Edit'))
self.edit_button.connect('clicked', self.__edit_name)
self.edit_button.set_sensitive(False)
self.remove_button = Gtk.Button.new_with_mnemonic(_('_Remove'))
self.remove_button.connect('clicked', self.cb_del_fmt_str)
self.remove_button.set_sensitive(False)
grid.attach(self.insert_button, 0, 1, 1, 1)
grid.attach(self.remove_button, 1, 1, 1, 1)
grid.attach(self.edit_button, 2, 1, 1, 1)
self.format_list = format_tree
self.name_column = name_column
return grid
def name_changed_check(self):
"""
Method to check for a name change. Called by Name Edit Dialog.
"""
obj = self.fmt_obox
the_list = obj.get_model()
the_iter = obj.get_active_iter()
format = the_list.get_value(the_iter, COL_FMT)
if format != self.old_format:
# Yes a change; call the callback
self.cb_name_changed(obj)
def cb_name_changed(self, obj):
"""
Preset name format ComboBox callback
"""
the_list = obj.get_model()
the_iter = obj.get_active_iter()
new_idx = the_list.get_value(the_iter, COL_NUM)
config.set('preferences.name-format', new_idx)
_nd.set_default_format(new_idx)
self.uistate.emit('nameformat-changed')
def cb_pa_sur_changed(self,*args):
"""
        Callback for the 'patronymic as surname' checkbox; propagate the change
        to the name displayer.
"""
_nd.change_pa_sur()
self.uistate.emit('nameformat-changed')
def cb_format_tree_select(self, tree_selection):
"""
Name format editor TreeView callback
Remember the values of the selected row (self.selected_fmt, self.iter)
and set the Remove and Edit button sensitivity
"""
model, self.iter = tree_selection.get_selected()
if self.iter is None:
tree_selection.select_path(0)
model, self.iter = tree_selection.get_selected()
self.selected_fmt = model.get(self.iter, 0, 1, 2)
idx = self.selected_fmt[COL_NUM] < 0
self.remove_button.set_sensitive(idx)
self.edit_button.set_sensitive(idx)
self.name_renderer.set_property('editable', idx)
def cb_del_fmt_str(self, obj):
"""
Name format editor Remove button callback
"""
num = self.selected_fmt[COL_NUM]
if _nd.get_default_format() == num:
self.fmt_obox.set_active(0)
self.fmt_model.remove(self.iter)
_nd.set_format_inactive(num)
self.dbstate.db.name_formats = _nd.get_name_format(only_custom=True,
only_active=False)
def cb_grampletbar_close(self, obj):
"""
Gramplet bar close button preference callback
"""
self.uistate.emit('grampletbar-close-changed')
def add_formats_panel(self, configdialog):
row = 0
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
# Display name:
self.examplename = Name()
examplesurname = Surname()
examplesurnamesecond = Surname()
examplesurnamepat = Surname()
self.examplename.set_title('Dr.')
self.examplename.set_first_name('Edwin Jose')
examplesurname.set_prefix('von der')
examplesurname.set_surname('Smith')
examplesurname.set_connector('and')
self.examplename.add_surname(examplesurname)
examplesurnamesecond.set_surname('Weston')
self.examplename.add_surname(examplesurnamesecond)
examplesurnamepat.set_surname('Wilson')
examplesurnamepat.set_origintype(
NameOriginType(NameOriginType.PATRONYMIC))
self.examplename.add_surname(examplesurnamepat)
self.examplename.set_primary_surname(0)
self.examplename.set_suffix('Sr')
self.examplename.set_call_name('Jose')
self.examplename.set_nick_name('Ed')
self.examplename.set_family_nick_name('Underhills')
# get the model for the combo and the treeview
active = _nd.get_default_format()
self.fmt_model, active = self._build_name_format_model(active)
# set up the combo to choose the preset format
self.fmt_obox = Gtk.ComboBox()
cell = Gtk.CellRendererText()
cell.set_property('ellipsize', Pango.EllipsizeMode.END)
self.fmt_obox.pack_start(cell, True)
self.fmt_obox.add_attribute(cell, 'text', 1)
self.fmt_obox.set_model(self.fmt_model)
# set the default value as active in the combo
self.fmt_obox.set_active(active)
self.fmt_obox.connect('changed', self.cb_name_changed)
# label for the combo
lwidget = BasicLabel(_("%s: ") % _('Name format'))
lwidget.set_use_underline(True)
lwidget.set_mnemonic_widget(self.fmt_obox)
hbox = Gtk.Box()
btn = Gtk.Button(label=("%s..." % _('Edit')))
btn.connect('clicked', self.cb_name_dialog)
hbox.pack_start(self.fmt_obox, True, True, 0)
hbox.pack_start(btn, False, False, 0)
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(hbox, 1, row, 2, 1)
row += 1
# Pa/Matronymic surname handling
self.add_checkbox(grid,
_("Consider single pa/matronymic as surname"),
row, 'preferences.patronimic-surname', stop=3,
extra_callback=self.cb_pa_sur_changed)
row += 1
# Date format:
obox = Gtk.ComboBoxText()
formats = get_date_formats()
list(map(obox.append_text, formats))
active = config.get('preferences.date-format')
if active >= len(formats):
active = 0
obox.set_active(active)
obox.connect('changed', self.date_format_changed)
lwidget = BasicLabel(_("%s: ") % _('Date format'))
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(obox, 1, row, 2, 1)
row += 1
# Age precision:
# precision=1 for "year", 2: "year, month" or 3: "year, month, days"
obox = Gtk.ComboBoxText()
age_precision = [_("Years"),
_("Years, Months"),
_("Years, Months, Days")]
list(map(obox.append_text, age_precision))
# Combo_box active index is from 0 to 2, we need values from 1 to 3
active = config.get('preferences.age-display-precision') - 1
if active >= 0 and active <= 2:
obox.set_active(active)
else:
obox.set_active(0)
obox.connect('changed',
lambda obj: config.set('preferences.age-display-precision',
obj.get_active() + 1))
lwidget = BasicLabel(_("%s: ")
% _('Age display precision (requires restart)'))
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(obox, 1, row, 2, 1)
row += 1
# Calendar format on report:
obox = Gtk.ComboBoxText()
list(map(obox.append_text, Date.ui_calendar_names))
active = config.get('preferences.calendar-format-report')
if active >= len(formats):
active = 0
obox.set_active(active)
obox.connect('changed', self.date_calendar_changed)
lwidget = BasicLabel(_("%s: ") % _('Calendar on reports'))
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(obox, 1, row, 2, 1)
row += 1
# Surname guessing:
obox = Gtk.ComboBoxText()
formats = _surname_styles
list(map(obox.append_text, formats))
obox.set_active(config.get('behavior.surname-guessing'))
obox.connect('changed',
lambda obj: config.set('behavior.surname-guessing',
obj.get_active()))
lwidget = BasicLabel(_("%s: ") % _('Surname guessing'))
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(obox, 1, row, 2, 1)
row += 1
# Default Family Relationship
obox = Gtk.ComboBoxText()
formats = FamilyRelType().get_standard_names()
list(map(obox.append_text, formats))
obox.set_active(config.get('preferences.family-relation-type'))
obox.connect('changed',
lambda obj: config.set('preferences.family-relation-type',
obj.get_active()))
lwidget = BasicLabel(_("%s: ") % _('Default family relationship'))
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(obox, 1, row, 2, 1)
row += 1
#height multiple surname table
self.add_pos_int_entry(grid,
_('Height multiple surname box (pixels)'),
row, 'interface.surname-box-height', self.update_surn_height,
col_attach=0)
row += 1
# Status bar:
obox = Gtk.ComboBoxText()
formats = [_("Active person's name and ID"),
_("Relationship to home person")]
list(map(obox.append_text, formats))
active = config.get('interface.statusbar')
if active < 2:
obox.set_active(0)
else:
obox.set_active(1)
obox.connect('changed',
lambda obj: config.set('interface.statusbar', 2*obj.get_active()))
lwidget = BasicLabel(_("%s: ") % _('Status bar'))
grid.attach(lwidget, 0, row, 1, 1)
grid.attach(obox, 1, row, 2, 1)
row += 1
# Text in sidebar:
self.add_checkbox(grid,
_("Show text label beside Navigator buttons (requires restart)"),
row, 'interface.sidebar-text', stop=3)
row += 1
# Gramplet bar close buttons:
self.add_checkbox(grid,
_("Show close button in gramplet bar tabs"),
row, 'interface.grampletbar-close', stop=3,
extra_callback=self.cb_grampletbar_close)
row += 1
return _('Display'), grid
def add_places_panel(self, configdialog):
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
auto = self.add_checkbox(grid,
_("Enable automatic place title generation"),
0, 'preferences.place-auto',
extra_callback=self.auto_title_changed)
row = 0
grid2 = Gtk.Grid()
grid2.set_border_width(12)
grid2.set_column_spacing(6)
grid2.set_row_spacing(6)
grid.attach(grid2, 1, 1, 1, 1)
self.place_widgets = []
cbox = self.add_checkbox(grid2, _("Suppress comma after house number"),
row, 'preferences.place-number', start=0)
self.place_widgets.append(cbox)
row += 1
cbox = self.add_checkbox(grid2, _("Reverse display order"),
row, 'preferences.place-reverse', start=0)
self.place_widgets.append(cbox)
row += 1
# Place restriction
obox = Gtk.ComboBoxText()
formats = [_("Full place name"),
_("-> Hamlet/Village/Town/City"),
_("Hamlet/Village/Town/City ->")]
list(map(obox.append_text, formats))
active = config.get('preferences.place-restrict')
obox.set_active(active)
obox.connect('changed', self.place_restrict_changed)
lwidget = BasicLabel(_("%s: ") % _('Restrict'))
grid2.attach(lwidget, 0, row, 1, 1)
grid2.attach(obox, 1, row, 2, 1)
self.place_widgets.append(obox)
row += 1
entry = self.add_entry(grid2, _("Language"),
row, 'preferences.place-lang')
self.place_widgets.append(entry)
row += 1
self.auto_title_changed(auto)
return _('Places'), grid
def auto_title_changed(self, obj):
"""
Update sensitivity of place configuration widgets.
"""
active = obj.get_active()
for widget in self.place_widgets:
widget.set_sensitive(active)
def add_text_panel(self, configdialog):
row = 0
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
self.add_entry(grid, _('Missing surname'), row,
'preferences.no-surname-text')
row += 1
self.add_entry(grid, _('Missing given name'), row,
'preferences.no-given-text')
row += 1
self.add_entry(grid, _('Missing record'), row,
'preferences.no-record-text')
row += 1
self.add_entry(grid, _('Private surname'), row,
'preferences.private-surname-text',
localized_config=False)
row += 1
self.add_entry(grid, _('Private given name'), row,
'preferences.private-given-text',
localized_config=False)
row += 1
self.add_entry(grid, _('Private record'), row,
'preferences.private-record-text')
row += 1
return _('Text'), grid
def cb_name_dialog(self, obj):
the_list = self.fmt_obox.get_model()
the_iter = self.fmt_obox.get_active_iter()
self.old_format = the_list.get_value(the_iter, COL_FMT)
win = DisplayNameEditor(self.uistate, self.dbstate, self.track, self)
def check_for_type_changed(self, obj):
active = obj.get_active()
if active == 0: # update
config.set('behavior.check-for-addon-update-types', ["update"])
        elif active == 1: # new
config.set('behavior.check-for-addon-update-types', ["new"])
        elif active == 2: # update and new
config.set('behavior.check-for-addon-update-types', ["update", "new"])
def toggle_hide_previous_addons(self, obj):
active = obj.get_active()
config.set('behavior.do-not-show-previously-seen-addon-updates',
bool(active))
def toggle_tag_on_import(self, obj):
active = obj.get_active()
config.set('preferences.tag-on-import', bool(active))
self.tag_format_entry.set_sensitive(bool(active))
def check_for_updates_changed(self, obj):
active = obj.get_active()
config.set('behavior.check-for-addon-updates', active)
def place_restrict_changed(self, obj):
active = obj.get_active()
config.set('preferences.place-restrict', active)
def date_format_changed(self, obj):
config.set('preferences.date-format', obj.get_active())
OkDialog(_('Change is not immediate'),
_('Changing the date format will not take '
'effect until the next time Gramps is started.'),
parent=self.window)
def date_calendar_changed(self, obj):
config.set('preferences.calendar-format-report', obj.get_active())
def autobackup_changed(self, obj):
active = obj.get_active()
config.set('database.autobackup', active)
self.uistate.set_autobackup_timer()
def add_date_panel(self, configdialog):
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
self.add_spinner(grid,
_('Date about range'),
0, 'behavior.date-about-range', (1, 9999))
self.add_spinner(grid,
_('Date after range'),
1, 'behavior.date-after-range', (1, 9999))
self.add_spinner(grid,
_('Date before range'),
2, 'behavior.date-before-range', (1, 9999))
self.add_spinner(grid,
_('Maximum age probably alive'),
3, 'behavior.max-age-prob-alive', (80, 140))
self.add_spinner(grid,
_('Maximum sibling age difference'),
4, 'behavior.max-sib-age-diff', (10, 30))
self.add_spinner(grid,
_('Minimum years between generations'),
5, 'behavior.min-generation-years', (5, 20))
self.add_spinner(grid,
_('Average years between generations'),
6, 'behavior.avg-generation-gap', (10, 30))
self.add_pos_int_entry(grid,
_('Markup for invalid date format'),
7, 'preferences.invalid-date-format',
self.update_markup_entry,
helptext = _('Convenience markups are:\n'
'<b><b>Bold</b></b>\n'
'<big><big>Makes font relatively larger</big></big>\n'
'<i><i>Italic</i></i>\n'
'<s><s>Strikethrough</s></s>\n'
'<sub><sub>Subscript</sub></sub>\n'
'<sup><sup>Superscript</sup></sup>\n'
'<small><small>Makes font relatively smaller</small></small>\n'
'<tt><tt>Monospace font</tt></tt>\n'
'<u><u>Underline</u></u>\n\n'
'For example: <u><b>%s</b></u>\n'
'will display <u><b>Underlined bold date</b></u>.\n')
)
return _('Dates'), grid
def add_behavior_panel(self, configdialog):
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
current_line = 0
if win():
self.add_checkbox(grid,
_('Use alternate Font handler for GUI and Reports '
'(requires restart)'),
current_line, 'preferences.alternate-fonthandler')
current_line += 1
self.add_checkbox(grid,
_('Add default source on GEDCOM import'),
current_line, 'preferences.default-source')
current_line += 1
checkbutton = Gtk.CheckButton(label=_("Add tag on import"))
checkbutton.set_active(config.get('preferences.tag-on-import'))
checkbutton.connect("toggled", self.toggle_tag_on_import)
grid.attach(checkbutton, 1, current_line, 1, 1)
self.tag_format_entry = self.add_entry(grid, None, current_line,
'preferences.tag-on-import-format',
col_attach=2)
self.tag_format_entry.set_sensitive(config.get('preferences.tag-on-import'))
current_line += 1
obj = self.add_checkbox(grid,
_('Enable spelling checker'),
current_line, 'behavior.spellcheck')
if not HAVE_GTKSPELL:
obj.set_sensitive(False)
spell_dict = { 'gramps_wiki_build_spell_url' :
URL_WIKISTRING +
"GEPS_029:_GTK3-GObject_introspection"
"_Conversion#Spell_Check_Install" }
obj.set_tooltip_text(
_("GtkSpell not loaded. "
"Spell checking will not be available.\n"
"To build it for Gramps see "
"%(gramps_wiki_build_spell_url)s") % spell_dict )
current_line += 1
self.add_checkbox(grid,
_('Display Tip of the Day'),
current_line, 'behavior.use-tips')
current_line += 1
self.add_checkbox(grid,
_('Remember last view displayed'),
current_line, 'preferences.use-last-view')
current_line += 1
self.add_spinner(grid,
_('Max generations for relationships'),
current_line, 'behavior.generation-depth', (5, 50), self.update_gendepth)
current_line += 1
self.path_entry = Gtk.Entry()
self.add_path_box(grid,
_('Base path for relative media paths'),
current_line, self.path_entry, self.dbstate.db.get_mediapath(),
self.set_mediapath, self.select_mediapath)
current_line += 1
# Check for addon updates:
obox = Gtk.ComboBoxText()
formats = [_("Never"),
_("Once a month"),
_("Once a week"),
_("Once a day"),
_("Always"), ]
list(map(obox.append_text, formats))
active = config.get('behavior.check-for-addon-updates')
obox.set_active(active)
obox.connect('changed', self.check_for_updates_changed)
lwidget = BasicLabel(_("%s: ") % _('Check for addon updates'))
grid.attach(lwidget, 1, current_line, 1, 1)
grid.attach(obox, 2, current_line, 1, 1)
current_line += 1
self.whattype_box = Gtk.ComboBoxText()
formats = [_("Updated addons only"),
_("New addons only"),
_("New and updated addons"),]
list(map(self.whattype_box.append_text, formats))
whattype = config.get('behavior.check-for-addon-update-types')
if "new" in whattype and "update" in whattype:
self.whattype_box.set_active(2)
elif "new" in whattype:
self.whattype_box.set_active(1)
elif "update" in whattype:
self.whattype_box.set_active(0)
self.whattype_box.connect('changed', self.check_for_type_changed)
lwidget = BasicLabel(_("%s: ") % _('What to check'))
grid.attach(lwidget, 1, current_line, 1, 1)
grid.attach(self.whattype_box, 2, current_line, 1, 1)
current_line += 1
self.add_entry(grid, _('Where to check'), current_line, 'behavior.addons-url', col_attach=1)
current_line += 1
checkbutton = Gtk.CheckButton(
label=_("Do not ask about previously notified addons"))
checkbutton.set_active(config.get('behavior.do-not-show-previously-seen-addon-updates'))
checkbutton.connect("toggled", self.toggle_hide_previous_addons)
grid.attach(checkbutton, 1, current_line, 1, 1)
button = Gtk.Button(label=_("Check for updated addons now"))
button.connect("clicked", self.check_for_updates)
grid.attach(button, 3, current_line, 1, 1)
return _('General'), grid
def check_for_updates(self, button):
try:
addon_update_list = available_updates()
except:
OkDialog(_("Checking Addons Failed"),
_("The addon repository appears to be unavailable. "
"Please try again later."),
parent=self.window)
return
if len(addon_update_list) > 0:
rescan = PluginWindows.UpdateAddons(self.uistate, self.track,
addon_update_list).rescan
self.uistate.viewmanager.do_reg_plugins(self.dbstate, self.uistate,
rescan=rescan)
else:
check_types = config.get('behavior.check-for-addon-update-types')
OkDialog(
_("There are no available addons of this type"),
_("Checked for '%s'") %
_("' and '").join([_(t) for t in check_types]),
parent=self.window)
# List of translated strings used here
# Dead code for l10n
_('new'), _('update')
def database_backend_changed(self, obj):
the_list = obj.get_model()
the_iter = obj.get_active_iter()
db_choice = the_list.get_value(the_iter, 2)
config.set('database.backend', db_choice)
def add_famtree_panel(self, configdialog):
grid = Gtk.Grid()
grid.set_border_width(12)
grid.set_column_spacing(6)
grid.set_row_spacing(6)
current_line = 0
if __debug__:
lwidget = BasicLabel(_("%s: ") % _('Database backend'))
grid.attach(lwidget, 1, current_line, 1, 1)
obox = self.__create_backend_combo()
grid.attach(obox, 2, current_line, 1, 1)
current_line += 1
self.dbpath_entry = Gtk.Entry()
self.add_path_box(grid,
_('Family Tree Database path'),
current_line, self.dbpath_entry, config.get('database.path'),
self.set_dbpath, self.select_dbpath)
current_line += 1
#self.add_entry(grid,
# _('Family Tree Database path'),
# 0, 'database.path')
self.add_checkbox(grid,
_('Automatically load last Family Tree'),
current_line, 'behavior.autoload')
current_line += 1
self.backup_path_entry = Gtk.Entry()
self.add_path_box(grid,
_('Backup path'),
current_line, self.backup_path_entry,
config.get('database.backup-path'),
self.set_backup_path, self.select_backup_path)
current_line += 1
self.add_checkbox(grid,
_('Backup on exit'),
current_line, 'database.backup-on-exit')
current_line += 1
# Check for updates:
obox = Gtk.ComboBoxText()
formats = [_("Never"),
_("Every 15 minutes"),
_("Every 30 minutes"),
_("Every hour")]
list(map(obox.append_text, formats))
active = config.get('database.autobackup')
obox.set_active(active)
obox.connect('changed', self.autobackup_changed)
lwidget = BasicLabel(_("%s: ") % _('Autobackup'))
grid.attach(lwidget, 1, current_line, 1, 1)
grid.attach(obox, 2, current_line, 1, 1)
return _('Family Tree'), grid
def __create_backend_combo(self):
"""
Create backend selection widget.
"""
backend_plugins = self.uistate.viewmanager._pmgr.get_reg_databases()
obox = Gtk.ComboBox()
cell = Gtk.CellRendererText()
obox.pack_start(cell, True)
obox.add_attribute(cell, 'text', 1)
# Build model:
model = Gtk.ListStore(GObject.TYPE_INT,
GObject.TYPE_STRING,
GObject.TYPE_STRING)
count = 0
active = 0
default = config.get('database.backend')
for plugin in sorted(backend_plugins, key=lambda plugin: plugin.name):
if plugin.id == default:
active = count
model.append(row=[count, plugin.name, plugin.id])
count += 1
obox.set_model(model)
# set the default value as active in the combo
obox.set_active(active)
obox.connect('changed', self.database_backend_changed)
return obox
def set_mediapath(self, *obj):
if self.path_entry.get_text().strip():
self.dbstate.db.set_mediapath(self.path_entry.get_text())
else:
self.dbstate.db.set_mediapath(None)
def select_mediapath(self, *obj):
f = Gtk.FileChooserDialog(title=_("Select media directory"),
parent=self.window,
action=Gtk.FileChooserAction.SELECT_FOLDER,
buttons=(_('_Cancel'),
Gtk.ResponseType.CANCEL,
_('_Apply'),
Gtk.ResponseType.OK)
)
mpath = media_path(self.dbstate.db)
f.set_current_folder(os.path.dirname(mpath))
status = f.run()
if status == Gtk.ResponseType.OK:
val = f.get_filename()
if val:
self.path_entry.set_text(val)
f.destroy()
def set_dbpath(self, *obj):
path = self.dbpath_entry.get_text().strip()
config.set('database.path', path)
def select_dbpath(self, *obj):
f = Gtk.FileChooserDialog(title=_("Select database directory"),
transient_for=self.window,
action=Gtk.FileChooserAction.SELECT_FOLDER)
f.add_buttons(_('_Cancel'), Gtk.ResponseType.CANCEL,
_('_Apply'), Gtk.ResponseType.OK)
dbpath = config.get('database.path')
if not dbpath:
dbpath = os.path.join(HOME_DIR,'grampsdb')
f.set_current_folder(os.path.dirname(dbpath))
status = f.run()
if status == Gtk.ResponseType.OK:
val = f.get_filename()
if val:
self.dbpath_entry.set_text(val)
f.destroy()
def set_backup_path(self, *obj):
path = self.backup_path_entry.get_text().strip()
config.set('database.backup-path', path)
def select_backup_path(self, *obj):
f = Gtk.FileChooserDialog(title=_("Select backup directory"),
parent=self.window,
action=Gtk.FileChooserAction.SELECT_FOLDER,
buttons=(_('_Cancel'),
Gtk.ResponseType.CANCEL,
_('_Apply'),
Gtk.ResponseType.OK)
)
backup_path = config.get('database.backup-path')
if not backup_path:
backup_path = config.get('database.path')
f.set_current_folder(os.path.dirname(backup_path))
status = f.run()
if status == Gtk.ResponseType.OK:
val = f.get_filename()
if val:
self.backup_path_entry.set_text(val)
f.destroy()
def update_idformat_entry(self, obj, constant):
config.set(constant, obj.get_text())
self.dbstate.db.set_prefixes(
config.get('preferences.iprefix'),
config.get('preferences.oprefix'),
config.get('preferences.fprefix'),
config.get('preferences.sprefix'),
config.get('preferences.cprefix'),
config.get('preferences.pprefix'),
config.get('preferences.eprefix'),
config.get('preferences.rprefix'),
config.get('preferences.nprefix') )
def update_gendepth(self, obj, constant):
"""
Called when the generation depth setting is changed.
"""
intval = int(obj.get_value())
config.set(constant, intval)
#immediately use this value in displaystate.
self.uistate.set_gendepth(intval)
def update_surn_height(self, obj, constant):
ok = True
if not obj.get_text():
return
try:
intval = int(obj.get_text())
except:
intval = config.get(constant)
ok = False
if intval < 0 :
intval = config.get(constant)
ok = False
if ok:
config.set(constant, intval)
else:
obj.set_text(str(intval))
def build_menu_names(self, obj):
return (_('Preferences'), _('Preferences'))
| gpl-2.0 | -1,437,312,302,657,628,400 | 40.805471 | 141 | 0.541079 | false |
mylene-campana/hpp-rbprm-corba | script/tests/spiderman_InfiniteU_interp.py | 1 | 6725 | #!/usr/bin/env python
# author: Mylene Campana ([email protected])
# Script which goes with hpp-rbprm-corba package.
from hpp.corbaserver.rbprm.rbprmbuilder import Builder
from hpp.corbaserver.rbprm.rbprmfullbody import FullBody
from hpp.corbaserver.rbprm.problem_solver import ProblemSolver
from hpp.gepetto import Viewer, PathPlayer
import numpy as np
from viewer_library import *
import spiderman_cube_infiniteU_path as tp
packageName = 'hpp-rbprm-corba'
meshPackageName = 'hpp-rbprm-corba'
rootJointType = "freeflyer"
##
# Information to retrieve urdf and srdf files.
urdfName = "spiderman"
urdfSuffix = ""
srdfSuffix = ""
ecsSize = 0 # tp.ecsSize
fullBody = FullBody ()
robot = fullBody.client.basic.robot
fullBody.loadFullBodyModel(urdfName, rootJointType, meshPackageName, packageName, urdfSuffix, srdfSuffix)
#psf = ProblemSolver(fullBody); rr = Viewer (psf); gui = rr.client.gui
r = tp.r; ps = tp.ps
psf = tp.ProblemSolver( fullBody )
rr = tp.Viewer (psf); gui = rr.client.gui
heuristicName = "static"
rLegId = 'RFoot'
lLegId = 'LFoot'
rarmId = 'RHand'
larmId = 'LHand'
fullBody.addLimbDatabase('./Spiderman_rleg.db',rLegId,heuristicName)
fullBody.addLimbDatabase('./Spiderman_lleg.db',lLegId,heuristicName)
fullBody.addLimbDatabase('./Spiderman_rarm.db',rarmId,heuristicName)
fullBody.addLimbDatabase('./Spiderman_larm.db',larmId,heuristicName)
print("Limbs added to fullbody")
extending = [0, 0, 0, 1, 0, 0, 0, 0.8, 0.0, 0, -0.6, 0.0, 0,0.4, -0.9, 0.9, 0, 0, 0.0, 0.0, 0.4, 0.9, -0.9, 0, 0, 0.0,-0.0, -2, -0.5, 0.3, 2.2, 0.7, 0, 0.0, -2, 0.5, -0.3, 2.2, 0.7, 0.0, 0.0]
q_contact_takeoff = [0, 0, 0, 1, 0, 0, 0, 0.0, 0, 0, 0.0, 0.0, 0.0, -2, 0.3, -0.3, 0, -0.6, 0.0, 0.0, -2, -0.3, 0.3, 0, 0.6, 0.0,-0.0, -1.9, -0.3, -0.2, 1.9, -0.6, 0, 0.0, -1.9, 0.3, 0.2, 1.9, -0.6, 0, 0.0]
flexion = [0, 0, 0, 1, 0.0, 0.0, 0.0, 0.7, 0, 0, -0.7, 0.0,0, 0.5, 0.7, 0.5, 0, -0.6, 0.0, 0.0, 0.5, -0.7, -0.5, 0,0.6, 0.0, -0.0, -1.2, -0.3, -0.2, 2.2, -0.9, 0, 0.0, -1.2,0.3, 0.2, 2.2, -0.9, 0, 0.0]
q_contact_landing = []
fullBody.setPose (extending, "extending")
fullBody.setPose (flexion, "flexion")
fullBody.setPose (q_contact_takeoff, "takeoffContact")
fullBody.setPose (q_contact_landing, "landingContact")
#id = r.client.gui.getWindowID("window_hpp_")
#rr.client.gui.attachCameraToNode("spiderman/Thorax",id)
confsize = len(tp.q11)
fullConfSize = len(fullBody.getCurrentConfig()) # with or without ECS in fullbody
q_init = flexion; q_goal = q_init [::]
# WARNING: q_init and q_goal may have changed in orientedPath
entryPathId = tp.solutionPathId # tp.orientedpathId or tp.solutionPathId or tp.orientedpathIdBis
trunkPathwaypoints = ps.getWaypoints (entryPathId)
q_init[0:confsize-ecsSize] = trunkPathwaypoints[0][0:confsize-ecsSize]
q_goal[0:confsize-ecsSize] = trunkPathwaypoints[len(trunkPathwaypoints)-1][0:confsize-ecsSize]
if (ecsSize > 0):
q_init[fullConfSize-ecsSize:fullConfSize] = trunkPathwaypoints[0][confsize-ecsSize:confsize]
q_goal[fullConfSize-ecsSize:fullConfSize] = trunkPathwaypoints[len(trunkPathwaypoints)-1][confsize-ecsSize:confsize]
dir_init = [-V0list [0][0],-V0list [0][1],-V0list [0][2]] # first V0
fullBody.setCurrentConfig (q_init)
fullBody.isConfigValid(q_init)
q_init_test = fullBody.generateContacts(q_init, dir_init, False); rr (q_init_test)
fullBody.isConfigValid(q_init_test)
dir_goal = (np.array(Vimplist [len(Vimplist)-1])).tolist() # last Vimp reversed
fullBody.setCurrentConfig (q_goal)
q_goal_test = fullBody.generateContacts(q_goal, dir_goal, False); rr (q_goal_test)
fullBody.isConfigValid(q_goal_test)
fullBody.setStartState(q_init_test,[rLegId,lLegId])
fullBody.setEndState(q_goal_test,[rLegId,lLegId])
psf.setPlannerIterLimit (50)
print("Start ballistic-interpolation")
fullBody.interpolateBallisticPath(entryPathId, 0.005)
pp = PathPlayer (fullBody.client.basic, rr)
pp.speed=1
pathId = psf.numberPaths () -1
rr(pp.client.problem.configAtParam(pathId,0))
pp(pathId)
"""
# verify given offset position of contact-point
q = q_init_test
r(q)
fullBody.setCurrentConfig (q)
#posAtester = fullBody.client.basic.robot.computeGlobalPosition(fullBody.client.basic.robot.getJointPosition(rfoot),[0,0,0.2]); sphereName = "machin2"
posAtester = fullBody.client.basic.robot.computeGlobalPosition(fullBody.client.basic.robot.getJointPosition(rHand),[0.1,0,0]); sphereName = "machin2"
r.client.gui.addSphere (sphereName,0.03,[0.1,0.1,0.1,1]) # black
configSphere = posAtester [::]
configSphere.extend ([1,0,0,0])
r.client.gui.applyConfiguration (sphereName,configSphere)
r.client.gui.addToGroup (sphereName, r.sceneName)
r.client.gui.refresh ()
## Video recording
import time
pp.dt = 0.01
pp.speed=0.5
rr(q_init_test)
rr.startCapture ("capture","png")
rr(q_init_test); time.sleep(2)
rr(q_init_test)
pp(psf.numberPaths ()-1)
rr(q_goal_test); time.sleep(2);
rr.stopCapture ()
## ffmpeg commands
ffmpeg -r 50 -i capture_0_%d.png -r 25 -vcodec libx264 video.mp4
x=0; for i in *png; do counter=$(printf %04d $x); ln "$i" new"$counter".png; x=$(($x+1)); done
ffmpeg -r 30 -i new%04d.png -r 25 -vcodec libx264 video.mp4
mencoder video.mp4 -channels 6 -ovc xvid -xvidencopts fixed_quant=4 -vf harddup -oac pcm -o video.avi
ffmpeg -i untitled.mp4 -vcodec libx264 -crf 24 video.mp4
## Export path to BLENDER
pathId = 0; dt = 0.01; gui.setCaptureTransform ("skeleton_path.yaml", ["skeleton"])
PL = ps.pathLength(pathId)
FrameRange = np.arange(0,PL,dt)
numberFrame = len(FrameRange)
# test frame capture
q = q_init_test; r (q); gui.refresh (); gui.captureTransform ()
q = q_goal_test; r (q); gui.refresh (); gui.captureTransform ()
# capture path
for t in FrameRange:
q = ps.configAtParam (pathId, t)#update robot configuration
r (q); gui.refresh (); gui.captureTransform ()
r (q_goal); robot.setCurrentConfig(q_goal); gui.refresh (); gui.captureTransform ()
cl = tp.rbprmBuilder.client.basic
plotJointFrame (r, cl, q_init_test, "RFootSphere", 0.15)
q_0 = fullBody.getCurrentConfig()
q = q_0
q [fullBody.rankInConfiguration ['RAnkle_J1']] = 0.6; r(q)
"""
rr.addLandmark('spiderman/SpidermanLHandSphere',1)
"""
qe = extending[::]
qe[0:7] = [-2.025129887082707,
40.59097542330351,
128.97577375406138,
1, 0, 0, 0, 0.0, 0.0, 0.8, 0.0, 0.0, -0.6, -0.9, 0.9, 0.4,
0, 0, 0.0, 0.0, -0.9, 0.9, 0.4, 0, 0, 0.0, 0.0, 0.5, 0.5,
-2, 2.2, 0.7, 0, 0.5, 0.5, -2, 2.2, 0.7, 0.0]
rr(qe)
"""
""" # without solving path
q_init[0:confsize-ecsSize] = tp.q11[0:confsize-ecsSize]
q_goal[0:confsize-ecsSize] = tp.q22[0:confsize-ecsSize]
if (ecsSize > 0):
q_init[fullConfSize-ecsSize:fullConfSize] = tp.q11[confsize-ecsSize:confsize]
q_goal[fullConfSize-ecsSize:fullConfSize] = tp.q22[confsize-ecsSize:confsize]
dir_init = [0,0,-1]; dir_goal = [0,0, 1]"""
| lgpl-3.0 | -4,048,527,675,811,533,300 | 34.771277 | 207 | 0.703494 | false |
AmatsukiUrato/ArkHelper | arkHelper.py | 1 | 10171 | # -*- coding: utf-8 -*-
from ctypes.util import find_library
from dateutil.parser import parse
import datetime
import discord
import sqlite3
import asyncio
import time
import re
# version
__version__ = '2.0.0'
# Set up the discord.py client
client = discord.Client()
# Load the bot token
KEY = None
with open('KEY.txt', 'r') as f:
KEY = f.read()
# Open the SQLite database
dbname = 'ark.db'
conn = sqlite3.connect(dbname)
c = conn.cursor()
# Called once the bot has connected
@client.event
async def on_ready():
print('Logged in as')
print(client.user.name)
print(client.user.id)
print('------')
if not discord.opus.is_loaded():
discord.opus.load_opus(find_library("opus"))
    # Check whether the table already exists
    # Define the table name
tablename = "arktimer"
c.execute("SELECT * FROM sqlite_master WHERE type='table' and name='%s'" % tablename)
if not c.fetchone():
        # id, title, current time, scheduled alert time, registering user
c.execute("CREATE TABLE %s(id INTEGER PRIMARY KEY, title TEXT, at_registration_time TEXT, finish_time TEXT, register_name TEXT, notice_channel_id TEXT)" % tablename)
conn.commit()
    # Delete data left over from before the restart
for row in c.execute("SELECT * FROM arktimer"):
c.execute("DELETE FROM arktimer")
conn.commit()
# Called when a message is received (when a chat line comes in)
@client.event
async def on_message(message):
####################
    # Explain how to use ArkTimer
####################
if message.content.startswith('!ark help'):
text = '```js\n[1]!ark help - この説明文を表示する.\n\n[2]!ark link - Arkをやる上で知ってたら便利なリンク集.\n\n[3]!ark timer - ケア用のタイマー.\n"!ark timer"の後に"0~99:0~59"or"1~9999...d/h/m/"を入力することで時間を測れる.タイマーの後にタイトルも入力できる.\n\n[4]!ark timerlist - 現在のケア用タイマー一覧.\n"!ark timer"で登録したタイマーの一覧が見れる.\n\n[5]!ark timerdel - タイマー一覧にあるタイマーを削除する.\n"!ark timerdel id/all"で登録したタイマーを削除する.allにした場合全て消えるので注意.\n\n[7]!ark -v|!ark version - botのバージョンを表示する.```'
# \n\n[5](未)!ark summon - ArkHelperをボイスチャンネルに呼ぶ.\nタイマーでYoutubeの動画音声を流したい場合は呼ぶ必要あり.\n\n[6](未)!ark disconnect - ArkHelperをボイスチャンネルから退ける.\n"!ark summon"で呼んだ後,戻すときに使う.\n\n[7](未)!ark setalert - timer用のYoutube動画をセットする.\n"!ark setalert youtubeのリンク"で登録を行う.
await client.send_message(message.channel, text)
#########################
    # Handy links related to Ark
#########################
elif message.content.startswith('!ark link'):
text = '__[1]Ark Officital wiki__ - <https://ark.gamepedia.com/ARK_Survival_Evolved_Wiki>\n\n__[2]Ark Japan wiki__ - <http://wikiwiki.jp/arkse/>\n\n__[3]DODOREX__ - <http://www.dododex.com/>\n\n__[4]ARK Community Forums__ - <https://survivetheark.com/>\n\n__[5]Ark wiki/Resource_Map__ - <https://ark.gamepedia.com/Resource_Map>\n\n__[6]Ark PatchNote__ - <https://steamcommunity.com/app/346110/discussions/0/594820656447032287/?l=japanese>'
await client.send_message(message.channel, text)
#########################
    # Countdown timer for Ark
#########################
elif message.content.startswith('!ark timer '):
messagelist = message.content.split(" ")
if len(messagelist) > 4:
            # Five or more items were given
pass
else:
count_time = messagelist[2]
            # Check whether the input matches the regular expressions below
matchOB_hour_minutes = re.match(r"([0-9]|[0-9][0-9]):([0-9]|[0-5][0-9])", count_time)
matchOB_hour = re.match(r"([1-9]|[1-9][0-9]*|)h", count_time)
matchOB_minutes = re.match(r"([1-9]|[1-9][0-9]*)m", count_time)
matchOB_days = re.match(r"([1-9]|[1-9][0-9]*)d", count_time)
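            # Accepted duration formats, for example: "1:30" -> 1 hour 30 minutes,
            # "45m" -> 45 minutes, "2h" -> 2 hours, "3d" -> 3 days.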
            # If the input was recognised as a timer command
if matchOB_hour_minutes or matchOB_hour or matchOB_minutes or matchOB_days:
finish_time = 0
                # "XX:XX" format
if matchOB_hour_minutes:
finish_time_list = count_time.split(":")
finish_time = ( int(finish_time_list[0]) * 60 + int(finish_time_list[1]) ) * 60
                # "XXd" format
if matchOB_days:
finish_time = int(count_time[:-1]) * 60 * 60 * 24
                # "XXh" format
if matchOB_hour:
finish_time = int(count_time[:-1]) * 60 * 60
                # "XXm" format
if matchOB_minutes:
finish_time = int(count_time[:-1]) * 60
                # Handle the case where the title was left blank
if len(messagelist) < 4:
messagelist.append("無名")
                # Get the current time
nowtime_datetime = datetime.datetime.now()
                # Define the finish time
finishtime_datetime = (nowtime_datetime + datetime.timedelta(seconds=int(finish_time)))
                # Build the message to send
text = '`' + finishtime_datetime.strftime("%m/%d %H:%M:%S") + '` に `'+ messagelist[3] +'` のアラートを行います'
await client.send_message(message.channel,text)
                # Register it in the timer list
                # title, current time, scheduled alert time, registering user
                # Execute the INSERT
ark_timerdata = ([messagelist[3], nowtime_datetime, finishtime_datetime, message.author.name, message.channel.id])
c.execute("INSERT INTO arktimer (title, at_registration_time, finish_time, register_name, notice_channel_id) VALUES (?,?,?,?,?)", ark_timerdata)
conn.commit()
                # Get the ID of the inserted timer
timer_id = -1
for row in c.execute("SELECT last_insert_rowid();"):
timer_id = row[0]
                # Wait for the specified time
await asyncio.sleep(finish_time)
                # Check whether the timer still exists in the timer list
c.execute("SELECT * FROM arktimer WHERE id = ?", (timer_id,))
if c.fetchone():
                    # Show the notification
await client.send_message(message.channel, '@here `'+messagelist[3]+'` の時間です by '+message.author.mention+'')
                # Delete the entry
c.execute("DELETE FROM arktimer WHERE id = ?", (timer_id,))
conn.commit()
################################
    # Display the list of currently registered timers
################################
elif message.content.startswith('!ark timerlist'):
text = '```css\n'
        # When there are timers to display
for row in c.execute("SELECT * from arktimer"):
remaining_time = str(datetime.datetime.strptime(row[3],'%Y-%m-%d %H:%M:%S.%f') - datetime.datetime.now()).split(".")[0]
text += '['+str(row[0])+']'+row[1]+ ' by '+ row[4] + '\n [残り : ' + remaining_time + ']\n\n'
else :
text += '```'
        # When there are no timers to display
if text == '```css\n```':
text = '```何も登録されていません```'
await client.send_message(message.channel, text)
##############################
    # Delete registered timers
##############################
elif message.content.startswith('!ark timerdel '):
messagelist = message.content.split(" ")
        # Delete all timers
if messagelist[2] == "all":
c.execute("DELETE FROM arktimer")
conn.commit()
text = "現在登録されているタイマーを全て削除しました"
await client.send_message(message.channel, text)
        # Delete a single timer
else:
for row in c.execute("SELECT * from arktimer"):
if (int(messagelist[2]) == row[0]):
                # Delete the entry
c.execute("DELETE FROM arktimer WHERE id=?",(row[0],))
conn.commit()
await client.send_message(message.channel, '`[' + messagelist[2] + ']' + row[1] + '` を削除しました')
break
else:
await client.send_message(message.channel, '`[' + messagelist[2] + ']' + row[1] + '` は見つかりませんでした')
#########################
    # Add an announcement
#########################
elif message.content.startswith('!ark notice'):
pass
# messagelist = message.content.split(" ")
# if len(messagelist) > 2:
# with open('notice.txt', 'w') as n:
# n.write(message.content.replace('!ark notice ',''))
################################
    # Display the version of the ArkHelper bot
################################
elif message.content.startswith('!ark -v') or message.content.startswith('!ark version'):
await client.send_message(message.channel, 'Botのバージョンは'+ __version__ +'です.')
@client.event
async def on_member_join(member):
print('join channnel!')
# with open('notice.txt', 'r') as n:
# client.send_message(member.private_channels, n.read())
@client.event
async def on_server_join(server):
print('on_server_join')
# with open('notice.txt', 'r') as n:
# client.send_message(member.private_channels, n.read())
@client.event
async def on_voice_state_update(before, after):
print(before.server.name)
print('on_voice_state_update')
# Run
client.run(KEY)
| mit | -7,638,959,006,603,030,000 | 32.863813 | 447 | 0.541767 | false |
davidbrazdil/nacl | pynacl/local_storage_cache.py | 1 | 3069 | #!/usr/bin/python
# Copyright (c) 2012 The Native Client Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Cache accesses to GSDStorage locally.
Operations are provided to read/write whole files and to
read/write strings.
Read from GSDStorage if nothing exists locally.
"""
import os
import re
import file_tools
KEY_PATTERN = re.compile('^[A-Za-z0-9_/.]+$')
def ValidateKey(key):
if KEY_PATTERN.match(key) is None:
raise KeyError('Invalid storage key "%s"' % key)
def LocalFileURL(local_file):
abs_path = os.path.abspath(local_file)
if not abs_path.startswith('/'):
# Windows paths needs an extra slash for the file protocol.
return 'file:///' + abs_path
else:
return 'file://' + abs_path
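# For example: on Windows an absolute path such as C:\cache\key becomes
# 'file:///C:\cache\key', while a POSIX path such as /tmp/cache/key becomes
# 'file:///tmp/cache/key'.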
class LocalStorageCache(object):
"""A caching wrapper for reading a GSDStorage object or storing locally.
Allow reading/writing to key, value pairs in local files.
Reads fall back to remote storage.
Restricts keys to a limited regex.
Is not atomic in the face of concurrent writers / readers on Windows.
"""
def __init__(self, cache_path, storage):
"""Init for this class.
Args:
cache_path: Path to a database to store a local cache in.
storage: A GSDStorage style object to fallback to for reads.
"""
self._cache_path = os.path.abspath(cache_path)
file_tools.MakeDirectoryIfAbsent(self._cache_path)
self._storage = storage
def PutFile(self, path, key):
"""Write a file to storage.
Args:
path: Path of the file to write.
key: Key to store file under.
Returns:
URL written to.
"""
return self.PutData(file_tools.ReadFile(path), key)
def PutData(self, data, key):
"""Write data to storage.
Args:
data: Data to store.
key: Key to store file under.
Returns:
URL written to.
"""
ValidateKey(key)
cache_file = os.path.join(self._cache_path, key)
cache_dir = os.path.dirname(cache_file)
if not os.path.exists(cache_dir):
os.makedirs(cache_dir)
file_tools.AtomicWriteFile(data, cache_file)
return LocalFileURL(cache_file)
def GetFile(self, key, path):
"""Read a file from storage.
Args:
key: Key to store file under.
path: Destination filename.
Returns:
URL used on success or None for failure.
"""
ValidateKey(key)
cache_file = os.path.join(self._cache_path, key)
if os.path.exists(cache_file):
data = file_tools.ReadFile(cache_file)
file_tools.WriteFile(data, path)
return LocalFileURL(cache_file)
else:
return self._storage.GetFile(key, path)
def GetData(self, key):
"""Read data from global storage.
Args:
key: Key to store file under.
Returns:
Data from storage, or None for failure.
"""
ValidateKey(key)
cache_file = os.path.join(self._cache_path, key)
if os.path.exists(cache_file):
return file_tools.ReadFile(cache_file)
else:
return self._storage.GetData(key)
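# Illustrative usage sketch; `gsd_storage` stands for any GSDStorage-like object
# providing GetFile/GetData:
#
#   cache = LocalStorageCache('/tmp/nacl_cache', gsd_storage)
#   url = cache.PutData('hello world', 'toolchain/linux_x86/hello.txt')
#   data = cache.GetData('toolchain/linux_x86/hello.txt')   # served from the local cache
#   other = cache.GetData('toolchain/linux_x86/other.txt')  # falls back to gsd_storage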
| bsd-3-clause | -297,330,942,844,684,160 | 26.401786 | 74 | 0.666015 | false |
ergoregion/Rota-Program | Rota_System/Reporting/LinkedBulkReport/Event.py | 1 | 3662 | __author__ = 'Neil Butcher'
from Rota_System.Reporting.HTMLObjects import HTMLObjects
from Abstract import event_title, person_code
from Rota_System.StandardTimes import date_string, time_string
class EventsReporter(object):
def write_reports_about(self, a_list, a_folder):
self._reporter = EventReporter()
titles = []
for event in sorted(a_list, key=lambda e: e.datetime()):
html = self._reporter.report_about(event)
et = event_title(event)
filename = a_folder + '\\' + et + '.html'
fileopen = open(filename,'w')
fileopen.write(html.html_string())
fileopen.close()
titles.append(et)
self._write_index_file(titles, a_folder)
def _write_index_file(self, a_list, a_folder):
table = HTMLObjects.HTMLTable()
        for title in a_list:
            text = HTMLObjects.HTMLLink(title, "./" + title + ".html")
cell = HTMLObjects.HTMLTableCell(text)
row = HTMLObjects.HTMLTableRow(cell)
table.add(row)
html = HTMLObjects.HTMLAll(HTMLObjects.HTMLHead(HTMLObjects.HTMLPageTitle('Events')))
html.add(HTMLObjects.HTMLLink("index", "../index.html"))
html.add(HTMLObjects.HTMLTitle('Events'))
html.add(table)
filename = a_folder + '\\' + 'index.html'
fileopen = open(filename,'w')
fileopen.write(html.html_string())
fileopen.close()
class EventReporter(object):
def event(self, event):
self._event = event
def report_about(self, an_object):
self.event(an_object)
return self.html()
def html(self):
html = HTMLObjects.HTMLGroup()
html.add(self._html_preheader())
html.add(self._html_header())
if len(self._event.appointments) > 0:
html.add(self._html_table())
html.add(self._html_footer())
return html
def title(self):
return event_title(self._event)
def _html_preheader(self):
return HTMLObjects.HTMLLink("events", "./index.html")
def _html_header(self):
title = 'Event '
title += event_title(self._event)
return HTMLObjects.HTMLHeading(title)
def _html_table(self):
table = HTMLObjects.HTMLTable()
table.add(self._html_table_row_header())
sorted_appointments = sorted(self._event.appointments, key=lambda app: app.role.priority, reverse=True)
for appointment in sorted_appointments:
table.add(self._html_table_row(appointment))
return table
def _html_footer(self):
return None
def _html_table_row_header(self):
html = HTMLObjects.HTMLTableRow()
html.add(HTMLObjects.HTMLTableHeaderCell('Role'))
html.add(HTMLObjects.HTMLTableHeaderCell('Person'))
html.add(HTMLObjects.HTMLTableHeaderCell('note'))
return html
def _html_table_row(self, appointment):
html = HTMLObjects.HTMLTableRow()
if appointment.disabled and not (appointment.is_filled()):
return None
link = HTMLObjects.HTMLLink(appointment.role.description, "../roles/" + appointment.role.description + ".html")
html.add(HTMLObjects.HTMLTableCell(link))
if appointment.is_filled():
link = HTMLObjects.HTMLLink(person_code(appointment.person), "../people/" + person_code(appointment.person) + ".html")
html.add(HTMLObjects.HTMLTableCell(link))
else:
html.add(HTMLObjects.HTMLTableCell('Not filled', 3, 1))
html.add(HTMLObjects.HTMLTableCell(appointment.note))
return html
| mit | -4,241,196,320,716,281,000 | 34.901961 | 131 | 0.623157 | false |
zak-k/cis | cis/plotting/heatmap.py | 1 | 3389 | import logging
import numpy
from cis.exceptions import UserPrintableException
from cis.plotting.generic_plot import Generic_Plot
class Heatmap(Generic_Plot):
def __init__(self, packed_data_items, plot_args, *mplargs, **mplkwargs):
# Do this here because if this is ungridded data, we won't be able to complete the super() call
if not packed_data_items[0].is_gridded:
raise UserPrintableException("Heatmap can only be plotted for gridded data")
super(Heatmap, self).__init__(packed_data_items, plot_args, *mplargs, **mplkwargs)
def plot(self):
"""
Plots a heatmap
"""
from cis.exceptions import InvalidNumberOfDatagroupsSpecifiedError
if len(self.packed_data_items) != 1:
raise InvalidNumberOfDatagroupsSpecifiedError("Invalid number of datagroups specified. Only one datagroup "
"can be plotted for a heatmap.")
if not self.packed_data_items[0].is_gridded:
raise UserPrintableException("Heatmap can only be plotted for gridded data")
# Set the options specific to a datagroup with the heatmap type
self.mplkwargs['cmap'] = self.plot_args['datagroups'][self.datagroup]['cmap']
if self.plot_args['datagroups'][self.datagroup]['cmin'] is not None:
self.plot_args["valrange"]["vmin"] = self.plot_args['datagroups'][self.datagroup]['cmin']
if self.plot_args['datagroups'][self.datagroup]['cmax'] is not None:
self.plot_args["valrange"]["vmax"] = self.plot_args['datagroups'][self.datagroup]['cmax']
# if self.is_map():
# self.mplkwargs["latlon"] = True
x, y, data = make_color_mesh_cells(self.packed_data_items[0], self.plot_args)
self.color_axis.append(self.matplotlib.pcolormesh(x, y, data, *self.mplargs, **self.mplkwargs))
def get_data_items_max(self):
# Take into account the bounds
x_coord = self.packed_data_items[0].coord(self.plot_args['x_variable'])
if not x_coord.has_bounds():
x_coord.guess_bounds()
return numpy.max(x_coord.bounds)
def set_default_axis_label(self, axis):
return self.set_3daxis_label(axis)
def create_legend(self):
pass
def format_plot(self):
self.format_time_axis()
self.format_3d_plot()
def make_color_mesh_cells(packed_data_item, plot_args):
"""
Generate the correct cell corners for use with a heatmap, since heatmap doesn't take
cell centers but cell corners
:param packed_data_item: IRIS cube
:param plot_args: dictionary of plot arguments
:return:
"""
from cis.utils import get_coord
data = packed_data_item.data
x = get_coord(packed_data_item, plot_args['x_variable'], data)
y = get_coord(packed_data_item, plot_args['y_variable'], data)
x_dim = packed_data_item.coord_dims(x)
y_dim = packed_data_item.coord_dims(y)
for coord in (x, y):
if not coord.has_bounds():
coord.guess_bounds()
y_bounds = y.bounds
x_vals = [b[0] for b in x.bounds] + [x.bounds[-1][1]]
y_vals = [b[0] for b in y_bounds] + [y_bounds[-1][1]]
# Get the order right
if x_dim > y_dim:
xv, yv = numpy.meshgrid(x_vals, y_vals)
else:
yv, xv = numpy.meshgrid(y_vals, x_vals)
return xv, yv, data
| gpl-3.0 | 3,874,571,118,185,780,700 | 37.511364 | 119 | 0.632045 | false |
code-for-india/sahana_shelter_worldbank | modules/s3/s3codecs/xls.py | 1 | 17770 | # -*- coding: utf-8 -*-
"""
S3 Microsoft Excel codec
@copyright: 2011-13 (c) Sahana Software Foundation
@license: MIT
Permission is hereby granted, free of charge, to any person
obtaining a copy of this software and associated documentation
files (the "Software"), to deal in the Software without
restriction, including without limitation the rights to use,
copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the
Software is furnished to do so, subject to the following
conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES
OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT
HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY,
WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR
OTHER DEALINGS IN THE SOFTWARE.
"""
__all__ = ["S3XLS"]
try:
from cStringIO import StringIO # Faster, where available
except:
from StringIO import StringIO
from gluon import *
from gluon.contenttype import contenttype
from gluon.storage import Storage
from ..s3codec import S3Codec
from ..s3utils import s3_unicode, s3_strip_markup
# =============================================================================
class S3XLS(S3Codec):
"""
Simple Microsoft Excel format codec
"""
# Customizable styles
COL_WIDTH_MULTIPLIER = 310
LARGE_HEADER_COLOUR = 0x2C
HEADER_COLOUR = 0x2C
SUB_HEADER_COLOUR = 0x18
ROW_ALTERNATING_COLOURS = [0x2A, 0x2B]
# -------------------------------------------------------------------------
def __init__(self):
"""
Constructor
"""
# Error codes
T = current.T
self.ERROR = Storage(
XLRD_ERROR = "Python needs the xlrd module installed for XLS export",
XLWT_ERROR = "Python needs the xlwt module installed for XLS export"
)
# -------------------------------------------------------------------------
def extractResource(self, resource, list_fields):
"""
Extract the rows from the resource
@param resource: the resource
@param list_fields: fields to include in list views
"""
title = self.crud_string(resource.tablename, "title_list")
vars = Storage(current.request.vars)
vars["iColumns"] = len(list_fields)
filter, orderby, left = resource.datatable_filter(list_fields, vars)
resource.add_filter(filter)
if orderby is None:
orderby = resource.get_config("orderby", None)
result = resource.select(list_fields,
left=left,
limit=None,
count=True,
getids=True,
orderby=orderby,
represent=True,
show_links=False)
rfields = result["rfields"]
rows = result["rows"]
types = []
lfields = []
heading = {}
for rfield in rfields:
if rfield.show:
lfields.append(rfield.colname)
heading[rfield.colname] = rfield.label
if rfield.ftype == "virtual":
types.append("string")
else:
types.append(rfield.ftype)
return (title, types, lfields, heading, rows)
# -------------------------------------------------------------------------
def encode(self, data_source, **attr):
"""
Export data as a Microsoft Excel spreadsheet
@param data_source: the source of the data that is to be encoded
as a spreadsheet. This may be:
resource: the resource
item: a list of pre-fetched values
the headings are in the first row
the data types are in the second row
@param attr: dictionary of parameters:
* title: The main title of the report
* list_fields: Fields to include in list views
* report_groupby: Used to create a grouping of the result:
either a Field object of the resource
or a string which matches a value in the heading
* use_colour: True to add colour to the cells. default False
"""
request = current.request
import datetime
try:
import xlwt
except ImportError:
if current.auth.permission.format in request.INTERACTIVE_FORMATS:
current.session.error = self.ERROR.XLWT_ERROR
redirect(URL(extension=""))
else:
error = self.ERROR.XLWT_ERROR
current.log.error(error)
return error
try:
from xlrd.xldate import xldate_from_date_tuple, \
xldate_from_time_tuple, \
xldate_from_datetime_tuple
except ImportError:
if current.auth.permission.format in request.INTERACTIVE_FORMATS:
current.session.error = self.ERROR.XLRD_ERROR
redirect(URL(extension=""))
else:
error = self.ERROR.XLRD_ERROR
current.log.error(error)
return error
# The xlwt library supports a maximum of 182 characters in a single cell
max_cell_size = 182
COL_WIDTH_MULTIPLIER = S3XLS.COL_WIDTH_MULTIPLIER
# Get the attributes
title = attr.get("title")
list_fields = attr.get("list_fields")
if not list_fields:
list_fields = data_source.list_fields()
group = attr.get("dt_group")
use_colour = attr.get("use_colour", False)
# Extract the data from the data_source
if isinstance(data_source, (list, tuple)):
headers = data_source[0]
types = data_source[1]
rows = data_source[2:]
else:
(title, types, lfields, headers, rows) = self.extractResource(data_source,
list_fields)
report_groupby = lfields[group] if group else None
if len(rows) > 0 and len(headers) != len(rows[0]):
msg = """modules/s3/codecs/xls: There is an error in the list_items, a field doesn't exist"
requesting url %s
Headers = %d, Data Items = %d
Headers %s
List Fields %s""" % (request.url, len(headers), len(items[0]), headers, list_fields)
current.log.error(msg)
groupby_label = headers[report_groupby] if report_groupby else None
# Date/Time formats from L10N deployment settings
settings = current.deployment_settings
date_format = settings.get_L10n_date_format()
date_format_str = str(date_format)
date_format = S3XLS.dt_format_translate(date_format)
time_format = S3XLS.dt_format_translate(settings.get_L10n_time_format())
datetime_format = S3XLS.dt_format_translate(settings.get_L10n_datetime_format())
# Create the workbook
book = xlwt.Workbook(encoding="utf-8")
# Add a sheet
# Can't have a / in the sheet_name, so replace any with a space
sheet_name = str(title.replace("/", " "))
# sheet_name cannot be over 31 chars
if len(sheet_name) > 31:
sheet_name = sheet_name[:31]
sheet1 = book.add_sheet(sheet_name)
# Styles
styleLargeHeader = xlwt.XFStyle()
styleLargeHeader.font.bold = True
styleLargeHeader.font.height = 400
if use_colour:
styleLargeHeader.alignment.horz = styleLargeHeader.alignment.HORZ_CENTER
styleLargeHeader.pattern.pattern = styleLargeHeader.pattern.SOLID_PATTERN
styleLargeHeader.pattern.pattern_fore_colour = S3XLS.LARGE_HEADER_COLOUR
styleNotes = xlwt.XFStyle()
styleNotes.font.italic = True
styleNotes.font.height = 160 # 160 Twips = 8 point
styleNotes.num_format_str = datetime_format
styleHeader = xlwt.XFStyle()
styleHeader.font.bold = True
styleHeader.num_format_str = datetime_format
if use_colour:
styleHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
styleHeader.pattern.pattern_fore_colour = S3XLS.HEADER_COLOUR
styleSubHeader = xlwt.XFStyle()
styleSubHeader.font.bold = True
if use_colour:
styleSubHeader.pattern.pattern = styleHeader.pattern.SOLID_PATTERN
styleSubHeader.pattern.pattern_fore_colour = S3XLS.SUB_HEADER_COLOUR
styleOdd = xlwt.XFStyle()
if use_colour:
styleOdd.pattern.pattern = styleOdd.pattern.SOLID_PATTERN
styleOdd.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[0]
styleEven = xlwt.XFStyle()
if use_colour:
styleEven.pattern.pattern = styleEven.pattern.SOLID_PATTERN
styleEven.pattern.pattern_fore_colour = S3XLS.ROW_ALTERNATING_COLOURS[1]
# Header row
colCnt = 0
#headerRow = sheet1.row(2)
headerRow = sheet1.row(0)
fieldWidths = []
id = False
for selector in lfields:
if selector == report_groupby:
continue
label = headers[selector]
if label == "Id":
# Indicate to adjust colCnt when writing out
id = True
fieldWidths.append(0)
colCnt += 1
continue
if label == "Sort":
continue
if id:
# Adjust for the skipped column
writeCol = colCnt - 1
else:
writeCol = colCnt
headerRow.write(writeCol, str(label), styleHeader)
width = max(len(label) * COL_WIDTH_MULTIPLIER, 2000)
#width = len(label) * COL_WIDTH_MULTIPLIER
fieldWidths.append(width)
sheet1.col(writeCol).width = width
colCnt += 1
# Title row
# - has been removed to allow columns to be easily sorted post-export.
# - add deployment_setting if an Org wishes a Title Row
# currentRow = sheet1.row(0)
# if colCnt > 0:
# sheet1.write_merge(0, 0, 0, colCnt, str(title),
# styleLargeHeader)
# currentRow.height = 500
# currentRow = sheet1.row(1)
# currentRow.write(0, str(current.T("Date Exported:")), styleNotes)
# currentRow.write(1, request.now, styleNotes)
# Fix the size of the last column to display the date
#if 16 * COL_WIDTH_MULTIPLIER > width:
# sheet1.col(colCnt).width = 16 * COL_WIDTH_MULTIPLIER
# Initialize counters
totalCols = colCnt
#rowCnt = 2
rowCnt = 0
subheading = None
for row in rows:
# Item details
rowCnt += 1
currentRow = sheet1.row(rowCnt)
colCnt = 0
if rowCnt % 2 == 0:
style = styleEven
else:
style = styleOdd
if report_groupby:
represent = s3_strip_markup(s3_unicode(row[report_groupby]))
if subheading != represent:
subheading = represent
sheet1.write_merge(rowCnt, rowCnt, 0, totalCols,
subheading, styleSubHeader)
rowCnt += 1
currentRow = sheet1.row(rowCnt)
if rowCnt % 2 == 0:
style = styleEven
else:
style = styleOdd
for field in lfields:
label = headers[field]
if label == groupby_label:
continue
if label == "Id":
# Skip the ID column from XLS exports
colCnt += 1
continue
represent = s3_strip_markup(s3_unicode(row[field]))
coltype = types[colCnt]
if coltype == "sort":
continue
if len(represent) > max_cell_size:
represent = represent[:max_cell_size]
value = represent
if coltype == "date":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day)
value = xldate_from_date_tuple(date_tuple, 0)
style.num_format_str = date_format
except:
pass
elif coltype == "datetime":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.year,
cell_datetime.month,
cell_datetime.day,
cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_datetime_tuple(date_tuple, 0)
style.num_format_str = datetime_format
except:
pass
elif coltype == "time":
try:
cell_datetime = datetime.datetime.strptime(value,
date_format_str)
date_tuple = (cell_datetime.hour,
cell_datetime.minute,
cell_datetime.second)
value = xldate_from_time_tuple(date_tuple)
style.num_format_str = time_format
except:
pass
elif coltype == "integer":
try:
value = int(value)
style.num_format_str = "0"
except:
pass
elif coltype == "double":
try:
value = float(value)
style.num_format_str = "0.00"
except:
pass
if id:
# Adjust for the skipped column
writeCol = colCnt - 1
else:
writeCol = colCnt
currentRow.write(writeCol, value, style)
width = len(represent) * COL_WIDTH_MULTIPLIER
if width > fieldWidths[colCnt]:
fieldWidths[colCnt] = width
sheet1.col(writeCol).width = width
colCnt += 1
sheet1.panes_frozen = True
#sheet1.horz_split_pos = 3
sheet1.horz_split_pos = 1
output = StringIO()
book.save(output)
# Response headers
filename = "%s_%s.xls" % (request.env.server_name, str(title))
disposition = "attachment; filename=\"%s\"" % filename
response = current.response
response.headers["Content-Type"] = contenttype(".xls")
response.headers["Content-disposition"] = disposition
output.seek(0)
return output.read()
# -------------------------------------------------------------------------
@staticmethod
def dt_format_translate(pyfmt):
"""
Translate a Python datetime format string into an
Excel datetime format string
@param pyfmt: the Python format string
"""
translate = {"%a": "ddd",
"%A": "dddd",
"%b": "mmm",
"%B": "mmmm",
"%c": "",
"%d": "dd",
"%f": "",
"%H": "hh",
"%I": "hh",
"%j": "",
"%m": "mm",
"%M": "mm",
"%p": "AM/PM",
"%S": "ss",
"%U": "",
"%w": "",
"%W": "",
"%x": "",
"%X": "",
"%y": "yy",
"%Y": "yyyy",
"%z": "",
"%Z": "",
"%%": "%",
}
xlfmt = str(pyfmt)
for item in translate:
if item in xlfmt:
xlfmt = xlfmt.replace(item, translate[item])
return xlfmt
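        # For example (illustrative only): dt_format_translate("%Y-%m-%d %H:%M")
        # returns "yyyy-mm-dd hh:mm".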
# End =========================================================================
| mit | 2,516,832,263,332,031,500 | 38.054945 | 103 | 0.488689 | false |
berkmancenter/mediacloud | apps/word2vec-generate-snapshot-model/tests/python/test_snapshot_sentence_iterator.py | 1 | 1064 | from word2vec_generate_snapshot_model.sentence_iterators import SnapshotSentenceIterator
from .setup_test_word2vec import TestWord2vec
class TestSnapshotSentenceIterator(TestWord2vec):
def test_snapshot_sentence_iterator(self):
"""Ensure that all of the sentences get returned"""
sentence_iterator = SnapshotSentenceIterator(
db=self.db,
snapshots_id=self.snapshots_id,
stories_id_chunk_size=self.TEST_STORIES_ID_CHUNK_SIZE,
)
returned_sentence_count = 0
seen_sentences = set()
for sentence_words in sentence_iterator:
assert sentence_words, "Sentence words should be set."
sentence = ' '.join(sentence_words)
assert sentence not in seen_sentences, "Every sentence should be unique."
returned_sentence_count += 1
seen_sentences.add(sentence)
assert returned_sentence_count == self.TEST_STORY_COUNT * self.TEST_SENTENCE_PER_STORY_COUNT, \
"All of the sentences should have been returned."
| agpl-3.0 | 8,970,406,891,897,484,000 | 37 | 103 | 0.670113 | false |
santisiri/popego | envs/ALPHA-POPEGO/lib/python2.5/site-packages/SQLAlchemy-0.4.0-py2.5.egg/sqlalchemy/ext/associationproxy.py | 1 | 24027 | """Contain the ``AssociationProxy`` class.
The ``AssociationProxy`` is a Python property object which provides
transparent proxied access to the endpoint of an association object.
See the example ``examples/association/proxied_association.py``.
"""
import weakref, itertools
import sqlalchemy.exceptions as exceptions
import sqlalchemy.orm as orm
import sqlalchemy.util as util
def association_proxy(targetcollection, attr, **kw):
"""Convenience function for use in mapped classes. Implements a Python
property representing a relation as a collection of simpler values. The
proxied property will mimic the collection type of the target (list, dict
or set), or in the case of a one to one relation, a simple scalar value.
targetcollection
Name of the relation attribute we'll proxy to, usually created with
'relation()' in a mapper setup.
attr
Attribute on the associated instances we'll proxy for. For example,
given a target collection of [obj1, obj2], a list created by this proxy
property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
If the relation is one-to-one or otherwise uselist=False, then simply:
getattr(obj, attr)
creator (optional)
When new items are added to this proxied collection, new instances of
the class collected by the target collection will be created. For
list and set collections, the target class constructor will be called
with the 'value' for the new instance. For dict types, two arguments
are passed: key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
For scalar relations, creator() will be called if the target is None.
If the target is present, set operations are proxied to setattr() on the
associated object.
If you have an associated object with multiple attributes, you may set up
multiple association proxies mapping to different attributes. See the
unit tests for examples, and for examples of how creator() functions can
be used to construct the scalar relation on-demand in this situation.
Passes along any other arguments to AssociationProxy
"""
return AssociationProxy(targetcollection, attr, **kw)
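# A hedged usage sketch (illustrative names, not part of this module): assuming a
# mapped class whose 'kw' attribute is a relation() to a Keyword class exposing a
# 'keyword' column, the proxy lets callers treat the collection as plain strings:
#
#     class User(object):
#         keywords = association_proxy('kw', 'keyword')
#
#     u = User()
#     u.keywords.append('cheese inspector')   # creates Keyword('cheese inspector')
#     print u.keywords                        # ['cheese inspector', ...]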
class AssociationProxy(object):
"""A property object that automatically sets up `AssociationLists`
on an object."""
def __init__(self, targetcollection, attr, creator=None,
getset_factory=None, proxy_factory=None, proxy_bulk_set=None):
"""Arguments are:
targetcollection
Name of the collection we'll proxy to, usually created with
'relation()' in a mapper setup.
attr
Attribute on the collected instances we'll proxy for. For example,
given a target collection of [obj1, obj2],
a list created by this proxy property would look like
[getattr(obj1, attr), getattr(obj2, attr)]
creator
Optional. When new items are added to this proxied collection, new
instances of the class collected by the target collection will be
created. For list and set collections, the target class
constructor will be called with the 'value' for the new instance.
For dict types, two arguments are passed: key and value.
If you want to construct instances differently, supply a 'creator'
function that takes arguments as above and returns instances.
getset_factory
Optional. Proxied attribute access is automatically handled
by routines that get and set values based on the `attr` argument
for this proxy.
If you would like to customize this behavior, you may supply a
`getset_factory` callable that produces a tuple of `getter` and
`setter` functions. The factory is called with two arguments,
the abstract type of the underlying collection and this proxy
instance.
proxy_factory
Optional. The type of collection to emulate is determined by
sniffing the target collection. If your collection type can't be
determined by duck typing or you'd like to use a different collection
implementation, you may supply a factory function to produce those
collections. Only applicable to non-scalar relations.
proxy_bulk_set
Optional, use with proxy_factory. See the _set() method for
details.
"""
self.target_collection = targetcollection # backwards compat name...
self.value_attr = attr
self.creator = creator
self.getset_factory = getset_factory
self.proxy_factory = proxy_factory
self.proxy_bulk_set = proxy_bulk_set
self.scalar = None
self.owning_class = None
self.key = '_%s_%s_%s' % (type(self).__name__,
targetcollection, id(self))
self.collection_class = None
def _get_property(self):
return orm.class_mapper(self.owning_class).get_property(self.target_collection)
def _target_class(self):
return self._get_property().mapper.class_
target_class = property(_target_class)
def _target_is_scalar(self):
return not self._get_property().uselist
def _lazy_collection(self, weakobjref):
target = self.target_collection
del self
def lazy_collection():
obj = weakobjref()
if obj is None:
raise exceptions.InvalidRequestError(
"stale association proxy, parent object has gone out of "
"scope")
return getattr(obj, target)
return lazy_collection
def __get__(self, obj, class_):
if obj is None:
self.owning_class = class_
return
elif self.scalar is None:
self.scalar = self._target_is_scalar()
if self.scalar:
self._initialize_scalar_accessors()
if self.scalar:
return self._scalar_get(getattr(obj, self.target_collection))
else:
try:
return getattr(obj, self.key)
except AttributeError:
proxy = self._new(self._lazy_collection(weakref.ref(obj)))
setattr(obj, self.key, proxy)
return proxy
def __set__(self, obj, values):
if self.scalar is None:
self.scalar = self._target_is_scalar()
if self.scalar:
self._initialize_scalar_accessors()
if self.scalar:
creator = self.creator and self.creator or self.target_class
target = getattr(obj, self.target_collection)
if target is None:
setattr(obj, self.target_collection, creator(values))
else:
self._scalar_set(target, values)
else:
proxy = self.__get__(obj, None)
proxy.clear()
self._set(proxy, values)
def __delete__(self, obj):
delattr(obj, self.key)
def _initialize_scalar_accessors(self):
if self.getset_factory:
get, set = self.getset_factory(None, self)
else:
get, set = self._default_getset(None)
self._scalar_get, self._scalar_set = get, set
def _default_getset(self, collection_class):
attr = self.value_attr
getter = util.attrgetter(attr)
if collection_class is dict:
setter = lambda o, k, v: setattr(o, attr, v)
else:
setter = lambda o, v: setattr(o, attr, v)
return getter, setter
def _new(self, lazy_collection):
creator = self.creator and self.creator or self.target_class
self.collection_class = util.duck_type_collection(lazy_collection())
if self.proxy_factory:
return self.proxy_factory(lazy_collection, creator, self.value_attr)
if self.getset_factory:
getter, setter = self.getset_factory(self.collection_class, self)
else:
getter, setter = self._default_getset(self.collection_class)
if self.collection_class is list:
return _AssociationList(lazy_collection, creator, getter, setter)
elif self.collection_class is dict:
return _AssociationDict(lazy_collection, creator, getter, setter)
elif self.collection_class is util.Set:
return _AssociationSet(lazy_collection, creator, getter, setter)
else:
raise exceptions.ArgumentError(
'could not guess which interface to use for '
'collection_class "%s" backing "%s"; specify a '
'proxy_factory and proxy_bulk_set manually' %
(self.collection_class.__name__, self.target_collection))
def _set(self, proxy, values):
if self.proxy_bulk_set:
self.proxy_bulk_set(proxy, values)
elif self.collection_class is list:
proxy.extend(values)
elif self.collection_class is dict:
proxy.update(values)
elif self.collection_class is util.Set:
proxy.update(values)
else:
raise exceptions.ArgumentError(
'no proxy_bulk_set supplied for custom '
'collection_class implementation')
class _AssociationList(object):
"""Generic proxying list which proxies list operations to a another list,
converting association objects to and from a simplified value.
"""
def __init__(self, lazy_collection, creator, getter, setter):
"""
lazy_collection
A callable returning a list-based collection of entities (usually
an object attribute managed by a SQLAlchemy relation())
creator
A function that creates new target entities. Given one parameter:
value. The assertion is assumed:
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store
that value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
col = property(lambda self: self.lazy_collection())
# For compatibility with 0.3.1 through 0.3.7- pass kw through to creator.
# (see append() below)
def _create(self, value, **kw):
return self.creator(value, **kw)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
def __nonzero__(self):
if self.col:
return True
else:
return False
def __getitem__(self, index):
return self._get(self.col[index])
def __setitem__(self, index, value):
if not isinstance(index, slice):
self._set(self.col[index], value)
else:
if index.stop is None:
stop = len(self)
elif index.stop < 0:
stop = len(self) + index.stop
else:
stop = index.stop
step = index.step or 1
rng = range(index.start or 0, stop, step)
if step == 1:
for i in rng:
del self[index.start]
i = index.start
for item in value:
self.insert(i, item)
i += 1
else:
if len(value) != len(rng):
raise ValueError(
"attempt to assign sequence of size %s to "
"extended slice of size %s" % (len(value),
len(rng)))
for i, item in zip(rng, value):
self._set(self.col[i], item)
def __delitem__(self, index):
del self.col[index]
def __contains__(self, value):
for member in self.col:
if self._get(member) == value:
return True
return False
def __getslice__(self, start, end):
return [self._get(member) for member in self.col[start:end]]
def __setslice__(self, start, end, values):
members = [self._create(v) for v in values]
self.col[start:end] = members
def __delslice__(self, start, end):
del self.col[start:end]
def __iter__(self):
"""Iterate over proxied values.
For the actual domain objects, iterate over .col instead or
just use the underlying collection directly from its property
on the parent.
"""
for member in self.col:
yield self._get(member)
raise StopIteration
# For compatibility with 0.3.1 through 0.3.7- pass kw through to creator
# on append() only. (Can't on __setitem__, __contains__, etc., obviously.)
def append(self, value, **kw):
item = self._create(value, **kw)
self.col.append(item)
def count(self, value):
return sum([1 for _ in
itertools.ifilter(lambda v: v == value, iter(self))])
def extend(self, values):
for v in values:
self.append(v)
def insert(self, index, value):
self.col[index:index] = [self._create(value)]
def pop(self, index=-1):
return self.getter(self.col.pop(index))
def remove(self, value):
for i, val in enumerate(self):
if val == value:
del self.col[i]
return
raise ValueError("value not in list")
def reverse(self):
"""Not supported, use reversed(mylist)"""
raise NotImplementedError
def sort(self):
"""Not supported, use sorted(mylist)"""
raise NotImplementedError
def clear(self):
del self.col[0:len(self.col)]
def __eq__(self, other): return list(self) == other
def __ne__(self, other): return list(self) != other
def __lt__(self, other): return list(self) < other
def __le__(self, other): return list(self) <= other
def __gt__(self, other): return list(self) > other
def __ge__(self, other): return list(self) >= other
def __cmp__(self, other): return cmp(list(self), other)
def copy(self):
return list(self)
def __repr__(self):
return repr(list(self))
def hash(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
_NotProvided = object()
class _AssociationDict(object):
"""Generic proxying list which proxies dict operations to a another dict,
converting association objects to and from a simplified value.
"""
def __init__(self, lazy_collection, creator, getter, setter):
"""
lazy_collection
A callable returning a dict-based collection of entities (usually
an object attribute managed by a SQLAlchemy relation())
creator
A function that creates new target entities. Given two parameters:
key and value. The assertion is assumed:
obj = creator(somekey, somevalue)
assert getter(somekey) == somevalue
getter
A function. Given an associated object and a key, return the 'value'.
setter
A function. Given an associated object, a key and a value, store
that value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
col = property(lambda self: self.lazy_collection())
def _create(self, key, value):
return self.creator(key, value)
def _get(self, object):
return self.getter(object)
def _set(self, object, key, value):
return self.setter(object, key, value)
def __len__(self):
return len(self.col)
def __nonzero__(self):
if self.col:
return True
else:
return False
def __getitem__(self, key):
return self._get(self.col[key])
def __setitem__(self, key, value):
if key in self.col:
self._set(self.col[key], key, value)
else:
self.col[key] = self._create(key, value)
def __delitem__(self, key):
del self.col[key]
def __contains__(self, key):
return key in self.col
has_key = __contains__
def __iter__(self):
return self.col.iterkeys()
def clear(self):
self.col.clear()
def __eq__(self, other): return dict(self) == other
def __ne__(self, other): return dict(self) != other
def __lt__(self, other): return dict(self) < other
def __le__(self, other): return dict(self) <= other
def __gt__(self, other): return dict(self) > other
def __ge__(self, other): return dict(self) >= other
def __cmp__(self, other): return cmp(dict(self), other)
def __repr__(self):
return repr(dict(self.items()))
def get(self, key, default=None):
try:
return self[key]
except KeyError:
return default
def setdefault(self, key, default=None):
if key not in self.col:
self.col[key] = self._create(key, default)
return default
else:
return self[key]
def keys(self):
return self.col.keys()
def iterkeys(self):
return self.col.iterkeys()
def values(self):
return [ self._get(member) for member in self.col.values() ]
def itervalues(self):
for key in self.col:
yield self._get(self.col[key])
raise StopIteration
def items(self):
return [(k, self._get(self.col[k])) for k in self]
def iteritems(self):
for key in self.col:
yield (key, self._get(self.col[key]))
raise StopIteration
def pop(self, key, default=_NotProvided):
if default is _NotProvided:
member = self.col.pop(key)
else:
member = self.col.pop(key, default)
return self._get(member)
def popitem(self):
item = self.col.popitem()
return (item[0], self._get(item[1]))
def update(self, *a, **kw):
if len(a) > 1:
raise TypeError('update expected at most 1 arguments, got %i' %
len(a))
elif len(a) == 1:
seq_or_map = a[0]
for item in seq_or_map:
if isinstance(item, tuple):
self[item[0]] = item[1]
else:
self[item] = seq_or_map[item]
        for key, value in kw.items():
self[key] = value
def copy(self):
return dict(self.items())
def hash(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
class _AssociationSet(object):
"""Generic proxying list which proxies set operations to a another set,
converting association objects to and from a simplified value.
"""
def __init__(self, lazy_collection, creator, getter, setter):
"""
collection
A callable returning a set-based collection of entities (usually an
object attribute managed by a SQLAlchemy relation())
creator
A function that creates new target entities. Given one parameter:
value. The assertion is assumed:
obj = creator(somevalue)
assert getter(obj) == somevalue
getter
A function. Given an associated object, return the 'value'.
setter
A function. Given an associated object and a value, store
that value on the object.
"""
self.lazy_collection = lazy_collection
self.creator = creator
self.getter = getter
self.setter = setter
col = property(lambda self: self.lazy_collection())
def _create(self, value):
return self.creator(value)
def _get(self, object):
return self.getter(object)
def _set(self, object, value):
return self.setter(object, value)
def __len__(self):
return len(self.col)
def __nonzero__(self):
if self.col:
return True
else:
return False
def __contains__(self, value):
for member in self.col:
if self._get(member) == value:
return True
return False
def __iter__(self):
"""Iterate over proxied values. For the actual domain objects,
iterate over .col instead or just use the underlying collection
directly from its property on the parent."""
for member in self.col:
yield self._get(member)
raise StopIteration
def add(self, value):
if value not in self:
self.col.add(self._create(value))
# for discard and remove, choosing a more expensive check strategy rather
# than call self.creator()
def discard(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
break
def remove(self, value):
for member in self.col:
if self._get(member) == value:
self.col.discard(member)
return
raise KeyError(value)
def pop(self):
if not self.col:
raise KeyError('pop from an empty set')
member = self.col.pop()
return self._get(member)
def update(self, other):
for value in other:
self.add(value)
__ior__ = update
def _set(self):
return util.Set(iter(self))
def union(self, other):
return util.Set(self).union(other)
__or__ = union
def difference(self, other):
return util.Set(self).difference(other)
__sub__ = difference
def difference_update(self, other):
for value in other:
self.discard(value)
__isub__ = difference_update
def intersection(self, other):
return util.Set(self).intersection(other)
__and__ = intersection
def intersection_update(self, other):
want, have = self.intersection(other), util.Set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
__iand__ = intersection_update
def symmetric_difference(self, other):
return util.Set(self).symmetric_difference(other)
__xor__ = symmetric_difference
def symmetric_difference_update(self, other):
want, have = self.symmetric_difference(other), util.Set(self)
remove, add = have - want, want - have
for value in remove:
self.remove(value)
for value in add:
self.add(value)
__ixor__ = symmetric_difference_update
def issubset(self, other):
return util.Set(self).issubset(other)
def issuperset(self, other):
return util.Set(self).issuperset(other)
def clear(self):
self.col.clear()
def copy(self):
return util.Set(self)
def __eq__(self, other): return util.Set(self) == other
def __ne__(self, other): return util.Set(self) != other
def __lt__(self, other): return util.Set(self) < other
def __le__(self, other): return util.Set(self) <= other
def __gt__(self, other): return util.Set(self) > other
def __ge__(self, other): return util.Set(self) >= other
def __repr__(self):
return repr(util.Set(self))
def hash(self):
raise TypeError("%s objects are unhashable" % type(self).__name__)
| bsd-3-clause | 2,350,939,660,483,318,000 | 31.868673 | 87 | 0.587755 | false |
citrix-openstack-build/oslo.messaging | oslo/messaging/rpc/dispatcher.py | 1 | 4776 | # Copyright 2010 United States Government as represented by the
# Administrator of the National Aeronautics and Space Administration.
# All Rights Reserved.
# Copyright 2013 Red Hat, Inc.
# Copyright 2013 New Dream Network, LLC (DreamHost)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
__all__ = [
'NoSuchMethod',
'RPCDispatcher',
'RPCDispatcherError',
'UnsupportedVersion',
]
import logging
from oslo.messaging import _utils as utils
from oslo.messaging import localcontext
from oslo.messaging import serializer as msg_serializer
from oslo.messaging import server as msg_server
from oslo.messaging import target
_LOG = logging.getLogger(__name__)
class RPCDispatcherError(msg_server.MessagingServerError):
"A base class for all RPC dispatcher exceptions."
class NoSuchMethod(RPCDispatcherError, AttributeError):
"Raised if there is no endpoint which exposes the requested method."
def __init__(self, method):
msg = "Endpoint does not support RPC method %s" % method
super(NoSuchMethod, self).__init__(msg)
self.method = method
class UnsupportedVersion(RPCDispatcherError):
"Raised if there is no endpoint which supports the requested version."
def __init__(self, version):
msg = "Endpoint does not support RPC version %s" % version
super(UnsupportedVersion, self).__init__(msg)
self.version = version
class RPCDispatcher(object):
"""A message dispatcher which understands RPC messages.
A MessageHandlingServer is constructed by passing a callable dispatcher
which is invoked with context and message dictionaries each time a message
is received.
RPCDispatcher is one such dispatcher which understands the format of RPC
messages. The dispatcher looks at the namespace, version and method values
in the message and matches those against a list of available endpoints.
Endpoints may have a target attribute describing the namespace and version
of the methods exposed by that object. All public methods on an endpoint
object are remotely invokable by clients.
"""
def __init__(self, endpoints, serializer):
self.endpoints = endpoints
self.serializer = serializer or msg_serializer.NoOpSerializer()
self._default_target = target.Target()
@staticmethod
def _is_namespace(target, namespace):
return namespace == target.namespace
@staticmethod
def _is_compatible(target, version):
endpoint_version = target.version or '1.0'
return utils.version_is_compatible(endpoint_version, version)
def _dispatch(self, endpoint, method, ctxt, args):
ctxt = self.serializer.deserialize_context(ctxt)
new_args = dict()
for argname, arg in args.iteritems():
new_args[argname] = self.serializer.deserialize_entity(ctxt, arg)
result = getattr(endpoint, method)(ctxt, **new_args)
return self.serializer.serialize_entity(ctxt, result)
def __call__(self, ctxt, message):
"""Dispatch an RPC message to the appropriate endpoint method.
:param ctxt: the request context
:type ctxt: dict
:param message: the message payload
:type message: dict
:raises: NoSuchMethod, UnsupportedVersion
"""
method = message.get('method')
args = message.get('args', {})
namespace = message.get('namespace')
version = message.get('version', '1.0')
found_compatible = False
for endpoint in self.endpoints:
target = getattr(endpoint, 'target', None)
if not target:
target = self._default_target
if not (self._is_namespace(target, namespace) and
self._is_compatible(target, version)):
continue
if hasattr(endpoint, method):
localcontext.set_local_context(ctxt)
try:
return self._dispatch(endpoint, method, ctxt, args)
finally:
localcontext.clear_local_context()
found_compatible = True
if found_compatible:
raise NoSuchMethod(method)
else:
raise UnsupportedVersion(version)
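# A minimal, illustrative endpoint and dispatch (assumptions: the endpoint class and
# the message below are ours, not part of oslo.messaging; nothing runs at import time).
if __name__ == '__main__':
    class _DemoEndpoint(object):
        target = target.Target(namespace='demo', version='1.1')
        def echo(self, ctxt, text):
            return text
    _dispatcher = RPCDispatcher([_DemoEndpoint()], None)
    print(_dispatcher({}, {'method': 'echo', 'namespace': 'demo',
                           'version': '1.0', 'args': {'text': 'hello'}}))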
| apache-2.0 | 2,670,300,130,520,903,000 | 34.909774 | 78 | 0.675042 | false |
endlessm/chromium-browser | third_party/chromite/api/compile_build_api_proto.py | 1 | 6634 | # -*- coding: utf-8 -*-
# Copyright 2018 The Chromium OS Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Compile the Build API's proto.
Install proto using CIPD to ensure a consistent protoc version.
"""
from __future__ import print_function
import os
import sys
from chromite.lib import commandline
from chromite.lib import constants
from chromite.lib import cros_build_lib
from chromite.lib import cros_logging as logging
from chromite.lib import osutils
assert sys.version_info >= (3, 6), 'This module requires Python 3.6+'
_API_DIR = os.path.join(constants.CHROMITE_DIR, 'api')
_CIPD_ROOT = os.path.join(constants.CHROMITE_DIR, '.cipd_bin')
_PROTOC = os.path.join(_CIPD_ROOT, 'protoc')
_PROTO_DIR = os.path.join(constants.CHROMITE_DIR, 'infra', 'proto')
PROTOC_VERSION = '3.6.1'
class Error(Exception):
"""Base error class for the module."""
class GenerationError(Error):
"""A failure we can't recover from."""
def _InstallProtoc():
"""Install protoc from CIPD."""
logging.info('Installing protoc.')
cmd = ['cipd', 'ensure']
# Clean up the output.
cmd.extend(['-log-level', 'warning'])
# Set the install location.
cmd.extend(['-root', _CIPD_ROOT])
ensure_content = ('infra/tools/protoc/${platform} '
'protobuf_version:v%s' % PROTOC_VERSION)
with osutils.TempDir() as tempdir:
ensure_file = os.path.join(tempdir, 'cipd_ensure_file')
osutils.WriteFile(ensure_file, ensure_content)
cmd.extend(['-ensure-file', ensure_file])
cros_build_lib.run(cmd, cwd=constants.CHROMITE_DIR, print_cmd=False)
def _CleanTargetDirectory(directory):
"""Remove any existing generated files in the directory.
This clean only removes the generated files to avoid accidentally destroying
__init__.py customizations down the line. That will leave otherwise empty
directories in place if things get moved. Neither case is relevant at the
time of writing, but lingering empty directories seemed better than
diagnosing accidental __init__.py changes.
Args:
directory (str): Path to be cleaned up.
"""
logging.info('Cleaning old files.')
for dirpath, _dirnames, filenames in os.walk(directory):
old = [os.path.join(dirpath, f) for f in filenames if f.endswith('_pb2.py')]
# Remove empty init files to clean up otherwise empty directories.
if '__init__.py' in filenames:
init = os.path.join(dirpath, '__init__.py')
if not osutils.ReadFile(init):
old.append(init)
for current in old:
osutils.SafeUnlink(current)
def _GenerateFiles(source, output):
"""Generate the proto files from the |source| tree into |output|.
Args:
source (str): Path to the proto source root directory.
output (str): Path to the output root directory.
"""
logging.info('Generating files.')
targets = []
# Only compile the subset we need for the API.
subdirs = [
os.path.join(source, 'chromite'),
os.path.join(source, 'chromiumos'),
os.path.join(source, 'config'),
os.path.join(source, 'test_platform'),
os.path.join(source, 'device')
]
for basedir in subdirs:
for dirpath, _dirnames, filenames in os.walk(basedir):
for filename in filenames:
if filename.endswith('.proto'):
# We have a match, add the file.
targets.append(os.path.join(dirpath, filename))
cmd = [_PROTOC, '--python_out', output, '--proto_path', source] + targets
result = cros_build_lib.run(
cmd, cwd=source, print_cmd=False, check=False)
if result.returncode:
raise GenerationError('Error compiling the proto. See the output for a '
'message.')
def _InstallMissingInits(directory):
"""Add any __init__.py files not present in the generated protobuf folders."""
logging.info('Adding missing __init__.py files.')
for dirpath, _dirnames, filenames in os.walk(directory):
if '__init__.py' not in filenames:
osutils.Touch(os.path.join(dirpath, '__init__.py'))
def _PostprocessFiles(directory):
"""Do postprocessing on the generated files.
Args:
directory (str): The root directory containing the generated files that are
to be processed.
"""
logging.info('Postprocessing: Fix imports.')
# We are using a negative address here (the /address/! portion of the sed
# command) to make sure we don't change any imports from protobuf itself.
address = '^from google.protobuf'
# Find: 'from x import y_pb2 as x_dot_y_pb2'.
# "\(^google.protobuf[^ ]*\)" matches the module we're importing from.
# - \( and \) are for groups in sed.
# - ^google.protobuf prevents changing the import for protobuf's files.
# - [^ ] = Not a space. The [:space:] character set is too broad, but would
# technically work too.
find = r'^from \([^ ]*\) import \([^ ]*\)_pb2 as \([^ ]*\)$'
# Substitute: 'from chromite.api.gen.x import y_pb2 as x_dot_y_pb2'.
sub = 'from chromite.api.gen.\\1 import \\2_pb2 as \\3'
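  # Illustrative example (hypothetical module names): a generated line such as
  #   from chromiumos import common_pb2 as chromiumos_dot_common__pb2
  # is rewritten to
  #   from chromite.api.gen.chromiumos import common_pb2 as chromiumos_dot_common__pb2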
from_sed = [
'sed', '-i',
'/%(address)s/!s/%(find)s/%(sub)s/g' % {
'address': address,
'find': find,
'sub': sub
}
]
for dirpath, _dirnames, filenames in os.walk(directory):
    # Update the imports in every generated _pb2 module found in this directory.
pb2 = [os.path.join(dirpath, f) for f in filenames if f.endswith('_pb2.py')]
if pb2:
cmd = from_sed + pb2
cros_build_lib.run(cmd, print_cmd=False)
def CompileProto(output=None):
"""Compile the Build API protobuf files.
By default this will compile from infra/proto/src to api/gen. The output
directory may be changed, but the imports will always be treated as if it is
in the default location.
Args:
output (str|None): The output directory.
"""
source = os.path.join(_PROTO_DIR, 'src')
output = output or os.path.join(_API_DIR, 'gen')
_InstallProtoc()
_CleanTargetDirectory(output)
_GenerateFiles(source, output)
_InstallMissingInits(output)
_PostprocessFiles(output)
def GetParser():
"""Build the argument parser."""
parser = commandline.ArgumentParser(description=__doc__)
parser.add_argument(
'--destination',
type='path',
help='The directory where the proto should be generated. Defaults to '
'the correct directory for the API.')
return parser
def _ParseArguments(argv):
"""Parse and validate arguments."""
parser = GetParser()
opts = parser.parse_args(argv)
opts.Freeze()
return opts
def main(argv):
opts = _ParseArguments(argv)
try:
CompileProto(output=opts.destination)
except Error as e:
logging.error(e)
return 1
| bsd-3-clause | -7,782,262,003,463,430,000 | 30.14554 | 80 | 0.669279 | false |
nictuku/nwu | nwu/sysinfo/common.py | 1 | 3839 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Copyright (C) 2006 José de Paula Eufrásio Junior ([email protected]) AND
# Yves Junqueira ([email protected])
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
# from http://www.voidspace.org.uk/python/pathutils.html (BSD License)
def formatbytes(sizeint, configdict=None, **configs):
"""
Given a file size as an integer, return a nicely formatted string that
represents the size. Has various options to control it's output.
You can pass in a dictionary of arguments or keyword arguments. Keyword
arguments override the dictionary and there are sensible defaults for options
you don't set.
Options and defaults are as follows :
* ``forcekb = False`` - If set this forces the output to be in terms
of kilobytes and bytes only.
* ``largestonly = True`` - If set, instead of outputting
``1 Mbytes, 307 Kbytes, 478 bytes`` it outputs using only the largest
denominator - e.g. ``1.3 Mbytes`` or ``17.2 Kbytes``
* ``kiloname = 'Kbytes'`` - The string to use for kilobytes
* ``meganame = 'Mbytes'`` - The string to use for Megabytes
* ``bytename = 'bytes'`` - The string to use for bytes
* ``nospace = True`` - If set it outputs ``1Mbytes, 307Kbytes``,
notice there is no space.
Example outputs : ::
19Mbytes, 75Kbytes, 255bytes
2Kbytes, 0bytes
23.8Mbytes
.. note::
It currently uses the plural form even for singular.
"""
defaultconfigs = { 'forcekb' : False,
'largestonly' : True,
'kiloname' : 'Kbytes',
'meganame' : 'Mbytes',
'bytename' : 'bytes',
'nospace' : True}
if configdict is None:
configdict = {}
for entry in configs:
# keyword parameters override the dictionary passed in
configdict[entry] = configs[entry]
#
for keyword in defaultconfigs:
if not configdict.has_key(keyword):
configdict[keyword] = defaultconfigs[keyword]
#
if configdict['nospace']:
space = ''
else:
space = ' '
#
mb, kb, rb = bytedivider(sizeint)
if configdict['largestonly']:
if mb and not configdict['forcekb']:
return stringround(mb, kb)+ space + configdict['meganame']
elif kb or configdict['forcekb']:
if mb and configdict['forcekb']:
kb += 1024*mb
return stringround(kb, rb) + space+ configdict['kiloname']
else:
return str(rb) + space + configdict['bytename']
else:
outstr = ''
if mb and not configdict['forcekb']:
outstr = str(mb) + space + configdict['meganame'] +', '
if kb or configdict['forcekb'] or mb:
if configdict['forcekb']:
kb += 1024*mb
outstr += str(kb) + space + configdict['kiloname'] +', '
return outstr + str(rb) + space + configdict['bytename']
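# The two helpers below are referenced by formatbytes() but are not defined in this
# file (the original pathutils.py module supplies them). These are minimal stand-in
# sketches, written from the way formatbytes() uses them, so the function can run.
def bytedivider(sizeint):
    """Split a byte count into a (megabytes, kilobytes, bytes) tuple."""
    mb, remainder = divmod(sizeint, 1024 * 1024)
    kb, rb = divmod(remainder, 1024)
    return (mb, kb, rb)
def stringround(main, rest):
    """Round 'main' whole units plus 'rest' 1024ths to one decimal place, as a string."""
    value = main + rest / 1024.0
    return str(round(value, 1))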
| gpl-3.0 | -5,431,011,525,506,191,000 | 36.990099 | 83 | 0.598645 | false |
damnMeddlingKid/opendoor | src/App.py | 1 | 1523 | from flask import Flask, request, render_template, Response
from geojson import Feature, Point, FeatureCollection
import Test
"""Simple flask app to display listings on a map
The dwelling type is hardcoded but the coordinates are passed in from the frontend.
"""
app = Flask(__name__, template_folder="../templates", static_folder="../static")
@app.route("/get_similar", methods=['GET'])
def get_similar():
"""API endpoint to search for similar houses to a given location
:param lat,lon: Point on the map to search from of the search query
:return: GeoJSON encoded collection of locations near the query Point
"""
    try:
        lat = float(request.args.get('lat', ''))
        lon = float(request.args.get('lon', ''))
    except ValueError:
        # Without valid coordinates the lookup below would fail, so bail out early.
        return Response("invalid or missing lat/lon parameters", status=400)
house = Test.generate_datum()
house['lat'] = lat
house['lon'] = lon
house['dwelling_type'] = 'single-family'
houses = house.get_similar(10)
geo_houses = []
for i in range(0,10):
house = houses.iloc[i]
feature = Feature(geometry=Point((house['lon'],house['lat'])))
feature['dwelling_type'] = house['dwelling_type']
feature['pool'] = house['pool']
feature['list_price'] = house['list_price']
geo_houses.append(feature)
return Response(str(FeatureCollection(geo_houses)), mimetype="application/json")
@app.route("/")
def index():
return render_template("index.html")
if __name__ == "__main__":
Test.generate_test_set()
app.run(debug=True, host='0.0.0.0')
| mit | 3,494,228,461,216,091,000 | 31.404255 | 84 | 0.645437 | false |
jiangzhonglian/MachineLearning | src/py3.x/ml/11.Apriori/apriori.py | 1 | 16811 | #!/usr/bin/python
# coding: utf8
'''
Created on Mar 24, 2011
Update on 2017-05-18
Ch 11 code
Author: Peter/片刻
GitHub: https://github.com/apachecn/AiLearning'''
print(__doc__)
from numpy import *
# Load the sample data set
def loadDataSet():
    return [[1, 3, 4], [2, 3, 5], [1, 2, 3, 5], [2, 5]]
# Build the candidate set C1: collect the unique items of dataSet, sort them, and turn each one into a frozenset
def createC1(dataSet):
    """createC1 (build the candidate set C1)
    Args:
        dataSet  the raw data set
    Returns:
        a list of frozensets, one per unique item
    """
    C1 = []
    for transaction in dataSet:
        for item in transaction:
            if not [item] in C1:
                # append every item that has not been seen in C1 yet
                C1.append([item])
    # sort the candidate items in ascending order
    # print('before sort=', C1)
    C1.sort()
    # frozenset is an immutable set, so it can be used as a dictionary key
    # print('after sort=', C1)
    # print('frozenset=', list(map(frozenset, C1)))
    # wrap the map in list() so the candidates can be iterated more than once under Python 3
    return list(map(frozenset, C1))
# Compute the support of the candidate itemsets Ck over the data set D and return the ones whose support is at least minSupport
def scanD(D, Ck, minSupport):
    """scanD (compute the support of the candidates Ck in the data set D and return the candidates whose support is >= minSupport)
    Args:
        D           the data set, one set per transaction
        Ck          list of candidate itemsets
        minSupport  the minimum support threshold
    Returns:
        retList      candidates whose support is >= minSupport
        supportData  support value of every candidate
    """
    # ssCnt temporarily holds the counts of the candidates in Ck, e.g. a->10, b->5, c->8
    ssCnt = {}
    for tid in D:
        for can in Ck:
            # s.issubset(t) tests whether every element of s is contained in t
            if can.issubset(tid):
                if can not in ssCnt:
                    ssCnt[can] = 1
                else:
                    ssCnt[can] += 1
    numItems = float(len(D)) # number of transactions in D
    retList = []
    supportData = {}
    for key in ssCnt:
        # support = (transactions containing the candidate) / (total number of transactions)
        support = ssCnt[key]/numItems
        if support >= minSupport:
            # insert at the front of retList; only candidates that are frequent are kept here
            retList.insert(0, key)
        # record the support of every candidate, frequent or not
        supportData[key] = support
    return retList, supportData
# Take the list of frequent itemsets Lk and the target itemset size k and generate the candidate itemsets Ck
def aprioriGen(Lk, k):
    """aprioriGen (take the frequent itemsets Lk and the target size k and produce the candidates Ck.
       For example: with {0},{1},{2} as input and k = 2 it outputs {0,1}, {0,2}, {1,2}; with {0,1},{0,2},{1,2} as input and k = 3 it outputs {0,1,2}.
       Only pairs whose first k-2 items match are merged, so the result needs no separate de-duplication pass.
       This is the more efficient way to build the candidates.)
    Args:
        Lk  list of frequent itemsets
        k   size of the itemsets to return (two itemsets are merged when their first k-2 items match)
    Returns:
        retList  the pairwise-merged candidate itemsets
    """
    retList = []
    lenLk = len(Lk)
    for i in range(lenLk):
        for j in range(i+1, lenLk):
            L1 = list(Lk[i])[: k-2]
            L2 = list(Lk[j])[: k-2]
            # print('-----i=', i, k-2, Lk, Lk[i], list(Lk[i])[: k-2])
            # print('-----j=', j, k-2, Lk, Lk[j], list(Lk[j])[: k-2])
            L1.sort()
            L2.sort()
            # on the first pass L1 and L2 are empty, so every pair is merged directly
            # if first k-2 elements are equal
            if L1 == L2:
                # set union
                # print('union=', Lk[i] | Lk[j], Lk[i], Lk[j])
                retList.append(Lk[i] | Lk[j])
    return retList
# Find all candidate itemsets in dataSet whose support is >= the minimum support,
# together with their support values -- i.e. the frequent itemsets.
def apriori(dataSet, minSupport=0.5):
    """apriori (first build the candidate set C1, then scan the data set to check which
    single-item sets satisfy the minimum support; those form L1. The elements of L1 are
    combined into C2, C2 is filtered into L2, and so on until Ck becomes empty, which
    yields every frequent itemset together with its support.)
    Args:
        dataSet     the original data set
        minSupport  the support threshold
    Returns:
        L            the full list of frequent itemsets, one inner list per itemset size
        supportData  a dict mapping every candidate itemset to its support
    """
    # C1: deduplicate and sort the items of dataSet and turn every element into a frozenset
    C1 = createC1(dataSet)
    # print('C1: ', C1)
    # turn every transaction into a set; materialise as a list so it can be scanned repeatedly
    D = list(map(set, dataSet))
    # print('D=', D)
    # compute the support of C1 over D and keep the candidates whose support is >= minSupport
    L1, supportData = scanD(D, C1, minSupport)
    # print("L1=", L1, "\n", "outcome: ", supportData)
    # L is a list of lists: one inner list of frequent itemsets per itemset size
    L = [L1]
    k = 2
    # loop while the previous level L[k-2] is non-empty. On the first pass L is
    # [[frozenset([1]), frozenset([3]), frozenset([2]), frozenset([5])]], so
    # L[k-2] = L[0] is that inner list; k is incremented at the end of each pass
    while (len(L[k-2]) > 0):
        # print('k=', k, L, L[k-2])
        Ck = aprioriGen(L[k-2], k)  # e.g. {0},{1},{2} with k=2 gives {0,1},{0,2},{1,2}; {0,1},{0,2},{1,2} with k=3 gives {0,1,2}
        # print('Ck', Ck)
        Lk, supK = scanD(D, Ck, minSupport)  # support of Ck over D, keeping only candidates with support >= minSupport
        # record the support of every candidate (update adds new keys and refreshes existing ones)
        supportData.update(supK)
        if len(Lk) == 0:
            break
        # Lk holds the frequent itemsets of size k; L grows by one level per pass, e.g.
        # L = [[set(1), set(2), set(3)]]
        # L = [[set(1), set(2), set(3)], [set(1, 2), set(2, 3)]]
        L.append(Lk)
        k += 1
        # print('k=', k, len(L[k-2]))
    return L, supportData
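# Hedged worked example (toy data, minSupport=0.5): apriori(loadDataSet(), 0.5) is
# expected to return L roughly of the form
#   [[{1}, {3}, {2}, {5}], [{1, 3}, {2, 5}, {2, 3}, {3, 5}], [{2, 3, 5}]]
# (as frozensets) plus a supportData dict such as {frozenset({2, 5}): 0.75, ...}.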
# Compute the confidence of candidate rules
def calcConf(freqSet, H, supportData, brl, minConf=0.7):
    """calcConf (for a frequent itemset, compute the confidence of each candidate rule,
    e.g. for {1,2} check whether {1}->{2} or {2}->{1} reaches the threshold)
    Args:
        freqSet      a frequent itemset, e.g. frozenset([1, 3])
        H            the candidate consequents, e.g. [frozenset([1]), frozenset([3])]
        supportData  the dict of supports of all itemsets
        brl          the (initially empty) list collecting the association rules
        minConf      the minimum confidence threshold
    Returns:
        prunedH      the consequents whose confidence is above the threshold
    """
    # collect the consequents whose confidence reaches the minimum confidence (minConf)
    prunedH = []
    for conseq in H:  # with freqSet = frozenset([1, 3]) and H = [frozenset([1]), frozenset([3])], both frozenset([1]) -> frozenset([3]) and frozenset([3]) -> frozenset([1]) are evaluated
        # print('confData=', freqSet, H, conseq, freqSet-conseq)
        conf = supportData[freqSet]/supportData[freqSet-conseq]  # confidence of the rule (freqSet-conseq) -> conseq = support(freqSet) / support(freqSet-conseq); e.g. for freqSet = frozenset([1, 3]) and conseq = frozenset([3]) this is supportData[frozenset([1, 3])] / supportData[frozenset([1])]
        if conf >= minConf:
            # whoever buys the freqSet-conseq items also buys the conseq items with at least minConf probability (together they make up freqSet)
            print (freqSet-conseq, '-->', conseq, 'conf:', conf)
            brl.append((freqSet-conseq, conseq, conf))
            prunedH.append(conseq)
    return prunedH
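# Hedged worked example (toy data): support({2, 5}) = 0.75 and support({5}) = 0.75, so
# the rule {5} -> {2} has confidence 0.75 / 0.75 = 1.0 and is kept for any minConf <= 1.0;
# the reverse rule {2} -> {5} likewise has confidence 1.0 because support({2}) = 0.75.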
# Recursively derive rules from a frequent itemset
def rulesFromConseq(freqSet, H, supportData, brl, minConf=0.7):
    """rulesFromConseq
    Args:
        freqSet      a frequent itemset, e.g. frozenset([2, 3, 5])
        H            candidate consequents built from freqSet, e.g. [frozenset([2]), frozenset([3]), frozenset([5])]
        supportData  the dict of supports of all itemsets
        brl          the list collecting the association rules
        minConf      the minimum confidence threshold
    """
    # H[0] is the first of the consequent combinations built from freqSet; all elements of H
    # have the same length, which is grown by aprioriGen(H, m+1) below.
    # On each recursion the length of H[0] increases by one: 1, 2, 3, ...
    # e.g. with freqSet = frozenset([2, 3, 5]) and H = [frozenset([2]), frozenset([3]), frozenset([5])]
    # m = len(H[0]) takes the values 1 and then 2 across the recursion.
    # At m = 2 the recursion stops: one more level would give H[0] = frozenset([2, 3, 5]) = freqSet,
    # and there is no point computing rules from freqSet to itself.
    m = len(H[0])
    if (len(freqSet) > (m + 1)):
        # print('freqSet******************', len(freqSet), m + 1, freqSet, H, H[0])
        # build all combinations of length m+1 from H; with H = [frozenset([2]), frozenset([3]), frozenset([5])]
        # the first recursive call produces [frozenset([2, 3]), frozenset([2, 5]), frozenset([3, 5])]
        # (there is no second call here -- the length check above already ends the recursion)
        Hmp1 = aprioriGen(H, m+1)
        # keep only the consequents whose confidence is above the minimum confidence
        Hmp1 = calcConf(freqSet, Hmp1, supportData, brl, minConf)
        print ('Hmp1=', Hmp1)
        print ('len(Hmp1)=', len(Hmp1), 'len(freqSet)=', len(freqSet))
        # if more than one consequent is still above the minimum confidence, keep recursing; otherwise stop
        if (len(Hmp1) > 1):
            # print('----------------------', Hmp1)
            # print(len(freqSet), len(Hmp1[0]) + 1)
            rulesFromConseq(freqSet, Hmp1, supportData, brl, minConf)
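# Hedged worked example: for freqSet = frozenset({2, 3, 5}) with
# H = [frozenset({2}), frozenset({3}), frozenset({5})], the call builds the size-2
# consequents {2, 3}, {2, 5} and {3, 5} and tests rules such as {5} -> {2, 3};
# only consequents that pass minConf feed any further recursion.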
# Generate the association rules
def generateRules(L, supportData, minConf=0.7):
    """generateRules
    Args:
        L            the list of frequent itemsets, one inner list per itemset size
        supportData  the dict of supports of the frequent itemsets
        minConf      the minimum confidence threshold
    Returns:
        bigRuleList  the rule list, as (antecedent, consequent, confidence) triples
    """
    bigRuleList = []
    # e.g. L = [[frozenset([1]), frozenset([3]), frozenset([2]), frozenset([5])], [frozenset([1, 3]), frozenset([2, 5]), frozenset([2, 3]), frozenset([3, 5])], [frozenset([2, 3, 5])]]
    for i in range(1, len(L)):
        # walk over every frequent itemset of each size
        for freqSet in L[i]:
            # e.g. freqSet = frozenset([1, 3]) gives H1 = [frozenset([1]), frozenset([3])]
            # split the itemset into its single items, each wrapped in a frozenset, collected in a list
            H1 = [frozenset([item]) for item in freqSet]
            # 2-item sets go through the else branch, larger sets through the if branch
            if (i > 1):
                rulesFromConseq(freqSet, H1, supportData, bigRuleList, minConf)
            else:
                calcConf(freqSet, H1, supportData, bigRuleList, minConf)
    return bigRuleList
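# Hedged usage sketch (mirrors testGenerateRules below): with the toy data, minSupport=0.5
# and minConf=0.7, generateRules(L, supportData) emits rules such as
# (frozenset({5}), frozenset({2}), 1.0) and (frozenset({1}), frozenset({3}), 1.0).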
def getActionIds():
from time import sleep
from votesmart import votesmart
# votesmart.apikey = 'get your api key first'
votesmart.apikey = 'a7fa40adec6f4a77178799fae4441030'
actionIdList = []
billTitleList = []
fr = open('data/11.Apriori/recent20bills.txt')
for line in fr.readlines():
billNum = int(line.split('\t')[0])
try:
billDetail = votesmart.votes.getBill(billNum) # api call
for action in billDetail.actions:
if action.level == 'House' and (action.stage == 'Passage' or action.stage == 'Amendment Vote'):
actionId = int(action.actionId)
print ('bill: %d has actionId: %d' % (billNum, actionId))
actionIdList.append(actionId)
billTitleList.append(line.strip().split('\t')[1])
except:
print ("problem getting bill %d" % billNum)
sleep(1) # delay to be polite
return actionIdList, billTitleList
def getTransList(actionIdList, billTitleList): #this will return a list of lists containing ints
    # sleep and votesmart are used below but were only imported inside getActionIds,
    # so import them here as well to keep this function usable on its own
    from time import sleep
    from votesmart import votesmart
    itemMeaning = ['Republican', 'Democratic']#list of what each item stands for
for billTitle in billTitleList:#fill up itemMeaning list
itemMeaning.append('%s -- Nay' % billTitle)
itemMeaning.append('%s -- Yea' % billTitle)
transDict = {}#list of items in each transaction (politician)
voteCount = 2
for actionId in actionIdList:
sleep(3)
print ('getting votes for actionId: %d' % actionId)
try:
voteList = votesmart.votes.getBillActionVotes(actionId)
for vote in voteList:
                if vote.candidateName not in transDict:
transDict[vote.candidateName] = []
if vote.officeParties == 'Democratic':
transDict[vote.candidateName].append(1)
elif vote.officeParties == 'Republican':
transDict[vote.candidateName].append(0)
if vote.action == 'Nay':
transDict[vote.candidateName].append(voteCount)
elif vote.action == 'Yea':
transDict[vote.candidateName].append(voteCount + 1)
except:
print ("problem getting actionId: %d" % actionId)
voteCount += 2
return transDict, itemMeaning
# not used for now
# def pntRules(ruleList, itemMeaning):
# for ruleTup in ruleList:
# for item in ruleTup[0]:
# print itemMeaning[item]
# print " -------->"
# for item in ruleTup[1]:
# print itemMeaning[item]
# print "confidence: %f" % ruleTup[2]
# print #print a blank line
def testApriori():
    # load the toy data set
    dataSet = loadDataSet()
    print ('dataSet: ', dataSet)
    # run Apriori to get the frequent itemsets and their supports
    L1, supportData1 = apriori(dataSet, minSupport=0.7)
    print ('L(0.7): ', L1)
    print ('supportData(0.7): ', supportData1)
    print ('->->->->->->->->->->->->->->->->->->->->->->->->->->->->')
    # run Apriori again with a lower support threshold
    L2, supportData2 = apriori(dataSet, minSupport=0.5)
    print ('L(0.5): ', L2)
    print ('supportData(0.5): ', supportData2)
def testGenerateRules():
    # load the toy data set
    dataSet = loadDataSet()
    print ('dataSet: ', dataSet)
    # run Apriori to get the frequent itemsets and their supports
    L1, supportData1 = apriori(dataSet, minSupport=0.5)
    print ('L(0.5): ', L1)
    print ('supportData(0.5): ', supportData1)
    # generate the association rules
    rules = generateRules(L1, supportData1, minConf=0.5)
    print ('rules: ', rules)
def main():
    # exercise the Apriori algorithm
    # testApriori()
    # exercise association-rule generation
    # testGenerateRules()
    ## project example
    # # build the transaction data set from US congressional voting records
    # actionIdList, billTitleList = getActionIds()
    # # test with the first 2 bills only
    # transDict, itemMeaning = getTransList(actionIdList[: 2], billTitleList[: 2])
    # transDict holds one transaction per politician; transDict[key] is the list of item ids recorded for that key, e.g. [1, 2, 3]
    # transDict, itemMeaning = getTransList(actionIdList, billTitleList)
    # # build the full data set
    # dataSet = [transDict[key] for key in transDict.keys()]
    # L, supportData = apriori(dataSet, minSupport=0.3)
    # rules = generateRules(L, supportData, minConf=0.95)
    # print (rules)
    # # project example
    # # finding shared features of poisonous mushrooms
    # # build the full data set
    dataSet = [line.split() for line in open("data/11.Apriori/mushroom.dat").readlines()]
    L, supportData = apriori(dataSet, minSupport=0.3)
    # # feature value 2 marks a poisonous mushroom, 1 an edible one
    # # list the frequent itemsets containing '2': features that frequently co-occur with poisonous mushrooms
    for item in L[1]:
        if item.intersection('2'):
            print (item)
    for item in L[2]:
        if item.intersection('2'):
            print (item)
if __name__ == "__main__":
main()
| gpl-3.0 | 7,020,035,053,039,240,000 | 34.756098 | 354 | 0.55209 | false |
orchidinfosys/odoo | addons/project/project.py | 1 | 53749 | # -*- coding: utf-8 -*-
# Part of Odoo. See LICENSE file for full copyright and licensing details.
from datetime import datetime, date
from lxml import etree
import time
from openerp import api
from openerp import SUPERUSER_ID
from openerp import tools
from openerp.osv import fields, osv
from openerp.tools.safe_eval import safe_eval as eval
from openerp.tools.translate import _
from openerp.exceptions import UserError
class project_task_type(osv.osv):
_name = 'project.task.type'
_description = 'Task Stage'
_order = 'sequence'
def _get_mail_template_id_domain(self):
return [('model', '=', 'project.task')]
_columns = {
'name': fields.char('Stage Name', required=True, translate=True),
'description': fields.text('Description', translate=True),
'sequence': fields.integer('Sequence'),
'project_ids': fields.many2many('project.project', 'project_task_type_rel', 'type_id', 'project_id', 'Projects'),
'legend_priority': fields.char(
'Priority Management Explanation', translate=True,
help='Explanation text to help users using the star and priority mechanism on stages or issues that are in this stage.'),
'legend_blocked': fields.char(
'Kanban Blocked Explanation', translate=True,
help='Override the default value displayed for the blocked state for kanban selection, when the task or issue is in that stage.'),
'legend_done': fields.char(
'Kanban Valid Explanation', translate=True,
help='Override the default value displayed for the done state for kanban selection, when the task or issue is in that stage.'),
'legend_normal': fields.char(
'Kanban Ongoing Explanation', translate=True,
help='Override the default value displayed for the normal state for kanban selection, when the task or issue is in that stage.'),
'mail_template_id': fields.many2one(
'mail.template',
string='Email Template',
domain=lambda self: self._get_mail_template_id_domain(),
help="If set an email will be sent to the customer when the task or issue reaches this step."),
'fold': fields.boolean('Folded in Tasks Pipeline',
help='This stage is folded in the kanban view when '
'there are no records in that stage to display.'),
}
def _get_default_project_ids(self, cr, uid, ctx=None):
if ctx is None:
ctx = {}
default_project_id = ctx.get('default_project_id')
return [default_project_id] if default_project_id else None
_defaults = {
'sequence': 1,
'project_ids': _get_default_project_ids,
}
class project(osv.osv):
_name = "project.project"
_description = "Project"
_inherit = ['mail.alias.mixin', 'mail.thread', 'ir.needaction_mixin']
_inherits = {'account.analytic.account': "analytic_account_id"}
_period_number = 5
def get_alias_model_name(self, vals):
return vals.get('alias_model', 'project.task')
def get_alias_values(self):
values = super(project, self).get_alias_values()
values['alias_defaults'] = {'project_id': self.id}
return values
def onchange_partner_id(self, cr, uid, ids, part=False, context=None):
partner_obj = self.pool.get('res.partner')
val = {}
if not part:
return {'value': val}
if 'pricelist_id' in self.fields_get(cr, uid, context=context):
pricelist = partner_obj.read(cr, uid, part, ['property_product_pricelist'], context=context)
pricelist_id = pricelist.get('property_product_pricelist', False) and pricelist.get('property_product_pricelist')[0] or False
val['pricelist_id'] = pricelist_id
return {'value': val}
def unlink(self, cr, uid, ids, context=None):
analytic_account_to_delete = set()
for proj in self.browse(cr, uid, ids, context=context):
if proj.tasks:
raise UserError(_('You cannot delete a project containing tasks. You can either delete all the project\'s tasks and then delete the project or simply deactivate the project.'))
if proj.analytic_account_id and not proj.analytic_account_id.line_ids:
analytic_account_to_delete.add(proj.analytic_account_id.id)
res = super(project, self).unlink(cr, uid, ids, context=context)
self.pool['account.analytic.account'].unlink(cr, uid, list(analytic_account_to_delete), context=context)
return res
def _get_attached_docs(self, cr, uid, ids, field_name, arg, context):
res = {}
attachment = self.pool.get('ir.attachment')
task = self.pool.get('project.task')
for id in ids:
project_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.project'), ('res_id', '=', id)], context=context, count=True)
task_ids = task.search(cr, uid, [('project_id', '=', id)], context=context)
task_attachments = attachment.search(cr, uid, [('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)], context=context, count=True)
res[id] = (project_attachments or 0) + (task_attachments or 0)
return res
def _task_count(self, cr, uid, ids, field_name, arg, context=None):
if context is None:
context = {}
res={}
for project in self.browse(cr, uid, ids, context=context):
res[project.id] = len(project.task_ids)
return res
def _task_needaction_count(self, cr, uid, ids, field_name, arg, context=None):
Task = self.pool['project.task']
res = dict.fromkeys(ids, 0)
projects = Task.read_group(cr, uid, [('project_id', 'in', ids), ('message_needaction', '=', True)], ['project_id'], ['project_id'], context=context)
res.update({project['project_id'][0]: int(project['project_id_count']) for project in projects})
return res
def _get_alias_models(self, cr, uid, context=None):
""" Overriden in project_issue to offer more options """
return [('project.task', "Tasks")]
def _get_visibility_selection(self, cr, uid, context=None):
""" Overriden in portal_project to offer more options """
return [('portal', _('Customer Project: visible in portal if the customer is a follower')),
('employees', _('All Employees Project: all employees can access')),
('followers', _('Private Project: followers only'))]
def attachment_tree_view(self, cr, uid, ids, context):
task_ids = self.pool.get('project.task').search(cr, uid, [('project_id', 'in', ids)])
domain = [
'|',
'&', ('res_model', '=', 'project.project'), ('res_id', 'in', ids),
'&', ('res_model', '=', 'project.task'), ('res_id', 'in', task_ids)]
res_id = ids and ids[0] or False
return {
'name': _('Attachments'),
'domain': domain,
'res_model': 'ir.attachment',
'type': 'ir.actions.act_window',
'view_id': False,
'view_mode': 'kanban,tree,form',
'view_type': 'form',
'help': _('''<p class="oe_view_nocontent_create">
Documents are attached to the tasks and issues of your project.</p><p>
Send messages or log internal notes with attachments to link
documents to your project.
</p>'''),
'limit': 80,
'context': "{'default_res_model': '%s','default_res_id': %d}" % (self._name, res_id)
}
# Lambda indirection method to avoid passing a copy of the overridable method when declaring the field
_alias_models = lambda self, *args, **kwargs: self._get_alias_models(*args, **kwargs)
_visibility_selection = lambda self, *args, **kwargs: self._get_visibility_selection(*args, **kwargs)
_columns = {
'active': fields.boolean('Active', help="If the active field is set to False, it will allow you to hide the project without removing it."),
'sequence': fields.integer('Sequence', help="Gives the sequence order when displaying a list of Projects."),
'analytic_account_id': fields.many2one(
'account.analytic.account', 'Contract/Analytic',
help="Link this project to an analytic account if you need financial management on projects. "
"It enables you to connect projects with budgets, planning, cost and revenue analysis, timesheets on projects, etc.",
ondelete="cascade", required=True, auto_join=True),
'label_tasks': fields.char('Use Tasks as', help="Gives label to tasks on project's kanban view."),
'tasks': fields.one2many('project.task', 'project_id', "Task Activities"),
'resource_calendar_id': fields.many2one('resource.calendar', 'Working Time', help="Timetable working hours to adjust the gantt diagram report", states={'close':[('readonly',True)]} ),
'type_ids': fields.many2many('project.task.type', 'project_task_type_rel', 'project_id', 'type_id', 'Tasks Stages', states={'close':[('readonly',True)], 'cancelled':[('readonly',True)]}),
'task_count': fields.function(_task_count, type='integer', string="Tasks",),
'task_needaction_count': fields.function(_task_needaction_count, type='integer', string="Tasks",),
'task_ids': fields.one2many('project.task', 'project_id',
domain=['|', ('stage_id.fold', '=', False), ('stage_id', '=', False)]),
'color': fields.integer('Color Index'),
'user_id': fields.many2one('res.users', 'Project Manager'),
'alias_id': fields.many2one('mail.alias', 'Alias', ondelete="restrict", required=True,
help="Internal email associated with this project. Incoming emails are automatically synchronized "
"with Tasks (or optionally Issues if the Issue Tracker module is installed)."),
'alias_model': fields.selection(_alias_models, "Alias Model", select=True, required=True,
help="The kind of document created when an email is received on this project's email alias"),
'privacy_visibility': fields.selection(_visibility_selection, 'Privacy / Visibility', required=True,
help="Holds visibility of the tasks or issues that belong to the current project:\n"
"- Portal : employees see everything;\n"
" if portal is activated, portal users see the tasks or issues followed by\n"
" them or by someone of their company\n"
"- Employees Only: employees see all tasks or issues\n"
"- Followers Only: employees see only the followed tasks or issues; if portal\n"
" is activated, portal users see the followed tasks or issues."),
'state': fields.selection([('draft','New'),
('open','In Progress'),
('cancelled', 'Cancelled'),
('pending','Pending'),
('close','Closed')],
'Status', required=True, copy=False),
'doc_count': fields.function(
_get_attached_docs, string="Number of documents attached", type='integer'
),
'date_start': fields.date('Start Date'),
'date': fields.date('Expiration Date', select=True, track_visibility='onchange'),
}
_order = "sequence, name, id"
_defaults = {
'active': True,
'type': 'contract',
'label_tasks': 'Tasks',
'state': 'open',
'sequence': 10,
'user_id': lambda self,cr,uid,ctx: uid,
'alias_model': 'project.task',
'privacy_visibility': 'employees',
}
    # TODO: why not use an SQL constraint instead?
def _check_dates(self, cr, uid, ids, context=None):
for leave in self.read(cr, uid, ids, ['date_start', 'date'], context=context):
if leave['date_start'] and leave['date']:
if leave['date_start'] > leave['date']:
return False
return True
_constraints = [
(_check_dates, 'Error! project start-date must be lower than project end-date.', ['date_start', 'date'])
]
def set_template(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=False, context=context)
def reset_project(self, cr, uid, ids, context=None):
return self.setActive(cr, uid, ids, value=True, context=context)
def map_tasks(self, cr, uid, old_project_id, new_project_id, context=None):
""" copy and map tasks from old to new project """
if context is None:
context = {}
map_task_id = {}
task_obj = self.pool.get('project.task')
proj = self.browse(cr, uid, old_project_id, context=context)
for task in proj.tasks:
# preserve task name and stage, normally altered during copy
defaults = {'stage_id': task.stage_id.id,
'name': task.name}
map_task_id[task.id] = task_obj.copy(cr, uid, task.id, defaults, context=context)
self.write(cr, uid, [new_project_id], {'tasks':[(6,0, map_task_id.values())]})
task_obj.duplicate_task(cr, uid, map_task_id, context=context)
return True
def copy(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
context = dict(context or {})
context['active_test'] = False
proj = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default.update(name=_("%s (copy)") % (proj.name))
res = super(project, self).copy(cr, uid, id, default, context)
self.map_tasks(cr, uid, id, res, context=context)
return res
def duplicate_template(self, cr, uid, ids, context=None):
context = dict(context or {})
data_obj = self.pool.get('ir.model.data')
result = []
for proj in self.browse(cr, uid, ids, context=context):
context.update({'analytic_project_copy': True})
new_date_start = time.strftime('%Y-%m-%d')
new_date_end = False
if proj.date_start and proj.date:
start_date = date(*time.strptime(proj.date_start,'%Y-%m-%d')[:3])
end_date = date(*time.strptime(proj.date,'%Y-%m-%d')[:3])
new_date_end = (datetime(*time.strptime(new_date_start,'%Y-%m-%d')[:3])+(end_date-start_date)).strftime('%Y-%m-%d')
context.update({'copy':True})
new_id = self.copy(cr, uid, proj.id, default = {
'name':_("%s (copy)") % (proj.name),
'state':'open',
'date_start':new_date_start,
'date':new_date_end}, context=context)
result.append(new_id)
if result and len(result):
res_id = result[0]
form_view_id = data_obj._get_id(cr, uid, 'project', 'edit_project')
form_view = data_obj.read(cr, uid, form_view_id, ['res_id'])
tree_view_id = data_obj._get_id(cr, uid, 'project', 'view_project')
tree_view = data_obj.read(cr, uid, tree_view_id, ['res_id'])
search_view_id = data_obj._get_id(cr, uid, 'project', 'view_project_project_filter')
search_view = data_obj.read(cr, uid, search_view_id, ['res_id'])
return {
'name': _('Projects'),
'view_type': 'form',
'view_mode': 'form,tree',
'res_model': 'project.project',
'view_id': False,
'res_id': res_id,
'views': [(form_view['res_id'],'form'),(tree_view['res_id'],'tree')],
'type': 'ir.actions.act_window',
'search_view_id': search_view['res_id'],
}
@api.multi
def setActive(self, value=True):
""" Set a project as active/inactive, and its tasks as well. """
self.write({'active': value})
def create(self, cr, uid, vals, context=None):
ir_values = self.pool.get('ir.values').get_default(cr, uid, 'project.config.settings', 'generate_project_alias')
if ir_values:
vals['alias_name'] = vals.get('alias_name') or vals.get('name')
# Prevent double project creation when 'use_tasks' is checked
create_context = dict(context or {},
project_creation_in_progress=True,
mail_create_nosubscribe=True)
return super(project, self).create(cr, uid, vals, context=create_context)
def write(self, cr, uid, ids, vals, context=None):
# if alias_model has been changed, update alias_model_id accordingly
if vals.get('alias_model'):
model_ids = self.pool.get('ir.model').search(cr, uid, [('model', '=', vals.get('alias_model', 'project.task'))])
vals.update(alias_model_id=model_ids[0])
res = super(project, self).write(cr, uid, ids, vals, context=context)
if 'active' in vals:
# archiving/unarchiving a project does it on its tasks, too
projects = self.browse(cr, uid, ids, context)
tasks = projects.with_context(active_test=False).mapped('tasks')
tasks.write({'active': vals['active']})
return res
class task(osv.osv):
_name = "project.task"
_description = "Task"
_date_name = "date_start"
_inherit = ['mail.thread', 'ir.needaction_mixin']
_mail_post_access = 'read'
def _get_default_partner(self, cr, uid, context=None):
if context is None:
context = {}
if 'default_project_id' in context:
project = self.pool.get('project.project').browse(cr, uid, context['default_project_id'], context=context)
if project and project.partner_id:
return project.partner_id.id
return False
def _get_default_stage_id(self, cr, uid, context=None):
""" Gives default stage_id """
if context is None:
context = {}
return self.stage_find(cr, uid, [], context.get('default_project_id'), [('fold', '=', False)], context=context)
def _read_group_stage_ids(self, cr, uid, ids, domain, read_group_order=None, access_rights_uid=None, context=None):
if context is None:
context = {}
stage_obj = self.pool.get('project.task.type')
order = stage_obj._order
access_rights_uid = access_rights_uid or uid
if read_group_order == 'stage_id desc':
order = '%s desc' % order
if 'default_project_id' in context:
search_domain = ['|', ('project_ids', '=', context['default_project_id']), ('id', 'in', ids)]
else:
search_domain = [('id', 'in', ids)]
stage_ids = stage_obj._search(cr, uid, search_domain, order=order, access_rights_uid=access_rights_uid, context=context)
result = stage_obj.name_get(cr, access_rights_uid, stage_ids, context=context)
# restore order of the search
result.sort(lambda x, y: cmp(stage_ids.index(x[0]), stage_ids.index(y[0])))
fold = {}
for stage in stage_obj.browse(cr, access_rights_uid, stage_ids, context=context):
fold[stage.id] = stage.fold or False
return result, fold
_group_by_full = {
'stage_id': _read_group_stage_ids,
}
def onchange_remaining(self, cr, uid, ids, remaining=0.0, planned=0.0):
if remaining and not planned:
return {'value': {'planned_hours': remaining}}
return {}
def onchange_planned(self, cr, uid, ids, planned=0.0, effective=0.0):
return {'value': {'remaining_hours': planned - effective}}
def onchange_project(self, cr, uid, id, project_id, context=None):
if project_id:
project = self.pool.get('project.project').browse(cr, uid, project_id, context=context)
if project and project.partner_id:
return {'value': {'partner_id': project.partner_id.id}}
return {}
def onchange_user_id(self, cr, uid, ids, user_id, context=None):
vals = {}
if user_id:
vals['date_start'] = fields.datetime.now()
return {'value': vals}
def duplicate_task(self, cr, uid, map_ids, context=None):
mapper = lambda t: map_ids.get(t.id, t.id)
for task in self.browse(cr, uid, map_ids.values(), context):
new_child_ids = set(map(mapper, task.child_ids))
new_parent_ids = set(map(mapper, task.parent_ids))
if new_child_ids or new_parent_ids:
task.write({'parent_ids': [(6,0,list(new_parent_ids))],
'child_ids': [(6,0,list(new_child_ids))]})
def copy_data(self, cr, uid, id, default=None, context=None):
if default is None:
default = {}
current = self.browse(cr, uid, id, context=context)
if not default.get('name'):
default['name'] = _("%s (copy)") % current.name
if 'remaining_hours' not in default:
default['remaining_hours'] = current.planned_hours
return super(task, self).copy_data(cr, uid, id, default, context)
_columns = {
'active': fields.boolean('Active'),
'name': fields.char('Task Title', track_visibility='onchange', size=128, required=True, select=True),
'description': fields.html('Description'),
'priority': fields.selection([('0','Normal'), ('1','High')], 'Priority', select=True),
'sequence': fields.integer('Sequence', select=True, help="Gives the sequence order when displaying a list of tasks."),
'stage_id': fields.many2one('project.task.type', 'Stage', track_visibility='onchange', select=True,
domain="[('project_ids', '=', project_id)]", copy=False),
'tag_ids': fields.many2many('project.tags', string='Tags', oldname='categ_ids'),
'kanban_state': fields.selection([('normal', 'In Progress'),('done', 'Ready for next stage'),('blocked', 'Blocked')], 'Kanban State',
track_visibility='onchange',
help="A task's kanban state indicates special situations affecting it:\n"
" * Normal is the default situation\n"
" * Blocked indicates something is preventing the progress of this task\n"
" * Ready for next stage indicates the task is ready to be pulled to the next stage",
required=True, copy=False),
'create_date': fields.datetime('Create Date', readonly=True, select=True),
'write_date': fields.datetime('Last Modification Date', readonly=True, select=True), #not displayed in the view but it might be useful with base_action_rule module (and it needs to be defined first for that)
'date_start': fields.datetime('Starting Date', select=True, copy=False),
'date_end': fields.datetime('Ending Date', select=True, copy=False),
'date_assign': fields.datetime('Assigning Date', select=True, copy=False, readonly=True),
'date_deadline': fields.date('Deadline', select=True, copy=False),
'date_last_stage_update': fields.datetime('Last Stage Update', select=True, copy=False, readonly=True),
'project_id': fields.many2one('project.project', 'Project', ondelete='set null', select=True, track_visibility='onchange', change_default=True),
'parent_ids': fields.many2many('project.task', 'project_task_parent_rel', 'task_id', 'parent_id', 'Parent Tasks'),
'child_ids': fields.many2many('project.task', 'project_task_parent_rel', 'parent_id', 'task_id', 'Delegated Tasks'),
'notes': fields.text('Notes'),
'planned_hours': fields.float('Initially Planned Hours', help='Estimated time to do the task, usually set by the project manager when the task is in draft state.'),
'remaining_hours': fields.float('Remaining Hours', digits=(16,2), help="Total remaining time, can be re-estimated periodically by the assignee of the task."),
'user_id': fields.many2one('res.users', 'Assigned to', select=True, track_visibility='onchange'),
'partner_id': fields.many2one('res.partner', 'Customer'),
'manager_id': fields.related('project_id', 'analytic_account_id', 'user_id', type='many2one', relation='res.users', string='Project Manager'),
'company_id': fields.many2one('res.company', 'Company'),
'id': fields.integer('ID', readonly=True),
'color': fields.integer('Color Index'),
'user_email': fields.related('user_id', 'email', type='char', string='User Email', readonly=True),
'attachment_ids': fields.one2many('ir.attachment', 'res_id', domain=lambda self: [('res_model', '=', self._name)], auto_join=True, string='Attachments'),
        # In the domain of displayed_image_id, we couldn't use attachment_ids because a one2many is represented as a list of commands, so we used res_model & res_id
'displayed_image_id': fields.many2one('ir.attachment', domain="[('res_model', '=', 'project.task'), ('res_id', '=', id), ('mimetype', 'ilike', 'image')]", string='Displayed Image'),
'legend_blocked': fields.related("stage_id", "legend_blocked", type="char", string='Kanban Blocked Explanation'),
'legend_done': fields.related("stage_id", "legend_done", type="char", string='Kanban Valid Explanation'),
'legend_normal': fields.related("stage_id", "legend_normal", type="char", string='Kanban Ongoing Explanation'),
}
_defaults = {
'stage_id': _get_default_stage_id,
'project_id': lambda self, cr, uid, ctx=None: ctx.get('default_project_id') if ctx is not None else False,
'date_last_stage_update': fields.datetime.now,
'kanban_state': 'normal',
'priority': '0',
'sequence': 10,
'active': True,
'user_id': lambda obj, cr, uid, ctx=None: uid,
'company_id': lambda self, cr, uid, ctx=None: self.pool.get('res.company')._company_default_get(cr, uid, 'project.task', context=ctx),
'partner_id': lambda self, cr, uid, ctx=None: self._get_default_partner(cr, uid, context=ctx),
'date_start': fields.datetime.now,
}
_order = "priority desc, sequence, date_start, name, id"
def _check_recursion(self, cr, uid, ids, context=None):
for id in ids:
visited_branch = set()
visited_node = set()
res = self._check_cycle(cr, uid, id, visited_branch, visited_node, context=context)
if not res:
return False
return True
def _check_cycle(self, cr, uid, id, visited_branch, visited_node, context=None):
if id in visited_branch: #Cycle
return False
        if id in visited_node: # already tested, no need to check it again
return True
visited_branch.add(id)
visited_node.add(id)
#visit child using DFS
task = self.browse(cr, uid, id, context=context)
for child in task.child_ids:
res = self._check_cycle(cr, uid, child.id, visited_branch, visited_node, context=context)
if not res:
return False
visited_branch.remove(id)
return True
def _check_dates(self, cr, uid, ids, context=None):
if context == None:
context = {}
obj_task = self.browse(cr, uid, ids[0], context=context)
start = obj_task.date_start or False
end = obj_task.date_end or False
if start and end :
if start > end:
return False
return True
_constraints = [
(_check_recursion, 'Error ! You cannot create recursive tasks.', ['parent_ids']),
(_check_dates, 'Error ! Task starting date must be lower than its ending date.', ['date_start','date_end'])
]
# Override view according to the company definition
def fields_view_get(self, cr, uid, view_id=None, view_type='form', context=None, toolbar=False, submenu=False):
users_obj = self.pool.get('res.users')
if context is None: context = {}
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
tm = obj_tm and obj_tm.name or 'Hours'
res = super(task, self).fields_view_get(cr, uid, view_id=view_id, view_type=view_type, context=context, toolbar=toolbar, submenu=submenu)
# read uom as admin to avoid access rights issues, e.g. for portal/share users,
# this should be safe (no context passed to avoid side-effects)
obj_tm = users_obj.browse(cr, SUPERUSER_ID, uid, context=context).company_id.project_time_mode_id
try:
# using get_object to get translation value
uom_hour = self.pool['ir.model.data'].get_object(cr, uid, 'product', 'product_uom_hour', context=context)
except ValueError:
uom_hour = False
if not obj_tm or not uom_hour or obj_tm.id == uom_hour.id:
return res
eview = etree.fromstring(res['arch'])
# if the project_time_mode_id is not in hours (so in days), display it as a float field
def _check_rec(eview):
if eview.attrib.get('widget','') == 'float_time':
eview.set('widget','float')
for child in eview:
_check_rec(child)
return True
_check_rec(eview)
res['arch'] = etree.tostring(eview)
# replace reference of 'Hours' to 'Day(s)'
for f in res['fields']:
            # TODO: this does NOT work in languages other than English
# the field 'Initially Planned Hours' should be replaced by 'Initially Planned Days'
# but string 'Initially Planned Days' is not available in translation
if 'Hours' in res['fields'][f]['string']:
res['fields'][f]['string'] = res['fields'][f]['string'].replace('Hours', obj_tm.name)
return res
def get_empty_list_help(self, cr, uid, help, context=None):
context = dict(context or {})
context['empty_list_help_id'] = context.get('default_project_id')
context['empty_list_help_model'] = 'project.project'
context['empty_list_help_document_name'] = _("tasks")
return super(task, self).get_empty_list_help(cr, uid, help, context=context)
# ----------------------------------------
# Case management
# ----------------------------------------
def stage_find(self, cr, uid, cases, section_id, domain=[], order='sequence', context=None):
""" Override of the base.stage method
Parameter of the stage search taken from the lead:
- section_id: if set, stages must belong to this section or
be a default stage; if not set, stages must be default
stages
"""
if isinstance(cases, (int, long)):
cases = self.browse(cr, uid, cases, context=context)
# collect all section_ids
section_ids = []
if section_id:
section_ids.append(section_id)
for task in cases:
if task.project_id:
section_ids.append(task.project_id.id)
search_domain = []
if section_ids:
search_domain = [('|')] * (len(section_ids) - 1)
for section_id in section_ids:
search_domain.append(('project_ids', '=', section_id))
search_domain += list(domain)
# perform search, return the first found
stage_ids = self.pool.get('project.task.type').search(cr, uid, search_domain, order=order, context=context)
if stage_ids:
return stage_ids[0]
return False
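    # Hedged illustration of the domain built above: with section_id = 7 and a single task
    # that also belongs to project 7, section_ids becomes [7, 7] and the search domain ends
    # up as ['|', ('project_ids', '=', 7), ('project_ids', '=', 7), ('fold', '=', False)];
    # the first stage found in sequence order is returned.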
def _check_child_task(self, cr, uid, ids, context=None):
if context == None:
context = {}
tasks = self.browse(cr, uid, ids, context=context)
for task in tasks:
if task.child_ids:
for child in task.child_ids:
if child.stage_id and not child.stage_id.fold:
raise UserError(_("Child task still open.\nPlease cancel or complete child task first."))
return True
def _store_history(self, cr, uid, ids, context=None):
for task in self.browse(cr, uid, ids, context=context):
self.pool.get('project.task.history').create(cr, uid, {
'task_id': task.id,
'remaining_hours': task.remaining_hours,
'planned_hours': task.planned_hours,
'kanban_state': task.kanban_state,
'type_id': task.stage_id.id,
'user_id': task.user_id.id
}, context=context)
return True
# ------------------------------------------------
# CRUD overrides
# ------------------------------------------------
def create(self, cr, uid, vals, context=None):
context = dict(context or {})
# for default stage
if vals.get('project_id') and not context.get('default_project_id'):
context['default_project_id'] = vals.get('project_id')
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.datetime.now()
# context: no_log, because subtype already handle this
create_context = dict(context, mail_create_nolog=True)
task_id = super(task, self).create(cr, uid, vals, context=create_context)
self._store_history(cr, uid, [task_id], context=context)
return task_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
# stage change: update date_last_stage_update
if 'stage_id' in vals:
vals['date_last_stage_update'] = fields.datetime.now()
# user_id change: update date_assign
if vals.get('user_id'):
vals['date_assign'] = fields.datetime.now()
# reset kanban state when changing stage
if 'stage_id' in vals:
if 'kanban_state' not in vals:
vals['kanban_state'] = 'normal'
result = super(task, self).write(cr, uid, ids, vals, context=context)
if any(item in vals for item in ['stage_id', 'remaining_hours', 'user_id', 'kanban_state']):
self._store_history(cr, uid, ids, context=context)
return result
def unlink(self, cr, uid, ids, context=None):
if context is None:
context = {}
self._check_child_task(cr, uid, ids, context=context)
res = super(task, self).unlink(cr, uid, ids, context)
return res
def _get_total_hours(self):
return self.remaining_hours
def _generate_task(self, cr, uid, tasks, ident=4, context=None):
context = context or {}
result = ""
ident = ' '*ident
company = self.pool["res.users"].browse(cr, uid, uid, context=context).company_id
duration_uom = {
'day(s)': 'd', 'days': 'd', 'day': 'd', 'd': 'd',
'month(s)': 'm', 'months': 'm', 'month': 'month', 'm': 'm',
'week(s)': 'w', 'weeks': 'w', 'week': 'w', 'w': 'w',
'hour(s)': 'H', 'hours': 'H', 'hour': 'H', 'h': 'H',
}.get(company.project_time_mode_id.name.lower(), "hour(s)")
for task in tasks:
if task.stage_id and task.stage_id.fold:
continue
result += '''
%sdef Task_%s():
%s todo = \"%.2f%s\"
%s effort = \"%.2f%s\"''' % (ident, task.id, ident, task.remaining_hours, duration_uom, ident, task._get_total_hours(), duration_uom)
start = []
for t2 in task.parent_ids:
start.append("up.Task_%s.end" % (t2.id,))
if start:
result += '''
%s start = max(%s)
''' % (ident,','.join(start))
if task.user_id:
result += '''
%s resource = %s
''' % (ident, 'User_'+str(task.user_id.id))
result += "\n"
return result
# ---------------------------------------------------
# Mail gateway
# ---------------------------------------------------
@api.multi
def _track_template(self, tracking):
res = super(task, self)._track_template(tracking)
test_task = self[0]
changes, tracking_value_ids = tracking[test_task.id]
if 'stage_id' in changes and test_task.stage_id.mail_template_id:
res['stage_id'] = (test_task.stage_id.mail_template_id, {'composition_mode': 'mass_mail'})
return res
def _track_subtype(self, cr, uid, ids, init_values, context=None):
record = self.browse(cr, uid, ids[0], context=context)
if 'kanban_state' in init_values and record.kanban_state == 'blocked':
return 'project.mt_task_blocked'
elif 'kanban_state' in init_values and record.kanban_state == 'done':
return 'project.mt_task_ready'
elif 'user_id' in init_values and record.user_id: # assigned -> new
return 'project.mt_task_new'
elif 'stage_id' in init_values and record.stage_id and record.stage_id.sequence <= 1: # start stage -> new
return 'project.mt_task_new'
elif 'stage_id' in init_values:
return 'project.mt_task_stage'
return super(task, self)._track_subtype(cr, uid, ids, init_values, context=context)
def _notification_group_recipients(self, cr, uid, ids, message, recipients, done_ids, group_data, context=None):
""" Override the mail.thread method to handle project users and officers
recipients. Indeed those will have specific action in their notification
emails: creating tasks, assigning it. """
group_project_user = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'project.group_project_user')
for recipient in recipients:
if recipient.id in done_ids:
continue
if recipient.user_ids and group_project_user in recipient.user_ids[0].groups_id.ids:
group_data['group_project_user'] |= recipient
done_ids.add(recipient.id)
return super(task, self)._notification_group_recipients(cr, uid, ids, message, recipients, done_ids, group_data, context=context)
def _notification_get_recipient_groups(self, cr, uid, ids, message, recipients, context=None):
res = super(task, self)._notification_get_recipient_groups(cr, uid, ids, message, recipients, context=context)
take_action = self._notification_link_helper(cr, uid, ids, 'assign', context=context)
new_action_id = self.pool['ir.model.data'].xmlid_to_res_id(cr, uid, 'project.action_view_task')
new_action = self._notification_link_helper(cr, uid, ids, 'new', context=context, action_id=new_action_id)
task_record = self.browse(cr, uid, ids[0], context=context)
actions = []
if not task_record.user_id:
actions.append({'url': take_action, 'title': _('I take it')})
else:
actions.append({'url': new_action, 'title': _('New Task')})
res['group_project_user'] = {
'actions': actions
}
return res
@api.cr_uid_context
def message_get_reply_to(self, cr, uid, ids, default=None, context=None):
""" Override to get the reply_to of the parent project. """
tasks = self.browse(cr, SUPERUSER_ID, ids, context=context)
project_ids = set([task.project_id.id for task in tasks if task.project_id])
aliases = self.pool['project.project'].message_get_reply_to(cr, uid, list(project_ids), default=default, context=context)
return dict((task.id, aliases.get(task.project_id and task.project_id.id or 0, False)) for task in tasks)
def email_split(self, cr, uid, ids, msg, context=None):
email_list = tools.email_split((msg.get('to') or '') + ',' + (msg.get('cc') or ''))
# check left-part is not already an alias
task_ids = self.browse(cr, uid, ids, context=context)
aliases = [task.project_id.alias_name for task in task_ids if task.project_id]
return filter(lambda x: x.split('@')[0] not in aliases, email_list)
def message_new(self, cr, uid, msg, custom_values=None, context=None):
""" Override to updates the document according to the email. """
if custom_values is None:
custom_values = {}
defaults = {
'name': msg.get('subject'),
'planned_hours': 0.0,
'partner_id': msg.get('author_id', False)
}
defaults.update(custom_values)
res = super(task, self).message_new(cr, uid, msg, custom_values=defaults, context=context)
email_list = self.email_split(cr, uid, [res], msg, context=context)
partner_ids = filter(None, self._find_partner_from_emails(cr, uid, [res], email_list, force_create=False, context=context))
self.message_subscribe(cr, uid, [res], partner_ids, context=context)
return res
def message_update(self, cr, uid, ids, msg, update_vals=None, context=None):
""" Override to update the task according to the email. """
if update_vals is None:
update_vals = {}
maps = {
'cost': 'planned_hours',
}
for line in msg['body'].split('\n'):
line = line.strip()
res = tools.command_re.match(line)
if res:
match = res.group(1).lower()
field = maps.get(match)
if field:
try:
update_vals[field] = float(res.group(2).lower())
except (ValueError, TypeError):
pass
email_list = self.email_split(cr, uid, ids, msg, context=context)
partner_ids = filter(None, self._find_partner_from_emails(cr, uid, ids, email_list, force_create=False, context=context))
self.message_subscribe(cr, uid, ids, partner_ids, context=context)
return super(task, self).message_update(cr, uid, ids, msg, update_vals=update_vals, context=context)
def message_get_suggested_recipients(self, cr, uid, ids, context=None):
recipients = super(task, self).message_get_suggested_recipients(cr, uid, ids, context=context)
for data in self.browse(cr, uid, ids, context=context):
if data.partner_id:
reason = _('Customer Email') if data.partner_id.email else _('Customer')
data._message_add_suggested_recipient(recipients, partner=data.partner_id, reason=reason)
return recipients
def message_get_email_values(self, cr, uid, ids, notif_mail=None, context=None):
res = super(task, self).message_get_email_values(cr, uid, ids, notif_mail=notif_mail, context=context)
current_task = self.browse(cr, uid, ids[0], context=context)
headers = {}
if res.get('headers'):
try:
headers.update(eval(res['headers']))
except Exception:
pass
if current_task.project_id:
current_objects = filter(None, headers.get('X-Odoo-Objects', '').split(','))
current_objects.insert(0, 'project.project-%s, ' % current_task.project_id.id)
headers['X-Odoo-Objects'] = ','.join(current_objects)
if current_task.tag_ids:
headers['X-Odoo-Tags'] = ','.join([tag.name for tag in current_task.tag_ids])
res['headers'] = repr(headers)
return res
class account_analytic_account(osv.osv):
_inherit = 'account.analytic.account'
_description = 'Analytic Account'
def _compute_project_count(self, cr, uid, ids, fieldnames, args, context=None):
result = dict.fromkeys(ids, 0)
for account in self.browse(cr, uid, ids, context=context):
result[account.id] = len(account.project_ids)
return result
_columns = {
'use_tasks': fields.boolean('Tasks', help="Check this box to manage internal activities through this project"),
'company_uom_id': fields.related('company_id', 'project_time_mode_id', string="Company UOM", type='many2one', relation='product.uom'),
'project_ids': fields.one2many('project.project', 'analytic_account_id', 'Projects'),
'project_count': fields.function(_compute_project_count, 'Project Count', type='integer')
}
def on_change_template(self, cr, uid, ids, template_id, date_start=False, context=None):
res = super(account_analytic_account, self).on_change_template(cr, uid, ids, template_id, date_start=date_start, context=context)
if template_id and 'value' in res:
template = self.browse(cr, uid, template_id, context=context)
res['value']['use_tasks'] = template.use_tasks
return res
def _trigger_project_creation(self, cr, uid, vals, context=None):
'''
This function is used to decide if a project needs to be automatically created or not when an analytic account is created. It returns True if it needs to be so, False otherwise.
'''
if context is None: context = {}
return vals.get('use_tasks') and not 'project_creation_in_progress' in context
@api.cr_uid_id_context
def project_create(self, cr, uid, analytic_account_id, vals, context=None):
'''
        This function is called at the time of analytic account creation and is used to create a project automatically linked to it if the conditions are met.
'''
project_pool = self.pool.get('project.project')
project_id = project_pool.search(cr, uid, [('analytic_account_id','=', analytic_account_id)])
if not project_id and self._trigger_project_creation(cr, uid, vals, context=context):
project_values = {
'name': vals.get('name'),
'analytic_account_id': analytic_account_id,
'use_tasks': True,
}
return project_pool.create(cr, uid, project_values, context=context)
return False
def create(self, cr, uid, vals, context=None):
if context is None:
context = {}
if vals.get('child_ids', False) and context.get('analytic_project_copy', False):
vals['child_ids'] = []
analytic_account_id = super(account_analytic_account, self).create(cr, uid, vals, context=context)
self.project_create(cr, uid, analytic_account_id, vals, context=context)
return analytic_account_id
def write(self, cr, uid, ids, vals, context=None):
if isinstance(ids, (int, long)):
ids = [ids]
vals_for_project = vals.copy()
for account in self.browse(cr, uid, ids, context=context):
if not vals.get('name'):
vals_for_project['name'] = account.name
self.project_create(cr, uid, account.id, vals_for_project, context=context)
return super(account_analytic_account, self).write(cr, uid, ids, vals, context=context)
def unlink(self, cr, uid, ids, context=None):
proj_ids = self.pool['project.project'].search(cr, uid, [('analytic_account_id', 'in', ids)])
has_tasks = self.pool['project.task'].search(cr, uid, [('project_id', 'in', proj_ids)], count=True, context=context)
if has_tasks:
raise UserError(_('Please remove existing tasks in the project linked to the accounts you want to delete.'))
return super(account_analytic_account, self).unlink(cr, uid, ids, context=context)
def name_search(self, cr, uid, name, args=None, operator='ilike', context=None, limit=100):
if args is None:
args = []
if context is None:
context={}
if context.get('current_model') == 'project.project':
project_ids = self.search(cr, uid, args + [('name', operator, name)], limit=limit, context=context)
return self.name_get(cr, uid, project_ids, context=context)
return super(account_analytic_account, self).name_search(cr, uid, name, args=args, operator=operator, context=context, limit=limit)
def projects_action(self, cr, uid, ids, context=None):
accounts = self.browse(cr, uid, ids, context=context)
project_ids = sum([account.project_ids.ids for account in accounts], [])
result = {
"type": "ir.actions.act_window",
"res_model": "project.project",
"views": [[False, "tree"], [False, "form"]],
"domain": [["id", "in", project_ids]],
"context": {"create": False},
"name": "Projects",
}
if len(project_ids) == 1:
result['views'] = [(False, "form")]
result['res_id'] = project_ids[0]
else:
result = {'type': 'ir.actions.act_window_close'}
return result
class project_project(osv.osv):
_inherit = 'project.project'
_defaults = {
'use_tasks': True
}
class project_task_history(osv.osv):
"""
Tasks History, used for cumulative flow charts (Lean/Agile)
"""
_name = 'project.task.history'
_description = 'History of Tasks'
_rec_name = 'task_id'
_log_access = False
def _get_date(self, cr, uid, ids, name, arg, context=None):
result = {}
for history in self.browse(cr, uid, ids, context=context):
if history.type_id and history.type_id.fold:
result[history.id] = history.date
continue
cr.execute('''select
date
from
project_task_history
where
task_id=%s and
id>%s
order by id limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
result[history.id] = res and res[0] or False
return result
def _get_related_date(self, cr, uid, ids, context=None):
result = []
for history in self.browse(cr, uid, ids, context=context):
cr.execute('''select
id
from
project_task_history
where
task_id=%s and
id<%s
order by id desc limit 1''', (history.task_id.id, history.id))
res = cr.fetchone()
if res:
result.append(res[0])
return result
_columns = {
'task_id': fields.many2one('project.task', 'Task', ondelete='cascade', required=True, select=True),
'type_id': fields.many2one('project.task.type', 'Stage'),
'kanban_state': fields.selection([('normal', 'Normal'), ('blocked', 'Blocked'), ('done', 'Ready for next stage')], 'Kanban State', required=False),
'date': fields.date('Date', select=True),
'end_date': fields.function(_get_date, string='End Date', type="date", store={
'project.task.history': (_get_related_date, None, 20)
}),
'remaining_hours': fields.float('Remaining Time', digits=(16, 2)),
'planned_hours': fields.float('Planned Time', digits=(16, 2)),
'user_id': fields.many2one('res.users', 'Responsible'),
}
_defaults = {
'date': fields.date.context_today,
}
class project_task_history_cumulative(osv.osv):
_name = 'project.task.history.cumulative'
_table = 'project_task_history_cumulative'
_inherit = 'project.task.history'
_auto = False
_columns = {
'end_date': fields.date('End Date'),
'nbr_tasks': fields.integer('# of Tasks', readonly=True),
'project_id': fields.many2one('project.project', 'Project'),
}
def init(self, cr):
tools.drop_view_if_exists(cr, 'project_task_history_cumulative')
cr.execute(""" CREATE VIEW project_task_history_cumulative AS (
SELECT
history.date::varchar||'-'||history.history_id::varchar AS id,
history.date AS end_date,
*
FROM (
SELECT
h.id AS history_id,
h.date+generate_series(0, CAST((coalesce(h.end_date, DATE 'tomorrow')::date - h.date) AS integer)-1) AS date,
h.task_id, h.type_id, h.user_id, h.kanban_state,
count(h.task_id) as nbr_tasks,
greatest(h.remaining_hours, 1) AS remaining_hours, greatest(h.planned_hours, 1) AS planned_hours,
t.project_id
FROM
project_task_history AS h
JOIN project_task AS t ON (h.task_id = t.id)
GROUP BY
h.id,
h.task_id,
t.project_id
) AS history
)
""")
class project_tags(osv.Model):
""" Tags of project's tasks (or issues) """
_name = "project.tags"
_description = "Tags of project's tasks, issues..."
_columns = {
'name': fields.char('Name', required=True),
'color': fields.integer('Color Index'),
}
_sql_constraints = [
('name_uniq', 'unique (name)', "Tag name already exists !"),
]
| gpl-3.0 | -7,487,525,680,511,415,000 | 48.675601 | 215 | 0.583155 | false |
alex-ip/geophys2netcdf | utils/rename_attribute.py | 1 | 1663 | '''
Created on Apr 7, 2016
@author: Alex Ip, Geoscience Australia
'''
import sys
import netCDF4
import subprocess
import re
from geophys2netcdf import ERS2NetCDF
def main():
assert len(
sys.argv) == 5, 'Usage: %s <root_dir> <file_template> <old_attribute_name> <new_attribute_name>' % sys.argv[0]
root_dir = sys.argv[1]
file_template = sys.argv[2]
old_attribute_name = sys.argv[3]
new_attribute_name = sys.argv[4]
nc_path_list = [filename for filename in subprocess.check_output(
['find', root_dir, '-name', file_template]).split('\n') if re.search('\.nc$', filename)]
for nc_path in nc_path_list:
print 'Renaming attribute in %s' % nc_path
nc_dataset = netCDF4.Dataset(nc_path, 'r+')
try:
# Rename attribute
value = getattr(nc_dataset, old_attribute_name)
setattr(nc_dataset, new_attribute_name, value)
delattr(nc_dataset, old_attribute_name)
print '%s.%s renamed to %s. (Value = %s)' % (nc_path, old_attribute_name, new_attribute_name, value)
except Exception as e:
print 'Unable to rename attribute %s to %s: %s' % (old_attribute_name, new_attribute_name, e.message)
nc_dataset.close()
print 'Updating metadata in %s' % nc_path
try:
g2n_object = ERS2NetCDF()
g2n_object.update_nc_metadata(nc_path, do_stats=True)
# Kind of redundant, but possibly useful for debugging
g2n_object.check_json_metadata()
except Exception as e:
print 'Metadata update failed: %s' % e.message
if __name__ == '__main__':
main()
| apache-2.0 | 6,970,008,681,008,030,000 | 31.607843 | 118 | 0.608539 | false |
yavorpap/snake | snake/geometry.py | 1 | 2024 | #-------------------------------------------------------------------------------
# Name: geometry
# Purpose:
#
# Author: Yavor
#
# Created:
# Copyright: (c) Yavor
# Licence: GLPv3
#-------------------------------------------------------------------------------
import math
class Point:
"""A class representing a point with integer coordinates.
"""
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __add__(self, vector):
x = self.x + vector.x
y = self.y + vector.y
return Point(x, y)
def __mod__(self, n):
x = self.x % n
y = self.y % n
return Point(x, y)
@staticmethod
def vector_from_points(first_point, second_point):
dx = second_point.x - first_point.x
dy = second_point.y - first_point.y
return FreeVector(dx, dy)
def __repr__(self):
return "Point with coordinates: {0}, {1}".format(self.x, self.y)
def __eq__(self, other_point):
        if other_point is None:
            # a constructed point is never equal to None
            return False
return (self.x == other_point.x) and (self.y == other_point.y)
class FreeVector:
"""A class representing a free vector,
that is either horizontal or vertical.
"""
def __init__(self, x=0, y=0):
self.x = x
self.y = y
def __mul__(self, factor):
x = self.x * factor
y = self.y * factor
return FreeVector(x, y)
def __add__(self, other_vector):
return FreeVector(self.x + other_vector.x, self.y + other_vector.y)
def __eq__(self, other_vector):
if self.x == other_vector.x and self.y == other_vector.y:
return True
return False
def length(self):
assert((self.x == 0) or (self.y == 0))
return abs(self.x) + abs(self.y)
def normalize(self):
length = self.length()
self.x //= length
self.y //= length
def __repr__(self):
return "FreeVector with coordinates: {0}, {1}".format(self.x, self.y)
| gpl-3.0 | -7,222,601,374,503,367,000 | 24.948718 | 80 | 0.495059 | false |
delMar43/wcmodtoolsources | WC1_clone/room_engine/save_expander.py | 1 | 5799 | import game_structures
from block_expander import conv_endian
from copy import copy
##to do:
##check to see if the system correctly transforms between saving and loading
class Savesystem:
def __init__(self, filename = "savegame.wld"):
self.saves = []
self.filename = filename
# open the save file
f = open(filename, 'rb')
str = f.read()
f.close()
slots = []
for i in range(0, len(str), 828):
slots.append(str[i:i+828])
for s in slots:
name = s[0:s[0:16].find("\0")]
#see which saved games exist
if (name[0:5] != "game "):
#instantiate those games in the list
self.saves.append(Savegame(name, s))
else:
self.saves.append(None)
def Load(self, i=None):
if (i is None):
print "specify a game to load"
elif i >= len(self.saves):
print "index too large"
elif self.saves[i] is None:
print "slot empty"
else:
self.saves[i].Load()
def Save(self, i=None):
if (i is None):
print "specify a game to save"
elif i >= len(self.saves):
print "index too large"
else:
saved = self.saves[i].Save()
# open the save file
f = open(self.filename, 'rb')
str = f.read()
f.close()
            # overwrite the appropriate 828-byte slot in str with the freshly serialised
            # record (padded/truncated to the 828-byte slot size used in __init__)
            saved = saved.ljust(828, "\0")[:828]
            str = str[:i * 828] + saved + str[(i + 1) * 828:]
            # rewrite the wld file with the updated slot in place
f = open(self.filename, 'wb')
f.write(str)
f.close()
class Savegame:
    def __init__(self, name, data):
        self.savename = name
        self.data = data

    def Save(self):
        #parse game structures into outd
        outd = ""
        outd = self.savename.ljust(18, "\0")
        for p in game_structures.pilots:
            outd += p.name.ljust(14, "\0")
            outd += p.callsign.ljust(14, "\0")
            outd += "\0\0" #unknown
            outd += chr(p.rank%256) + chr(p.rank/256)
            outd += chr(p.missions%256) + chr(p.missions/256)
            outd += chr(p.kills%256) + chr(p.kills/256)
            outd += "\0\0" #unknown
        outd += chr(0x42) + chr(0x9A) + chr(0x00) + chr(0x00)
        pl = game_structures.Bluehair
        outd += chr(pl.bronzestars)
        outd += chr(pl.silverstars)
        outd += chr(pl.goldstars)
        outd += chr(pl.goldsun)
        outd += chr(pl.pewter)
        for r in range(0, len(game_structures.rib_names)):
            outd += chr(pl.ribbons[game_structures.rib_names[r]])
        outd += chr(game_structures.mission_i)
        outd += chr(game_structures.series_i)
        outd += chr(0x00)*9
        for p in game_structures.pilots[:-1]:
            if p.diedin == -1:
                diedin = 0
            else:
                diedin = 10
            outd += chr(diedin%256) + chr(diedin/256)
        for a in game_structures.aces:
            if a.diedin == -1:
                diedin = 0
            else:
                diedin = 10
            outd += chr(diedin)
        outd += chr(game_structures.date%256) + chr(game_structures.date/256)
        outd += chr(game_structures.year%256) + chr(game_structures.year/256)
        #unknown
        outd += chr(0x06) + chr(0x00) + chr(0x00) + chr(0x00)
        outd += chr(pl.promotion_points%256) + chr(pl.promotion_points/256)
        outd += chr(0x00) + chr(0x00) #unknown
        outd += chr(pl.victory_points%256) + chr(pl.victory_points/256)
        outd += chr(pl.series%256) + chr(pl.series/256)
        for m in game_structures.missiond:
            for n in m:
                outd += n
        return outd
    def Load(self):
        #parse self.data into game structures
        pilotd = self.data[18:360]
        ps = []
        for i in range(0, len(pilotd), 38):
            ps.append(pilotd[i:i+38])
        for i in range(0, len(ps)):
            p = ps[i]
            name = p[0:p[0:14].find("\0")]
            callsign = p[14:14+p[14:28].find("\0")]
            #28-30 unknown
            rank = conv_endian([ord(b) for b in p[30:32]], 0, 2)
            missions = conv_endian([ord(b) for b in p[32:34]], 0, 2)
            kills = conv_endian([ord(b) for b in p[34:36]], 0, 2)
            #36-38 unknown
            game_structures.pilots[i].name = name
            game_structures.pilots[i].callsign = callsign
            game_structures.pilots[i].rank = rank
            game_structures.pilots[i].missions = missions
            game_structures.pilots[i].kills = kills
        #360-363 unknown
        pl = game_structures.Bluehair
        pl.bronzestars = ord(self.data[364])
        pl.silverstars = ord(self.data[365])
        pl.goldstars = ord(self.data[366])
        pl.goldsuns = ord(self.data[367])
        pl.pewter = ord(self.data[368])
        ribbons = [ord(b) for b in self.data[369:381]]
        for r in range(0, len(ribbons)):
            pl.ribbons[game_structures.rib_names[r]] = ribbons[r]
        game_structures.mission_i = ord(self.data[381])
        game_structures.series_i = ord(self.data[382])
        #383-391 unknown
        ##pilot and ace deaths may not be recorded correctly
        pdeaths = self.data[392:408]
        for i in range(0, 8):
            game_structures.pilots[i].diedin = conv_endian([ord(b) for b in pdeaths[i:i+2]], 0, 2)
        adeaths = self.data[408:412]
        for i in range(0, 4):
            game_structures.aces[i].diedin = ord(adeaths[i])
        game_structures.date = conv_endian([ord(b) for b in self.data[412:414]], 0, 2)
        game_structures.year = conv_endian([ord(b) for b in self.data[414:416]], 0, 2)
        #416-419 unknown
        pl.promotion_points = conv_endian([ord(b) for b in self.data[420:422]], 0, 2)
        #422-423 unknown
        pl.victory_points = conv_endian([ord(b) for b in self.data[424:426]], 0, 2)
        pl.series = conv_endian([ord(b) for b in self.data[426:428]], 0, 2)
        #it appears that the saved games store 400 bytes, 100 for each mission in the series
        #it further appears that these 100 bytes are divided into four 25-byte chunks, one for each nav point in the mission
        #the exact structure of these 25 bytes is still undetermined
        ##should compare these 25 bytes to any unidentified branching structures and mission flags
        #(see the illustrative slot-layout summary after this class)
        missiond = self.data[428:828]
        ms = []
        for i in range(0, len(missiond), 100):
            ms.append(missiond[i:i+100])
        for m in ms:
            navd = []
            for i in range(0, len(m), 25):
                navd.append(m[i:i+25])
            game_structures.missiond.append(copy(navd))
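
# Illustrative summary (added for clarity; not part of the original tool): byte
# layout of one 828-byte save slot as it is read back by Load() above. Offsets
# marked "unknown" mirror the comments in the code; treat the rest as an
# interpretation of the parsing logic, not an authoritative format spec.
#     0-  17  slot name (NUL-padded, as written by Save())
#    18- 359  9 pilot records, 38 bytes each (name, callsign, rank, missions, kills)
#   360- 363  unknown (written as 42 9A 00 00 by Save())
#   364- 368  Bluehair medals (bronze/silver/gold stars, gold sun, pewter)
#   369- 380  ribbon counts, one byte per entry in rib_names
#   381- 382  mission index, series index
#   392- 407  pilot deaths, 2 bytes each
#   408- 411  ace deaths, 1 byte each
#   412- 415  in-game date and year
#   420- 421  promotion points; 424-425 victory points; 426-427 series
#   428- 827  mission data: 4 missions x 100 bytes, each split into 4 x 25-byte nav chunks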
savesys = Savesystem("savegame.wld")
#savesys.Load(6)
#savesys.Save(6)
| mit | 4,359,533,154,856,677,400 | 27.014493 | 119 | 0.634937 | false |
adazey/Muzez | libs/nltk/chunk/named_entity.py | 1 | 11160 | # Natural Language Toolkit: Chunk parsing API
#
# Copyright (C) 2001-2016 NLTK Project
# Author: Edward Loper <[email protected]>
# URL: <http://nltk.org/>
# For license information, see LICENSE.TXT
"""
Named entity chunker
"""
from __future__ import print_function
import os, re, pickle
from xml.etree import ElementTree as ET
from nltk.tag import ClassifierBasedTagger, pos_tag
try:
    from nltk.classify import MaxentClassifier
except ImportError:
    pass
from nltk.tree import Tree
from nltk.tokenize import word_tokenize
from nltk.data import find
from nltk.chunk.api import ChunkParserI
from nltk.chunk.util import ChunkScore
class NEChunkParserTagger(ClassifierBasedTagger):
    """
    The IOB tagger used by the chunk parser.
    """
    def __init__(self, train):
        ClassifierBasedTagger.__init__(
            self, train=train,
            classifier_builder=self._classifier_builder)

    def _classifier_builder(self, train):
        return MaxentClassifier.train(train, algorithm='megam',
                                      gaussian_prior_sigma=1,
                                      trace=2)

    def _english_wordlist(self):
        try:
            wl = self._en_wordlist
        except AttributeError:
            from nltk.corpus import words
            self._en_wordlist = set(words.words('en-basic'))
            wl = self._en_wordlist
        return wl

    def _feature_detector(self, tokens, index, history):
        word = tokens[index][0]
        pos = simplify_pos(tokens[index][1])
        if index == 0:
            prevword = prevprevword = None
            prevpos = prevprevpos = None
            prevshape = prevtag = prevprevtag = None
        elif index == 1:
            prevword = tokens[index-1][0].lower()
            prevprevword = None
            prevpos = simplify_pos(tokens[index-1][1])
            prevprevpos = None
            prevtag = history[index-1][0]
            prevshape = prevprevtag = None
        else:
            prevword = tokens[index-1][0].lower()
            prevprevword = tokens[index-2][0].lower()
            prevpos = simplify_pos(tokens[index-1][1])
            prevprevpos = simplify_pos(tokens[index-2][1])
            prevtag = history[index-1]
            prevprevtag = history[index-2]
            prevshape = shape(prevword)
        if index == len(tokens)-1:
            nextword = nextnextword = None
            nextpos = nextnextpos = None
        elif index == len(tokens)-2:
            nextword = tokens[index+1][0].lower()
            nextpos = tokens[index+1][1].lower()
            nextnextword = None
            nextnextpos = None
        else:
            nextword = tokens[index+1][0].lower()
            nextpos = tokens[index+1][1].lower()
            nextnextword = tokens[index+2][0].lower()
            nextnextpos = tokens[index+2][1].lower()

        # 89.6
        features = {
            'bias': True,
            'shape': shape(word),
            'wordlen': len(word),
            'prefix3': word[:3].lower(),
            'suffix3': word[-3:].lower(),
            'pos': pos,
            'word': word,
            'en-wordlist': (word in self._english_wordlist()),
            'prevtag': prevtag,
            'prevpos': prevpos,
            'nextpos': nextpos,
            'prevword': prevword,
            'nextword': nextword,
            'word+nextpos': '%s+%s' % (word.lower(), nextpos),
            'pos+prevtag': '%s+%s' % (pos, prevtag),
            'shape+prevtag': '%s+%s' % (prevshape, prevtag),
        }

        return features
class NEChunkParser(ChunkParserI):
    """
    Expected input: list of pos-tagged words
    """
    def __init__(self, train):
        self._train(train)

    def parse(self, tokens):
        """
        Each token should be a pos-tagged word
        """
        tagged = self._tagger.tag(tokens)
        tree = self._tagged_to_parse(tagged)
        return tree

    def _train(self, corpus):
        # Convert to tagged sequence
        corpus = [self._parse_to_tagged(s) for s in corpus]
        self._tagger = NEChunkParserTagger(train=corpus)

    def _tagged_to_parse(self, tagged_tokens):
        """
        Convert a list of tagged tokens to a chunk-parse tree.
        """
        sent = Tree('S', [])
        for (tok, tag) in tagged_tokens:
            if tag == 'O':
                sent.append(tok)
            elif tag.startswith('B-'):
                sent.append(Tree(tag[2:], [tok]))
            elif tag.startswith('I-'):
                if (sent and isinstance(sent[-1], Tree) and
                        sent[-1].label() == tag[2:]):
                    sent[-1].append(tok)
                else:
                    sent.append(Tree(tag[2:], [tok]))
        return sent

    @staticmethod
    def _parse_to_tagged(sent):
        """
        Convert a chunk-parse tree to a list of tagged tokens.
        """
        toks = []
        for child in sent:
            if isinstance(child, Tree):
                if len(child) == 0:
                    print("Warning -- empty chunk in sentence")
                    continue
                toks.append((child[0], 'B-%s' % child.label()))
                for tok in child[1:]:
                    toks.append((tok, 'I-%s' % child.label()))
            else:
                toks.append((child, 'O'))
        return toks
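
# Illustrative usage sketch (added for clarity; not part of the original NLTK
# module). Training requires the ACE corpora and the megam backend used by
# build_model() below, so this is only an outline of the intended call pattern:
#
#     from nltk import pos_tag, word_tokenize
#     chunker = NEChunkParser(train_trees)   # train_trees: chunked Trees of (word, pos) leaves
#     tree = chunker.parse(pos_tag(word_tokenize("John works for the BBC in London.")))
#     # 'tree' is a Tree('S', ...) whose subtrees mark the recognised named entities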
def shape(word):
    if re.match('[0-9]+(\.[0-9]*)?|[0-9]*\.[0-9]+$', word, re.UNICODE):
        return 'number'
    elif re.match('\W+$', word, re.UNICODE):
        return 'punct'
    elif re.match('\w+$', word, re.UNICODE):
        if word.istitle():
            return 'upcase'
        elif word.islower():
            return 'downcase'
        else:
            return 'mixedcase'
    else:
        return 'other'
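
# Example outputs (illustrative, added for clarity; not in the original source):
#     shape('1996')   -> 'number'
#     shape('...')    -> 'punct'
#     shape('London') -> 'upcase'
#     shape('works')  -> 'downcase'
#     shape('eBay')   -> 'mixedcase'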
def simplify_pos(s):
    if s.startswith('V'): return "V"
    else: return s.split('-')[0]

def postag_tree(tree):
    # Part-of-speech tagging.
    words = tree.leaves()
    tag_iter = (pos for (word, pos) in pos_tag(words))
    newtree = Tree('S', [])
    for child in tree:
        if isinstance(child, Tree):
            newtree.append(Tree(child.label(), []))
            for subchild in child:
                newtree[-1].append( (subchild, next(tag_iter)) )
        else:
            newtree.append( (child, next(tag_iter)) )
    return newtree
def load_ace_data(roots, fmt='binary', skip_bnews=True):
    for root in roots:
        for root, dirs, files in os.walk(root):
            if root.endswith('bnews') and skip_bnews:
                continue
            for f in files:
                if f.endswith('.sgm'):
                    for sent in load_ace_file(os.path.join(root, f), fmt):
                        yield sent

def load_ace_file(textfile, fmt):
    print(' - %s' % os.path.split(textfile)[1])
    annfile = textfile+'.tmx.rdc.xml'

    # Read the xml file, and get a list of entities
    entities = []
    with open(annfile, 'r') as infile:
        xml = ET.parse(infile).getroot()
    for entity in xml.findall('document/entity'):
        typ = entity.find('entity_type').text
        for mention in entity.findall('entity_mention'):
            if mention.get('TYPE') != 'NAME': continue # only NEs
            s = int(mention.find('head/charseq/start').text)
            e = int(mention.find('head/charseq/end').text)+1
            entities.append( (s, e, typ) )

    # Read the text file, and mark the entities.
    with open(textfile, 'r') as infile:
        text = infile.read()

    # Strip XML tags, since they don't count towards the indices
    text = re.sub('<(?!/?TEXT)[^>]+>', '', text)

    # Blank out anything before/after <TEXT>
    def subfunc(m): return ' '*(m.end()-m.start()-6)
    text = re.sub('[\s\S]*<TEXT>', subfunc, text)
    text = re.sub('</TEXT>[\s\S]*', '', text)

    # Simplify quotes
    text = re.sub("``", ' "', text)
    text = re.sub("''", '" ', text)

    entity_types = set(typ for (s,e,typ) in entities)

    # Binary distinction (NE or not NE)
    if fmt == 'binary':
        i = 0
        toks = Tree('S', [])
        for (s,e,typ) in sorted(entities):
            if s < i: s = i # Overlapping! Deal with this better?
            if e <= s: continue
            toks.extend(word_tokenize(text[i:s]))
            toks.append(Tree('NE', text[s:e].split()))
            i = e
        toks.extend(word_tokenize(text[i:]))
        yield toks

    # Multiclass distinction (NE type)
    elif fmt == 'multiclass':
        i = 0
        toks = Tree('S', [])
        for (s,e,typ) in sorted(entities):
            if s < i: s = i # Overlapping! Deal with this better?
            if e <= s: continue
            toks.extend(word_tokenize(text[i:s]))
            toks.append(Tree(typ, text[s:e].split()))
            i = e
        toks.extend(word_tokenize(text[i:]))
        yield toks

    else:
        raise ValueError('bad fmt value')
# This probably belongs in a more general-purpose location (as does
# the parse_to_tagged function).
def cmp_chunks(correct, guessed):
    correct = NEChunkParser._parse_to_tagged(correct)
    guessed = NEChunkParser._parse_to_tagged(guessed)
    ellipsis = False
    for (w, ct), (w, gt) in zip(correct, guessed):
        if ct == gt == 'O':
            if not ellipsis:
                print(" %-15s %-15s %s" % (ct, gt, w))
                print(' %-15s %-15s %s' % ('...', '...', '...'))
                ellipsis = True
        else:
            ellipsis = False
            print(" %-15s %-15s %s" % (ct, gt, w))
def build_model(fmt='binary'):
    print('Loading training data...')
    train_paths = [find('corpora/ace_data/ace.dev'),
                   find('corpora/ace_data/ace.heldout'),
                   find('corpora/ace_data/bbn.dev'),
                   find('corpora/ace_data/muc.dev')]
    train_trees = load_ace_data(train_paths, fmt)
    train_data = [postag_tree(t) for t in train_trees]
    print('Training...')
    cp = NEChunkParser(train_data)
    del train_data

    print('Loading eval data...')
    eval_paths = [find('corpora/ace_data/ace.eval')]
    eval_trees = load_ace_data(eval_paths, fmt)
    eval_data = [postag_tree(t) for t in eval_trees]

    print('Evaluating...')
    chunkscore = ChunkScore()
    for i, correct in enumerate(eval_data):
        guess = cp.parse(correct.leaves())
        chunkscore.score(correct, guess)
        if i < 3: cmp_chunks(correct, guess)
    print(chunkscore)

    outfilename = '/tmp/ne_chunker_%s.pickle' % fmt
    print('Saving chunker to %s...' % outfilename)
    with open(outfilename, 'wb') as outfile:
        pickle.dump(cp, outfile, -1)

    return cp
if __name__ == '__main__':
    # Make sure that the pickled object has the right class name:
    from nltk.chunk.named_entity import build_model
    build_model('binary')
    build_model('multiclass')
| gpl-3.0 | -490,737,461,081,699,840 | 31.716012 | 74 | 0.523746 | false |
Dylan-halls/Web-Stalker | urlsnuffler.py | 1 | 8365 | #!/usr/bin/env python3
from __future__ import print_function
import logging, socket, gtk, webkit, gobject, multiprocessing, sys, scapy_http.http, warnings, logger
logging.getLogger("scapy.runtime").setLevel(logging.ERROR)
warnings.filterwarnings("ignore")
from scapy.all import sniff
class Sniffer(object):
    def __init__(self):
        global log
        log = logger.Logger()
        log.info('Sniffer is starting up')
        with open('Stalker.log', 'w') as file:
            file.write('')
            file.close()
        log.info('Wiped previous session')

    def pkt_handle(self, pkt):
        if pkt.haslayer(scapy_http.http.HTTPRequest):
            log.info('Caught http request')
            http = pkt[scapy_http.http.HTTPRequest]
            file = open('Stalker.log', 'a')
            http = str(http).splitlines()
            x = -1
            urls = []
            for i in http:
                file.write("\nHTTP-Type: HTTP Request\n")
                file.write("Source-Address: {}\n".format(pkt.src))
                file.write("Destination-Address: {}\n".format(pkt.dst))
                file.write("Destination_Port: {}\n".format(pkt.dport))
                file.write("Source_Port: {}\n".format(pkt.sport))
                x += 1
                try:
                    try:
                        if 'GET' in http[x]:
                            g = http[x]
                            if 'HTTP' in g:
                                hh = True
                            file.write(g+'\n')
                        if 'POST' in http[x]:
                            file.write(http[x]+'\n')
                            file.write('Load: '+pkt.load+'\n')
                        if 'Host:' in http[x]:
                            h = http[x].replace("Host:", '').replace(" ", '')
                            oh = h
                            file.write('Host: '+h+'\n')
                            if hh == True:
                                h = 'http://'+h
                        if 'User-Agent:' in http[x]:
                            u = http[x].replace("User-Agent:", '')
                            file.write('User-Agent: '+u+'\n')
                        if 'Referer:' in http[x]:
                            r = http[x].replace("Referer:", '')
                            file.write('Referer: '+r+'\n')
                        if 'Cookie:' in http[x]:
                            c = http[x].replace("Cookie:", '')
                            file.write('Cookie: '+c+'\n')
                        try:
                            r = r.replace(" ", '')
                            print("\""+h+g[4:]+"\"","-","\"{0}\" -{1} - {2}".format(oh, u, r))
                        except UnboundLocalError: pass
                    except UnboundLocalError: pass
                except IndexError: log.warn('Unordered Packet')
                if 'Cookie:' in http[x]:
                    try:
                        c = http[x].replace("Cookie:", '')
                        print("\""+h+g[4:]+"\"","-","\"{0}\" -{1} - {2} -\033[1;33m{3}\033[00m".format(oh, u, r, c))
                    except UnboundLocalError: pass
        elif pkt.haslayer(scapy_http.http.HTTPResponse):
            log.info('http response caught')
            http = pkt[scapy_http.http.HTTPResponse]
            file = open('Stalker.log', 'a')
            log.info('logging response packet')
            http = str(http).splitlines()
            x = -1
            urls = []
            for i in http:
                file.write("HTTP-Type: HTTP Response\n")
                file.write("Source-Address: {}\n".format(pkt.src))
                file.write("Destination-Address: {}\n".format(pkt.dst))
                file.write("Destination_Port: {}\n".format(pkt.dport))
                file.write("Source_Port: {}\n".format(pkt.sport))
                x += 1
                if "HTTP/1.1 " in http[x]:
                    sl = http[x]
                    file.write(http[x])
                if "Age:" in http[x]:
                    age = http[x]
                    file.write(http[x]+'\n')
                if "ETag" in http[x]:
                    et = http[x]
                    file.write(http[x]+'\n')
                if "Location" in http[x]:
                    loc = http[x]
                    file.write(http[x]+'\n')
                if "Proxy-Authenticate" in http[x]:
                    pa = http[x]
                    file.write(http[x]+'\n')
                if "Retry-After" in http[x]:
                    ra = http[x]
                    file.write(http[x]+'\n')
                if "Server" in http[x]:
                    s = http[x]
                    file.write(http[x]+'\n')
                if "Vary" in http[x]:
                    v = http[x]
                    file.write(http[x]+'\n')
                if "WWW-Authenticate" in http[x]:
                    wwwa = http[x]
                    file.write(http[x]+'\n')
                if "Cache-Control" in http[x]:
                    cc = http[x]
                    file.write(http[x]+'\n')
                if "Connection" in http[x]:
                    conn = http[x]
                    file.write(http[x]+'\n')
                if "Date: " in http[x]:
                    dat = http[x]
                    file.write(http[x]+'\n')
                if "Pragma" in http[x]:
                    pra = http[x]
                    file.write(http[x]+'\n')
                if "Trailer" in http[x]:
                    tra = http[x]
                    file.write(http[x]+'\n')
                if "Transfer-Encoding" in http[x]:
                    te = http[x]
                    file.write(http[x]+'\n')
                if "Upgrade" in http[x]:
                    upg = http[x]
                    file.write(http[x]+'\n')
                if "Via" in http[x]:
                    via = http[x]
                    file.write(http[x]+'\n')
                if "Warning" in http[x]:
                    warn = http[x]
                    file.write(http[x]+'\n')
                if "Keep-Alive" in http[x]:
                    ka = http[x]
                    file.write(http[x]+'\n')
                if "Allow" in http[x]:
                    al = http[x]
                    file.write(http[x]+'\n')
                if "Content-Encoding" in http[x]:
                    coe = http[x]
                    file.write(http[x]+'\n')
                if "Content-Language" in http[x]:
                    col = http[x]
                    file.write(http[x]+'\n')
                if "Content-Length" in http[x]:
                    cole = http[x]
                    file.write(http[x]+'\n')
                if "Content-Location" in http[x]:
                    colo = http[x]
                    file.write(http[x]+'\n')
                if "Content-MD5" in http[x]:
                    comd = http[x]
                    file.write(http[x]+'\n')
                if "Content-Range" in http[x]:
                    cora = http[x]
                    file.write(http[x]+'\n')
                if "Content-Type" in http[x]:
                    coty = http[x]
                    file.write(http[x]+'\n')
                if "Expires" in http[x]:
                    ex = http[x]
                    file.write(http[x]+'\n')
                if "Last-Modified" in http[x]:
                    lamo = http[x]
                    file.write(http[x]+'\n')
                if "Headers" in http[x]:
                    hea = http[x]
                    file.write(http[x]+'\n')
                if "Additional-Headers" in http[x]:
                    adhe = http[x]
                    file.write(http[x]+'\n')
                file.write('\n')
                try:
                    #31
                    #26
                    print("{} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {} - {}".format(sl, age, et, loc, pa, ra, s, v, wwwa, cc, conn, dat, pra, tra, te, upg, via, warn, ka, al, coe, col, cole, colo, comd, cora, coty, ex, lamo, hea, adhe))
                except UnboundLocalError: pass
            log.info('found: ' + ''.join(urls) + ' urls!')
def main():
    sn = Sniffer()
    pkt = sniff(prn=sn.pkt_handle)

if __name__ == "__main__":
    main()
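
# Usage note (added for clarity; not part of the original script): packet
# capture needs root privileges, so the sniffer is typically started with
# something like `sudo python urlsnuffler.py`. If only plain HTTP traffic is
# of interest, the capture could be narrowed with scapy's standard sniff()
# keywords, e.g.:
#
#     pkt = sniff(prn=sn.pkt_handle, filter="tcp port 80", store=0)
#
# The specific filter string is an assumed example, not something the original
# author specified.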
| mit | 1,510,432,372,378,413,000 | 41.902564 | 355 | 0.375254 | false |