Dataset schema (each dataset row is one source file):

column | type | range / distinct values
---|---|---
repo_name | string | lengths 5-100
path | string | lengths 4-299
copies | string | 990 classes
size | string | lengths 4-7
content | string | lengths 666-1.03M
license | string | 15 classes
hash | int64 | -9,223,351,895,964,839,000 to 9,223,297,778B
line_mean | float64 | 3.17-100
line_max | int64 | 7-1k
alpha_frac | float64 | 0.25-0.98
autogenerated | bool | 1 class
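Read as a table, each row pairs a file's repository metadata with its full source text. A hypothetical loading sketch follows; the `datasets` JSON loader is real, but the file name `code_dump.jsonl` is a placeholder, not the actual source of this dump:

```python
# Hypothetical sketch: load a dump with the schema above and inspect one row.
from datasets import load_dataset

ds = load_dataset("json", data_files="code_dump.jsonl", split="train")  # placeholder path
row = ds[0]
print(row["repo_name"], row["path"], row["license"], row["size"])
print(row["content"][:80])  # first characters of the stored source file
```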
repo_name: mjbrewer/testindex | path: magnum/tests/unit/common/test_safeutils.py | copies: 17 | size: 3947

# Copyright 2011 Justin Santa Barbara
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
from magnum.common import safe_utils
from magnum.tests import base
class GetCallArgsTestCase(base.BaseTestCase):
def _test_func(self, instance, red=None, blue=None):
pass
def test_all_kwargs(self):
args = ()
kwargs = {'instance': {'uuid': 1}, 'red': 3, 'blue': 4}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertEqual(4, callargs['blue'])
def test_all_args(self):
args = ({'uuid': 1}, 3, 4)
kwargs = {}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertEqual(4, callargs['blue'])
def test_mixed_args(self):
args = ({'uuid': 1}, 3)
kwargs = {'blue': 4}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertEqual(4, callargs['blue'])
def test_partial_kwargs(self):
args = ()
kwargs = {'instance': {'uuid': 1}, 'red': 3}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertIsNone(callargs['blue'])
def test_partial_args(self):
args = ({'uuid': 1}, 3)
kwargs = {}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
# implicit self counts as an arg
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertIsNone(callargs['blue'])
def test_partial_mixed_args(self):
args = (3,)
kwargs = {'instance': {'uuid': 1}}
callargs = safe_utils.getcallargs(self._test_func, *args, **kwargs)
self.assertEqual(4, len(callargs))
self.assertIn('instance', callargs)
self.assertEqual({'uuid': 1}, callargs['instance'])
self.assertIn('red', callargs)
self.assertEqual(3, callargs['red'])
self.assertIn('blue', callargs)
self.assertIsNone(callargs['blue'])
license: apache-2.0 | hash: -6,538,411,188,926,366,000 | line_mean: 38.868687 | line_max: 78 | alpha_frac: 0.617178 | autogenerated: false
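The file above exercises magnum's safe_utils.getcallargs, checking that the implicit self of a bound method is included in the returned mapping. Below is a minimal sketch of the same behaviour using the standard library's inspect.getcallargs, which is assumed here to act like magnum's helper for fully-specified calls (the magnum variant is presumably more tolerant of missing arguments); the Demo class is purely illustrative:

```python
# Minimal sketch with inspect.getcallargs; Demo is an illustrative class,
# not part of the magnum test suite.
import inspect

class Demo:
    def _test_func(self, instance, red=None, blue=None):
        pass

d = Demo()
callargs = inspect.getcallargs(d._test_func, {'uuid': 1}, 3, blue=4)
# The bound instance is folded in under 'self', so four entries come back.
assert sorted(callargs) == ['blue', 'instance', 'red', 'self']
assert callargs['red'] == 3 and callargs['blue'] == 4
```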
repo_name: michalliu/OpenWrt-Firefly-Libraries | path: staging_dir/target-mipsel_1004kc+dsp_uClibc-0.9.33.2/usr/lib/python3.4/test/test_io.py | copies: 8 | size: 129855

"""Unit tests for the io module."""
# Tests of io are scattered over the test suite:
# * test_bufio - tests file buffering
# * test_memoryio - tests BytesIO and StringIO
# * test_fileio - tests FileIO
# * test_file - tests the file interface
# * test_io - tests everything else in the io module
# * test_univnewlines - tests universal newline support
# * test_largefile - tests operations on a file greater than 2**32 bytes
# (only enabled with -ulargefile)
################################################################################
# ATTENTION TEST WRITERS!!!
################################################################################
# When writing tests for io, it's important to test both the C and Python
# implementations. This is usually done by writing a base test that refers to
# the type it is testing as an attribute. Then it provides custom subclasses to
# test both implementations. This file has lots of examples.
################################################################################
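# For example, a minimal illustration of that pattern (an illustrative sketch
# only, not one of the tests in this file; the class names are hypothetical):
#
#     class ExampleBytesIOTest(unittest.TestCase):
#         def test_roundtrip(self):
#             # self.BytesIO is supplied by the concrete subclass below
#             buf = self.BytesIO(b"data")
#             self.assertEqual(buf.read(), b"data")
#
#     class CExampleBytesIOTest(ExampleBytesIOTest):
#         BytesIO = io.BytesIO        # C implementation
#
#     class PyExampleBytesIOTest(ExampleBytesIOTest):
#         BytesIO = pyio.BytesIO      # pure-Python implementation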
import abc
import array
import errno
import locale
import os
import pickle
import random
import signal
import sys
import time
import unittest
import warnings
import weakref
from collections import deque, UserList
from itertools import cycle, count
from test import support
from test.script_helper import assert_python_ok
import codecs
import io # C implementation of io
import _pyio as pyio # Python implementation of io
try:
import threading
except ImportError:
threading = None
try:
import fcntl
except ImportError:
fcntl = None
def _default_chunk_size():
"""Get the default TextIOWrapper chunk size"""
with open(__file__, "r", encoding="latin-1") as f:
return f._CHUNK_SIZE
class MockRawIOWithoutRead:
"""A RawIO implementation without read(), so as to exercise the default
RawIO.read() which calls readinto()."""
def __init__(self, read_stack=()):
self._read_stack = list(read_stack)
self._write_stack = []
self._reads = 0
self._extraneous_reads = 0
def write(self, b):
self._write_stack.append(bytes(b))
return len(b)
def writable(self):
return True
def fileno(self):
return 42
def readable(self):
return True
def seekable(self):
return True
def seek(self, pos, whence):
return 0 # wrong but we gotta return something
def tell(self):
return 0 # same comment as above
def readinto(self, buf):
self._reads += 1
max_len = len(buf)
try:
data = self._read_stack[0]
except IndexError:
self._extraneous_reads += 1
return 0
if data is None:
del self._read_stack[0]
return None
n = len(data)
if len(data) <= max_len:
del self._read_stack[0]
buf[:n] = data
return n
else:
buf[:] = data[:max_len]
self._read_stack[0] = data[max_len:]
return max_len
def truncate(self, pos=None):
return pos
class CMockRawIOWithoutRead(MockRawIOWithoutRead, io.RawIOBase):
pass
class PyMockRawIOWithoutRead(MockRawIOWithoutRead, pyio.RawIOBase):
pass
class MockRawIO(MockRawIOWithoutRead):
def read(self, n=None):
self._reads += 1
try:
return self._read_stack.pop(0)
except:
self._extraneous_reads += 1
return b""
class CMockRawIO(MockRawIO, io.RawIOBase):
pass
class PyMockRawIO(MockRawIO, pyio.RawIOBase):
pass
class MisbehavedRawIO(MockRawIO):
def write(self, b):
return super().write(b) * 2
def read(self, n=None):
return super().read(n) * 2
def seek(self, pos, whence):
return -123
def tell(self):
return -456
def readinto(self, buf):
super().readinto(buf)
return len(buf) * 5
class CMisbehavedRawIO(MisbehavedRawIO, io.RawIOBase):
pass
class PyMisbehavedRawIO(MisbehavedRawIO, pyio.RawIOBase):
pass
class CloseFailureIO(MockRawIO):
closed = 0
def close(self):
if not self.closed:
self.closed = 1
raise OSError
class CCloseFailureIO(CloseFailureIO, io.RawIOBase):
pass
class PyCloseFailureIO(CloseFailureIO, pyio.RawIOBase):
pass
class MockFileIO:
def __init__(self, data):
self.read_history = []
super().__init__(data)
def read(self, n=None):
res = super().read(n)
self.read_history.append(None if res is None else len(res))
return res
def readinto(self, b):
res = super().readinto(b)
self.read_history.append(res)
return res
class CMockFileIO(MockFileIO, io.BytesIO):
pass
class PyMockFileIO(MockFileIO, pyio.BytesIO):
pass
class MockUnseekableIO:
def seekable(self):
return False
def seek(self, *args):
raise self.UnsupportedOperation("not seekable")
def tell(self, *args):
raise self.UnsupportedOperation("not seekable")
class CMockUnseekableIO(MockUnseekableIO, io.BytesIO):
UnsupportedOperation = io.UnsupportedOperation
class PyMockUnseekableIO(MockUnseekableIO, pyio.BytesIO):
UnsupportedOperation = pyio.UnsupportedOperation
class MockNonBlockWriterIO:
def __init__(self):
self._write_stack = []
self._blocker_char = None
def pop_written(self):
s = b"".join(self._write_stack)
self._write_stack[:] = []
return s
def block_on(self, char):
"""Block when a given char is encountered."""
self._blocker_char = char
def readable(self):
return True
def seekable(self):
return True
def writable(self):
return True
def write(self, b):
b = bytes(b)
n = -1
if self._blocker_char:
try:
n = b.index(self._blocker_char)
except ValueError:
pass
else:
if n > 0:
# write data up to the first blocker
self._write_stack.append(b[:n])
return n
else:
# cancel blocker and indicate would block
self._blocker_char = None
return None
self._write_stack.append(b)
return len(b)
class CMockNonBlockWriterIO(MockNonBlockWriterIO, io.RawIOBase):
BlockingIOError = io.BlockingIOError
class PyMockNonBlockWriterIO(MockNonBlockWriterIO, pyio.RawIOBase):
BlockingIOError = pyio.BlockingIOError
class IOTest(unittest.TestCase):
def setUp(self):
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def write_ops(self, f):
self.assertEqual(f.write(b"blah."), 5)
f.truncate(0)
self.assertEqual(f.tell(), 5)
f.seek(0)
self.assertEqual(f.write(b"blah."), 5)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"Hello."), 6)
self.assertEqual(f.tell(), 6)
self.assertEqual(f.seek(-1, 1), 5)
self.assertEqual(f.tell(), 5)
self.assertEqual(f.write(bytearray(b" world\n\n\n")), 9)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.write(b"h"), 1)
self.assertEqual(f.seek(-1, 2), 13)
self.assertEqual(f.tell(), 13)
self.assertEqual(f.truncate(12), 12)
self.assertEqual(f.tell(), 13)
self.assertRaises(TypeError, f.seek, 0.0)
def read_ops(self, f, buffered=False):
data = f.read(5)
self.assertEqual(data, b"hello")
data = bytearray(data)
self.assertEqual(f.readinto(data), 5)
self.assertEqual(data, b" worl")
self.assertEqual(f.readinto(data), 2)
self.assertEqual(len(data), 5)
self.assertEqual(data[:2], b"d\n")
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(20), b"hello world\n")
self.assertEqual(f.read(1), b"")
self.assertEqual(f.readinto(bytearray(b"x")), 0)
self.assertEqual(f.seek(-6, 2), 6)
self.assertEqual(f.read(5), b"world")
self.assertEqual(f.read(0), b"")
self.assertEqual(f.readinto(bytearray()), 0)
self.assertEqual(f.seek(-6, 1), 5)
self.assertEqual(f.read(5), b" worl")
self.assertEqual(f.tell(), 10)
self.assertRaises(TypeError, f.seek, 0.0)
if buffered:
f.seek(0)
self.assertEqual(f.read(), b"hello world\n")
f.seek(6)
self.assertEqual(f.read(), b"world\n")
self.assertEqual(f.read(), b"")
LARGE = 2**31
def large_file_ops(self, f):
assert f.readable()
assert f.writable()
self.assertEqual(f.seek(self.LARGE), self.LARGE)
self.assertEqual(f.tell(), self.LARGE)
self.assertEqual(f.write(b"xxx"), 3)
self.assertEqual(f.tell(), self.LARGE + 3)
self.assertEqual(f.seek(-1, 1), self.LARGE + 2)
self.assertEqual(f.truncate(), self.LARGE + 2)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 2)
self.assertEqual(f.truncate(self.LARGE + 1), self.LARGE + 1)
self.assertEqual(f.tell(), self.LARGE + 2)
self.assertEqual(f.seek(0, 2), self.LARGE + 1)
self.assertEqual(f.seek(-1, 2), self.LARGE)
self.assertEqual(f.read(2), b"x")
def test_invalid_operations(self):
# Try writing on a file opened in read mode and vice-versa.
exc = self.UnsupportedOperation
for mode in ("w", "wb"):
with self.open(support.TESTFN, mode) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "wb", buffering=0) as fp:
self.assertRaises(exc, fp.read)
self.assertRaises(exc, fp.readline)
with self.open(support.TESTFN, "rb", buffering=0) as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "rb") as fp:
self.assertRaises(exc, fp.write, b"blah")
self.assertRaises(exc, fp.writelines, [b"blah\n"])
with self.open(support.TESTFN, "r") as fp:
self.assertRaises(exc, fp.write, "blah")
self.assertRaises(exc, fp.writelines, ["blah\n"])
# Non-zero seeking from current or end pos
self.assertRaises(exc, fp.seek, 1, self.SEEK_CUR)
self.assertRaises(exc, fp.seek, -1, self.SEEK_END)
def test_open_handles_NUL_chars(self):
fn_with_NUL = 'foo\0bar'
self.assertRaises(TypeError, self.open, fn_with_NUL, 'w')
self.assertRaises(TypeError, self.open, bytes(fn_with_NUL, 'ascii'), 'w')
def test_raw_file_io(self):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f)
def test_buffered_file_io(self):
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.readable(), False)
self.assertEqual(f.writable(), True)
self.assertEqual(f.seekable(), True)
self.write_ops(f)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readable(), True)
self.assertEqual(f.writable(), False)
self.assertEqual(f.seekable(), True)
self.read_ops(f, True)
def test_readline(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"abc\ndef\nxyzzy\nfoo\x00bar\nanother line")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.readline(), b"abc\n")
self.assertEqual(f.readline(10), b"def\n")
self.assertEqual(f.readline(2), b"xy")
self.assertEqual(f.readline(4), b"zzy\n")
self.assertEqual(f.readline(), b"foo\x00bar\n")
self.assertEqual(f.readline(None), b"another line")
self.assertRaises(TypeError, f.readline, 5.3)
with self.open(support.TESTFN, "r") as f:
self.assertRaises(TypeError, f.readline, 5.3)
def test_raw_bytes_io(self):
f = self.BytesIO()
self.write_ops(f)
data = f.getvalue()
self.assertEqual(data, b"hello world\n")
f = self.BytesIO(data)
self.read_ops(f, True)
def test_large_file_ops(self):
# On Windows and Mac OS X this test consumes large resources; it takes
# a long time to build the >2GB file and takes >2GB of disk space,
# so the resource must be enabled to run this test.
if sys.platform[:3] == 'win' or sys.platform == 'darwin':
support.requires(
'largefile',
'test requires %s bytes and a long time to run' % self.LARGE)
with self.open(support.TESTFN, "w+b", 0) as f:
self.large_file_ops(f)
with self.open(support.TESTFN, "w+b") as f:
self.large_file_ops(f)
def test_with_open(self):
for bufsize in (0, 1, 100):
f = None
with self.open(support.TESTFN, "wb", bufsize) as f:
f.write(b"xxx")
self.assertEqual(f.closed, True)
f = None
try:
with self.open(support.TESTFN, "wb", bufsize) as f:
1/0
except ZeroDivisionError:
self.assertEqual(f.closed, True)
else:
self.fail("1/0 didn't raise an exception")
# issue 5008
def test_append_mode_tell(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "ab", buffering=0) as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "ab") as f:
self.assertEqual(f.tell(), 3)
with self.open(support.TESTFN, "a") as f:
self.assertTrue(f.tell() > 0)
def test_destructor(self):
record = []
class MyFileIO(self.FileIO):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
with support.check_warnings(('', ResourceWarning)):
f = MyFileIO(support.TESTFN, "wb")
f.write(b"xxx")
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def _check_base_destructor(self, base):
record = []
class MyIO(base):
def __init__(self):
# This exercises the availability of attributes on object
# destruction.
# (in the C version, close() is called by the tp_dealloc
# function, not by __del__)
self.on_del = 1
self.on_close = 2
self.on_flush = 3
def __del__(self):
record.append(self.on_del)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(self.on_close)
super().close()
def flush(self):
record.append(self.on_flush)
super().flush()
f = MyIO()
del f
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_IOBase_destructor(self):
self._check_base_destructor(self.IOBase)
def test_RawIOBase_destructor(self):
self._check_base_destructor(self.RawIOBase)
def test_BufferedIOBase_destructor(self):
self._check_base_destructor(self.BufferedIOBase)
def test_TextIOBase_destructor(self):
self._check_base_destructor(self.TextIOBase)
def test_close_flushes(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"xxx")
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"xxx")
def test_array_writes(self):
a = array.array('i', range(10))
n = len(a.tobytes())
with self.open(support.TESTFN, "wb", 0) as f:
self.assertEqual(f.write(a), n)
with self.open(support.TESTFN, "wb") as f:
self.assertEqual(f.write(a), n)
def test_closefd(self):
self.assertRaises(ValueError, self.open, support.TESTFN, 'w',
closefd=False)
def test_read_closed(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
with self.open(support.TESTFN, "r") as f:
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.read(), "egg\n")
file.seek(0)
file.close()
self.assertRaises(ValueError, file.read)
def test_no_closefd_with_filename(self):
# can't use closefd in combination with a file name
self.assertRaises(ValueError, self.open, support.TESTFN, "r", closefd=False)
def test_closefd_attr(self):
with self.open(support.TESTFN, "wb") as f:
f.write(b"egg\n")
with self.open(support.TESTFN, "r") as f:
self.assertEqual(f.buffer.raw.closefd, True)
file = self.open(f.fileno(), "r", closefd=False)
self.assertEqual(file.buffer.raw.closefd, False)
def test_garbage_collection(self):
# FileIO objects are collected, and collecting them flushes
# all data to disk.
with support.check_warnings(('', ResourceWarning)):
f = self.FileIO(support.TESTFN, "wb")
f.write(b"abcxxx")
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"abcxxx")
def test_unbounded_file(self):
# Issue #1174606: reading from an unbounded stream such as /dev/zero.
zero = "/dev/zero"
if not os.path.exists(zero):
self.skipTest("{0} does not exist".format(zero))
if sys.maxsize > 0x7FFFFFFF:
self.skipTest("test can only run in a 32-bit address space")
if support.real_max_memuse < support._2G:
self.skipTest("test requires at least 2GB of memory")
with self.open(zero, "rb", buffering=0) as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "rb") as f:
self.assertRaises(OverflowError, f.read)
with self.open(zero, "r") as f:
self.assertRaises(OverflowError, f.read)
def test_flush_error_on_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
def bad_flush():
raise OSError()
f.flush = bad_flush
self.assertRaises(OSError, f.close) # exception not swallowed
self.assertTrue(f.closed)
def test_multi_close(self):
f = self.open(support.TESTFN, "wb", buffering=0)
f.close()
f.close()
f.close()
self.assertRaises(ValueError, f.flush)
def test_RawIOBase_read(self):
# Exercise the default RawIOBase.read() implementation (which calls
# readinto() internally).
rawio = self.MockRawIOWithoutRead((b"abc", b"d", None, b"efg", None))
self.assertEqual(rawio.read(2), b"ab")
self.assertEqual(rawio.read(2), b"c")
self.assertEqual(rawio.read(2), b"d")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"ef")
self.assertEqual(rawio.read(2), b"g")
self.assertEqual(rawio.read(2), None)
self.assertEqual(rawio.read(2), b"")
def test_types_have_dict(self):
test = (
self.IOBase(),
self.RawIOBase(),
self.TextIOBase(),
self.StringIO(),
self.BytesIO()
)
for obj in test:
self.assertTrue(hasattr(obj, "__dict__"))
def test_opener(self):
with self.open(support.TESTFN, "w") as f:
f.write("egg\n")
fd = os.open(support.TESTFN, os.O_RDONLY)
def opener(path, flags):
return fd
with self.open("non-existent", "r", opener=opener) as f:
self.assertEqual(f.read(), "egg\n")
def test_fileio_closefd(self):
# Issue #4841
with self.open(__file__, 'rb') as f1, \
self.open(__file__, 'rb') as f2:
fileio = self.FileIO(f1.fileno(), closefd=False)
# .__init__() must not close f1
fileio.__init__(f2.fileno(), closefd=False)
f1.readline()
# .close() must not close f2
fileio.close()
f2.readline()
def test_nonbuffered_textio(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', buffering=0)
support.gc_collect()
self.assertEqual(recorded, [])
def test_invalid_newline(self):
with warnings.catch_warnings(record=True) as recorded:
with self.assertRaises(ValueError):
self.open(support.TESTFN, 'w', newline='invalid')
support.gc_collect()
self.assertEqual(recorded, [])
class CIOTest(IOTest):
def test_IOBase_finalize(self):
# Issue #12149: segmentation fault on _PyIOBase_finalize when both a
# class which inherits IOBase and an object of this class are caught
# in a reference cycle and close() is already in the method cache.
class MyIO(self.IOBase):
def close(self):
pass
# create an instance to populate the method cache
MyIO()
obj = MyIO()
obj.obj = obj
wr = weakref.ref(obj)
del MyIO
del obj
support.gc_collect()
self.assertTrue(wr() is None, wr)
class PyIOTest(IOTest):
pass
class CommonBufferedTests:
# Tests common to BufferedReader, BufferedWriter and BufferedRandom
def test_detach(self):
raw = self.MockRawIO()
buf = self.tp(raw)
self.assertIs(buf.detach(), raw)
self.assertRaises(ValueError, buf.detach)
repr(buf) # Should still work
def test_fileno(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertEqual(42, bufio.fileno())
@unittest.skip('test having existential crisis')
def test_no_fileno(self):
# XXX will we always have fileno() function? If so, kill
# this test. Else, write it.
pass
def test_invalid_args(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
# Invalid whence
self.assertRaises(ValueError, bufio.seek, 0, -1)
self.assertRaises(ValueError, bufio.seek, 0, 9)
def test_override_destructor(self):
tp = self.tp
record = []
class MyBufferedIO(tp):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
rawio = self.MockRawIO()
bufio = MyBufferedIO(rawio)
writable = bufio.writable()
del bufio
support.gc_collect()
if writable:
self.assertEqual(record, [1, 2, 3])
else:
self.assertEqual(record, [1, 2])
def test_context_manager(self):
# Test usability as a context manager
rawio = self.MockRawIO()
bufio = self.tp(rawio)
def _with():
with bufio:
pass
_with()
# bufio should now be closed, and using it a second time should raise
# a ValueError.
self.assertRaises(ValueError, _with)
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.tp(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
def test_repr(self):
raw = self.MockRawIO()
b = self.tp(raw)
clsname = "%s.%s" % (self.tp.__module__, self.tp.__name__)
self.assertEqual(repr(b), "<%s>" % clsname)
raw.name = "dummy"
self.assertEqual(repr(b), "<%s name='dummy'>" % clsname)
raw.name = b"dummy"
self.assertEqual(repr(b), "<%s name=b'dummy'>" % clsname)
def test_flush_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError()
raw.flush = bad_flush
b = self.tp(raw)
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
def test_close_error_on_close(self):
raw = self.MockRawIO()
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
b.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(b.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
raw = self.MockRawIO()
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
raw.close = bad_close
b = self.tp(raw)
b.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
b.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(b.closed)
def test_multi_close(self):
raw = self.MockRawIO()
b = self.tp(raw)
b.close()
b.close()
b.close()
self.assertRaises(ValueError, b.flush)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
def test_readonly_attributes(self):
raw = self.MockRawIO()
buf = self.tp(raw)
x = self.MockRawIO()
with self.assertRaises(AttributeError):
buf.raw = x
class SizeofTest:
@support.cpython_only
def test_sizeof(self):
bufsize1 = 4096
bufsize2 = 8192
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize1)
size = sys.getsizeof(bufio) - bufsize1
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize2)
self.assertEqual(sys.getsizeof(bufio), size + bufsize2)
@support.cpython_only
def test_buffer_freeing(self):
bufsize = 4096
rawio = self.MockRawIO()
bufio = self.tp(rawio, buffer_size=bufsize)
size = sys.getsizeof(bufio) - bufsize
bufio.close()
self.assertEqual(sys.getsizeof(bufio), size)
class BufferedReaderTest(unittest.TestCase, CommonBufferedTests):
read_mode = "rb"
def test_constructor(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(b"abc", bufio.read())
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
rawio = self.MockRawIO([b"abc"])
bufio.__init__(rawio)
self.assertEqual(b"abc", bufio.read())
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.read, 0)
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.read(0), b'')
def test_read(self):
for arg in (None, 7):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(arg))
# Invalid args
self.assertRaises(ValueError, bufio.read, -2)
def test_read1(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"a", bufio.read(1))
self.assertEqual(b"b", bufio.read1(1))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"c", bufio.read1(100))
self.assertEqual(rawio._reads, 1)
self.assertEqual(b"d", bufio.read1(100))
self.assertEqual(rawio._reads, 2)
self.assertEqual(b"efg", bufio.read1(100))
self.assertEqual(rawio._reads, 3)
self.assertEqual(b"", bufio.read1(100))
self.assertEqual(rawio._reads, 4)
# Invalid args
self.assertRaises(ValueError, bufio.read1, -1)
def test_readinto(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
b = bytearray(2)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"cd")
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ef")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"gf")
self.assertEqual(bufio.readinto(b), 0)
self.assertEqual(b, b"gf")
rawio = self.MockRawIO((b"abc", None))
bufio = self.tp(rawio)
self.assertEqual(bufio.readinto(b), 2)
self.assertEqual(b, b"ab")
self.assertEqual(bufio.readinto(b), 1)
self.assertEqual(b, b"cb")
def test_readlines(self):
def bufio():
rawio = self.MockRawIO((b"abc\n", b"d\n", b"ef"))
return self.tp(rawio)
self.assertEqual(bufio().readlines(), [b"abc\n", b"d\n", b"ef"])
self.assertEqual(bufio().readlines(5), [b"abc\n", b"d\n"])
self.assertEqual(bufio().readlines(None), [b"abc\n", b"d\n", b"ef"])
def test_buffering(self):
data = b"abcdefghi"
dlen = len(data)
tests = [
[ 100, [ 3, 1, 4, 8 ], [ dlen, 0 ] ],
[ 100, [ 3, 3, 3], [ dlen ] ],
[ 4, [ 1, 2, 4, 2 ], [ 4, 4, 1 ] ],
]
for bufsize, buf_read_sizes, raw_read_sizes in tests:
rawio = self.MockFileIO(data)
bufio = self.tp(rawio, buffer_size=bufsize)
pos = 0
for nbytes in buf_read_sizes:
self.assertEqual(bufio.read(nbytes), data[pos:pos+nbytes])
pos += nbytes
# this is mildly implementation-dependent
self.assertEqual(rawio.read_history, raw_read_sizes)
def test_read_non_blocking(self):
# Inject some None's in there to simulate EWOULDBLOCK
rawio = self.MockRawIO((b"abc", b"d", None, b"efg", None, None, None))
bufio = self.tp(rawio)
self.assertEqual(b"abcd", bufio.read(6))
self.assertEqual(b"e", bufio.read(1))
self.assertEqual(b"fg", bufio.read())
self.assertEqual(b"", bufio.peek(1))
self.assertIsNone(bufio.read())
self.assertEqual(b"", bufio.read())
rawio = self.MockRawIO((b"a", None, None))
self.assertEqual(b"a", rawio.readall())
self.assertIsNone(rawio.readall())
def test_read_past_eof(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read(9000))
def test_read_all(self):
rawio = self.MockRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertEqual(b"abcdefg", bufio.read())
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes with exactly the same number of 0's,
# 1's... 255's. This will help us check that concurrent reading
# doesn't duplicate or forget contents.
N = 1000
l = list(range(256)) * N
random.shuffle(l)
s = bytes(bytearray(l))
with self.open(support.TESTFN, "wb") as f:
f.write(s)
with self.open(support.TESTFN, self.read_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
results = []
def f():
try:
# Intra-buffer read then buffer-flushing read
for n in cycle([1, 19]):
s = bufio.read(n)
if not s:
break
# list.append() is atomic
results.append(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
s = b''.join(results)
for i in range(256):
c = bytes(bytearray([i]))
self.assertEqual(s.count(c), N)
finally:
support.unlink(support.TESTFN)
def test_unseekable(self):
bufio = self.tp(self.MockUnseekableIO(b"A" * 10))
self.assertRaises(self.UnsupportedOperation, bufio.tell)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
bufio.read(1)
self.assertRaises(self.UnsupportedOperation, bufio.seek, 0)
self.assertRaises(self.UnsupportedOperation, bufio.tell)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
def test_no_extraneous_read(self):
# Issue #9550; when the raw IO object has satisfied the read request,
# we should not issue any additional reads, otherwise it may block
# (e.g. socket).
bufsize = 16
for n in (2, bufsize - 1, bufsize, bufsize + 1, bufsize * 2):
rawio = self.MockRawIO([b"x" * n])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
# Simple case: one raw read is enough to satisfy the request.
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
# A more complex case where two raw reads are needed to satisfy
# the request.
rawio = self.MockRawIO([b"x" * (n - 1), b"x"])
bufio = self.tp(rawio, bufsize)
self.assertEqual(bufio.read(n), b"x" * n)
self.assertEqual(rawio._extraneous_reads, 0,
"failed for {}: {} != 0".format(n, rawio._extraneous_reads))
class CBufferedReaderTest(BufferedReaderTest, SizeofTest):
tp = io.BufferedReader
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO([b"abc"])
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.read)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.read)
def test_misbehaved_io_read(self):
rawio = self.MisbehavedRawIO((b"abc", b"d", b"efg"))
bufio = self.tp(rawio)
# _pyio.BufferedReader seems to implement reading differently, so that
# checking this is not so easy.
self.assertRaises(OSError, bufio.read, 10)
def test_garbage_collection(self):
# C BufferedReader objects are collected.
# The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.f = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedReader"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedReaderTest(BufferedReaderTest):
tp = pyio.BufferedReader
class BufferedWriterTest(unittest.TestCase, CommonBufferedTests):
write_mode = "wb"
def test_constructor(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
bufio.__init__(rawio)
bufio.__init__(rawio, buffer_size=1024)
bufio.__init__(rawio, buffer_size=16)
self.assertEqual(3, bufio.write(b"abc"))
bufio.flush()
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
bufio.__init__(rawio)
self.assertEqual(3, bufio.write(b"ghi"))
bufio.flush()
self.assertEqual(b"".join(rawio._write_stack), b"abcghi")
def test_uninitialized(self):
bufio = self.tp.__new__(self.tp)
del bufio
bufio = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
bufio.write, b'')
bufio.__init__(self.MockRawIO())
self.assertEqual(bufio.write(b''), 0)
def test_detach_flush(self):
raw = self.MockRawIO()
buf = self.tp(raw)
buf.write(b"howdy!")
self.assertFalse(raw._write_stack)
buf.detach()
self.assertEqual(raw._write_stack, [b"howdy!"])
def test_write(self):
# Write to the buffered IO but don't overflow the buffer.
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
self.assertFalse(writer._write_stack)
def test_write_overflow(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
contents = b"abcdefghijklmnop"
for n in range(0, len(contents), 3):
bufio.write(contents[n:n+3])
flushed = b"".join(writer._write_stack)
# At least (total - 8) bytes were implicitly flushed, perhaps more
# depending on the implementation.
self.assertTrue(flushed.startswith(contents[:-8]), flushed)
def check_writes(self, intermediate_func):
# Lots of writes, test the flushed output is as expected.
contents = bytes(range(256)) * 1000
n = 0
writer = self.MockRawIO()
bufio = self.tp(writer, 13)
# Generator of write sizes: repeat each N 15 times then proceed to N+1
def gen_sizes():
for size in count(1):
for i in range(15):
yield size
sizes = gen_sizes()
while n < len(contents):
size = min(next(sizes), len(contents) - n)
self.assertEqual(bufio.write(contents[n:n+size]), size)
intermediate_func(bufio)
n += size
bufio.flush()
self.assertEqual(contents, b"".join(writer._write_stack))
def test_writes(self):
self.check_writes(lambda bufio: None)
def test_writes_and_flushes(self):
self.check_writes(lambda bufio: bufio.flush())
def test_writes_and_seeks(self):
def _seekabs(bufio):
pos = bufio.tell()
bufio.seek(pos + 1, 0)
bufio.seek(pos - 1, 0)
bufio.seek(pos, 0)
self.check_writes(_seekabs)
def _seekrel(bufio):
pos = bufio.seek(0, 1)
bufio.seek(+1, 1)
bufio.seek(-1, 1)
bufio.seek(pos, 0)
self.check_writes(_seekrel)
def test_writes_and_truncates(self):
self.check_writes(lambda bufio: bufio.truncate(bufio.tell()))
def test_write_non_blocking(self):
raw = self.MockNonBlockWriterIO()
bufio = self.tp(raw, 8)
self.assertEqual(bufio.write(b"abcd"), 4)
self.assertEqual(bufio.write(b"efghi"), 5)
# 1 byte will be written, the rest will be buffered
raw.block_on(b"k")
self.assertEqual(bufio.write(b"jklmn"), 5)
# 8 bytes will be written, 8 will be buffered and the rest will be lost
raw.block_on(b"0")
try:
bufio.write(b"opqrwxyz0123456789")
except self.BlockingIOError as e:
written = e.characters_written
else:
self.fail("BlockingIOError should have been raised")
self.assertEqual(written, 16)
self.assertEqual(raw.pop_written(),
b"abcdefghijklmnopqrwxyz")
self.assertEqual(bufio.write(b"ABCDEFGHI"), 9)
s = raw.pop_written()
# Previously buffered bytes were flushed
self.assertTrue(s.startswith(b"01234567A"), s)
def test_write_and_rewind(self):
raw = io.BytesIO()
bufio = self.tp(raw, 4)
self.assertEqual(bufio.write(b"abcdef"), 6)
self.assertEqual(bufio.tell(), 6)
bufio.seek(0, 0)
self.assertEqual(bufio.write(b"XY"), 2)
bufio.seek(6, 0)
self.assertEqual(raw.getvalue(), b"XYcdef")
self.assertEqual(bufio.write(b"123456"), 6)
bufio.flush()
self.assertEqual(raw.getvalue(), b"XYcdef123456")
def test_flush(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
bufio.flush()
self.assertEqual(b"abc", writer._write_stack[0])
def test_writelines(self):
l = [b'ab', b'cd', b'ef']
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_userlist(self):
l = UserList([b'ab', b'cd', b'ef'])
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.writelines(l)
bufio.flush()
self.assertEqual(b''.join(writer._write_stack), b'abcdef')
def test_writelines_error(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
self.assertRaises(TypeError, bufio.writelines, [1, 2, 3])
self.assertRaises(TypeError, bufio.writelines, None)
self.assertRaises(TypeError, bufio.writelines, 'abc')
def test_destructor(self):
writer = self.MockRawIO()
bufio = self.tp(writer, 8)
bufio.write(b"abc")
del bufio
support.gc_collect()
self.assertEqual(b"abc", writer._write_stack[0])
def test_truncate(self):
# Truncate implicitly flushes the buffer.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
bufio.write(b"abcdef")
self.assertEqual(bufio.truncate(3), 3)
self.assertEqual(bufio.tell(), 6)
with self.open(support.TESTFN, "rb", buffering=0) as f:
self.assertEqual(f.read(), b"abc")
@unittest.skipUnless(threading, 'Threading required for this test.')
@support.requires_resource('cpu')
def test_threads(self):
try:
# Write out many bytes from many threads and test they were
# all flushed.
N = 1000
contents = bytes(range(256)) * N
sizes = cycle([1, 19])
n = 0
queue = deque()
while n < len(contents):
size = next(sizes)
queue.append(contents[n:n+size])
n += size
del contents
# We use a real file object because it allows us to
# exercise situations where the GIL is released before
# writing the buffer to the raw streams. This is in addition
# to concurrency issues due to switching threads in the middle
# of Python code.
with self.open(support.TESTFN, self.write_mode, buffering=0) as raw:
bufio = self.tp(raw, 8)
errors = []
def f():
try:
while True:
try:
s = queue.popleft()
except IndexError:
return
bufio.write(s)
except Exception as e:
errors.append(e)
raise
threads = [threading.Thread(target=f) for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02) # yield
for t in threads:
t.join()
self.assertFalse(errors,
"the following exceptions were caught: %r" % errors)
bufio.close()
with self.open(support.TESTFN, "rb") as f:
s = f.read()
for i in range(256):
self.assertEqual(s.count(bytes([i])), N)
finally:
support.unlink(support.TESTFN)
def test_misbehaved_io(self):
rawio = self.MisbehavedRawIO()
bufio = self.tp(rawio, 5)
self.assertRaises(OSError, bufio.seek, 0)
self.assertRaises(OSError, bufio.tell)
self.assertRaises(OSError, bufio.write, b"abcdef")
def test_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), 8, 12)
def test_write_error_on_close(self):
raw = self.MockRawIO()
def bad_write(b):
raise OSError()
raw.write = bad_write
b = self.tp(raw)
b.write(b'spam')
self.assertRaises(OSError, b.close) # exception not swallowed
self.assertTrue(b.closed)
class CBufferedWriterTest(BufferedWriterTest, SizeofTest):
tp = io.BufferedWriter
def test_constructor(self):
BufferedWriterTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_initialization(self):
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=0)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-16)
self.assertRaises(ValueError, bufio.write, b"def")
self.assertRaises(ValueError, bufio.__init__, rawio, buffer_size=-1)
self.assertRaises(ValueError, bufio.write, b"def")
def test_garbage_collection(self):
# C BufferedWriter objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends up in gc.garbage instead
with support.check_warnings(('', ResourceWarning)):
rawio = self.FileIO(support.TESTFN, "w+b")
f = self.tp(rawio)
f.write(b"123xxx")
f.x = f
wr = weakref.ref(f)
del f
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"123xxx")
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedWriter"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedWriterTest(BufferedWriterTest):
tp = pyio.BufferedWriter
class BufferedRWPairTest(unittest.TestCase):
def test_constructor(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
def test_uninitialized(self):
pair = self.tp.__new__(self.tp)
del pair
pair = self.tp.__new__(self.tp)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.read, 0)
self.assertRaisesRegex((ValueError, AttributeError),
'uninitialized|has no attribute',
pair.write, b'')
pair.__init__(self.MockRawIO(), self.MockRawIO())
self.assertEqual(pair.read(0), b'')
self.assertEqual(pair.write(b''), 0)
def test_detach(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertRaises(self.UnsupportedOperation, pair.detach)
def test_constructor_max_buffer_size_removal(self):
with self.assertRaises(TypeError):
self.tp(self.MockRawIO(), self.MockRawIO(), 8, 12)
def test_constructor_with_not_readable(self):
class NotReadable(MockRawIO):
def readable(self):
return False
self.assertRaises(OSError, self.tp, NotReadable(), self.MockRawIO())
def test_constructor_with_not_writeable(self):
class NotWriteable(MockRawIO):
def writable(self):
return False
self.assertRaises(OSError, self.tp, self.MockRawIO(), NotWriteable())
def test_read(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read(3), b"abc")
self.assertEqual(pair.read(1), b"d")
self.assertEqual(pair.read(), b"ef")
pair = self.tp(self.BytesIO(b"abc"), self.MockRawIO())
self.assertEqual(pair.read(None), b"abc")
def test_readlines(self):
pair = lambda: self.tp(self.BytesIO(b"abc\ndef\nh"), self.MockRawIO())
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(), [b"abc\n", b"def\n", b"h"])
self.assertEqual(pair().readlines(5), [b"abc\n", b"def\n"])
def test_read1(self):
# .read1() is delegated to the underlying reader object, so this test
# can be shallow.
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertEqual(pair.read1(3), b"abc")
def test_readinto(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
data = bytearray(5)
self.assertEqual(pair.readinto(data), 5)
self.assertEqual(data, b"abcde")
def test_write(self):
w = self.MockRawIO()
pair = self.tp(self.MockRawIO(), w)
pair.write(b"abc")
pair.flush()
pair.write(b"def")
pair.flush()
self.assertEqual(w._write_stack, [b"abc", b"def"])
def test_peek(self):
pair = self.tp(self.BytesIO(b"abcdef"), self.MockRawIO())
self.assertTrue(pair.peek(3).startswith(b"abc"))
self.assertEqual(pair.read(3), b"abc")
def test_readable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.readable())
def test_writeable(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertTrue(pair.writable())
def test_seekable(self):
# BufferedRWPairs are never seekable, even if their readers and writers
# are.
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.seekable())
# .flush() is delegated to the underlying writer object and has been
# tested in the test_write method.
def test_close_and_closed(self):
pair = self.tp(self.MockRawIO(), self.MockRawIO())
self.assertFalse(pair.closed)
pair.close()
self.assertTrue(pair.closed)
def test_isatty(self):
class SelectableIsAtty(MockRawIO):
def __init__(self, isatty):
MockRawIO.__init__(self)
self._isatty = isatty
def isatty(self):
return self._isatty
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(False))
self.assertFalse(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(False))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(False), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
pair = self.tp(SelectableIsAtty(True), SelectableIsAtty(True))
self.assertTrue(pair.isatty())
def test_weakref_clearing(self):
brw = self.tp(self.MockRawIO(), self.MockRawIO())
ref = weakref.ref(brw)
brw = None
ref = None # Shouldn't segfault.
class CBufferedRWPairTest(BufferedRWPairTest):
tp = io.BufferedRWPair
class PyBufferedRWPairTest(BufferedRWPairTest):
tp = pyio.BufferedRWPair
class BufferedRandomTest(BufferedReaderTest, BufferedWriterTest):
read_mode = "rb+"
write_mode = "wb+"
def test_constructor(self):
BufferedReaderTest.test_constructor(self)
BufferedWriterTest.test_constructor(self)
def test_uninitialized(self):
BufferedReaderTest.test_uninitialized(self)
BufferedWriterTest.test_uninitialized(self)
def test_read_and_write(self):
raw = self.MockRawIO((b"asdf", b"ghjk"))
rw = self.tp(raw, 8)
self.assertEqual(b"as", rw.read(2))
rw.write(b"ddd")
rw.write(b"eee")
self.assertFalse(raw._write_stack) # Buffer writes
self.assertEqual(b"ghjk", rw.read())
self.assertEqual(b"dddeee", raw._write_stack[0])
def test_seek_and_tell(self):
raw = self.BytesIO(b"asdfghjkl")
rw = self.tp(raw)
self.assertEqual(b"as", rw.read(2))
self.assertEqual(2, rw.tell())
rw.seek(0, 0)
self.assertEqual(b"asdf", rw.read(4))
rw.write(b"123f")
rw.seek(0, 0)
self.assertEqual(b"asdf123fl", rw.read())
self.assertEqual(9, rw.tell())
rw.seek(-4, 2)
self.assertEqual(5, rw.tell())
rw.seek(2, 1)
self.assertEqual(7, rw.tell())
self.assertEqual(b"fl", rw.read(11))
rw.flush()
self.assertEqual(b"asdf123fl", raw.getvalue())
self.assertRaises(TypeError, rw.seek, 0.0)
def check_flush_and_read(self, read_func):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
self.assertEqual(b"ab", read_func(bufio, 2))
bufio.write(b"12")
self.assertEqual(b"ef", read_func(bufio, 2))
self.assertEqual(6, bufio.tell())
bufio.flush()
self.assertEqual(6, bufio.tell())
self.assertEqual(b"ghi", read_func(bufio))
raw.seek(0, 0)
raw.write(b"XYZ")
# flush() resets the read buffer
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"XYZ", read_func(bufio, 3))
def test_flush_and_read(self):
self.check_flush_and_read(lambda bufio, *args: bufio.read(*args))
def test_flush_and_readinto(self):
def _readinto(bufio, n=-1):
b = bytearray(n if n >= 0 else 9999)
n = bufio.readinto(b)
return bytes(b[:n])
self.check_flush_and_read(_readinto)
def test_flush_and_peek(self):
def _peek(bufio, n=-1):
# This relies on the fact that the buffer can contain the whole
# raw stream, otherwise peek() can return less.
b = bufio.peek(n)
if n != -1:
b = b[:n]
bufio.seek(len(b), 1)
return b
self.check_flush_and_read(_peek)
def test_flush_and_write(self):
raw = self.BytesIO(b"abcdefghi")
bufio = self.tp(raw)
bufio.write(b"123")
bufio.flush()
bufio.write(b"45")
bufio.flush()
bufio.seek(0, 0)
self.assertEqual(b"12345fghi", raw.getvalue())
self.assertEqual(b"12345fghi", bufio.read())
def test_threads(self):
BufferedReaderTest.test_threads(self)
BufferedWriterTest.test_threads(self)
def test_writes_and_peek(self):
def _peek(bufio):
bufio.peek(1)
self.check_writes(_peek)
def _peek(bufio):
pos = bufio.tell()
bufio.seek(-1, 1)
bufio.peek(1)
bufio.seek(pos, 0)
self.check_writes(_peek)
def test_writes_and_reads(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.read(1)
self.check_writes(_read)
def test_writes_and_read1s(self):
def _read1(bufio):
bufio.seek(-1, 1)
bufio.read1(1)
self.check_writes(_read1)
def test_writes_and_readintos(self):
def _read(bufio):
bufio.seek(-1, 1)
bufio.readinto(bytearray(1))
self.check_writes(_read)
def test_write_after_readahead(self):
# Issue #6629: writing after the buffer was filled by readahead should
# first rewind the raw stream.
for overwrite_size in [1, 5]:
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 4)
# Trigger readahead
self.assertEqual(bufio.read(1), b"A")
self.assertEqual(bufio.tell(), 1)
# Overwriting should rewind the raw stream if it needs to
bufio.write(b"B" * overwrite_size)
self.assertEqual(bufio.tell(), overwrite_size + 1)
# If the write size was smaller than the buffer size, flush() and
# check that rewind happens.
bufio.flush()
self.assertEqual(bufio.tell(), overwrite_size + 1)
s = raw.getvalue()
self.assertEqual(s,
b"A" + b"B" * overwrite_size + b"A" * (9 - overwrite_size))
def test_write_rewind_write(self):
# Various combinations of reading / writing / seeking backwards / writing again
def mutate(bufio, pos1, pos2):
assert pos2 >= pos1
# Fill the buffer
bufio.seek(pos1)
bufio.read(pos2 - pos1)
bufio.write(b'\x02')
# This writes earlier than the previous write, but still inside
# the buffer.
bufio.seek(pos1)
bufio.write(b'\x01')
b = b"\x80\x81\x82\x83\x84"
for i in range(0, len(b)):
for j in range(i, len(b)):
raw = self.BytesIO(b)
bufio = self.tp(raw, 100)
mutate(bufio, i, j)
bufio.flush()
expected = bytearray(b)
expected[j] = 2
expected[i] = 1
self.assertEqual(raw.getvalue(), expected,
"failed result for i=%d, j=%d" % (i, j))
def test_truncate_after_read_or_write(self):
raw = self.BytesIO(b"A" * 10)
bufio = self.tp(raw, 100)
self.assertEqual(bufio.read(2), b"AA") # the read buffer gets filled
self.assertEqual(bufio.truncate(), 2)
self.assertEqual(bufio.write(b"BB"), 2) # the write buffer increases
self.assertEqual(bufio.truncate(), 4)
def test_misbehaved_io(self):
BufferedReaderTest.test_misbehaved_io(self)
BufferedWriterTest.test_misbehaved_io(self)
def test_interleaved_read_write(self):
# Test for issue #12213
with self.BytesIO(b'abcdefgh') as raw:
with self.tp(raw, 100) as f:
f.write(b"1")
self.assertEqual(f.read(1), b'b')
f.write(b'2')
self.assertEqual(f.read1(1), b'd')
f.write(b'3')
buf = bytearray(1)
f.readinto(buf)
self.assertEqual(buf, b'f')
f.write(b'4')
self.assertEqual(f.peek(1), b'h')
f.flush()
self.assertEqual(raw.getvalue(), b'1b2d3f4h')
with self.BytesIO(b'abc') as raw:
with self.tp(raw, 100) as f:
self.assertEqual(f.read(1), b'a')
f.write(b"2")
self.assertEqual(f.read(1), b'c')
f.flush()
self.assertEqual(raw.getvalue(), b'a2c')
def test_interleaved_readline_write(self):
with self.BytesIO(b'ab\ncdef\ng\n') as raw:
with self.tp(raw) as f:
f.write(b'1')
self.assertEqual(f.readline(), b'b\n')
f.write(b'2')
self.assertEqual(f.readline(), b'def\n')
f.write(b'3')
self.assertEqual(f.readline(), b'\n')
f.flush()
self.assertEqual(raw.getvalue(), b'1b\n2def\n3\n')
# You can't construct a BufferedRandom over a non-seekable stream.
test_unseekable = None
class CBufferedRandomTest(BufferedRandomTest, SizeofTest):
tp = io.BufferedRandom
def test_constructor(self):
BufferedRandomTest.test_constructor(self)
# The allocation can succeed on 32-bit builds, e.g. with more
# than 2GB RAM and a 64-bit kernel.
if sys.maxsize > 0x7FFFFFFF:
rawio = self.MockRawIO()
bufio = self.tp(rawio)
self.assertRaises((OverflowError, MemoryError, ValueError),
bufio.__init__, rawio, sys.maxsize)
def test_garbage_collection(self):
CBufferedReaderTest.test_garbage_collection(self)
CBufferedWriterTest.test_garbage_collection(self)
def test_args_error(self):
# Issue #17275
with self.assertRaisesRegex(TypeError, "BufferedRandom"):
self.tp(io.BytesIO(), 1024, 1024, 1024)
class PyBufferedRandomTest(BufferedRandomTest):
tp = pyio.BufferedRandom
# To fully exercise seek/tell, the StatefulIncrementalDecoder has these
# properties:
# - A single output character can correspond to many bytes of input.
# - The number of input bytes to complete the character can be
# undetermined until the last input byte is received.
# - The number of input bytes can vary depending on previous input.
# - A single input byte can correspond to many characters of output.
# - The number of output characters can be undetermined until the
# last input byte is received.
# - The number of output characters can vary depending on previous input.
class StatefulIncrementalDecoder(codecs.IncrementalDecoder):
"""
For testing seek/tell behavior with a stateful, buffering decoder.
Input is a sequence of words. Words may be fixed-length (length set
by input) or variable-length (period-terminated). In variable-length
mode, extra periods are ignored. Possible words are:
- 'i' followed by a number sets the input length, I (maximum 99).
When I is set to 0, words are period-terminated.
- 'o' followed by a number sets the output length, O (maximum 99).
- Any other word is converted into a word followed by a period on
the output. The output word consists of the input word truncated
or padded out with hyphens to make its length equal to O. If O
is 0, the word is output verbatim without truncating or padding.
I and O are initially set to 1. When I changes, any buffered input is
re-scanned according to the new I. EOF also terminates the last word.
"""
def __init__(self, errors='strict'):
codecs.IncrementalDecoder.__init__(self, errors)
self.reset()
def __repr__(self):
return '<SID %x>' % id(self)
def reset(self):
self.i = 1
self.o = 1
self.buffer = bytearray()
def getstate(self):
i, o = self.i ^ 1, self.o ^ 1 # so that flags = 0 after reset()
return bytes(self.buffer), i*100 + o
def setstate(self, state):
buffer, io = state
self.buffer = bytearray(buffer)
i, o = divmod(io, 100)
self.i, self.o = i ^ 1, o ^ 1
def decode(self, input, final=False):
output = ''
for b in input:
if self.i == 0: # variable-length, terminated with period
if b == ord('.'):
if self.buffer:
output += self.process_word()
else:
self.buffer.append(b)
else: # fixed-length, terminate after self.i bytes
self.buffer.append(b)
if len(self.buffer) == self.i:
output += self.process_word()
if final and self.buffer: # EOF terminates the last word
output += self.process_word()
return output
def process_word(self):
output = ''
if self.buffer[0] == ord('i'):
self.i = min(99, int(self.buffer[1:] or 0)) # set input length
elif self.buffer[0] == ord('o'):
self.o = min(99, int(self.buffer[1:] or 0)) # set output length
else:
output = self.buffer.decode('ascii')
if len(output) < self.o:
output += '-'*self.o # pad out with hyphens
if self.o:
output = output[:self.o] # truncate to output length
output += '.'
self.buffer = bytearray()
return output
codecEnabled = False
@classmethod
def lookupTestDecoder(cls, name):
if cls.codecEnabled and name == 'test_decoder':
latin1 = codecs.lookup('latin-1')
return codecs.CodecInfo(
name='test_decoder', encode=latin1.encode, decode=None,
incrementalencoder=None,
streamreader=None, streamwriter=None,
incrementaldecoder=cls)
# Register the previous decoder for testing.
# Disabled by default, tests will enable it.
codecs.register(StatefulIncrementalDecoder.lookupTestDecoder)
class StatefulIncrementalDecoderTest(unittest.TestCase):
"""
Make sure the StatefulIncrementalDecoder actually works.
"""
test_cases = [
# I=1, O=1 (fixed-length input == fixed-length output)
(b'abcd', False, 'a.b.c.d.'),
# I=0, O=0 (variable-length input, variable-length output)
(b'oiabcd', True, 'abcd.'),
# I=0, O=0 (should ignore extra periods)
(b'oi...abcd...', True, 'abcd.'),
# I=0, O=6 (variable-length input, fixed-length output)
(b'i.o6.x.xyz.toolongtofit.', False, 'x-----.xyz---.toolon.'),
# I=2, O=6 (fixed-length input < fixed-length output)
(b'i.i2.o6xyz', True, 'xy----.z-----.'),
# I=6, O=3 (fixed-length input > fixed-length output)
(b'i.o3.i6.abcdefghijklmnop', True, 'abc.ghi.mno.'),
# I=0, then 3; O=29, then 15 (with longer output)
(b'i.o29.a.b.cde.o15.abcdefghijabcdefghij.i3.a.b.c.d.ei00k.l.m', True,
'a----------------------------.' +
'b----------------------------.' +
'cde--------------------------.' +
'abcdefghijabcde.' +
'a.b------------.' +
'.c.------------.' +
'd.e------------.' +
'k--------------.' +
'l--------------.' +
'm--------------.')
]
def test_decoder(self):
# Try a few one-shot test cases.
for input, eof, output in self.test_cases:
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(input, eof), output)
# Also test an unfinished decode, followed by forcing EOF.
d = StatefulIncrementalDecoder()
self.assertEqual(d.decode(b'oiabcd'), '')
self.assertEqual(d.decode(b'', 1), 'abcd.')
class TextIOWrapperTest(unittest.TestCase):
def setUp(self):
self.testdata = b"AAA\r\nBBB\rCCC\r\nDDD\nEEE\r\n"
self.normalized = b"AAA\nBBB\nCCC\nDDD\nEEE\n".decode("ascii")
support.unlink(support.TESTFN)
def tearDown(self):
support.unlink(support.TESTFN)
def test_constructor(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
t.__init__(b, encoding="latin-1", newline="\r\n")
self.assertEqual(t.encoding, "latin-1")
self.assertEqual(t.line_buffering, False)
t.__init__(b, encoding="utf-8", line_buffering=True)
self.assertEqual(t.encoding, "utf-8")
self.assertEqual(t.line_buffering, True)
self.assertEqual("\xe9\n", t.readline())
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
def test_non_text_encoding_codecs_are_rejected(self):
# Ensure the constructor complains if passed a codec that isn't
# marked as a text encoding
# http://bugs.python.org/issue20404
r = self.BytesIO()
b = self.BufferedWriter(r)
with self.assertRaisesRegex(LookupError, "is not a text encoding"):
self.TextIOWrapper(b, encoding="hex")
def test_detach(self):
r = self.BytesIO()
b = self.BufferedWriter(r)
t = self.TextIOWrapper(b)
self.assertIs(t.detach(), b)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("howdy")
self.assertFalse(r.getvalue())
t.detach()
self.assertEqual(r.getvalue(), b"howdy")
self.assertRaises(ValueError, t.detach)
# Operations independent of the detached stream should still work
repr(t)
self.assertEqual(t.encoding, "ascii")
self.assertEqual(t.errors, "strict")
self.assertFalse(t.line_buffering)
def test_repr(self):
raw = self.BytesIO("hello".encode("utf-8"))
b = self.BufferedReader(raw)
t = self.TextIOWrapper(b, encoding="utf-8")
modname = self.TextIOWrapper.__module__
self.assertEqual(repr(t),
"<%s.TextIOWrapper encoding='utf-8'>" % modname)
raw.name = "dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' encoding='utf-8'>" % modname)
t.mode = "r"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name='dummy' mode='r' encoding='utf-8'>" % modname)
raw.name = b"dummy"
self.assertEqual(repr(t),
"<%s.TextIOWrapper name=b'dummy' mode='r' encoding='utf-8'>" % modname)
t.buffer.detach()
repr(t) # Should not raise an exception
def test_line_buffering(self):
r = self.BytesIO()
b = self.BufferedWriter(r, 1000)
t = self.TextIOWrapper(b, newline="\n", line_buffering=True)
t.write("X")
self.assertEqual(r.getvalue(), b"") # No flush happened
t.write("Y\nZ")
self.assertEqual(r.getvalue(), b"XY\nZ") # All got flushed
t.write("A\rB")
self.assertEqual(r.getvalue(), b"XY\nZA\rB")
def test_default_encoding(self):
old_environ = dict(os.environ)
try:
# try to get a user preferred encoding different than the current
# locale encoding to check that TextIOWrapper() uses the current
# locale encoding and not the user preferred encoding
for key in ('LC_ALL', 'LANG', 'LC_CTYPE'):
if key in os.environ:
del os.environ[key]
current_locale_encoding = locale.getpreferredencoding(False)
b = self.BytesIO()
t = self.TextIOWrapper(b)
self.assertEqual(t.encoding, current_locale_encoding)
finally:
os.environ.clear()
os.environ.update(old_environ)
@support.cpython_only
def test_device_encoding(self):
# Issue 15989
import _testcapi
b = self.BytesIO()
b.fileno = lambda: _testcapi.INT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
b.fileno = lambda: _testcapi.UINT_MAX + 1
self.assertRaises(OverflowError, self.TextIOWrapper, b)
def test_encoding(self):
# Check the encoding attribute is always set, and valid
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="utf-8")
self.assertEqual(t.encoding, "utf-8")
t = self.TextIOWrapper(b)
self.assertTrue(t.encoding is not None)
codecs.lookup(t.encoding)
def test_encoding_errors_reading(self):
# (1) default
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.read)
# (2) explicit strict
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.read)
# (3) ignore
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore")
self.assertEqual(t.read(), "abc\n\n")
# (4) replace
b = self.BytesIO(b"abc\n\xff\n")
t = self.TextIOWrapper(b, encoding="ascii", errors="replace")
self.assertEqual(t.read(), "abc\n\ufffd\n")
def test_encoding_errors_writing(self):
# (1) default
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
self.assertRaises(UnicodeError, t.write, "\xff")
# (2) explicit strict
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="strict")
self.assertRaises(UnicodeError, t.write, "\xff")
# (3) ignore
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="ignore",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abcdef\n")
# (4) replace
b = self.BytesIO()
t = self.TextIOWrapper(b, encoding="ascii", errors="replace",
newline="\n")
t.write("abc\xffdef\n")
t.flush()
self.assertEqual(b.getvalue(), b"abc?def\n")
def test_newlines(self):
input_lines = [ "unix\n", "windows\r\n", "os9\r", "last\n", "nonl" ]
tests = [
[ None, [ 'unix\n', 'windows\n', 'os9\n', 'last\n', 'nonl' ] ],
[ '', input_lines ],
[ '\n', [ "unix\n", "windows\r\n", "os9\rlast\n", "nonl" ] ],
[ '\r\n', [ "unix\nwindows\r\n", "os9\rlast\nnonl" ] ],
[ '\r', [ "unix\nwindows\r", "\nos9\r", "last\nnonl" ] ],
]
encodings = (
'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
# Try a range of buffer sizes to test the case where \r is the last
# character in TextIOWrapper._pending_line.
for encoding in encodings:
# XXX: str.encode() should return bytes
data = bytes(''.join(input_lines).encode(encoding))
for do_reads in (False, True):
for bufsize in range(1, 10):
for newline, exp_lines in tests:
bufio = self.BufferedReader(self.BytesIO(data), bufsize)
textio = self.TextIOWrapper(bufio, newline=newline,
encoding=encoding)
if do_reads:
got_lines = []
while True:
c2 = textio.read(2)
if c2 == '':
break
self.assertEqual(len(c2), 2)
got_lines.append(c2 + textio.readline())
else:
got_lines = list(textio)
for got_line, exp_line in zip(got_lines, exp_lines):
self.assertEqual(got_line, exp_line)
self.assertEqual(len(got_lines), len(exp_lines))
def test_newlines_input(self):
testdata = b"AAA\nBB\x00B\nCCC\rDDD\rEEE\r\nFFF\r\nGGG"
normalized = testdata.replace(b"\r\n", b"\n").replace(b"\r", b"\n")
for newline, expected in [
(None, normalized.decode("ascii").splitlines(keepends=True)),
("", testdata.decode("ascii").splitlines(keepends=True)),
("\n", ["AAA\n", "BB\x00B\n", "CCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r\n", ["AAA\nBB\x00B\nCCC\rDDD\rEEE\r\n", "FFF\r\n", "GGG"]),
("\r", ["AAA\nBB\x00B\nCCC\r", "DDD\r", "EEE\r", "\nFFF\r", "\nGGG"]),
]:
buf = self.BytesIO(testdata)
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
self.assertEqual(txt.readlines(), expected)
txt.seek(0)
self.assertEqual(txt.read(), "".join(expected))
def test_newlines_output(self):
testdict = {
"": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\n": b"AAA\nBBB\nCCC\nX\rY\r\nZ",
"\r": b"AAA\rBBB\rCCC\rX\rY\r\rZ",
"\r\n": b"AAA\r\nBBB\r\nCCC\r\nX\rY\r\r\nZ",
}
tests = [(None, testdict[os.linesep])] + sorted(testdict.items())
for newline, expected in tests:
buf = self.BytesIO()
txt = self.TextIOWrapper(buf, encoding="ascii", newline=newline)
txt.write("AAA\nB")
txt.write("BB\nCCC\n")
txt.write("X\rY\r\nZ")
txt.flush()
self.assertEqual(buf.closed, False)
self.assertEqual(buf.getvalue(), expected)
def test_destructor(self):
l = []
base = self.BytesIO
class MyBytesIO(base):
def close(self):
l.append(self.getvalue())
base.close(self)
b = MyBytesIO()
t = self.TextIOWrapper(b, encoding="ascii")
t.write("abc")
del t
support.gc_collect()
self.assertEqual([b"abc"], l)
def test_override_destructor(self):
record = []
class MyTextIO(self.TextIOWrapper):
def __del__(self):
record.append(1)
try:
f = super().__del__
except AttributeError:
pass
else:
f()
def close(self):
record.append(2)
super().close()
def flush(self):
record.append(3)
super().flush()
b = self.BytesIO()
t = MyTextIO(b, encoding="ascii")
del t
support.gc_collect()
self.assertEqual(record, [1, 2, 3])
def test_error_through_destructor(self):
# Test that the exception state is not modified by a destructor,
# even if close() fails.
rawio = self.CloseFailureIO()
def f():
self.TextIOWrapper(rawio).xyzzy
with support.captured_output("stderr") as s:
self.assertRaises(AttributeError, f)
s = s.getvalue().strip()
if s:
# The destructor *may* have printed an unraisable error, check it
self.assertEqual(len(s.splitlines()), 1)
self.assertTrue(s.startswith("Exception OSError: "), s)
self.assertTrue(s.endswith(" ignored"), s)
# Systematic tests of the text I/O API
def test_basic_io(self):
for chunksize in (1, 2, 3, 4, 5, 15, 16, 17, 31, 32, 33, 63, 64, 65):
for enc in "ascii", "latin-1", "utf-8" :# , "utf-16-be", "utf-16-le":
f = self.open(support.TESTFN, "w+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.write("abc"), 3)
f.close()
f = self.open(support.TESTFN, "r+", encoding=enc)
f._CHUNK_SIZE = chunksize
self.assertEqual(f.tell(), 0)
self.assertEqual(f.read(), "abc")
cookie = f.tell()
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.read(None), "abc")
f.seek(0)
self.assertEqual(f.read(2), "ab")
self.assertEqual(f.read(1), "c")
self.assertEqual(f.read(1), "")
self.assertEqual(f.read(), "")
self.assertEqual(f.tell(), cookie)
self.assertEqual(f.seek(0), 0)
self.assertEqual(f.seek(0, 2), cookie)
self.assertEqual(f.write("def"), 3)
self.assertEqual(f.seek(cookie), cookie)
self.assertEqual(f.read(), "def")
if enc.startswith("utf"):
self.multi_line_test(f, enc)
f.close()
def multi_line_test(self, f, enc):
f.seek(0)
f.truncate()
sample = "s\xff\u0fff\uffff"
wlines = []
for size in (0, 1, 2, 3, 4, 5, 30, 31, 32, 33, 62, 63, 64, 65, 1000):
chars = []
for i in range(size):
chars.append(sample[i % len(sample)])
line = "".join(chars) + "\n"
wlines.append((f.tell(), line))
f.write(line)
f.seek(0)
rlines = []
while True:
pos = f.tell()
line = f.readline()
if not line:
break
rlines.append((pos, line))
self.assertEqual(rlines, wlines)
def test_telling(self):
f = self.open(support.TESTFN, "w+", encoding="utf-8")
p0 = f.tell()
f.write("\xff\n")
p1 = f.tell()
f.write("\xff\n")
p2 = f.tell()
f.seek(0)
self.assertEqual(f.tell(), p0)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p1)
self.assertEqual(f.readline(), "\xff\n")
self.assertEqual(f.tell(), p2)
f.seek(0)
for line in f:
self.assertEqual(line, "\xff\n")
self.assertRaises(OSError, f.tell)
self.assertEqual(f.tell(), p2)
f.close()
def test_seeking(self):
chunk_size = _default_chunk_size()
prefix_size = chunk_size - 2
u_prefix = "a" * prefix_size
prefix = bytes(u_prefix.encode("utf-8"))
self.assertEqual(len(u_prefix), len(prefix))
u_suffix = "\u8888\n"
suffix = bytes(u_suffix.encode("utf-8"))
line = prefix + suffix
with self.open(support.TESTFN, "wb") as f:
f.write(line*2)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
s = f.read(prefix_size)
self.assertEqual(s, str(prefix, "ascii"))
self.assertEqual(f.tell(), prefix_size)
self.assertEqual(f.readline(), u_suffix)
def test_seeking_too(self):
# Regression test for a specific bug
data = b'\xe0\xbf\xbf\n'
with self.open(support.TESTFN, "wb") as f:
f.write(data)
with self.open(support.TESTFN, "r", encoding="utf-8") as f:
f._CHUNK_SIZE # Just test that it exists
f._CHUNK_SIZE = 2
f.readline()
f.tell()
def test_seek_and_tell(self):
#Test seek/tell using the StatefulIncrementalDecoder.
# Make test faster by doing smaller seeks
CHUNK_SIZE = 128
def test_seek_and_tell_with_data(data, min_pos=0):
"""Tell/seek to various points within a data stream and ensure
that the decoded data returned by read() is consistent."""
f = self.open(support.TESTFN, 'wb')
f.write(data)
f.close()
f = self.open(support.TESTFN, encoding='test_decoder')
f._CHUNK_SIZE = CHUNK_SIZE
decoded = f.read()
f.close()
for i in range(min_pos, len(decoded) + 1): # seek positions
for j in [1, 5, len(decoded) - i]: # read lengths
f = self.open(support.TESTFN, encoding='test_decoder')
self.assertEqual(f.read(i), decoded[:i])
cookie = f.tell()
self.assertEqual(f.read(j), decoded[i:i + j])
f.seek(cookie)
self.assertEqual(f.read(), decoded[i:])
f.close()
# Enable the test decoder.
StatefulIncrementalDecoder.codecEnabled = 1
# Run the tests.
try:
# Try each test case.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
test_seek_and_tell_with_data(input)
# Position each test case so that it crosses a chunk boundary.
for input, _, _ in StatefulIncrementalDecoderTest.test_cases:
offset = CHUNK_SIZE - len(input)//2
prefix = b'.'*offset
# Don't bother seeking into the prefix (takes too long).
min_pos = offset*2
test_seek_and_tell_with_data(prefix + input, min_pos)
# Ensure our test decoder won't interfere with subsequent tests.
finally:
StatefulIncrementalDecoder.codecEnabled = 0
def test_encoded_writes(self):
data = "1234567890"
tests = ("utf-16",
"utf-16-le",
"utf-16-be",
"utf-32",
"utf-32-le",
"utf-32-be")
for encoding in tests:
buf = self.BytesIO()
f = self.TextIOWrapper(buf, encoding=encoding)
# Check if the BOM is written only once (see issue1753).
f.write(data)
f.write(data)
f.seek(0)
self.assertEqual(f.read(), data * 2)
f.seek(0)
self.assertEqual(f.read(), data * 2)
self.assertEqual(buf.getvalue(), (data * 2).encode(encoding))
def test_unreadable(self):
class UnReadable(self.BytesIO):
def readable(self):
return False
txt = self.TextIOWrapper(UnReadable())
self.assertRaises(OSError, txt.read)
def test_read_one_by_one(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\r\nBB"))
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, "AA\nBB")
def test_readlines(self):
txt = self.TextIOWrapper(self.BytesIO(b"AA\nBB\nCC"))
self.assertEqual(txt.readlines(), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(None), ["AA\n", "BB\n", "CC"])
txt.seek(0)
self.assertEqual(txt.readlines(5), ["AA\n", "BB\n"])
# read in amounts equal to TextIOWrapper._CHUNK_SIZE which is 128.
def test_read_by_chunk(self):
# make sure "\r\n" straddles 128 char boundary.
txt = self.TextIOWrapper(self.BytesIO(b"A" * 127 + b"\r\nB"))
reads = ""
while True:
c = txt.read(128)
if not c:
break
reads += c
self.assertEqual(reads, "A"*127+"\nB")
def test_writelines(self):
l = ['ab', 'cd', 'ef']
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_userlist(self):
l = UserList(['ab', 'cd', 'ef'])
buf = self.BytesIO()
txt = self.TextIOWrapper(buf)
txt.writelines(l)
txt.flush()
self.assertEqual(buf.getvalue(), b'abcdef')
def test_writelines_error(self):
txt = self.TextIOWrapper(self.BytesIO())
self.assertRaises(TypeError, txt.writelines, [1, 2, 3])
self.assertRaises(TypeError, txt.writelines, None)
self.assertRaises(TypeError, txt.writelines, b'abc')
def test_issue1395_1(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
# read one char at a time
reads = ""
while True:
c = txt.read(1)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_2(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = ""
while True:
c = txt.read(4)
if not c:
break
reads += c
self.assertEqual(reads, self.normalized)
def test_issue1395_3(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read(4)
reads += txt.readline()
reads += txt.readline()
reads += txt.readline()
self.assertEqual(reads, self.normalized)
def test_issue1395_4(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
reads += txt.read()
self.assertEqual(reads, self.normalized)
def test_issue1395_5(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt._CHUNK_SIZE = 4
reads = txt.read(4)
pos = txt.tell()
txt.seek(0)
txt.seek(pos)
self.assertEqual(txt.read(4), "BBB\n")
def test_issue2282(self):
buffer = self.BytesIO(self.testdata)
txt = self.TextIOWrapper(buffer, encoding="ascii")
self.assertEqual(buffer.seekable(), txt.seekable())
def test_append_bom(self):
# The BOM is not written again when appending to a non-empty file
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaa'.encode(charset))
with self.open(filename, 'a', encoding=charset) as f:
f.write('xxx')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'aaaxxx'.encode(charset))
def test_seek_bom(self):
# Same test, but when seeking manually
filename = support.TESTFN
for charset in ('utf-8-sig', 'utf-16', 'utf-32'):
with self.open(filename, 'w', encoding=charset) as f:
f.write('aaa')
pos = f.tell()
with self.open(filename, 'r+', encoding=charset) as f:
f.seek(pos)
f.write('zzz')
f.seek(0)
f.write('bbb')
with self.open(filename, 'rb') as f:
self.assertEqual(f.read(), 'bbbzzz'.encode(charset))
def test_errors_property(self):
with self.open(support.TESTFN, "w") as f:
self.assertEqual(f.errors, "strict")
with self.open(support.TESTFN, "w", errors="replace") as f:
self.assertEqual(f.errors, "replace")
@support.no_tracing
@unittest.skipUnless(threading, 'Threading required for this test.')
def test_threads_write(self):
# Issue6750: concurrent writes could duplicate data
event = threading.Event()
with self.open(support.TESTFN, "w", buffering=1) as f:
def run(n):
text = "Thread%03d\n" % n
event.wait()
f.write(text)
threads = [threading.Thread(target=lambda n=x: run(n))
for x in range(20)]
for t in threads:
t.start()
time.sleep(0.02)
event.set()
for t in threads:
t.join()
with self.open(support.TESTFN) as f:
content = f.read()
for n in range(20):
self.assertEqual(content.count("Thread%03d\n" % n), 1)
def test_flush_error_on_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
def bad_flush():
raise OSError()
txt.flush = bad_flush
self.assertRaises(OSError, txt.close) # exception not swallowed
self.assertTrue(txt.closed)
def test_close_error_on_close(self):
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise OSError('flush')
def bad_close():
raise OSError('close')
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(OSError) as err: # exception not swallowed
txt.close()
self.assertEqual(err.exception.args, ('close',))
self.assertIsInstance(err.exception.__context__, OSError)
self.assertEqual(err.exception.__context__.args, ('flush',))
self.assertFalse(txt.closed)
def test_nonnormalized_close_error_on_close(self):
# Issue #21677
buffer = self.BytesIO(self.testdata)
def bad_flush():
raise non_existing_flush
def bad_close():
raise non_existing_close
buffer.close = bad_close
txt = self.TextIOWrapper(buffer, encoding="ascii")
txt.flush = bad_flush
with self.assertRaises(NameError) as err: # exception not swallowed
txt.close()
self.assertIn('non_existing_close', str(err.exception))
self.assertIsInstance(err.exception.__context__, NameError)
self.assertIn('non_existing_flush', str(err.exception.__context__))
self.assertFalse(txt.closed)
def test_multi_close(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
txt.close()
txt.close()
txt.close()
self.assertRaises(ValueError, txt.flush)
def test_unseekable(self):
txt = self.TextIOWrapper(self.MockUnseekableIO(self.testdata))
self.assertRaises(self.UnsupportedOperation, txt.tell)
self.assertRaises(self.UnsupportedOperation, txt.seek, 0)
def test_readonly_attributes(self):
txt = self.TextIOWrapper(self.BytesIO(self.testdata), encoding="ascii")
buf = self.BytesIO(self.testdata)
with self.assertRaises(AttributeError):
txt.buffer = buf
def test_rawio(self):
# Issue #12591: TextIOWrapper must work with raw I/O objects, so
# that subprocess.Popen() can have the required unbuffered
# semantics with universal_newlines=True.
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n')
# Reads
self.assertEqual(txt.read(4), 'abcd')
self.assertEqual(txt.readline(), 'efghi\n')
self.assertEqual(list(txt), ['jkl\n', 'opq\n'])
def test_rawio_write_through(self):
# Issue #12591: with write_through=True, writes don't need a flush
raw = self.MockRawIO([b'abc', b'def', b'ghi\njkl\nopq\n'])
txt = self.TextIOWrapper(raw, encoding='ascii', newline='\n',
write_through=True)
txt.write('1')
txt.write('23\n4')
txt.write('5')
self.assertEqual(b''.join(raw._write_stack), b'123\n45')
def test_bufio_write_through(self):
# Issue #21396: write_through=True doesn't force a flush()
# on the underlying binary buffered object.
flush_called, write_called = [], []
class BufferedWriter(self.BufferedWriter):
def flush(self, *args, **kwargs):
flush_called.append(True)
return super().flush(*args, **kwargs)
def write(self, *args, **kwargs):
write_called.append(True)
return super().write(*args, **kwargs)
rawio = self.BytesIO()
data = b"a"
bufio = BufferedWriter(rawio, len(data)*2)
textio = self.TextIOWrapper(bufio, encoding='ascii',
write_through=True)
# write to the buffered io but don't overflow the buffer
text = data.decode('ascii')
textio.write(text)
# buffer.flush is not called with write_through=True
self.assertFalse(flush_called)
# buffer.write *is* called with write_through=True
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), b"") # no flush
write_called = [] # reset
textio.write(text * 10) # total content is larger than bufio buffer
self.assertTrue(write_called)
self.assertEqual(rawio.getvalue(), data * 11) # all flushed
def test_read_nonbytes(self):
# Issue #17106
# Crash when underlying read() returns non-bytes
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read, 1)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.readline)
t = self.TextIOWrapper(self.StringIO('a'))
self.assertRaises(TypeError, t.read)
def test_illegal_decoder(self):
# Issue #17106
# Bypass the early encoding check added in issue 20404
def _make_illegal_wrapper():
quopri = codecs.lookup("quopri")
quopri._is_text_encoding = True
try:
t = self.TextIOWrapper(self.BytesIO(b'aaaaaa'),
newline='\n', encoding="quopri")
finally:
quopri._is_text_encoding = False
return t
# Crash when decoder returns non-string
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read, 1)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.readline)
t = _make_illegal_wrapper()
self.assertRaises(TypeError, t.read)
def _check_create_at_shutdown(self, **kwargs):
# Issue #20037: creating a TextIOWrapper at shutdown
# shouldn't crash the interpreter.
iomod = self.io.__name__
code = """if 1:
import codecs
import {iomod} as io
# Avoid looking up codecs at shutdown
codecs.lookup('utf-8')
class C:
def __init__(self):
self.buf = io.BytesIO()
def __del__(self):
io.TextIOWrapper(self.buf, **{kwargs})
print("ok")
c = C()
""".format(iomod=iomod, kwargs=kwargs)
return assert_python_ok("-c", code)
def test_create_at_shutdown_without_encoding(self):
rc, out, err = self._check_create_at_shutdown()
if err:
# Can error out with a RuntimeError if the module state
# isn't found.
self.assertIn(self.shutdown_error, err.decode())
else:
self.assertEqual("ok", out.decode().strip())
def test_create_at_shutdown_with_encoding(self):
rc, out, err = self._check_create_at_shutdown(encoding='utf-8',
errors='strict')
self.assertFalse(err)
self.assertEqual("ok", out.decode().strip())
def test_issue22849(self):
class F(object):
def readable(self): return True
def writable(self): return True
def seekable(self): return True
for i in range(10):
try:
self.TextIOWrapper(F(), encoding='utf-8')
except Exception:
pass
F.tell = lambda x: 0
t = self.TextIOWrapper(F(), encoding='utf-8')
class CTextIOWrapperTest(TextIOWrapperTest):
io = io
shutdown_error = "RuntimeError: could not find io module state"
def test_initialization(self):
r = self.BytesIO(b"\xc3\xa9\n\n")
b = self.BufferedReader(r, 1000)
t = self.TextIOWrapper(b)
self.assertRaises(TypeError, t.__init__, b, newline=42)
self.assertRaises(ValueError, t.read)
self.assertRaises(ValueError, t.__init__, b, newline='xyzzy')
self.assertRaises(ValueError, t.read)
t = self.TextIOWrapper.__new__(self.TextIOWrapper)
self.assertRaises(Exception, repr, t)
def test_garbage_collection(self):
# C TextIOWrapper objects are collected, and collecting them flushes
# all data to disk.
# The Python version has __del__, so it ends in gc.garbage instead.
with support.check_warnings(('', ResourceWarning)):
rawio = io.FileIO(support.TESTFN, "wb")
b = self.BufferedWriter(rawio)
t = self.TextIOWrapper(b, encoding="ascii")
t.write("456def")
t.x = t
wr = weakref.ref(t)
del t
support.gc_collect()
self.assertTrue(wr() is None, wr)
with self.open(support.TESTFN, "rb") as f:
self.assertEqual(f.read(), b"456def")
def test_rwpair_cleared_before_textio(self):
# Issue 13070: TextIOWrapper's finalization would crash when called
# after the reference to the underlying BufferedRWPair's writer got
# cleared by the GC.
for i in range(1000):
b1 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t1 = self.TextIOWrapper(b1, encoding="ascii")
b2 = self.BufferedRWPair(self.MockRawIO(), self.MockRawIO())
t2 = self.TextIOWrapper(b2, encoding="ascii")
# circular references
t1.buddy = t2
t2.buddy = t1
support.gc_collect()
class PyTextIOWrapperTest(TextIOWrapperTest):
io = pyio
#shutdown_error = "LookupError: unknown encoding: ascii"
shutdown_error = "TypeError: 'NoneType' object is not iterable"
class IncrementalNewlineDecoderTest(unittest.TestCase):
def check_newline_decoding_utf8(self, decoder):
# UTF-8 specific tests for a newline decoder
def _check_decode(b, s, **kwargs):
# We exercise getstate() / setstate() as well as decode()
state = decoder.getstate()
self.assertEqual(decoder.decode(b, **kwargs), s)
decoder.setstate(state)
self.assertEqual(decoder.decode(b, **kwargs), s)
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
_check_decode(b'\xa2', "")
_check_decode(b'\x88', "\u8888")
_check_decode(b'\xe8', "")
self.assertRaises(UnicodeDecodeError, decoder.decode, b'', final=True)
decoder.reset()
_check_decode(b'\n', "\n")
_check_decode(b'\r', "")
_check_decode(b'', "\n", final=True)
_check_decode(b'\r', "\n", final=True)
_check_decode(b'\r', "")
_check_decode(b'a', "\na")
_check_decode(b'\r\r\n', "\n\n")
_check_decode(b'\r', "")
_check_decode(b'\r', "\n")
_check_decode(b'\na', "\na")
_check_decode(b'\xe8\xa2\x88\r\n', "\u8888\n")
_check_decode(b'\xe8\xa2\x88', "\u8888")
_check_decode(b'\n', "\n")
_check_decode(b'\xe8\xa2\x88\r', "\u8888")
_check_decode(b'\n', "\n")
def check_newline_decoding(self, decoder, encoding):
result = []
if encoding is not None:
encoder = codecs.getincrementalencoder(encoding)()
def _decode_bytewise(s):
# Decode one byte at a time
for b in encoder.encode(s):
result.append(decoder.decode(bytes([b])))
else:
encoder = None
def _decode_bytewise(s):
# Decode one char at a time
for c in s:
result.append(decoder.decode(c))
self.assertEqual(decoder.newlines, None)
_decode_bytewise("abc\n\r")
self.assertEqual(decoder.newlines, '\n')
_decode_bytewise("\nabc")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual(decoder.newlines, ('\n', '\r\n'))
_decode_bytewise("abc")
self.assertEqual(decoder.newlines, ('\r', '\n', '\r\n'))
_decode_bytewise("abc\r")
self.assertEqual("".join(result), "abc\n\nabcabc\nabcabc")
decoder.reset()
input = "abc"
if encoder is not None:
encoder.reset()
input = encoder.encode(input)
self.assertEqual(decoder.decode(input), "abc")
self.assertEqual(decoder.newlines, None)
def test_newline_decoder(self):
encodings = (
# None meaning the IncrementalNewlineDecoder takes unicode input
# rather than bytes input
None, 'utf-8', 'latin-1',
'utf-16', 'utf-16-le', 'utf-16-be',
'utf-32', 'utf-32-le', 'utf-32-be',
)
for enc in encodings:
decoder = enc and codecs.getincrementaldecoder(enc)()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding(decoder, enc)
decoder = codecs.getincrementaldecoder("utf-8")()
decoder = self.IncrementalNewlineDecoder(decoder, translate=True)
self.check_newline_decoding_utf8(decoder)
def test_newline_bytes(self):
# Issue 5433: Excessive optimization in IncrementalNewlineDecoder
def _check(dec):
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0D00"), "\u0D00")
self.assertEqual(dec.newlines, None)
self.assertEqual(dec.decode("\u0A00"), "\u0A00")
self.assertEqual(dec.newlines, None)
dec = self.IncrementalNewlineDecoder(None, translate=False)
_check(dec)
dec = self.IncrementalNewlineDecoder(None, translate=True)
_check(dec)
class CIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
class PyIncrementalNewlineDecoderTest(IncrementalNewlineDecoderTest):
pass
# XXX Tests for open()
class MiscIOTest(unittest.TestCase):
def tearDown(self):
support.unlink(support.TESTFN)
def test___all__(self):
for name in self.io.__all__:
obj = getattr(self.io, name, None)
self.assertTrue(obj is not None, name)
if name == "open":
continue
elif "error" in name.lower() or name == "UnsupportedOperation":
self.assertTrue(issubclass(obj, Exception), name)
elif not name.startswith("SEEK_"):
self.assertTrue(issubclass(obj, self.IOBase))
def test_attributes(self):
f = self.open(support.TESTFN, "wb", buffering=0)
self.assertEqual(f.mode, "wb")
f.close()
with support.check_warnings(('', DeprecationWarning)):
f = self.open(support.TESTFN, "U")
self.assertEqual(f.name, support.TESTFN)
self.assertEqual(f.buffer.name, support.TESTFN)
self.assertEqual(f.buffer.raw.name, support.TESTFN)
self.assertEqual(f.mode, "U")
self.assertEqual(f.buffer.mode, "rb")
self.assertEqual(f.buffer.raw.mode, "rb")
f.close()
f = self.open(support.TESTFN, "w+")
self.assertEqual(f.mode, "w+")
self.assertEqual(f.buffer.mode, "rb+") # Does it really matter?
self.assertEqual(f.buffer.raw.mode, "rb+")
g = self.open(f.fileno(), "wb", closefd=False)
self.assertEqual(g.mode, "wb")
self.assertEqual(g.raw.mode, "wb")
self.assertEqual(g.name, f.fileno())
self.assertEqual(g.raw.name, f.fileno())
f.close()
g.close()
def test_io_after_close(self):
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "w", "buffering": 1},
{"mode": "w", "buffering": 2},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "r", "buffering": 1},
{"mode": "r", "buffering": 2},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+", "buffering": 1},
{"mode": "w+", "buffering": 2},
{"mode": "w+b", "buffering": 0},
]:
f = self.open(support.TESTFN, **kwargs)
f.close()
self.assertRaises(ValueError, f.flush)
self.assertRaises(ValueError, f.fileno)
self.assertRaises(ValueError, f.isatty)
self.assertRaises(ValueError, f.__iter__)
if hasattr(f, "peek"):
self.assertRaises(ValueError, f.peek, 1)
self.assertRaises(ValueError, f.read)
if hasattr(f, "read1"):
self.assertRaises(ValueError, f.read1, 1024)
if hasattr(f, "readall"):
self.assertRaises(ValueError, f.readall)
if hasattr(f, "readinto"):
self.assertRaises(ValueError, f.readinto, bytearray(1024))
self.assertRaises(ValueError, f.readline)
self.assertRaises(ValueError, f.readlines)
self.assertRaises(ValueError, f.seek, 0)
self.assertRaises(ValueError, f.tell)
self.assertRaises(ValueError, f.truncate)
self.assertRaises(ValueError, f.write,
b"" if "b" in kwargs['mode'] else "")
self.assertRaises(ValueError, f.writelines, [])
self.assertRaises(ValueError, next, f)
def test_blockingioerror(self):
# Various BlockingIOError issues
class C(str):
pass
c = C("")
b = self.BlockingIOError(1, c)
c.b = b
b.c = c
wr = weakref.ref(c)
del c, b
support.gc_collect()
self.assertTrue(wr() is None, wr)
def test_abcs(self):
# Test the visible base classes are ABCs.
self.assertIsInstance(self.IOBase, abc.ABCMeta)
self.assertIsInstance(self.RawIOBase, abc.ABCMeta)
self.assertIsInstance(self.BufferedIOBase, abc.ABCMeta)
self.assertIsInstance(self.TextIOBase, abc.ABCMeta)
def _check_abc_inheritance(self, abcmodule):
with self.open(support.TESTFN, "wb", buffering=0) as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "wb") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertIsInstance(f, abcmodule.BufferedIOBase)
self.assertNotIsInstance(f, abcmodule.TextIOBase)
with self.open(support.TESTFN, "w") as f:
self.assertIsInstance(f, abcmodule.IOBase)
self.assertNotIsInstance(f, abcmodule.RawIOBase)
self.assertNotIsInstance(f, abcmodule.BufferedIOBase)
self.assertIsInstance(f, abcmodule.TextIOBase)
def test_abc_inheritance(self):
# Test implementations inherit from their respective ABCs
self._check_abc_inheritance(self)
def test_abc_inheritance_official(self):
# Test implementations inherit from the official ABCs of the
# baseline "io" module.
self._check_abc_inheritance(io)
def _check_warn_on_dealloc(self, *args, **kwargs):
f = open(*args, **kwargs)
r = repr(f)
with self.assertWarns(ResourceWarning) as cm:
f = None
support.gc_collect()
self.assertIn(r, str(cm.warning.args[0]))
def test_warn_on_dealloc(self):
self._check_warn_on_dealloc(support.TESTFN, "wb", buffering=0)
self._check_warn_on_dealloc(support.TESTFN, "wb")
self._check_warn_on_dealloc(support.TESTFN, "w")
def _check_warn_on_dealloc_fd(self, *args, **kwargs):
fds = []
def cleanup_fds():
for fd in fds:
try:
os.close(fd)
except OSError as e:
if e.errno != errno.EBADF:
raise
self.addCleanup(cleanup_fds)
r, w = os.pipe()
fds += r, w
self._check_warn_on_dealloc(r, *args, **kwargs)
# When using closefd=False, there's no warning
r, w = os.pipe()
fds += r, w
with warnings.catch_warnings(record=True) as recorded:
open(r, *args, closefd=False, **kwargs)
support.gc_collect()
self.assertEqual(recorded, [])
def test_warn_on_dealloc_fd(self):
self._check_warn_on_dealloc_fd("rb", buffering=0)
self._check_warn_on_dealloc_fd("rb")
self._check_warn_on_dealloc_fd("r")
def test_pickling(self):
# Pickling file objects is forbidden
for kwargs in [
{"mode": "w"},
{"mode": "wb"},
{"mode": "wb", "buffering": 0},
{"mode": "r"},
{"mode": "rb"},
{"mode": "rb", "buffering": 0},
{"mode": "w+"},
{"mode": "w+b"},
{"mode": "w+b", "buffering": 0},
]:
for protocol in range(pickle.HIGHEST_PROTOCOL + 1):
with self.open(support.TESTFN, **kwargs) as f:
self.assertRaises(TypeError, pickle.dumps, f, protocol)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_bigbuf(self):
self._test_nonblock_pipe_write(16*1024)
@unittest.skipUnless(fcntl, 'fcntl required for this test')
def test_nonblock_pipe_write_smallbuf(self):
self._test_nonblock_pipe_write(1024)
def _set_non_blocking(self, fd):
flags = fcntl.fcntl(fd, fcntl.F_GETFL)
self.assertNotEqual(flags, -1)
res = fcntl.fcntl(fd, fcntl.F_SETFL, flags | os.O_NONBLOCK)
self.assertEqual(res, 0)
def _test_nonblock_pipe_write(self, bufsize):
sent = []
received = []
r, w = os.pipe()
self._set_non_blocking(r)
self._set_non_blocking(w)
# To exercise all code paths in the C implementation we need
# to play with buffer sizes. For instance, if we choose a
# buffer size less than or equal to _PIPE_BUF (4096 on Linux)
# then we will never get a partial write of the buffer.
rf = self.open(r, mode='rb', closefd=True, buffering=bufsize)
wf = self.open(w, mode='wb', closefd=True, buffering=bufsize)
with rf, wf:
for N in 9999, 73, 7574:
try:
i = 0
while True:
msg = bytes([i % 26 + 97]) * N
sent.append(msg)
wf.write(msg)
i += 1
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
sent[-1] = sent[-1][:e.characters_written]
received.append(rf.read())
msg = b'BLOCKED'
wf.write(msg)
sent.append(msg)
while True:
try:
wf.flush()
break
except self.BlockingIOError as e:
self.assertEqual(e.args[0], errno.EAGAIN)
self.assertEqual(e.args[2], e.characters_written)
self.assertEqual(e.characters_written, 0)
received.append(rf.read())
received += iter(rf.read, None)
sent, received = b''.join(sent), b''.join(received)
self.assertTrue(sent == received)
self.assertTrue(wf.closed)
self.assertTrue(rf.closed)
def test_create_fail(self):
# 'x' mode fails if file is existing
with self.open(support.TESTFN, 'w'):
pass
self.assertRaises(FileExistsError, self.open, support.TESTFN, 'x')
def test_create_writes(self):
# 'x' mode opens for writing
with self.open(support.TESTFN, 'xb') as f:
f.write(b"spam")
with self.open(support.TESTFN, 'rb') as f:
self.assertEqual(b"spam", f.read())
def test_open_allargs(self):
# there used to be a buffer overflow in the parser for rawmode
self.assertRaises(ValueError, self.open, support.TESTFN, 'rwax+')
class CMiscIOTest(MiscIOTest):
io = io
def test_readinto_buffer_overflow(self):
# Issue #18025
class BadReader(self.io.BufferedIOBase):
def read(self, n=-1):
return b'x' * 10**6
bufio = BadReader()
b = bytearray(2)
self.assertRaises(ValueError, bufio.readinto, b)
class PyMiscIOTest(MiscIOTest):
io = pyio
@unittest.skipIf(os.name == 'nt', 'POSIX signals required for this test.')
class SignalsTest(unittest.TestCase):
def setUp(self):
self.oldalrm = signal.signal(signal.SIGALRM, self.alarm_interrupt)
def tearDown(self):
signal.signal(signal.SIGALRM, self.oldalrm)
def alarm_interrupt(self, sig, frame):
1/0
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write(self, item, bytes, **fdopen_kwargs):
"""Check that a partial write, when it gets interrupted, properly
invokes the signal handler, and bubbles up the exception raised
in the latter."""
read_results = []
def _read():
if hasattr(signal, 'pthread_sigmask'):
signal.pthread_sigmask(signal.SIG_BLOCK, [signal.SIGALRM])
s = os.read(r, 1)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
try:
wio = self.io.open(w, **fdopen_kwargs)
t.start()
# Fill the pipe enough that the write will be blocking.
# It will be interrupted by the timer armed above. Since the
# other thread has read one byte, the low-level write will
# return with a successful (partial) result rather than an EINTR.
# The buffered IO layer must check for pending signal
# handlers, which in this case will invoke alarm_interrupt().
signal.alarm(1)
try:
self.assertRaises(ZeroDivisionError,
wio.write, item * (support.PIPE_MAX_SIZE // len(item) + 1))
finally:
signal.alarm(0)
t.join()
# We got one byte, get another one and check that it isn't a
# repeat of the first one.
read_results.append(os.read(r, 1))
self.assertEqual(read_results, [bytes[0:1], bytes[1:2]])
finally:
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and block again.
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_unbuffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb", buffering=0)
def test_interrupted_write_buffered(self):
self.check_interrupted_write(b"xy", b"xy", mode="wb")
# Issue #22331: The test hangs on FreeBSD 7.2
@support.requires_freebsd_version(8)
def test_interrupted_write_text(self):
self.check_interrupted_write("xy", b"xy", mode="w", encoding="ascii")
@support.no_tracing
def check_reentrant_write(self, data, **fdopen_kwargs):
def on_alarm(*args):
# Will be called reentrantly from the same thread
wio.write(data)
1/0
signal.signal(signal.SIGALRM, on_alarm)
r, w = os.pipe()
wio = self.io.open(w, **fdopen_kwargs)
try:
signal.alarm(1)
# Either the reentrant call to wio.write() fails with RuntimeError,
# or the signal handler raises ZeroDivisionError.
with self.assertRaises((ZeroDivisionError, RuntimeError)) as cm:
while 1:
for i in range(100):
wio.write(data)
wio.flush()
# Make sure the buffer doesn't fill up and block further writes
os.read(r, len(data) * 100)
exc = cm.exception
if isinstance(exc, RuntimeError):
self.assertTrue(str(exc).startswith("reentrant call"), str(exc))
finally:
wio.close()
os.close(r)
def test_reentrant_write_buffered(self):
self.check_reentrant_write(b"xy", mode="wb")
def test_reentrant_write_text(self):
self.check_reentrant_write("xy", mode="w", encoding="ascii")
def check_interrupted_read_retry(self, decode, **fdopen_kwargs):
"""Check that a buffered read, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
def alarm_handler(sig, frame):
os.write(w, b"bar")
signal.signal(signal.SIGALRM, alarm_handler)
try:
rio = self.io.open(r, **fdopen_kwargs)
os.write(w, b"foo")
signal.alarm(1)
# Expected behaviour:
# - first raw read() returns partial b"foo"
# - second raw read() returns EINTR
# - third raw read() returns b"bar"
self.assertEqual(decode(rio.read(6)), "foobar")
finally:
rio.close()
os.close(w)
os.close(r)
def test_interrupted_read_retry_buffered(self):
self.check_interrupted_read_retry(lambda x: x.decode('latin1'),
mode="rb")
def test_interrupted_read_retry_text(self):
self.check_interrupted_read_retry(lambda x: x,
mode="r")
@unittest.skipUnless(threading, 'Threading required for this test.')
def check_interrupted_write_retry(self, item, **fdopen_kwargs):
"""Check that a buffered write, when it gets interrupted (either
returning a partial result or EINTR), properly invokes the signal
handler and retries if the latter returned successfully."""
select = support.import_module("select")
# A quantity that exceeds the buffer size of an anonymous pipe's
# write end.
N = support.PIPE_MAX_SIZE
r, w = os.pipe()
fdopen_kwargs["closefd"] = False
# We need a separate thread to read from the pipe and allow the
# write() to finish. This thread is started after the SIGALRM is
# received (forcing a first EINTR in write()).
read_results = []
write_finished = False
def _read():
while not write_finished:
while r in select.select([r], [], [], 1.0)[0]:
s = os.read(r, 1024)
read_results.append(s)
t = threading.Thread(target=_read)
t.daemon = True
def alarm1(sig, frame):
signal.signal(signal.SIGALRM, alarm2)
signal.alarm(1)
def alarm2(sig, frame):
t.start()
signal.signal(signal.SIGALRM, alarm1)
try:
wio = self.io.open(w, **fdopen_kwargs)
signal.alarm(1)
# Expected behaviour:
# - first raw write() is partial (because of the limited pipe buffer
# and the first alarm)
# - second raw write() returns EINTR (because of the second alarm)
# - subsequent write()s are successful (either partial or complete)
self.assertEqual(N, wio.write(item * N))
wio.flush()
write_finished = True
t.join()
self.assertEqual(N, sum(len(x) for x in read_results))
finally:
write_finished = True
os.close(w)
os.close(r)
# This is deliberate. If we didn't close the file descriptor
# before closing wio, wio would try to flush its internal
# buffer, and could block (in case of failure).
try:
wio.close()
except OSError as e:
if e.errno != errno.EBADF:
raise
def test_interrupted_write_retry_buffered(self):
self.check_interrupted_write_retry(b"x", mode="wb")
def test_interrupted_write_retry_text(self):
self.check_interrupted_write_retry("x", mode="w", encoding="latin1")
class CSignalsTest(SignalsTest):
io = io
class PySignalsTest(SignalsTest):
io = pyio
# Handling reentrancy issues would slow down _pyio even more, so the
# tests are disabled.
test_reentrant_write_buffered = None
test_reentrant_write_text = None
def load_tests(*args):
tests = (CIOTest, PyIOTest,
CBufferedReaderTest, PyBufferedReaderTest,
CBufferedWriterTest, PyBufferedWriterTest,
CBufferedRWPairTest, PyBufferedRWPairTest,
CBufferedRandomTest, PyBufferedRandomTest,
StatefulIncrementalDecoderTest,
CIncrementalNewlineDecoderTest, PyIncrementalNewlineDecoderTest,
CTextIOWrapperTest, PyTextIOWrapperTest,
CMiscIOTest, PyMiscIOTest,
CSignalsTest, PySignalsTest,
)
# Put the namespaces of the IO module we are testing and some useful mock
# classes in the __dict__ of each test.
mocks = (MockRawIO, MisbehavedRawIO, MockFileIO, CloseFailureIO,
MockNonBlockWriterIO, MockUnseekableIO, MockRawIOWithoutRead)
all_members = io.__all__ + ["IncrementalNewlineDecoder"]
c_io_ns = {name : getattr(io, name) for name in all_members}
py_io_ns = {name : getattr(pyio, name) for name in all_members}
globs = globals()
c_io_ns.update((x.__name__, globs["C" + x.__name__]) for x in mocks)
py_io_ns.update((x.__name__, globs["Py" + x.__name__]) for x in mocks)
# Avoid turning open into a bound method.
py_io_ns["open"] = pyio.OpenWrapper
for test in tests:
if test.__name__.startswith("C"):
for name, obj in c_io_ns.items():
setattr(test, name, obj)
elif test.__name__.startswith("Py"):
for name, obj in py_io_ns.items():
setattr(test, name, obj)
suite = unittest.TestSuite([unittest.makeSuite(test) for test in tests])
return suite
if __name__ == "__main__":
unittest.main()
| gpl-2.0 | 7,882,316,852,661,960,000 | 35.932594 | 96 | 0.559616 | false |
paulmadore/Eric-IDE | 6-6.0.9/eric/Plugins/VcsPlugins/vcsPySvn/SvnCommitDialog.py | 2 | 4235 | # -*- coding: utf-8 -*-
# Copyright (c) 2003 - 2015 Detlev Offenbach <[email protected]>
#
"""
Module implementing a dialog to enter the commit message.
"""
from __future__ import unicode_literals
import pysvn
from PyQt5.QtCore import pyqtSignal, Qt, pyqtSlot
from PyQt5.QtWidgets import QWidget, QDialogButtonBox
from .Ui_SvnCommitDialog import Ui_SvnCommitDialog
import Preferences
class SvnCommitDialog(QWidget, Ui_SvnCommitDialog):
"""
Class implementing a dialog to enter the commit message.
@signal accepted() emitted, if the dialog was accepted
@signal rejected() emitted, if the dialog was rejected
"""
accepted = pyqtSignal()
rejected = pyqtSignal()
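    # Typical usage (an illustrative sketch only; the slot name is up to the
    # caller):
    #   dlg = SvnCommitDialog(changelists)
    #   dlg.accepted.connect(onCommitAccepted)  # caller-defined slot
    # and, inside that slot, read dlg.logMessage() and dlg.changelistsData().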
def __init__(self, changelists, parent=None):
"""
Constructor
@param changelists list of available change lists (list of strings)
@param parent parent widget (QWidget)
"""
super(SvnCommitDialog, self).__init__(
parent, Qt.WindowFlags(Qt.Window))
self.setupUi(self)
if pysvn.svn_version < (1, 5, 0) or pysvn.version < (1, 6, 0):
self.changeListsGroup.hide()
else:
self.changeLists.addItems(sorted(changelists))
def showEvent(self, evt):
"""
Protected method called when the dialog is about to be shown.
@param evt the event (QShowEvent)
"""
self.recentCommitMessages = Preferences.toList(
Preferences.Prefs.settings.value('Subversion/Commits'))
self.recentComboBox.clear()
self.recentComboBox.addItem("")
self.recentComboBox.addItems(self.recentCommitMessages)
def logMessage(self):
"""
Public method to retrieve the log message.
This method has the side effect of saving the 20 most recent
commit messages for reuse.
@return the log message (string)
"""
msg = self.logEdit.toPlainText()
if msg:
if msg in self.recentCommitMessages:
self.recentCommitMessages.remove(msg)
self.recentCommitMessages.insert(0, msg)
no = int(Preferences.Prefs.settings.value(
'Subversion/CommitMessages', 20))
del self.recentCommitMessages[no:]
Preferences.Prefs.settings.setValue(
'Subversion/Commits', self.recentCommitMessages)
return msg
def hasChangelists(self):
"""
Public method to check, if the user entered some changelists.
@return flag indicating availability of changelists (boolean)
"""
return len(self.changeLists.selectedItems()) > 0
def changelistsData(self):
"""
Public method to retrieve the changelists data.
@return tuple containing the changelists (list of strings) and a flag
indicating to keep changelists (boolean)
"""
slists = [l.text().strip() for l in self.changeLists.selectedItems()
if l.text().strip() != ""]
if len(slists) == 0:
return [], False
return slists, self.keepChangeListsCheckBox.isChecked()
def on_buttonBox_clicked(self, button):
"""
Private slot called by a button of the button box clicked.
@param button button that was clicked (QAbstractButton)
"""
if button == self.buttonBox.button(QDialogButtonBox.Cancel):
self.logEdit.clear()
def on_buttonBox_accepted(self):
"""
Private slot called by the buttonBox accepted signal.
"""
self.close()
self.accepted.emit()
def on_buttonBox_rejected(self):
"""
Private slot called by the buttonBox rejected signal.
"""
self.close()
self.rejected.emit()
@pyqtSlot(str)
def on_recentComboBox_activated(self, txt):
"""
Private slot to select a commit message from recent ones.
@param txt selected recent commit message (string)
"""
if txt:
self.logEdit.setPlainText(txt)
| gpl-3.0 | 8,617,319,802,010,897,000 | 30.37037 | 77 | 0.599292 | false |
HadiOfBBG/pegasusrises | csvploader.py | 1 | 3875 | import csv
from collections import defaultdict
import StringIO
from google.appengine.ext import blobstore
from google.appengine.ext.webapp import blobstore_handlers
from jinja_template import JinjaTemplating
from google.appengine.ext import db
from google.appengine.api import memcache
from xml.dom import minidom
from models import pegasusFiles
import urllib
from google.appengine.api import urlfetch
from poster.encode import multipart_encode, MultipartParam
import logging
from google.appengine.api import users
import json
from google.appengine.api import files
import cloudstorage as gcs
from google.appengine.api import app_identity
import webapp2
from pegasus_email import EmailHandler as EmailHandler
import time
retry_params = gcs.RetryParams(initial_delay=0.2,
max_delay=5.0,
backoff_factor=2,
max_retry_period=15)
gcs.set_default_retry_params(retry_params)
pegasusrise_converter_api = 'http://msrc.gopagoda.io/api/sendXLSForms'
urlfetch.set_default_fetch_deadline(60)
class CSVUploadHandler(JinjaTemplating,blobstore_handlers.BlobstoreUploadHandler,blobstore_handlers.BlobstoreDownloadHandler):
def get(self):
JinjaTemplating.render_template_only(self,'hadi_test.html')
def post(self):
# EmailHandler.sendEmail()
try:
content_from_server = json.loads(self.request.body)
url = content_from_server['downloadUrl']
form_fields = {
"filename":"guess",
"url":url
}
form_data = urllib.urlencode(form_fields)
result = urlfetch.fetch(url=pegasusrise_converter_api,
payload=form_data,
method=urlfetch.POST,
headers={'Content-Type': 'application/x-www-form-urlencoded'})
# self.response.out.write(result.content)
if "Successful form upload" in result.content:
form_result = {
"status":"success",
"content":"successful"
}
return self.response.out.write(json.dumps(form_result))
elif "File was not recognized" in result.content:
form_result = {
"status":"failed",
"content":"file_recognized"
}
return self.response.out.write(json.dumps(form_result))
            elif ("You must have a sheet named (case-sensitive): survey " in result.content
                    or " There should be a choices sheet in this xlsform" in result.content):
form_result = {
"status":"failed",
"content":"There should be a survey, choices, and settings in this xlsform. Please ensure that the sheet names are all in small caps."
}
return self.response.out.write(json.dumps(form_result))
elif "application may have exceeded its per-minute or daily usage quotas" in result.content:
form_result = {
"status":"failed",
"content":"daily_quota_exceeded"
}
return self.response.out.write(json.dumps(form_result))
elif " Form Already Exists for this Namespace" in result.content:
form_result = {
"status":"failed",
"content":"form_already_exist"
}
return self.response.out.write(json.dumps(form_result))
else:
form_result = {
"status":"failed",
"content":result.content
}
return self.response.out.write(json.dumps(form_result))
except Exception,e :
form_result = {
"status":"failed",
"content":"timeout_exception"
}
return self.response.out.write(json.dumps(form_result)) | apache-2.0 | 8,669,387,720,736,268,000 | 34.236364 | 147 | 0.60671 | false |
jacksapper/math-thesis | ode.py | 1 | 2110 | # -*- coding: utf-8 -*-
#---IMPORTS---
import numpy as np
import matplotlib.pyplot as plt
#---CONSTANTS---
LBOUND = 0.
UBOUND = 1.
POINTS = 2**7
EPSILON = 10**-9
INITIAL = (0,1)
#Matrix is O(POINTS**2)
#---DERIVED CONSTANTS---
INTERVAL_LENGTH = (UBOUND-LBOUND)/(POINTS-1)
D0 = .5*(np.eye(POINTS-1,POINTS) \
+ np.roll(np.eye(POINTS-1,POINTS),1,1))
D1 = (1/INTERVAL_LENGTH)*(-1*np.eye(POINTS-1,POINTS) \
+ np.roll(np.eye(POINTS-1,POINTS),1,1))
D = D1-D0
A = D1.T @ D1 + D0.T @ D0
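#---NOTES (added for clarity)---
# D0 averages neighbouring grid values (midpoint interpolation) and D1 is the
# forward difference, so D = D1 - D0 discretizes u' - u on the interval
# midpoints; f(u) = 0.5*||D u||**2 below is therefore the least-squares
# residual of the ODE u' = u, whose exact solution with INITIAL = (0, 1) is
# yexact = exp(x). A = D1.T@D1 + D0.T@D0 is the discrete H^1 (Sobolev) Gram
# matrix that sobolev() uses to turn the ordinary gradient into a Sobolev one.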
k = 1
display = [1,2,3,4,5,6]
#---FUNCTIONS---
def step_size(u, v, tech='dynamic', size=EPSILON/10):
if tech=='dynamic':
upper = u.dot(v)
lower = v.dot(v)
return upper/lower
elif tech=='static':
return size
def f(u):
f = D @ u
result = (f).dot(f)
return .5*result
def df(u):
grad2=(D.T @ D) @ u
if INITIAL is not None:
grad2[index] = 0
return grad2
def sobolev(u):
gradH = np.linalg.solve(A,u)
return gradH
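# Why the linear solve in sobolev(): returning the H^1 (Sobolev) gradient
# A^{-1} * grad instead of the raw gradient is the usual preconditioning step
# in Sobolev gradient descent; the main loop below uses it as the descent
# direction together with the exact line-search step from step_size().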
def graph(x,y1,y2):
plt.plot(x,y1, 'r--',label='Approximation')
plt.plot(x,y2, label='Exact Solution')
plt.legend(loc='lower left')
#plt.savefig(
#'/home/jason/Dropbox/thesis/'
#'img/good-triv-bdd/{num}.png'
#.format(num=k), dpi=150)
#plt.show()
#---MAIN---
x = np.linspace(LBOUND,UBOUND,POINTS).T
yold = np.zeros(POINTS).T
ynew = 2. * np.ones(POINTS).T
yexact = np.exp(x)
if INITIAL is not None:
index = np.argmin(abs(x-INITIAL[0]))
ynew[index] = INITIAL[1]
A[index,:] = 0
A[index,index] = 1
while f(ynew) > EPSILON and k <= max(display):
grad = sobolev(df(ynew))
s = step_size((D @ ynew),(D @ grad),'dynamic')
#s = 10**-5
yold = np.copy(ynew)
ynew = yold - s*grad
if k in display:
print("|",k,"|",
round(f(ynew),3), "|",
round(s,3),"|",
round(ynew[POINTS//2],3),"|",
round(grad[POINTS//2],3),"|")
plt.plot(x,ynew,label='k={num}'.format(num=k))
k=k+1
print(k)
plt.legend(loc='lower right')
#plt.savefig(
#'/home/jason/Dropbox/thesis/'
#'newimg/sob-dyn-bdd.png',dpi=150)
plt.show() | gpl-3.0 | -7,026,794,695,713,071,000 | 22.455556 | 54 | 0.563507 | false |
Treisy/Django-Angular-Demo | product_wiki/urls.py | 1 | 1159 | from django.conf.urls import patterns, include, url
from django.contrib import admin
from .views import Index, Logout, FormView, PartialView
from products.forms import ProductForm, ProductImageForm, ProductTagForm
admin.autodiscover()
partial_patterns = patterns('',
url(r'^hero.html$', PartialView.as_view(template_name='hero.html'), name='hero'),
url(r'^list.html$', PartialView.as_view(template_name='list.html'), name='list'),
url(r'^product.html$', PartialView.as_view(template_name='product.html'), name='product'),
url(r'^product_form.html$', FormView.as_view(form_class=ProductForm), name='product_form'),
url(r'^image_form.html$', FormView.as_view(form_class=ProductImageForm), name='image_form'),
url(r'^tag_form.html$', FormView.as_view(form_class=ProductTagForm), name='tag_form'),
)
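# Illustrative note: these patterns are mounted below under r'^includes/'
# with namespace='partials', so e.g. reverse('partials:product_form')
# resolves to '/includes/product_form.html'.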
urlpatterns = patterns('',
url(r'^$', Index.as_view(), name='index'),
url(r'^logout/', Logout.as_view(), name='logout'),
url(r'^products/', include('products.urls'), name='products'),
url(r'^includes/', include(partial_patterns, namespace='partials')),
url(r'^admin/', include(admin.site.urls), name='admin'),
)
| mit | 7,677,513,963,242,823,000 | 41.925926 | 96 | 0.695427 | false |
batxes/4c2vhic | SHH_WT_models/SHH_WT_models_final_output_0.1_-0.1_11000/SHH_WT_models12285.py | 2 | 17580 | import _surface
import chimera
try:
import chimera.runCommand
except:
pass
from VolumePath import markerset as ms
try:
from VolumePath import Marker_Set, Link
new_marker_set=Marker_Set
except:
from VolumePath import volume_path_dialog
d= volume_path_dialog(True)
new_marker_set= d.new_marker_set
marker_sets={}
surf_sets={}
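# Commentary (added): every block below follows the same generated pattern --
# look up or create a named marker set, then place one spherical marker with
# s.place_marker((x, y, z), (r, g, b), radius); grey spheres use colour
# (0.7, 0.7, 0.7) and the highlighted particles use (1, 0.7, 0).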
if "particle_0 geometry" not in marker_sets:
s=new_marker_set('particle_0 geometry')
marker_sets["particle_0 geometry"]=s
s= marker_sets["particle_0 geometry"]
mark=s.place_marker((-1690.3, 4290.58, 4615.36), (0.7, 0.7, 0.7), 890.203)
if "particle_1 geometry" not in marker_sets:
s=new_marker_set('particle_1 geometry')
marker_sets["particle_1 geometry"]=s
s= marker_sets["particle_1 geometry"]
mark=s.place_marker((-24.9377, 3891.66, 5123.37), (0.7, 0.7, 0.7), 792.956)
if "particle_2 geometry" not in marker_sets:
s=new_marker_set('particle_2 geometry')
marker_sets["particle_2 geometry"]=s
s= marker_sets["particle_2 geometry"]
mark=s.place_marker((1279.37, 4483.25, 3848.06), (0.7, 0.7, 0.7), 856.786)
if "particle_3 geometry" not in marker_sets:
s=new_marker_set('particle_3 geometry')
marker_sets["particle_3 geometry"]=s
s= marker_sets["particle_3 geometry"]
mark=s.place_marker((-766.906, 3953.81, 2860.49), (0.7, 0.7, 0.7), 963.679)
if "particle_4 geometry" not in marker_sets:
s=new_marker_set('particle_4 geometry')
marker_sets["particle_4 geometry"]=s
s= marker_sets["particle_4 geometry"]
mark=s.place_marker((-352.134, 4601, 1328.95), (0.7, 0.7, 0.7), 761.442)
if "particle_5 geometry" not in marker_sets:
s=new_marker_set('particle_5 geometry')
marker_sets["particle_5 geometry"]=s
s= marker_sets["particle_5 geometry"]
mark=s.place_marker((1832.28, 5753.66, 1351.86), (0.7, 0.7, 0.7), 961.183)
if "particle_6 geometry" not in marker_sets:
s=new_marker_set('particle_6 geometry')
marker_sets["particle_6 geometry"]=s
s= marker_sets["particle_6 geometry"]
mark=s.place_marker((2927.47, 7052.03, 1579.07), (0.7, 0.7, 0.7), 753.151)
if "particle_7 geometry" not in marker_sets:
s=new_marker_set('particle_7 geometry')
marker_sets["particle_7 geometry"]=s
s= marker_sets["particle_7 geometry"]
mark=s.place_marker((2121.82, 6943.95, 1107.92), (1, 0.7, 0), 1098.07)
if "particle_8 geometry" not in marker_sets:
s=new_marker_set('particle_8 geometry')
marker_sets["particle_8 geometry"]=s
s= marker_sets["particle_8 geometry"]
mark=s.place_marker((4117.51, 7968.08, 2632.24), (0.7, 0.7, 0.7), 1010.42)
if "particle_9 geometry" not in marker_sets:
s=new_marker_set('particle_9 geometry')
marker_sets["particle_9 geometry"]=s
s= marker_sets["particle_9 geometry"]
mark=s.place_marker((3893.38, 9593.43, 3067.57), (1, 0.7, 0), 821.043)
if "particle_10 geometry" not in marker_sets:
s=new_marker_set('particle_10 geometry')
marker_sets["particle_10 geometry"]=s
s= marker_sets["particle_10 geometry"]
mark=s.place_marker((5423.77, 9457.83, 4201.69), (0.7, 0.7, 0.7), 873.876)
if "particle_11 geometry" not in marker_sets:
s=new_marker_set('particle_11 geometry')
marker_sets["particle_11 geometry"]=s
s= marker_sets["particle_11 geometry"]
mark=s.place_marker((4836.66, 8672.7, 5147.62), (0.7, 0.7, 0.7), 625.532)
if "particle_12 geometry" not in marker_sets:
s=new_marker_set('particle_12 geometry')
marker_sets["particle_12 geometry"]=s
s= marker_sets["particle_12 geometry"]
mark=s.place_marker((4601.08, 7998.03, 6545.19), (0.7, 0.7, 0.7), 880.474)
if "particle_13 geometry" not in marker_sets:
s=new_marker_set('particle_13 geometry')
marker_sets["particle_13 geometry"]=s
s= marker_sets["particle_13 geometry"]
mark=s.place_marker((3063.66, 8051.7, 6309.53), (0.7, 0.7, 0.7), 659.161)
if "particle_14 geometry" not in marker_sets:
s=new_marker_set('particle_14 geometry')
marker_sets["particle_14 geometry"]=s
s= marker_sets["particle_14 geometry"]
mark=s.place_marker((2105.8, 8148.96, 8370.46), (0.7, 0.7, 0.7), 831.745)
if "particle_15 geometry" not in marker_sets:
s=new_marker_set('particle_15 geometry')
marker_sets["particle_15 geometry"]=s
s= marker_sets["particle_15 geometry"]
mark=s.place_marker((1825.99, 5954.71, 10535.8), (0.7, 0.7, 0.7), 803.065)
if "particle_16 geometry" not in marker_sets:
s=new_marker_set('particle_16 geometry')
marker_sets["particle_16 geometry"]=s
s= marker_sets["particle_16 geometry"]
mark=s.place_marker((2709.69, 4468.38, 9640.35), (0.7, 0.7, 0.7), 610.262)
if "particle_17 geometry" not in marker_sets:
s=new_marker_set('particle_17 geometry')
marker_sets["particle_17 geometry"]=s
s= marker_sets["particle_17 geometry"]
mark=s.place_marker((3853.55, 5216.93, 9937.44), (0.7, 0.7, 0.7), 741.265)
if "particle_18 geometry" not in marker_sets:
s=new_marker_set('particle_18 geometry')
marker_sets["particle_18 geometry"]=s
s= marker_sets["particle_18 geometry"]
mark=s.place_marker((4259.36, 6395.01, 8797.77), (0.7, 0.7, 0.7), 748.625)
if "particle_19 geometry" not in marker_sets:
s=new_marker_set('particle_19 geometry')
marker_sets["particle_19 geometry"]=s
s= marker_sets["particle_19 geometry"]
mark=s.place_marker((4720.08, 7807.27, 8904.44), (0.7, 0.7, 0.7), 677.181)
if "particle_20 geometry" not in marker_sets:
s=new_marker_set('particle_20 geometry')
marker_sets["particle_20 geometry"]=s
s= marker_sets["particle_20 geometry"]
mark=s.place_marker((5256.46, 7433.91, 6565.31), (0.7, 0.7, 0.7), 616.015)
if "particle_21 geometry" not in marker_sets:
s=new_marker_set('particle_21 geometry')
marker_sets["particle_21 geometry"]=s
s= marker_sets["particle_21 geometry"]
mark=s.place_marker((5006.41, 7054.07, 8560.47), (0.7, 0.7, 0.7), 653.154)
if "particle_22 geometry" not in marker_sets:
s=new_marker_set('particle_22 geometry')
marker_sets["particle_22 geometry"]=s
s= marker_sets["particle_22 geometry"]
mark=s.place_marker((5759.38, 6385.99, 8398), (0.7, 0.7, 0.7), 595.33)
if "particle_23 geometry" not in marker_sets:
s=new_marker_set('particle_23 geometry')
marker_sets["particle_23 geometry"]=s
s= marker_sets["particle_23 geometry"]
mark=s.place_marker((5669.82, 5455.97, 9257.5), (0.7, 0.7, 0.7), 627.901)
if "particle_24 geometry" not in marker_sets:
s=new_marker_set('particle_24 geometry')
marker_sets["particle_24 geometry"]=s
s= marker_sets["particle_24 geometry"]
mark=s.place_marker((4382.3, 4970.12, 9434.82), (0.7, 0.7, 0.7), 663.941)
if "particle_25 geometry" not in marker_sets:
s=new_marker_set('particle_25 geometry')
marker_sets["particle_25 geometry"]=s
s= marker_sets["particle_25 geometry"]
mark=s.place_marker((3407.47, 4624.76, 10633.2), (0.7, 0.7, 0.7), 663.899)
if "particle_26 geometry" not in marker_sets:
s=new_marker_set('particle_26 geometry')
marker_sets["particle_26 geometry"]=s
s= marker_sets["particle_26 geometry"]
mark=s.place_marker((4130.1, 5527.95, 9589.32), (0.7, 0.7, 0.7), 644.694)
if "particle_27 geometry" not in marker_sets:
s=new_marker_set('particle_27 geometry')
marker_sets["particle_27 geometry"]=s
s= marker_sets["particle_27 geometry"]
mark=s.place_marker((3948.58, 6318.28, 7508.14), (0.7, 0.7, 0.7), 896.802)
if "particle_28 geometry" not in marker_sets:
s=new_marker_set('particle_28 geometry')
marker_sets["particle_28 geometry"]=s
s= marker_sets["particle_28 geometry"]
mark=s.place_marker((5405.14, 6432.74, 7711.31), (0.7, 0.7, 0.7), 576.38)
if "particle_29 geometry" not in marker_sets:
s=new_marker_set('particle_29 geometry')
marker_sets["particle_29 geometry"]=s
s= marker_sets["particle_29 geometry"]
mark=s.place_marker((5818.05, 7025.15, 6776.29), (0.7, 0.7, 0.7), 635.092)
if "particle_30 geometry" not in marker_sets:
s=new_marker_set('particle_30 geometry')
marker_sets["particle_30 geometry"]=s
s= marker_sets["particle_30 geometry"]
mark=s.place_marker((6182.95, 6450.39, 6802.61), (0.7, 0.7, 0.7), 651.505)
if "particle_31 geometry" not in marker_sets:
s=new_marker_set('particle_31 geometry')
marker_sets["particle_31 geometry"]=s
s= marker_sets["particle_31 geometry"]
mark=s.place_marker((5393.4, 7748.82, 5974.5), (0.7, 0.7, 0.7), 718.042)
if "particle_32 geometry" not in marker_sets:
s=new_marker_set('particle_32 geometry')
marker_sets["particle_32 geometry"]=s
s= marker_sets["particle_32 geometry"]
mark=s.place_marker((6426.32, 7644.07, 7405.21), (0.7, 0.7, 0.7), 726.714)
if "particle_33 geometry" not in marker_sets:
s=new_marker_set('particle_33 geometry')
marker_sets["particle_33 geometry"]=s
s= marker_sets["particle_33 geometry"]
mark=s.place_marker((6171.91, 6231.13, 7874.79), (0.7, 0.7, 0.7), 673.585)
if "particle_34 geometry" not in marker_sets:
s=new_marker_set('particle_34 geometry')
marker_sets["particle_34 geometry"]=s
s= marker_sets["particle_34 geometry"]
mark=s.place_marker((5259.35, 6128.36, 8824.31), (0.7, 0.7, 0.7), 598.418)
if "particle_35 geometry" not in marker_sets:
s=new_marker_set('particle_35 geometry')
marker_sets["particle_35 geometry"]=s
s= marker_sets["particle_35 geometry"]
mark=s.place_marker((4657.13, 6314.64, 10079.7), (0.7, 0.7, 0.7), 693.382)
if "particle_36 geometry" not in marker_sets:
s=new_marker_set('particle_36 geometry')
marker_sets["particle_36 geometry"]=s
s= marker_sets["particle_36 geometry"]
mark=s.place_marker((5083.43, 6716.62, 7698.19), (0.7, 0.7, 0.7), 804.038)
if "particle_37 geometry" not in marker_sets:
s=new_marker_set('particle_37 geometry')
marker_sets["particle_37 geometry"]=s
s= marker_sets["particle_37 geometry"]
mark=s.place_marker((5308.22, 5875.42, 9379), (0.7, 0.7, 0.7), 816.178)
if "particle_38 geometry" not in marker_sets:
s=new_marker_set('particle_38 geometry')
marker_sets["particle_38 geometry"]=s
s= marker_sets["particle_38 geometry"]
mark=s.place_marker((4719.28, 5580.15, 8645.66), (0.7, 0.7, 0.7), 776.628)
if "particle_39 geometry" not in marker_sets:
s=new_marker_set('particle_39 geometry')
marker_sets["particle_39 geometry"]=s
s= marker_sets["particle_39 geometry"]
mark=s.place_marker((5954.63, 6316.39, 9259.81), (0.7, 0.7, 0.7), 750.656)
if "particle_40 geometry" not in marker_sets:
s=new_marker_set('particle_40 geometry')
marker_sets["particle_40 geometry"]=s
s= marker_sets["particle_40 geometry"]
mark=s.place_marker((6640.49, 6474.67, 7737.59), (0.7, 0.7, 0.7), 709.625)
if "particle_41 geometry" not in marker_sets:
s=new_marker_set('particle_41 geometry')
marker_sets["particle_41 geometry"]=s
s= marker_sets["particle_41 geometry"]
mark=s.place_marker((7611.41, 7904.52, 7060.05), (0.7, 0.7, 0.7), 927.681)
if "particle_42 geometry" not in marker_sets:
s=new_marker_set('particle_42 geometry')
marker_sets["particle_42 geometry"]=s
s= marker_sets["particle_42 geometry"]
mark=s.place_marker((9149.69, 7202.87, 9077.33), (0.7, 0.7, 0.7), 1088.21)
if "particle_43 geometry" not in marker_sets:
s=new_marker_set('particle_43 geometry')
marker_sets["particle_43 geometry"]=s
s= marker_sets["particle_43 geometry"]
mark=s.place_marker((8724.99, 7972.31, 7381.89), (0.7, 0.7, 0.7), 736.147)
if "particle_44 geometry" not in marker_sets:
s=new_marker_set('particle_44 geometry')
marker_sets["particle_44 geometry"]=s
s= marker_sets["particle_44 geometry"]
mark=s.place_marker((7536.35, 6935.17, 7931.27), (0.7, 0.7, 0.7), 861.101)
if "particle_45 geometry" not in marker_sets:
s=new_marker_set('particle_45 geometry')
marker_sets["particle_45 geometry"]=s
s= marker_sets["particle_45 geometry"]
mark=s.place_marker((7037.1, 6752.11, 6034.45), (0.7, 0.7, 0.7), 924.213)
if "particle_46 geometry" not in marker_sets:
s=new_marker_set('particle_46 geometry')
marker_sets["particle_46 geometry"]=s
s= marker_sets["particle_46 geometry"]
mark=s.place_marker((8227.92, 5178.32, 5921.07), (0.7, 0.7, 0.7), 881.828)
if "particle_47 geometry" not in marker_sets:
s=new_marker_set('particle_47 geometry')
marker_sets["particle_47 geometry"]=s
s= marker_sets["particle_47 geometry"]
mark=s.place_marker((9803.05, 5878.38, 7047.71), (0.7, 0.7, 0.7), 927.681)
if "particle_48 geometry" not in marker_sets:
s=new_marker_set('particle_48 geometry')
marker_sets["particle_48 geometry"]=s
s= marker_sets["particle_48 geometry"]
mark=s.place_marker((9114.78, 5347.59, 5372), (0.7, 0.7, 0.7), 831.576)
if "particle_49 geometry" not in marker_sets:
s=new_marker_set('particle_49 geometry')
marker_sets["particle_49 geometry"]=s
s= marker_sets["particle_49 geometry"]
mark=s.place_marker((7773.09, 5543.72, 4067.75), (0.7, 0.7, 0.7), 859.494)
if "particle_50 geometry" not in marker_sets:
s=new_marker_set('particle_50 geometry')
marker_sets["particle_50 geometry"]=s
s= marker_sets["particle_50 geometry"]
mark=s.place_marker((8793.34, 6302.67, 4449.39), (0.7, 0.7, 0.7), 704.845)
if "particle_51 geometry" not in marker_sets:
s=new_marker_set('particle_51 geometry')
marker_sets["particle_51 geometry"]=s
s= marker_sets["particle_51 geometry"]
mark=s.place_marker((7245.27, 6631.2, 3935.42), (0.7, 0.7, 0.7), 804.461)
if "particle_52 geometry" not in marker_sets:
s=new_marker_set('particle_52 geometry')
marker_sets["particle_52 geometry"]=s
s= marker_sets["particle_52 geometry"]
mark=s.place_marker((5482.84, 6706.42, 3809.95), (0.7, 0.7, 0.7), 934.111)
if "particle_53 geometry" not in marker_sets:
s=new_marker_set('particle_53 geometry')
marker_sets["particle_53 geometry"]=s
s= marker_sets["particle_53 geometry"]
mark=s.place_marker((5505.63, 7574.1, 2525.17), (0.7, 0.7, 0.7), 988.339)
if "particle_54 geometry" not in marker_sets:
s=new_marker_set('particle_54 geometry')
marker_sets["particle_54 geometry"]=s
s= marker_sets["particle_54 geometry"]
mark=s.place_marker((6251.14, 7490.1, 2286.31), (1, 0.7, 0), 803.7)
if "particle_55 geometry" not in marker_sets:
s=new_marker_set('particle_55 geometry')
marker_sets["particle_55 geometry"]=s
s= marker_sets["particle_55 geometry"]
mark=s.place_marker((6846.11, 5868.02, 3493.38), (0.7, 0.7, 0.7), 812.118)
if "particle_56 geometry" not in marker_sets:
s=new_marker_set('particle_56 geometry')
marker_sets["particle_56 geometry"]=s
s= marker_sets["particle_56 geometry"]
mark=s.place_marker((5809.37, 4129.1, 2698.34), (0.7, 0.7, 0.7), 1177.93)
if "particle_57 geometry" not in marker_sets:
s=new_marker_set('particle_57 geometry')
marker_sets["particle_57 geometry"]=s
s= marker_sets["particle_57 geometry"]
mark=s.place_marker((5981.53, 1616.22, 2915.52), (0.7, 0.7, 0.7), 1038.21)
if "particle_58 geometry" not in marker_sets:
s=new_marker_set('particle_58 geometry')
marker_sets["particle_58 geometry"]=s
s= marker_sets["particle_58 geometry"]
mark=s.place_marker((6098.99, 1038.78, 3036.13), (1, 0.7, 0), 758.016)
if "particle_59 geometry" not in marker_sets:
s=new_marker_set('particle_59 geometry')
marker_sets["particle_59 geometry"]=s
s= marker_sets["particle_59 geometry"]
mark=s.place_marker((5624.21, 1050.23, 2326.99), (0.7, 0.7, 0.7), 824.046)
if "particle_60 geometry" not in marker_sets:
s=new_marker_set('particle_60 geometry')
marker_sets["particle_60 geometry"]=s
s= marker_sets["particle_60 geometry"]
mark=s.place_marker((6219.02, 1735.06, 2632.86), (0.7, 0.7, 0.7), 793.379)
if "particle_61 geometry" not in marker_sets:
s=new_marker_set('particle_61 geometry')
marker_sets["particle_61 geometry"]=s
s= marker_sets["particle_61 geometry"]
mark=s.place_marker((6630.71, 1426.68, 1953.61), (0.7, 0.7, 0.7), 1011.56)
if "particle_62 geometry" not in marker_sets:
s=new_marker_set('particle_62 geometry')
marker_sets["particle_62 geometry"]=s
s= marker_sets["particle_62 geometry"]
mark=s.place_marker((5933.07, 2977.85, 2809.34), (0.7, 0.7, 0.7), 1097.01)
if "particle_63 geometry" not in marker_sets:
s=new_marker_set('particle_63 geometry')
marker_sets["particle_63 geometry"]=s
s= marker_sets["particle_63 geometry"]
mark=s.place_marker((6083.73, 1674.94, 1411.34), (0.7, 0.7, 0.7), 851.626)
if "particle_64 geometry" not in marker_sets:
s=new_marker_set('particle_64 geometry')
marker_sets["particle_64 geometry"]=s
s= marker_sets["particle_64 geometry"]
mark=s.place_marker((6176.46, -137.234, 491.226), (0.7, 0.7, 0.7), 869.434)
if "particle_65 geometry" not in marker_sets:
s=new_marker_set('particle_65 geometry')
marker_sets["particle_65 geometry"]=s
s= marker_sets["particle_65 geometry"]
mark=s.place_marker((7613.92, 950.761, 529.167), (0.7, 0.7, 0.7), 818.463)
if "particle_66 geometry" not in marker_sets:
s=new_marker_set('particle_66 geometry')
marker_sets["particle_66 geometry"]=s
s= marker_sets["particle_66 geometry"]
mark=s.place_marker((8269.95, -350.862, 1353.52), (0.7, 0.7, 0.7), 759.539)
if "particle_67 geometry" not in marker_sets:
s=new_marker_set('particle_67 geometry')
marker_sets["particle_67 geometry"]=s
s= marker_sets["particle_67 geometry"]
mark=s.place_marker((7166.23, 1797.68, 1763.36), (0.7, 0.7, 0.7), 1088.59)
if "particle_68 geometry" not in marker_sets:
s=new_marker_set('particle_68 geometry')
marker_sets["particle_68 geometry"]=s
s= marker_sets["particle_68 geometry"]
mark=s.place_marker((6629.52, -109.244, 1206.55), (0.7, 0.7, 0.7), 822.312)
if "particle_69 geometry" not in marker_sets:
s=new_marker_set('particle_69 geometry')
marker_sets["particle_69 geometry"]=s
s= marker_sets["particle_69 geometry"]
mark=s.place_marker((7975.76, -901.947, 1617.31), (0.7, 0.7, 0.7), 749.81)
if "particle_70 geometry" not in marker_sets:
s=new_marker_set('particle_70 geometry')
marker_sets["particle_70 geometry"]=s
s= marker_sets["particle_70 geometry"]
mark=s.place_marker((8197.58, 293.537, 2449.64), (0.7, 0.7, 0.7), 764.488)
for k in surf_sets.keys():
chimera.openModels.add([surf_sets[k]])
| gpl-3.0 | -7,974,832,448,865,208,000 | 46.131367 | 75 | 0.699659 | false |
danielkitta/libsigrokdecode | decoders/rgb_led_spi/pd.py | 8 | 2072 | ##
## This file is part of the libsigrokdecode project.
##
## Copyright (C) 2014 Matt Ranostay <[email protected]>
##
## This program is free software; you can redistribute it and/or modify
## it under the terms of the GNU General Public License as published by
## the Free Software Foundation; either version 2 of the License, or
## (at your option) any later version.
##
## This program is distributed in the hope that it will be useful,
## but WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
## GNU General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with this program; if not, write to the Free Software
## Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
##
import sigrokdecode as srd
class Decoder(srd.Decoder):
api_version = 2
id = 'rgb_led_spi'
name = 'RGB LED (SPI)'
longname = 'RGB LED string decoder (SPI)'
desc = 'RGB LED string protocol (RGB values clocked over SPI).'
license = 'gplv2'
inputs = ['spi']
outputs = ['rgb_led_spi']
annotations = (
('rgb', 'RGB values'),
)
def __init__(self, **kwargs):
self.ss_cmd, self.es_cmd = 0, 0
self.mosi_bytes = []
def start(self):
self.out_ann = self.register(srd.OUTPUT_ANN)
def putx(self, data):
self.put(self.ss_cmd, self.es_cmd, self.out_ann, data)
def decode(self, ss, es, data):
ptype, mosi, miso = data
# Only care about data packets.
if ptype != 'DATA':
return
self.ss, self.es = ss, es
if len(self.mosi_bytes) == 0:
self.ss_cmd = ss
self.mosi_bytes.append(mosi)
# RGB value == 3 bytes
if len(self.mosi_bytes) != 3:
return
red, green, blue = self.mosi_bytes
rgb_value = int(red) << 16 | int(green) << 8 | int(blue)
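        # The three buffered MOSI bytes above are packed MSB-first into one
        # 24-bit colour, e.g. (illustrative) red=0x12, green=0x34, blue=0x56
        # gives rgb_value 0x123456, annotated below as "#123456".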
self.es_cmd = es
self.putx([0, ['#%.6x' % rgb_value]])
self.mosi_bytes = []
| gpl-3.0 | 4,428,087,484,116,131,000 | 29.925373 | 76 | 0.616313 | false |
vortex-ape/scikit-learn | sklearn/datasets/__init__.py | 4 | 3794 | """
The :mod:`sklearn.datasets` module includes utilities to load datasets,
including methods to load and fetch popular reference datasets. It also
features some artificial data generators.
"""
from .base import load_breast_cancer
from .base import load_boston
from .base import load_diabetes
from .base import load_digits
from .base import load_files
from .base import load_iris
from .base import load_linnerud
from .base import load_sample_images
from .base import load_sample_image
from .base import load_wine
from .base import get_data_home
from .base import clear_data_home
from .covtype import fetch_covtype
from .kddcup99 import fetch_kddcup99
from .mlcomp import load_mlcomp
from .lfw import fetch_lfw_pairs
from .lfw import fetch_lfw_people
from .twenty_newsgroups import fetch_20newsgroups
from .twenty_newsgroups import fetch_20newsgroups_vectorized
from .mldata import fetch_mldata, mldata_filename
from .openml import fetch_openml
from .samples_generator import make_classification
from .samples_generator import make_multilabel_classification
from .samples_generator import make_hastie_10_2
from .samples_generator import make_regression
from .samples_generator import make_blobs
from .samples_generator import make_moons
from .samples_generator import make_circles
from .samples_generator import make_friedman1
from .samples_generator import make_friedman2
from .samples_generator import make_friedman3
from .samples_generator import make_low_rank_matrix
from .samples_generator import make_sparse_coded_signal
from .samples_generator import make_sparse_uncorrelated
from .samples_generator import make_spd_matrix
from .samples_generator import make_swiss_roll
from .samples_generator import make_s_curve
from .samples_generator import make_sparse_spd_matrix
from .samples_generator import make_gaussian_quantiles
from .samples_generator import make_biclusters
from .samples_generator import make_checkerboard
from .svmlight_format import load_svmlight_file
from .svmlight_format import load_svmlight_files
from .svmlight_format import dump_svmlight_file
from .olivetti_faces import fetch_olivetti_faces
from .species_distributions import fetch_species_distributions
from .california_housing import fetch_california_housing
from .rcv1 import fetch_rcv1
__all__ = ['clear_data_home',
'dump_svmlight_file',
'fetch_20newsgroups',
'fetch_20newsgroups_vectorized',
'fetch_lfw_pairs',
'fetch_lfw_people',
'fetch_mldata',
'fetch_olivetti_faces',
'fetch_species_distributions',
'fetch_california_housing',
'fetch_covtype',
'fetch_rcv1',
'fetch_kddcup99',
'fetch_openml',
'get_data_home',
'load_boston',
'load_diabetes',
'load_digits',
'load_files',
'load_iris',
'load_breast_cancer',
'load_linnerud',
'load_mlcomp',
'load_sample_image',
'load_sample_images',
'load_svmlight_file',
'load_svmlight_files',
'load_wine',
'make_biclusters',
'make_blobs',
'make_circles',
'make_classification',
'make_checkerboard',
'make_friedman1',
'make_friedman2',
'make_friedman3',
'make_gaussian_quantiles',
'make_hastie_10_2',
'make_low_rank_matrix',
'make_moons',
'make_multilabel_classification',
'make_regression',
'make_s_curve',
'make_sparse_coded_signal',
'make_sparse_spd_matrix',
'make_sparse_uncorrelated',
'make_spd_matrix',
'make_swiss_roll',
'mldata_filename']
| bsd-3-clause | -360,707,843,709,354,500 | 35.480769 | 71 | 0.684238 | false |
devoncarew/sky_engine | ci/check_gn_format.py | 4 | 1616 | #!/usr/bin/env python
#
# Copyright 2013 The Flutter Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import sys
import subprocess
import os
import argparse
import errno
import shutil
def GetGNFiles(directory):
directory = os.path.abspath(directory)
gn_files = []
assert os.path.exists(directory), "Directory must exist %s" % directory
for root, dirs, files in os.walk(directory):
for file in files:
if file.endswith(".gn") or file.endswith(".gni"):
gn_files.append(os.path.join(root, file))
return gn_files
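# Example (illustrative): GetGNFiles('/src/flutter') walks the tree and
# returns absolute paths to every .gn/.gni file, which main() below feeds one
# at a time to `gn format` (optionally with --dry-run).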
def main():
  parser = argparse.ArgumentParser()
parser.add_argument('--gn-binary', dest='gn_binary', required=True, type=str)
parser.add_argument('--dry-run', dest='dry_run', default=False, action='store_true')
parser.add_argument('--root-directory', dest='root_directory', required=True, type=str)
args = parser.parse_args()
gn_binary = os.path.abspath(args.gn_binary)
assert os.path.exists(gn_binary), "GN Binary must exist %s" % gn_binary
gn_command = [ gn_binary, 'format']
if args.dry_run:
gn_command.append('--dry-run')
for gn_file in GetGNFiles(args.root_directory):
if subprocess.call(gn_command + [ gn_file ]) != 0:
print "ERROR: '%s' is incorrectly formatted." % os.path.relpath(gn_file, args.root_directory)
print "Format the same with 'gn format' using the 'gn' binary in third_party/gn/gn."
print "Or, run ./ci/check_gn_format.py without '--dry-run'"
return 1
return 0
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | -4,946,851,548,739,962,000 | 30.686275 | 99 | 0.681312 | false |
wolverineav/neutron | neutron/services/tag/tag_plugin.py | 4 | 5152 | #
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
import functools
from oslo_db import api as oslo_db_api
from oslo_db import exception as db_exc
from oslo_log import helpers as log_helpers
from oslo_log import log as logging
from sqlalchemy.orm import exc
from neutron.api.v2 import attributes
from neutron.db import api as db_api
from neutron.db import common_db_mixin
from neutron.db import models_v2
from neutron.db import tag_db as tag_model
from neutron.extensions import tag as tag_ext
LOG = logging.getLogger(__name__)
resource_model_map = {
attributes.NETWORKS: models_v2.Network,
# other resources can be added
}
def _extend_tags_dict(plugin, response_data, db_data):
tags = [tag_db.tag for tag_db in db_data.standard_attr.tags]
response_data['tags'] = tags
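# Added note: _extend_tags_dict is registered further below as a dict-extend
# hook, so every serialized resource (currently only networks, per
# resource_model_map) gains a plain list of tag strings pulled from its
# standard attribute record, e.g. response_data['tags'] == ['red', 'blue'].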
class TagPlugin(common_db_mixin.CommonDbMixin, tag_ext.TagPluginBase):
"""Implementation of the Neutron Tag Service Plugin."""
supported_extension_aliases = ['tag']
def _get_resource(self, context, resource, resource_id):
model = resource_model_map[resource]
try:
return self._get_by_id(context, model, resource_id)
except exc.NoResultFound:
raise tag_ext.TagResourceNotFound(resource=resource,
resource_id=resource_id)
@log_helpers.log_method_call
def get_tags(self, context, resource, resource_id):
res = self._get_resource(context, resource, resource_id)
tags = [tag_db.tag for tag_db in res.standard_attr.tags]
return dict(tags=tags)
@log_helpers.log_method_call
def get_tag(self, context, resource, resource_id, tag):
res = self._get_resource(context, resource, resource_id)
if not any(tag == tag_db.tag for tag_db in res.standard_attr.tags):
raise tag_ext.TagNotFound(tag=tag)
@log_helpers.log_method_call
@oslo_db_api.wrap_db_retry(
max_retries=db_api.MAX_RETRIES,
exception_checker=lambda e: isinstance(e, db_exc.DBDuplicateEntry))
def update_tags(self, context, resource, resource_id, body):
res = self._get_resource(context, resource, resource_id)
new_tags = set(body['tags'])
old_tags = {tag_db.tag for tag_db in res.standard_attr.tags}
tags_added = new_tags - old_tags
tags_removed = old_tags - new_tags
with context.session.begin(subtransactions=True):
for tag_db in res.standard_attr.tags:
if tag_db.tag in tags_removed:
context.session.delete(tag_db)
for tag in tags_added:
tag_db = tag_model.Tag(standard_attr_id=res.standard_attr_id,
tag=tag)
context.session.add(tag_db)
return body
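    # Commentary (added): the update above works by set difference, so a PUT
    # of {"tags": [...]} deletes rows only for tags that disappeared and
    # inserts rows only for genuinely new tags, leaving unchanged tags alone
    # inside a single subtransaction.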
@log_helpers.log_method_call
def update_tag(self, context, resource, resource_id, tag):
res = self._get_resource(context, resource, resource_id)
if any(tag == tag_db.tag for tag_db in res.standard_attr.tags):
return
try:
with context.session.begin(subtransactions=True):
tag_db = tag_model.Tag(standard_attr_id=res.standard_attr_id,
tag=tag)
context.session.add(tag_db)
except db_exc.DBDuplicateEntry:
pass
@log_helpers.log_method_call
def delete_tags(self, context, resource, resource_id):
res = self._get_resource(context, resource, resource_id)
with context.session.begin(subtransactions=True):
query = context.session.query(tag_model.Tag)
query = query.filter_by(standard_attr_id=res.standard_attr_id)
query.delete()
@log_helpers.log_method_call
def delete_tag(self, context, resource, resource_id, tag):
res = self._get_resource(context, resource, resource_id)
with context.session.begin(subtransactions=True):
query = context.session.query(tag_model.Tag)
query = query.filter_by(tag=tag,
standard_attr_id=res.standard_attr_id)
if not query.delete():
raise tag_ext.TagNotFound(tag=tag)
# support only _apply_dict_extend_functions supported resources
# at the moment.
for resource, model in resource_model_map.items():
common_db_mixin.CommonDbMixin.register_dict_extend_funcs(
resource, [_extend_tags_dict])
common_db_mixin.CommonDbMixin.register_model_query_hook(
model, "tag", None, None,
functools.partial(tag_model.apply_tag_filters, model))
| apache-2.0 | -9,123,757,742,919,907,000 | 39.25 | 78 | 0.644992 | false |
bentilly/heroes | lib/flask_restful/inputs.py | 16 | 9016 | from calendar import timegm
from datetime import datetime, time, timedelta
from email.utils import parsedate_tz, mktime_tz
import re
import aniso8601
import pytz
# Constants for upgrading date-based intervals to full datetimes.
START_OF_DAY = time(0, 0, 0, tzinfo=pytz.UTC)
END_OF_DAY = time(23, 59, 59, 999999, tzinfo=pytz.UTC)
# https://code.djangoproject.com/browser/django/trunk/django/core/validators.py
# basic auth added by frank
url_regex = re.compile(
r'^(?:http|ftp)s?://' # http:// or https://
r'(?:[^:@]+?:[^:@]*?@|)' # basic auth
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+'
r'(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
r'localhost|' # localhost...
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3}|' # ...or ipv4
r'\[?[A-F0-9]*:[A-F0-9:]+\]?)' # ...or ipv6
r'(?::\d+)?' # optional port
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
def url(value):
"""Validate a URL.
:param string value: The URL to validate
:returns: The URL if valid.
:raises: ValueError
"""
if not url_regex.search(value):
message = u"{0} is not a valid URL".format(value)
if url_regex.search('http://' + value):
message += u". Did you mean: http://{0}".format(value)
raise ValueError(message)
return value
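# Illustrative behaviour of url() above (added example, not from the original
# docstring):
#
#     url('http://example.com/path')   # -> returns the string unchanged
#     url('example.com')               # -> raises ValueError suggesting
#                                      #    "Did you mean: http://example.com"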
class regex(object):
"""Validate a string based on a regular expression.
Example::
parser = reqparse.RequestParser()
parser.add_argument('example', type=inputs.regex('^[0-9]+$'))
Input to the ``example`` argument will be rejected if it contains anything
but numbers.
:param pattern: The regular expression the input must match
:type pattern: str
"""
def __init__(self, pattern):
self.pattern = pattern
self.re = re.compile(pattern)
def __call__(self, value):
if not self.re.search(value):
message = 'Value does not match pattern: "{}"'.format(self.pattern)
raise ValueError(message)
return value
def __deepcopy__(self, memo):
return regex(self.pattern)
def _normalize_interval(start, end, value):
"""Normalize datetime intervals.
Given a pair of datetime.date or datetime.datetime objects,
returns a 2-tuple of tz-aware UTC datetimes spanning the same interval.
For datetime.date objects, the returned interval starts at 00:00:00.0
on the first date and ends at 00:00:00.0 on the second.
Naive datetimes are upgraded to UTC.
Timezone-aware datetimes are normalized to the UTC tzdata.
Params:
- start: A date or datetime
- end: A date or datetime
"""
if not isinstance(start, datetime):
start = datetime.combine(start, START_OF_DAY)
end = datetime.combine(end, START_OF_DAY)
if start.tzinfo is None:
start = pytz.UTC.localize(start)
end = pytz.UTC.localize(end)
else:
start = start.astimezone(pytz.UTC)
end = end.astimezone(pytz.UTC)
return start, end
def _expand_datetime(start, value):
if not isinstance(start, datetime):
# Expand a single date object to be the interval spanning
# that entire day.
end = start + timedelta(days=1)
else:
# Expand a datetime based on the finest resolution provided
# in the original input string.
time = value.split('T')[1]
time_without_offset = re.sub('[+-].+', '', time)
num_separators = time_without_offset.count(':')
if num_separators == 0:
# Hour resolution
end = start + timedelta(hours=1)
elif num_separators == 1:
# Minute resolution:
end = start + timedelta(minutes=1)
else:
# Second resolution
end = start + timedelta(seconds=1)
return end
def _parse_interval(value):
"""Do some nasty try/except voodoo to get some sort of datetime
object(s) out of the string.
"""
try:
return sorted(aniso8601.parse_interval(value))
except ValueError:
try:
return aniso8601.parse_datetime(value), None
except ValueError:
return aniso8601.parse_date(value), None
def iso8601interval(value, argument='argument'):
"""Parses ISO 8601-formatted datetime intervals into tuples of datetimes.
Accepts both a single date(time) or a full interval using either start/end
or start/duration notation, with the following behavior:
- Intervals are defined as inclusive start, exclusive end
- Single datetimes are translated into the interval spanning the
largest resolution not specified in the input value, up to the day.
- The smallest accepted resolution is 1 second.
- All timezones are accepted as values; returned datetimes are
localized to UTC. Naive inputs and date inputs will are assumed UTC.
Examples::
"2013-01-01" -> datetime(2013, 1, 1), datetime(2013, 1, 2)
"2013-01-01T12" -> datetime(2013, 1, 1, 12), datetime(2013, 1, 1, 13)
"2013-01-01/2013-02-28" -> datetime(2013, 1, 1), datetime(2013, 2, 28)
"2013-01-01/P3D" -> datetime(2013, 1, 1), datetime(2013, 1, 4)
"2013-01-01T12:00/PT30M" -> datetime(2013, 1, 1, 12), datetime(2013, 1, 1, 12, 30)
"2013-01-01T06:00/2013-01-01T12:00" -> datetime(2013, 1, 1, 6), datetime(2013, 1, 1, 12)
:param str value: The ISO8601 date time as a string
:return: Two UTC datetimes, the start and the end of the specified interval
:rtype: A tuple (datetime, datetime)
:raises: ValueError, if the interval is invalid.
"""
try:
start, end = _parse_interval(value)
if end is None:
end = _expand_datetime(start, value)
start, end = _normalize_interval(start, end, value)
except ValueError:
raise ValueError(
"Invalid {arg}: {value}. {arg} must be a valid ISO8601 "
"date/time interval.".format(arg=argument, value=value),
)
return start, end
def date(value):
"""Parse a valid looking date in the format YYYY-mm-dd"""
date = datetime.strptime(value, "%Y-%m-%d")
return date
def _get_integer(value):
try:
return int(value)
except (TypeError, ValueError):
raise ValueError('{} is not a valid integer'.format(value))
def natural(value, argument='argument'):
""" Restrict input type to the natural numbers (0, 1, 2, 3...) """
value = _get_integer(value)
if value < 0:
error = ('Invalid {arg}: {value}. {arg} must be a non-negative '
'integer'.format(arg=argument, value=value))
raise ValueError(error)
return value
def positive(value, argument='argument'):
""" Restrict input type to the positive integers (1, 2, 3...) """
value = _get_integer(value)
if value < 1:
error = ('Invalid {arg}: {value}. {arg} must be a positive '
'integer'.format(arg=argument, value=value))
raise ValueError(error)
return value
class int_range(object):
""" Restrict input to an integer in a range (inclusive) """
def __init__(self, low, high, argument='argument'):
self.low = low
self.high = high
self.argument = argument
def __call__(self, value):
value = _get_integer(value)
if value < self.low or value > self.high:
error = ('Invalid {arg}: {val}. {arg} must be within the range {lo} - {hi}'
.format(arg=self.argument, val=value, lo=self.low, hi=self.high))
raise ValueError(error)
return value
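# Illustrative use of int_range above (added example): a request parser can
# declare type=inputs.int_range(1, 100) so that '0' or '101' is rejected with
# "must be within the range 1 - 100" while '42' is returned as the int 42.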
def boolean(value):
"""Parse the string ``"true"`` or ``"false"`` as a boolean (case
insensitive). Also accepts ``"1"`` and ``"0"`` as ``True``/``False``
(respectively). If the input is from the request JSON body, the type is
already a native python boolean, and will be passed through without
further parsing.
"""
if type(value) == bool:
return value
if not value:
raise ValueError("boolean type must be non-null")
value = value.lower()
if value in ('true', '1',):
return True
if value in ('false', '0',):
return False
raise ValueError("Invalid literal for boolean(): {}".format(value))
def datetime_from_rfc822(datetime_str):
"""Turns an RFC822 formatted date into a datetime object.
Example::
inputs.datetime_from_rfc822("Wed, 02 Oct 2002 08:00:00 EST")
:param datetime_str: The RFC822-complying string to transform
:type datetime_str: str
:return: A datetime
"""
return datetime.fromtimestamp(mktime_tz(parsedate_tz(datetime_str)), pytz.utc)
def datetime_from_iso8601(datetime_str):
"""Turns an ISO8601 formatted date into a datetime object.
Example::
inputs.datetime_from_iso8601("2012-01-01T23:30:00+02:00")
:param datetime_str: The ISO8601-complying string to transform
:type datetime_str: str
:return: A datetime
"""
return aniso8601.parse_datetime(datetime_str)
| apache-2.0 | 365,122,012,350,359,200 | 31.2 | 96 | 0.618678 | false |
not-nexus/shelf | shelf/routes/artifact.py | 2 | 5229 | from flask import request, Blueprint
from shelf.endpoint_decorators import decorators
import shelf.response_map as response_map
artifact = Blueprint("artifact", __name__)
@artifact.route("/<bucket_name>/artifact/", methods=["GET", "HEAD"], defaults={"path": "/"})
@artifact.route("/<bucket_name>/artifact/<path:path>", methods=["GET", "HEAD"])
@decorators.foundation_headers
def get_path(container, bucket_name, path):
"""
Flask automatically maps HEAD requests to GET endpoint. We added it to the list of methods
to be more explicit. We handle it differently to avoid initiating the downloading of an
artifact as it is unnecessary.
"""
content = None
status_code = 204
if container.request.method == "HEAD":
container.artifact_manager.assign_artifact_links(path)
else:
content = container.artifact_manager.get_artifact(path)
if content:
status_code = 200
response = container.context_response_mapper.to_response(content, status_code)
return response
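# Added note: for HEAD requests the handler above only assigns artifact link
# headers (no download is started), while GET retrieves the artifact itself;
# an empty result keeps the 204 status, otherwise the content is returned
# with 200 through the context response mapper.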
@artifact.route("/<bucket_name>/artifact/<path:path>", methods=["POST"])
@decorators.foundation_headers
def upload_artifact(container, bucket_name, path):
file_storage = request.files['file']
container.artifact_manager.upload_artifact(path, file_storage)
response = response_map.create_201()
response = container.context_response_mapper.to_response(response.data, response.status_code)
response.headers["Location"] = container.request.path
return response
@artifact.route("/<bucket_name>/artifact/<path:path>/_meta", methods=["GET", "HEAD"])
@decorators.foundation
def get_artifact_meta_route(container, bucket_name, path):
return get_artifact_meta(container, bucket_name, path)
def get_artifact_meta(container, bucket_name, path):
container.link_manager.assign_single(path)
metadata = container.metadata.manager.metadata
response = container.context_response_mapper.to_response(metadata, 200)
return response
@artifact.route("/<bucket_name>/artifact/<path:path>/_meta", methods=["PUT"])
@decorators.foundation
@decorators.validate_request("schemas/request-metadata.json")
def update_artifact_meta(container, bucket_name, path, data):
manager = container.metadata.manager
manager.try_update(data)
response = get_artifact_meta(container, bucket_name, path)
response.headers["Location"] = container.request.path
return response
@artifact.route("/<bucket_name>/artifact/<path:path>/_meta/<item>", methods=["GET"])
@decorators.foundation
def get_metadata_property_route(container, bucket_name, path, item):
return get_metadata_property(container, bucket_name, path, item)
def get_metadata_property(container, bucket_name, path, item):
manager = container.metadata.manager
data = manager.metadata.get(item)
if None is data:
response = response_map.create_404()
else:
response = response_map.create_200(data)
return response
@artifact.route("/<bucket_name>/artifact/<path:path>/_meta/<item>", methods=["POST", "PUT"])
@decorators.foundation
@decorators.validate_request("schemas/request-metadata-property.json")
def create_metadata_property(container, bucket_name, path, item, data):
manager = container.metadata.manager
exists = (item in manager.metadata)
result = None
if request.method == "PUT":
result = manager.try_update_property(item, data)
else:
result = manager.try_create_property(item, data)
if result.success:
response = get_metadata_property(container, bucket_name, path, item)
if not exists:
response.status_code = 201
response.headers["Location"] = container.request.path
else:
response = response_map.map_metadata_result_errors(result)
return response
@artifact.route("/<bucket_name>/artifact/<path:path>/_meta/<item>", methods=["DELETE"])
@decorators.foundation
def delete_metadata_property(container, bucket_name, path, item):
manager = container.metadata.manager
result = manager.try_delete_property(item)
response = None
if result.success:
response = response_map.create_204()
else:
response = response_map.map_metadata_result_errors(result)
return response
@artifact.route("/<bucket_name>/artifact/_search", methods=["POST"])
@decorators.foundation
@decorators.validate_request("schemas/search-request-criteria.json", {})
def root_search(container, bucket_name, data):
response = search(container, data)
return response
@artifact.route("/<bucket_name>/artifact/<path:path>/_search", methods=["POST"])
@decorators.foundation
@decorators.validate_request("schemas/search-request-criteria.json", {})
def path_search(container, bucket_name, path, data):
response = search(container, data)
return response
def search(container, criteria=None):
"""
Does a search with the given criteria.
Args:
container(shelf.container.Container)
criteria(dict | None)
Returns:
Flask response
"""
container.search_portal.search(criteria)
response = container.context_response_mapper.to_response(status_code=204)
return response
| mit | 4,755,239,638,220,954,000 | 32.305732 | 98 | 0.707592 | false |
dodobas/osm-export-tool2 | api/tests/test_views.py | 1 | 35098 | # -*- coding: utf-8 -*-
import json
import logging
import os
import uuid
from unittest import skip
from mock import patch
from django.contrib.auth.models import Group, User
from django.contrib.gis.geos import GEOSGeometry, Polygon
from django.core.files import File
from rest_framework import status
from rest_framework.authtoken.models import Token
from rest_framework.reverse import reverse
from rest_framework.test import APITestCase
from api.pagination import LinkHeaderPagination
from jobs.models import ExportConfig, ExportFormat, ExportProfile, Job
from tasks.models import ExportRun, ExportTask
logger = logging.getLogger(__name__)
class TestJobViewSet(APITestCase):
def setUp(self, ):
self.path = os.path.dirname(os.path.realpath(__file__))
self.group = Group.objects.create(name='TestDefaultExportExtentGroup')
profile = ExportProfile.objects.create(
name='DefaultExportProfile',
max_extent=2500000,
group=self.group
)
self.user = User.objects.create_user(
username='demo', email='[email protected]', password='demo'
)
extents = (-3.9, 16.1, 7.0, 27.6)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob', event='Test Activation',
description='Test description', user=self.user,
the_geom=the_geom)
format = ExportFormat.objects.get(slug='obf')
self.job.formats.add(format)
token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
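        # Commentary (added): every request issued through self.client now
        # carries DRF token auth plus a versioned Accept header, which is why
        # the assertions below expect 'application/json; version=1.0' and
        # 'en' to come back in the response headers.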
# create a test config
f = File(open(self.path + '/files/hdm_presets.xml'))
filename = f.name.split('/')[-1]
name = 'Test Configuration File'
self.config = ExportConfig.objects.create(name='Test Preset Config', filename=filename, upload=f, config_type='PRESET', user=self.user)
f.close()
self.assertIsNotNone(self.config)
self.job.configs.add(self.config)
self.tags = [
{
"name": "Telecommunication office",
"key": "office", "value": "telecommunication",
"data_model": "HDM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
},
{
"name": "Radio or TV Studio",
"key": "amenity", "value": "studio",
"data_model": "OSM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
},
{
"name": "Telecommunication antenna",
"key": "man_made", "value": "tower",
"data_model": "OSM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
},
{
"name": "Telecommunication company retail office",
"key": "office", "value": "telecommunication",
"data_model": "OSM",
"geom_types": ["point", "polygon"],
"groups": ['HDM Presets v2.11', 'Commercial and Economic', 'Telecommunication']
}
]
def tearDown(self,):
self.config.delete() # clean up
def test_list(self, ):
expected = '/api/jobs'
url = reverse('api:jobs-list')
self.assertEquals(expected, url)
def test_get_job_detail(self, ):
expected = '/api/jobs/{0}'.format(self.job.uid)
url = reverse('api:jobs-detail', args=[self.job.uid])
self.assertEquals(expected, url)
data = {"uid": str(self.job.uid),
"name": "Test",
"url": 'http://testserver{0}'.format(url),
"description": "Test Description",
"exports": [{"uid": "8611792d-3d99-4c8f-a213-787bc7f3066",
"url": "http://testserver/api/formats/obf",
"name": "OBF Format",
"description": "OSMAnd OBF Export Format."}],
"created_at": "2015-05-21T19:46:37.163749Z",
"updated_at": "2015-05-21T19:46:47.207111Z",
"status": "SUCCESS"}
response = self.client.get(url)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_200_OK)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant content
self.assertEquals(response.data['uid'], data['uid'])
self.assertEquals(response.data['url'], data['url'])
self.assertEqual(response.data['exports'][0]['url'], data['exports'][0]['url'])
def test_delete_job(self, ):
url = reverse('api:jobs-detail', args=[self.job.uid])
response = self.client.delete(url)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_204_NO_CONTENT)
self.assertEquals(response['Content-Length'], '0')
self.assertEquals(response['Content-Language'], 'en')
@patch('api.views.ExportTaskRunner')
def test_create_job_success(self, mock):
task_runner = mock.return_value
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
config_uid = self.config.uid
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats,
'preset': config_uid,
'published': True,
'tags': self.tags
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
self.assertTrue(response.data['published'])
# check we have the correct tags
job = Job.objects.get(uid=job_uid)
tags = job.tags.all()
self.assertIsNotNone(tags)
self.assertEquals(233, len(tags))
@patch('api.views.ExportTaskRunner')
def test_create_job_with_config_success(self, mock):
task_runner = mock.return_value
config_uid = self.config.uid
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats,
'preset': config_uid,
'transform': '',
'translation': ''
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
self.assertFalse(response.data['published'])
configs = self.job.configs.all()
self.assertIsNotNone(configs[0])
@patch('api.views.ExportTaskRunner')
def test_create_job_with_tags(self, mock):
# delete the existing tags and test adding them with json
self.job.tags.all().delete()
task_runner = mock.return_value
config_uid = self.config.uid
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats,
# 'preset': config_uid,
'transform': '',
'translate': '',
'tags': self.tags
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
# test the ExportTaskRunner.run_task(job_id) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
configs = self.job.configs.all()
# self.assertIsNotNone(configs[0])
def test_missing_bbox_param(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
# 'xmin': -3.9, missing
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['xmin is required.'], response.data['xmin'])
def test_invalid_bbox_param(self, ):
url = reverse('api:jobs-list')
formats = [str(format.uid) for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': '', # empty
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data, format='json')
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid xmin value.'], response.data['xmin'])
def test_invalid_bbox(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 7.0, # invalid
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid_bounds'], response.data['id'])
def test_lat_lon_bbox(self, ):
url = reverse('api:jobs-list')
formats = [str(format.uid) for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -227.14, # invalid
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(["Ensure this value is greater than or equal to -180."], response.data['xmin'])
def test_coord_nan(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 'xyz', # invalid
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid xmin value.'], response.data['xmin'])
def test_inverted_coords(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 7.0, # inverted
'ymin': 16.1,
'xmax': -3.9, # inverted
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['inverted_coordinates'], response.data['id'])
def test_empty_string_param(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
request_data = {
'name': 'TestJob',
'description': '', # empty
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['This field may not be blank.'], response.data['description'])
def test_missing_format_param(self, ):
url = reverse('api:jobs-list')
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
# 'formats': '', # missing
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['Select an export format.'], response.data['formats'])
def test_invalid_format_param(self, ):
url = reverse('api:jobs-list')
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': '', # invalid
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertIsNotNone(response.data['formats'])
def test_no_matching_format_slug(self, ):
url = reverse('api:jobs-list')
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -3.9,
'ymin': 16.1,
'xmax': 7.0,
'ymax': 27.6,
'formats': ['broken-format-one', 'broken-format-two']
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(response.data['formats'], ['invalid export format.'])
@patch('api.views.ExportTaskRunner')
def test_get_correct_region(self, mock):
task_runner = mock.return_value
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
# job extent spans africa / asia but greater intersection with asia
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 36.90,
'ymin': 13.54,
'xmax': 48.52,
'ymax': 20.24,
'formats': formats
}
response = self.client.post(url, request_data, format='json')
job_uid = response.data['uid']
        # test the ExportTaskRunner.run_task(job_uid) method gets called.
task_runner.run_task.assert_called_once_with(job_uid=job_uid)
# test the response headers
self.assertEquals(response.status_code, status.HTTP_202_ACCEPTED)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
# test significant response content
self.assertEqual(response.data['exports'][0]['slug'], request_data['formats'][0])
self.assertEqual(response.data['exports'][1]['slug'], request_data['formats'][1])
self.assertEqual(response.data['name'], request_data['name'])
self.assertEqual(response.data['description'], request_data['description'])
# test the region
region = response.data['region']
self.assertIsNotNone(region)
self.assertEquals(region['name'], 'Central Asia/Middle East')
def test_invalid_region(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
# job outside any region
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': 2.74,
'ymin': 47.66,
'xmax': 11.61,
'ymax': 54.24,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid_region'], response.data['id'])
def test_extents_too_large(self, ):
url = reverse('api:jobs-list')
formats = [format.slug for format in ExportFormat.objects.all()]
        # job extents too large
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': -40,
'ymin': -10,
'xmax': 40,
'ymax': 20,
'formats': formats
}
response = self.client.post(url, request_data)
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(['invalid_extents'], response.data['id'])
class TestBBoxSearch(APITestCase):
"""
Test cases for testing bounding box searches.
"""
@patch('api.views.ExportTaskRunner')
def setUp(self, mock):
task_runner = mock.return_value
url = reverse('api:jobs-list')
# create dummy user
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create_user(
username='demo', email='[email protected]', password='demo'
)
# setup token authentication
token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
# pull out the formats
formats = [format.slug for format in ExportFormat.objects.all()]
# create test jobs
extents = [(-3.9, 16.1, 7.0, 27.6), (36.90, 13.54, 48.52, 20.24),
(-71.79, -49.57, -67.14, -46.16), (-61.27, -6.49, -56.20, -2.25),
(-11.61, 32.07, -6.42, 36.31), (-10.66, 5.81, -2.45, 11.83),
(47.26, 34.58, 52.92, 39.15), (90.00, 11.28, 95.74, 17.02)]
for extent in extents:
request_data = {
'name': 'TestJob',
'description': 'Test description',
'event': 'Test Activation',
'xmin': extent[0],
'ymin': extent[1],
'xmax': extent[2],
'ymax': extent[3],
'formats': formats
}
response = self.client.post(url, request_data, format='json')
self.assertEquals(status.HTTP_202_ACCEPTED, response.status_code)
self.assertEquals(8, len(Job.objects.all()))
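        # Paginate list responses two jobs per page so the bbox search tests
        # below can assert partial (HTTP 206) responses with two results.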
LinkHeaderPagination.page_size = 2
def test_bbox_search_success(self, ):
url = reverse('api:jobs-list')
extent = (-79.5, -16.16, 7.40, 52.44)
param = 'bbox={0},{1},{2},{3}'.format(extent[0], extent[1], extent[2], extent[3])
response = self.client.get('{0}?{1}'.format(url, param))
self.assertEquals(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
self.assertEquals(2, len(response.data)) # 8 jobs in total but response is paginated
def test_list_jobs_no_bbox(self, ):
url = reverse('api:jobs-list')
response = self.client.get(url)
self.assertEquals(status.HTTP_206_PARTIAL_CONTENT, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals(response['Link'], '<http://testserver/api/jobs?page=2>; rel="next"')
self.assertEquals(2, len(response.data)) # 8 jobs in total but response is paginated
def test_bbox_search_missing_params(self, ):
url = reverse('api:jobs-list')
param = 'bbox=' # missing params
response = self.client.get('{0}?{1}'.format(url, param))
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals('missing_bbox_parameter', response.data['id'])
def test_bbox_missing_coord(self, ):
url = reverse('api:jobs-list')
extent = (-79.5, -16.16, 7.40) # one missing
param = 'bbox={0},{1},{2}'.format(extent[0], extent[1], extent[2])
response = self.client.get('{0}?{1}'.format(url, param))
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response['Content-Type'], 'application/json; version=1.0')
self.assertEquals(response['Content-Language'], 'en')
self.assertEquals('missing_bbox_parameter', response.data['id'])
class TestPagination(APITestCase):
pass
class TestExportRunViewSet(APITestCase):
"""
Test cases for ExportRunViewSet
"""
def setUp(self, ):
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
extents = (-3.9, 16.1, 7.0, 27.6)
bbox = Polygon.from_bbox(extents)
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob',
description='Test description', user=self.user,
the_geom=the_geom)
self.job_uid = str(self.job.uid)
self.run = ExportRun.objects.create(job=self.job, user=self.user)
self.run_uid = str(self.run.uid)
def test_retrieve_run(self, ):
expected = '/api/runs/{0}'.format(self.run_uid)
url = reverse('api:runs-detail', args=[self.run_uid])
self.assertEquals(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
result = response.data
# make sure we get the correct uid back out
self.assertEquals(self.run_uid, result[0].get('uid'))
def test_list_runs(self, ):
expected = '/api/runs'
url = reverse('api:runs-list')
self.assertEquals(expected, url)
query = '{0}?job_uid={1}'.format(url, self.job.uid)
response = self.client.get(query)
self.assertIsNotNone(response)
result = response.data
# make sure we get the correct uid back out
self.assertEquals(1, len(result))
self.assertEquals(self.run_uid, result[0].get('uid'))
class TestExportConfigViewSet(APITestCase):
"""
Test cases for ExportConfigViewSet
"""
def setUp(self, ):
self.path = os.path.dirname(os.path.realpath(__file__))
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob',
description='Test description', user=self.user,
the_geom=the_geom)
self.uid = self.job.uid
# setup token authentication
token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
def test_create_config(self, ):
url = reverse('api:configs-list')
path = os.path.dirname(os.path.realpath(__file__))
f = File(open(path + '/files/Example Transform.sql', 'r'))
name = 'Test Export Config'
response = self.client.post(url, {'name': name, 'upload': f, 'config_type': 'TRANSFORM', 'published': True}, format='multipart')
data = response.data
uid = data['uid']
saved_config = ExportConfig.objects.get(uid=uid)
self.assertIsNotNone(saved_config)
self.assertEquals(name, saved_config.name)
self.assertTrue(saved_config.published)
self.assertEquals('example_transform.sql', saved_config.filename)
self.assertEquals('text/plain', saved_config.content_type)
saved_config.delete()
def test_invalid_config_type(self, ):
url = reverse('api:configs-list')
path = os.path.dirname(os.path.realpath(__file__))
f = open(path + '/files/Example Transform.sql', 'r')
self.assertIsNotNone(f)
response = self.client.post(url, {'upload': f, 'config_type': 'TRANSFORM-WRONG'}, format='multipart')
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
def test_invalid_name(self, ):
url = reverse('api:configs-list')
path = os.path.dirname(os.path.realpath(__file__))
f = open(path + '/files/Example Transform.sql', 'r')
self.assertIsNotNone(f)
response = self.client.post(url, {'upload': f, 'config_type': 'TRANSFORM'}, format='multipart')
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
self.assertEquals(response.data['name'], ['This field is required.'])
def test_invalid_upload(self, ):
url = reverse('api:configs-list')
response = self.client.post(url, {'upload': '', 'config_type': 'TRANSFORM-WRONG'}, format='multipart')
self.assertEquals(status.HTTP_400_BAD_REQUEST, response.status_code)
@skip('Transform not implemented.')
def test_update_config(self, ):
url = reverse('api:configs-list')
# create an initial config we can then update..
path = os.path.dirname(os.path.realpath(__file__))
f = File(open(path + '/files/Example Transform.sql', 'r'))
name = 'Test Export Config'
response = self.client.post(url, {'name': name, 'upload': f, 'config_type': 'TRANSFORM'}, format='multipart')
data = response.data
saved_uid = data['uid']
saved_config = ExportConfig.objects.get(uid=saved_uid)
# update the config
url = reverse('api:configs-detail', args=[saved_uid])
f = File(open(path + '/files/hdm_presets.xml', 'r'))
updated_name = 'Test Export Config Updated'
response = self.client.put(url, {'name': updated_name, 'upload': f, 'config_type': 'PRESET'}, format='multipart')
data = response.data
updated_uid = data['uid']
self.assertEquals(saved_uid, updated_uid) # check its the same uid
updated_config = ExportConfig.objects.get(uid=updated_uid)
self.assertIsNotNone(updated_config)
self.assertEquals('hdm_presets.xml', updated_config.filename)
self.assertEquals('application/xml', updated_config.content_type)
self.assertEquals('Test Export Config Updated', updated_config.name)
updated_config.delete()
try:
f = File(open(path + '/files/Example Transform.sql', 'r'))
except IOError:
pass # expected.. old file has been deleted during update.
class TestExportTaskViewSet(APITestCase):
"""
Test cases for ExportTaskViewSet
"""
def setUp(self, ):
self.path = os.path.dirname(os.path.realpath(__file__))
Group.objects.create(name='TestDefaultExportExtentGroup')
self.user = User.objects.create(username='demo', email='[email protected]', password='demo')
bbox = Polygon.from_bbox((-7.96, 22.6, -8.14, 27.12))
the_geom = GEOSGeometry(bbox, srid=4326)
self.job = Job.objects.create(name='TestJob',
description='Test description', user=self.user,
the_geom=the_geom)
# setup token authentication
token = Token.objects.create(user=self.user)
self.client.credentials(HTTP_AUTHORIZATION='Token ' + token.key,
HTTP_ACCEPT='application/json; version=1.0',
HTTP_ACCEPT_LANGUAGE='en',
HTTP_HOST='testserver')
self.run = ExportRun.objects.create(job=self.job)
self.celery_uid = str(uuid.uuid4())
self.task = ExportTask.objects.create(run=self.run, name='Shapefile Export',
celery_uid=self.celery_uid, status='SUCCESS')
self.task_uid = str(self.task.uid)
def test_retrieve(self, ):
expected = '/api/tasks/{0}'.format(self.task_uid)
url = reverse('api:tasks-detail', args=[self.task_uid])
self.assertEquals(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEquals(200, response.status_code)
result = json.dumps(response.data)
data = json.loads(result)
# make sure we get the correct uid back out
self.assertEquals(self.task_uid, data[0].get('uid'))
def test_list(self, ):
        expected = '/api/tasks'
url = reverse('api:tasks-list')
self.assertEquals(expected, url)
response = self.client.get(url)
self.assertIsNotNone(response)
self.assertEquals(200, response.status_code)
result = json.dumps(response.data)
data = json.loads(result)
# should only be one task in the list
self.assertEquals(1, len(data))
# make sure we get the correct uid back out
self.assertEquals(self.task_uid, data[0].get('uid'))
| bsd-3-clause | -5,353,230,866,664,354,000 | 43.427848 | 143 | 0.581087 | false |
nevercast/home-assistant | homeassistant/components/http.py | 4 | 16083 | """
homeassistant.components.http
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
This module provides an API and a HTTP interface for debug purposes.
For more details about the RESTful API, please refer to the documentation at
https://home-assistant.io/developers/api/
"""
from datetime import timedelta
import gzip
from http import cookies
from http.server import SimpleHTTPRequestHandler, HTTPServer
import json
import logging
import os
from socketserver import ThreadingMixIn
import ssl
import threading
import time
from urllib.parse import urlparse, parse_qs
import homeassistant.core as ha
from homeassistant.const import (
SERVER_PORT, CONTENT_TYPE_JSON, CONTENT_TYPE_TEXT_PLAIN,
HTTP_HEADER_HA_AUTH, HTTP_HEADER_CONTENT_TYPE, HTTP_HEADER_ACCEPT_ENCODING,
HTTP_HEADER_CONTENT_ENCODING, HTTP_HEADER_VARY, HTTP_HEADER_CONTENT_LENGTH,
HTTP_HEADER_CACHE_CONTROL, HTTP_HEADER_EXPIRES, HTTP_OK, HTTP_UNAUTHORIZED,
HTTP_NOT_FOUND, HTTP_METHOD_NOT_ALLOWED, HTTP_UNPROCESSABLE_ENTITY)
import homeassistant.remote as rem
import homeassistant.util as util
import homeassistant.util.dt as date_util
import homeassistant.bootstrap as bootstrap
DOMAIN = "http"
CONF_API_PASSWORD = "api_password"
CONF_SERVER_HOST = "server_host"
CONF_SERVER_PORT = "server_port"
CONF_DEVELOPMENT = "development"
CONF_SSL_CERTIFICATE = 'ssl_certificate'
CONF_SSL_KEY = 'ssl_key'
DATA_API_PASSWORD = 'api_password'
# Throttling time in seconds for expired sessions check
SESSION_CLEAR_INTERVAL = timedelta(seconds=20)
SESSION_TIMEOUT_SECONDS = 1800
SESSION_KEY = 'sessionId'
_LOGGER = logging.getLogger(__name__)
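# Example configuration.yaml entry (illustrative sketch only: the keys mirror
# the CONF_* constants above, the values are placeholders rather than
# defaults):
#
# http:
#   api_password: mypassword
#   server_host: 192.168.1.10
#   server_port: 8123
#   development: "1"
#   ssl_certificate: /path/to/fullchain.pem
#   ssl_key: /path/to/privkey.pem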
def setup(hass, config):
""" Sets up the HTTP API and debug interface. """
conf = config.get(DOMAIN, {})
api_password = util.convert(conf.get(CONF_API_PASSWORD), str)
# If no server host is given, accept all incoming requests
server_host = conf.get(CONF_SERVER_HOST, '0.0.0.0')
server_port = conf.get(CONF_SERVER_PORT, SERVER_PORT)
development = str(conf.get(CONF_DEVELOPMENT, "")) == "1"
ssl_certificate = conf.get(CONF_SSL_CERTIFICATE)
ssl_key = conf.get(CONF_SSL_KEY)
try:
server = HomeAssistantHTTPServer(
(server_host, server_port), RequestHandler, hass, api_password,
development, ssl_certificate, ssl_key)
except OSError:
# If address already in use
_LOGGER.exception("Error setting up HTTP server")
return False
hass.bus.listen_once(
ha.EVENT_HOMEASSISTANT_START,
lambda event:
threading.Thread(target=server.start, daemon=True).start())
hass.http = server
hass.config.api = rem.API(util.get_local_ip(), api_password, server_port,
ssl_certificate is not None)
return True
# pylint: disable=too-many-instance-attributes
class HomeAssistantHTTPServer(ThreadingMixIn, HTTPServer):
""" Handle HTTP requests in a threaded fashion. """
# pylint: disable=too-few-public-methods
allow_reuse_address = True
daemon_threads = True
# pylint: disable=too-many-arguments
def __init__(self, server_address, request_handler_class,
hass, api_password, development, ssl_certificate, ssl_key):
super().__init__(server_address, request_handler_class)
self.server_address = server_address
self.hass = hass
self.api_password = api_password
self.development = development
self.paths = []
self.sessions = SessionStore()
self.use_ssl = ssl_certificate is not None
# We will lazy init this one if needed
self.event_forwarder = None
if development:
_LOGGER.info("running http in development mode")
if ssl_certificate is not None:
context = ssl.create_default_context(
purpose=ssl.Purpose.CLIENT_AUTH)
context.load_cert_chain(ssl_certificate, keyfile=ssl_key)
self.socket = context.wrap_socket(self.socket, server_side=True)
def start(self):
""" Starts the HTTP server. """
def stop_http(event):
""" Stops the HTTP server. """
self.shutdown()
self.hass.bus.listen_once(ha.EVENT_HOMEASSISTANT_STOP, stop_http)
protocol = 'https' if self.use_ssl else 'http'
_LOGGER.info(
"Starting web interface at %s://%s:%d",
protocol, self.server_address[0], self.server_address[1])
# 31-1-2015: Refactored frontend/api components out of this component
# To prevent stuff from breaking, load the two extracted components
bootstrap.setup_component(self.hass, 'api')
bootstrap.setup_component(self.hass, 'frontend')
self.serve_forever()
def register_path(self, method, url, callback, require_auth=True):
""" Registers a path with the server. """
self.paths.append((method, url, callback, require_auth))
def log_message(self, fmt, *args):
""" Redirect built-in log to HA logging """
# pylint: disable=no-self-use
_LOGGER.info(fmt, *args)
# pylint: disable=too-many-public-methods,too-many-locals
class RequestHandler(SimpleHTTPRequestHandler):
"""
Handles incoming HTTP requests
We extend from SimpleHTTPRequestHandler instead of Base so we
can use the guess content type methods.
"""
server_version = "HomeAssistant/1.0"
def __init__(self, req, client_addr, server):
""" Contructor, call the base constructor and set up session """
# Track if this was an authenticated request
self.authenticated = False
SimpleHTTPRequestHandler.__init__(self, req, client_addr, server)
def log_message(self, fmt, *arguments):
""" Redirect built-in log to HA logging """
if self.server.api_password is None:
_LOGGER.info(fmt, *arguments)
else:
_LOGGER.info(
fmt, *(arg.replace(self.server.api_password, '*******')
if isinstance(arg, str) else arg for arg in arguments))
def _handle_request(self, method): # pylint: disable=too-many-branches
""" Does some common checks and calls appropriate method. """
url = urlparse(self.path)
# Read query input. parse_qs gives a list for each value, we want last
data = {key: data[-1] for key, data in parse_qs(url.query).items()}
# Did we get post input ?
content_length = int(self.headers.get(HTTP_HEADER_CONTENT_LENGTH, 0))
if content_length:
body_content = self.rfile.read(content_length).decode("UTF-8")
try:
data.update(json.loads(body_content))
except (TypeError, ValueError):
# TypeError if JSON object is not a dict
# ValueError if we could not parse JSON
_LOGGER.exception(
"Exception parsing JSON: %s", body_content)
self.write_json_message(
"Error parsing JSON", HTTP_UNPROCESSABLE_ENTITY)
return
self.authenticated = (self.server.api_password is None or
self.headers.get(HTTP_HEADER_HA_AUTH) ==
self.server.api_password or
data.get(DATA_API_PASSWORD) ==
self.server.api_password or
self.verify_session())
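        # The request data may override the HTTP verb via a '_METHOD' value.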
if '_METHOD' in data:
method = data.pop('_METHOD')
# Var to keep track if we found a path that matched a handler but
# the method was different
path_matched_but_not_method = False
# Var to hold the handler for this path and method if found
handle_request_method = False
require_auth = True
# Check every handler to find matching result
for t_method, t_path, t_handler, t_auth in self.server.paths:
# we either do string-comparison or regular expression matching
# pylint: disable=maybe-no-member
if isinstance(t_path, str):
path_match = url.path == t_path
else:
path_match = t_path.match(url.path)
if path_match and method == t_method:
# Call the method
handle_request_method = t_handler
require_auth = t_auth
break
elif path_match:
path_matched_but_not_method = True
# Did we find a handler for the incoming request?
if handle_request_method:
# For some calls we need a valid password
if require_auth and not self.authenticated:
self.write_json_message(
"API password missing or incorrect.", HTTP_UNAUTHORIZED)
return
handle_request_method(self, path_match, data)
elif path_matched_but_not_method:
self.send_response(HTTP_METHOD_NOT_ALLOWED)
self.end_headers()
else:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
def do_HEAD(self): # pylint: disable=invalid-name
""" HEAD request handler. """
self._handle_request('HEAD')
def do_GET(self): # pylint: disable=invalid-name
""" GET request handler. """
self._handle_request('GET')
def do_POST(self): # pylint: disable=invalid-name
""" POST request handler. """
self._handle_request('POST')
def do_PUT(self): # pylint: disable=invalid-name
""" PUT request handler. """
self._handle_request('PUT')
def do_DELETE(self): # pylint: disable=invalid-name
""" DELETE request handler. """
self._handle_request('DELETE')
def write_json_message(self, message, status_code=HTTP_OK):
""" Helper method to return a message to the caller. """
self.write_json({'message': message}, status_code=status_code)
def write_json(self, data=None, status_code=HTTP_OK, location=None):
""" Helper method to return JSON to the caller. """
self.send_response(status_code)
self.send_header(HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_JSON)
if location:
self.send_header('Location', location)
self.set_session_cookie_header()
self.end_headers()
if data is not None:
self.wfile.write(
json.dumps(data, indent=4, sort_keys=True,
cls=rem.JSONEncoder).encode("UTF-8"))
def write_text(self, message, status_code=HTTP_OK):
""" Helper method to return a text message to the caller. """
self.send_response(status_code)
self.send_header(HTTP_HEADER_CONTENT_TYPE, CONTENT_TYPE_TEXT_PLAIN)
self.set_session_cookie_header()
self.end_headers()
self.wfile.write(message.encode("UTF-8"))
def write_file(self, path, cache_headers=True):
""" Returns a file to the user. """
try:
with open(path, 'rb') as inp:
self.write_file_pointer(self.guess_type(path), inp,
cache_headers)
except IOError:
self.send_response(HTTP_NOT_FOUND)
self.end_headers()
_LOGGER.exception("Unable to serve %s", path)
def write_file_pointer(self, content_type, inp, cache_headers=True):
"""
Helper function to write a file pointer to the user.
Does not do error handling.
"""
do_gzip = 'gzip' in self.headers.get(HTTP_HEADER_ACCEPT_ENCODING, '')
self.send_response(HTTP_OK)
self.send_header(HTTP_HEADER_CONTENT_TYPE, content_type)
if cache_headers:
self.set_cache_header()
self.set_session_cookie_header()
if do_gzip:
gzip_data = gzip.compress(inp.read())
self.send_header(HTTP_HEADER_CONTENT_ENCODING, "gzip")
self.send_header(HTTP_HEADER_VARY, HTTP_HEADER_ACCEPT_ENCODING)
self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(len(gzip_data)))
else:
fst = os.fstat(inp.fileno())
self.send_header(HTTP_HEADER_CONTENT_LENGTH, str(fst[6]))
self.end_headers()
if self.command == 'HEAD':
return
elif do_gzip:
self.wfile.write(gzip_data)
else:
self.copyfile(inp, self.wfile)
def set_cache_header(self):
""" Add cache headers if not in development """
if self.server.development:
return
# 1 year in seconds
cache_time = 365 * 86400
self.send_header(
HTTP_HEADER_CACHE_CONTROL,
"public, max-age={}".format(cache_time))
self.send_header(
HTTP_HEADER_EXPIRES,
self.date_time_string(time.time()+cache_time))
def set_session_cookie_header(self):
""" Add the header for the session cookie and return session id. """
if not self.authenticated:
return None
session_id = self.get_cookie_session_id()
if session_id is not None:
self.server.sessions.extend_validation(session_id)
return session_id
self.send_header(
'Set-Cookie',
'{}={}'.format(SESSION_KEY, self.server.sessions.create())
)
return session_id
def verify_session(self):
""" Verify that we are in a valid session. """
return self.get_cookie_session_id() is not None
def get_cookie_session_id(self):
"""
Extracts the current session id from the
cookie or returns None if not set or invalid
"""
if 'Cookie' not in self.headers:
return None
cookie = cookies.SimpleCookie()
try:
cookie.load(self.headers["Cookie"])
except cookies.CookieError:
return None
morsel = cookie.get(SESSION_KEY)
if morsel is None:
return None
session_id = cookie[SESSION_KEY].value
if self.server.sessions.is_valid(session_id):
return session_id
return None
def destroy_session(self):
""" Destroys session. """
session_id = self.get_cookie_session_id()
if session_id is None:
return
self.send_header('Set-Cookie', '')
self.server.sessions.destroy(session_id)
def session_valid_time():
""" Time till when a session will be valid. """
return date_util.utcnow() + timedelta(seconds=SESSION_TIMEOUT_SECONDS)
class SessionStore(object):
""" Responsible for storing and retrieving http sessions """
def __init__(self):
""" Set up the session store """
self._sessions = {}
self._lock = threading.RLock()
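    # Throttled so the expired-session sweep runs at most once per
    # SESSION_CLEAR_INTERVAL, however often is_valid() is called.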
@util.Throttle(SESSION_CLEAR_INTERVAL)
def _remove_expired(self):
""" Remove any expired sessions. """
now = date_util.utcnow()
for key in [key for key, valid_time in self._sessions.items()
if valid_time < now]:
self._sessions.pop(key)
def is_valid(self, key):
""" Return True if a valid session is given. """
with self._lock:
self._remove_expired()
return (key in self._sessions and
self._sessions[key] > date_util.utcnow())
def extend_validation(self, key):
""" Extend a session validation time. """
with self._lock:
if key not in self._sessions:
return
self._sessions[key] = session_valid_time()
def destroy(self, key):
""" Destroy a session by key. """
with self._lock:
self._sessions.pop(key, None)
def create(self):
""" Creates a new session. """
with self._lock:
session_id = util.get_random_string(20)
while session_id in self._sessions:
session_id = util.get_random_string(20)
self._sessions[session_id] = session_valid_time()
return session_id
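# Illustrative usage sketch for SessionStore (not wired into the request flow
# above; the names mirror the class defined in this module):
#
#     sessions = SessionStore()
#     sid = sessions.create()          # new id, valid for SESSION_TIMEOUT_SECONDS
#     sessions.is_valid(sid)           # -> True until the session expires
#     sessions.extend_validation(sid)  # refresh the expiry on activity
#     sessions.destroy(sid)            # invalidate explicitly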
| mit | -639,019,440,193,216,800 | 32.5762 | 79 | 0.603308 | false |
freeworldxbmc/pluging.video.Jurassic.World.Media | resources/lib/resolvers/kingfiles.py | 3 | 1884 | # -*- coding: utf-8 -*-
'''
Genesis Add-on
Copyright (C) 2015 lambda
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
'''
import re,urllib
from resources.lib.libraries import client
from resources.lib.libraries import captcha
def resolve(url):
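    """
    Resolve a kingfiles URL to a direct download link.
    The flow below follows the host's free-download form: fetch the page,
    re-submit its hidden form fields together with 'method_free', submit once
    more with the captcha answer, then pull 'download_url' out of the result.
    """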
try:
result = client.request(url)
post = {}
f = client.parseDOM(result, 'Form', attrs = {'action': '' })[0]
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post.update({'method_free': ' '})
post = urllib.urlencode(post)
result = client.request(url, post=post)
post = {}
f = client.parseDOM(result, 'Form', attrs = {'action': '' })[0]
k = client.parseDOM(f, 'input', ret='name', attrs = {'type': 'hidden'})
for i in k: post.update({i: client.parseDOM(f, 'input', ret='value', attrs = {'name': i})[0]})
post.update({'method_free': ' '})
post.update(captcha.request(result))
post = urllib.urlencode(post)
result = client.request(url, post=post)
url = re.compile("var\s+download_url *= *'(.+?)'").findall(result)[0]
return url
except:
return
| gpl-3.0 | -6,724,552,432,619,386,000 | 33.888889 | 102 | 0.623673 | false |
dontnod/weblate | weblate/trans/tests/test_models.py | 1 | 15261 | # -*- coding: utf-8 -*-
#
# Copyright © 2012 - 2019 Michal Čihař <[email protected]>
#
# This file is part of Weblate <https://weblate.org/>
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
#
"""Test for translation models."""
import os
import shutil
from django.core.management.color import no_style
from django.db import connection
from django.test import LiveServerTestCase, TestCase
from django.test.utils import override_settings
from weblate.auth.models import Group, User
from weblate.checks.models import Check
from weblate.lang.models import Language, Plural
from weblate.trans.models import (
AutoComponentList,
Component,
ComponentList,
Project,
Source,
Unit,
WhiteboardMessage,
)
from weblate.trans.tests.utils import RepoTestMixin, create_test_user
from weblate.utils.state import STATE_TRANSLATED
def fixup_languages_seq():
    # Reset the sequence for Language and Plural objects as
    # we're manipulating them in FixtureTestCase.setUpTestData
    # and that seems to affect the sequence for other tests as well
    # on some PostgreSQL versions (the sequence is probably not rolled back
    # in a transaction).
commands = connection.ops.sequence_reset_sql(no_style(), [Language, Plural])
if commands:
with connection.cursor() as cursor:
for sql in commands:
cursor.execute(sql)
class BaseTestCase(TestCase):
@classmethod
def setUpTestData(cls):
fixup_languages_seq()
class BaseLiveServerTestCase(LiveServerTestCase):
@classmethod
def setUpTestData(cls):
fixup_languages_seq()
class RepoTestCase(BaseTestCase, RepoTestMixin):
"""Generic class for tests working with repositories."""
def setUp(self):
self.clone_test_repos()
class ProjectTest(RepoTestCase):
"""Project object testing."""
def test_create(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.full_path))
self.assertTrue(project.slug in project.full_path)
def test_rename(self):
component = self.create_link()
self.assertTrue(
Component.objects.filter(repo='weblate://test/test').exists()
)
project = component.project
old_path = project.full_path
self.assertTrue(os.path.exists(old_path))
self.assertTrue(os.path.exists(
component.translation_set.all()[0].get_filename()
))
project.slug = 'changed'
project.save()
new_path = project.full_path
self.addCleanup(shutil.rmtree, new_path, True)
self.assertFalse(os.path.exists(old_path))
self.assertTrue(os.path.exists(new_path))
self.assertTrue(
Component.objects.filter(repo='weblate://changed/test').exists()
)
self.assertFalse(
Component.objects.filter(repo='weblate://test/test').exists()
)
component = Component.objects.get(pk=component.pk)
self.assertTrue(os.path.exists(
component.translation_set.all()[0].get_filename()
))
def test_delete(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.full_path))
project.delete()
self.assertFalse(os.path.exists(project.full_path))
def test_delete_all(self):
project = self.create_project()
self.assertTrue(os.path.exists(project.full_path))
Project.objects.all().delete()
self.assertFalse(os.path.exists(project.full_path))
def test_acl(self):
"""Test for ACL handling."""
# Create user to verify ACL
user = create_test_user()
# Create project
project = self.create_project()
# Enable ACL
project.access_control = Project.ACCESS_PRIVATE
project.save()
# Check user does not have access
self.assertFalse(user.can_access_project(project))
# Add to ACL group
user.groups.add(Group.objects.get(name='Test@Translate'))
# Need to fetch user again to clear permission cache
user = User.objects.get(username='testuser')
# We now should have access
self.assertTrue(user.can_access_project(project))
class TranslationTest(RepoTestCase):
"""Translation testing."""
def test_basic(self):
component = self.create_component()
translation = component.translation_set.get(language_code='cs')
self.assertEqual(translation.stats.translated, 0)
self.assertEqual(translation.stats.all, 4)
self.assertEqual(translation.stats.fuzzy, 0)
def test_validation(self):
"""Translation validation"""
component = self.create_component()
translation = component.translation_set.get(language_code='cs')
translation.full_clean()
def test_update_stats(self):
"""Check update stats with no units."""
component = self.create_component()
translation = component.translation_set.get(language_code='cs')
self.assertEqual(translation.stats.all, 4)
self.assertEqual(translation.stats.all_words, 15)
translation.unit_set.all().delete()
translation.invalidate_cache()
self.assertEqual(translation.stats.all, 0)
self.assertEqual(translation.stats.all_words, 0)
def test_commit_groupping(self):
component = self.create_component()
translation = component.translation_set.get(language_code='cs')
user = create_test_user()
start_rev = component.repository.last_revision
# Initial translation
for unit in translation.unit_set.iterator():
unit.translate(user, 'test2', STATE_TRANSLATED)
# Translation completed, no commit forced
self.assertEqual(start_rev, component.repository.last_revision)
# Translation from same author should not trigger commit
for unit in translation.unit_set.iterator():
unit.translate(user, 'test3', STATE_TRANSLATED)
for unit in translation.unit_set.iterator():
unit.translate(user, 'test4', STATE_TRANSLATED)
self.assertEqual(start_rev, component.repository.last_revision)
        # Translation from another author should trigger a commit
for i, unit in enumerate(translation.unit_set.iterator()):
user = User.objects.create(
full_name='User {}'.format(unit.pk),
username='user-{}'.format(unit.pk),
email='{}@example.com'.format(unit.pk)
)
# Fetch current pending state, it might have been
# updated by background commit
unit.pending = Unit.objects.get(pk=unit.pk).pending
unit.translate(user, 'test', STATE_TRANSLATED)
if i == 0:
# First edit should trigger commit
self.assertNotEqual(
start_rev, component.repository.last_revision
)
start_rev = component.repository.last_revision
# No further commit now
self.assertEqual(start_rev, component.repository.last_revision)
# Commit pending changes
translation.commit_pending('test', None)
self.assertNotEqual(start_rev, component.repository.last_revision)
class ComponentListTest(RepoTestCase):
"""Test(s) for ComponentList model."""
def test_slug(self):
"""Test ComponentList slug."""
clist = ComponentList()
clist.slug = 'slug'
self.assertEqual(clist.tab_slug(), 'list-slug')
def test_auto(self):
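        # A catch-all auto rule created after the component exists should
        # attach that component to the list immediately.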
self.create_component()
clist = ComponentList.objects.create(
name='Name',
slug='slug'
)
AutoComponentList.objects.create(
project_match='^.*$',
component_match='^.*$',
componentlist=clist
)
self.assertEqual(
clist.components.count(), 1
)
def test_auto_create(self):
clist = ComponentList.objects.create(
name='Name',
slug='slug'
)
AutoComponentList.objects.create(
project_match='^.*$',
component_match='^.*$',
componentlist=clist
)
self.assertEqual(
clist.components.count(), 0
)
self.create_component()
self.assertEqual(
clist.components.count(), 1
)
def test_auto_nomatch(self):
self.create_component()
clist = ComponentList.objects.create(
name='Name',
slug='slug'
)
AutoComponentList.objects.create(
project_match='^none$',
component_match='^.*$',
componentlist=clist
)
self.assertEqual(
clist.components.count(), 0
)
class ModelTestCase(RepoTestCase):
def setUp(self):
super(ModelTestCase, self).setUp()
self.component = self.create_component()
class SourceTest(ModelTestCase):
"""Source objects testing."""
def test_exists(self):
self.assertTrue(Source.objects.exists())
def test_source_info(self):
unit = Unit.objects.all()[0]
self.assertIsNotNone(unit.source_info)
def test_priority(self):
unit = Unit.objects.all()[0]
self.assertEqual(unit.priority, 100)
source = unit.source_info
source.check_flags = 'priority:200'
source.save()
unit2 = Unit.objects.get(pk=unit.pk)
self.assertEqual(unit2.priority, 200)
def test_check_flags(self):
"""Setting of Source check_flags changes checks for related units."""
self.assertEqual(Check.objects.count(), 3)
check = Check.objects.all()[0]
unit = check.related_units[0]
self.assertEqual(self.component.stats.allchecks, 2)
source = unit.source_info
source.check_flags = 'ignore-{0}'.format(check.check)
source.save()
self.assertEqual(Check.objects.count(), 0)
self.assertEqual(
Component.objects.get(pk=self.component.pk).stats.allchecks,
0
)
class UnitTest(ModelTestCase):
def test_more_like(self):
unit = Unit.objects.all()[0]
self.assertEqual(Unit.objects.more_like_this(unit).count(), 0)
def test_newlines(self):
user = create_test_user()
unit = Unit.objects.all()[0]
unit.translate(user, 'new\nstring', STATE_TRANSLATED)
self.assertEqual(unit.target, 'new\nstring')
# New object to clear all_flags cache
unit = Unit.objects.all()[0]
unit.flags = 'dos-eol'
unit.translate(user, 'new\nstring', STATE_TRANSLATED)
self.assertEqual(unit.target, 'new\r\nstring')
unit.translate(user, 'other\r\nstring', STATE_TRANSLATED)
self.assertEqual(unit.target, 'other\r\nstring')
def test_flags(self):
unit = Unit.objects.all()[0]
unit.flags = 'no-wrap, ignore-same'
self.assertEqual(unit.all_flags.items(), {'no-wrap', 'ignore-same'})
def test_get_max_length_no_pk(self):
unit = Unit.objects.all()[0]
unit.pk = False
self.assertEqual(unit.get_max_length(), 10000)
def test_get_max_length_empty_source_default_fallback(self):
unit = Unit.objects.all()[0]
unit.pk = True
unit.source = ''
self.assertEqual(unit.get_max_length(), 100)
def test_get_max_length_default_fallback(self):
unit = Unit.objects.all()[0]
unit.pk = True
unit.source = 'My test source'
self.assertEqual(unit.get_max_length(), 140)
@override_settings(LIMIT_TRANSLATION_LENGTH_BY_SOURCE_LENGTH=False)
def test_get_max_length_empty_source_disabled_default_fallback(self):
unit = Unit.objects.all()[0]
unit.pk = True
unit.source = ''
self.assertEqual(unit.get_max_length(), 10000)
@override_settings(LIMIT_TRANSLATION_LENGTH_BY_SOURCE_LENGTH=False)
def test_get_max_length_disabled_default_fallback(self):
unit = Unit.objects.all()[0]
unit.pk = True
unit.source = 'My test source'
self.assertEqual(unit.get_max_length(), 10000)
class WhiteboardMessageTest(ModelTestCase):
"""Test(s) for WhiteboardMessage model."""
def setUp(self):
super(WhiteboardMessageTest, self).setUp()
WhiteboardMessage.objects.create(
language=Language.objects.get(code='cs'),
message='test cs',
)
WhiteboardMessage.objects.create(
language=Language.objects.get(code='de'),
message='test de',
)
WhiteboardMessage.objects.create(
project=self.component.project,
message='test project',
)
WhiteboardMessage.objects.create(
component=self.component,
project=self.component.project,
message='test component',
)
WhiteboardMessage.objects.create(
message='test global',
)
def verify_filter(self, messages, count, message=None):
"""
        Verify that the messages have the given count and that the first
        one has the given message text.
"""
self.assertEqual(len(messages), count)
if message is not None:
self.assertEqual(messages[0].message, message)
def test_contextfilter_global(self):
self.verify_filter(
WhiteboardMessage.objects.context_filter(),
1,
'test global'
)
def test_contextfilter_project(self):
self.verify_filter(
WhiteboardMessage.objects.context_filter(
project=self.component.project,
),
1,
'test project'
)
def test_contextfilter_component(self):
self.verify_filter(
WhiteboardMessage.objects.context_filter(
component=self.component,
),
2
)
def test_contextfilter_translation(self):
self.verify_filter(
WhiteboardMessage.objects.context_filter(
component=self.component,
language=Language.objects.get(code='cs'),
),
3,
)
def test_contextfilter_language(self):
self.verify_filter(
WhiteboardMessage.objects.context_filter(
language=Language.objects.get(code='cs'),
),
1,
'test cs'
)
self.verify_filter(
WhiteboardMessage.objects.context_filter(
language=Language.objects.get(code='de'),
),
1,
'test de'
)
| gpl-3.0 | -6,364,523,919,517,820,000 | 32.60793 | 80 | 0.620134 | false |
jhayworth/config | .emacs.d/elpy/rpc-venv/lib/python2.7/site-packages/jedi/inference/syntax_tree.py | 2 | 34097 | """
Functions inferring the syntax tree.
"""
import copy
from parso.python import tree
from jedi._compatibility import force_unicode, unicode
from jedi import debug
from jedi import parser_utils
from jedi.inference.base_value import ValueSet, NO_VALUES, ContextualizedNode, \
iterator_to_value_set, iterate_values
from jedi.inference.lazy_value import LazyTreeValue
from jedi.inference import compiled
from jedi.inference import recursion
from jedi.inference import analysis
from jedi.inference import imports
from jedi.inference import arguments
from jedi.inference.value import ClassValue, FunctionValue
from jedi.inference.value import iterable
from jedi.inference.value.dynamic_arrays import ListModification, DictModification
from jedi.inference.value import TreeInstance
from jedi.inference.helpers import is_string, is_literal, is_number, \
get_names_of_node, is_big_annoying_library
from jedi.inference.compiled.access import COMPARISON_OPERATORS
from jedi.inference.cache import inference_state_method_cache
from jedi.inference.gradual.stub_value import VersionInfo
from jedi.inference.gradual import annotation
from jedi.inference.names import TreeNameDefinition
from jedi.inference.context import CompForContext
from jedi.inference.value.decorator import Decoratee
from jedi.plugins import plugin_manager
def _limit_value_infers(func):
"""
    This is for now the way we limit type inference going wild. There are
    other ways to ensure recursion limits as well. This is mostly necessary
    because of instance (self) access that can be quite tricky to limit.
    I'm still not sure this is the way to go, but it looks okay for now and we
    can still go another way in the future. Tests are there. ~ dave
"""
def wrapper(context, *args, **kwargs):
n = context.tree_node
inference_state = context.inference_state
try:
inference_state.inferred_element_counts[n] += 1
maximum = 300
if context.parent_context is None \
and context.get_value() is inference_state.builtins_module:
# Builtins should have a more generous inference limit.
# It is important that builtins can be executed, otherwise some
# functions that depend on certain builtins features would be
# broken, see e.g. GH #1432
maximum *= 100
if inference_state.inferred_element_counts[n] > maximum:
debug.warning('In value %s there were too many inferences.', n)
return NO_VALUES
except KeyError:
inference_state.inferred_element_counts[n] = 1
return func(context, *args, **kwargs)
return wrapper
def infer_node(context, element):
if isinstance(context, CompForContext):
return _infer_node(context, element)
if_stmt = element
while if_stmt is not None:
if_stmt = if_stmt.parent
if if_stmt.type in ('if_stmt', 'for_stmt'):
break
if parser_utils.is_scope(if_stmt):
if_stmt = None
break
predefined_if_name_dict = context.predefined_names.get(if_stmt)
# TODO there's a lot of issues with this one. We actually should do
# this in a different way. Caching should only be active in certain
# cases and this all sucks.
if predefined_if_name_dict is None and if_stmt \
and if_stmt.type == 'if_stmt' and context.inference_state.is_analysis:
if_stmt_test = if_stmt.children[1]
name_dicts = [{}]
# If we already did a check, we don't want to do it again -> If
# value.predefined_names is filled, we stop.
# We don't want to check the if stmt itself, it's just about
# the content.
if element.start_pos > if_stmt_test.end_pos:
# Now we need to check if the names in the if_stmt match the
# names in the suite.
if_names = get_names_of_node(if_stmt_test)
element_names = get_names_of_node(element)
str_element_names = [e.value for e in element_names]
if any(i.value in str_element_names for i in if_names):
for if_name in if_names:
definitions = context.inference_state.infer(context, if_name)
# Every name that has multiple different definitions
# causes the complexity to rise. The complexity should
# never fall below 1.
if len(definitions) > 1:
if len(name_dicts) * len(definitions) > 16:
debug.dbg('Too many options for if branch inference %s.', if_stmt)
                            # There's only a certain number of branches
                            # Jedi can infer, otherwise it will take too
                            # long.
name_dicts = [{}]
break
original_name_dicts = list(name_dicts)
name_dicts = []
for definition in definitions:
new_name_dicts = list(original_name_dicts)
for i, name_dict in enumerate(new_name_dicts):
new_name_dicts[i] = name_dict.copy()
new_name_dicts[i][if_name.value] = ValueSet([definition])
name_dicts += new_name_dicts
else:
for name_dict in name_dicts:
name_dict[if_name.value] = definitions
if len(name_dicts) > 1:
result = NO_VALUES
for name_dict in name_dicts:
with context.predefine_names(if_stmt, name_dict):
result |= _infer_node(context, element)
return result
else:
return _infer_node_if_inferred(context, element)
else:
if predefined_if_name_dict:
return _infer_node(context, element)
else:
return _infer_node_if_inferred(context, element)
def _infer_node_if_inferred(context, element):
"""
TODO This function is temporary: Merge with infer_node.
"""
parent = element
while parent is not None:
parent = parent.parent
predefined_if_name_dict = context.predefined_names.get(parent)
if predefined_if_name_dict is not None:
return _infer_node(context, element)
return _infer_node_cached(context, element)
@inference_state_method_cache(default=NO_VALUES)
def _infer_node_cached(context, element):
return _infer_node(context, element)
@debug.increase_indent
@_limit_value_infers
def _infer_node(context, element):
debug.dbg('infer_node %s@%s in %s', element, element.start_pos, context)
inference_state = context.inference_state
typ = element.type
if typ in ('name', 'number', 'string', 'atom', 'strings', 'keyword', 'fstring'):
return infer_atom(context, element)
elif typ == 'lambdef':
return ValueSet([FunctionValue.from_context(context, element)])
elif typ == 'expr_stmt':
return infer_expr_stmt(context, element)
elif typ in ('power', 'atom_expr'):
first_child = element.children[0]
children = element.children[1:]
had_await = False
if first_child.type == 'keyword' and first_child.value == 'await':
had_await = True
first_child = children.pop(0)
value_set = context.infer_node(first_child)
for (i, trailer) in enumerate(children):
if trailer == '**': # has a power operation.
right = context.infer_node(children[i + 1])
value_set = _infer_comparison(
context,
value_set,
trailer,
right
)
break
value_set = infer_trailer(context, value_set, trailer)
if had_await:
return value_set.py__await__().py__stop_iteration_returns()
return value_set
elif typ in ('testlist_star_expr', 'testlist',):
# The implicit tuple in statements.
return ValueSet([iterable.SequenceLiteralValue(inference_state, context, element)])
elif typ in ('not_test', 'factor'):
value_set = context.infer_node(element.children[-1])
for operator in element.children[:-1]:
value_set = infer_factor(value_set, operator)
return value_set
elif typ == 'test':
# `x if foo else y` case.
return (context.infer_node(element.children[0])
| context.infer_node(element.children[-1]))
elif typ == 'operator':
# Must be an ellipsis, other operators are not inferred.
        # In Python 2 ellipsis is coded as three single dot tokens, not
        # as a single three-dot token.
if element.value not in ('.', '...'):
origin = element.parent
raise AssertionError("unhandled operator %s in %s " % (repr(element.value), origin))
return ValueSet([compiled.builtin_from_name(inference_state, u'Ellipsis')])
elif typ == 'dotted_name':
value_set = infer_atom(context, element.children[0])
for next_name in element.children[2::2]:
value_set = value_set.py__getattribute__(next_name, name_context=context)
return value_set
elif typ == 'eval_input':
return context.infer_node(element.children[0])
elif typ == 'annassign':
return annotation.infer_annotation(context, element.children[1]) \
.execute_annotation()
elif typ == 'yield_expr':
if len(element.children) and element.children[1].type == 'yield_arg':
# Implies that it's a yield from.
element = element.children[1].children[1]
generators = context.infer_node(element) \
.py__getattribute__('__iter__').execute_with_values()
return generators.py__stop_iteration_returns()
# Generator.send() is not implemented.
return NO_VALUES
elif typ == 'namedexpr_test':
return context.infer_node(element.children[2])
else:
return infer_or_test(context, element)
def infer_trailer(context, atom_values, trailer):
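    # A trailer is attribute access ('.name'), a subscript ('[...]') or a
    # call ('(...)'); dispatch on its first child below.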
trailer_op, node = trailer.children[:2]
if node == ')': # `arglist` is optional.
node = None
if trailer_op == '[':
trailer_op, node, _ = trailer.children
return atom_values.get_item(
_infer_subscript_list(context, node),
ContextualizedNode(context, trailer)
)
else:
debug.dbg('infer_trailer: %s in %s', trailer, atom_values)
if trailer_op == '.':
return atom_values.py__getattribute__(
name_context=context,
name_or_str=node
)
else:
assert trailer_op == '(', 'trailer_op is actually %s' % trailer_op
args = arguments.TreeArguments(context.inference_state, context, node, trailer)
return atom_values.execute(args)
def infer_atom(context, atom):
"""
Basically to process ``atom`` nodes. The parser sometimes doesn't
generate the node (because it has just one child). In that case an atom
might be a name or a literal as well.
"""
state = context.inference_state
if atom.type == 'name':
if atom.value in ('True', 'False', 'None'):
# Python 2...
return ValueSet([compiled.builtin_from_name(state, atom.value)])
# This is the first global lookup.
stmt = tree.search_ancestor(
atom, 'expr_stmt', 'lambdef'
) or atom
if stmt.type == 'lambdef':
stmt = atom
position = stmt.start_pos
if _is_annotation_name(atom):
# Since Python 3.7 (with from __future__ import annotations),
# annotations are essentially strings and can reference objects
# that are defined further down in code. Therefore just set the
# position to None, so the finder will not try to stop at a certain
# position in the module.
position = None
return context.py__getattribute__(atom, position=position)
elif atom.type == 'keyword':
# For False/True/None
if atom.value in ('False', 'True', 'None'):
return ValueSet([compiled.builtin_from_name(state, atom.value)])
elif atom.value == 'print':
# print e.g. could be inferred like this in Python 2.7
return NO_VALUES
elif atom.value == 'yield':
# Contrary to yield from, yield can just appear alone to return a
# value when used with `.send()`.
return NO_VALUES
assert False, 'Cannot infer the keyword %s' % atom
elif isinstance(atom, tree.Literal):
string = state.compiled_subprocess.safe_literal_eval(atom.value)
return ValueSet([compiled.create_simple_object(state, string)])
elif atom.type == 'strings':
# Will be multiple string.
value_set = infer_atom(context, atom.children[0])
for string in atom.children[1:]:
right = infer_atom(context, string)
value_set = _infer_comparison(context, value_set, u'+', right)
return value_set
elif atom.type == 'fstring':
return compiled.get_string_value_set(state)
else:
c = atom.children
# Parentheses without commas are not tuples.
if c[0] == '(' and not len(c) == 2 \
and not(c[1].type == 'testlist_comp'
and len(c[1].children) > 1):
return context.infer_node(c[1])
try:
comp_for = c[1].children[1]
except (IndexError, AttributeError):
pass
else:
if comp_for == ':':
# Dict comprehensions have a colon at the 3rd index.
try:
comp_for = c[1].children[3]
except IndexError:
pass
if comp_for.type in ('comp_for', 'sync_comp_for'):
return ValueSet([iterable.comprehension_from_atom(
state, context, atom
)])
# It's a dict/list/tuple literal.
array_node = c[1]
try:
array_node_c = array_node.children
except AttributeError:
array_node_c = []
if c[0] == '{' and (array_node == '}' or ':' in array_node_c
or '**' in array_node_c):
new_value = iterable.DictLiteralValue(state, context, atom)
else:
new_value = iterable.SequenceLiteralValue(state, context, atom)
return ValueSet([new_value])
@_limit_value_infers
def infer_expr_stmt(context, stmt, seek_name=None):
with recursion.execution_allowed(context.inference_state, stmt) as allowed:
if allowed:
return _infer_expr_stmt(context, stmt, seek_name)
return NO_VALUES
@debug.increase_indent
def _infer_expr_stmt(context, stmt, seek_name=None):
"""
The starting point of the completion. A statement always owns a call
    list, which are the calls that the statement makes. In case multiple
names are defined in the statement, `seek_name` returns the result for
this name.
expr_stmt: testlist_star_expr (annassign | augassign (yield_expr|testlist) |
('=' (yield_expr|testlist_star_expr))*)
annassign: ':' test ['=' test]
augassign: ('+=' | '-=' | '*=' | '@=' | '/=' | '%=' | '&=' | '|=' | '^=' |
'<<=' | '>>=' | '**=' | '//=')
:param stmt: A `tree.ExprStmt`.
"""
def check_setitem(stmt):
atom_expr = stmt.children[0]
if atom_expr.type not in ('atom_expr', 'power'):
return False, None
name = atom_expr.children[0]
if name.type != 'name' or len(atom_expr.children) != 2:
return False, None
trailer = atom_expr.children[-1]
return trailer.children[0] == '[', trailer.children[1]
debug.dbg('infer_expr_stmt %s (%s)', stmt, seek_name)
rhs = stmt.get_rhs()
value_set = context.infer_node(rhs)
if seek_name:
n = TreeNameDefinition(context, seek_name)
value_set = check_tuple_assignments(n, value_set)
first_operator = next(stmt.yield_operators(), None)
is_setitem, subscriptlist = check_setitem(stmt)
is_annassign = first_operator not in ('=', None) and first_operator.type == 'operator'
if is_annassign or is_setitem:
# `=` is always the last character in aug assignments -> -1
name = stmt.get_defined_names(include_setitem=True)[0].value
left_values = context.py__getattribute__(name, position=stmt.start_pos)
if is_setitem:
def to_mod(v):
c = ContextualizedSubscriptListNode(context, subscriptlist)
if v.array_type == 'dict':
return DictModification(v, value_set, c)
elif v.array_type == 'list':
return ListModification(v, value_set, c)
return v
value_set = ValueSet(to_mod(v) for v in left_values)
else:
operator = copy.copy(first_operator)
operator.value = operator.value[:-1]
for_stmt = tree.search_ancestor(stmt, 'for_stmt')
if for_stmt is not None and for_stmt.type == 'for_stmt' and value_set \
and parser_utils.for_stmt_defines_one_name(for_stmt):
# Iterate through result and add the values, that's possible
# only in for loops without clutter, because they are
# predictable. Also only do it, if the variable is not a tuple.
node = for_stmt.get_testlist()
cn = ContextualizedNode(context, node)
ordered = list(cn.infer().iterate(cn))
for lazy_value in ordered:
dct = {for_stmt.children[1].value: lazy_value.infer()}
with context.predefine_names(for_stmt, dct):
t = context.infer_node(rhs)
left_values = _infer_comparison(context, left_values, operator, t)
value_set = left_values
else:
value_set = _infer_comparison(context, left_values, operator, value_set)
debug.dbg('infer_expr_stmt result %s', value_set)
return value_set
def infer_or_test(context, or_test):
iterator = iter(or_test.children)
types = context.infer_node(next(iterator))
for operator in iterator:
right = next(iterator)
if operator.type == 'comp_op': # not in / is not
operator = ' '.join(c.value for c in operator.children)
# handle type inference of and/or here.
if operator in ('and', 'or'):
left_bools = set(left.py__bool__() for left in types)
if left_bools == {True}:
if operator == 'and':
types = context.infer_node(right)
elif left_bools == {False}:
if operator != 'and':
types = context.infer_node(right)
# Otherwise continue, because of uncertainty.
else:
types = _infer_comparison(context, types, operator,
context.infer_node(right))
debug.dbg('infer_or_test types %s', types)
return types
@iterator_to_value_set
def infer_factor(value_set, operator):
"""
Calculates `+`, `-`, `~` and `not` prefixes.
"""
for value in value_set:
if operator == '-':
if is_number(value):
yield value.negate()
elif operator == 'not':
b = value.py__bool__()
if b is None: # Uncertainty.
return
yield compiled.create_simple_object(value.inference_state, not b)
else:
yield value
def _literals_to_types(inference_state, result):
# Changes literals ('a', 1, 1.0, etc) to its type instances (str(),
# int(), float(), etc).
new_result = NO_VALUES
for typ in result:
if is_literal(typ):
# Literals are only valid as long as the operations are
# correct. Otherwise add a value-free instance.
cls = compiled.builtin_from_name(inference_state, typ.name.string_name)
new_result |= cls.execute_with_values()
else:
new_result |= ValueSet([typ])
return new_result
def _infer_comparison(context, left_values, operator, right_values):
state = context.inference_state
if not left_values or not right_values:
# illegal slices e.g. cause left/right_result to be None
result = (left_values or NO_VALUES) | (right_values or NO_VALUES)
return _literals_to_types(state, result)
else:
# I don't think there's a reasonable chance that a string
# operation is still correct, once we pass something like six
# objects.
if len(left_values) * len(right_values) > 6:
return _literals_to_types(state, left_values | right_values)
else:
return ValueSet.from_sets(
_infer_comparison_part(state, context, left, operator, right)
for left in left_values
for right in right_values
)
def _is_annotation_name(name):
ancestor = tree.search_ancestor(name, 'param', 'funcdef', 'expr_stmt')
if ancestor is None:
return False
if ancestor.type in ('param', 'funcdef'):
ann = ancestor.annotation
if ann is not None:
return ann.start_pos <= name.start_pos < ann.end_pos
elif ancestor.type == 'expr_stmt':
c = ancestor.children
if len(c) > 1 and c[1].type == 'annassign':
return c[1].start_pos <= name.start_pos < c[1].end_pos
return False
def _is_tuple(value):
return isinstance(value, iterable.Sequence) and value.array_type == 'tuple'
def _is_list(value):
return isinstance(value, iterable.Sequence) and value.array_type == 'list'
def _bool_to_value(inference_state, bool_):
return compiled.builtin_from_name(inference_state, force_unicode(str(bool_)))
def _get_tuple_ints(value):
if not isinstance(value, iterable.SequenceLiteralValue):
return None
numbers = []
for lazy_value in value.py__iter__():
if not isinstance(lazy_value, LazyTreeValue):
return None
node = lazy_value.data
if node.type != 'number':
return None
try:
numbers.append(int(node.value))
except ValueError:
return None
return numbers
def _infer_comparison_part(inference_state, context, left, operator, right):
l_is_num = is_number(left)
r_is_num = is_number(right)
if isinstance(operator, unicode):
str_operator = operator
else:
str_operator = force_unicode(str(operator.value))
if str_operator == '*':
# for iterables, ignore * operations
if isinstance(left, iterable.Sequence) or is_string(left):
return ValueSet([left])
elif isinstance(right, iterable.Sequence) or is_string(right):
return ValueSet([right])
elif str_operator == '+':
if l_is_num and r_is_num or is_string(left) and is_string(right):
return left.execute_operation(right, str_operator)
elif _is_tuple(left) and _is_tuple(right) or _is_list(left) and _is_list(right):
return ValueSet([iterable.MergedArray(inference_state, (left, right))])
elif str_operator == '-':
if l_is_num and r_is_num:
return left.execute_operation(right, str_operator)
elif str_operator == '%':
# With strings and numbers the left type typically remains. Except for
# `int() % float()`.
return ValueSet([left])
elif str_operator in COMPARISON_OPERATORS:
if left.is_compiled() and right.is_compiled():
# Possible, because the return is not an option. Just compare.
result = left.execute_operation(right, str_operator)
if result:
return result
else:
if str_operator in ('is', '!=', '==', 'is not'):
operation = COMPARISON_OPERATORS[str_operator]
bool_ = operation(left, right)
return ValueSet([_bool_to_value(inference_state, bool_)])
if isinstance(left, VersionInfo):
version_info = _get_tuple_ints(right)
if version_info is not None:
bool_result = compiled.access.COMPARISON_OPERATORS[operator](
inference_state.environment.version_info,
tuple(version_info)
)
return ValueSet([_bool_to_value(inference_state, bool_result)])
return ValueSet([
_bool_to_value(inference_state, True),
_bool_to_value(inference_state, False)
])
elif str_operator == 'in':
return NO_VALUES
def check(obj):
"""Checks if a Jedi object is either a float or an int."""
return isinstance(obj, TreeInstance) and \
obj.name.string_name in ('int', 'float')
# Static analysis, one is a number, the other one is not.
if str_operator in ('+', '-') and l_is_num != r_is_num \
and not (check(left) or check(right)):
message = "TypeError: unsupported operand type(s) for +: %s and %s"
analysis.add(context, 'type-error-operation', operator,
message % (left, right))
result = ValueSet([left, right])
debug.dbg('Used operator %s resulting in %s', operator, result)
return result
def _remove_statements(context, stmt, name):
"""
This is the part where statements are being stripped.
Due to lazy type inference, statements like a = func; b = a; b() have to be
inferred.
TODO merge with infer_expr_stmt?
"""
pep0484_values = \
annotation.find_type_from_comment_hint_assign(context, stmt, name)
if pep0484_values:
return pep0484_values
return infer_expr_stmt(context, stmt, seek_name=name)
@plugin_manager.decorate()
def tree_name_to_values(inference_state, context, tree_name):
value_set = NO_VALUES
module_node = context.get_root_context().tree_node
# First check for annotations, like: `foo: int = 3`
if module_node is not None:
names = module_node.get_used_names().get(tree_name.value, [])
for name in names:
expr_stmt = name.parent
if expr_stmt.type == "expr_stmt" and expr_stmt.children[1].type == "annassign":
correct_scope = parser_utils.get_parent_scope(name) == context.tree_node
if correct_scope:
value_set |= annotation.infer_annotation(
context, expr_stmt.children[1].children[1]
).execute_annotation()
if value_set:
return value_set
types = []
node = tree_name.get_definition(import_name_always=True, include_setitem=True)
if node is None:
node = tree_name.parent
if node.type == 'global_stmt':
c = context.create_context(tree_name)
if c.is_module():
# In case we are already part of the module, there is no point
# in looking up the global statement anymore, because it's not
# valid at that point anyway.
return NO_VALUES
# For global_stmt lookups, we only need the first possible scope,
# which means the function itself.
filter = next(c.get_filters())
names = filter.get(tree_name.value)
return ValueSet.from_sets(name.infer() for name in names)
elif node.type not in ('import_from', 'import_name'):
c = context.create_context(tree_name)
return infer_atom(c, tree_name)
typ = node.type
if typ == 'for_stmt':
types = annotation.find_type_from_comment_hint_for(context, node, tree_name)
if types:
return types
if typ == 'with_stmt':
types = annotation.find_type_from_comment_hint_with(context, node, tree_name)
if types:
return types
if typ in ('for_stmt', 'comp_for', 'sync_comp_for'):
try:
types = context.predefined_names[node][tree_name.value]
except KeyError:
cn = ContextualizedNode(context, node.children[3])
for_types = iterate_values(
cn.infer(),
contextualized_node=cn,
is_async=node.parent.type == 'async_stmt',
)
n = TreeNameDefinition(context, tree_name)
types = check_tuple_assignments(n, for_types)
elif typ == 'expr_stmt':
types = _remove_statements(context, node, tree_name)
elif typ == 'with_stmt':
value_managers = context.infer_node(node.get_test_node_from_name(tree_name))
enter_methods = value_managers.py__getattribute__(u'__enter__')
return enter_methods.execute_with_values()
elif typ in ('import_from', 'import_name'):
types = imports.infer_import(context, tree_name)
elif typ in ('funcdef', 'classdef'):
types = _apply_decorators(context, node)
elif typ == 'try_stmt':
# TODO an exception can also be a tuple. Check for those.
# TODO check for types that are not classes and add it to
# the static analysis report.
exceptions = context.infer_node(tree_name.get_previous_sibling().get_previous_sibling())
types = exceptions.execute_with_values()
elif typ == 'param':
types = NO_VALUES
elif typ == 'del_stmt':
types = NO_VALUES
else:
raise ValueError("Should not happen. type: %s" % typ)
return types
# We don't want to have functions/classes that are created by the same
# tree_node.
@inference_state_method_cache()
def _apply_decorators(context, node):
"""
    Returns the function that should be executed in the end.
    This is also the place where the decorators are processed.
"""
if node.type == 'classdef':
decoratee_value = ClassValue(
context.inference_state,
parent_context=context,
tree_node=node
)
else:
decoratee_value = FunctionValue.from_context(context, node)
initial = values = ValueSet([decoratee_value])
if is_big_annoying_library(context):
return values
for dec in reversed(node.get_decorators()):
debug.dbg('decorator: %s %s', dec, values, color="MAGENTA")
with debug.increase_indent_cm():
dec_values = context.infer_node(dec.children[1])
trailer_nodes = dec.children[2:-1]
if trailer_nodes:
# Create a trailer and infer it.
trailer = tree.PythonNode('trailer', trailer_nodes)
trailer.parent = dec
dec_values = infer_trailer(context, dec_values, trailer)
if not len(dec_values):
code = dec.get_code(include_prefix=False)
                # For the near future, we don't want to hear about the runtime
                # decorator in typing that was intentionally omitted. This is not
                # "correct", but helps with debugging.
if code != '@runtime\n':
debug.warning('decorator not found: %s on %s', dec, node)
return initial
values = dec_values.execute(arguments.ValuesArguments([values]))
if not len(values):
debug.warning('not possible to resolve wrappers found %s', node)
return initial
debug.dbg('decorator end %s', values, color="MAGENTA")
if values != initial:
return ValueSet([Decoratee(c, decoratee_value) for c in values])
return values
def check_tuple_assignments(name, value_set):
"""
Checks if tuples are assigned.
"""
lazy_value = None
for index, node in name.assignment_indexes():
cn = ContextualizedNode(name.parent_context, node)
iterated = value_set.iterate(cn)
if isinstance(index, slice):
            # For now, star unpacking is not possible.
return NO_VALUES
for _ in range(index + 1):
try:
lazy_value = next(iterated)
except StopIteration:
# We could do this with the default param in next. But this
# would allow this loop to run for a very long time if the
# index number is high. Therefore break if the loop is
# finished.
return NO_VALUES
value_set = lazy_value.infer()
return value_set
class ContextualizedSubscriptListNode(ContextualizedNode):
def infer(self):
return _infer_subscript_list(self.context, self.node)
def _infer_subscript_list(context, index):
"""
Handles slices in subscript nodes.
"""
if index == ':':
# Like array[:]
return ValueSet([iterable.Slice(context, None, None, None)])
elif index.type == 'subscript' and not index.children[0] == '.':
# subscript basically implies a slice operation, except for Python 2's
# Ellipsis.
# e.g. array[:3]
result = []
for el in index.children:
if el == ':':
if not result:
result.append(None)
elif el.type == 'sliceop':
if len(el.children) == 2:
result.append(el.children[1])
else:
result.append(el)
result += [None] * (3 - len(result))
return ValueSet([iterable.Slice(context, *result)])
elif index.type == 'subscriptlist':
return ValueSet([iterable.SequenceLiteralValue(context.inference_state, context, index)])
# No slices
return context.infer_node(index)
| gpl-3.0 | 5,916,072,233,174,465,000 | 39.256198 | 97 | 0.588087 | false |
chubbypanda/principles-of-computing | practice_activity0.py | 5 | 4720 | # Practice Activity 0 for Principles of Computing class, by k., 06/18/2014
# Solitaire Mancala; GUI is provided in codeskulptor (obscure Python interpretter for the class) at:
# http://www.codeskulptor.org/#poc_mancala_gui.py
class SolitaireMancala(object):
'''
class to run the game logic
'''
def __init__(self):
self.state = [0]
def set_board(self, configuration):
'''
board to be a copy of the supplied configuration
'''
self.state = configuration[:]
def __str__(self):
'''
string corresponding to the current configuration of the board
'''
# logic of the game and internal representation are reversed
return str(self.state[::-1])
def get_num_seeds(self, house_num):
'''
return the number of seeds in the house with index house_num
'''
return self.state[house_num]
def is_legal_move(self, house_num):
'''
return True if moving the seeds from house house_num is legal;
if house_num is zero, is_legal_move should return False
'''
        # a move is legal if house value equals its distance from the store
return self.state[house_num] == house_num and house_num != 0
def is_game_won(self):
'''
return True if all houses contain no seeds
'''
# check if all positions (except for the store) are empty
return sum(self.state[1:]) == 0
def apply_move(self, house_num):
'''
apply a legal move for house house_num to the board
'''
if self.is_legal_move(house_num):
# adding +1 to each position lying in front of (and excluding) house_num
for position in xrange(len(self.state[:house_num])):
self.state[position] += 1
# current house (house_num) is then emptied
self.state[house_num] = 0
else:
            print 'No can do, this is an illegal move!'
def choose_move(self):
'''
return the index for the legal move whose house is closest to the store;
if no legal move is available, return 0
'''
# if no legal move found, need to eventually return 0
index = 0
# checking through each position backwards just to arrive at closest one
for num in range(len(self.state))[::-1]:
if self.is_legal_move(num):
index = num
return index
def plan_moves(self):
'''
return a list of legal moves computed to win the game if possible
'''
legal_moves = []
# game isn't won yet and there is still at least one legal move
while not self.is_game_won() and self.choose_move() != 0:
# make a note of and apply every possible move suggested
legal_moves.append(self.choose_move())
self.apply_move(self.choose_move())
return legal_moves
# few simple tests
##p = SolitaireMancala()
##print p
##p.set_board([3, 1, 1, 1, 1, 1, 1])
##print p
##print p.get_num_seeds(3)
##print 'game won?', p.is_game_won()
##p.set_board([3, 0, 0, 0, 0, 0, 0])
##print p
##print 'game won?', p.is_game_won()
##p.set_board([0, 6, 5, 4, 3, 2, 1])
##print p.is_legal_move(6), p.is_legal_move(5), p.is_legal_move(4)
##p.set_board([0, 1, 2, 3, 5, 4, 6])
##print p.is_legal_move(6), p.is_legal_move(5), p.is_legal_move(4), p.is_legal_move(0)
##print p
##print p.choose_move()
##p.apply_move(0)
##print 'before move:', p
##p.apply_move(6)
##print 'after move :', p
##print p.choose_move()
##print 'before move:', p
##p.apply_move(5)
##print 'after move :', p
##p.apply_move(4)
##print p.choose_move()
##p.plan_moves()
##print
##q = SolitaireMancala()
##q.set_board([0, 1, 2, 2, 4, 0, 0])
##print 'before game: ', q
##print q.choose_move()
##q.plan_moves()
##print q.is_game_won()
##print 'game finished:', q
# checking tests 5a and 5b from the grader
##test5a = SolitaireMancala()
##test5a.set_board([0, 0, 1, 1, 3, 5])
##print test5a.choose_move(), type(test5a.choose_move())
##test5b = SolitaireMancala()
##test5b.set_board([0, 0, 1, 1, 3, 0, 0])
##print test5b.choose_move(), type(test5b.choose_move())
##print test5a.is_legal_move(0)
##print test5b.is_legal_move(0)
##print test5a.choose_move()
##game = SolitaireMancala()
##SolitaireMancala.set_board(game, [0,0,1])
##print SolitaireMancala.plan_moves(game)
##print SolitaireMancala.is_game_won(game)
# failed test 6c from the grader
##game = SolitaireMancala()
##SolitaireMancala.set_board(game, [0,0,1])
##SolitaireMancala.plan_moves(game)
##print SolitaireMancala.plan_moves(game)
##print SolitaireMancala.is_game_won(game)
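# Illustrative usage sketch (not part of the original activity): a small board
# that the greedy strategy above can clear; guarded so it only runs when this
# file is executed directly.
if __name__ == '__main__':
    demo = SolitaireMancala()
    demo.set_board([0, 1, 1, 3, 0, 0, 0])
    print 'start board :', demo
    print 'winning moves:', demo.plan_moves()
    print 'final board :', demo
    print 'game won? :', demo.is_game_won()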
| mit | 1,366,974,064,081,838,600 | 32.239437 | 100 | 0.607203 | false |
vladimir-ipatov/ganeti | lib/cmdlib/common.py | 1 | 44087 | #
#
# Copyright (C) 2006, 2007, 2008, 2009, 2010, 2011, 2012, 2013 Google Inc.
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but
# WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
# General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA
# 02110-1301, USA.
"""Common functions used by multiple logical units."""
import copy
import os
from ganeti import compat
from ganeti import constants
from ganeti import errors
from ganeti import hypervisor
from ganeti import locking
from ganeti import objects
from ganeti import opcodes
from ganeti import pathutils
from ganeti import rpc
from ganeti import ssconf
from ganeti import utils
# States of instance
INSTANCE_DOWN = [constants.ADMINST_DOWN]
INSTANCE_ONLINE = [constants.ADMINST_DOWN, constants.ADMINST_UP]
INSTANCE_NOT_RUNNING = [constants.ADMINST_DOWN, constants.ADMINST_OFFLINE]
#: Instance status in which an instance can be marked as offline/online
CAN_CHANGE_INSTANCE_OFFLINE = (frozenset(INSTANCE_DOWN) | frozenset([
constants.ADMINST_OFFLINE,
]))
def _ExpandItemName(expand_fn, name, kind):
"""Expand an item name.
@param expand_fn: the function to use for expansion
@param name: requested item name
@param kind: text description ('Node' or 'Instance')
@return: the result of the expand_fn, if successful
@raise errors.OpPrereqError: if the item is not found
"""
(uuid, full_name) = expand_fn(name)
if uuid is None or full_name is None:
raise errors.OpPrereqError("%s '%s' not known" % (kind, name),
errors.ECODE_NOENT)
return (uuid, full_name)
def ExpandInstanceUuidAndName(cfg, expected_uuid, name):
"""Wrapper over L{_ExpandItemName} for instance."""
(uuid, full_name) = _ExpandItemName(cfg.ExpandInstanceName, name, "Instance")
if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The instance's UUID '%s' does not match the expected UUID '%s' for"
" instance '%s'. Maybe the instance changed since you submitted this"
" job." % (uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
return (uuid, full_name)
def ExpandNodeUuidAndName(cfg, expected_uuid, name):
"""Expand a short node name into the node UUID and full name.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type expected_uuid: string
@param expected_uuid: expected UUID for the node (or None if there is no
expectation). If it does not match, a L{errors.OpPrereqError} is
raised.
@type name: string
@param name: the short node name
"""
(uuid, full_name) = _ExpandItemName(cfg.ExpandNodeName, name, "Node")
if expected_uuid is not None and uuid != expected_uuid:
    raise errors.OpPrereqError(
      "The node's UUID '%s' does not match the expected UUID '%s' for node"
" '%s'. Maybe the node changed since you submitted this job." %
(uuid, expected_uuid, full_name), errors.ECODE_NOTUNIQUE)
return (uuid, full_name)
def ShareAll():
"""Returns a dict declaring all lock levels shared.
"""
return dict.fromkeys(locking.LEVELS, 1)
def CheckNodeGroupInstances(cfg, group_uuid, owned_instance_names):
"""Checks if the instances in a node group are still correct.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type group_uuid: string
@param group_uuid: Node group UUID
@type owned_instance_names: set or frozenset
@param owned_instance_names: List of currently owned instances
"""
wanted_instances = frozenset(cfg.GetInstanceNames(
cfg.GetNodeGroupInstances(group_uuid)))
if owned_instance_names != wanted_instances:
raise errors.OpPrereqError("Instances in node group '%s' changed since"
" locks were acquired, wanted '%s', have '%s';"
" retry the operation" %
(group_uuid,
utils.CommaJoin(wanted_instances),
utils.CommaJoin(owned_instance_names)),
errors.ECODE_STATE)
return wanted_instances
def GetWantedNodes(lu, short_node_names):
"""Returns list of checked and expanded node names.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type short_node_names: list
@param short_node_names: list of node names or None for all nodes
@rtype: tuple of lists
  @return: tuple with (list of node UUIDs, list of node names)
@raise errors.ProgrammerError: if the nodes parameter is wrong type
"""
if short_node_names:
node_uuids = [ExpandNodeUuidAndName(lu.cfg, None, name)[0]
for name in short_node_names]
else:
node_uuids = lu.cfg.GetNodeList()
return (node_uuids, [lu.cfg.GetNodeName(uuid) for uuid in node_uuids])
def GetWantedInstances(lu, short_inst_names):
"""Returns list of checked and expanded instance names.
@type lu: L{LogicalUnit}
@param lu: the logical unit on whose behalf we execute
@type short_inst_names: list
@param short_inst_names: list of instance names or None for all instances
@rtype: tuple of lists
@return: tuple of (instance UUIDs, instance names)
@raise errors.OpPrereqError: if the instances parameter is wrong type
@raise errors.OpPrereqError: if any of the passed instances is not found
"""
if short_inst_names:
inst_uuids = [ExpandInstanceUuidAndName(lu.cfg, None, name)[0]
for name in short_inst_names]
else:
inst_uuids = lu.cfg.GetInstanceList()
return (inst_uuids, [lu.cfg.GetInstanceName(uuid) for uuid in inst_uuids])
def RunPostHook(lu, node_name):
"""Runs the post-hook for an opcode on a single node.
"""
hm = lu.proc.BuildHooksManager(lu)
try:
hm.RunPhase(constants.HOOKS_PHASE_POST, node_names=[node_name])
except Exception, err: # pylint: disable=W0703
lu.LogWarning("Errors occurred running hooks on %s: %s",
node_name, err)
def RedistributeAncillaryFiles(lu):
"""Distribute additional files which are part of the cluster configuration.
ConfigWriter takes care of distributing the config and ssconf files, but
there are more files which should be distributed to all nodes. This function
makes sure those are copied.
"""
# Gather target nodes
cluster = lu.cfg.GetClusterInfo()
master_info = lu.cfg.GetMasterNodeInfo()
online_node_uuids = lu.cfg.GetOnlineNodeList()
online_node_uuid_set = frozenset(online_node_uuids)
vm_node_uuids = list(online_node_uuid_set.intersection(
lu.cfg.GetVmCapableNodeList()))
# Never distribute to master node
for node_uuids in [online_node_uuids, vm_node_uuids]:
if master_info.uuid in node_uuids:
node_uuids.remove(master_info.uuid)
# Gather file lists
(files_all, _, files_mc, files_vm) = \
ComputeAncillaryFiles(cluster, True)
# Never re-distribute configuration file from here
assert not (pathutils.CLUSTER_CONF_FILE in files_all or
pathutils.CLUSTER_CONF_FILE in files_vm)
assert not files_mc, "Master candidates not handled in this function"
filemap = [
(online_node_uuids, files_all),
(vm_node_uuids, files_vm),
]
# Upload the files
for (node_uuids, files) in filemap:
for fname in files:
UploadHelper(lu, node_uuids, fname)
def ComputeAncillaryFiles(cluster, redist):
"""Compute files external to Ganeti which need to be consistent.
@type redist: boolean
@param redist: Whether to include files which need to be redistributed
"""
# Compute files for all nodes
files_all = set([
pathutils.SSH_KNOWN_HOSTS_FILE,
pathutils.CONFD_HMAC_KEY,
pathutils.CLUSTER_DOMAIN_SECRET_FILE,
pathutils.SPICE_CERT_FILE,
pathutils.SPICE_CACERT_FILE,
pathutils.RAPI_USERS_FILE,
])
if redist:
# we need to ship at least the RAPI certificate
files_all.add(pathutils.RAPI_CERT_FILE)
else:
files_all.update(pathutils.ALL_CERT_FILES)
files_all.update(ssconf.SimpleStore().GetFileList())
if cluster.modify_etc_hosts:
files_all.add(pathutils.ETC_HOSTS)
if cluster.use_external_mip_script:
files_all.add(pathutils.EXTERNAL_MASTER_SETUP_SCRIPT)
# Files which are optional, these must:
# - be present in one other category as well
# - either exist or not exist on all nodes of that category (mc, vm all)
files_opt = set([
pathutils.RAPI_USERS_FILE,
])
# Files which should only be on master candidates
files_mc = set()
if not redist:
files_mc.add(pathutils.CLUSTER_CONF_FILE)
# File storage
if (not redist and (cluster.IsFileStorageEnabled() or
cluster.IsSharedFileStorageEnabled())):
files_all.add(pathutils.FILE_STORAGE_PATHS_FILE)
files_opt.add(pathutils.FILE_STORAGE_PATHS_FILE)
# Files which should only be on VM-capable nodes
files_vm = set(
filename
for hv_name in cluster.enabled_hypervisors
for filename in
hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[0])
files_opt |= set(
filename
for hv_name in cluster.enabled_hypervisors
for filename in
hypervisor.GetHypervisorClass(hv_name).GetAncillaryFiles()[1])
# Filenames in each category must be unique
all_files_set = files_all | files_mc | files_vm
assert (len(all_files_set) ==
sum(map(len, [files_all, files_mc, files_vm]))), \
"Found file listed in more than one file list"
# Optional files must be present in one other category
assert all_files_set.issuperset(files_opt), \
"Optional file not in a different required list"
# This one file should never ever be re-distributed via RPC
assert not (redist and
pathutils.FILE_STORAGE_PATHS_FILE in all_files_set)
return (files_all, files_opt, files_mc, files_vm)
def UploadHelper(lu, node_uuids, fname):
"""Helper for uploading a file and showing warnings.
"""
if os.path.exists(fname):
result = lu.rpc.call_upload_file(node_uuids, fname)
for to_node_uuids, to_result in result.items():
msg = to_result.fail_msg
if msg:
msg = ("Copy of file %s to node %s failed: %s" %
(fname, lu.cfg.GetNodeName(to_node_uuids), msg))
lu.LogWarning(msg)
def MergeAndVerifyHvState(op_input, obj_input):
  """Combines the hv state from an opcode with that of the object
@param op_input: The input dict from the opcode
@param obj_input: The input dict from the objects
@return: The verified and updated dict
"""
if op_input:
invalid_hvs = set(op_input) - constants.HYPER_TYPES
if invalid_hvs:
raise errors.OpPrereqError("Invalid hypervisor(s) in hypervisor state:"
" %s" % utils.CommaJoin(invalid_hvs),
errors.ECODE_INVAL)
if obj_input is None:
obj_input = {}
type_check = constants.HVSTS_PARAMETER_TYPES
return _UpdateAndVerifySubDict(obj_input, op_input, type_check)
return None
def MergeAndVerifyDiskState(op_input, obj_input):
  """Combines the disk state from an opcode with that of the object
@param op_input: The input dict from the opcode
@param obj_input: The input dict from the objects
@return: The verified and updated dict
"""
if op_input:
invalid_dst = set(op_input) - constants.DS_VALID_TYPES
if invalid_dst:
raise errors.OpPrereqError("Invalid storage type(s) in disk state: %s" %
utils.CommaJoin(invalid_dst),
errors.ECODE_INVAL)
type_check = constants.DSS_PARAMETER_TYPES
if obj_input is None:
obj_input = {}
return dict((key, _UpdateAndVerifySubDict(obj_input.get(key, {}), value,
type_check))
for key, value in op_input.items())
return None
def CheckOSParams(lu, required, node_uuids, osname, osparams):
"""OS parameters validation.
@type lu: L{LogicalUnit}
@param lu: the logical unit for which we check
@type required: boolean
@param required: whether the validation should fail if the OS is not
found
@type node_uuids: list
@param node_uuids: the list of nodes on which we should check
@type osname: string
  @param osname: the name of the OS we should use
@type osparams: dict
@param osparams: the parameters which we need to check
@raise errors.OpPrereqError: if the parameters are not valid
"""
node_uuids = _FilterVmNodes(lu, node_uuids)
result = lu.rpc.call_os_validate(node_uuids, required, osname,
[constants.OS_VALIDATE_PARAMETERS],
osparams)
for node_uuid, nres in result.items():
# we don't check for offline cases since this should be run only
# against the master node and/or an instance's nodes
nres.Raise("OS Parameters validation failed on node %s" %
lu.cfg.GetNodeName(node_uuid))
if not nres.payload:
lu.LogInfo("OS %s not found on node %s, validation skipped",
osname, lu.cfg.GetNodeName(node_uuid))
def CheckHVParams(lu, node_uuids, hvname, hvparams):
"""Hypervisor parameter validation.
  This function abstracts the hypervisor parameter validation to be
used in both instance create and instance modify.
@type lu: L{LogicalUnit}
@param lu: the logical unit for which we check
@type node_uuids: list
@param node_uuids: the list of nodes on which we should check
@type hvname: string
@param hvname: the name of the hypervisor we should use
@type hvparams: dict
@param hvparams: the parameters which we need to check
@raise errors.OpPrereqError: if the parameters are not valid
"""
node_uuids = _FilterVmNodes(lu, node_uuids)
cluster = lu.cfg.GetClusterInfo()
hvfull = objects.FillDict(cluster.hvparams.get(hvname, {}), hvparams)
hvinfo = lu.rpc.call_hypervisor_validate_params(node_uuids, hvname, hvfull)
for node_uuid in node_uuids:
info = hvinfo[node_uuid]
if info.offline:
continue
info.Raise("Hypervisor parameter validation failed on node %s" %
lu.cfg.GetNodeName(node_uuid))
def AdjustCandidatePool(lu, exceptions):
"""Adjust the candidate pool after node operations.
"""
mod_list = lu.cfg.MaintainCandidatePool(exceptions)
if mod_list:
lu.LogInfo("Promoted nodes to master candidate role: %s",
utils.CommaJoin(node.name for node in mod_list))
for node in mod_list:
lu.context.ReaddNode(node)
mc_now, mc_max, _ = lu.cfg.GetMasterCandidateStats(exceptions)
if mc_now > mc_max:
lu.LogInfo("Note: more nodes are candidates (%d) than desired (%d)" %
(mc_now, mc_max))
def CheckNodePVs(nresult, exclusive_storage):
"""Check node PVs.
"""
pvlist_dict = nresult.get(constants.NV_PVLIST, None)
if pvlist_dict is None:
return (["Can't get PV list from node"], None)
pvlist = map(objects.LvmPvInfo.FromDict, pvlist_dict)
errlist = []
# check that ':' is not present in PV names, since it's a
# special character for lvcreate (denotes the range of PEs to
# use on the PV)
for pv in pvlist:
if ":" in pv.name:
errlist.append("Invalid character ':' in PV '%s' of VG '%s'" %
(pv.name, pv.vg_name))
es_pvinfo = None
if exclusive_storage:
(errmsgs, es_pvinfo) = utils.LvmExclusiveCheckNodePvs(pvlist)
errlist.extend(errmsgs)
shared_pvs = nresult.get(constants.NV_EXCLUSIVEPVS, None)
if shared_pvs:
for (pvname, lvlist) in shared_pvs:
# TODO: Check that LVs are really unrelated (snapshots, DRBD meta...)
errlist.append("PV %s is shared among unrelated LVs (%s)" %
(pvname, utils.CommaJoin(lvlist)))
return (errlist, es_pvinfo)
def _ComputeMinMaxSpec(name, qualifier, ispecs, value):
"""Computes if value is in the desired range.
@param name: name of the parameter for which we perform the check
@param qualifier: a qualifier used in the error message (e.g. 'disk/1',
not just 'disk')
@param ispecs: dictionary containing min and max values
@param value: actual value that we want to use
@return: None or an error string
"""
if value in [None, constants.VALUE_AUTO]:
return None
max_v = ispecs[constants.ISPECS_MAX].get(name, value)
min_v = ispecs[constants.ISPECS_MIN].get(name, value)
if value > max_v or min_v > value:
if qualifier:
fqn = "%s/%s" % (name, qualifier)
else:
fqn = name
return ("%s value %s is not in range [%s, %s]" %
(fqn, value, min_v, max_v))
return None
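# Illustrative sketch (not part of upstream Ganeti), derived from the function
# above: with a toy parameter name and an ispecs dict such as
#   ispecs = {constants.ISPECS_MIN: {"memory-size": 128},
#             constants.ISPECS_MAX: {"memory-size": 4096}}
# a call like _ComputeMinMaxSpec("memory-size", None, ispecs, 8192) returns the
# string "memory-size value 8192 is not in range [128, 4096]", while an in-range
# value, None, or constants.VALUE_AUTO yields None (no violation).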
def ComputeIPolicySpecViolation(ipolicy, mem_size, cpu_count, disk_count,
nic_count, disk_sizes, spindle_use,
disk_template,
_compute_fn=_ComputeMinMaxSpec):
"""Verifies ipolicy against provided specs.
@type ipolicy: dict
@param ipolicy: The ipolicy
@type mem_size: int
@param mem_size: The memory size
@type cpu_count: int
@param cpu_count: Used cpu cores
@type disk_count: int
@param disk_count: Number of disks used
@type nic_count: int
@param nic_count: Number of nics used
@type disk_sizes: list of ints
@param disk_sizes: Disk sizes of used disk (len must match C{disk_count})
@type spindle_use: int
@param spindle_use: The number of spindles this instance uses
@type disk_template: string
@param disk_template: The disk template of the instance
@param _compute_fn: The compute function (unittest only)
  @return: A list of violations, or an empty list if no violations are found
"""
assert disk_count == len(disk_sizes)
test_settings = [
(constants.ISPEC_MEM_SIZE, "", mem_size),
(constants.ISPEC_CPU_COUNT, "", cpu_count),
(constants.ISPEC_NIC_COUNT, "", nic_count),
(constants.ISPEC_SPINDLE_USE, "", spindle_use),
] + [(constants.ISPEC_DISK_SIZE, str(idx), d)
for idx, d in enumerate(disk_sizes)]
if disk_template != constants.DT_DISKLESS:
# This check doesn't make sense for diskless instances
test_settings.append((constants.ISPEC_DISK_COUNT, "", disk_count))
ret = []
allowed_dts = ipolicy[constants.IPOLICY_DTS]
if disk_template not in allowed_dts:
ret.append("Disk template %s is not allowed (allowed templates: %s)" %
(disk_template, utils.CommaJoin(allowed_dts)))
min_errs = None
for minmax in ipolicy[constants.ISPECS_MINMAX]:
errs = filter(None,
(_compute_fn(name, qualifier, minmax, value)
for (name, qualifier, value) in test_settings))
if min_errs is None or len(errs) < len(min_errs):
min_errs = errs
assert min_errs is not None
return ret + min_errs
def ComputeIPolicyInstanceViolation(ipolicy, instance, cfg,
_compute_fn=ComputeIPolicySpecViolation):
"""Compute if instance meets the specs of ipolicy.
@type ipolicy: dict
@param ipolicy: The ipolicy to verify against
@type instance: L{objects.Instance}
@param instance: The instance to verify
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@param _compute_fn: The function to verify ipolicy (unittest only)
@see: L{ComputeIPolicySpecViolation}
"""
ret = []
be_full = cfg.GetClusterInfo().FillBE(instance)
mem_size = be_full[constants.BE_MAXMEM]
cpu_count = be_full[constants.BE_VCPUS]
es_flags = rpc.GetExclusiveStorageForNodes(cfg, instance.all_nodes)
if any(es_flags.values()):
# With exclusive storage use the actual spindles
try:
spindle_use = sum([disk.spindles for disk in instance.disks])
except TypeError:
ret.append("Number of spindles not configured for disks of instance %s"
" while exclusive storage is enabled, try running gnt-cluster"
" repair-disk-sizes" % instance.name)
# _ComputeMinMaxSpec ignores 'None's
spindle_use = None
else:
spindle_use = be_full[constants.BE_SPINDLE_USE]
disk_count = len(instance.disks)
disk_sizes = [disk.size for disk in instance.disks]
nic_count = len(instance.nics)
disk_template = instance.disk_template
return ret + _compute_fn(ipolicy, mem_size, cpu_count, disk_count, nic_count,
disk_sizes, spindle_use, disk_template)
def _ComputeViolatingInstances(ipolicy, instances, cfg):
"""Computes a set of instances who violates given ipolicy.
@param ipolicy: The ipolicy to verify
@type instances: L{objects.Instance}
@param instances: List of instances to verify
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@return: A frozenset of instance names violating the ipolicy
"""
return frozenset([inst.name for inst in instances
if ComputeIPolicyInstanceViolation(ipolicy, inst, cfg)])
def ComputeNewInstanceViolations(old_ipolicy, new_ipolicy, instances, cfg):
"""Computes a set of any instances that would violate the new ipolicy.
@param old_ipolicy: The current (still in-place) ipolicy
@param new_ipolicy: The new (to become) ipolicy
@param instances: List of instances to verify
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
  @return: A list of instances which violate the new ipolicy but
did not before
"""
return (_ComputeViolatingInstances(new_ipolicy, instances, cfg) -
_ComputeViolatingInstances(old_ipolicy, instances, cfg))
def GetUpdatedParams(old_params, update_dict,
use_default=True, use_none=False):
"""Return the new version of a parameter dictionary.
@type old_params: dict
@param old_params: old parameters
@type update_dict: dict
@param update_dict: dict containing new parameter values, or
constants.VALUE_DEFAULT to reset the parameter to its default
value
  @type use_default: boolean
  @param use_default: whether to recognise L{constants.VALUE_DEFAULT}
      values as 'to be deleted' values
  @type use_none: boolean
  @param use_none: whether to recognise C{None} values as 'to be
      deleted' values
@rtype: dict
@return: the new parameter dictionary
"""
params_copy = copy.deepcopy(old_params)
for key, val in update_dict.iteritems():
if ((use_default and val == constants.VALUE_DEFAULT) or
(use_none and val is None)):
try:
del params_copy[key]
except KeyError:
pass
else:
params_copy[key] = val
return params_copy
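# Illustrative sketch (not part of upstream Ganeti): merge semantics of
# GetUpdatedParams, as implemented above.
#   old = {"vcpus": 2, "memory": 512}
#   GetUpdatedParams(old, {"memory": 1024, "ha": True})
#     -> {"vcpus": 2, "memory": 1024, "ha": True}
#   GetUpdatedParams(old, {"memory": constants.VALUE_DEFAULT})
#     -> {"vcpus": 2}           # VALUE_DEFAULT removes the key (use_default=True)
#   GetUpdatedParams(old, {"memory": None}, use_none=True)
#     -> {"vcpus": 2}           # None removes the key only when use_none=True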
def GetUpdatedIPolicy(old_ipolicy, new_ipolicy, group_policy=False):
"""Return the new version of an instance policy.
@param group_policy: whether this policy applies to a group and thus
we should support removal of policy entries
"""
ipolicy = copy.deepcopy(old_ipolicy)
for key, value in new_ipolicy.items():
if key not in constants.IPOLICY_ALL_KEYS:
raise errors.OpPrereqError("Invalid key in new ipolicy: %s" % key,
errors.ECODE_INVAL)
if (not value or value == [constants.VALUE_DEFAULT] or
value == constants.VALUE_DEFAULT):
if group_policy:
if key in ipolicy:
del ipolicy[key]
else:
        raise errors.OpPrereqError("Can't unset ipolicy attribute '%s'"
                                   " on the cluster" % key,
errors.ECODE_INVAL)
else:
if key in constants.IPOLICY_PARAMETERS:
# FIXME: we assume all such values are float
try:
ipolicy[key] = float(value)
except (TypeError, ValueError), err:
raise errors.OpPrereqError("Invalid value for attribute"
" '%s': '%s', error: %s" %
(key, value, err), errors.ECODE_INVAL)
elif key == constants.ISPECS_MINMAX:
for minmax in value:
for k in minmax.keys():
utils.ForceDictType(minmax[k], constants.ISPECS_PARAMETER_TYPES)
ipolicy[key] = value
elif key == constants.ISPECS_STD:
if group_policy:
msg = "%s cannot appear in group instance specs" % key
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
ipolicy[key] = GetUpdatedParams(old_ipolicy.get(key, {}), value,
use_none=False, use_default=False)
utils.ForceDictType(ipolicy[key], constants.ISPECS_PARAMETER_TYPES)
else:
# FIXME: we assume all others are lists; this should be redone
# in a nicer way
ipolicy[key] = list(value)
try:
objects.InstancePolicy.CheckParameterSyntax(ipolicy, not group_policy)
except errors.ConfigurationError, err:
raise errors.OpPrereqError("Invalid instance policy: %s" % err,
errors.ECODE_INVAL)
return ipolicy
def AnnotateDiskParams(instance, devs, cfg):
"""Little helper wrapper to the rpc annotation method.
@param instance: The instance object
@type devs: List of L{objects.Disk}
@param devs: The root devices (not any of its children!)
@param cfg: The config object
  @returns: The annotated disk copies
  @see: L{rpc.AnnotateDiskParams}
"""
return rpc.AnnotateDiskParams(devs, cfg.GetInstanceDiskParams(instance))
def SupportsOob(cfg, node):
"""Tells if node supports OOB.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type node: L{objects.Node}
@param node: The node
@return: The OOB script if supported or an empty string otherwise
"""
return cfg.GetNdParams(node)[constants.ND_OOB_PROGRAM]
def _UpdateAndVerifySubDict(base, updates, type_check):
"""Updates and verifies a dict with sub dicts of the same type.
@param base: The dict with the old data
@param updates: The dict with the new data
@param type_check: Dict suitable to ForceDictType to verify correct types
@returns: A new dict with updated and verified values
"""
def fn(old, value):
new = GetUpdatedParams(old, value)
utils.ForceDictType(new, type_check)
return new
ret = copy.deepcopy(base)
ret.update(dict((key, fn(base.get(key, {}), value))
for key, value in updates.items()))
return ret
def _FilterVmNodes(lu, node_uuids):
"""Filters out non-vm_capable nodes from a list.
@type lu: L{LogicalUnit}
@param lu: the logical unit for which we check
@type node_uuids: list
@param node_uuids: the list of nodes on which we should check
@rtype: list
@return: the list of vm-capable nodes
"""
vm_nodes = frozenset(lu.cfg.GetNonVmCapableNodeList())
return [uuid for uuid in node_uuids if uuid not in vm_nodes]
def GetDefaultIAllocator(cfg, ialloc):
"""Decides on which iallocator to use.
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration object
@type ialloc: string or None
@param ialloc: Iallocator specified in opcode
@rtype: string
@return: Iallocator name
"""
if not ialloc:
# Use default iallocator
ialloc = cfg.GetDefaultIAllocator()
if not ialloc:
raise errors.OpPrereqError("No iallocator was specified, neither in the"
" opcode nor as a cluster-wide default",
errors.ECODE_INVAL)
return ialloc
def CheckInstancesNodeGroups(cfg, instances, owned_groups, owned_node_uuids,
cur_group_uuid):
"""Checks if node groups for locked instances are still correct.
@type cfg: L{config.ConfigWriter}
@param cfg: Cluster configuration
@type instances: dict; string as key, L{objects.Instance} as value
@param instances: Dictionary, instance UUID as key, instance object as value
@type owned_groups: iterable of string
@param owned_groups: List of owned groups
@type owned_node_uuids: iterable of string
@param owned_node_uuids: List of owned nodes
@type cur_group_uuid: string or None
@param cur_group_uuid: Optional group UUID to check against instance's groups
"""
for (uuid, inst) in instances.items():
assert owned_node_uuids.issuperset(inst.all_nodes), \
"Instance %s's nodes changed while we kept the lock" % inst.name
inst_groups = CheckInstanceNodeGroups(cfg, uuid, owned_groups)
assert cur_group_uuid is None or cur_group_uuid in inst_groups, \
"Instance %s has no node in group %s" % (inst.name, cur_group_uuid)
def CheckInstanceNodeGroups(cfg, inst_uuid, owned_groups, primary_only=False):
"""Checks if the owned node groups are still correct for an instance.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type inst_uuid: string
@param inst_uuid: Instance UUID
@type owned_groups: set or frozenset
@param owned_groups: List of currently owned node groups
@type primary_only: boolean
@param primary_only: Whether to check node groups for only the primary node
"""
inst_groups = cfg.GetInstanceNodeGroups(inst_uuid, primary_only)
if not owned_groups.issuperset(inst_groups):
    raise errors.OpPrereqError("Instance %s's node groups changed since"
                               " locks were acquired, current groups are"
                               " '%s', owning groups '%s'; retry the"
" operation" %
(cfg.GetInstanceName(inst_uuid),
utils.CommaJoin(inst_groups),
utils.CommaJoin(owned_groups)),
errors.ECODE_STATE)
return inst_groups
def LoadNodeEvacResult(lu, alloc_result, early_release, use_nodes):
"""Unpacks the result of change-group and node-evacuate iallocator requests.
Iallocator modes L{constants.IALLOCATOR_MODE_NODE_EVAC} and
L{constants.IALLOCATOR_MODE_CHG_GROUP}.
@type lu: L{LogicalUnit}
@param lu: Logical unit instance
@type alloc_result: tuple/list
@param alloc_result: Result from iallocator
@type early_release: bool
@param early_release: Whether to release locks early if possible
@type use_nodes: bool
@param use_nodes: Whether to display node names instead of groups
"""
(moved, failed, jobs) = alloc_result
if failed:
failreason = utils.CommaJoin("%s (%s)" % (name, reason)
for (name, reason) in failed)
lu.LogWarning("Unable to evacuate instances %s", failreason)
raise errors.OpExecError("Unable to evacuate instances %s" % failreason)
if moved:
lu.LogInfo("Instances to be moved: %s",
utils.CommaJoin(
"%s (to %s)" %
(name, _NodeEvacDest(use_nodes, group, node_names))
for (name, group, node_names) in moved))
return [map(compat.partial(_SetOpEarlyRelease, early_release),
map(opcodes.OpCode.LoadOpCode, ops))
for ops in jobs]
def _NodeEvacDest(use_nodes, group, node_names):
"""Returns group or nodes depending on caller's choice.
"""
if use_nodes:
return utils.CommaJoin(node_names)
else:
return group
def _SetOpEarlyRelease(early_release, op):
"""Sets C{early_release} flag on opcodes if available.
"""
try:
op.early_release = early_release
except AttributeError:
assert not isinstance(op, opcodes.OpInstanceReplaceDisks)
return op
def MapInstanceLvsToNodes(instances):
  """Creates a map from (node uuid, volume) to the instance object.
@type instances: list of L{objects.Instance}
@rtype: dict; tuple of (node uuid, volume name) as key, L{objects.Instance}
object as value
"""
return dict(((node_uuid, vol), inst)
for inst in instances
for (node_uuid, vols) in inst.MapLVsByNode().items()
for vol in vols)
def CheckParamsNotGlobal(params, glob_pars, kind, bad_levels, good_levels):
  """Make sure that none of the given parameters is global.
If a global parameter is found, an L{errors.OpPrereqError} exception is
raised. This is used to avoid setting global parameters for individual nodes.
@type params: dictionary
@param params: Parameters to check
@type glob_pars: dictionary
@param glob_pars: Forbidden parameters
@type kind: string
@param kind: Kind of parameters (e.g. "node")
@type bad_levels: string
@param bad_levels: Level(s) at which the parameters are forbidden (e.g.
"instance")
@type good_levels: strings
@param good_levels: Level(s) at which the parameters are allowed (e.g.
"cluster or group")
"""
used_globals = glob_pars.intersection(params)
if used_globals:
msg = ("The following %s parameters are global and cannot"
" be customized at %s level, please modify them at"
" %s level: %s" %
(kind, bad_levels, good_levels, utils.CommaJoin(used_globals)))
raise errors.OpPrereqError(msg, errors.ECODE_INVAL)
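# Illustrative sketch (not part of upstream Ganeti): passing a forbidden key
# trips the check above, e.g.
#   CheckParamsNotGlobal({"oob_program": "/bin/true"}, frozenset(["oob_program"]),
#                        "node", "node", "cluster or group")
# raises OpPrereqError listing the offending keys, while a params dict with no
# overlap with glob_pars returns without error.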
def IsExclusiveStorageEnabledNode(cfg, node):
"""Whether exclusive_storage is in effect for the given node.
@type cfg: L{config.ConfigWriter}
@param cfg: The cluster configuration
@type node: L{objects.Node}
@param node: The node
@rtype: bool
@return: The effective value of exclusive_storage
"""
return cfg.GetNdParams(node)[constants.ND_EXCLUSIVE_STORAGE]
def CheckInstanceState(lu, instance, req_states, msg=None):
"""Ensure that an instance is in one of the required states.
@param lu: the LU on behalf of which we make the check
@param instance: the instance to check
@param msg: if passed, should be a message to replace the default one
@raise errors.OpPrereqError: if the instance is not in the required state
"""
if msg is None:
msg = ("can't use instance from outside %s states" %
utils.CommaJoin(req_states))
if instance.admin_state not in req_states:
raise errors.OpPrereqError("Instance '%s' is marked to be %s, %s" %
(instance.name, instance.admin_state, msg),
errors.ECODE_STATE)
if constants.ADMINST_UP not in req_states:
pnode_uuid = instance.primary_node
if not lu.cfg.GetNodeInfo(pnode_uuid).offline:
all_hvparams = lu.cfg.GetClusterInfo().hvparams
ins_l = lu.rpc.call_instance_list(
[pnode_uuid], [instance.hypervisor], all_hvparams)[pnode_uuid]
ins_l.Raise("Can't contact node %s for instance information" %
lu.cfg.GetNodeName(pnode_uuid),
prereq=True, ecode=errors.ECODE_ENVIRON)
if instance.name in ins_l.payload:
raise errors.OpPrereqError("Instance %s is running, %s" %
(instance.name, msg), errors.ECODE_STATE)
else:
lu.LogWarning("Primary node offline, ignoring check that instance"
" is down")
def CheckIAllocatorOrNode(lu, iallocator_slot, node_slot):
"""Check the sanity of iallocator and node arguments and use the
cluster-wide iallocator if appropriate.
Check that at most one of (iallocator, node) is specified. If none is
specified, or the iallocator is L{constants.DEFAULT_IALLOCATOR_SHORTCUT},
then the LU's opcode's iallocator slot is filled with the cluster-wide
default iallocator.
@type iallocator_slot: string
@param iallocator_slot: the name of the opcode iallocator slot
@type node_slot: string
@param node_slot: the name of the opcode target node slot
"""
node = getattr(lu.op, node_slot, None)
ialloc = getattr(lu.op, iallocator_slot, None)
if node == []:
node = None
if node is not None and ialloc is not None:
raise errors.OpPrereqError("Do not specify both, iallocator and node",
errors.ECODE_INVAL)
elif ((node is None and ialloc is None) or
ialloc == constants.DEFAULT_IALLOCATOR_SHORTCUT):
default_iallocator = lu.cfg.GetDefaultIAllocator()
if default_iallocator:
setattr(lu.op, iallocator_slot, default_iallocator)
else:
raise errors.OpPrereqError("No iallocator or node given and no"
" cluster-wide default iallocator found;"
" please specify either an iallocator or a"
" node, or set a cluster-wide default"
" iallocator", errors.ECODE_INVAL)
def FindFaultyInstanceDisks(cfg, rpc_runner, instance, node_uuid, prereq):
faulty = []
result = rpc_runner.call_blockdev_getmirrorstatus(
node_uuid, (instance.disks, instance))
result.Raise("Failed to get disk status from node %s" %
cfg.GetNodeName(node_uuid),
prereq=prereq, ecode=errors.ECODE_ENVIRON)
for idx, bdev_status in enumerate(result.payload):
if bdev_status and bdev_status.ldisk_status == constants.LDS_FAULTY:
faulty.append(idx)
return faulty
def CheckNodeOnline(lu, node_uuid, msg=None):
"""Ensure that a given node is online.
@param lu: the LU on behalf of which we make the check
@param node_uuid: the node to check
@param msg: if passed, should be a message to replace the default one
@raise errors.OpPrereqError: if the node is offline
"""
if msg is None:
msg = "Can't use offline node"
if lu.cfg.GetNodeInfo(node_uuid).offline:
raise errors.OpPrereqError("%s: %s" % (msg, lu.cfg.GetNodeName(node_uuid)),
errors.ECODE_STATE)
def CheckDiskTemplateEnabled(cluster, disk_template):
"""Helper function to check if a disk template is enabled.
@type cluster: C{objects.Cluster}
@param cluster: the cluster's configuration
@type disk_template: str
@param disk_template: the disk template to be checked
"""
assert disk_template is not None
if disk_template not in constants.DISK_TEMPLATES:
raise errors.OpPrereqError("'%s' is not a valid disk template."
" Valid disk templates are: %s" %
(disk_template,
",".join(constants.DISK_TEMPLATES)))
if not disk_template in cluster.enabled_disk_templates:
raise errors.OpPrereqError("Disk template '%s' is not enabled in cluster."
" Enabled disk templates are: %s" %
(disk_template,
",".join(cluster.enabled_disk_templates)))
def CheckStorageTypeEnabled(cluster, storage_type):
"""Helper function to check if a storage type is enabled.
@type cluster: C{objects.Cluster}
@param cluster: the cluster's configuration
@type storage_type: str
@param storage_type: the storage type to be checked
"""
assert storage_type is not None
assert storage_type in constants.STORAGE_TYPES
# special case for lvm-pv, because it cannot be enabled
# via disk templates
if storage_type == constants.ST_LVM_PV:
CheckStorageTypeEnabled(cluster, constants.ST_LVM_VG)
else:
possible_disk_templates = \
utils.storage.GetDiskTemplatesOfStorageType(storage_type)
for disk_template in possible_disk_templates:
if disk_template in cluster.enabled_disk_templates:
return
raise errors.OpPrereqError("No disk template of storage type '%s' is"
" enabled in this cluster. Enabled disk"
" templates are: %s" % (storage_type,
",".join(cluster.enabled_disk_templates)))
def CheckIpolicyVsDiskTemplates(ipolicy, enabled_disk_templates):
  """Checks ipolicy disk templates against enabled disk templates.
@type ipolicy: dict
@param ipolicy: the new ipolicy
@type enabled_disk_templates: list of string
@param enabled_disk_templates: list of enabled disk templates on the
cluster
@raises errors.OpPrereqError: if there is at least one allowed disk
template that is not also enabled.
"""
assert constants.IPOLICY_DTS in ipolicy
allowed_disk_templates = ipolicy[constants.IPOLICY_DTS]
not_enabled = set(allowed_disk_templates) - set(enabled_disk_templates)
if not_enabled:
    raise errors.OpPrereqError("The following disk templates are allowed"
" by the ipolicy, but not enabled on the"
" cluster: %s" % utils.CommaJoin(not_enabled))
def CheckDiskAccessModeValidity(parameters):
"""Checks if the access parameter is legal.
@see: L{CheckDiskAccessModeConsistency} for cluster consistency checks.
@raise errors.OpPrereqError: if the check fails.
"""
if constants.DT_RBD in parameters:
access = parameters[constants.DT_RBD].get(constants.RBD_ACCESS,
constants.DISK_KERNELSPACE)
if access not in constants.DISK_VALID_ACCESS_MODES:
valid_vals_str = utils.CommaJoin(constants.DISK_VALID_ACCESS_MODES)
raise errors.OpPrereqError("Invalid value of '{d}:{a}': '{v}' (expected"
" one of {o})".format(d=constants.DT_RBD,
a=constants.RBD_ACCESS,
v=access,
o=valid_vals_str))
def CheckDiskAccessModeConsistency(parameters, cfg, group=None):
"""Checks if the access param is consistent with the cluster configuration.
@note: requires a configuration lock to run.
@param parameters: the parameters to validate
@param cfg: the cfg object of the cluster
@param group: if set, only check for consistency within this group.
@raise errors.OpPrereqError: if the LU attempts to change the access parameter
to an invalid value, such as "pink bunny".
@raise errors.OpPrereqError: if the LU attempts to change the access parameter
to an inconsistent value, such as asking for RBD
userspace access to the chroot hypervisor.
"""
CheckDiskAccessModeValidity(parameters)
if constants.DT_RBD in parameters:
access = parameters[constants.DT_RBD].get(constants.RBD_ACCESS,
constants.DISK_KERNELSPACE)
    # Check that the combination of instance hypervisor, disk template and
    # access protocol is sane.
inst_uuids = cfg.GetNodeGroupInstances(group) if group else \
cfg.GetInstanceList()
for entry in inst_uuids:
      # hyp, disk, access
inst = cfg.GetInstanceInfo(entry)
hv = inst.hypervisor
dt = inst.disk_template
      # do not check for disk types that don't have this setting.
if dt != constants.DT_RBD:
continue
if not IsValidDiskAccessModeCombination(hv, dt, access):
raise errors.OpPrereqError("Instance {i}: cannot use '{a}' access"
" setting with {h} hypervisor and {d} disk"
" type.".format(i=inst.name,
a=access,
h=hv,
d=dt))
def IsValidDiskAccessModeCombination(hv, disk_template, mode):
  """Checks if a hypervisor can read a disk template with a given mode.
@param hv: the hypervisor that will access the data
@param disk_template: the disk template the data is stored as
@param mode: how the hypervisor should access the data
  @return: True if the hypervisor can read the given disk_template
in the specified mode.
"""
if mode == constants.DISK_KERNELSPACE:
return True
if (hv == constants.HT_KVM and
disk_template == constants.DT_RBD and
mode == constants.DISK_USERSPACE):
return True
# Everything else:
return False
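# Illustrative sketch (not part of upstream Ganeti): the check above reduces to
# a small truth table; constants not referenced in the code above (e.g. the
# chroot hypervisor) are assumed from the wider constants module.
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_KERNELSPACE)   -> True
#   IsValidDiskAccessModeCombination(constants.HT_KVM, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)     -> True
#   IsValidDiskAccessModeCombination(constants.HT_CHROOT, constants.DT_RBD,
#                                    constants.DISK_USERSPACE)     -> False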
| gpl-2.0 | 7,406,222,431,369,780,000 | 35.136885 | 80 | 0.664323 | false |
mcameron/ansible-modules-extras | cloud/misc/proxmox.py | 3 | 17106 | #!/usr/bin/python
# This file is part of Ansible
#
# Ansible is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# Ansible is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with Ansible. If not, see <http://www.gnu.org/licenses/>.
DOCUMENTATION = '''
---
module: proxmox
short_description: management of instances in Proxmox VE cluster
description:
- allows you to create/delete/stop instances in Proxmox VE cluster
- Starting in Ansible 2.1, it automatically detects conainerization type (lxc for PVE 4, openvz for older)
version_added: "2.0"
options:
api_host:
description:
- the host of the Proxmox VE cluster
required: true
api_user:
description:
- the user to authenticate with
required: true
api_password:
description:
- the password to authenticate with
- you can use PROXMOX_PASSWORD environment variable
default: null
required: false
vmid:
description:
- the instance id
default: null
required: true
validate_certs:
description:
- enable / disable https certificate verification
default: false
required: false
type: boolean
node:
description:
- Proxmox VE node, when new VM will be created
- required only for C(state=present)
- for another states will be autodiscovered
default: null
required: false
password:
description:
- the instance root password
- required only for C(state=present)
default: null
required: false
hostname:
description:
- the instance hostname
- required only for C(state=present)
default: null
required: false
ostemplate:
description:
      - the template used for creating the VM
- required only for C(state=present)
default: null
required: false
disk:
description:
- hard disk size in GB for instance
default: 3
required: false
cpus:
description:
- numbers of allocated cpus for instance
default: 1
required: false
memory:
description:
- memory size in MB for instance
default: 512
required: false
swap:
description:
- swap memory size in MB for instance
default: 0
required: false
netif:
description:
- specifies network interfaces for the container
default: null
required: false
type: string
ip_address:
description:
- specifies the address the container will be assigned
default: null
required: false
type: string
onboot:
description:
- specifies whether a VM will be started during system bootup
default: false
required: false
type: boolean
storage:
description:
- target storage
default: 'local'
required: false
type: string
cpuunits:
description:
- CPU weight for a VM
default: 1000
required: false
type: integer
nameserver:
description:
- sets DNS server IP address for a container
default: null
required: false
type: string
searchdomain:
description:
- sets DNS search domain for a container
default: null
required: false
type: string
timeout:
description:
- timeout for operations
default: 30
required: false
type: integer
force:
description:
- forcing operations
- can be used only with states C(present), C(stopped), C(restarted)
      - with C(state=present) the force option allows you to overwrite an existing container
      - with states C(stopped), C(restarted) it allows you to force-stop the instance
default: false
required: false
type: boolean
state:
description:
- Indicate desired state of the instance
choices: ['present', 'started', 'absent', 'stopped', 'restarted']
default: present
notes:
  - Requires proxmoxer and requests modules on host. These modules can be installed with pip.
requirements: [ "proxmoxer", "requests" ]
author: "Sergei Antipov @UnderGreen"
'''
EXAMPLES = '''
# Create new container with minimal options
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Create new container with minimal options with force (it will overwrite an existing container)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_password='1q2w3e' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz' force=yes
# Create new container with minimal options using the PROXMOX_PASSWORD environment variable (you should export it beforehand)
- proxmox: vmid=100 node='uk-mc02' api_user='root@pam' api_host='node1' password='123456' hostname='example.org' ostemplate='local:vztmpl/ubuntu-14.04-x86_64.tar.gz'
# Start container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=started
# Stop container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=stopped
# Stop container with force
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' force=yes state=stopped
# Restart container (a stopped or mounted container cannot be restarted)
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=restarted
# Remove container
- proxmox: vmid=100 api_user='root@pam' api_password='1q2w3e' api_host='node1' state=absent
'''
import os
import time
try:
from proxmoxer import ProxmoxAPI
HAS_PROXMOXER = True
except ImportError:
HAS_PROXMOXER = False
VZ_TYPE=None
def get_instance(proxmox, vmid):
return [ vm for vm in proxmox.cluster.resources.get(type='vm') if vm['vmid'] == int(vmid) ]
def content_check(proxmox, node, ostemplate, storage):
return [ True for cnt in proxmox.nodes(node).storage(storage).content.get() if cnt['volid'] == ostemplate ]
def node_check(proxmox, node):
return [ True for nd in proxmox.nodes.get() if nd['node'] == node ]
def create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout, **kwargs):
proxmox_node = proxmox.nodes(node)
kwargs = dict((k,v) for k, v in kwargs.iteritems() if v is not None)
if VZ_TYPE =='lxc':
kwargs['cpulimit']=cpus
kwargs['rootfs']=disk
else:
kwargs['cpus']=cpus
kwargs['disk']=disk
taskid = getattr(proxmox_node, VZ_TYPE).create(vmid=vmid, storage=storage, memory=memory, swap=swap, **kwargs)
while timeout:
if ( proxmox_node.tasks(taskid).status.get()['status'] == 'stopped'
and proxmox_node.tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for creating VM. Last line in task before timeout: %s'
% proxmox_node.tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def start_instance(module, proxmox, vm, vmid, timeout):
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.start.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for starting VM. Last line in task before timeout: %s'
% proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def stop_instance(module, proxmox, vm, vmid, timeout, force):
if force:
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post(forceStop=1)
else:
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.shutdown.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for stopping VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def umount_instance(module, proxmox, vm, vmid, timeout):
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.umount.post()
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
return True
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for unmounting VM. Last line in task before timeout: %s'
                             % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
return False
def main():
module = AnsibleModule(
argument_spec = dict(
api_host = dict(required=True),
api_user = dict(required=True),
api_password = dict(no_log=True),
vmid = dict(required=True),
validate_certs = dict(type='bool', default='no'),
node = dict(),
password = dict(no_log=True),
hostname = dict(),
ostemplate = dict(),
disk = dict(type='int', default=3),
cpus = dict(type='int', default=1),
memory = dict(type='int', default=512),
swap = dict(type='int', default=0),
netif = dict(),
ip_address = dict(),
onboot = dict(type='bool', default='no'),
storage = dict(default='local'),
cpuunits = dict(type='int', default=1000),
nameserver = dict(),
searchdomain = dict(),
timeout = dict(type='int', default=30),
force = dict(type='bool', default='no'),
state = dict(default='present', choices=['present', 'absent', 'stopped', 'started', 'restarted']),
)
)
if not HAS_PROXMOXER:
module.fail_json(msg='proxmoxer required for this module')
state = module.params['state']
api_user = module.params['api_user']
api_host = module.params['api_host']
api_password = module.params['api_password']
vmid = module.params['vmid']
validate_certs = module.params['validate_certs']
node = module.params['node']
disk = module.params['disk']
cpus = module.params['cpus']
memory = module.params['memory']
swap = module.params['swap']
storage = module.params['storage']
timeout = module.params['timeout']
# If password not set get it from PROXMOX_PASSWORD env
if not api_password:
try:
api_password = os.environ['PROXMOX_PASSWORD']
except KeyError, e:
module.fail_json(msg='You should set api_password param or use PROXMOX_PASSWORD environment variable')
try:
proxmox = ProxmoxAPI(api_host, user=api_user, password=api_password, verify_ssl=validate_certs)
global VZ_TYPE
VZ_TYPE = 'openvz' if float(proxmox.version.get()['version']) < 4.0 else 'lxc'
except Exception, e:
module.fail_json(msg='authorization on proxmox cluster failed with exception: %s' % e)
if state == 'present':
try:
if get_instance(proxmox, vmid) and not module.params['force']:
                module.exit_json(changed=False, msg="VM with vmid = %s already exists" % vmid)
            elif not (node and module.params['hostname'] and module.params['password'] and module.params['ostemplate']):
module.fail_json(msg='node, hostname, password and ostemplate are mandatory for creating vm')
elif not node_check(proxmox, node):
                module.fail_json(msg="node '%s' does not exist in cluster" % node)
elif not content_check(proxmox, node, module.params['ostemplate'], storage):
                module.fail_json(msg="ostemplate '%s' does not exist on node %s and storage %s"
% (module.params['ostemplate'], node, storage))
create_instance(module, proxmox, vmid, node, disk, storage, cpus, memory, swap, timeout,
password = module.params['password'],
hostname = module.params['hostname'],
ostemplate = module.params['ostemplate'],
netif = module.params['netif'],
ip_address = module.params['ip_address'],
onboot = int(module.params['onboot']),
cpuunits = module.params['cpuunits'],
nameserver = module.params['nameserver'],
searchdomain = module.params['searchdomain'],
force = int(module.params['force']))
module.exit_json(changed=True, msg="deployed VM %s from template %s" % (vmid, module.params['ostemplate']))
except Exception, e:
module.fail_json(msg="creation of %s VM %s failed with exception: %s" % ( VZ_TYPE, vmid, e ))
elif state == 'started':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is already running" % vmid)
if start_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s started" % vmid)
except Exception, e:
module.fail_json(msg="starting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'stopped':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
if module.params['force']:
if umount_instance(module, proxmox, vm, vmid, timeout):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
else:
module.exit_json(changed=False, msg=("VM %s is already shutdown, but mounted. "
"You can use force option to umount it.") % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped':
module.exit_json(changed=False, msg="VM %s is already shutdown" % vmid)
if stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']):
module.exit_json(changed=True, msg="VM %s is shutting down" % vmid)
except Exception, e:
module.fail_json(msg="stopping of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'restarted':
try:
vm = get_instance(proxmox, vmid)
if not vm:
                module.fail_json(msg='VM with vmid = %s does not exist in cluster' % vmid)
if ( getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'stopped'
or getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted' ):
module.exit_json(changed=False, msg="VM %s is not running" % vmid)
if ( stop_instance(module, proxmox, vm, vmid, timeout, force = module.params['force']) and
start_instance(module, proxmox, vm, vmid, timeout) ):
module.exit_json(changed=True, msg="VM %s is restarted" % vmid)
except Exception, e:
module.fail_json(msg="restarting of VM %s failed with exception: %s" % ( vmid, e ))
elif state == 'absent':
try:
vm = get_instance(proxmox, vmid)
if not vm:
module.exit_json(changed=False, msg="VM %s does not exist" % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'running':
module.exit_json(changed=False, msg="VM %s is running. Stop it before deletion." % vmid)
if getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE)(vmid).status.current.get()['status'] == 'mounted':
module.exit_json(changed=False, msg="VM %s is mounted. Stop it with force option before deletion." % vmid)
taskid = getattr(proxmox.nodes(vm[0]['node']), VZ_TYPE).delete(vmid)
while timeout:
if ( proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['status'] == 'stopped'
and proxmox.nodes(vm[0]['node']).tasks(taskid).status.get()['exitstatus'] == 'OK' ):
module.exit_json(changed=True, msg="VM %s removed" % vmid)
timeout = timeout - 1
if timeout == 0:
module.fail_json(msg='Reached timeout while waiting for removing VM. Last line in task before timeout: %s'
                                     % proxmox.nodes(vm[0]['node']).tasks(taskid).log.get()[:1])
time.sleep(1)
except Exception, e:
module.fail_json(msg="deletion of VM %s failed with exception: %s" % ( vmid, e ))
# import module snippets
from ansible.module_utils.basic import *
main()
| gpl-3.0 | 4,040,748,815,247,288,300 | 37.440449 | 197 | 0.649129 | false |
ToontownUprising/src | otp/level/EntrancePoint.py | 3 | 1417 | from toontown.toonbase.ToontownGlobals import *
from direct.directnotify import DirectNotifyGlobal
import BasicEntities
class EntrancePoint(BasicEntities.NodePathEntity):
def __init__(self, level, entId):
BasicEntities.NodePathEntity.__init__(self, level, entId)
self.rotator = self.attachNewNode('rotator')
self.placer = self.rotator.attachNewNode('placer')
self.initEntrancePoint()
def destroy(self):
self.destroyEntrancePoint()
self.placer.removeNode()
self.rotator.removeNode()
del self.placer
del self.rotator
BasicEntities.NodePathEntity.destroy(self)
def placeToon(self, toon, toonIndex, numToons):
self.placer.setY(-self.radius)
self.rotator.setH(-self.theta * (numToons - 1) * 0.5 + toonIndex * self.theta)
toon.setPosHpr(self.placer, 0, 0, 0, 0, 0, 0)
def initEntrancePoint(self):
if self.entranceId >= 0:
self.level.entranceId2entity[self.entranceId] = self
def destroyEntrancePoint(self):
if self.entranceId >= 0:
if self.entranceId in self.level.entranceId2entity:
del self.level.entranceId2entity[self.entranceId]
if __dev__:
def attribChanged(self, *args):
BasicEntities.NodePathEntity.attribChanged(self, *args)
self.destroyEntrancePoint()
self.initEntrancePoint()
| mit | -2,693,942,233,462,477,000 | 34.425 | 86 | 0.661962 | false |
heromod/migrid | user-projects/miginterface/examples/mpi/mpi_example1.py | 1 | 1613 | #!/usr/bin/python
"""
An example script for running an MPI grid job using the mig interface module.
"""
import miginterface as mig
import time, sys
def main():
"""
Run an mpi job on a grid resource. To run in local mode please install mpi.
"""
# mig.debug_mode_on() # uncomment to enable debug print outs
# mig.local_mode_on() # uncomment to enable local mode execution
mig.test_connection() # Check if we can connect to the MiG server
mpi_file = "example.c" # mpi program source file
# The shell command to execute on the grid resource using 4 processes. We need to it compile on the resource first.
cmds = ["mpicc -O2 example.c -o example", "$MPI_WRAP mpirun -np 4 ./example Hello"]
# specify that we need require MPI as a runtime env and use the DIKU vgrid cluster
specifications = {"RUNTIMEENVIRONMENT":"MPI-WRAP-2.0", "VGRID":"DIKU"}
# Create and submit the grid job
job_id = mig.create_job(cmds, input_files=mpi_file, resource_specifications=specifications)
print "\nJob (ID : %s) submitted. \n\n" % job_id
# Wait for the job to finish while monitoring the status
polling_frequency = 10 # seconds
while not mig.job_finished(job_id):
job_info = mig.job_info(job_id) # get an info dictionary
print 'Grid job : %(ID)s \t %(STATUS)s ' % job_info
time.sleep(polling_frequency) # wait a while before polling again
print mig.job_output(job_id)
if __name__ == "__main__":
if "-l" in sys.argv:
mig.local_mode_on()
if "-d" in sys.argv:
mig.debug_mode_on()
main()
| gpl-2.0 | -6,414,359,659,538,479,000 | 34.065217 | 119 | 0.652201 | false |
miur/miur | logger/setup.py | 1 | 1353 | #
# SPDX-FileCopyrightText: 2020 Dmytro Kolomoiets <[email protected]> and contributors.
#
# SPDX-License-Identifier: GPL-3.0-only
#
# USAGE: call once from main() of each application to register ZMQ log handler
#
__all__ = [
'setup_logging',
]
import logging
import zmq
from zmq.log import handlers
def setup_logging(log_uri, lvl=None):
# BUG: PUBHandler('inproc://log') will bind to address instead of connecting
# FAIL: it creates additional two threads :: ZMQbg/IO/0 and ZMQbg/Reaper
# handler = handlers.PUBHandler(log_uri)
ctx = zmq.Context.instance()
log_sock = ctx.socket(zmq.PUB)
log_sock.connect(log_uri)
handler = handlers.PUBHandler(log_sock)
handler.root_topic = '' # cmpt OR %(process:d)/%(processName:s)
fmt = ' '.join([
"%(asctime)s",
"%(process:d)/%(processName:s)",
"%(thread:d)/%(threadName:s)",
"[%(name:s)/%(levelname:s)]",
"%(message)s",
":/%(pathname)s:%(lineno)d",
])
# ALT: use logging.dictConfig()
# SEE: https://stackoverflow.com/questions/38323810/does-pythons-logging-config-dictconfig-apply-the-loggers-configuration-setti
logging.basicConfig(
level=(lvl if lvl is not None else logging.DEBUG),
handlers=(handler,),
datefmt="%H:%M:%S.uuu",
format=fmt
)
return log_sock
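# A hedged usage sketch, added for illustration only: the endpoint below is an
# assumed ZMQ address, not a project default; any SUB/XSUB collector bound to
# the same URI would receive the records published by the PUBHandler above.
def _example_wiring():
    sock = setup_logging('tcp://127.0.0.1:5557', logging.INFO)
    logging.getLogger(__name__).info('log records now fan out over a ZMQ PUB socket')
    return sock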
| gpl-3.0 | 3,699,259,694,381,430,300 | 27.1875 | 132 | 0.636364 | false |
bjlittle/python-csp | test/test_contexts.py | 2 | 1464 | #!/usr/bin/python
"""
Test the CSP class, found in csp.csp and its context managers.
TODO: Replace this with proper unit testing.
Copyright (C) Sarah Mount, 2010.
This program is free software; you can redistribute it and/or
modify it under the terms of the GNU General Public License
as published by the Free Software Foundation; either version 2
of the License, or (at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU General Public License for more details.
You should have received a copy of the GNU General Public License
along with this program. If not, see <http://www.gnu.org/licenses/>.
"""
import sys
sys.path.insert(0, "..")
from csp.csp import CSP
def printme(*args):
print ' '.join(map(lambda x: str(x), args))
def testme1():
p = CSP()
with p.par():
p.process(printme, 1, 2, 3, 4, 5)
p.process(printme, 6, 7, 7, 8, 9)
p.process(printme, 2, 3, 6, 3, 2)
p.start()
def testme2():
p = CSP()
with p.seq():
p.process(printme, 1, 2, 3)
with p.par():
p.process(printme, 1)
p.process(printme, 2)
p.process(printme, 3)
p.process(printme, 5, 6, 7)
p.start()
if __name__ == '__main__':
print 'Test 1'
testme1()
print 'Test 2'
testme2()
| gpl-2.0 | -3,423,141,524,189,817,000 | 23 | 69 | 0.643443 | false |
wangscript/qb-archetype | quickbundle-rmwebdemo/WebContent/WEB-INF/jython/qb/tools/support/str/transfer_sql.py | 2 | 5536 | #coding=UTF-8
import os
from os.path import join
import sys
import re
import datetime
from os import listdir
from os.path import isdir, normpath
import math
import cPickle
import codecs
#from qb.tools.support.zip import zip_file
#from qb.tools.helper import date_helper
import codecs
start_time = datetime.datetime.now()
if len(sys.argv) < 4:
print '''\
This program transfers files from one encoding to another.
Any number of files can be specified.
String '*' matches any character and it must be in the file name.
Usage: transfer_file_encode.py -sencode1 -dencode2 file1 file2 ......
examples:
transfer_file_encode.py -sUTF-8 -dGBK folder1 folder2 ......
'''
#transfer_file_encode.py -sGBK -dUTF-8 ./**/*java
sys.exit()
#init argv
s_encode = None
d_encode = None
for arg in sys.argv[1:]:
if arg.startswith('-s'):
s_encode = arg[2:].upper()
elif arg.startswith('-d'):
d_encode = arg[2:].upper()
else:
continue
print 'option:', arg
transfer_count = 0
skip_count = 0
sub_count = 0
ignore_files = ()
for arg in sys.argv[1:]:
if arg.startswith('-s') or arg.startswith('-d'):
continue
#zip_file.create(arg + '/../' + os.path.split(arg)[1] + '_' + str(date_helper.get_datetime_number()) + '.zip', arg)
if os.path.isfile(arg):
f = open(arg, 'r')
s = f.read()
f.close()
try:
s2 = s.decode(s_encode).encode(d_encode)
f2 = open(arg, "w")
f2.write(s2)
f2.close()
transfer_count = transfer_count + 1
except Exception:
print str(sys.exc_info()[0]),
print ', skiping ', arg
skip_count = skip_count + 1
#read eclipse project ignore list
eclipse_prefs = arg + '/.settings/org.eclipse.core.resources.prefs'
if os.path.exists(eclipse_prefs):
ignore_files = re.findall('encoding//([^=]+?)=.*?', open(eclipse_prefs, 'r').read())
index = 0
for ignore_file in ignore_files:
if arg.endswith('/'):
ignore_files[index] = arg + ignore_file
else:
ignore_files[index] = arg + '/' + ignore_file
index = index + 1
print 'ignore_files=', ignore_files
for root,dirs,files in os.walk(arg):
for filename in files:
temp_file = join(root,filename)
if ignore_files.__contains__(temp_file):
print 'ignore ' + temp_file
continue
f = open(temp_file, 'r')
s = f.read()
f.close()
#if s[:3] == codecs.BOM_UTF8:
#s = s[3:]
try:
s2 = s.decode(s_encode).encode(d_encode)
s2_original = s2
print 'transfering ', temp_file
if s_encode == 'GBK' or s_encode == 'GB2312':
p_encode = re.compile('''(gb2312|GBK)"''', re.IGNORECASE)
s2 = p_encode.sub('utf8', s2)
if d_encode == 'GBK' or d_encode == 'GB2312':
if temp_file.endswith('xml') or temp_file.endswith('xsl'):
if re.search('''<\?xml[^>]+?encoding="''' + s_encode + '''"[^>]*?\?>''', s2, re.IGNORECASE):
p_encode = re.compile('''encoding="''' + s_encode + '''"''', re.IGNORECASE)
s2 = p_encode.sub('encoding="gb2312"', s2, 1)
print 'info: subing ' + str(p_encode.findall(s2_original, re.IGNORECASE)) + '-->' + 'encoding="gb2312"'
sub_count = sub_count + 1
elif temp_file.endswith('jsp') or temp_file.endswith('htm') or temp_file.endswith('html') or temp_file.endswith('shtml'):
if re.search('charset=' + s_encode, s2, re.IGNORECASE):
p_charset_jsp = re.compile('''contentType\s*?=\s*?"text/html;\s*?charset=''' + s_encode, re.IGNORECASE)
s2 = p_charset_jsp.sub('contentType="text/html; charset=GBK', s2, 2)
print 'info: subing ' + str(p_charset_jsp.findall(s2_original, re.IGNORECASE)) + '-->' + 'contentType="text/html; charset=GBK',
p_charset_meta = re.compile('''content\s*?=\s*?"text/html;\s*?charset=''' + s_encode, re.IGNORECASE)
s2 = p_charset_meta.sub('content="text/html; charset=gb2312', s2, 2)
print '; ' + str(p_charset_meta.findall(s2_original, re.IGNORECASE)) + '-->' + 'content="text/html; charset=gb2312'
sub_count = sub_count + 1
if re.search('pageEncoding="' + s_encode + '"', s2, re.IGNORECASE):
p_pageEncoding_jsp = re.compile('pageEncoding="' + s_encode + '"', re.IGNORECASE)
s2 = p_pageEncoding_jsp.sub('pageEncoding="GBK"', s2, 2)
print 'info: subing ' + str(p_pageEncoding_jsp.findall(s2_original, re.IGNORECASE)) + '-->' + 'pageEncoding="GBK"'
f2 = open(temp_file, "w")
f2.write(s2)
f2.close()
transfer_count = transfer_count + 1
except Exception:
print str(sys.exc_info()[0]),
print ', skiping ', temp_file
skip_count = skip_count + 1
print 'cost ', datetime.datetime.now() - start_time, ', skip_count=', skip_count, ', transfer_count=', transfer_count, ', sub_count=', sub_count | apache-2.0 | 4,247,049,595,630,720,000 | 42.944444 | 155 | 0.525108 | false |
jfinkels/networkx | networkx/algorithms/flow/tests/test_maxflow_large_graph.py | 5 | 4847 | # -*- coding: utf-8 -*-
"""Maximum flow algorithms test suite on large graphs.
"""
__author__ = """Loïc Séguin-C. <[email protected]>"""
# Copyright (C) 2010 Loïc Séguin-C. <[email protected]>
# All rights reserved.
# BSD license.
import os
from nose.tools import *
import networkx as nx
from networkx.algorithms.flow import build_flow_dict, build_residual_network
from networkx.algorithms.flow import boykov_kolmogorov
from networkx.algorithms.flow import dinitz
from networkx.algorithms.flow import edmonds_karp
from networkx.algorithms.flow import preflow_push
from networkx.algorithms.flow import shortest_augmenting_path
flow_funcs = [
boykov_kolmogorov,
dinitz,
edmonds_karp,
preflow_push,
shortest_augmenting_path,
]
msg = "Assertion failed in function: {0}"
def gen_pyramid(N):
# This graph admits a flow of value 1 for which every arc is at
# capacity (except the arcs incident to the sink which have
# infinite capacity).
G = nx.DiGraph()
for i in range(N - 1):
cap = 1. / (i + 2)
for j in range(i + 1):
G.add_edge((i, j), (i + 1, j),
capacity = cap)
cap = 1. / (i + 1) - cap
G.add_edge((i, j), (i + 1, j + 1),
capacity = cap)
cap = 1. / (i + 2) - cap
for j in range(N):
G.add_edge((N - 1, j), 't')
return G
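# Hedged illustration, not part of the original test suite: a tiny pyramid makes
# the comment above concrete -- every arc is saturated and the maximum flow is ~1.
def _example_pyramid_flow():
    G = gen_pyramid(3)
    # expected to be (numerically close to) 1.0
    return nx.maximum_flow_value(G, (0, 0), 't')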
def read_graph(name):
dirname = os.path.dirname(__file__)
path = os.path.join(dirname, name + '.gpickle.bz2')
return nx.read_gpickle(path)
def validate_flows(G, s, t, soln_value, R, flow_func):
flow_value = R.graph['flow_value']
flow_dict = build_flow_dict(G, R)
assert_equal(soln_value, flow_value, msg=msg.format(flow_func.__name__))
assert_equal(set(G), set(flow_dict), msg=msg.format(flow_func.__name__))
for u in G:
assert_equal(set(G[u]), set(flow_dict[u]),
msg=msg.format(flow_func.__name__))
excess = dict((u, 0) for u in flow_dict)
for u in flow_dict:
for v, flow in flow_dict[u].items():
ok_(flow <= G[u][v].get('capacity', float('inf')),
msg=msg.format(flow_func.__name__))
ok_(flow >= 0, msg=msg.format(flow_func.__name__))
excess[u] -= flow
excess[v] += flow
for u, exc in excess.items():
if u == s:
assert_equal(exc, -soln_value, msg=msg.format(flow_func.__name__))
elif u == t:
assert_equal(exc, soln_value, msg=msg.format(flow_func.__name__))
else:
assert_equal(exc, 0, msg=msg.format(flow_func.__name__))
class TestMaxflowLargeGraph:
def test_complete_graph(self):
N = 50
G = nx.complete_graph(N)
nx.set_edge_attributes(G, 'capacity', 5)
R = build_residual_network(G, 'capacity')
kwargs = dict(residual=R)
for flow_func in flow_funcs:
kwargs['flow_func'] = flow_func
flow_value = nx.maximum_flow_value(G, 1, 2, **kwargs)
assert_equal(flow_value, 5 * (N - 1),
msg=msg.format(flow_func.__name__))
def test_pyramid(self):
N = 10
#N = 100 # this gives a graph with 5051 nodes
G = gen_pyramid(N)
R = build_residual_network(G, 'capacity')
kwargs = dict(residual=R)
for flow_func in flow_funcs:
kwargs['flow_func'] = flow_func
flow_value = nx.maximum_flow_value(G, (0, 0), 't', **kwargs)
assert_almost_equal(flow_value, 1.,
msg=msg.format(flow_func.__name__))
def test_gl1(self):
G = read_graph('gl1')
s = 1
t = len(G)
R = build_residual_network(G, 'capacity')
kwargs = dict(residual=R)
for flow_func in flow_funcs:
validate_flows(G, s, t, 156545, flow_func(G, s, t, **kwargs),
flow_func)
def test_gw1(self):
G = read_graph('gw1')
s = 1
t = len(G)
R = build_residual_network(G, 'capacity')
kwargs = dict(residual=R)
for flow_func in flow_funcs:
validate_flows(G, s, t, 1202018, flow_func(G, s, t, **kwargs),
flow_func)
def test_wlm3(self):
G = read_graph('wlm3')
s = 1
t = len(G)
R = build_residual_network(G, 'capacity')
kwargs = dict(residual=R)
for flow_func in flow_funcs:
validate_flows(G, s, t, 11875108, flow_func(G, s, t, **kwargs),
flow_func)
def test_preflow_push_global_relabel(self):
G = read_graph('gw1')
R = preflow_push(G, 1, len(G), global_relabel_freq=50)
assert_equal(R.graph['flow_value'], 1202018)
| bsd-3-clause | -5,912,663,946,936,184,000 | 31.945578 | 78 | 0.545736 | false |
google/timesketch | test_tools/sigma_verify_rules.py | 1 | 9226 | # Copyright 2020 Google Inc. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A tool to test sigma rules.
This tool can be used to verify your rules before running an analyzer.
It also does not require you to have a full blown Timesketch instance.
By default this tool will show only the rules that cause problems.
Example way of running the tool:
$ PYTHONPATH=. python3 test_tools/sigma_verify_rules.py --config_file
data/sigma_config.yaml --debug data/sigma/rules/windows/
--move data/sigma/rules/problematic/
"""
import logging
import os
import argparse
import sys
import pandas as pd
from timesketch.lib import sigma_util# pylint: disable=no-name-in-module
logger = logging.getLogger('timesketch.test_tool.sigma-verify')
logging.basicConfig(level=os.environ.get('LOGLEVEL', 'INFO'))
def get_sigma_blocklist(blocklist_path='./data/sigma_blocklist.csv'):
"""Get a dataframe of sigma rules to ignore.
This includes filenames, paths, ids.
Args:
blocklist_path(str): Path to a blocklist file.
The default value is './data/sigma_blocklist.csv'
Returns:
Pandas dataframe with blocklist
Raises:
        ValueError: Sigma blocklist file is not readable.
"""
if blocklist_path is None or blocklist_path == '':
blocklist_path = './data/sigma_blocklist.csv'
if not blocklist_path:
raise ValueError('No blocklist_file_path set via param or config file')
if not os.path.isfile(blocklist_path):
raise ValueError(
'Unable to open file: [{0:s}], it does not exist.'.format(
blocklist_path))
if not os.access(blocklist_path, os.R_OK):
raise ValueError(
'Unable to open file: [{0:s}], cannot open it for '
'read, please check permissions.'.format(blocklist_path))
return pd.read_csv(blocklist_path)
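# Hedged illustration, added for clarity and not part of the original tool:
# run_verifier() below only relies on the blocklist exposing a 'path' column,
# so a quick way to inspect what will be ignored is:
def _example_blocklist_paths(blocklist_path='./data/sigma_blocklist.csv'):
    blocklist = get_sigma_blocklist(blocklist_path)
    return list(blocklist['path'].unique())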
def run_verifier(rules_path, config_file_path, blocklist_path=None):
"""Run an sigma parsing test on a dir and returns results from the run.
Args:
rules_path (str): Path to the Sigma rules.
config_file_path (str): Path to a config file with Sigma mapping data.
blocklist_path (str): Optional path to a blocklist file.
The default value is none.
Raises:
        IOError: if the rules path or the config file path does not exist.
Returns:
a tuple of lists:
- sigma_verified_rules with rules that can be added
- sigma_rules_with_problems with rules that should not be added
"""
if not config_file_path:
raise IOError('No config_file_path given')
if not os.path.isdir(rules_path):
raise IOError('Rules not found at path: {0:s}'.format(
rules_path))
if not os.path.isfile(config_file_path):
raise IOError('Config file path not found at path: {0:s}'.format(
config_file_path))
sigma_config = sigma_util.get_sigma_config_file(
config_file=config_file_path)
return_verified_rules = []
return_rules_with_problems = []
ignore = get_sigma_blocklist(blocklist_path)
ignore_list = list(ignore['path'].unique())
for dirpath, dirnames, files in os.walk(rules_path):
if 'deprecated' in [x.lower() for x in dirnames]:
dirnames.remove('deprecated')
for rule_filename in files:
if rule_filename.lower().endswith('.yml'):
# if a sub dir is found, do not try to parse it.
if os.path.isdir(os.path.join(dirpath, rule_filename)):
continue
rule_file_path = os.path.join(dirpath, rule_filename)
block_because_csv = False
if any(x in rule_file_path for x in ignore_list):
return_rules_with_problems.append(rule_file_path)
block_because_csv = True
if block_because_csv:
continue
try:
parsed_rule = sigma_util.get_sigma_rule(
rule_file_path, sigma_config)
# This except is to keep the unknown exceptions
# this function is made to catch them and document
# them the broad exception is needed
except Exception:# pylint: disable=broad-except
logger.debug('Rule parsing error', exc_info=True)
                    return_rules_with_problems.append(rule_file_path)
                    # parsing failed; skip the parsed_rule check below
                    continue
if parsed_rule:
return_verified_rules.append(rule_file_path)
else:
return_rules_with_problems.append(rule_file_path)
return return_verified_rules, return_rules_with_problems
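# Hedged usage sketch (illustration only); the paths mirror the example in the
# module docstring and are assumptions about the local checkout layout.
def _example_run_verifier():
    good, bad = run_verifier(
        rules_path='data/sigma/rules/windows/',
        config_file_path='data/sigma_config.yaml')
    return len(good), len(bad)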
def move_problematic_rule(filepath, move_to_path, reason=None):
"""Moves a problematic rule to a subfolder so it is not used again
Args:
filepath: path to the sigma rule that caused problems
move_to_path: path to move the problematic rules to
reason: optional reason why file is moved
"""
logging.info('Moving the rule: {0:s} to {1:s}'.format(
filepath, move_to_path))
try:
os.makedirs(move_to_path, exist_ok=True)
debug_path = os.path.join(move_to_path, 'debug.log')
with open(debug_path, 'a') as file_objec:
file_objec.write(f'{filepath}\n{reason}\n\n')
base_path = os.path.basename(filepath)
logging.info('Moving the rule: {0:s} to {1:s}'.format(
filepath, f'{move_to_path}{base_path}'))
os.rename(filepath, os.path.join(move_to_path, base_path))
except OSError:
logger.error('OS Error - rule not moved', exc_info=True)
if __name__ == '__main__':
description = (
        'Mock a sigma parser run. This tool is intended for developers '
'of sigma rules as well as Timesketch server admins. '
'The tool can also be used for automatic testing to make sure the '
'rules are still working as intended.')
epilog = (
'Remember to feed the tool with proper rule data.'
)
arguments = argparse.ArgumentParser(
description=description, allow_abbrev=True)
arguments.add_argument(
'--config_file', '--file', dest='config_file_path', action='store',
default='', type=str, metavar='PATH_TO_TEST_FILE', help=(
'Path to the file containing the config data to feed sigma '
))
arguments.add_argument(
'--blocklist_file', dest='blocklist_file_path', action='store',
default='', type=str, metavar='PATH_TO_BLOCK_FILE', help=(
'Path to the file containing the blocklist '
))
arguments.add_argument(
'rules_path', action='store', default='', type=str,
metavar='PATH_TO_RULES', help='Path to the rules to test.')
arguments.add_argument(
'--debug', action='store_true', help='print debug messages ')
arguments.add_argument(
'--info', action='store_true', help='print info messages ')
arguments.add_argument(
'--move', dest='move_to_path', action='store',
default='', type=str, help=(
'Move problematic rules to this path'
))
try:
options = arguments.parse_args()
except UnicodeEncodeError:
print(arguments.format_help())
sys.exit(1)
if options.debug:
logger.setLevel(logging.DEBUG)
if options.info:
logger.setLevel(logging.INFO)
if not os.path.isfile(options.config_file_path):
print('Config file not found.')
sys.exit(1)
if not os.path.isdir(options.rules_path):
print('The path to the rules does not exist ({0:s})'.format(
options.rules_path))
sys.exit(1)
if len(options.blocklist_file_path) > 0:
if not os.path.isfile(options.blocklist_file_path):
print('Blocklist file not found.')
sys.exit(1)
sigma_verified_rules, sigma_rules_with_problems = run_verifier(
rules_path=options.rules_path,
config_file_path=options.config_file_path,
blocklist_path=options.blocklist_file_path)
if len(sigma_rules_with_problems) > 0:
print('### Do NOT import below.###')
for badrule in sigma_rules_with_problems:
if options.move_to_path:
move_problematic_rule(
badrule, options.move_to_path,
'sigma_verify_rules.py found an issue')
print(badrule)
if len(sigma_verified_rules) > 0:
logging.info('### You can import the following rules ###')
for goodrule in sigma_verified_rules:
logging.info(goodrule)
| apache-2.0 | -4,156,244,198,128,402,000 | 35.904 | 79 | 0.626816 | false |
spthaolt/VTK | Examples/Modelling/Python/Delaunay3D.py | 9 | 1692 | #!/usr/bin/env python
# This example shows how to use Delaunay3D with alpha shapes.
import vtk
# The points to be triangulated are generated randomly in the unit
# cube located at the origin. The points are then associated with a
# vtkPolyData.
math = vtk.vtkMath()
points = vtk.vtkPoints()
for i in range(0, 25):
points.InsertPoint(i, math.Random(0, 1), math.Random(0, 1),
math.Random(0, 1))
profile = vtk.vtkPolyData()
profile.SetPoints(points)
# Delaunay3D is used to triangulate the points. The Tolerance is the
# distance that nearly coincident points are merged
# together. (Delaunay does better if points are well spaced.) The
# alpha value is the radius of circumcircles, circumspheres. Any mesh
# entity whose circumcircle is smaller than this value is output.
delny = vtk.vtkDelaunay3D()
delny.SetInput(profile)
delny.SetTolerance(0.01)
delny.SetAlpha(0.2)
delny.BoundingTriangulationOff()
# Shrink the result to help see it better.
shrink = vtk.vtkShrinkFilter()
shrink.SetInputConnection(delny.GetOutputPort())
shrink.SetShrinkFactor(0.9)
map = vtk.vtkDataSetMapper()
map.SetInputConnection(shrink.GetOutputPort())
triangulation = vtk.vtkActor()
triangulation.SetMapper(map)
triangulation.GetProperty().SetColor(1, 0, 0)
# Create graphics stuff
ren = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.AddRenderer(ren)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
# Add the actors to the renderer, set the background and size
ren.AddActor(triangulation)
ren.SetBackground(1, 1, 1)
renWin.SetSize(250, 250)
renWin.Render()
cam1 = ren.GetActiveCamera()
cam1.Zoom(1.5)
iren.Initialize()
renWin.Render()
iren.Start()
| bsd-3-clause | 8,348,779,720,164,846,000 | 27.2 | 69 | 0.76182 | false |
simonwagner/mergepbx | src/plist/antlr/PlistParser.py | 4 | 16877 | # $ANTLR 3.2 Sep 23, 2009 12:02:23 Plist.g 2013-12-12 18:02:36
import sys
from .runtime.antlr3 import *
from .runtime.antlr3.compat import set, frozenset
from collections import OrderedDict
from ..escape import unescape_string
# for convenience in actions
HIDDEN = BaseRecognizer.HIDDEN
# token types
BRACE_OPEN=13
WS=16
ESC_SEQ=7
BRACE_CLOSE=14
WS_CHAR=5
IDENTIFIER=6
DICTIONARY_SEPERATOR=10
ARRAY_SEPERATOR=9
HEX_DIGIT=17
ASSIGNMENT=15
COMMENT=4
EOF=-1
BRACKET_CLOSE=12
STRING=8
BRACKET_OPEN=11
# token names
tokenNames = [
"<invalid>", "<EOR>", "<DOWN>", "<UP>",
"COMMENT", "WS_CHAR", "IDENTIFIER", "ESC_SEQ", "STRING", "ARRAY_SEPERATOR",
"DICTIONARY_SEPERATOR", "BRACKET_OPEN", "BRACKET_CLOSE", "BRACE_OPEN",
"BRACE_CLOSE", "ASSIGNMENT", "WS", "HEX_DIGIT"
]
class PlistParser(Parser):
grammarFileName = "Plist.g"
antlr_version = version_str_to_tuple("3.1 Sep 23, 2009 12:02:23")
antlr_version_str = "3.1 Sep 23, 2009 12:02:23"
tokenNames = tokenNames
def __init__(self, input, state=None, *args, **kwargs):
if state is None:
state = RecognizerSharedState()
super(PlistParser, self).__init__(input, state, *args, **kwargs)
def displayRecognitionError(self, tokenNames, exception):
pass
# $ANTLR start "plist"
# Plist.g:87:1: plist returns [value] : (lbl_value= dictionary | lbl_value= array );
def plist(self, ):
value = None
lbl_value = None
try:
try:
# Plist.g:91:5: (lbl_value= dictionary | lbl_value= array )
alt1 = 2
LA1_0 = self.input.LA(1)
if (LA1_0 == BRACE_OPEN) :
alt1 = 1
elif (LA1_0 == BRACKET_OPEN) :
alt1 = 2
else:
nvae = NoViableAltException("", 1, 0, self.input)
raise nvae
if alt1 == 1:
# Plist.g:91:8: lbl_value= dictionary
pass
self._state.following.append(self.FOLLOW_dictionary_in_plist474)
lbl_value = self.dictionary()
self._state.following.pop()
elif alt1 == 2:
# Plist.g:91:31: lbl_value= array
pass
self._state.following.append(self.FOLLOW_array_in_plist480)
lbl_value = self.array()
self._state.following.pop()
#action start
value = lbl_value
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "plist"
# $ANTLR start "value"
# Plist.g:93:1: value returns [value] : (lbl_value= dictionary | lbl_value= array | lbl_value= identifier | lbl_value= string );
def value(self, ):
value = None
lbl_value = None
try:
try:
# Plist.g:97:5: (lbl_value= dictionary | lbl_value= array | lbl_value= identifier | lbl_value= string )
alt2 = 4
LA2 = self.input.LA(1)
if LA2 == BRACE_OPEN:
alt2 = 1
elif LA2 == BRACKET_OPEN:
alt2 = 2
elif LA2 == IDENTIFIER:
alt2 = 3
elif LA2 == STRING:
alt2 = 4
else:
nvae = NoViableAltException("", 2, 0, self.input)
raise nvae
if alt2 == 1:
# Plist.g:97:8: lbl_value= dictionary
pass
self._state.following.append(self.FOLLOW_dictionary_in_value508)
lbl_value = self.dictionary()
self._state.following.pop()
elif alt2 == 2:
# Plist.g:97:31: lbl_value= array
pass
self._state.following.append(self.FOLLOW_array_in_value514)
lbl_value = self.array()
self._state.following.pop()
elif alt2 == 3:
# Plist.g:97:49: lbl_value= identifier
pass
self._state.following.append(self.FOLLOW_identifier_in_value520)
lbl_value = self.identifier()
self._state.following.pop()
elif alt2 == 4:
# Plist.g:97:72: lbl_value= string
pass
self._state.following.append(self.FOLLOW_string_in_value526)
lbl_value = self.string()
self._state.following.pop()
#action start
value = lbl_value
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "value"
# $ANTLR start "string"
# Plist.g:99:1: string returns [value] : lbl_string= STRING ;
def string(self, ):
value = None
lbl_string = None
try:
try:
# Plist.g:103:5: (lbl_string= STRING )
# Plist.g:103:9: lbl_string= STRING
pass
lbl_string=self.match(self.input, STRING, self.FOLLOW_STRING_in_string555)
#action start
value = unescape_string(lbl_string.text[1:-1])
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "string"
# $ANTLR start "identifier"
# Plist.g:105:1: identifier returns [value] : lbl_identifier= IDENTIFIER ;
def identifier(self, ):
value = None
lbl_identifier = None
try:
try:
# Plist.g:109:5: (lbl_identifier= IDENTIFIER )
# Plist.g:109:7: lbl_identifier= IDENTIFIER
pass
lbl_identifier=self.match(self.input, IDENTIFIER, self.FOLLOW_IDENTIFIER_in_identifier586)
#action start
value = lbl_identifier.text
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "identifier"
# $ANTLR start "array"
# Plist.g:112:1: array returns [value] : BRACKET_OPEN (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )? ( ARRAY_SEPERATOR )? BRACKET_CLOSE ;
def array(self, ):
value = None
lbl_first_value = None
lbl_value = None
value = []
try:
try:
# Plist.g:116:5: ( BRACKET_OPEN (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )? ( ARRAY_SEPERATOR )? BRACKET_CLOSE )
# Plist.g:116:8: BRACKET_OPEN (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )? ( ARRAY_SEPERATOR )? BRACKET_CLOSE
pass
self.match(self.input, BRACKET_OPEN, self.FOLLOW_BRACKET_OPEN_in_array617)
# Plist.g:116:21: (lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )* )?
alt4 = 2
LA4_0 = self.input.LA(1)
if (LA4_0 == IDENTIFIER or LA4_0 == STRING or LA4_0 == BRACKET_OPEN or LA4_0 == BRACE_OPEN) :
alt4 = 1
if alt4 == 1:
# Plist.g:116:22: lbl_first_value= value ( ARRAY_SEPERATOR lbl_value= value )*
pass
self._state.following.append(self.FOLLOW_value_in_array622)
lbl_first_value = self.value()
self._state.following.pop()
#action start
value.append(lbl_first_value)
#action end
# Plist.g:116:84: ( ARRAY_SEPERATOR lbl_value= value )*
while True: #loop3
alt3 = 2
LA3_0 = self.input.LA(1)
if (LA3_0 == ARRAY_SEPERATOR) :
LA3_1 = self.input.LA(2)
if (LA3_1 == IDENTIFIER or LA3_1 == STRING or LA3_1 == BRACKET_OPEN or LA3_1 == BRACE_OPEN) :
alt3 = 1
if alt3 == 1:
# Plist.g:116:85: ARRAY_SEPERATOR lbl_value= value
pass
self.match(self.input, ARRAY_SEPERATOR, self.FOLLOW_ARRAY_SEPERATOR_in_array627)
self._state.following.append(self.FOLLOW_value_in_array631)
lbl_value = self.value()
self._state.following.pop()
#action start
value.append(lbl_value)
#action end
else:
break #loop3
# Plist.g:116:155: ( ARRAY_SEPERATOR )?
alt5 = 2
LA5_0 = self.input.LA(1)
if (LA5_0 == ARRAY_SEPERATOR) :
alt5 = 1
if alt5 == 1:
# Plist.g:116:156: ARRAY_SEPERATOR
pass
self.match(self.input, ARRAY_SEPERATOR, self.FOLLOW_ARRAY_SEPERATOR_in_array640)
self.match(self.input, BRACKET_CLOSE, self.FOLLOW_BRACKET_CLOSE_in_array644)
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "array"
# $ANTLR start "dictionary_key"
# Plist.g:119:1: dictionary_key returns [value] : (lbl_key= identifier | lbl_key= string ) ;
def dictionary_key(self, ):
value = None
lbl_key = None
try:
try:
# Plist.g:123:2: ( (lbl_key= identifier | lbl_key= string ) )
# Plist.g:123:6: (lbl_key= identifier | lbl_key= string )
pass
# Plist.g:123:6: (lbl_key= identifier | lbl_key= string )
alt6 = 2
LA6_0 = self.input.LA(1)
if (LA6_0 == IDENTIFIER) :
alt6 = 1
elif (LA6_0 == STRING) :
alt6 = 2
else:
nvae = NoViableAltException("", 6, 0, self.input)
raise nvae
if alt6 == 1:
# Plist.g:123:7: lbl_key= identifier
pass
self._state.following.append(self.FOLLOW_identifier_in_dictionary_key673)
lbl_key = self.identifier()
self._state.following.pop()
elif alt6 == 2:
# Plist.g:123:28: lbl_key= string
pass
self._state.following.append(self.FOLLOW_string_in_dictionary_key679)
lbl_key = self.string()
self._state.following.pop()
#action start
value = lbl_key
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "dictionary_key"
# $ANTLR start "dictionary_entry"
# Plist.g:126:1: dictionary_entry returns [value] : lbl_key= dictionary_key ASSIGNMENT lbl_value= value DICTIONARY_SEPERATOR ;
def dictionary_entry(self, ):
value = None
lbl_key = None
lbl_value = None
try:
try:
# Plist.g:130:5: (lbl_key= dictionary_key ASSIGNMENT lbl_value= value DICTIONARY_SEPERATOR )
# Plist.g:130:8: lbl_key= dictionary_key ASSIGNMENT lbl_value= value DICTIONARY_SEPERATOR
pass
self._state.following.append(self.FOLLOW_dictionary_key_in_dictionary_entry710)
lbl_key = self.dictionary_key()
self._state.following.pop()
self.match(self.input, ASSIGNMENT, self.FOLLOW_ASSIGNMENT_in_dictionary_entry712)
self._state.following.append(self.FOLLOW_value_in_dictionary_entry716)
lbl_value = self.value()
self._state.following.pop()
self.match(self.input, DICTIONARY_SEPERATOR, self.FOLLOW_DICTIONARY_SEPERATOR_in_dictionary_entry718)
#action start
value = (lbl_key, lbl_value)
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "dictionary_entry"
# $ANTLR start "dictionary"
# Plist.g:133:1: dictionary returns [value] : BRACE_OPEN (lbl_entry= dictionary_entry )* BRACE_CLOSE ;
def dictionary(self, ):
value = None
lbl_entry = None
entries = []
try:
try:
# Plist.g:140:5: ( BRACE_OPEN (lbl_entry= dictionary_entry )* BRACE_CLOSE )
# Plist.g:140:8: BRACE_OPEN (lbl_entry= dictionary_entry )* BRACE_CLOSE
pass
self.match(self.input, BRACE_OPEN, self.FOLLOW_BRACE_OPEN_in_dictionary758)
# Plist.g:140:19: (lbl_entry= dictionary_entry )*
while True: #loop7
alt7 = 2
LA7_0 = self.input.LA(1)
if (LA7_0 == IDENTIFIER or LA7_0 == STRING) :
alt7 = 1
if alt7 == 1:
# Plist.g:140:20: lbl_entry= dictionary_entry
pass
self._state.following.append(self.FOLLOW_dictionary_entry_in_dictionary763)
lbl_entry = self.dictionary_entry()
self._state.following.pop()
#action start
entries.append(lbl_entry)
#action end
else:
break #loop7
self.match(self.input, BRACE_CLOSE, self.FOLLOW_BRACE_CLOSE_in_dictionary769)
#action start
value = OrderedDict(entries)
#action end
except RecognitionException, e:
raise e
finally:
pass
return value
# $ANTLR end "dictionary"
# Delegated rules
FOLLOW_dictionary_in_plist474 = frozenset([1])
FOLLOW_array_in_plist480 = frozenset([1])
FOLLOW_dictionary_in_value508 = frozenset([1])
FOLLOW_array_in_value514 = frozenset([1])
FOLLOW_identifier_in_value520 = frozenset([1])
FOLLOW_string_in_value526 = frozenset([1])
FOLLOW_STRING_in_string555 = frozenset([1])
FOLLOW_IDENTIFIER_in_identifier586 = frozenset([1])
FOLLOW_BRACKET_OPEN_in_array617 = frozenset([6, 8, 9, 11, 12, 13])
FOLLOW_value_in_array622 = frozenset([9, 12])
FOLLOW_ARRAY_SEPERATOR_in_array627 = frozenset([6, 8, 11, 13])
FOLLOW_value_in_array631 = frozenset([9, 12])
FOLLOW_ARRAY_SEPERATOR_in_array640 = frozenset([12])
FOLLOW_BRACKET_CLOSE_in_array644 = frozenset([1])
FOLLOW_identifier_in_dictionary_key673 = frozenset([1])
FOLLOW_string_in_dictionary_key679 = frozenset([1])
FOLLOW_dictionary_key_in_dictionary_entry710 = frozenset([15])
FOLLOW_ASSIGNMENT_in_dictionary_entry712 = frozenset([6, 8, 11, 13])
FOLLOW_value_in_dictionary_entry716 = frozenset([10])
FOLLOW_DICTIONARY_SEPERATOR_in_dictionary_entry718 = frozenset([1])
FOLLOW_BRACE_OPEN_in_dictionary758 = frozenset([6, 8, 11, 13, 14])
FOLLOW_dictionary_entry_in_dictionary763 = frozenset([6, 8, 11, 13, 14])
FOLLOW_BRACE_CLOSE_in_dictionary769 = frozenset([1])
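# Hedged usage sketch (comments only, added for illustration): the lexer module
# name is an assumption inferred from ParserMain("PlistLexer", ...) in main().
#   from .PlistLexer import PlistLexer
#   stream = ANTLRStringStream('{ key = value; }')
#   parser = PlistParser(CommonTokenStream(PlistLexer(stream)))
#   result = parser.plist()   # expected: an OrderedDict with entry 'key' -> 'value'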
def main(argv, stdin=sys.stdin, stdout=sys.stdout, stderr=sys.stderr):
from antlr3.main import ParserMain
main = ParserMain("PlistLexer", PlistParser)
main.stdin = stdin
main.stdout = stdout
main.stderr = stderr
main.execute(argv)
if __name__ == '__main__':
main(sys.argv)
| gpl-3.0 | -557,432,898,065,954,100 | 27.605085 | 159 | 0.486639 | false |
attwad/python-osc | pythonosc/parsing/osc_types.py | 1 | 14013 | """Functions to get OSC types from datagrams and vice versa"""
import struct
from pythonosc.parsing import ntp
from datetime import datetime, timedelta, date
from typing import Union, Tuple
class ParseError(Exception):
"""Base exception for when a datagram parsing error occurs."""
class BuildError(Exception):
"""Base exception for when a datagram building error occurs."""
# Constant for special ntp datagram sequences that represent an immediate time.
IMMEDIATELY = 0
# Datagram length in bytes for types that have a fixed size.
_INT_DGRAM_LEN = 4
_UINT64_DGRAM_LEN = 8
_FLOAT_DGRAM_LEN = 4
_DOUBLE_DGRAM_LEN = 8
_TIMETAG_DGRAM_LEN = 8
# Strings and blob dgram length is always a multiple of 4 bytes.
_STRING_DGRAM_PAD = 4
_BLOB_DGRAM_PAD = 4
_EMPTY_STR_DGRAM = b'\x00\x00\x00\x00'
def write_string(val: str) -> bytes:
"""Returns the OSC string equivalent of the given python string.
Raises:
- BuildError if the string could not be encoded.
"""
try:
dgram = val.encode('utf-8') # Default, but better be explicit.
except (UnicodeEncodeError, AttributeError) as e:
raise BuildError('Incorrect string, could not encode {}'.format(e))
diff = _STRING_DGRAM_PAD - (len(dgram) % _STRING_DGRAM_PAD)
dgram += (b'\x00' * diff)
return dgram
def get_string(dgram: bytes, start_index: int) -> Tuple[str, int]:
"""Get a python string from the datagram, starting at pos start_index.
According to the specifications, a string is:
"A sequence of non-null ASCII characters followed by a null,
followed by 0-3 additional null characters to make the total number
of bits a multiple of 32".
Args:
dgram: A datagram packet.
start_index: An index where the string starts in the datagram.
Returns:
A tuple containing the string and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
if start_index < 0:
raise ParseError('start_index < 0')
offset = 0
try:
if (len(dgram) > start_index + _STRING_DGRAM_PAD
and dgram[start_index + _STRING_DGRAM_PAD] == _EMPTY_STR_DGRAM):
return '', start_index + _STRING_DGRAM_PAD
while dgram[start_index + offset] != 0:
offset += 1
# Align to a byte word.
if (offset) % _STRING_DGRAM_PAD == 0:
offset += _STRING_DGRAM_PAD
else:
offset += (-offset % _STRING_DGRAM_PAD)
# Python slices do not raise an IndexError past the last index,
# do it ourselves.
if offset > len(dgram[start_index:]):
raise ParseError('Datagram is too short')
data_str = dgram[start_index:start_index + offset]
return data_str.replace(b'\x00', b'').decode('utf-8'), start_index + offset
except IndexError as ie:
raise ParseError('Could not parse datagram %s' % ie)
except TypeError as te:
raise ParseError('Could not parse datagram %s' % te)
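# Hedged illustration (not part of python-osc): the 4-byte padding rule described
# in get_string's docstring in action -- a 3-character string gains one NUL byte,
# so parsing resumes at index 4.
def _example_string_padding():
    dgram = write_string('osc')      # b'osc\x00'
    return get_string(dgram, 0)      # ('osc', 4)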
def write_int(val: int) -> bytes:
"""Returns the datagram for the given integer parameter value
Raises:
- BuildError if the int could not be converted.
"""
try:
return struct.pack('>i', val)
except struct.error as e:
raise BuildError('Wrong argument value passed: {}'.format(e))
def get_int(dgram: bytes, start_index: int) -> Tuple[int, int]:
"""Get a 32-bit big-endian two's complement integer from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the integer starts in the datagram.
Returns:
A tuple containing the integer and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _INT_DGRAM_LEN:
raise ParseError('Datagram is too short')
return (
struct.unpack('>i',
dgram[start_index:start_index + _INT_DGRAM_LEN])[0],
start_index + _INT_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e)
def get_uint64(dgram: bytes, start_index: int) -> Tuple[int, int]:
"""Get a 64-bit big-endian unsigned integer from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the integer starts in the datagram.
Returns:
A tuple containing the integer and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _UINT64_DGRAM_LEN:
raise ParseError('Datagram is too short')
return (
struct.unpack('>Q',
dgram[start_index:start_index + _UINT64_DGRAM_LEN])[0],
start_index + _UINT64_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e)
def get_timetag(dgram: bytes, start_index: int) -> Tuple[datetime, int]:
"""Get a 64-bit OSC time tag from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the osc time tag starts in the datagram.
Returns:
    A tuple containing the time of sending as a (utc datetime, second-fraction)
    pair, and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _TIMETAG_DGRAM_LEN:
raise ParseError('Datagram is too short')
timetag, _ = get_uint64(dgram, start_index)
seconds, fraction = ntp.parse_timestamp(timetag)
hours, seconds = seconds // 3600, seconds % 3600
minutes, seconds = seconds // 60, seconds % 60
utc = (datetime.combine(ntp._NTP_EPOCH, datetime.min.time()) +
timedelta(hours=hours, minutes=minutes, seconds=seconds))
return (utc, fraction), start_index + _TIMETAG_DGRAM_LEN
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e)
def write_float(val: float) -> bytes:
"""Returns the datagram for the given float parameter value
Raises:
- BuildError if the float could not be converted.
"""
try:
return struct.pack('>f', val)
except struct.error as e:
raise BuildError('Wrong argument value passed: {}'.format(e))
def get_float(dgram: bytes, start_index: int) -> Tuple[float, int]:
"""Get a 32-bit big-endian IEEE 754 floating point number from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the float starts in the datagram.
Returns:
A tuple containing the float and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _FLOAT_DGRAM_LEN:
# Noticed that Reaktor doesn't send the last bunch of \x00 needed to make
# the float representation complete in some cases, thus we pad here to
# account for that.
dgram = dgram + b'\x00' * (_FLOAT_DGRAM_LEN - len(dgram[start_index:]))
return (
struct.unpack('>f',
dgram[start_index:start_index + _FLOAT_DGRAM_LEN])[0],
start_index + _FLOAT_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e)
def write_double(val: float) -> bytes:
"""Returns the datagram for the given double parameter value
Raises:
- BuildError if the double could not be converted.
"""
try:
return struct.pack('>d', val)
except struct.error as e:
raise BuildError('Wrong argument value passed: {}'.format(e))
def get_double(dgram: bytes, start_index: int) -> Tuple[float, int]:
"""Get a 64-bit big-endian IEEE 754 floating point number from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the double starts in the datagram.
Returns:
A tuple containing the double and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _DOUBLE_DGRAM_LEN:
raise ParseError('Datagram is too short')
return (
struct.unpack('>d',
dgram[start_index:start_index + _DOUBLE_DGRAM_LEN])[0],
start_index + _DOUBLE_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram {}'.format(e))
def get_blob(dgram: bytes, start_index: int) -> Tuple[bytes, int]:
""" Get a blob from the datagram.
According to the specifications, a blob is made of
"an int32 size count, followed by that many 8-bit bytes of arbitrary
binary data, followed by 0-3 additional zero bytes to make the total
number of bits a multiple of 32".
Args:
dgram: A datagram packet.
start_index: An index where the float starts in the datagram.
Returns:
A tuple containing the blob and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
size, int_offset = get_int(dgram, start_index)
# Make the size a multiple of 32 bits.
total_size = size + (-size % _BLOB_DGRAM_PAD)
end_index = int_offset + size
if end_index - start_index > len(dgram[start_index:]):
raise ParseError('Datagram is too short.')
return dgram[int_offset:int_offset + size], int_offset + total_size
def write_blob(val: bytes) -> bytes:
"""Returns the datagram for the given blob parameter value.
Raises:
- BuildError if the value was empty or if its size didn't fit an OSC int.
"""
if not val:
raise BuildError('Blob value cannot be empty')
dgram = write_int(len(val))
dgram += val
while len(dgram) % _BLOB_DGRAM_PAD != 0:
dgram += b'\x00'
return dgram
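# Hedged illustration (not part of python-osc): a 3-byte blob is prefixed with
# its int32 size and padded to the next 4-byte boundary, 8 bytes in total.
def _example_blob_padding():
    dgram = write_blob(b'\x01\x02\x03')   # 4-byte size prefix + data + 1 pad byte
    return get_blob(dgram, 0)             # (b'\x01\x02\x03', 8)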
def get_date(dgram: bytes, start_index: int) -> Tuple[float, int]:
"""Get a 64-bit big-endian fixed-point time tag as a date from the datagram.
According to the specifications, a date is represented as is:
"the first 32 bits specify the number of seconds since midnight on
January 1, 1900, and the last 32 bits specify fractional parts of a second
to a precision of about 200 picoseconds".
Args:
dgram: A datagram packet.
start_index: An index where the date starts in the datagram.
Returns:
A tuple containing the system date and the new end index.
returns osc_immediately (0) if the corresponding OSC sequence was found.
Raises:
ParseError if the datagram could not be parsed.
"""
# Check for the special case first.
if dgram[start_index:start_index + _TIMETAG_DGRAM_LEN] == ntp.IMMEDIATELY:
return IMMEDIATELY, start_index + _TIMETAG_DGRAM_LEN
if len(dgram[start_index:]) < _TIMETAG_DGRAM_LEN:
raise ParseError('Datagram is too short')
timetag, start_index = get_uint64(dgram, start_index)
seconds = timetag * ntp._NTP_TIMESTAMP_TO_SECONDS
return ntp.ntp_time_to_system_epoch(seconds), start_index
def write_date(system_time: Union[int, float]) -> bytes:
if system_time == IMMEDIATELY:
return ntp.IMMEDIATELY
try:
return ntp.system_time_to_ntp(system_time)
except ntp.NtpError as ntpe:
raise BuildError(ntpe)
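def _example_timetag_layout():
  # Illustrative sketch only, not part of the original module: shows how the
  # 64-bit OSC time tag splits into 32 bits of whole seconds since 1900 and a
  # 32-bit fraction of a second. The values below are arbitrary examples.
  seconds_since_1900 = 3787776000          # whole-second part (example value)
  fraction = int(0.5 * 2 ** 32)            # 0.5 s expressed in 1/2**32 units
  dgram = struct.pack('>II', seconds_since_1900, fraction)
  secs, frac = struct.unpack('>II', dgram)
  return secs + frac / 2 ** 32             # back to fractional seconds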
def write_rgba(val: bytes) -> bytes:
"""Returns the datagram for the given rgba32 parameter value
Raises:
- BuildError if the int could not be converted.
"""
try:
return struct.pack('>I', val)
except struct.error as e:
raise BuildError('Wrong argument value passed: {}'.format(e))
def get_rgba(dgram: bytes, start_index: int) -> Tuple[bytes, int]:
"""Get an rgba32 integer from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the integer starts in the datagram.
Returns:
A tuple containing the integer and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _INT_DGRAM_LEN:
raise ParseError('Datagram is too short')
return (
struct.unpack('>I',
dgram[start_index:start_index + _INT_DGRAM_LEN])[0],
start_index + _INT_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e)
def write_midi(val: Tuple[Tuple[int, int, int, int], int]) -> bytes:
"""Returns the datagram for the given MIDI message parameter value
A valid MIDI message: (port id, status byte, data1, data2).
Raises:
- BuildError if the MIDI message could not be converted.
"""
if len(val) != 4:
raise BuildError('MIDI message length is invalid')
try:
value = sum((value & 0xFF) << 8 * (3 - pos) for pos, value in enumerate(val))
return struct.pack('>I', value)
except struct.error as e:
raise BuildError('Wrong argument value passed: {}'.format(e))
def get_midi(dgram: bytes, start_index: int) -> Tuple[Tuple[int, int, int, int], int]:
"""Get a MIDI message (port id, status byte, data1, data2) from the datagram.
Args:
dgram: A datagram packet.
start_index: An index where the MIDI message starts in the datagram.
Returns:
A tuple containing the MIDI message and the new end index.
Raises:
ParseError if the datagram could not be parsed.
"""
try:
if len(dgram[start_index:]) < _INT_DGRAM_LEN:
raise ParseError('Datagram is too short')
val = struct.unpack('>I',
dgram[start_index:start_index + _INT_DGRAM_LEN])[0]
midi_msg = tuple((val & 0xFF << 8 * i) >> 8 * i for i in range(3, -1, -1))
return (midi_msg, start_index + _INT_DGRAM_LEN)
except (struct.error, TypeError) as e:
raise ParseError('Could not parse datagram %s' % e)
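def _example_midi_roundtrip():
  # Illustrative sketch only, not part of the original module: the four MIDI
  # bytes (port id, status, data1, data2) are packed big-endian into a single
  # 32-bit integer, so (0, 0x90, 60, 127) becomes 0x00903C7F and unpacks back
  # to the same tuple (assuming the module's 4-byte _INT_DGRAM_LEN).
  dgram = write_midi((0, 0x90, 60, 127))
  midi_msg, _ = get_midi(dgram, 0)
  assert midi_msg == (0, 0x90, 60, 127)
  return midi_msg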
| unlicense | 7,643,043,762,583,972,000 | 32.685096 | 86 | 0.634768 | false |
benrudolph/commcare-hq | custom/_legacy/psi/reports/__init__.py | 2 | 12431 | from corehq.apps.fixtures.models import FixtureDataItem, FixtureDataType
from corehq.apps.reports.filters.base import BaseSingleOptionFilter
from corehq.apps.reports.filters.fixtures import AsyncDrillableFilter
from corehq.apps.reports.standard import CustomProjectReport, DatespanMixin
from corehq.apps.reports.basic import Column, FunctionView, SummingTabularReport
from util import get_unique_combinations
from couchdbkit_aggregate.fn import mean
from dimagi.utils.decorators.memoized import memoized
DEMO_TYPES = ["asha", "aww", "anm", "ngo", "cbo", "vhnd"]
class StateDistrictField(AsyncDrillableFilter):
label = "State and District"
slug = "location"
hierarchy = [{"type": "state", "display": "name"},
{"type": "district", "parent_ref": "state_id", "references": "id", "display": "name"},]
class StateDistrictBlockField(AsyncDrillableFilter):
label = "State/District/Block"
slug = "location"
hierarchy = [{"type": "state", "display": "name"},
{"type": "district", "parent_ref": "state_id", "references": "id", "display": "name"},
{"type": "block", "parent_ref": "district_id", "references": "id", "display": "name"}]
class AsyncPlaceField(AsyncDrillableFilter):
label = "State/District/Block/Village"
slug = "location"
hierarchy = [{"type": "state", "display": "name"},
{"type": "district", "parent_ref": "state_id", "references": "id", "display": "name"},
{"type": "block", "parent_ref": "district_id", "references": "id", "display": "name"},
{"type": "village", "parent_ref": "block_id", "references": "id", "display": "name"}]
class DemoTypeField(BaseSingleOptionFilter):
slug = "demo_type"
label = "Worker Type"
default_text = "Aggregate"
@property
def options(self):
return [('_all', 'All worker types')] + [(dt, dt) for dt in DEMO_TYPES]
class AggregateAtField(BaseSingleOptionFilter):
"""
    To use: subclass and specify what the field options should be.
"""
slug = "aggregate_at"
label = "Aggregate at what level"
@property
def default_option(self):
return "Default: %s" % self.field_opts[-1]
@property
def field_opts(self):
raise NotImplementedError('Subclass me fully!')
@property
def options(self):
return [(f.lower(), f) for f in [fo for fo in self.field_opts if fo != self.selected]]
class AASD(AggregateAtField):
field_opts = ["State", "District"]
class AASDB(AggregateAtField):
field_opts = ["State", "District", "Block"]
class AASDBV(AggregateAtField):
field_opts = ["State", "District", "Block", "Village"]
@memoized
def get_village_fdt(domain):
return FixtureDataType.by_domain_tag(domain, 'village').one()
@memoized
def get_village(req, id):
village_fdt = get_village_fdt(req.domain)
return FixtureDataItem.by_field_value(req.domain, village_fdt, 'id', float(id)).one()
def get_village_name(key, req):
return get_village(req, key[4]).fields_without_attributes.get("name", id)
def get_village_class(key, req):
return get_village(req, key[4]).fields_without_attributes.get("village_class", "No data")
class PSIReport(SummingTabularReport, CustomProjectReport, DatespanMixin):
is_cacheable = True
update_after = True
fields = ['corehq.apps.reports.filters.dates.DatespanFilter','psi.reports.AsyncPlaceField',]
state_name = Column("State", calculate_fn=lambda key, _: key[1])
district_name = Column("District", calculate_fn=lambda key, _: key[2])
block_name = Column("Block", calculate_fn=lambda key, _: key[3])
village_name = Column("Village", calculate_fn=get_village_name)
village_code = Column("Village Code", calculate_fn=lambda key, _: key[4])
village_class = Column("Village Class", calculate_fn =get_village_class)
def selected_fixture(self):
fixture = self.request.GET.get('fixture_id', "")
return fixture.split(':') if fixture else None
@property
@memoized
def place_types(self):
opts = ['state', 'district', 'block', 'village']
agg_at = self.request.GET.get('aggregate_at', None)
agg_at = agg_at if agg_at and opts.index(agg_at) <= opts.index(self.default_aggregation) else self.default_aggregation
return opts[:opts.index(agg_at) + 1]
@property
def initial_column_order(self):
ret = tuple([col + '_name' for col in self.place_types[:3]])
if len(self.place_types) > 3:
ret += ('village_name', 'village_code', 'village_class')
return ret
@property
def start_and_end_keys(self):
return ([self.datespan.startdate_param_utc],
[self.datespan.enddate_param_utc])
@property
def keys(self):
combos = get_unique_combinations(self.domain, place_types=self.place_types, place=self.selected_fixture())
for c in combos:
yield [self.domain] + [c[pt] for pt in self.place_types]
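    # Illustrative note, not part of the original report: each key yielded above is
    # simply the domain followed by one value per selected place type, e.g. with
    # village-level aggregation something shaped like
    #   ['psi-domain', <state>, <district>, <block>, <village>]
    # where the concrete values come from get_unique_combinations().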
class PSIEventsReport(PSIReport):
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'psi.reports.StateDistrictField',
'psi.reports.AASD',]
name = "Event Demonstration Report"
exportable = True
emailable = True
slug = "event_demonstations"
section_name = "event demonstrations"
default_aggregation = 'district'
couch_view = 'psi/events'
@property
def default_column_order(self):
return self.initial_column_order + (
'events',
'males',
'females',
'attendees',
'leaflets',
'gifts',
)
events = Column("Number of events", key='events')
males = Column("Number of male attendees", key='males')
females = Column("Number of female attendees", key='females')
attendees = Column("Total number of attendees", key='attendees')
leaflets = Column("Total number of leaflets distributed", key='leaflets')
gifts = Column("Total number of gifts distributed", key='gifts')
class PSIHDReport(PSIReport):
name = "Household Demonstrations Report"
exportable = True
emailable = True
slug = "household_demonstations"
section_name = "household demonstrations"
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'psi.reports.AsyncPlaceField',
'psi.reports.DemoTypeField',
'psi.reports.AASDBV',]
default_aggregation = 'village'
def __init__(self, request, **kwargs):
"""
        This override is necessary because the demo_type column's calculate_fn needs information from the
        request (to determine place types). Since columns are normally initialized when the class is defined
        (using the ColumnCollector metaclass), the demo_type column must instead be initialized here, where
        it has access to the request.
"""
super(PSIHDReport, self).__init__(request, **kwargs)
calculate_fn = lambda key, _: key[len(self.place_types) + 1]
self.columns['demo_type'] = Column("Worker Type", calculate_fn=calculate_fn)
self.columns['demo_type'].view = FunctionView(calculate_fn=calculate_fn)
self.function_views['demo_type'] = self.columns['demo_type'].view
@property
@memoized
def selected_dt(self):
return self.request.GET.get('demo_type', "")
@property
def keys(self):
combos = get_unique_combinations(self.domain, place_types=self.place_types, place=self.selected_fixture())
selected_demo_type = self.request.GET.get('demo_type', "")
for c in combos:
if self.selected_dt:
if self.selected_dt == '_all':
for dt in DEMO_TYPES:
yield [self.domain] + [c[pt] for pt in self.place_types] + [dt]
else:
yield [self.domain] + [c[pt] for pt in self.place_types] + [selected_demo_type]
else:
yield [self.domain] + [c[pt] for pt in self.place_types]
couch_view = 'psi/household_demonstrations'
@property
def default_column_order(self):
to_add = [
'demonstrations',
'children',
'leaflets',
'kits',
]
if self.selected_dt:
to_add.insert(0, "demo_type")
return self.initial_column_order + tuple(to_add)
demo_type = Column("Worker Type", calculate_fn=lambda key, _: key[5])
demonstrations = Column("Number of demonstrations done", key="demonstrations")
children = Column("Number of 0-6 year old children", key="children")
leaflets = Column("Total number of leaflets distributed", key='leaflets')
kits = Column("Number of kits sold", key="kits")
class PSISSReport(PSIReport):
name = "Sensitization Sessions Report"
exportable = True
emailable = True
slug = "sensitization_sessions"
section_name = "sensitization sessions"
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'psi.reports.StateDistrictBlockField',
'psi.reports.AASDB',]
default_aggregation = 'block'
couch_view = 'psi/sensitization'
@property
def default_column_order(self):
return self.initial_column_order + (
'sessions',
'ayush_doctors',
"mbbs_doctors",
"asha_supervisors",
"ashas",
"awws",
"other",
"attendees",
)
sessions = Column("Number of Sessions", key="sessions")
ayush_doctors = Column("Ayush Sensitized", key="ayush_doctors")
mbbs_doctors = Column("MBBS Sensitized", key="mbbs_doctors")
asha_supervisors = Column("Asha Supervisors Sensitized", key="asha_supervisors")
ashas = Column("Ashas Sensitized", key="ashas")
awws = Column("AWW Sensitized", key="awws")
other = Column("Others (ANM, MPW, etc.)", key="other")
attendees = Column("VHND Attendees", key='attendees')
class PSITSReport(PSIReport):
name = "Training Sessions Report"
exportable = True
emailable = True
slug = "training_sessions"
section_name = "training sessions"
fields = ['corehq.apps.reports.filters.dates.DatespanFilter',
'psi.reports.StateDistrictField',
'psi.reports.AASD',]
default_aggregation = 'district'
couch_view = 'psi/training'
@property
def default_column_order(self):
return self.initial_column_order + (
"priv_trained",
"priv_ayush_trained",
"priv_allo_trained",
"priv_avg_diff",
"priv_gt80",
"pub_trained",
"pub_ayush_trained",
"pub_allo_trained",
"pub_avg_diff",
"pub_gt80",
"dep_trained",
"dep_pers_trained",
"dep_avg_diff",
"dep_gt80",
"flw_trained",
"flw_pers_trained",
"flw_avg_diff",
"flw_gt80",
)
priv_trained = Column("Private: Number of Trainings", key="priv_trained")
priv_ayush_trained = Column("Private: Ayush trained", key="priv_ayush_trained")
priv_allo_trained = Column("Private: Allopathics trained", key="priv_allo_trained")
priv_avg_diff = Column("Private: Learning changed", key="priv_avg_diff", reduce_fn=mean)
priv_gt80 = Column("Private: Num > 80%", key="priv_gt80")
pub_trained = Column("Public: Number of Trainings", key="pub_trained")
pub_ayush_trained = Column("Public: Ayush trained", key="pub_ayush_trained")
pub_allo_trained = Column("Public: Allopathics trained", key="pub_allo_trained")
pub_avg_diff = Column("Public: Learning changed", key="pub_avg_diff", reduce_fn=mean)
pub_gt80 = Column("Public: Num > 80%", key="pub_gt80")
dep_trained = Column("Depot: Number of Trainings", key="dep_trained")
dep_pers_trained = Column("Depot: Number of Personnel Trained", key="dep_pers_trained")
dep_avg_diff = Column("Depot: Learning changed", key="dep_avg_diff", reduce_fn=mean)
dep_gt80 = Column("Depot: Num > 80%", key="dep_gt80")
flw_trained = Column("FLW: Number of Trainings", key="flw_trained")
flw_pers_trained = Column("FLW: Number of Personnel Trained", key="flw_pers_trained")
flw_avg_diff = Column("FLW: Learning changed", key="flw_avg_diff", reduce_fn=mean)
flw_gt80 = Column("FLW: Num > 80%", key="flw_gt80")
| bsd-3-clause | -6,300,116,814,458,997,000 | 35.031884 | 126 | 0.628027 | false |
msparapa/das | das/optimalcontrol/tests/test_full_ocp_to_bvp_sol.py | 1 | 1966 | from ...bvpsol.bvpsol import *
from ...bvpsol.bvp import *
from ..optimalcontrol import *
tol = 0.0005
def test_brachistochrone_collocation_unreduced():
OCP = Problem()
Independent('t', 's', OCP)
State('x', 'v*cos(theta)', 'm', OCP)
State('y', 'v*sin(theta)', 'm', OCP)
State('v', '-g*sin(theta)', 'm/s', OCP)
ConstantofMotion('lambda_x', OCP)
ConstantofMotion('lambda_y', OCP)
Control('theta', 'nd', OCP)
Cost('1', '', '', 'nd', OCP)
Constraint('x-0', 0, 'initial', OCP)
Constraint('y-0', 0, 'initial', OCP)
Constraint('v-0', 0, 'initial', OCP)
Constraint('x-1', 1, 'terminal', OCP)
Constraint('y+1', -1, 'terminal', OCP)
Constraint('lambda_v-0', 0, 'terminal', OCP)
Constraint('-g*lambda_v*sin(theta) + lambda_x*v*cos(theta) + lambda_y*v*sin(theta) + 1', 0, 'terminal', OCP)
Quantity('g', '9.80665', 'm/s^2', OCP)
BVP = NecessaryConditions(OCP, opt_type='bvp', control_type='algebraic', verbose=0)
BVP.sol.x = np.array([0, 1])
BVP.sol.y = np.array([[0, 0, 0, -0.15, 0.1, -0.2], [1, -1, 2.5, -0.15, 0.1, -0.2]])
BVP.sol.params = np.array([0.5])
BVP.sol.consts = np.array([10])
prob_out = Collocation(bvp=BVP, options=[0,5])
assert abs(prob_out.sol.x[0] - 0) < tol
assert abs(prob_out.sol.x[-1] - 1) < tol
assert abs(prob_out.sol.y[0][0] - 0) < tol
assert abs(prob_out.sol.y[0][1] - 0) < tol
assert abs(prob_out.sol.y[0][2] - 0) < tol
assert abs(prob_out.sol.y[0][3] + 2.0889e-01) < tol
assert abs(prob_out.sol.y[0][4] - 7.9772e-02) < tol
assert abs(prob_out.sol.y[0][5] + 0.1) < tol
assert abs(prob_out.sol.y[-1][0] - 1) < tol
assert abs(prob_out.sol.y[-1][1] + 1) < tol
assert abs(prob_out.sol.y[-1][2] - 4.472) < tol
assert abs(prob_out.sol.y[-1][3] + 0.2086) < tol
assert abs(prob_out.sol.y[-1][4] - 0.08) < tol
assert abs(prob_out.sol.y[-1][5] - 0) < tol
assert abs(prob_out.sol.params[0] - 0.5773) < tol
| gpl-3.0 | -4,708,808,203,404,356,000 | 40.829787 | 112 | 0.572737 | false |
Learningtribes/edx-platform | cms/djangoapps/contentstore/views/course.py | 1 | 71400 | """
Views related to operations on course objects
"""
import copy
import json
import logging
import random
import string # pylint: disable=deprecated-module
from django.conf import settings
from django.contrib.auth.decorators import login_required
from django.core.exceptions import PermissionDenied
from django.core.urlresolvers import reverse
from django.http import HttpResponse, HttpResponseBadRequest, HttpResponseNotFound, Http404
from django.shortcuts import redirect
import django.utils
from django.utils.translation import ugettext as _
from django.views.decorators.http import require_http_methods, require_GET
from django.views.decorators.csrf import ensure_csrf_cookie
from opaque_keys import InvalidKeyError
from opaque_keys.edx.keys import CourseKey
from opaque_keys.edx.locations import Location
from .component import (
ADVANCED_COMPONENT_TYPES,
)
from .item import create_xblock_info
from .library import LIBRARIES_ENABLED
from ccx_keys.locator import CCXLocator
from contentstore import utils
from contentstore.course_group_config import (
COHORT_SCHEME,
GroupConfiguration,
GroupConfigurationsValidationError,
RANDOM_SCHEME,
)
from contentstore.course_info_model import get_course_updates, update_course_updates, delete_course_update
from contentstore.courseware_index import CoursewareSearchIndexer, SearchIndexingError
from contentstore.push_notification import push_notification_enabled
from contentstore.tasks import rerun_course
from contentstore.utils import (
add_instructor,
initialize_permissions,
get_lms_link_for_item,
remove_all_instructors,
reverse_course_url,
reverse_library_url,
reverse_usage_url,
reverse_url,
add_course_mode)
from contentstore.views.entrance_exam import (
create_entrance_exam,
delete_entrance_exam,
update_entrance_exam,
)
from course_action_state.managers import CourseActionStateItemNotFoundError
from course_action_state.models import CourseRerunState, CourseRerunUIStateManager
from course_creators.views import get_course_creator_status, add_user_with_status_unrequested
from edxmako.shortcuts import render_to_response
from models.settings.course_grading import CourseGradingModel
from models.settings.course_metadata import CourseMetadata
from models.settings.encoder import CourseSettingsEncoder
from openedx.core.djangoapps.content.course_structures.api.v0 import api, errors
from openedx.core.djangoapps.credit.api import is_credit_course, get_credit_requirements
from openedx.core.djangoapps.credit.tasks import update_credit_course_requirements
from openedx.core.djangoapps.models.course_details import CourseDetails
from openedx.core.djangoapps.programs.models import ProgramsApiConfig
from openedx.core.djangoapps.programs.utils import get_programs
from openedx.core.djangoapps.self_paced.models import SelfPacedConfiguration
from openedx.core.djangoapps.site_configuration import helpers as configuration_helpers
from openedx.core.lib.course_tabs import CourseTabPluginManager
from openedx.core.lib.courses import course_image_url
from openedx.core.djangolib.js_utils import dump_js_escaped_json
from student import auth
from student.auth import has_course_author_access, has_studio_write_access, has_studio_read_access
from student.roles import (
CourseInstructorRole, CourseStaffRole, CourseCreatorRole, GlobalStaff, UserBasedRole
)
from util.date_utils import get_default_time_display
from util.json_request import JsonResponse, JsonResponseBadRequest, expect_json
from util.milestones_helpers import (
is_entrance_exams_enabled,
is_prerequisite_courses_enabled,
is_valid_course_key,
set_prerequisite_courses,
)
from util.organizations_helpers import (
add_organization_course,
get_organization_by_short_name,
organizations_enabled,
)
from util.string_utils import _has_non_ascii_characters
from xmodule.contentstore.content import StaticContent
from xmodule.course_module import CourseFields
from xmodule.course_module import DEFAULT_START_DATE
from xmodule.error_module import ErrorDescriptor
from xmodule.modulestore import EdxJSONEncoder
from xmodule.modulestore.django import modulestore
from xmodule.modulestore.exceptions import ItemNotFoundError, DuplicateCourseError
from xmodule.tabs import CourseTab, CourseTabList, InvalidTabsException
log = logging.getLogger(__name__)
__all__ = ['course_info_handler', 'course_handler', 'course_listing',
'course_info_update_handler', 'course_search_index_handler',
'course_rerun_handler',
'settings_handler',
'grading_handler',
'advanced_settings_handler',
'course_notifications_handler',
'textbooks_list_handler', 'textbooks_detail_handler',
'group_configurations_list_handler', 'group_configurations_detail_handler']
class AccessListFallback(Exception):
"""
An exception that is raised whenever we need to `fall back` to fetching *all* courses
available to a user, rather than using a shorter method (i.e. fetching by group)
"""
pass
def get_course_and_check_access(course_key, user, depth=0):
"""
Internal method used to calculate and return the locator and course module
for the view functions in this file.
"""
if not has_studio_read_access(user, course_key):
raise PermissionDenied()
course_module = modulestore().get_course(course_key, depth=depth)
return course_module
def reindex_course_and_check_access(course_key, user):
"""
Internal method used to restart indexing on a course.
"""
if not has_course_author_access(user, course_key):
raise PermissionDenied()
return CoursewareSearchIndexer.do_course_reindex(modulestore(), course_key)
@login_required
def course_notifications_handler(request, course_key_string=None, action_state_id=None):
"""
Handle incoming requests for notifications in a RESTful way.
course_key_string and action_state_id must both be set; else a HttpBadResponseRequest is returned.
For each of these operations, the requesting user must have access to the course;
else a PermissionDenied error is returned.
GET
json: return json representing information about the notification (action, state, etc)
DELETE
        json: return json representing success or failure of dismissal/deletion of the notification
PUT
Raises a NotImplementedError.
POST
Raises a NotImplementedError.
"""
# ensure that we have a course and an action state
if not course_key_string or not action_state_id:
return HttpResponseBadRequest()
response_format = request.GET.get('format') or request.POST.get('format') or 'html'
course_key = CourseKey.from_string(course_key_string)
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if not has_studio_write_access(request.user, course_key):
raise PermissionDenied()
if request.method == 'GET':
return _course_notifications_json_get(action_state_id)
elif request.method == 'DELETE':
# we assume any delete requests dismiss actions from the UI
return _dismiss_notification(request, action_state_id)
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'POST':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
else:
return HttpResponseNotFound()
def _course_notifications_json_get(course_action_state_id):
"""
Return the action and the action state for the given id
"""
try:
action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
except CourseActionStateItemNotFoundError:
return HttpResponseBadRequest()
action_state_info = {
'action': action_state.action,
'state': action_state.state,
'should_display': action_state.should_display
}
return JsonResponse(action_state_info)
def _dismiss_notification(request, course_action_state_id): # pylint: disable=unused-argument
"""
Update the display of the course notification
"""
try:
action_state = CourseRerunState.objects.find_first(id=course_action_state_id)
except CourseActionStateItemNotFoundError:
# Can't dismiss a notification that doesn't exist in the first place
return HttpResponseBadRequest()
if action_state.state == CourseRerunUIStateManager.State.FAILED:
# We remove all permissions for this course key at this time, since
# no further access is required to a course that failed to be created.
remove_all_instructors(action_state.course_key)
# The CourseRerunState is no longer needed by the UI; delete
action_state.delete()
return JsonResponse({'success': True})
# pylint: disable=unused-argument
@login_required
def course_handler(request, course_key_string=None):
"""
The restful handler for course specific requests.
It provides the course tree with the necessary information for identifying and labeling the parts. The root
    will typically be a 'course' object but may not be, especially as we support other module types.
GET
html: return course listing page if not given a course id
html: return html page overview for the given course if given a course id
json: return json representing the course branch's index entry as well as dag w/ all of the children
replaced w/ json docs where each doc has {'_id': , 'display_name': , 'children': }
POST
json: create a course, return resulting json
descriptor (same as in GET course/...). Leaving off /branch/draft would imply create the course w/ default
branches. Cannot change the structure contents ('_id', 'display_name', 'children') but can change the
index entry.
PUT
json: update this course (index entry not xblock) such as repointing head, changing display name, org,
course, run. Return same json as above.
DELETE
json: delete this branch from this course (leaving off /branch/draft would imply delete the course)
"""
try:
response_format = request.GET.get('format') or request.POST.get('format') or 'html'
if response_format == 'json' or 'application/json' in request.META.get('HTTP_ACCEPT', 'application/json'):
if request.method == 'GET':
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
return JsonResponse(_course_outline_json(request, course_module))
elif request.method == 'POST': # not sure if this is only post. If one will have ids, it goes after access
return _create_or_rerun_course(request)
elif not has_studio_write_access(request.user, CourseKey.from_string(course_key_string)):
raise PermissionDenied()
elif request.method == 'PUT':
raise NotImplementedError()
elif request.method == 'DELETE':
raise NotImplementedError()
else:
return HttpResponseBadRequest()
elif request.method == 'GET': # assume html
if course_key_string is None:
return redirect(reverse("home"))
else:
return course_index(request, CourseKey.from_string(course_key_string))
else:
return HttpResponseNotFound()
except InvalidKeyError:
raise Http404
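def _example_course_outline_request():
    """
    Illustrative sketch only, not part of this module: fetching the JSON course
    outline through course_handler with Django's test client. The URL pattern,
    course id, username and password below are hypothetical placeholders.
    """
    from django.test import Client
    client = Client()
    client.login(username='studio-staff', password='hypothetical-password')
    response = client.get(
        '/course/course-v1:OrgX+CS101+2024_T1',
        HTTP_ACCEPT='application/json',
    )
    return json.loads(response.content)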
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_rerun_handler(request, course_key_string):
"""
The restful handler for course reruns.
GET
html: return html page with form to rerun a course for the given course id
"""
# Only global staff (PMs) are able to rerun courses during the soft launch
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=3)
if request.method == 'GET':
return render_to_response('course-create-rerun.html', {
'source_course_key': course_key,
'display_name': course_module.display_name,
'user': request.user,
'course_creator_status': _get_course_creator_status(request.user),
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False)
})
@login_required
@ensure_csrf_cookie
@require_GET
def course_search_index_handler(request, course_key_string):
"""
The restful handler for course indexing.
GET
html: return status of indexing task
json: return status of indexing task
"""
# Only global staff (PMs) are able to index courses
if not GlobalStaff().has_user(request.user):
raise PermissionDenied()
course_key = CourseKey.from_string(course_key_string)
content_type = request.META.get('CONTENT_TYPE', None)
if content_type is None:
content_type = "application/json; charset=utf-8"
with modulestore().bulk_operations(course_key):
try:
reindex_course_and_check_access(course_key, request.user)
except SearchIndexingError as search_err:
return HttpResponse(dump_js_escaped_json({
"user_message": search_err.error_list
}), content_type=content_type, status=500)
return HttpResponse(dump_js_escaped_json({
"user_message": _("Course has been successfully reindexed.")
}), content_type=content_type, status=200)
def _course_outline_json(request, course_module):
"""
Returns a JSON representation of the course module and recursively all of its children.
"""
return create_xblock_info(
course_module,
include_child_info=True,
course_outline=True,
include_children_predicate=lambda xblock: not xblock.category == 'vertical',
user=request.user
)
def get_in_process_course_actions(request):
"""
Get all in-process course actions
"""
return [
course for course in
CourseRerunState.objects.find_all(
exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED}, should_display=True
)
if has_studio_read_access(request.user, course.course_key)
]
def _accessible_courses_summary_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
def course_filter(course_summary):
"""
Filter out unusable and inaccessible courses
"""
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
if course_summary.location.course == 'templates':
return False
return has_studio_read_access(request.user, course_summary.id)
courses_summary = filter(course_filter, modulestore().get_course_summaries())
in_process_course_actions = get_in_process_course_actions(request)
return courses_summary, in_process_course_actions
def _accessible_courses_list(request):
"""
List all courses available to the logged in user by iterating through all the courses
"""
def course_filter(course):
"""
Filter out unusable and inaccessible courses
"""
if isinstance(course, ErrorDescriptor):
return False
# Custom Courses for edX (CCX) is an edX feature for re-using course content.
# CCXs cannot be edited in Studio (aka cms) and should not be shown in this dashboard.
if isinstance(course.id, CCXLocator):
return False
# pylint: disable=fixme
# TODO remove this condition when templates purged from db
if course.location.course == 'templates':
return False
return has_studio_read_access(request.user, course.id)
courses = filter(course_filter, modulestore().get_courses())
in_process_course_actions = get_in_process_course_actions(request)
return courses, in_process_course_actions
def _accessible_courses_list_from_groups(request):
"""
List all courses available to the logged in user by reversing access group names
"""
def filter_ccx(course_access):
""" CCXs cannot be edited in Studio and should not be shown in this dashboard """
return not isinstance(course_access.course_id, CCXLocator)
courses_list = {}
in_process_course_actions = []
instructor_courses = UserBasedRole(request.user, CourseInstructorRole.ROLE).courses_with_role()
staff_courses = UserBasedRole(request.user, CourseStaffRole.ROLE).courses_with_role()
all_courses = filter(filter_ccx, instructor_courses | staff_courses)
for course_access in all_courses:
course_key = course_access.course_id
if course_key is None:
# If the course_access does not have a course_id, it's an org-based role, so we fall back
raise AccessListFallback
if course_key not in courses_list:
# check for any course action state for this course
in_process_course_actions.extend(
CourseRerunState.objects.find_all(
exclude_args={'state': CourseRerunUIStateManager.State.SUCCEEDED},
should_display=True,
course_key=course_key,
)
)
# check for the course itself
try:
course = modulestore().get_course(course_key)
except ItemNotFoundError:
# If a user has access to a course that doesn't exist, don't do anything with that course
pass
if course is not None and not isinstance(course, ErrorDescriptor):
# ignore deleted, errored or ccx courses
courses_list[course_key] = course
return courses_list.values(), in_process_course_actions
def _accessible_libraries_list(user):
"""
List all libraries available to the logged in user by iterating through all libraries
"""
# No need to worry about ErrorDescriptors - split's get_libraries() never returns them.
return [lib for lib in modulestore().get_libraries() if has_studio_read_access(user, lib.location.library_key)]
@login_required
@ensure_csrf_cookie
def course_listing(request):
"""
List all courses available to the logged in user
"""
courses, in_process_course_actions = get_courses_accessible_to_user(request)
libraries = _accessible_libraries_list(request.user) if LIBRARIES_ENABLED else []
programs_config = ProgramsApiConfig.current()
raw_programs = get_programs(request.user) if programs_config.is_studio_tab_enabled else []
# Sort programs alphabetically by name.
# TODO: Support ordering in the Programs API itself.
programs = sorted(raw_programs, key=lambda p: p['name'].lower())
def format_in_process_course_view(uca):
"""
        Return a dict of the data which the view requires for each course run that has not yet succeeded
"""
return {
'display_name': uca.display_name,
'course_key': unicode(uca.course_key),
'org': uca.course_key.org,
'number': uca.course_key.course,
'run': uca.course_key.run,
'is_failed': True if uca.state == CourseRerunUIStateManager.State.FAILED else False,
'is_in_progress': True if uca.state == CourseRerunUIStateManager.State.IN_PROGRESS else False,
'dismiss_link': reverse_course_url(
'course_notifications_handler',
uca.course_key,
kwargs={
'action_state_id': uca.id,
},
) if uca.state == CourseRerunUIStateManager.State.FAILED else ''
}
def format_library_for_view(library):
"""
Return a dict of the data which the view requires for each library
"""
return {
'display_name': library.display_name,
'library_key': unicode(library.location.library_key),
'url': reverse_library_url('library_handler', unicode(library.location.library_key)),
'org': library.display_org_with_default,
'number': library.display_number_with_default,
'can_edit': has_studio_write_access(request.user, library.location.library_key),
}
courses = _remove_in_process_courses(courses, in_process_course_actions)
in_process_course_actions = [format_in_process_course_view(uca) for uca in in_process_course_actions]
return render_to_response('index.html', {
'courses': courses,
'in_process_course_actions': in_process_course_actions,
'libraries_enabled': LIBRARIES_ENABLED,
'libraries': [format_library_for_view(lib) for lib in libraries],
'show_new_library_button': LIBRARIES_ENABLED and request.user.is_active,
'user': request.user,
'request_course_creator_url': reverse('contentstore.views.request_course_creator'),
'course_creator_status': _get_course_creator_status(request.user),
'rerun_creator_status': GlobalStaff().has_user(request.user),
'allow_unicode_course_id': settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID', False),
'allow_course_reruns': settings.FEATURES.get('ALLOW_COURSE_RERUNS', True),
'is_programs_enabled': programs_config.is_studio_tab_enabled and request.user.is_staff,
'programs': programs,
'program_authoring_url': reverse('programs'),
})
def _get_rerun_link_for_item(course_key):
""" Returns the rerun link for the given course key. """
return reverse_course_url('course_rerun_handler', course_key)
def _deprecated_blocks_info(course_module, deprecated_block_types):
"""
Returns deprecation information about `deprecated_block_types`
Arguments:
course_module (CourseDescriptor): course object
deprecated_block_types (list): list of deprecated blocks types
Returns:
Dict with following keys:
block_types (list): list containing types of all deprecated blocks
        block_types_enabled (bool): True if any of `deprecated_block_types` are present in the Advanced Module List, else False
blocks (list): List of `deprecated_block_types` component names and their parent's url
advance_settings_url (str): URL to advance settings page
"""
data = {
'block_types': deprecated_block_types,
'block_types_enabled': any(
block_type in course_module.advanced_modules for block_type in deprecated_block_types
),
'blocks': [],
'advance_settings_url': reverse_course_url('advanced_settings_handler', course_module.id)
}
try:
structure_data = api.course_structure(course_module.id, block_types=deprecated_block_types)
except errors.CourseStructureNotAvailableError:
return data
for block in structure_data['blocks'].values():
data['blocks'].append([reverse_usage_url('container_handler', block['parent']), block['display_name']])
return data
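# Illustrative example only (hypothetical values): for a course that still has a
# deprecated 'poll' block enabled, _deprecated_blocks_info returns a dict shaped
# roughly like:
#
#     {
#         'block_types': ['poll'],
#         'block_types_enabled': True,
#         'blocks': [['/container/<parent_usage_key>', 'Weekly Poll']],
#         'advance_settings_url': '/settings/advanced/<course_key>'
#     }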
@login_required
@ensure_csrf_cookie
def course_index(request, course_key):
"""
Display an editable course overview.
org, course, name: Attributes of the Location for the item to edit
"""
# A depth of None implies the whole course. The course outline needs this in order to compute has_changes.
# A unit may not have a draft version, but one of its components could, and hence the unit itself has changes.
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user, depth=None)
if not course_module:
raise Http404
lms_link = get_lms_link_for_item(course_module.location)
reindex_link = None
if settings.FEATURES.get('ENABLE_COURSEWARE_INDEX', False):
reindex_link = "/course/{course_id}/search_reindex".format(course_id=unicode(course_key))
sections = course_module.get_children()
course_structure = _course_outline_json(request, course_module)
locator_to_show = request.GET.get('show', None)
course_release_date = get_default_time_display(course_module.start) if course_module.start != DEFAULT_START_DATE else _("Unscheduled")
settings_url = reverse_course_url('settings_handler', course_key)
try:
current_action = CourseRerunState.objects.find_first(course_key=course_key, should_display=True)
except (ItemNotFoundError, CourseActionStateItemNotFoundError):
current_action = None
deprecated_blocks_info = _deprecated_blocks_info(course_module, settings.DEPRECATED_BLOCK_TYPES)
return render_to_response('course_outline.html', {
'context_course': course_module,
'lms_link': lms_link,
'sections': sections,
'course_structure': course_structure,
'initial_state': course_outline_initial_state(locator_to_show, course_structure) if locator_to_show else None,
'rerun_notification_id': current_action.id if current_action else None,
'course_release_date': course_release_date,
'settings_url': settings_url,
'reindex_link': reindex_link,
'deprecated_blocks_info': deprecated_blocks_info,
'notification_dismiss_url': reverse_course_url(
'course_notifications_handler',
current_action.course_key,
kwargs={
'action_state_id': current_action.id,
},
) if current_action else None,
})
def get_courses_accessible_to_user(request):
"""
    Try to get all courses by first reversing django groups, and fall back to the old method if that fails.
    Note: the overhead of pymongo reads will increase if getting courses from django groups fails.
"""
if GlobalStaff().has_user(request.user):
# user has global access so no need to get courses from django groups
courses, in_process_course_actions = _accessible_courses_summary_list(request)
else:
try:
courses, in_process_course_actions = _accessible_courses_list_from_groups(request)
except AccessListFallback:
# user have some old groups or there was some error getting courses from django groups
# so fallback to iterating through all courses
courses, in_process_course_actions = _accessible_courses_summary_list(request)
return courses, in_process_course_actions
def _remove_in_process_courses(courses, in_process_course_actions):
"""
    Removes any in-process courses from the courses list. "In-process" refers to courses
    that are in the process of being generated for a re-run.
"""
def format_course_for_view(course):
"""
Return a dict of the data which the view requires for each course
"""
return {
'display_name': course.display_name,
'course_key': unicode(course.location.course_key),
'url': reverse_course_url('course_handler', course.id),
'lms_link': get_lms_link_for_item(course.location),
'rerun_link': _get_rerun_link_for_item(course.id),
'org': course.display_org_with_default,
'number': course.display_number_with_default,
'run': course.location.run
}
in_process_action_course_keys = [uca.course_key for uca in in_process_course_actions]
courses = [
format_course_for_view(course)
for course in courses
if not isinstance(course, ErrorDescriptor) and (course.id not in in_process_action_course_keys)
]
return courses
def course_outline_initial_state(locator_to_show, course_structure):
"""
Returns the desired initial state for the course outline view. If the 'show' request parameter
was provided, then the view's initial state will be to have the desired item fully expanded
and to scroll to see the new item.
"""
def find_xblock_info(xblock_info, locator):
"""
Finds the xblock info for the specified locator.
"""
if xblock_info['id'] == locator:
return xblock_info
children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
if children:
for child_xblock_info in children:
result = find_xblock_info(child_xblock_info, locator)
if result:
return result
return None
def collect_all_locators(locators, xblock_info):
"""
Collect all the locators for an xblock and its children.
"""
locators.append(xblock_info['id'])
children = xblock_info['child_info']['children'] if xblock_info.get('child_info', None) else None
if children:
for child_xblock_info in children:
collect_all_locators(locators, child_xblock_info)
selected_xblock_info = find_xblock_info(course_structure, locator_to_show)
if not selected_xblock_info:
return None
expanded_locators = []
collect_all_locators(expanded_locators, selected_xblock_info)
return {
'locator_to_show': locator_to_show,
'expanded_locators': expanded_locators
}
@expect_json
def _create_or_rerun_course(request):
"""
To be called by requests that create a new destination course (i.e., create_new_course and rerun_course)
Returns the destination course_key and overriding fields for the new course.
Raises DuplicateCourseError and InvalidKeyError
"""
if not auth.user_has_role(request.user, CourseCreatorRole()):
raise PermissionDenied()
try:
org = request.json.get('org')
course = request.json.get('number', request.json.get('course'))
display_name = request.json.get('display_name')
# force the start date for reruns and allow us to override start via the client
start = request.json.get('start', CourseFields.start.default)
run = request.json.get('run')
# allow/disable unicode characters in course_id according to settings
if not settings.FEATURES.get('ALLOW_UNICODE_COURSE_ID'):
if _has_non_ascii_characters(org) or _has_non_ascii_characters(course) or _has_non_ascii_characters(run):
return JsonResponse(
{'error': _('Special characters not allowed in organization, course number, and course run.')},
status=400
)
fields = {'start': start}
if display_name is not None:
fields['display_name'] = display_name
# Set a unique wiki_slug for newly created courses. To maintain active wiki_slugs for
# existing xml courses this cannot be changed in CourseDescriptor.
        # TODO get rid of defining wiki slug in this org/course/run specific way and reconcile
# w/ xmodule.course_module.CourseDescriptor.__init__
wiki_slug = u"{0}.{1}.{2}".format(org, course, run)
definition_data = {'wiki_slug': wiki_slug}
fields.update(definition_data)
if 'source_course_key' in request.json:
return _rerun_course(request, org, course, run, fields)
else:
return _create_new_course(request, org, course, run, fields)
except DuplicateCourseError:
return JsonResponse({
'ErrMsg': _(
'There is already a course defined with the same '
'organization and course number. Please '
'change either organization or course number to be unique.'
),
'OrgErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
'CourseErrMsg': _(
'Please change either the organization or '
'course number so that it is unique.'),
})
except InvalidKeyError as error:
return JsonResponse({
"ErrMsg": _("Unable to create course '{name}'.\n\n{err}").format(name=display_name, err=error.message)}
)
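# Illustrative example only: the JSON body _create_or_rerun_course reads when
# rerunning an existing course. The field names come from the reads above; the
# concrete values are hypothetical.
#
#     {
#         "source_course_key": "course-v1:OrgX+CS101+2023_T1",
#         "org": "OrgX",
#         "number": "CS101",
#         "run": "2024_T1",
#         "display_name": "Example Course (Rerun)"
#     }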
def _create_new_course(request, org, number, run, fields):
"""
Create a new course.
Returns the URL for the course overview page.
Raises DuplicateCourseError if the course already exists
"""
org_data = get_organization_by_short_name(org)
if not org_data and organizations_enabled():
return JsonResponse(
{'error': _('You must link this course to an organization in order to continue. '
'Organization you selected does not exist in the system, '
'you will need to add it to the system')},
status=400
)
store_for_new_course = modulestore().default_modulestore.get_modulestore_type()
new_course = create_new_course_in_store(store_for_new_course, request.user, org, number, run, fields)
add_organization_course(org_data, new_course.id)
return JsonResponse({
'url': reverse_course_url('course_handler', new_course.id),
'course_key': unicode(new_course.id),
})
def create_new_course_in_store(store, user, org, number, run, fields):
"""
Create course in store w/ handling instructor enrollment, permissions, and defaulting the wiki slug.
Separated out b/c command line course creation uses this as well as the web interface.
"""
# Set default language from settings and enable web certs
fields.update({
'language': getattr(settings, 'DEFAULT_COURSE_LANGUAGE', 'en'),
'cert_html_view_enabled': True,
})
with modulestore().default_store(store):
# Creating the course raises DuplicateCourseError if an existing course with this org/name is found
new_course = modulestore().create_course(
org,
number,
run,
user.id,
fields=fields,
)
# Make sure user has instructor and staff access to the new course
add_instructor(new_course.id, user, user)
add_course_mode(new_course.id)
# Initialize permissions for user in the new course
initialize_permissions(new_course.id, user)
return new_course
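def _example_create_course_from_script():
    """
    Illustrative sketch only, not part of this module: how a management command
    or script might call create_new_course_in_store directly. The username, org,
    number and run below are hypothetical placeholders.
    """
    from django.contrib.auth.models import User
    user = User.objects.get(username='studio-staff')
    return create_new_course_in_store(
        'split',                      # modulestore type, as used elsewhere in this file
        user,
        'OrgX',
        'CS101',
        '2024_T1',
        {'display_name': 'Example Course'},
    )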
def _rerun_course(request, org, number, run, fields):
"""
Reruns an existing course.
Returns the URL for the course listing page.
"""
source_course_key = CourseKey.from_string(request.json.get('source_course_key'))
# verify user has access to the original course
if not has_studio_write_access(request.user, source_course_key):
raise PermissionDenied()
# create destination course key
store = modulestore()
with store.default_store('split'):
destination_course_key = store.make_course_key(org, number, run)
# verify org course and run don't already exist
if store.has_course(destination_course_key, ignore_case=True):
raise DuplicateCourseError(source_course_key, destination_course_key)
# Make sure user has instructor and staff access to the destination course
# so the user can see the updated status for that course
add_instructor(destination_course_key, request.user, request.user)
# Mark the action as initiated
CourseRerunState.objects.initiated(source_course_key, destination_course_key, request.user, fields['display_name'])
# Clear the fields that must be reset for the rerun
fields['advertised_start'] = None
# Rerun the course as a new celery task
json_fields = json.dumps(fields, cls=EdxJSONEncoder)
rerun_course.delay(unicode(source_course_key), unicode(destination_course_key), request.user.id, json_fields)
# Return course listing page
return JsonResponse({
'url': reverse_url('course_handler'),
'destination_course_key': unicode(destination_course_key)
})
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(["GET"])
def course_info_handler(request, course_key_string):
"""
GET
html: return html for editing the course info handouts and updates.
"""
try:
course_key = CourseKey.from_string(course_key_string)
except InvalidKeyError:
raise Http404
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if not course_module:
raise Http404
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
return render_to_response(
'course_info.html',
{
'context_course': course_module,
'updates_url': reverse_course_url('course_info_update_handler', course_key),
'handouts_locator': course_key.make_usage_key('course_info', 'handouts'),
'base_asset_url': StaticContent.get_base_url_path_for_course_assets(course_module.id),
'push_notification_enabled': push_notification_enabled()
}
)
else:
return HttpResponseBadRequest("Only supports html requests")
# pylint: disable=unused-argument
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def course_info_update_handler(request, course_key_string, provided_id=None):
"""
restful CRUD operations on course_info updates.
provided_id should be none if it's new (create) and index otherwise.
GET
json: return the course info update models
POST
json: create an update
PUT or DELETE
json: change an existing update
"""
if 'application/json' not in request.META.get('HTTP_ACCEPT', 'application/json'):
return HttpResponseBadRequest("Only supports json requests")
course_key = CourseKey.from_string(course_key_string)
usage_key = course_key.make_usage_key('course_info', 'updates')
if provided_id == '':
provided_id = None
# check that logged in user has permissions to this item (GET shouldn't require this level?)
if not has_studio_write_access(request.user, usage_key.course_key):
raise PermissionDenied()
if request.method == 'GET':
course_updates = get_course_updates(usage_key, provided_id, request.user.id)
if isinstance(course_updates, dict) and course_updates.get('error'):
return JsonResponse(course_updates, course_updates.get('status', 400))
else:
return JsonResponse(course_updates)
elif request.method == 'DELETE':
try:
return JsonResponse(delete_course_update(usage_key, request.json, provided_id, request.user))
except:
return HttpResponseBadRequest(
"Failed to delete",
content_type="text/plain"
)
    # POST and PUT are handled identically here; Django sometimes rewrites one to the other:
elif request.method in ('POST', 'PUT'):
try:
return JsonResponse(update_course_updates(usage_key, request.json, provided_id, request.user))
except:
return HttpResponseBadRequest(
"Failed to save",
content_type="text/plain"
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "PUT", "POST"))
@expect_json
def settings_handler(request, course_key_string):
"""
Course settings for dates and about pages
GET
html: get the page
json: get the CourseDetails model
PUT
json: update the Course and About xblocks through the CourseDetails model
"""
course_key = CourseKey.from_string(course_key_string)
credit_eligibility_enabled = settings.FEATURES.get('ENABLE_CREDIT_ELIGIBILITY', False)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
upload_asset_url = reverse_course_url('assets_handler', course_key)
            # see if the ORG of this course can be attributed to a defined configuration. In that case, the
# course about page should be editable in Studio
marketing_site_enabled = configuration_helpers.get_value_for_org(
course_module.location.org,
'ENABLE_MKTG_SITE',
settings.FEATURES.get('ENABLE_MKTG_SITE', False)
)
enable_extended_course_details = configuration_helpers.get_value_for_org(
course_module.location.org,
'ENABLE_EXTENDED_COURSE_DETAILS',
settings.FEATURES.get('ENABLE_EXTENDED_COURSE_DETAILS', False)
)
about_page_editable = not marketing_site_enabled
enrollment_end_editable = GlobalStaff().has_user(request.user) or not marketing_site_enabled
short_description_editable = settings.FEATURES.get('EDITABLE_SHORT_DESCRIPTION', True)
self_paced_enabled = SelfPacedConfiguration.current().enabled
settings_context = {
'context_course': course_module,
'course_locator': course_key,
'lms_link_for_about_page': utils.get_lms_link_for_about_page(course_key),
'course_image_url': course_image_url(course_module, 'course_image'),
'banner_image_url': course_image_url(course_module, 'banner_image'),
'video_thumbnail_image_url': course_image_url(course_module, 'video_thumbnail_image'),
'details_url': reverse_course_url('settings_handler', course_key),
'about_page_editable': about_page_editable,
'short_description_editable': short_description_editable,
'upload_asset_url': upload_asset_url,
'course_handler_url': reverse_course_url('course_handler', course_key),
'language_options': settings.ALL_LANGUAGES,
'category_options': settings.COURSE_CATEGORIES,
'vendor_image_url': course_image_url(course_module, 'vendor_image'),
'credit_eligibility_enabled': credit_eligibility_enabled,
'is_credit_course': False,
'show_min_grade_warning': False,
'enrollment_end_editable': enrollment_end_editable,
'is_prerequisite_courses_enabled': is_prerequisite_courses_enabled(),
'is_entrance_exams_enabled': is_entrance_exams_enabled(),
'self_paced_enabled': self_paced_enabled,
'enable_extended_course_details': enable_extended_course_details
}
if is_prerequisite_courses_enabled():
courses, in_process_course_actions = get_courses_accessible_to_user(request)
# exclude current course from the list of available courses
courses = [course for course in courses if course.id != course_key]
if courses:
courses = _remove_in_process_courses(courses, in_process_course_actions)
settings_context.update({'possible_pre_requisite_courses': courses})
if credit_eligibility_enabled:
if is_credit_course(course_key):
                    # get all credit eligibility requirements
credit_requirements = get_credit_requirements(course_key)
# pair together requirements with same 'namespace' values
paired_requirements = {}
for requirement in credit_requirements:
namespace = requirement.pop("namespace")
paired_requirements.setdefault(namespace, []).append(requirement)
# if 'minimum_grade_credit' of a course is not set or 0 then
# show warning message to course author.
show_min_grade_warning = False if course_module.minimum_grade_credit > 0 else True
settings_context.update(
{
'is_credit_course': True,
'credit_requirements': paired_requirements,
'show_min_grade_warning': show_min_grade_warning,
}
)
return render_to_response('settings.html', settings_context)
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
course_details = CourseDetails.fetch(course_key)
return JsonResponse(
course_details,
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
# For every other possible method type submitted by the caller...
else:
# if pre-requisite course feature is enabled set pre-requisite course
if is_prerequisite_courses_enabled():
prerequisite_course_keys = request.json.get('pre_requisite_courses', [])
if prerequisite_course_keys:
if not all(is_valid_course_key(course_key) for course_key in prerequisite_course_keys):
return JsonResponseBadRequest({"error": _("Invalid prerequisite course key")})
set_prerequisite_courses(course_key, prerequisite_course_keys)
# If the entrance exams feature has been enabled, we'll need to check for some
# feature-specific settings and handle them accordingly
# We have to be careful that we're only executing the following logic if we actually
# need to create or delete an entrance exam from the specified course
if is_entrance_exams_enabled():
course_entrance_exam_present = course_module.entrance_exam_enabled
entrance_exam_enabled = request.json.get('entrance_exam_enabled', '') == 'true'
ee_min_score_pct = request.json.get('entrance_exam_minimum_score_pct', None)
# If the entrance exam box on the settings screen has been checked...
if entrance_exam_enabled:
# Load the default minimum score threshold from settings, then try to override it
entrance_exam_minimum_score_pct = float(settings.ENTRANCE_EXAM_MIN_SCORE_PCT)
if ee_min_score_pct:
entrance_exam_minimum_score_pct = float(ee_min_score_pct)
if entrance_exam_minimum_score_pct.is_integer():
entrance_exam_minimum_score_pct = entrance_exam_minimum_score_pct / 100
entrance_exam_minimum_score_pct = unicode(entrance_exam_minimum_score_pct)
# If there's already an entrance exam defined, we'll update the existing one
if course_entrance_exam_present:
exam_data = {
'entrance_exam_minimum_score_pct': entrance_exam_minimum_score_pct
}
update_entrance_exam(request, course_key, exam_data)
# If there's no entrance exam defined, we'll create a new one
else:
create_entrance_exam(request, course_key, entrance_exam_minimum_score_pct)
# If the entrance exam box on the settings screen has been unchecked,
# and the course has an entrance exam attached...
elif not entrance_exam_enabled and course_entrance_exam_present:
delete_entrance_exam(request, course_key)
# Perform the normal update workflow for the CourseDetails model
return JsonResponse(
CourseDetails.update_from_json(course_key, request.json, request.user),
encoder=CourseSettingsEncoder
)
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
@expect_json
def grading_handler(request, course_key_string, grader_index=None):
"""
Course Grading policy configuration
GET
html: get the page
json no grader_index: get the CourseGrading model (graceperiod, cutoffs, and graders)
json w/ grader_index: get the specific grader
PUT
json no grader_index: update the Course through the CourseGrading model
json w/ grader_index: create or update the specific grader (create if index out of range)
"""
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
course_details = CourseGradingModel.fetch(course_key)
return render_to_response('settings_graders.html', {
'context_course': course_module,
'course_locator': course_key,
'course_details': course_details,
'grading_url': reverse_course_url('grading_handler', course_key),
'is_credit_course': is_credit_course(course_key),
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
if grader_index is None:
return JsonResponse(
CourseGradingModel.fetch(course_key),
# encoder serializes dates, old locations, and instances
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(CourseGradingModel.fetch_grader(course_key, grader_index))
elif request.method in ('POST', 'PUT'): # post or put, doesn't matter.
# update credit course requirements if 'minimum_grade_credit'
# field value is changed
if 'minimum_grade_credit' in request.json:
update_credit_course_requirements.delay(unicode(course_key))
# None implies update the whole model (cutoffs, graceperiod, and graders) not a specific grader
if grader_index is None:
return JsonResponse(
CourseGradingModel.update_from_json(course_key, request.json, request.user),
encoder=CourseSettingsEncoder
)
else:
return JsonResponse(
CourseGradingModel.update_grader_from_json(course_key, request.json, request.user)
)
elif request.method == "DELETE" and grader_index is not None:
CourseGradingModel.delete_grader(course_key, grader_index, request.user)
return JsonResponse()
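# --- Illustrative sketch (not part of the upstream module) -----------------
# grading_handler above routes on ``grader_index``: without it the whole
# grading model (grace period, cutoffs, graders) is replaced, with it a single
# grader is created or updated.  The dicts below sketch what a client might
# PUT; the field names are assumptions inferred from how CourseGradingModel is
# used here, not a verified schema.
def _example_grading_payloads():
    """Return (whole-model payload, single-grader payload) sample dicts."""
    whole_model = {
        'graders': [
            {'type': 'Homework', 'min_count': 12, 'drop_count': 2, 'weight': 15},
        ],
        'grade_cutoffs': {'Pass': 0.5},
        'grace_period': {'hours': 0, 'minutes': 0},
        'minimum_grade_credit': 0.8,  # presence of this key triggers the credit task above
    }
    single_grader = {'type': 'Final Exam', 'min_count': 1, 'drop_count': 0, 'weight': 40}
    return whole_model, single_grader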
def _refresh_course_tabs(request, course_module):
"""
Automatically adds/removes tabs if changes to the course require them.
Raises:
InvalidTabsException: raised if there's a problem with the new version of the tabs.
"""
def update_tab(tabs, tab_type, tab_enabled):
"""
Adds or removes a course tab based upon whether it is enabled.
"""
tab_panel = {
"type": tab_type.type,
}
has_tab = tab_panel in tabs
if tab_enabled and not has_tab:
tabs.append(CourseTab.from_json(tab_panel))
elif not tab_enabled and has_tab:
tabs.remove(tab_panel)
course_tabs = copy.copy(course_module.tabs)
# Additionally update any tabs that are provided by non-dynamic course views
for tab_type in CourseTabPluginManager.get_tab_types():
if not tab_type.is_dynamic and tab_type.is_default:
tab_enabled = tab_type.is_enabled(course_module, user=request.user)
update_tab(course_tabs, tab_type, tab_enabled)
CourseTabList.validate_tabs(course_tabs)
# Save the tabs into the course if they have been changed
if course_tabs != course_module.tabs:
course_module.tabs = course_tabs
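# --- Illustrative sketch (not part of the upstream module) -----------------
# ``update_tab`` above toggles a tab's presence according to whether its
# feature is enabled.  A plain-dict analogue of that append/remove behaviour
# (no CourseTab objects involved, purely for illustration):
def _update_tab_example():
    tabs = [{'type': 'wiki'}]
    enabled = True
    if enabled and {'type': 'pdf_textbooks'} not in tabs:
        tabs.append({'type': 'pdf_textbooks'})     # enabling adds the tab
    enabled = False
    if not enabled and {'type': 'pdf_textbooks'} in tabs:
        tabs.remove({'type': 'pdf_textbooks'})     # disabling removes it again
    return tabs  # back to [{'type': 'wiki'}]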
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT"))
@expect_json
def advanced_settings_handler(request, course_key_string):
"""
Course settings configuration
GET
html: get the page
json: get the model
PUT, POST
json: update the Course's settings. The payload is a json rep of the
metadata dicts.
"""
course_key = CourseKey.from_string(course_key_string)
with modulestore().bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', '') and request.method == 'GET':
return render_to_response('settings_advanced.html', {
'context_course': course_module,
'advanced_dict': CourseMetadata.fetch(course_module),
'advanced_settings_url': reverse_course_url('advanced_settings_handler', course_key)
})
elif 'application/json' in request.META.get('HTTP_ACCEPT', ''):
if request.method == 'GET':
return JsonResponse(CourseMetadata.fetch(course_module))
else:
try:
# validate data formats and update the course module.
# Note: don't update mongo yet, but wait until after any tabs are changed
is_valid, errors, updated_data = CourseMetadata.validate_and_update_from_json(
course_module,
request.json,
user=request.user,
)
if is_valid:
try:
# update the course tabs if required by any setting changes
_refresh_course_tabs(request, course_module)
except InvalidTabsException as err:
log.exception(err.message)
response_message = [
{
'message': _('An error occurred while trying to save your tabs'),
'model': {'display_name': _('Tabs Exception')}
}
]
return JsonResponseBadRequest(response_message)
# now update mongo
modulestore().update_item(course_module, request.user.id)
return JsonResponse(updated_data)
else:
return JsonResponseBadRequest(errors)
# Handle all errors that validation doesn't catch
except (TypeError, ValueError, InvalidTabsException) as err:
return HttpResponseBadRequest(
django.utils.html.escape(err.message),
content_type="text/plain"
)
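# --- Illustrative sketch (not part of the upstream module) -----------------
# The PUT/POST payload for advanced_settings_handler is "a json rep of the
# metadata dicts".  Judging from how CourseMetadata.validate_and_update_from_json
# is called above, it is assumed to look roughly like the dict below, with each
# setting wrapped in a {"value": ...} envelope (an assumption, not a verified
# schema; the setting names are examples only).
def _example_advanced_settings_payload():
    return {
        'advanced_modules': {'value': ['split_test']},
        'days_early_for_beta': {'value': 2},
    }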
class TextbookValidationError(Exception):
"An error thrown when a textbook input is invalid"
pass
def validate_textbooks_json(text):
"""
    Validate the given text as representing a list of PDF textbooks
"""
try:
textbooks = json.loads(text)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbooks, (list, tuple)):
raise TextbookValidationError("must be JSON list")
for textbook in textbooks:
validate_textbook_json(textbook)
# check specified IDs for uniqueness
all_ids = [textbook["id"] for textbook in textbooks if "id" in textbook]
unique_ids = set(all_ids)
if len(all_ids) > len(unique_ids):
raise TextbookValidationError("IDs must be unique")
return textbooks
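# --- Illustrative examples (not part of the upstream module) ---------------
# The first string passes validate_textbooks_json; the second raises
# TextbookValidationError because the two "id" values collide.
_VALID_TEXTBOOKS_JSON = '[{"tab_title": "Calculus", "id": "1"}, {"tab_title": "Algebra"}]'
_DUPLICATE_ID_TEXTBOOKS_JSON = '[{"tab_title": "A", "id": "1"}, {"tab_title": "B", "id": "1"}]'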
def validate_textbook_json(textbook):
"""
    Validate the given textbook (a dict, or a JSON string of one) as a single PDF textbook
"""
if isinstance(textbook, basestring):
try:
textbook = json.loads(textbook)
except ValueError:
raise TextbookValidationError("invalid JSON")
if not isinstance(textbook, dict):
raise TextbookValidationError("must be JSON object")
if not textbook.get("tab_title"):
raise TextbookValidationError("must have tab_title")
tid = unicode(textbook.get("id", ""))
if tid and not tid[0].isdigit():
raise TextbookValidationError("textbook ID must start with a digit")
return textbook
def assign_textbook_id(textbook, used_ids=()):
"""
Return an ID that can be assigned to a textbook
and doesn't match the used_ids
"""
tid = Location.clean(textbook["tab_title"])
if not tid[0].isdigit():
# stick a random digit in front
tid = random.choice(string.digits) + tid
while tid in used_ids:
# add a random ASCII character to the end
tid = tid + random.choice(string.ascii_lowercase)
return tid
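# --- Illustrative sketch (not part of the upstream module) -----------------
# assign_textbook_id slugifies the tab title (Location.clean is roughly a
# slugify), prefixes a random digit when the slug does not start with one, and
# keeps appending random lowercase letters until the candidate no longer
# clashes with used_ids.  A deterministic analogue of that collision loop (the
# real code draws random characters instead of the fixed 'x'):
def _unique_id_example(candidate, used_ids):
    while candidate in used_ids:
        candidate += 'x'
    return candidate
# _unique_id_example('1calc', {'1calc', '1calcx'}) -> '1calcxx'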
@require_http_methods(("GET", "POST", "PUT"))
@login_required
@ensure_csrf_cookie
def textbooks_list_handler(request, course_key_string):
"""
A RESTful handler for textbook collections.
GET
html: return textbook list page (Backbone application)
json: return JSON representation of all textbooks in this course
POST
json: create a new textbook for this course
PUT
json: overwrite all textbooks in the course with the given list
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
if "application/json" not in request.META.get('HTTP_ACCEPT', 'text/html'):
# return HTML page
upload_asset_url = reverse_course_url('assets_handler', course_key)
textbook_url = reverse_course_url('textbooks_list_handler', course_key)
return render_to_response('textbooks.html', {
'context_course': course,
'textbooks': course.pdf_textbooks,
'upload_asset_url': upload_asset_url,
'textbook_url': textbook_url,
})
# from here on down, we know the client has requested JSON
if request.method == 'GET':
return JsonResponse(course.pdf_textbooks)
elif request.method == 'PUT':
try:
textbooks = validate_textbooks_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
tids = set(t["id"] for t in textbooks if "id" in t)
for textbook in textbooks:
if "id" not in textbook:
tid = assign_textbook_id(textbook, tids)
textbook["id"] = tid
tids.add(tid)
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append(CourseTab.load('pdf_textbooks'))
course.pdf_textbooks = textbooks
store.update_item(course, request.user.id)
return JsonResponse(course.pdf_textbooks)
elif request.method == 'POST':
# create a new textbook for the course
try:
textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if not textbook.get("id"):
tids = set(t["id"] for t in course.pdf_textbooks if "id" in t)
textbook["id"] = assign_textbook_id(textbook, tids)
existing = course.pdf_textbooks
existing.append(textbook)
course.pdf_textbooks = existing
if not any(tab['type'] == 'pdf_textbooks' for tab in course.tabs):
course.tabs.append(CourseTab.load('pdf_textbooks'))
store.update_item(course, request.user.id)
resp = JsonResponse(textbook, status=201)
resp["Location"] = reverse_course_url(
'textbooks_detail_handler',
course.id,
kwargs={'textbook_id': textbook["id"]}
)
return resp
@login_required
@ensure_csrf_cookie
@require_http_methods(("GET", "POST", "PUT", "DELETE"))
def textbooks_detail_handler(request, course_key_string, textbook_id):
"""
JSON API endpoint for manipulating a textbook via its internal ID.
Used by the Backbone application.
GET
json: return JSON representation of textbook
POST or PUT
json: update textbook based on provided information
DELETE
json: remove textbook
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course_module = get_course_and_check_access(course_key, request.user)
matching_id = [tb for tb in course_module.pdf_textbooks
if unicode(tb.get("id")) == unicode(textbook_id)]
if matching_id:
textbook = matching_id[0]
else:
textbook = None
if request.method == 'GET':
if not textbook:
return JsonResponse(status=404)
return JsonResponse(textbook)
        elif request.method in ('POST', 'PUT'):
            # POST or PUT both land here; Django sometimes rewrites one
            # method into the other, so they are handled identically
try:
new_textbook = validate_textbook_json(request.body)
except TextbookValidationError as err:
return JsonResponse({"error": err.message}, status=400)
new_textbook["id"] = textbook_id
if textbook:
i = course_module.pdf_textbooks.index(textbook)
new_textbooks = course_module.pdf_textbooks[0:i]
new_textbooks.append(new_textbook)
new_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = new_textbooks
else:
course_module.pdf_textbooks.append(new_textbook)
store.update_item(course_module, request.user.id)
return JsonResponse(new_textbook, status=201)
elif request.method == 'DELETE':
if not textbook:
return JsonResponse(status=404)
i = course_module.pdf_textbooks.index(textbook)
remaining_textbooks = course_module.pdf_textbooks[0:i]
remaining_textbooks.extend(course_module.pdf_textbooks[i + 1:])
course_module.pdf_textbooks = remaining_textbooks
store.update_item(course_module, request.user.id)
return JsonResponse()
def remove_content_or_experiment_group(request, store, course, configuration, group_configuration_id, group_id=None):
"""
Remove content group or experiment group configuration only if it's not in use.
"""
configuration_index = course.user_partitions.index(configuration)
if configuration.scheme.name == RANDOM_SCHEME:
usages = GroupConfiguration.get_content_experiment_usage_info(store, course)
used = int(group_configuration_id) in usages
if used:
return JsonResponse(
{"error": _("This group configuration is in use and cannot be deleted.")},
status=400
)
course.user_partitions.pop(configuration_index)
elif configuration.scheme.name == COHORT_SCHEME:
if not group_id:
return JsonResponse(status=404)
group_id = int(group_id)
usages = GroupConfiguration.get_content_groups_usage_info(store, course)
used = group_id in usages
if used:
return JsonResponse(
{"error": _("This content group is in use and cannot be deleted.")},
status=400
)
matching_groups = [group for group in configuration.groups if group.id == group_id]
if matching_groups:
group_index = configuration.groups.index(matching_groups[0])
configuration.groups.pop(group_index)
else:
return JsonResponse(status=404)
course.user_partitions[configuration_index] = configuration
store.update_item(course, request.user.id)
return JsonResponse(status=204)
@require_http_methods(("GET", "POST"))
@login_required
@ensure_csrf_cookie
def group_configurations_list_handler(request, course_key_string):
"""
A RESTful handler for Group Configurations
GET
html: return Group Configurations list page (Backbone application)
POST
json: create new group configuration
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
if 'text/html' in request.META.get('HTTP_ACCEPT', 'text/html'):
group_configuration_url = reverse_course_url('group_configurations_list_handler', course_key)
course_outline_url = reverse_course_url('course_handler', course_key)
should_show_experiment_groups = are_content_experiments_enabled(course)
if should_show_experiment_groups:
experiment_group_configurations = GroupConfiguration.get_split_test_partitions_with_usage(store, course)
else:
experiment_group_configurations = None
content_group_configuration = GroupConfiguration.get_or_create_content_group(store, course)
return render_to_response('group_configurations.html', {
'context_course': course,
'group_configuration_url': group_configuration_url,
'course_outline_url': course_outline_url,
'experiment_group_configurations': experiment_group_configurations,
'should_show_experiment_groups': should_show_experiment_groups,
'content_group_configuration': content_group_configuration
})
elif "application/json" in request.META.get('HTTP_ACCEPT'):
if request.method == 'POST':
# create a new group configuration for the course
try:
new_configuration = GroupConfiguration(request.body, course).get_user_partition()
except GroupConfigurationsValidationError as err:
return JsonResponse({"error": err.message}, status=400)
course.user_partitions.append(new_configuration)
response = JsonResponse(new_configuration.to_json(), status=201)
response["Location"] = reverse_course_url(
'group_configurations_detail_handler',
course.id,
kwargs={'group_configuration_id': new_configuration.id}
)
store.update_item(course, request.user.id)
return response
else:
return HttpResponse(status=406)
@login_required
@ensure_csrf_cookie
@require_http_methods(("POST", "PUT", "DELETE"))
def group_configurations_detail_handler(request, course_key_string, group_configuration_id, group_id=None):
"""
JSON API endpoint for manipulating a group configuration via its internal ID.
Used by the Backbone application.
POST or PUT
json: update group configuration based on provided information
"""
course_key = CourseKey.from_string(course_key_string)
store = modulestore()
with store.bulk_operations(course_key):
course = get_course_and_check_access(course_key, request.user)
matching_id = [p for p in course.user_partitions
if unicode(p.id) == unicode(group_configuration_id)]
if matching_id:
configuration = matching_id[0]
else:
configuration = None
        if request.method in ('POST', 'PUT'):
            # POST or PUT both land here; Django sometimes rewrites one
            # method into the other, so they are handled identically
try:
new_configuration = GroupConfiguration(request.body, course, group_configuration_id).get_user_partition()
except GroupConfigurationsValidationError as err:
return JsonResponse({"error": err.message}, status=400)
if configuration:
index = course.user_partitions.index(configuration)
course.user_partitions[index] = new_configuration
else:
course.user_partitions.append(new_configuration)
store.update_item(course, request.user.id)
configuration = GroupConfiguration.update_usage_info(store, course, new_configuration)
return JsonResponse(configuration, status=201)
elif request.method == "DELETE":
if not configuration:
return JsonResponse(status=404)
return remove_content_or_experiment_group(
request=request,
store=store,
course=course,
configuration=configuration,
group_configuration_id=group_configuration_id,
group_id=group_id
)
def are_content_experiments_enabled(course):
"""
Returns True if content experiments have been enabled for the course.
"""
return (
'split_test' in ADVANCED_COMPONENT_TYPES and
'split_test' in course.advanced_modules
)
def _get_course_creator_status(user):
"""
Helper method for returning the course creator status for a particular user,
taking into account the values of DISABLE_COURSE_CREATION and ENABLE_CREATOR_GROUP.
If the user passed in has not previously visited the index page, it will be
added with status 'unrequested' if the course creator group is in use.
"""
if user.is_staff:
course_creator_status = 'granted'
elif settings.FEATURES.get('DISABLE_COURSE_CREATION', False):
course_creator_status = 'disallowed_for_this_site'
elif settings.FEATURES.get('ENABLE_CREATOR_GROUP', False):
course_creator_status = get_course_creator_status(user)
if course_creator_status is None:
# User not grandfathered in as an existing user, has not previously visited the dashboard page.
# Add the user to the course creator admin table with status 'unrequested'.
add_user_with_status_unrequested(user)
course_creator_status = get_course_creator_status(user)
else:
course_creator_status = 'granted'
return course_creator_status
| agpl-3.0 | -7,682,541,370,460,719,000 | 42.377886 | 142 | 0.638978 | false |
rachelalbert/CS294-26_code | project6_code/main.py | 1 | 1979 | import numpy as np
import matplotlib.pyplot as plt
from scipy.interpolate import interp2d
from glob import glob
fpath = '/Users/rachel/Desktop/tarot_cards/*.png'
flist = glob(fpath)
print('loading images...')
images = []
for fname in flist:
images.append(plt.imread(fname))
def shift_image(im, shift):
delta_y = shift[0]
delta_x = shift[1]
imOut = np.zeros(im.shape)
for i, c in enumerate(np.dsplit(im, 3)):
c = c[:, :, 0]
Y = np.arange(c.shape[0])
X = np.arange(c.shape[1])
f = interp2d(X + delta_x, Y + delta_y, c)
imOut[:, :, i] = f(X, Y)
return imOut
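# --- Illustrative usage (not part of the original script) -------------------
# shift_image resamples each colour channel on a coordinate grid offset by
# (delta_y, delta_x), i.e. a sub-pixel translation via bilinear interpolation;
# the output keeps the input's shape.  The array below is a stand-in for one
# of the 17x17 light-field views loaded above.
def _shift_image_demo():
    demo = np.random.rand(32, 32, 3)
    nudged = shift_image(demo, (0.5, -1.25))  # fractional shifts are the point
    return nudged.shape                       # (32, 32, 3)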
def shifted_images():
for fnum, f in enumerate(np.arange(-9, 13)):
#for fnum, f in enumerate(np.arange(-2, 2)):
print(f)
total = 0.
for i, curr_img in enumerate(images):
# get x and y coords for each image
yval = np.floor(i/17.)
xval = i % 17.
total += shift_image(curr_img, (f*(9.-yval), f*(9.-xval)))
out_name = './output/frame_{0}.jpg'.format(fnum)
plt.imsave(out_name, total/len(images))
def get_aperture(ay, ax):
a2 = (int(np.round(ay/2.)), int(np.round(ax/2.)))
coords = np.arange(289).reshape((17, 17))
return np.array(coords[8 - a2[0]: 9 + a2[0],
8 - a2[1]: 9 + a2[1]].flatten())
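# --- Illustrative check (not part of the original script) -------------------
# The 289 images form a 17x17 camera grid and get_aperture picks a block of
# view indices centred on camera (8, 8).  Note np.round rounds halves to even,
# so an aperture size of 1 collapses to the single centre view:
def _aperture_sanity_check():
    assert list(get_aperture(1, 1)) == [144]                   # centre view only
    assert list(get_aperture(1, 17)) == list(range(136, 153))  # whole centre row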
def aperture_images():
print('computing aperture images...')
ays = [ 1, 17, 5, 17]
axs = [17, 1, 17, 5]
#ays = np.arange(1, 17)
#axs = ays.copy()
for anum, (ay, ax) in enumerate(zip(ays, axs)):
print('aperture {0} of {1}'.format(anum, len(ays)-1))
coords = get_aperture(ay, ax)
if len(coords) == 1:
out_im = images[coords]
else:
out_im = np.mean([images[i] for i in coords], axis=0)
out_name = './output/apertures/asymmetric/aperture_{0}_{1}.jpg'.format(ay, ax)
plt.imsave(out_name, out_im)
shifted_images()
aperture_images() | mit | 3,345,637,577,840,648,700 | 28.117647 | 86 | 0.553815 | false |
swayand/Checkmate | data/translator.py | 1 | 1656 | #!/usr/bin/env python
import os, sys
import json
if len(sys.argv) < 2:
exit("Usage: ./translator (file_var.j in old format) (file_var.j in new format)")
infile = sys.argv[1]
outfile = sys.argv[2]
injfile = open(infile, "rb")
parameters = json.loads(injfile.read())
injfile.close()
if "version" in parameters and parameters["version"] == 2.0:
exit(infile+" is newest version already!")
try:
# Add parameters that did not exist before
parameters["expectation_known"] = "y"
parameters["version"] = 2.0
parameters["author"] = "CheckMATE"
parameters["authoremail"] = "[email protected]"
# Remove parameters that should not be there
if "files" in parameters:
parameters.pop("files")
if "CURRLEVEL" in parameters:
parameters.pop("CURRLEVEL")
# dict() parameters have to be put as dict()
for p in parameters:
if type(parameters[p]) in [type("string"), type(u"string")]:
if (parameters[p].startswith("[") or parameters[p].startswith("{")):
parameters[p] = eval(parameters[p])
# Some integers have to be put as real integers
parameters["electron_niso"] = int(parameters["electron_niso"])
parameters["muon_niso"] = int(parameters["muon_niso"])
parameters["photon_niso"] = int(parameters["photon_niso"])
if parameters["jets_btagging"] == "y":
parameters["jets_btagging_n"] = int(parameters["jets_btagging_n"])
except Exception, e:
print str(e)
print "Problem with "+infile
jfile = open(outfile, "wb")
jfile.write(json.dumps(parameters, sort_keys=True, indent=2))
jfile.close()
print "Translated "+infile+" successfully!"
| gpl-2.0 | 1,584,684,689,227,110,000 | 32.12 | 84 | 0.664855 | false |
abdullah2891/remo | remo/profiles/migrations/0051_auto__add_field_userprofile_date_left_program.py | 3 | 11137 | # -*- coding: utf-8 -*-
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models
class Migration(SchemaMigration):
def forwards(self, orm):
# Adding field 'UserProfile.date_left_program'
db.add_column('profiles_userprofile', 'date_left_program',
self.gf('django.db.models.fields.DateField')(null=True, blank=True),
keep_default=False)
def backwards(self, orm):
# Deleting field 'UserProfile.date_left_program'
db.delete_column('profiles_userprofile', 'date_left_program')
models = {
'auth.group': {
'Meta': {'object_name': 'Group'},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '80'}),
'permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'})
},
'auth.permission': {
'Meta': {'ordering': "('content_type__app_label', 'content_type__model', 'codename')", 'unique_together': "(('content_type', 'codename'),)", 'object_name': 'Permission'},
'codename': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'content_type': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['contenttypes.ContentType']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '50'})
},
'auth.user': {
'Meta': {'object_name': 'User'},
'date_joined': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'email': ('django.db.models.fields.EmailField', [], {'max_length': '75', 'blank': 'True'}),
'first_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'groups': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Group']", 'symmetrical': 'False', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'is_active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'is_staff': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_superuser': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'last_login': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'last_name': ('django.db.models.fields.CharField', [], {'max_length': '30', 'blank': 'True'}),
'password': ('django.db.models.fields.CharField', [], {'max_length': '128'}),
'user_permissions': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['auth.Permission']", 'symmetrical': 'False', 'blank': 'True'}),
'username': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '30'})
},
'contenttypes.contenttype': {
'Meta': {'ordering': "('name',)", 'unique_together': "(('app_label', 'model'),)", 'object_name': 'ContentType', 'db_table': "'django_content_type'"},
'app_label': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'model': ('django.db.models.fields.CharField', [], {'max_length': '100'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '100'})
},
'profiles.functionalarea': {
'Meta': {'ordering': "['name']", 'object_name': 'FunctionalArea'},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'unique': 'True', 'max_length': '100'}),
'slug': ('django.db.models.fields.SlugField', [], {'max_length': '100', 'blank': 'True'})
},
'profiles.useravatar': {
'Meta': {'object_name': 'UserAvatar'},
'avatar_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '400'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime(2015, 1, 27, 0, 0)', 'auto_now': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'})
},
'profiles.userprofile': {
'Meta': {'object_name': 'UserProfile'},
'added_by': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'users_added'", 'null': 'True', 'to': "orm['auth.User']"}),
'bio': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'birth_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'city': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'country': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'current_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'date_joined_program': ('django.db.models.fields.DateField', [], {'blank': 'True'}),
'date_left_program': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'diaspora_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'display_name': ('django.db.models.fields.CharField', [], {'default': "''", 'unique': 'True', 'max_length': '50', 'blank': 'True'}),
'facebook_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'first_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_matching'", 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'gender': ('django.db.models.fields.NullBooleanField', [], {'default': 'None', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'irc_channels': ('django.db.models.fields.TextField', [], {'default': "''", 'blank': 'True'}),
'irc_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'is_rotm_nominee': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'is_unavailable': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'jabber_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50', 'blank': 'True'}),
'lat': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'linkedin_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'local_name': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'lon': ('django.db.models.fields.FloatField', [], {'null': 'True'}),
'longest_streak_end': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'longest_streak_start': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'mentor': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'mentees'", 'null': 'True', 'on_delete': 'models.SET_NULL', 'to': "orm['auth.User']"}),
'mozillian_username': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '40', 'blank': 'True'}),
'mozillians_profile_url': ('django.db.models.fields.URLField', [], {'max_length': '200'}),
'personal_blog_feed': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'personal_website_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'}),
'private_email': ('django.db.models.fields.EmailField', [], {'default': "''", 'max_length': '75', 'null': 'True'}),
'receive_email_on_add_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_event_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'receive_email_on_add_voting_comment': ('django.db.models.fields.BooleanField', [], {'default': 'True'}),
'region': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '50'}),
'registration_complete': ('django.db.models.fields.BooleanField', [], {'default': 'False'}),
'second_report_notification': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'timezone': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '100', 'blank': 'True'}),
'tracked_functional_areas': ('django.db.models.fields.related.ManyToManyField', [], {'related_name': "'users_tracking'", 'symmetrical': 'False', 'to': "orm['profiles.FunctionalArea']"}),
'twitter_account': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '16', 'blank': 'True'}),
'unavailability_task_id': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '256', 'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.OneToOneField', [], {'to': "orm['auth.User']", 'unique': 'True'}),
'wiki_profile_url': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '200', 'blank': 'True'})
},
'profiles.userstatus': {
'Meta': {'ordering': "['-expected_date', '-created_on']", 'object_name': 'UserStatus'},
'created_on': ('django.db.models.fields.DateTimeField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'expected_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'replacement_rep': ('django.db.models.fields.related.ForeignKey', [], {'blank': 'True', 'related_name': "'replaced_rep'", 'null': 'True', 'to': "orm['auth.User']"}),
'return_date': ('django.db.models.fields.DateField', [], {'null': 'True', 'blank': 'True'}),
'user': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'status'", 'to': "orm['auth.User']"})
}
}
complete_apps = ['profiles'] | bsd-3-clause | -3,303,939,626,081,606,000 | 84.022901 | 198 | 0.55356 | false |
pombreda/cogen | examples/callevt-profile.py | 4 | 1476 | from cogen.common import *
count = 0
@coroutine
def cogen_b():
global count
count += 1
yield
yield
yield
@coroutine
def cogen_a(prio):
for i in xrange(10000):
yield events.Call(cogen_b, prio=prio)
def normal_a():
global count
count += 1
yield
yield
yield
def normal_b():
for i in xrange(10000):
for i in normal_a():
pass
def cogen_call(prio=priority.FIRST):
m = Scheduler(default_priority=priority.FIRST)
m.add(cogen_a, args=(prio,))
m.run()
def normal_call():
normal_b()
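# --- Illustrative check (not part of the original benchmark) ----------------
# Both paths do the same counting work: the plain-generator version nests the
# calls directly, the cogen version routes each call through the scheduler via
# events.Call.  A quick equivalence check, assuming the scheduler drains every
# queued coroutine:
def sanity_check():
    global count
    count = 0
    normal_call()
    plain_total = count
    count = 0
    cogen_call()
    assert count == plain_total == 10000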
if __name__ == "__main__":
#~ cogen_call()
import timeit
print timeit.Timer(
'normal_call()',
"from __main__ import normal_call"
).timeit(3)
print count
print timeit.Timer(
'cogen_call()',
"from __main__ import cogen_call"
).timeit(3)
print count
import cProfile, os
cProfile.run("cogen_call()", "cprofile.log")
#cProfile.run("normal_call()", "cprofile.log")
import pstats
for i in [
'calls','cumulative','file','module',
'pcalls','line','name','nfl','stdname','time'
]:
stats = pstats.Stats("cprofile.log",
stream = file('cprofile.%s.%s.txt' % (
os.path.split(__file__)[1],
i
),'w'
)
)
stats.sort_stats(i)
stats.print_stats()
| mit | 8,679,295,486,521,430,000 | 21.806452 | 53 | 0.508808 | false |
radosuav/QGIS | python/plugins/processing/ProcessingPlugin.py | 2 | 14116 | # -*- coding: utf-8 -*-
"""
***************************************************************************
ProcessingPlugin.py
---------------------
Date : August 2012
Copyright : (C) 2012 by Victor Olaya
Email : volayaf at gmail dot com
***************************************************************************
* *
* This program is free software; you can redistribute it and/or modify *
* it under the terms of the GNU General Public License as published by *
* the Free Software Foundation; either version 2 of the License, or *
* (at your option) any later version. *
* *
***************************************************************************
"""
__author__ = 'Victor Olaya'
__date__ = 'August 2012'
__copyright__ = '(C) 2012, Victor Olaya'
import shutil
import os
import sys
from functools import partial
from qgis.core import (QgsApplication,
QgsProcessingUtils,
QgsProcessingModelAlgorithm,
QgsDataItemProvider,
QgsDataProvider,
QgsDataItem,
QgsMapLayerType,
QgsMimeDataUtils)
from qgis.gui import (QgsOptionsWidgetFactory,
QgsCustomDropHandler)
from qgis.PyQt.QtCore import Qt, QCoreApplication, QDir, QFileInfo
from qgis.PyQt.QtWidgets import QMenu, QAction
from qgis.PyQt.QtGui import QIcon, QKeySequence
from qgis.utils import iface
from processing.core.Processing import Processing
from processing.gui.AlgorithmDialog import AlgorithmDialog
from processing.gui.ProcessingToolbox import ProcessingToolbox
from processing.gui.HistoryDialog import HistoryDialog
from processing.gui.ConfigDialog import ConfigOptionsPage
from processing.gui.ResultsDock import ResultsDock
from processing.gui.AlgorithmLocatorFilter import (AlgorithmLocatorFilter,
InPlaceAlgorithmLocatorFilter)
from processing.modeler.ModelerDialog import ModelerDialog
from processing.tools.system import tempHelpFolder
from processing.gui.menus import removeMenus, initializeMenus, createMenus
from processing.core.ProcessingResults import resultsList
pluginPath = os.path.dirname(__file__)
class ProcessingOptionsFactory(QgsOptionsWidgetFactory):
def __init__(self):
super(QgsOptionsWidgetFactory, self).__init__()
def icon(self):
return QgsApplication.getThemeIcon('/processingAlgorithm.svg')
def createWidget(self, parent):
return ConfigOptionsPage(parent)
class ProcessingDropHandler(QgsCustomDropHandler):
def handleFileDrop(self, file):
if not file.lower().endswith('.model3'):
return False
return self.runAlg(file)
@staticmethod
def runAlg(file):
alg = QgsProcessingModelAlgorithm()
if not alg.fromFile(file):
return False
alg.setProvider(QgsApplication.processingRegistry().providerById('model'))
dlg = AlgorithmDialog(alg, parent=iface.mainWindow())
dlg.show()
return True
def customUriProviderKey(self):
return 'processing'
def handleCustomUriDrop(self, uri):
path = uri.uri
self.runAlg(path)
class ProcessingModelItem(QgsDataItem):
def __init__(self, parent, name, path):
super(ProcessingModelItem, self).__init__(QgsDataItem.Custom, parent, name, path)
self.setState(QgsDataItem.Populated) # no children
self.setIconName(":/images/themes/default/processingModel.svg")
self.setToolTip(QDir.toNativeSeparators(path))
def hasDragEnabled(self):
return True
def handleDoubleClick(self):
self.runModel()
return True
def mimeUri(self):
u = QgsMimeDataUtils.Uri()
u.layerType = "custom"
u.providerKey = "processing"
u.name = self.name()
u.uri = self.path()
return u
def runModel(self):
ProcessingDropHandler.runAlg(self.path())
def editModel(self):
dlg = ModelerDialog()
dlg.loadModel(self.path())
dlg.show()
def actions(self, parent):
run_model_action = QAction(QCoreApplication.translate('ProcessingPlugin', '&Run Model…'), parent)
run_model_action.triggered.connect(self.runModel)
edit_model_action = QAction(QCoreApplication.translate('ProcessingPlugin', '&Edit Model…'), parent)
edit_model_action.triggered.connect(self.editModel)
return [run_model_action, edit_model_action]
class ProcessingDataItemProvider(QgsDataItemProvider):
def __init__(self):
super(ProcessingDataItemProvider, self).__init__()
def name(self):
return 'processing'
def capabilities(self):
return QgsDataProvider.File
def createDataItem(self, path, parentItem):
file_info = QFileInfo(path)
if file_info.suffix().lower() == 'model3':
alg = QgsProcessingModelAlgorithm()
if alg.fromFile(path):
return ProcessingModelItem(parentItem, alg.name(), path)
return None
class ProcessingPlugin:
def __init__(self, iface):
self.iface = iface
self.options_factory = None
self.drop_handler = None
self.item_provider = None
self.locator_filter = None
self.edit_features_locator_filter = None
self.initialized = False
self.initProcessing()
def initProcessing(self):
if not self.initialized:
self.initialized = True
Processing.initialize()
def initGui(self):
self.options_factory = ProcessingOptionsFactory()
self.options_factory.setTitle(self.tr('Processing'))
iface.registerOptionsWidgetFactory(self.options_factory)
self.drop_handler = ProcessingDropHandler()
iface.registerCustomDropHandler(self.drop_handler)
self.item_provider = ProcessingDataItemProvider()
QgsApplication.dataItemProviderRegistry().addProvider(self.item_provider)
self.locator_filter = AlgorithmLocatorFilter()
iface.registerLocatorFilter(self.locator_filter)
# Invalidate the locator filter for in-place when active layer changes
iface.currentLayerChanged.connect(lambda _: self.iface.invalidateLocatorResults())
self.edit_features_locator_filter = InPlaceAlgorithmLocatorFilter()
iface.registerLocatorFilter(self.edit_features_locator_filter)
self.toolbox = ProcessingToolbox()
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.toolbox)
self.toolbox.hide()
self.toolbox.visibilityChanged.connect(self.toolboxVisibilityChanged)
self.resultsDock = ResultsDock()
self.iface.addDockWidget(Qt.RightDockWidgetArea, self.resultsDock)
self.resultsDock.hide()
self.menu = QMenu(self.iface.mainWindow().menuBar())
self.menu.setObjectName('processing')
self.menu.setTitle(self.tr('Pro&cessing'))
self.toolboxAction = QAction(self.tr('&Toolbox'), self.iface.mainWindow())
self.toolboxAction.setCheckable(True)
self.toolboxAction.setObjectName('toolboxAction')
self.toolboxAction.setIcon(
QgsApplication.getThemeIcon("/processingAlgorithm.svg"))
self.iface.registerMainWindowAction(self.toolboxAction,
QKeySequence('Ctrl+Alt+T').toString(QKeySequence.NativeText))
self.toolboxAction.toggled.connect(self.openToolbox)
self.iface.attributesToolBar().insertAction(self.iface.actionOpenStatisticalSummary(), self.toolboxAction)
self.menu.addAction(self.toolboxAction)
self.modelerAction = QAction(
QgsApplication.getThemeIcon("/processingModel.svg"),
QCoreApplication.translate('ProcessingPlugin', '&Graphical Modeler…'), self.iface.mainWindow())
self.modelerAction.setObjectName('modelerAction')
self.modelerAction.triggered.connect(self.openModeler)
self.iface.registerMainWindowAction(self.modelerAction,
QKeySequence('Ctrl+Alt+G').toString(QKeySequence.NativeText))
self.menu.addAction(self.modelerAction)
self.historyAction = QAction(
QgsApplication.getThemeIcon("/mIconHistory.svg"),
QCoreApplication.translate('ProcessingPlugin', '&History…'), self.iface.mainWindow())
self.historyAction.setObjectName('historyAction')
self.historyAction.triggered.connect(self.openHistory)
self.iface.registerMainWindowAction(self.historyAction,
QKeySequence('Ctrl+Alt+H').toString(QKeySequence.NativeText))
self.menu.addAction(self.historyAction)
self.toolbox.processingToolbar.addAction(self.historyAction)
self.resultsAction = QAction(
QgsApplication.getThemeIcon("/processingResult.svg"),
self.tr('&Results Viewer'), self.iface.mainWindow())
self.resultsAction.setObjectName('resultsViewer')
self.resultsAction.setCheckable(True)
self.iface.registerMainWindowAction(self.resultsAction,
QKeySequence('Ctrl+Alt+R').toString(QKeySequence.NativeText))
self.menu.addAction(self.resultsAction)
self.toolbox.processingToolbar.addAction(self.resultsAction)
self.resultsDock.visibilityChanged.connect(self.resultsAction.setChecked)
self.resultsAction.toggled.connect(self.resultsDock.setUserVisible)
self.toolbox.processingToolbar.addSeparator()
self.editInPlaceAction = QAction(
QgsApplication.getThemeIcon("/mActionProcessSelected.svg"),
self.tr('Edit Features In-Place'), self.iface.mainWindow())
self.editInPlaceAction.setObjectName('editInPlaceFeatures')
self.editInPlaceAction.setCheckable(True)
self.editInPlaceAction.toggled.connect(self.editSelected)
self.menu.addAction(self.editInPlaceAction)
self.toolbox.processingToolbar.addAction(self.editInPlaceAction)
self.toolbox.processingToolbar.addSeparator()
self.optionsAction = QAction(
QgsApplication.getThemeIcon("/mActionOptions.svg"),
self.tr('Options'), self.iface.mainWindow())
self.optionsAction.setObjectName('optionsAction')
self.optionsAction.triggered.connect(self.openProcessingOptions)
self.toolbox.processingToolbar.addAction(self.optionsAction)
menuBar = self.iface.mainWindow().menuBar()
menuBar.insertMenu(
self.iface.firstRightStandardMenu().menuAction(), self.menu)
self.menu.addSeparator()
initializeMenus()
createMenus()
# In-place editing button state sync
self.iface.currentLayerChanged.connect(self.sync_in_place_button_state)
self.iface.mapCanvas().selectionChanged.connect(self.sync_in_place_button_state)
self.iface.actionToggleEditing().triggered.connect(partial(self.sync_in_place_button_state, None))
self.sync_in_place_button_state()
def sync_in_place_button_state(self, layer=None):
"""Synchronise the button state with layer state"""
if layer is None:
layer = self.iface.activeLayer()
old_enabled_state = self.editInPlaceAction.isEnabled()
new_enabled_state = layer is not None and layer.type() == QgsMapLayerType.VectorLayer
self.editInPlaceAction.setEnabled(new_enabled_state)
if new_enabled_state != old_enabled_state:
self.toolbox.set_in_place_edit_mode(new_enabled_state and self.editInPlaceAction.isChecked())
def openProcessingOptions(self):
self.iface.showOptionsDialog(self.iface.mainWindow(), currentPage='processingOptions')
def unload(self):
self.toolbox.setVisible(False)
self.iface.removeDockWidget(self.toolbox)
self.iface.attributesToolBar().removeAction(self.toolboxAction)
self.resultsDock.setVisible(False)
self.iface.removeDockWidget(self.resultsDock)
self.toolbox.deleteLater()
self.menu.deleteLater()
# also delete temporary help files
folder = tempHelpFolder()
if QDir(folder).exists():
shutil.rmtree(folder, True)
self.iface.unregisterMainWindowAction(self.toolboxAction)
self.iface.unregisterMainWindowAction(self.modelerAction)
self.iface.unregisterMainWindowAction(self.historyAction)
self.iface.unregisterMainWindowAction(self.resultsAction)
self.iface.unregisterOptionsWidgetFactory(self.options_factory)
self.iface.deregisterLocatorFilter(self.locator_filter)
self.iface.deregisterLocatorFilter(self.edit_features_locator_filter)
self.iface.unregisterCustomDropHandler(self.drop_handler)
QgsApplication.dataItemProviderRegistry().removeProvider(self.item_provider)
removeMenus()
Processing.deinitialize()
def openToolbox(self, show):
self.toolbox.setUserVisible(show)
def toolboxVisibilityChanged(self, visible):
self.toolboxAction.setChecked(visible)
def openModeler(self):
dlg = ModelerDialog()
dlg.update_model.connect(self.updateModel)
dlg.show()
def updateModel(self):
model_provider = QgsApplication.processingRegistry().providerById('model')
model_provider.refreshAlgorithms()
def openResults(self):
if self.resultsDock.isVisible():
self.resultsDock.hide()
else:
self.resultsDock.show()
def openHistory(self):
dlg = HistoryDialog()
dlg.exec_()
def tr(self, message):
return QCoreApplication.translate('ProcessingPlugin', message)
def editSelected(self, enabled):
self.toolbox.set_in_place_edit_mode(enabled)
| gpl-2.0 | -6,861,216,234,714,790,000 | 38.518207 | 114 | 0.661327 | false |
nelson-liu/scikit-learn | examples/svm/plot_svm_scale_c.py | 19 | 5409 | """
==============================================
Scaling the regularization parameter for SVCs
==============================================
The following example illustrates the effect of scaling the
regularization parameter when using :ref:`svm` for
:ref:`classification <svm_classification>`.
For SVC classification, we are interested in a risk minimization for the
equation:
.. math::
C \sum_{i=1, n} \mathcal{L} (f(x_i), y_i) + \Omega (w)
where
- :math:`C` is used to set the amount of regularization
- :math:`\mathcal{L}` is a `loss` function of our samples
and our model parameters.
- :math:`\Omega` is a `penalty` function of our model parameters
If we consider the loss function to be the individual error per
sample, then the data-fit term, or the sum of the error for each sample, will
increase as we add more samples. The penalization term, however, will not
increase.
When using, for example, :ref:`cross validation <cross_validation>`, to
set the amount of regularization with `C`, there will be a
different number of samples between the main problem and the smaller problems
within the folds of the cross validation.
Since our loss function is dependent on the number of samples, the latter
will influence the selected value of `C`.
The question that arises is `How do we optimally adjust C to
account for the different number of training samples?`
The figures below are used to illustrate the effect of scaling our
`C` to compensate for the change in the number of samples, in the
case of using an `l1` penalty, as well as the `l2` penalty.
l1-penalty case
-----------------
In the `l1` case, theory says that prediction consistency
(i.e. that under a given hypothesis, the estimator
learned predicts as well as a model knowing the true distribution)
is not possible because of the bias of the `l1` penalty. It does say, however,
that model consistency, in terms of finding the right set of non-zero
parameters as well as their signs, can be achieved by scaling
`C1`.
l2-penalty case
-----------------
The theory says that in order to achieve prediction consistency, the
penalty parameter should be kept constant
as the number of samples grow.
Simulations
------------
The two figures below plot the values of `C` on the `x-axis` and the
corresponding cross-validation scores on the `y-axis`, for several different
fractions of a generated data-set.
In the `l1` penalty case, the cross-validation-error correlates best with
the test-error, when scaling our `C` with the number of samples, `n`,
which can be seen in the first figure.
For the `l2` penalty case, the best result comes from the case where `C`
is not scaled.
.. topic:: Note:
Two separate datasets are used for the two different plots. The reason
behind this is the `l1` case works better on sparse data, while `l2`
is better suited to the non-sparse case.
"""
print(__doc__)
# Author: Andreas Mueller <[email protected]>
# Jaques Grobler <[email protected]>
# License: BSD 3 clause
import numpy as np
import matplotlib.pyplot as plt
from sklearn.svm import LinearSVC
from sklearn.model_selection import ShuffleSplit
from sklearn.model_selection import GridSearchCV
from sklearn.utils import check_random_state
from sklearn import datasets
rnd = check_random_state(1)
# set up dataset
n_samples = 100
n_features = 300
# l1 data (only 5 informative features)
X_1, y_1 = datasets.make_classification(n_samples=n_samples,
n_features=n_features, n_informative=5,
random_state=1)
# l2 data: non sparse, but less features
y_2 = np.sign(.5 - rnd.rand(n_samples))
X_2 = rnd.randn(n_samples, n_features // 5) + y_2[:, np.newaxis]
X_2 += 5 * rnd.randn(n_samples, n_features // 5)
clf_sets = [(LinearSVC(penalty='l1', loss='squared_hinge', dual=False,
tol=1e-3),
np.logspace(-2.3, -1.3, 10), X_1, y_1),
(LinearSVC(penalty='l2', loss='squared_hinge', dual=True,
tol=1e-4),
np.logspace(-4.5, -2, 10), X_2, y_2)]
colors = ['navy', 'cyan', 'darkorange']
lw = 2
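# Illustrative helper (not used by the original example): the innermost loop
# below multiplies the raw C grid by ``scaler``; with
# scaler = n_samples * train_size this is the "1/n_samples" compensation
# discussed in the docstring, i.e. C_scaled = C_raw * n_train.
def scaled_c_grid(raw_cs, n_train):
    return raw_cs * float(n_train)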
for fignum, (clf, cs, X, y) in enumerate(clf_sets):
# set up the plot for each regressor
plt.figure(fignum, figsize=(9, 10))
for k, train_size in enumerate(np.linspace(0.3, 0.7, 3)[::-1]):
param_grid = dict(C=cs)
# To get nice curve, we need a large number of iterations to
# reduce the variance
grid = GridSearchCV(clf, refit=False, param_grid=param_grid,
cv=ShuffleSplit(train_size=train_size,
n_splits=250, random_state=1))
grid.fit(X, y)
scores = grid.cv_results_['mean_test_score']
scales = [(1, 'No scaling'),
((n_samples * train_size), '1/n_samples'),
]
for subplotnum, (scaler, name) in enumerate(scales):
plt.subplot(2, 1, subplotnum + 1)
plt.xlabel('C')
plt.ylabel('CV Score')
grid_cs = cs * float(scaler) # scale the C's
plt.semilogx(grid_cs, scores, label="fraction %.2f" %
train_size, color=colors[k], lw=lw)
plt.title('scaling=%s, penalty=%s, loss=%s' %
(name, clf.penalty, clf.loss))
plt.legend(loc="best")
plt.show()
| bsd-3-clause | 3,283,908,061,440,834,000 | 34.821192 | 79 | 0.647439 | false |
saurabh6790/frappe | frappe/tests/test_website.py | 1 | 3138 | from __future__ import unicode_literals
import unittest
import frappe
from frappe.website import render
from frappe.website.utils import get_home_page
from frappe.utils import set_request
class TestWebsite(unittest.TestCase):
def test_home_page_for_role(self):
frappe.delete_doc_if_exists('User', '[email protected]')
frappe.delete_doc_if_exists('Role', 'home-page-test')
frappe.delete_doc_if_exists('Web Page', 'home-page-test')
user = frappe.get_doc(dict(
doctype='User',
email='[email protected]',
first_name='test')).insert()
role = frappe.get_doc(dict(
doctype = 'Role',
role_name = 'home-page-test',
desk_access = 0,
home_page = '/home-page-test'
)).insert()
user.add_roles(role.name)
user.save()
frappe.set_user('[email protected]')
self.assertEqual(get_home_page(), 'home-page-test')
frappe.set_user('Administrator')
role.home_page = ''
role.save()
# home page via portal settings
frappe.db.set_value('Portal Settings', None, 'default_portal_home', 'test-portal-home')
frappe.set_user('[email protected]')
frappe.cache().hdel('home_page', frappe.session.user)
self.assertEqual(get_home_page(), 'test-portal-home')
def test_page_load(self):
frappe.set_user('Guest')
set_request(method='POST', path='login')
response = render.render()
self.assertEqual(response.status_code, 200)
html = frappe.safe_decode(response.get_data())
self.assertTrue('// login.js' in html)
self.assertTrue('<!-- login.html -->' in html)
frappe.set_user('Administrator')
def test_redirect(self):
import frappe.hooks
frappe.hooks.website_redirects = [
dict(source=r'/testfrom', target=r'://testto1'),
dict(source=r'/testfromregex.*', target=r'://testto2'),
dict(source=r'/testsub/(.*)', target=r'://testto3/\1')
]
website_settings = frappe.get_doc('Website Settings')
website_settings.append('route_redirects', {
'source': '/testsource',
'target': '/testtarget'
})
website_settings.save()
frappe.cache().delete_key('app_hooks')
frappe.cache().delete_key('website_redirects')
set_request(method='GET', path='/testfrom')
response = render.render()
self.assertEqual(response.status_code, 301)
self.assertEqual(response.headers.get('Location'), r'://testto1')
set_request(method='GET', path='/testfromregex/test')
response = render.render()
self.assertEqual(response.status_code, 301)
self.assertEqual(response.headers.get('Location'), r'://testto2')
set_request(method='GET', path='/testsub/me')
response = render.render()
self.assertEqual(response.status_code, 301)
self.assertEqual(response.headers.get('Location'), r'://testto3/me')
set_request(method='GET', path='/test404')
response = render.render()
self.assertEqual(response.status_code, 404)
set_request(method='GET', path='/testsource')
response = render.render()
self.assertEqual(response.status_code, 301)
self.assertEqual(response.headers.get('Location'), '/testtarget')
delattr(frappe.hooks, 'website_redirects')
frappe.cache().delete_key('app_hooks')
| mit | -1,457,067,847,997,497,300 | 29.764706 | 89 | 0.69949 | false |
calliope-project/calliope | calliope/backend/pyomo/constraints/dispatch.py | 1 | 11639 | """
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
dispatch.py
~~~~~~~~~~~~~~~~~
Energy dispatch constraints, limiting production/consumption to the capacities
of the technologies
"""
import pyomo.core as po
from calliope.backend.pyomo.util import get_param, get_previous_timestep
def carrier_production_max_constraint_rule(
backend_model, carrier, node, tech, timestep
):
"""
Set maximum carrier production. All technologies.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{carrier_{prod}}(loc::tech::carrier, timestep) \\leq energy_{cap}(loc::tech)
            \\times timestep\\_resolution(timestep) \\times parasitic\\_eff(loc::tech)
"""
carrier_prod = backend_model.carrier_prod[carrier, node, tech, timestep]
timestep_resolution = backend_model.timestep_resolution[timestep]
parasitic_eff = get_param(backend_model, "parasitic_eff", (node, tech, timestep))
return carrier_prod <= (
backend_model.energy_cap[node, tech] * timestep_resolution * parasitic_eff
)
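# --- Illustrative wiring sketch (not part of this module) -------------------
# Rule functions like the one above are plain callables that Pyomo evaluates
# once per index tuple.  Calliope builds the indexing sets elsewhere; the
# generic Pyomo pattern, with hypothetical set names, would look like:
#
#     backend_model.carrier_production_max_constraint = po.Constraint(
#         backend_model.carriers, backend_model.nodes, backend_model.techs,
#         backend_model.timesteps,
#         rule=carrier_production_max_constraint_rule,
#     )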
def carrier_production_min_constraint_rule(
backend_model, carrier, node, tech, timestep
):
"""
Set minimum carrier production. All technologies except ``conversion_plus``.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{carrier_{prod}}(loc::tech::carrier, timestep) \\geq energy_{cap}(loc::tech)
            \\times timestep\\_resolution(timestep) \\times energy_{cap,min\\_use}(loc::tech)
"""
carrier_prod = backend_model.carrier_prod[carrier, node, tech, timestep]
timestep_resolution = backend_model.timestep_resolution[timestep]
min_use = get_param(backend_model, "energy_cap_min_use", (node, tech, timestep))
return carrier_prod >= (
backend_model.energy_cap[node, tech] * timestep_resolution * min_use
)
def carrier_consumption_max_constraint_rule(
backend_model, carrier, node, tech, timestep
):
"""
Set maximum carrier consumption for demand, storage, and transmission techs.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{carrier_{con}}(loc::tech::carrier, timestep) \\geq
-1 \\times energy_{cap}(loc::tech)
\\times timestep\\_resolution(timestep)
"""
carrier_con = backend_model.carrier_con[carrier, node, tech, timestep]
timestep_resolution = backend_model.timestep_resolution[timestep]
return carrier_con >= (
-1 * backend_model.energy_cap[node, tech] * timestep_resolution
)
def resource_max_constraint_rule(backend_model, node, tech, timestep):
"""
Set maximum resource consumed by supply_plus techs.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{resource_{con}}(loc::tech, timestep) \\leq
timestep\\_resolution(timestep) \\times resource_{cap}(loc::tech)
"""
timestep_resolution = backend_model.timestep_resolution[timestep]
return backend_model.resource_con[node, tech, timestep] <= (
timestep_resolution * backend_model.resource_cap[node, tech]
)
def storage_max_constraint_rule(backend_model, node, tech, timestep):
"""
Set maximum stored energy. Supply_plus & storage techs only.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage}(loc::tech, timestep) \\leq
storage_{cap}(loc::tech)
"""
return (
backend_model.storage[node, tech, timestep]
<= backend_model.storage_cap[node, tech]
)
def storage_discharge_depth_constraint_rule(backend_model, node, tech, timestep):
"""
Forces storage state of charge to be greater than the allowed depth of discharge.
.. container:: scrolling-wrapper
.. math::
            \\boldsymbol{storage}(loc::tech, timestep) \\geq
            storage\\_discharge\\_depth(loc::tech) \\times storage_{cap}(loc::tech)
            \\quad \\forall loc::tech \\in loc::techs_{store}, \\forall timestep \\in timesteps
"""
storage_discharge_depth = get_param(
backend_model, "storage_discharge_depth", (node, tech)
)
return (
backend_model.storage[node, tech, timestep]
>= storage_discharge_depth * backend_model.storage_cap[node, tech]
)
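# Worked example for the rule above (illustrative numbers): with
# storage_cap = 100 and storage_discharge_depth = 0.2, the state of charge is
# kept at or above 0.2 * 100 = 20 in every timestep.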
def ramping_up_constraint_rule(backend_model, carrier, node, tech, timestep):
"""
Ramping up constraint.
.. container:: scrolling-wrapper
.. math::
diff(loc::tech::carrier, timestep) \\leq max\\_ramping\\_rate(loc::tech::carrier, timestep)
"""
return ramping_constraint(backend_model, carrier, node, tech, timestep, direction=0)
def ramping_down_constraint_rule(backend_model, carrier, node, tech, timestep):
"""
Ramping down constraint.
.. container:: scrolling-wrapper
.. math::
-1 \\times max\\_ramping\\_rate(loc::tech::carrier, timestep) \\leq diff(loc::tech::carrier, timestep)
"""
return ramping_constraint(backend_model, carrier, node, tech, timestep, direction=1)
def ramping_constraint(backend_model, carrier, node, tech, timestep, direction=0):
"""
Ramping rate constraints.
Direction: 0 is up, 1 is down.
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{max\\_ramping\\_rate}(loc::tech::carrier, timestep) =
energy_{ramping}(loc::tech, timestep) \\times energy_{cap}(loc::tech)
\\boldsymbol{diff}(loc::tech::carrier, timestep) =
(carrier_{prod}(loc::tech::carrier, timestep) + carrier_{con}(loc::tech::carrier, timestep))
/ timestep\\_resolution(timestep) -
(carrier_{prod}(loc::tech::carrier, timestep-1) + carrier_{con}(loc::tech::carrier, timestep-1))
/ timestep\\_resolution(timestep-1)
"""
# No constraint for first timestep
# Pyomo returns the order 1-indexed, but we want 0-indexing
if backend_model.timesteps.ord(timestep) - 1 == 0:
return po.Constraint.NoConstraint
else:
previous_step = get_previous_timestep(backend_model.timesteps, timestep)
time_res = backend_model.timestep_resolution[timestep]
time_res_prev = backend_model.timestep_resolution[previous_step]
# Ramping rate (fraction of installed capacity per hour)
ramping_rate = get_param(
backend_model, "energy_ramping", (node, tech, timestep)
)
try:
prod_this = backend_model.carrier_prod[carrier, node, tech, timestep]
prod_prev = backend_model.carrier_prod[carrier, node, tech, previous_step]
except KeyError:
prod_this = 0
prod_prev = 0
try:
con_this = backend_model.carrier_con[carrier, node, tech, timestep]
con_prev = backend_model.carrier_con[carrier, node, tech, previous_step]
except KeyError:
con_this = 0
con_prev = 0
diff = (prod_this + con_this) / time_res - (
prod_prev + con_prev
) / time_res_prev
max_ramping_rate = ramping_rate * backend_model.energy_cap[node, tech]
if direction == 0:
return diff <= max_ramping_rate
else:
return -1 * max_ramping_rate <= diff
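# Hedged, self-contained sketch (added for illustration only): the "diff"
# computed above, written with plain floats instead of Pyomo expressions.
#   prod_this, con_this, time_res = 10.0, 0.0, 1.0
#   prod_prev, con_prev, time_res_prev = 6.0, 0.0, 1.0
#   diff = (prod_this + con_this) / time_res - (prod_prev + con_prev) / time_res_prev
#   # diff == 4.0; ramping up then requires diff <= energy_ramping * energy_cap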
def storage_intra_max_constraint_rule(backend_model, node, tech, timestep):
"""
When clustering days, to reduce the timeseries length, set limits on
intra-cluster auxiliary maximum storage decision variable.
`Ref: DOI 10.1016/j.apenergy.2018.01.023 <https://doi.org/10.1016/j.apenergy.2018.01.023>`_
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage}(loc::tech, timestep) \\leq
\\boldsymbol{storage_{intra\\_cluster, max}}(loc::tech, cluster(timestep))
\\quad \\forall loc::tech \\in loc::techs_{store}, \\forall timestep \\in timesteps
Where :math:`cluster(timestep)` is the cluster number in which the timestep
is located.
"""
cluster = backend_model.timestep_cluster[timestep].value
return (
backend_model.storage[node, tech, timestep]
<= backend_model.storage_intra_cluster_max[cluster, node, tech]
)
def storage_intra_min_constraint_rule(backend_model, node, tech, timestep):
"""
When clustering days, to reduce the timeseries length, set limits on
intra-cluster auxiliary minimum storage decision variable.
`Ref: DOI 10.1016/j.apenergy.2018.01.023 <https://doi.org/10.1016/j.apenergy.2018.01.023>`_
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage}(loc::tech, timestep) \\geq
\\boldsymbol{storage_{intra\\_cluster, min}}(loc::tech, cluster(timestep))
\\quad \\forall loc::tech \\in loc::techs_{store}, \\forall timestep \\in timesteps
Where :math:`cluster(timestep)` is the cluster number in which the timestep
is located.
"""
cluster = backend_model.timestep_cluster[timestep].value
return (
backend_model.storage[node, tech, timestep]
>= backend_model.storage_intra_cluster_min[cluster, node, tech]
)
def storage_inter_max_constraint_rule(backend_model, node, tech, datestep):
"""
When clustering days, to reduce the timeseries length, set maximum limit on
the intra-cluster and inter-date stored energy.
intra-cluster = all timesteps in a single cluster
datesteps = all dates in the unclustered timeseries (each has a corresponding cluster)
`Ref: DOI 10.1016/j.apenergy.2018.01.023 <https://doi.org/10.1016/j.apenergy.2018.01.023>`_
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage_{inter\\_cluster}}(loc::tech, datestep) +
\\boldsymbol{storage_{intra\\_cluster, max}}(loc::tech, cluster(datestep))
\\leq \\boldsymbol{storage_{cap}}(loc::tech) \\quad \\forall
loc::tech \\in loc::techs_{store}, \\forall datestep \\in datesteps
Where :math:`cluster(datestep)` is the cluster number in which the datestep
is located.
"""
cluster = backend_model.lookup_datestep_cluster[datestep].value
return (
backend_model.storage_inter_cluster[node, tech, datestep]
+ backend_model.storage_intra_cluster_max[cluster, node, tech]
<= backend_model.storage_cap[node, tech]
)
def storage_inter_min_constraint_rule(backend_model, node, tech, datestep):
"""
When clustering days, to reduce the timeseries length, set minimum limit on
the intra-cluster and inter-date stored energy.
intra-cluster = all timesteps in a single cluster
datesteps = all dates in the unclustered timeseries (each has a corresponding cluster)
`Ref: DOI 10.1016/j.apenergy.2018.01.023 <https://doi.org/10.1016/j.apenergy.2018.01.023>`_
.. container:: scrolling-wrapper
.. math::
\\boldsymbol{storage_{inter\\_cluster}}(loc::tech, datestep)
\\times (1 - storage\\_loss(loc::tech, timestep))^{24} +
\\boldsymbol{storage_{intra\\_cluster, min}}(loc::tech, cluster(datestep))
\\geq 0 \\quad \\forall loc::tech \\in loc::techs_{store},
\\forall datestep \\in datesteps
Where :math:`cluster(datestep)` is the cluster number in which the datestep
is located.
"""
cluster = backend_model.lookup_datestep_cluster[datestep].value
storage_loss = get_param(backend_model, "storage_loss", (node, tech))
return (
backend_model.storage_inter_cluster[node, tech, datestep]
* ((1 - storage_loss) ** 24)
+ backend_model.storage_intra_cluster_min[cluster, node, tech]
>= 0
)
| apache-2.0 | 1,616,952,395,252,772,900 | 33.434911 | 127 | 0.644299 | false |
YosefLab/scVI | scvi/core/models/base.py | 1 | 9396 | import inspect
import logging
import os
import pickle
from abc import ABC, abstractmethod
from typing import Optional, Sequence
import numpy as np
import rich
import torch
from anndata import AnnData
from rich.text import Text
from scvi import _CONSTANTS, settings
from scvi.data import get_from_registry, transfer_anndata_setup
from scvi.data._utils import (
_check_anndata_setup_equivalence,
_check_nonnegative_integers,
)
from scvi.core.models._utils import (
_initialize_model,
_load_saved_files,
_validate_var_names,
)
logger = logging.getLogger(__name__)
class BaseModelClass(ABC):
def __init__(self, adata: Optional[AnnData] = None, use_cuda=False):
if adata is not None:
if "_scvi" not in adata.uns.keys():
raise ValueError(
"Please setup your AnnData with scvi.data.setup_anndata(adata) first"
)
self.adata = adata
self.scvi_setup_dict_ = adata.uns["_scvi"]
self.summary_stats = self.scvi_setup_dict_["summary_stats"]
self._validate_anndata(adata, copy_if_view=False)
self.is_trained_ = False
self.use_cuda = use_cuda and torch.cuda.is_available()
self._model_summary_string = ""
self.train_indices_ = None
self.test_indices_ = None
self.validation_indices_ = None
self.history_ = None
def _make_scvi_dl(
self,
adata: AnnData,
indices: Optional[Sequence[int]] = None,
batch_size: Optional[int] = None,
**data_loader_kwargs,
):
"""Create a ScviDataLoader object for data iteration."""
if batch_size is None:
batch_size = settings.batch_size
if indices is None:
indices = np.arange(adata.n_obs)
post = self._scvi_dl_class(
self.model,
adata,
shuffle=False,
indices=indices,
use_cuda=self.use_cuda,
batch_size=batch_size,
**data_loader_kwargs,
).sequential()
return post
def _validate_anndata(
self, adata: Optional[AnnData] = None, copy_if_view: bool = True
):
"""Validate anndata has been properly registered, transfer if necessary."""
if adata is None:
adata = self.adata
if adata.is_view:
if copy_if_view:
logger.info("Received view of anndata, making copy.")
adata = adata.copy()
else:
raise ValueError("Please run `adata = adata.copy()`")
if "_scvi" not in adata.uns_keys():
logger.info(
"Input adata not setup with scvi. "
+ "attempting to transfer anndata setup"
)
transfer_anndata_setup(self.scvi_setup_dict_, adata)
is_nonneg_int = _check_nonnegative_integers(
get_from_registry(adata, _CONSTANTS.X_KEY)
)
if not is_nonneg_int:
logger.warning(
"Make sure the registered X field in anndata contains unnormalized count data."
)
_check_anndata_setup_equivalence(self.scvi_setup_dict_, adata)
return adata
@property
@abstractmethod
def _scvi_dl_class(self):
pass
@property
@abstractmethod
def _trainer_class(self):
pass
@abstractmethod
def train(self):
pass
@property
def is_trained(self):
return self.is_trained_
@property
def test_indices(self):
return self.test_indices_
@property
def train_indices(self):
return self.train_indices_
@property
def validation_indices(self):
return self.validation_indices_
@property
def history(self):
"""Returns computed metrics during training."""
return self.history_
def _get_user_attributes(self):
# returns all the self attributes defined in a model class, eg, self.is_trained_
attributes = inspect.getmembers(self, lambda a: not (inspect.isroutine(a)))
attributes = [
a for a in attributes if not (a[0].startswith("__") and a[0].endswith("__"))
]
attributes = [a for a in attributes if not a[0].startswith("_abc_")]
return attributes
def _get_init_params(self, locals):
# returns the model init signiture with associated passed in values
# except the anndata objects passed in
init = self.__init__
sig = inspect.signature(init)
init_params = [p for p in sig.parameters]
user_params = {p: locals[p] for p in locals if p in init_params}
user_params = {
k: v for (k, v) in user_params.items() if not isinstance(v, AnnData)
}
return user_params
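    # Illustrative note (added, hedged): concrete models typically call
    #   self.init_params_ = self._get_init_params(locals())
    # at the end of their ``__init__`` so that the non-AnnData constructor
    # arguments can be replayed when the model is re-instantiated by ``load``.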
def save(
self,
dir_path: str,
overwrite: bool = False,
save_anndata: bool = False,
**anndata_write_kwargs,
):
"""
Save the state of the model.
Neither the trainer optimizer state nor the trainer history are saved.
Model files are not expected to be reproducibly saved and loaded across versions
until we reach version 1.0.
Parameters
----------
dir_path
Path to a directory.
overwrite
Overwrite existing data or not. If `False` and directory
already exists at `dir_path`, error will be raised.
save_anndata
If True, also saves the anndata
anndata_write_kwargs
Kwargs for anndata write function
"""
# get all the user attributes
user_attributes = self._get_user_attributes()
# only save the public attributes with _ at the very end
user_attributes = {a[0]: a[1] for a in user_attributes if a[0][-1] == "_"}
# save the model state dict and the trainer state dict only
if not os.path.exists(dir_path) or overwrite:
os.makedirs(dir_path, exist_ok=overwrite)
else:
raise ValueError(
"{} already exists. Please provide an unexisting directory for saving.".format(
dir_path
)
)
if save_anndata:
self.adata.write(
os.path.join(dir_path, "adata.h5ad"), **anndata_write_kwargs
)
model_save_path = os.path.join(dir_path, "model_params.pt")
attr_save_path = os.path.join(dir_path, "attr.pkl")
varnames_save_path = os.path.join(dir_path, "var_names.csv")
var_names = self.adata.var_names.astype(str)
var_names = var_names.to_numpy()
np.savetxt(varnames_save_path, var_names, fmt="%s")
torch.save(self.model.state_dict(), model_save_path)
with open(attr_save_path, "wb") as f:
pickle.dump(user_attributes, f)
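    # Hedged usage sketch (added for illustration; ``SCVI`` is one concrete
    # subclass of this base class and the directory name is hypothetical):
    #   model = SCVI(adata)
    #   model.train()
    #   model.save("my_model/", overwrite=True, save_anndata=True)
    #   model = SCVI.load("my_model/")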
@classmethod
def load(
cls,
dir_path: str,
adata: Optional[AnnData] = None,
use_cuda: bool = False,
):
"""
Instantiate a model from the saved output.
Parameters
----------
dir_path
Path to saved outputs.
adata
AnnData organized in the same way as data used to train model.
It is not necessary to run :func:`~scvi.data.setup_anndata`,
as AnnData is validated against the saved `scvi` setup dictionary.
If None, will check for and load anndata saved with the model.
use_cuda
Whether to load model on GPU.
Returns
-------
Model with loaded state dictionaries.
Examples
--------
>>> vae = SCVI.load(adata, save_path)
>>> vae.get_latent_representation()
"""
load_adata = adata is None
use_cuda = use_cuda and torch.cuda.is_available()
map_location = torch.device("cpu") if use_cuda is False else None
(
scvi_setup_dict,
attr_dict,
var_names,
model_state_dict,
new_adata,
) = _load_saved_files(dir_path, load_adata, map_location=map_location)
adata = new_adata if new_adata is not None else adata
_validate_var_names(adata, var_names)
transfer_anndata_setup(scvi_setup_dict, adata)
model = _initialize_model(cls, adata, attr_dict, use_cuda)
# set saved attrs for loaded model
for attr, val in attr_dict.items():
setattr(model, attr, val)
model.model.load_state_dict(model_state_dict)
if use_cuda:
model.model.cuda()
model.model.eval()
model._validate_anndata(adata)
return model
def __repr__(
self,
):
summary_string = self._model_summary_string
summary_string += "\nTraining status: {}".format(
"Trained" if self.is_trained_ else "Not Trained"
)
rich.print(summary_string)
command = "scvi.data.view_anndata_setup(model.adata)"
command_len = len(command)
print_adata_str = "\n\nTo print summary of associated AnnData, use: " + command
text = Text(print_adata_str)
text.stylize(
"dark_violet", len(print_adata_str) - command_len, len(print_adata_str)
)
console = rich.console.Console()
console.print(text)
return ""
| bsd-3-clause | 729,081,549,799,827,000 | 30.850847 | 95 | 0.578012 | false |
kylehogan/hil | examples/dbinit.py | 4 | 1405 | #!/usr/bin/python
"""
Register nodes with HaaS.
This is intended to be used as a template for either creating a mock HaaS setup
for development or to be modified to register real-life nodes that follow a
particular pattern.
In the example environment for which this module is written, there are 10
nodes which have IPMI interfaces that are sequentially numbered starting with
10.0.0.0, have a username of "ADMIN_USER" and password of "ADMIN_PASSWORD".
The ports are also numbered sequentially and are named following a dell switch
scheme, which have ports that look like "R10SW1::GI1/0/5"
It could be used in an environment similar to the one which
``haas.cfg`` corresponds, though could also be used for development with the
``haas.cfg.dev*``
"""
from subprocess import check_call
N_NODES = 6
ipmi_user = "ADMIN_USER"
ipmi_pass = "ADMIN_PASSWORD"
switch = "mock01"
def haas(*args):
args = map(str, args)
print args
check_call(['haas'] + args)
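# Illustrative note (added): for node 3 the loop below issues, e.g.
#   haas node_register 3 mock 10.0.0.4 ADMIN_USER ADMIN_PASSWORD
#   haas port_register mock01 R10SW1::GI1/0/3
# matching the sequential addressing scheme described in the module docstring.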
haas('switch_register', switch, 'mock', 'ip', 'user', 'pass')
for node in range(N_NODES):
ipmi_ip = "10.0.0." + str(node + 1)
nic_port = "R10SW1::GI1/0/%d" % (node)
nic_name = 'nic1'
haas('node_register', node, "mock", ipmi_ip, ipmi_user, ipmi_pass)
haas('node_register_nic', node, nic_name, 'FillThisInLater')
haas('port_register', switch, nic_port)
haas('port_connect_nic', switch, nic_port, node, nic_name)
| apache-2.0 | -2,548,514,659,423,620,600 | 30.931818 | 79 | 0.706762 | false |
terianil/minimongo | setup.py | 2 | 1695 | # -*- coding: utf-8 -*-
import os
import sys
import subprocess
try:
from setuptools import find_packages, setup, Command
except ImportError:
from distutils.core import find_packages, setup, Command
here = os.path.abspath(os.path.dirname(__file__))
DESCRIPTION = "Minimal database Model management for MongoDB"
try:
LONG_DESCRIPTION = open(os.path.join(here, "README.rst")).read()
except IOError:
print("Warning: IOError raised: cannot open README.rst.")
LONG_DESCRIPTION = DESCRIPTION
CLASSIFIERS = (
"Development Status :: 3 - Alpha",
"License :: OSI Approved :: BSD License"
"Intended Audience :: Developers",
"Topic :: Database",
"Programming Language :: Python",
"Programming Language :: Python :: 2",
)
class PyTest(Command):
"""Unfortunately :mod:`setuptools` support only :mod:`unittest`
based tests, thus, we have to overider build-in ``test`` command
to run :mod:`pytest`."""
user_options = []
initialize_options = finalize_options = lambda self: None
def run(self):
errno = subprocess.call([sys.executable, "runtests.py"])
raise SystemExit(errno)
requires = ["pymongo"]
setup(name="minimongo",
version="0.2.9",
packages=find_packages(),
cmdclass={"test": PyTest},
platforms=["any"],
install_requires = ["pymongo<=2.8", "six"],
zip_safe=False,
include_package_data=True,
author="Steve Lacy",
author_email="[email protected]",
description=DESCRIPTION,
long_description=LONG_DESCRIPTION,
classifiers=CLASSIFIERS,
keywords=["mongo", "mongodb", "pymongo", "orm"],
url="https://github.com/slacy/minimongo",
)
| bsd-2-clause | 8,395,541,138,083,490,000 | 25.076923 | 68 | 0.653687 | false |
Nikea/VisTrails | vistrails/gui/modules/list_configuration.py | 2 | 5171 | from PyQt4 import QtCore, QtGui
from vistrails.core.system import get_vistrails_basic_pkg_id
from vistrails.gui.modules.module_configure import StandardModuleConfigurationWidget
class ListConfigurationWidget(StandardModuleConfigurationWidget):
"""
Configuration widget allowing to choose the number of ports.
This is used to build a List from several modules while ensuring a given
order. If no particular ordering is needed, connecting multiple ports to
the 'head' input ports should be sufficient.
"""
def __init__(self, module, controller, parent=None):
""" ListConfigurationWidget(module: Module,
controller: VistrailController,
parent: QWidget)
                                    -> ListConfigurationWidget
Let StandardModuleConfigurationWidget constructor store the
controller/module object from the builder and set up the
configuration widget.
After StandardModuleConfigurationWidget constructor, all of
these will be available:
        self.module : the Module object in the pipeline
self.controller: the current vistrail controller
"""
StandardModuleConfigurationWidget.__init__(self, module,
controller, parent)
# Give it a nice window title
self.setWindowTitle("List Configuration")
# Add an empty vertical layout
centralLayout = QtGui.QVBoxLayout()
centralLayout.setMargin(0)
centralLayout.setSpacing(0)
self.setLayout(centralLayout)
# Add the configuration widget
config_layout = QtGui.QFormLayout()
self.number = QtGui.QSpinBox()
self.number.setValue(self.countAdditionalPorts())
self.connect(self.number, QtCore.SIGNAL('valueChanged(int)'),
lambda r: self.updateState())
config_layout.addRow("Number of additional connections:", self.number)
centralLayout.addLayout(config_layout)
self.createButtons()
def activate(self):
        self.number.setFocus(QtCore.Qt.ActiveWindowFocusReason)
def createButtons(self):
""" createButtons() -> None
Create and connect signals to Ok & Cancel button
"""
self.buttonLayout = QtGui.QHBoxLayout()
self.buttonLayout.setMargin(5)
self.saveButton = QtGui.QPushButton("&Save", self)
self.saveButton.setFixedWidth(100)
self.saveButton.setEnabled(False)
self.buttonLayout.addWidget(self.saveButton)
self.resetButton = QtGui.QPushButton("&Reset", self)
self.resetButton.setFixedWidth(100)
self.resetButton.setEnabled(False)
self.buttonLayout.addWidget(self.resetButton)
self.layout().addLayout(self.buttonLayout)
self.connect(self.saveButton, QtCore.SIGNAL('clicked(bool)'),
self.saveTriggered)
self.connect(self.resetButton, QtCore.SIGNAL('clicked(bool)'),
self.resetTriggered)
def saveTriggered(self, checked = False):
""" saveTriggered(checked: bool) -> None
Update vistrail controller and module when the user click Ok
"""
if self.updateVistrail():
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.state_changed = False
self.emit(QtCore.SIGNAL('stateChanged'))
self.emit(QtCore.SIGNAL('doneConfigure'), self.module.id)
def closeEvent(self, event):
self.askToSaveChanges()
event.accept()
def updateVistrail(self):
""" updateVistrail() -> None
Update Vistrail to contain changes in the port table
"""
requested = self.number.value()
current = self.countAdditionalPorts()
if requested == current:
# Nothing changed
return
if requested > current:
sigstring = '(%s:Module)' % get_vistrails_basic_pkg_id()
add_ports = [('input', 'item%d' % i, sigstring, -1)
for i in xrange(current, requested)]
self.controller.update_ports(self.module.id, [], add_ports)
elif requested < current:
delete_ports = [('input', p.name) for p in self.module.input_port_specs[requested-current:]]
self.controller.update_ports(self.module.id, delete_ports, [])
return True
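    # Illustrative note (added): with 2 existing extra ports and 4 requested,
    # updateVistrail() above adds input ports named "item2" and "item3";
    # requesting fewer ports instead removes the trailing input_port_specs.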
def countAdditionalPorts(self):
return len(self.module.input_port_specs)
def resetTriggered(self, checked = False):
self.number.setValue(self.countAdditionalPorts())
self.saveButton.setEnabled(False)
self.resetButton.setEnabled(False)
self.state_changed = False
self.emit(QtCore.SIGNAL('stateChanged'))
def updateState(self):
if not self.hasFocus():
self.setFocus(QtCore.Qt.TabFocusReason)
self.saveButton.setEnabled(True)
self.resetButton.setEnabled(True)
if not self.state_changed:
self.state_changed = True
self.emit(QtCore.SIGNAL('stateChanged'))
| bsd-3-clause | 618,113,028,576,103,700 | 38.174242 | 104 | 0.635467 | false |
afronski/playground-other | python/samples/rabbitmq-python/06.RPC/rpc_client.py | 2 | 1182 | import pika
import uuid
class FibonacciRPCClient(object):
def __init__(self):
self.connection = pika.BlockingConnection(pika.ConnectionParameters(host = "localhost"))
self.channel = self.connection.channel()
result = self.channel.queue_declare(exclusive = True)
self.callback_queue = result.method.queue
self.channel.basic_consume(self.onResponse, no_ack = True, queue = self.callback_queue)
def onResponse(self, channel, method, props, body):
if self.corr_id == props.correlation_id:
self.response = body
def call(self, n):
self.response = None
self.corr_id = str(uuid.uuid4())
properties = pika.BasicProperties(reply_to = self.callback_queue, correlation_id = self.corr_id)
body = str(n)
self.channel.basic_publish(exchange = "", routing_key = "rpc_queue", properties = properties, body = body)
while self.response is None:
self.connection.process_data_events()
return int(self.response)
fibonacciRPC = FibonacciRPCClient()
print " [x] Requesting fibonacci(30)."
response = fibonacciRPC.call(30)
print " [.] Got %r." % response | mit | 1,069,546,968,288,265,300 | 30.131579 | 114 | 0.659898 | false |
bratsche/Neutron-Drive | google_appengine/lib/webob_1_1_1/tests/test_misc.py | 5 | 3987 | import cgi, sys
from cStringIO import StringIO
from webob import html_escape, Response
from webob.multidict import *
from nose.tools import eq_ as eq, assert_raises
def test_html_escape():
for v, s in [
# unsafe chars
        ('these chars: < > & "', 'these chars: &lt; &gt; &amp; &quot;'),
        (' ', ' '),
        ('&egrave;', '&amp;egrave;'),
        # The apostrophe is *not* escaped, which some might consider to be
        # a serious bug (see, e.g. http://www.cvedetails.com/cve/CVE-2010-2480/)
        (u'the majestic m\xf8ose', 'the majestic m&#248;ose'),
        #("'", "&#39;")
        # 8-bit strings are passed through
        (u'\xe9', '&#233;'),
        (u'the majestic m\xf8ose'.encode('utf-8'), 'the majestic m\xc3\xb8ose'),
        # ``None`` is treated specially, and returns the empty string.
        (None, ''),
        # Objects that define a ``__html__`` method handle their own escaping
        (t_esc_HTML(), '<div>hello</div>'),
        # Things that are not strings are converted to strings and then escaped
        (42, '42'),
        (Exception("expected a '<'."), "expected a '&lt;'."),
        # If an object implements both ``__str__`` and ``__unicode__``, the latter
        # is preferred
        (t_esc_SuperMoose(), 'm&#248;ose'),
        (t_esc_Unicode(), '&#233;'),
        (t_esc_UnsafeAttrs(), '&lt;UnsafeAttrs&gt;'),
]:
eq(html_escape(v), s)
class t_esc_HTML(object):
def __html__(self):
return '<div>hello</div>'
class t_esc_Unicode(object):
def __unicode__(self):
return u'\xe9'
class t_esc_UnsafeAttrs(object):
attr = 'value'
def __getattr__(self):
return self.attr
def __repr__(self):
return '<UnsafeAttrs>'
class t_esc_SuperMoose(object):
def __str__(self):
return u'm\xf8ose'.encode('UTF-8')
def __unicode__(self):
return u'm\xf8ose'
def test_multidict():
d = MultiDict(a=1, b=2)
eq(d['a'], 1)
eq(d.getall('c'), [])
d.add('a', 2)
eq(d['a'], 2)
eq(d.getall('a'), [1, 2])
d['b'] = 4
eq(d.getall('b'), [4])
eq(d.keys(), ['a', 'a', 'b'])
eq(d.items(), [('a', 1), ('a', 2), ('b', 4)])
eq(d.mixed(), {'a': [1, 2], 'b': 4})
# test getone
# KeyError: "Multiple values match 'a': [1, 2]"
assert_raises(KeyError, d.getone, 'a')
eq(d.getone('b'), 4)
# KeyError: "Key not found: 'g'"
assert_raises(KeyError, d.getone, 'g')
eq(d.dict_of_lists(), {'a': [1, 2], 'b': [4]})
assert 'b' in d
assert 'e' not in d
d.clear()
assert 'b' not in d
d['a'] = 4
d.add('a', 5)
e = d.copy()
assert 'a' in e
e.clear()
e['f'] = 42
d.update(e)
eq(d, MultiDict([('a', 4), ('a', 5), ('f', 42)]))
f = d.pop('a')
eq(f, 4)
eq(d['a'], 5)
eq(d.pop('g', 42), 42)
assert_raises(KeyError, d.pop, 'n')
# TypeError: pop expected at most 2 arguments, got 3
assert_raises(TypeError, d.pop, 4, 2, 3)
d.setdefault('g', []).append(4)
eq(d, MultiDict([('a', 5), ('f', 42), ('g', [4])]))
def test_multidict_init():
d = MultiDict([('a', 'b')], c=2)
eq(repr(d), "MultiDict([('a', 'b'), ('c', 2)])")
eq(d, MultiDict([('a', 'b')], c=2))
# TypeError: MultiDict can only be called with one positional argument
assert_raises(TypeError, MultiDict, 1, 2, 3)
# TypeError: MultiDict.view_list(obj) takes only actual list objects, not None
assert_raises(TypeError, MultiDict.view_list, None)
def test_multidict_cgi():
env = {'QUERY_STRING': ''}
fs = cgi.FieldStorage(environ=env)
fs.filename = '\xc3\xb8'
plain = MultiDict(key='\xc3\xb8', fs=fs)
ua = UnicodeMultiDict(multi=plain, encoding='utf-8')
eq(ua.getall('key'), [u'\xf8'])
eq(repr(ua.getall('fs')), "[FieldStorage(None, u'\\xf8', [])]")
ub = UnicodeMultiDict(multi=ua, encoding='utf-8')
eq(ub.getall('key'), [u'\xf8'])
eq(repr(ub.getall('fs')), "[FieldStorage(None, u'\\xf8', [])]")
| bsd-3-clause | 1,691,257,841,240,426,800 | 26.881119 | 82 | 0.534738 | false |
ageron/tensorflow | tensorflow/examples/speech_commands/models_test.py | 19 | 4908 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for speech commands models."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow as tf
from tensorflow.examples.speech_commands import models
from tensorflow.python.framework import test_util
from tensorflow.python.platform import test
class ModelsTest(test.TestCase):
def _modelSettings(self):
return models.prepare_model_settings(
label_count=10,
sample_rate=16000,
clip_duration_ms=1000,
window_size_ms=20,
window_stride_ms=10,
feature_bin_count=40,
preprocess="mfcc")
def testPrepareModelSettings(self):
self.assertIsNotNone(
models.prepare_model_settings(
label_count=10,
sample_rate=16000,
clip_duration_ms=1000,
window_size_ms=20,
window_stride_ms=10,
feature_bin_count=40,
preprocess="mfcc"))
@test_util.run_deprecated_v1
def testCreateModelConvTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(fingerprint_input,
model_settings, "conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
@test_util.run_deprecated_v1
def testCreateModelConvInference(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits = models.create_model(fingerprint_input, model_settings, "conv",
False)
self.assertIsNotNone(logits)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
@test_util.run_deprecated_v1
def testCreateModelLowLatencyConvTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
fingerprint_input, model_settings, "low_latency_conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
@test_util.run_deprecated_v1
def testCreateModelFullyConnectedTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
fingerprint_input, model_settings, "single_fc", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
def testCreateModelBadArchitecture(self):
model_settings = self._modelSettings()
with self.cached_session():
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
with self.assertRaises(Exception) as e:
models.create_model(fingerprint_input, model_settings,
"bad_architecture", True)
self.assertTrue("not recognized" in str(e.exception))
@test_util.run_deprecated_v1
def testCreateModelTinyConvTraining(self):
model_settings = self._modelSettings()
with self.cached_session() as sess:
fingerprint_input = tf.zeros([1, model_settings["fingerprint_size"]])
logits, dropout_prob = models.create_model(
fingerprint_input, model_settings, "tiny_conv", True)
self.assertIsNotNone(logits)
self.assertIsNotNone(dropout_prob)
self.assertIsNotNone(sess.graph.get_tensor_by_name(logits.name))
self.assertIsNotNone(sess.graph.get_tensor_by_name(dropout_prob.name))
if __name__ == "__main__":
test.main()
| apache-2.0 | -5,846,188,645,384,907,000 | 39.9 | 80 | 0.682559 | false |
ellipsis14/dolfin | test/unit/python/book/test_chapter_1.py | 3 | 5087 | #!/usr/bin/env py.test
"""
Unit tests for Chapter 1 (A FEniCS tutorial).
"""
# Copyright (C) 2011-2014 Hans Petter Langtangen and Anders Logg
#
# This file is part of DOLFIN.
#
# DOLFIN is free software: you can redistribute it and/or modify
# it under the terms of the GNU Lesser General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# DOLFIN is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU Lesser General Public License for more details.
#
# You should have received a copy of the GNU Lesser General Public License
# along with DOLFIN. If not, see <http://www.gnu.org/licenses/>.
from __future__ import print_function
import pytest
import inspect, os, sys
from dolfin import *
from dolfin_utils.test import skip_in_parallel, cd_tempdir, gc_barrier
from runpy import run_path as runpy_run_path
def run_path(path, args):
"Replacement for runpy.run_path when it doesn't exist"
sys.argv = ["foo"] + [str(arg) for arg in args]
try:
runpy_run_path(path)
except SystemExit as e:
if e.args[0] == 0:
pass
else:
raise e
def run_test(path, args=[]):
"Run test script implied by name of calling function, neat trick..."
gc_barrier()
# Figure out name of script to be run
script_name = inspect.stack()[1][3].split("test_")[1] + ".py"
file_path = os.path.join(*([os.path.dirname(__file__)] + ["chapter_1_files"] + \
path + [script_name]))
# Print a message
print()
print("Running tutorial example %s" % file_path)
print("-------------------------------------------------------------------------")
# Remember default DOLFIN parameters
dolfin_parameters = parameters.copy()
# Run script with default parameters
run_path(file_path, args)
# Try reading parameters, might not always work if running without PETSc
# and the parameter file specifies PETSc to be used
try:
file = File(os.path.join(os.path.dirname(__file__), "dolfin_parameters.xml"))
file >> parameters
print()
print("Running again using stored parameter values")
print()
new_parameters = True
except:
print()
print("Unable to read old parameters, skipping this test")
print()
new_parameters = False
# Run script again with book parameters
if new_parameters:
run_path(file_path, args)
# Reset parameters
parameters.assign(dolfin_parameters)
gc_barrier()
@skip_in_parallel
def test_dn3_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_dnr_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_d5_p2D(cd_tempdir):
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_d1_p2D(cd_tempdir):
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_paD():
run_test(["stationary", "poisson"], [8, 8])
@skip_in_parallel
def test_d3_p2D(cd_tempdir):
run_test(["stationary", "poisson"], [1])
@skip_in_parallel
def test_d6_p2D():
run_test(["stationary", "poisson"], [1])
@skip_in_parallel
def test_dn2_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_d2_p2D(cd_tempdir):
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_mat2x_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_dn1_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_dn4_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def disabled_test_vcp2D():
# Disabled since it depends on scitools
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_d4_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_mat2_p2D():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_membrane1v():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_membrane1():
run_test(["stationary", "poisson"])
@skip_in_parallel
def test_pde_newton_np():
run_test(["stationary", "nonlinear_poisson"], [1, 8, 8])
@skip_in_parallel
def test_picard_np():
run_test(["stationary", "nonlinear_poisson"], [1, 8, 8])
@skip_in_parallel
def test_vp1_np():
run_test(["stationary", "nonlinear_poisson"], ["a", "g", 1, 8, 8])
@skip_in_parallel
def test_vp2_np():
run_test(["stationary", "nonlinear_poisson"], ["a", "g", 1, 8, 8])
@skip_in_parallel
def test_alg_newton_np():
run_test(["stationary", "nonlinear_poisson"], [1, 8, 8])
@skip_in_parallel
def test_d1_d2D():
run_test(["transient", "diffusion"])
@skip_in_parallel
def test_d2_d2D():
run_test(["transient", "diffusion"])
@skip_in_parallel
def disabled_test_sin_daD():
# Disabled since it depends on scitools
run_test(["transient", "diffusion"], [1, 1.5, 4, 40])
| gpl-3.0 | -5,259,891,238,202,269,000 | 25.357513 | 86 | 0.643601 | false |
RoboCupULaval/RULEngine | Util/Vector.py | 3 | 8830 | # Under MIT License, see LICENSE.txt
import math as m
from .Position import Position
from .Pose import Pose
class Vector(object):
def __init__(self, length=1.0, direction=0.0):
"""
Args:
length (float): norme du vecteur
direction (float): orientation en radians
"""
assert isinstance(length, (int, float)), 'length should be int or float value.'
assert isinstance(direction, (int, float)), 'direction should be int or float value.'
x = length * m.cos(direction)
y = length * m.sin(direction)
self._attributes = [length, direction, x, y]
# *** GETTER / SETTER ***
def _getlength(self):
return self._attributes[0]
def _setlength(self, length):
assert (isinstance(length, (int, float)))
self._attributes[0] = length
self._attributes[2] = length * m.cos(self._attributes[1])
self._attributes[3] = length * m.sin(self._attributes[1])
""" Make self.length with setter and getter attributes """
length = property(_getlength, _setlength)
def _getdirection(self):
return self._attributes[1]
def _setdirection(self, direction: float):
assert isinstance(direction, (int, float))
self._attributes[1] = direction
self._attributes[2] = self._attributes[0] * m.cos(direction)
self._attributes[3] = self._attributes[0] * m.sin(direction)
"""Make self.direction with setter and getter attributes """
direction = property(_getdirection, _setdirection)
def _getx(self):
return self._attributes[2]
def _setx(self, x: float):
assert isinstance(x, (int, float)), 'value should be Position or int or float.'
self._attributes[2] = x
self._attributes[0] = m.sqrt(x ** 2 + self._attributes[3] ** 2)
self._attributes[1] = m.atan2(self._attributes[3], x)
""" Make self.x with setter and getter attributes """
x = property(_getx, _setx)
def _gety(self):
return self._attributes[3]
def _sety(self, y):
assert (isinstance(y, (int, float)))
self._attributes[3] = y
self._attributes[0] = m.sqrt(y ** 2 + self._attributes[2] ** 2)
self._attributes[1] = m.atan2(y, self._attributes[2])
""" Make self.y with setter and getter attributes """
y = property(_gety, _sety)
# *** OPERATORS ***
def __eq__(self, other) -> bool:
"""
        Two vectors are equal if they have the same norm and the same
        orientation
        Args:
            other (Vector): right-hand side operand
        Returns:
            True if both vectors are equal
        """
        assert isinstance(other, Vector)
        #: FIXME: this compares floats by rounding; a proper tolerance check
        #: (e.g. math.isclose) would be more robust
        return round(self.length, 10) == round(other.length, 10) and \
               round(self.direction, 10) == round(other.direction, 10)
def __ne__(self, other):
"""
The != operator
:param other: The comparison vector
:return: A boolean stating whether the two Vectors are not equal
"""
assert (isinstance(other, Vector))
return not self.__eq__(other)
def __add__(self, other):
"""
The + operator
:param other: A Position, a Pose or a Vector
:return: An object of the same type as the input parameter other
Note : if other is of type Pose, returns a new Pose whose orientation is the same as the current vector
"""
assert (isinstance(other, (Position, Pose, Vector)))
if isinstance(other, Position):
return Position(other.x + self.x, other.y + self.y)
elif isinstance(other, Pose):
p = Position(other.position.x + self.x, other.position.y + self.y)
return Pose(p, self.direction)
elif isinstance(other, Vector):
x = self.x + other.x
y = self.y + other.y
return Vector(m.sqrt(x ** 2 + y ** 2), m.atan2(y, x))
def __radd__(self, other):
"""
Allows commutativity for Position + Vector and Pose + Vector
:param other: A Position or a Pose
:return: An object of the same type as the input parameter other
Note : if other is of type Pose, returns a new Pose whose orientation is the same as the current vector
"""
assert (isinstance(other, (Position, Pose)))
if isinstance(other, Position):
return Position(other.x + self.x, other.y + self.y)
elif isinstance(other, Pose):
p = Position(other.position.x + self.x, other.position.y + self.y)
return Pose(p, self.direction)
def __iadd__(self, other):
"""
The += operator
:param other: A Vector to add to the current Vector
:return: The current Vector is modified
"""
assert (isinstance(other, Vector))
x = self.x + other.x
y = self.y + other.y
self.length = m.sqrt(x ** 2 + y ** 2)
self.direction = m.atan2(y, x)
return self
def __sub__(self, other):
"""
The - operator
:param other: A Vector
:return: The new Vector resulting from the substraction
"""
assert (isinstance(other, Vector))
x = self.x - other.x
y = self.y - other.y
return Vector(m.sqrt(x ** 2 + y ** 2), m.atan2(y, x))
def __isub__(self, other):
"""
The -= operator
:param other: A Vector to substract from the current Vector
:return: The current Vector is modified
"""
assert (isinstance(other, Vector))
x = self.x - other.x
y = self.y - other.y
self.length = m.sqrt(x ** 2 + y ** 2)
self.direction = m.atan2(y, x)
return self
def __neg__(self):
"""
The unary arithmetic operation -
:return: the opposite vector
"""
return self.__mul__(-1)
def __mul__(self, scalar):
"""
Scalar Multiplication
:param scalar: a real number
:return: a new vector resulting of the scalar multiplication
"""
assert (isinstance(scalar, (int, float)))
if scalar >= 0:
return Vector(length=scalar * self.length, direction=self.direction)
        else:
            # a negative scalar flips the vector, i.e. rotates it by pi radians
            return Vector(length=-1 * scalar * self.length, direction=self.direction + m.pi)
def __rmul__(self, scalar):
"""
Allows commutativity for int*Vector
:param scalar: a real number
:return: a new vector resulting of the scalar multiplication
"""
assert (isinstance(scalar, (int, float)))
if scalar >= 0:
return Vector(length=scalar * self.length, direction=self.direction)
        else:
            # a negative scalar flips the vector, i.e. rotates it by pi radians
            return Vector(length=-1 * scalar * self.length, direction=self.direction + m.pi)
def __imul__(self, scalar):
"""
Incremental scalar multiplication
:param scalar: a real number
:return: the current resized vector
"""
assert(isinstance(scalar, (int, float)))
if scalar >= 0:
self.length *= scalar
        else:
            # a negative scalar flips the vector, i.e. rotates it by pi radians
            self.length *= -1 * scalar
            self.direction += m.pi
return self
def __str__(self):
return "(Length = {}, Direction = {})".format(int(self.length), int(self.direction))
def __repr__(self):
return "Vector(" + str(self.length) + ", " + str(self.direction) + ")"
# *** GENERAL METHODS ***
def dot(self, vector):
"""
The dot product
:param vector: The second Vector of the dot product
:return: The result of the dot product in a float
"""
return self.length * vector.length * m.cos(self.direction - vector.direction)
def unit(self):
"""
:return: A unit Vector whose direction is the same as the current Vector
"""
return Vector(length=1, direction=self.direction)
def normal(self, plus90=True):
"""
:param plus90: A boolean stating if the direction of the normal Vector is equal to the direction of
the current Vector plus pi/2 (True) or minus pi/2 (False)
:return: A unit Vector perpendicular to the current Vector
"""
if plus90:
return Vector(length=1, direction=self.direction + m.pi / 2)
else:
return Vector(length=1, direction=self.direction - m.pi / 2)
def getangle(self, vector):
"""
:param vector: The Vector
:return: The smallest angle between the two Vectors, in radians
"""
return m.fabs(self.direction - vector.direction)
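    # Hedged usage sketch (added for illustration; not part of the original
    # module -- the relative imports above mean it is normally used from
    # inside the package rather than run directly):
    #   v1 = Vector(length=2.0, direction=0.0)       # x = 2.0, y = 0.0
    #   v2 = Vector(length=2.0, direction=m.pi / 2)  # x ~ 0.0, y = 2.0
    #   v3 = v1 + v2          # length ~ 2.83, direction ~ pi / 4
    #   v1.dot(v2)            # ~ 0.0, the vectors are perpendicular
    #   v1.getangle(v2)       # pi / 2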
| mit | -61,378,902,882,137,520 | 34.584677 | 111 | 0.565666 | false |
lvdongr/spark | python/pyspark/serializers.py | 3 | 23903 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
PySpark supports custom serializers for transferring data; this can improve
performance.
By default, PySpark uses L{PickleSerializer} to serialize objects using Python's
C{cPickle} serializer, which can serialize nearly any Python object.
Other serializers, like L{MarshalSerializer}, support fewer datatypes but can be
faster.
The serializer is chosen when creating L{SparkContext}:
>>> from pyspark.context import SparkContext
>>> from pyspark.serializers import MarshalSerializer
>>> sc = SparkContext('local', 'test', serializer=MarshalSerializer())
>>> sc.parallelize(list(range(1000))).map(lambda x: 2 * x).take(10)
[0, 2, 4, 6, 8, 10, 12, 14, 16, 18]
>>> sc.stop()
PySpark serializes objects in batches; by default, the batch size is chosen based
on the size of objects and is also configurable by SparkContext's C{batchSize}
parameter:
>>> sc = SparkContext('local', 'test', batchSize=2)
>>> rdd = sc.parallelize(range(16), 4).map(lambda x: x)
Behind the scenes, this creates a JavaRDD with four partitions, each of
which contains two batches of two objects:
>>> rdd.glom().collect()
[[0, 1, 2, 3], [4, 5, 6, 7], [8, 9, 10, 11], [12, 13, 14, 15]]
>>> int(rdd._jrdd.count())
8
>>> sc.stop()
"""
import sys
from itertools import chain, product
import marshal
import struct
import types
import collections
import zlib
import itertools
if sys.version < '3':
import cPickle as pickle
protocol = 2
from itertools import izip as zip, imap as map
else:
import pickle
protocol = 3
xrange = range
from pyspark import cloudpickle
from pyspark.util import _exception_message
__all__ = ["PickleSerializer", "MarshalSerializer", "UTF8Deserializer"]
class SpecialLengths(object):
END_OF_DATA_SECTION = -1
PYTHON_EXCEPTION_THROWN = -2
TIMING_DATA = -3
END_OF_STREAM = -4
NULL = -5
START_ARROW_STREAM = -6
class Serializer(object):
def dump_stream(self, iterator, stream):
"""
Serialize an iterator of objects to the output stream.
"""
raise NotImplementedError
def load_stream(self, stream):
"""
Return an iterator of deserialized objects from the input stream.
"""
raise NotImplementedError
def _load_stream_without_unbatching(self, stream):
"""
Return an iterator of deserialized batches (iterable) of objects from the input stream.
If the serializer does not operate on batches the default implementation returns an
iterator of single element lists.
"""
return map(lambda x: [x], self.load_stream(stream))
# Note: our notion of "equality" is that output generated by
# equal serializers can be deserialized using the same serializer.
# This default implementation handles the simple cases;
# subclasses should override __eq__ as appropriate.
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not self.__eq__(other)
def __repr__(self):
return "%s()" % self.__class__.__name__
def __hash__(self):
return hash(str(self))
class FramedSerializer(Serializer):
"""
Serializer that writes objects as a stream of (length, data) pairs,
where C{length} is a 32-bit integer and data is C{length} bytes.
"""
def __init__(self):
# On Python 2.6, we can't write bytearrays to streams, so we need to convert them
# to strings first. Check if the version number is that old.
self._only_write_strings = sys.version_info[0:2] <= (2, 6)
def dump_stream(self, iterator, stream):
for obj in iterator:
self._write_with_length(obj, stream)
def load_stream(self, stream):
while True:
try:
yield self._read_with_length(stream)
except EOFError:
return
def _write_with_length(self, obj, stream):
serialized = self.dumps(obj)
if serialized is None:
raise ValueError("serialized value should not be None")
if len(serialized) > (1 << 31):
raise ValueError("can not serialize object larger than 2G")
write_int(len(serialized), stream)
if self._only_write_strings:
stream.write(str(serialized))
else:
stream.write(serialized)
def _read_with_length(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
obj = stream.read(length)
if len(obj) < length:
raise EOFError
return self.loads(obj)
def dumps(self, obj):
"""
Serialize an object into a byte array.
When batching is used, this will be called with an array of objects.
"""
raise NotImplementedError
def loads(self, obj):
"""
Deserialize an object from a byte array.
"""
raise NotImplementedError
class ArrowStreamSerializer(Serializer):
"""
Serializes Arrow record batches as a stream.
"""
def dump_stream(self, iterator, stream):
import pyarrow as pa
writer = None
try:
for batch in iterator:
if writer is None:
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield batch
def __repr__(self):
return "ArrowStreamSerializer"
def _create_batch(series, timezone):
"""
Create an Arrow record batch from the given pandas.Series or list of Series, with optional type.
:param series: A single pandas.Series, list of Series, or list of (series, arrow_type)
:param timezone: A timezone to respect when handling timestamp values
:return: Arrow RecordBatch
"""
import decimal
from distutils.version import LooseVersion
import pyarrow as pa
from pyspark.sql.types import _check_series_convert_timestamps_internal
# Make input conform to [(series1, type1), (series2, type2), ...]
if not isinstance(series, (list, tuple)) or \
(len(series) == 2 and isinstance(series[1], pa.DataType)):
series = [series]
series = ((s, None) if not isinstance(s, (list, tuple)) else s for s in series)
def create_array(s, t):
mask = s.isnull()
# Ensure timestamp series are in expected form for Spark internal representation
# TODO: maybe don't need None check anymore as of Arrow 0.9.1
if t is not None and pa.types.is_timestamp(t):
s = _check_series_convert_timestamps_internal(s.fillna(0), timezone)
# TODO: need cast after Arrow conversion, ns values cause error with pandas 0.19.2
return pa.Array.from_pandas(s, mask=mask).cast(t, safe=False)
elif t is not None and pa.types.is_string(t) and sys.version < '3':
# TODO: need decode before converting to Arrow in Python 2
# TODO: don't need as of Arrow 0.9.1
return pa.Array.from_pandas(s.apply(
lambda v: v.decode("utf-8") if isinstance(v, str) else v), mask=mask, type=t)
elif t is not None and pa.types.is_decimal(t) and \
LooseVersion("0.9.0") <= LooseVersion(pa.__version__) < LooseVersion("0.10.0"):
# TODO: see ARROW-2432. Remove when the minimum PyArrow version becomes 0.10.0.
return pa.Array.from_pandas(s.apply(
lambda v: decimal.Decimal('NaN') if v is None else v), mask=mask, type=t)
return pa.Array.from_pandas(s, mask=mask, type=t)
arrs = [create_array(s, t) for s, t in series]
return pa.RecordBatch.from_arrays(arrs, ["_%d" % i for i in xrange(len(arrs))])
class ArrowStreamPandasSerializer(Serializer):
"""
Serializes Pandas.Series as Arrow data with Arrow streaming format.
"""
def __init__(self, timezone):
super(ArrowStreamPandasSerializer, self).__init__()
self._timezone = timezone
def arrow_to_pandas(self, arrow_column):
from pyspark.sql.types import from_arrow_type, \
_check_series_convert_date, _check_series_localize_timestamps
s = arrow_column.to_pandas()
s = _check_series_convert_date(s, from_arrow_type(arrow_column.type))
s = _check_series_localize_timestamps(s, self._timezone)
return s
def dump_stream(self, iterator, stream):
"""
Make ArrowRecordBatches from Pandas Series and serialize. Input is a single series or
a list of series accompanied by an optional pyarrow type to coerce the data to.
"""
import pyarrow as pa
writer = None
try:
for series in iterator:
batch = _create_batch(series, self._timezone)
if writer is None:
write_int(SpecialLengths.START_ARROW_STREAM, stream)
writer = pa.RecordBatchStreamWriter(stream, batch.schema)
writer.write_batch(batch)
finally:
if writer is not None:
writer.close()
def load_stream(self, stream):
"""
Deserialize ArrowRecordBatches to an Arrow table and return as a list of pandas.Series.
"""
import pyarrow as pa
reader = pa.open_stream(stream)
for batch in reader:
yield [self.arrow_to_pandas(c) for c in pa.Table.from_batches([batch]).itercolumns()]
def __repr__(self):
return "ArrowStreamPandasSerializer"
class BatchedSerializer(Serializer):
"""
Serializes a stream of objects in batches by calling its wrapped
Serializer with streams of objects.
"""
UNLIMITED_BATCH_SIZE = -1
UNKNOWN_BATCH_SIZE = 0
def __init__(self, serializer, batchSize=UNLIMITED_BATCH_SIZE):
self.serializer = serializer
self.batchSize = batchSize
def _batched(self, iterator):
if self.batchSize == self.UNLIMITED_BATCH_SIZE:
yield list(iterator)
elif hasattr(iterator, "__len__") and hasattr(iterator, "__getslice__"):
n = len(iterator)
for i in xrange(0, n, self.batchSize):
yield iterator[i: i + self.batchSize]
else:
items = []
count = 0
for item in iterator:
items.append(item)
count += 1
if count == self.batchSize:
yield items
items = []
count = 0
if items:
yield items
def dump_stream(self, iterator, stream):
self.serializer.dump_stream(self._batched(iterator), stream)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def _load_stream_without_unbatching(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "BatchedSerializer(%s, %d)" % (str(self.serializer), self.batchSize)
class FlattenedValuesSerializer(BatchedSerializer):
"""
Serializes a stream of list of pairs, split the list of values
which contain more than a certain number of objects to make them
have similar sizes.
"""
def __init__(self, serializer, batchSize=10):
BatchedSerializer.__init__(self, serializer, batchSize)
def _batched(self, iterator):
n = self.batchSize
for key, values in iterator:
for i in range(0, len(values), n):
yield key, values[i:i + n]
def load_stream(self, stream):
return self.serializer.load_stream(stream)
def __repr__(self):
return "FlattenedValuesSerializer(%s, %d)" % (self.serializer, self.batchSize)
class AutoBatchedSerializer(BatchedSerializer):
"""
Choose the size of batch automatically based on the size of object
"""
def __init__(self, serializer, bestSize=1 << 16):
BatchedSerializer.__init__(self, serializer, self.UNKNOWN_BATCH_SIZE)
self.bestSize = bestSize
def dump_stream(self, iterator, stream):
batch, best = 1, self.bestSize
iterator = iter(iterator)
while True:
vs = list(itertools.islice(iterator, batch))
if not vs:
break
bytes = self.serializer.dumps(vs)
write_int(len(bytes), stream)
stream.write(bytes)
size = len(bytes)
if size < best:
batch *= 2
elif size > best * 10 and batch > 1:
batch //= 2
def __repr__(self):
return "AutoBatchedSerializer(%s)" % self.serializer
class CartesianDeserializer(Serializer):
"""
Deserializes the JavaRDD cartesian() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD cartesian,
we additionally need to do the cartesian within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# for correctness with repeated cartesian/zip this must be returned as one batch
yield product(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "CartesianDeserializer(%s, %s)" % \
(str(self.key_ser), str(self.val_ser))
class PairDeserializer(Serializer):
"""
Deserializes the JavaRDD zip() of two PythonRDDs.
Due to pyspark batching we cannot simply use the result of the Java RDD zip,
we additionally need to do the zip within each pair of batches.
"""
def __init__(self, key_ser, val_ser):
self.key_ser = key_ser
self.val_ser = val_ser
def _load_stream_without_unbatching(self, stream):
key_batch_stream = self.key_ser._load_stream_without_unbatching(stream)
val_batch_stream = self.val_ser._load_stream_without_unbatching(stream)
for (key_batch, val_batch) in zip(key_batch_stream, val_batch_stream):
# For double-zipped RDDs, the batches can be iterators from other PairDeserializer,
# instead of lists. We need to convert them to lists if needed.
key_batch = key_batch if hasattr(key_batch, '__len__') else list(key_batch)
val_batch = val_batch if hasattr(val_batch, '__len__') else list(val_batch)
if len(key_batch) != len(val_batch):
raise ValueError("Can not deserialize PairRDD with different number of items"
" in batches: (%d, %d)" % (len(key_batch), len(val_batch)))
# for correctness with repeated cartesian/zip this must be returned as one batch
yield zip(key_batch, val_batch)
def load_stream(self, stream):
return chain.from_iterable(self._load_stream_without_unbatching(stream))
def __repr__(self):
return "PairDeserializer(%s, %s)" % (str(self.key_ser), str(self.val_ser))
class NoOpSerializer(FramedSerializer):
def loads(self, obj):
return obj
def dumps(self, obj):
return obj
# Hack namedtuple, make it picklable
__cls = {}
def _restore(name, fields, value):
""" Restore an object of namedtuple"""
k = (name, fields)
cls = __cls.get(k)
if cls is None:
cls = collections.namedtuple(name, fields)
__cls[k] = cls
return cls(*value)
def _hack_namedtuple(cls):
""" Make class generated by namedtuple picklable """
name = cls.__name__
fields = cls._fields
def __reduce__(self):
return (_restore, (name, fields, tuple(self)))
cls.__reduce__ = __reduce__
cls._is_namedtuple_ = True
return cls
def _hijack_namedtuple():
""" Hack namedtuple() to make it picklable """
# hijack only one time
if hasattr(collections.namedtuple, "__hijack"):
return
global _old_namedtuple # or it will put in closure
global _old_namedtuple_kwdefaults # or it will put in closure too
def _copy_func(f):
return types.FunctionType(f.__code__, f.__globals__, f.__name__,
f.__defaults__, f.__closure__)
def _kwdefaults(f):
# __kwdefaults__ contains the default values of keyword-only arguments which are
# introduced from Python 3. The possible cases for __kwdefaults__ in namedtuple
# are as below:
#
# - Does not exist in Python 2.
# - Returns None in <= Python 3.5.x.
# - Returns a dictionary containing the default values to the keys from Python 3.6.x
# (See https://bugs.python.org/issue25628).
kargs = getattr(f, "__kwdefaults__", None)
if kargs is None:
return {}
else:
return kargs
_old_namedtuple = _copy_func(collections.namedtuple)
_old_namedtuple_kwdefaults = _kwdefaults(collections.namedtuple)
def namedtuple(*args, **kwargs):
for k, v in _old_namedtuple_kwdefaults.items():
kwargs[k] = kwargs.get(k, v)
cls = _old_namedtuple(*args, **kwargs)
return _hack_namedtuple(cls)
# replace namedtuple with the new one
collections.namedtuple.__globals__["_old_namedtuple_kwdefaults"] = _old_namedtuple_kwdefaults
collections.namedtuple.__globals__["_old_namedtuple"] = _old_namedtuple
collections.namedtuple.__globals__["_hack_namedtuple"] = _hack_namedtuple
collections.namedtuple.__code__ = namedtuple.__code__
collections.namedtuple.__hijack = 1
# hack the cls already generated by namedtuple.
# Those created in other modules can be pickled as normal,
# so only hack those in __main__ module
for n, o in sys.modules["__main__"].__dict__.items():
if (type(o) is type and o.__base__ is tuple
and hasattr(o, "_fields")
and "__reduce__" not in o.__dict__):
_hack_namedtuple(o) # hack inplace
_hijack_namedtuple()
class PickleSerializer(FramedSerializer):
"""
Serializes objects using Python's pickle serializer:
http://docs.python.org/2/library/pickle.html
This serializer supports nearly any Python object, but may
not be as fast as more specialized serializers.
"""
def dumps(self, obj):
return pickle.dumps(obj, protocol)
if sys.version >= '3':
def loads(self, obj, encoding="bytes"):
return pickle.loads(obj, encoding=encoding)
else:
def loads(self, obj, encoding=None):
return pickle.loads(obj)
class CloudPickleSerializer(PickleSerializer):
def dumps(self, obj):
try:
return cloudpickle.dumps(obj, 2)
except pickle.PickleError:
raise
except Exception as e:
emsg = _exception_message(e)
if "'i' format requires" in emsg:
msg = "Object too large to serialize: %s" % emsg
else:
msg = "Could not serialize object: %s: %s" % (e.__class__.__name__, emsg)
cloudpickle.print_exec(sys.stderr)
raise pickle.PicklingError(msg)
class MarshalSerializer(FramedSerializer):
"""
Serializes objects using Python's Marshal serializer:
http://docs.python.org/2/library/marshal.html
This serializer is faster than PickleSerializer but supports fewer datatypes.
"""
def dumps(self, obj):
return marshal.dumps(obj)
def loads(self, obj):
return marshal.loads(obj)
class AutoSerializer(FramedSerializer):
"""
Choose marshal or pickle as serialization protocol automatically
"""
def __init__(self):
FramedSerializer.__init__(self)
self._type = None
def dumps(self, obj):
if self._type is not None:
return b'P' + pickle.dumps(obj, -1)
try:
return b'M' + marshal.dumps(obj)
except Exception:
self._type = b'P'
return b'P' + pickle.dumps(obj, -1)
def loads(self, obj):
_type = obj[0]
if _type == b'M':
return marshal.loads(obj[1:])
elif _type == b'P':
return pickle.loads(obj[1:])
else:
raise ValueError("invalid serialization type: %s" % _type)
class CompressedSerializer(FramedSerializer):
"""
Compress the serialized data
"""
def __init__(self, serializer):
FramedSerializer.__init__(self)
assert isinstance(serializer, FramedSerializer), "serializer must be a FramedSerializer"
self.serializer = serializer
def dumps(self, obj):
return zlib.compress(self.serializer.dumps(obj), 1)
def loads(self, obj):
return self.serializer.loads(zlib.decompress(obj))
def __repr__(self):
return "CompressedSerializer(%s)" % self.serializer
class UTF8Deserializer(Serializer):
"""
Deserializes streams written by String.getBytes.
"""
def __init__(self, use_unicode=True):
self.use_unicode = use_unicode
def loads(self, stream):
length = read_int(stream)
if length == SpecialLengths.END_OF_DATA_SECTION:
raise EOFError
elif length == SpecialLengths.NULL:
return None
s = stream.read(length)
return s.decode("utf-8") if self.use_unicode else s
def load_stream(self, stream):
try:
while True:
yield self.loads(stream)
except struct.error:
return
except EOFError:
return
def __repr__(self):
return "UTF8Deserializer(%s)" % self.use_unicode
def read_long(stream):
length = stream.read(8)
if not length:
raise EOFError
return struct.unpack("!q", length)[0]
def write_long(value, stream):
stream.write(struct.pack("!q", value))
def pack_long(value):
return struct.pack("!q", value)
def read_int(stream):
length = stream.read(4)
if not length:
raise EOFError
return struct.unpack("!i", length)[0]
def write_int(value, stream):
stream.write(struct.pack("!i", value))
def read_bool(stream):
length = stream.read(1)
if not length:
raise EOFError
return struct.unpack("!?", length)[0]
def write_with_length(obj, stream):
write_int(len(obj), stream)
stream.write(obj)
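# Illustrative sketch (not part of the original module): the read_*/write_*
# helpers implement a simple big-endian, length-prefixed framing over any
# file-like stream.  A minimal in-memory round-trip using only the helpers above:
def _example_framing_roundtrip():
    import io
    buf = io.BytesIO()
    write_with_length(b"hello", buf)   # 4-byte length prefix + payload
    buf.seek(0)
    return buf.read(read_int(buf))     # -> b"hello"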
if __name__ == '__main__':
import doctest
(failure_count, test_count) = doctest.testmod()
if failure_count:
sys.exit(-1)
| apache-2.0 | -8,785,708,909,053,550,000 | 31.388889 | 100 | 0.621972 | false |
dataxu/jenkins-job-builder | jenkins_jobs/modules/notifications.py | 2 | 3891 | # Copyright 2012 Hewlett-Packard Development Company, L.P.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
The Notifications module allows you to configure Jenkins to notify
other applications about various build phases. It requires the
Jenkins notification plugin.
**Component**: notifications
:Macro: notification
:Entry Point: jenkins_jobs.notifications
"""
import xml.etree.ElementTree as XML
from jenkins_jobs.errors import JenkinsJobsException
import jenkins_jobs.modules.base
def http_endpoint(registry, xml_parent, data):
"""yaml: http
Defines an HTTP notification endpoint.
Requires the Jenkins :jenkins-wiki:`Notification Plugin
<Notification+Plugin>`.
:arg str format: notification payload format, JSON (default) or XML
:arg str event: job events that trigger notifications: started,
completed, finalized or all (default)
:arg str url: URL of the endpoint
:arg str timeout: Timeout in milliseconds for sending notification
request (30 seconds by default)
    :arg str log: Number of lines of log messages to send (0 by default).
Use -1 for all (use with caution).
Example:
.. literalinclude:: \
/../../tests/notifications/fixtures/http-endpoint002.yaml
:language: yaml
"""
endpoint_element = XML.SubElement(xml_parent,
'com.tikal.hudson.plugins.notification.'
'Endpoint')
supported_formats = ['JSON', 'XML']
fmt = data.get('format', 'JSON').upper()
if fmt not in supported_formats:
raise JenkinsJobsException(
"format must be one of %s" %
", ".join(supported_formats))
else:
XML.SubElement(endpoint_element, 'format').text = fmt
XML.SubElement(endpoint_element, 'protocol').text = 'HTTP'
supported_events = ['started', 'completed', 'finalized', 'all']
event = data.get('event', 'all').lower()
if event not in supported_events:
raise JenkinsJobsException(
"event must be one of %s" %
", ".join(supported_events))
else:
XML.SubElement(endpoint_element, 'event').text = event
XML.SubElement(endpoint_element, 'timeout').text = str(data.get('timeout',
30000))
XML.SubElement(endpoint_element, 'url').text = data['url']
XML.SubElement(endpoint_element, 'loglines').text = str(data.get('log', 0))
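# Illustrative sketch (not part of the original module): a job definition
# fragment that this component would consume; all values below are invented
# examples, not fixtures shipped with the plugin.
#
#     - job:
#         name: example-job
#         notifications:
#           - http:
#               url: http://example.com/jenkins-notify
#               event: completed
#               format: JSON
#               timeout: 20000
#               log: 10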
class Notifications(jenkins_jobs.modules.base.Base):
sequence = 22
component_type = 'notification'
component_list_type = 'notifications'
def gen_xml(self, xml_parent, data):
properties = xml_parent.find('properties')
if properties is None:
properties = XML.SubElement(xml_parent, 'properties')
notifications = data.get('notifications', [])
if notifications:
notify_element = XML.SubElement(properties,
'com.tikal.hudson.plugins.'
'notification.'
'HudsonNotificationProperty')
endpoints_element = XML.SubElement(notify_element, 'endpoints')
for endpoint in notifications:
self.registry.dispatch('notification',
endpoints_element, endpoint)
| apache-2.0 | -2,689,454,665,151,219,000 | 36.057143 | 79 | 0.63634 | false |
xzturn/tensorflow | tensorflow/compiler/mlir/tensorflow/tests/tf_saved_model/partially_shaped_variables.py | 20 | 1677 | # Copyright 2019 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# RUN: %p/partially_shaped_variables | FileCheck %s
# pylint: disable=missing-docstring,line-too-long
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import tensorflow.compat.v2 as tf
from tensorflow.compiler.mlir.tensorflow.tests.tf_saved_model import common
class TestModule(tf.Module):
def __init__(self):
super(TestModule, self).__init__()
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, {{.*}} tf_saved_model.exported_names = ["v0"], type = tensor<*xf32>, value = dense<0.000000e+00> : tensor<1xf32>} : () -> ()
# CHECK: "tf_saved_model.global_tensor"() {is_mutable, {{.*}} tf_saved_model.exported_names = ["v1"], type = tensor<?xf32>, value = dense<[0.000000e+00, 1.000000e+00]> : tensor<2xf32>} : () -> ()
self.v0 = tf.Variable([0.], shape=tf.TensorShape(None))
self.v1 = tf.Variable([0., 1.], shape=[None])
if __name__ == '__main__':
common.do_test(TestModule, exported_names=[])
| apache-2.0 | -3,186,341,485,859,464,700 | 43.131579 | 199 | 0.666667 | false |
bernard357/shellbot | shellbot/lists/__init__.py | 1 | 5677 | # Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import logging
from shellbot import Context
from .base import List
__all__ = [
'List',
'ListFactory',
]
class ListFactory(object):
"""
Manages named lists
Example::
factory = ListFactory(context=my_context)
factory.configure()
...
my_list = factory.get_list('The Famous Four')
"""
def __init__(self, context=None):
self.context = context if context else Context()
self.lists = {}
def configure(self):
"""
Loads lists as defined in context
This function looks for the key ``lists`` and below in the
context, and creates a dictionary of named lists.
Example configuration in YAML format::
lists:
- name: The Famous Four
items:
- [email protected]
- [email protected]
- [email protected]
- [email protected]
- name: Support Team
items:
- [email protected]
- [email protected]
Note that list names are all put to lower case internally, for easy
subsequent references. With the previous examples, you can
retrieve the first list with `The Famous Four` or with
        `the famous four`. This is especially convenient for lists used
as commands, when invoked from a mobile device.
"""
settings = self.context.get('lists', [])
for attributes in settings:
if not isinstance(attributes, dict):
logging.warning(u"Found a list that is not a dictionary")
logging.debug(u"- {}".format(str(attributes)))
continue
name = attributes.get('name')
if not name:
logging.warning(u"Missing attribute 'name' in list")
logging.debug(u"- {}".format(str(attributes)))
continue
name = name.lower() # align across chat devices
self.lists[name] = self.build_list(attributes)
def build_list(self, attributes):
"""
Builds one list
Example in YAML::
- name: The Famous Four
as_command: true
items:
- [email protected]
- [email protected]
- [email protected]
- [email protected]
The ``as_command`` parameter is a boolean that indicates if the list
can be used as a shell command. When ``as_command`` is set to true,
the named list appears in the list of shell commands. Members of the
list are added to a channel when the name of the list is submitted to
the shell.
"""
assert isinstance(attributes, dict)
items = attributes.get('items', [])
list = List(items=items)
list.name = attributes.get('name')
list.as_command = attributes.get('as_command', False)
return list
def get_list(self, name):
"""
Gets a named list
:param name: Name of the target list
:type name: str
:return: an iterator
An empty list is returned when the name is unknown.
Example use case, where an alert is sent to members of a team::
for person in factory.get_list('SupportTeam'):
number = get_phone_number(person)
send_sms(important_message, number)
"""
if name:
name = name.lower() # align across chat devices
return self.lists.get(name, [])
def list_commands(self):
"""
Lists items that can be invoked as shell commands
:return: an iterator of list names
"""
for name in self.lists.keys():
list = self.lists[name]
if list.as_command:
yield list.name
def apply_to_list(self, name, apply):
"""
Handles each item of a named list
:param name: designates the list to use
:type name: str
:param apply: the function that is applied to each item
:type apply: callable
This function calls the provided function for each item of a named
list.
For example, you could write an alerting system like this::
def alert(person):
number = get_phone_number(person)
send_sms(important_message, number)
factory.apply_to_list('SupportTeam', alert)
Lambda functions are welcome as well. For example, this can be useful
for the straightforward addition of participants to a given bot::
factory.apply_to_list(name='SupportTeam',
apply=lambda x: my_bot.add_participant(x))
"""
for item in self.get_list(name):
apply(item)
| apache-2.0 | -4,369,942,357,568,065,500 | 29.686486 | 77 | 0.586225 | false |
edfungus/Crouton-ESP8266-Example | luatool.py | 1 | 11277 | #!/usr/bin/env python2
#
# ESP8266 luatool
# Author e-mail: [email protected]
# Site: http://esp8266.ru
# Contributions from: https://github.com/sej7278
#
# This program is free software; you can redistribute it and/or modify it under
# the terms of the GNU General Public License as published by the Free Software
# Foundation; either version 2 of the License, or (at your option) any later version.
#
# This program is distributed in the hope that it will be useful, but WITHOUT
# ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS
# FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along with
# this program; if not, write to the Free Software Foundation, Inc., 51 Franklin
# Street, Fifth Floor, Boston, MA 02110-1301 USA.
import sys
import serial
from time import sleep
import socket
import argparse
from os.path import basename
version = "0.6.4"
class TransportError(Exception):
"""Custom exception to represent errors with a transport
"""
def __init__(self, message):
self.message = message
class AbstractTransport:
def __init__(self):
raise NotImplementedError('abstract transports cannot be instantiated.')
def close(self):
raise NotImplementedError('Function not implemented')
def read(self, length):
raise NotImplementedError('Function not implemented')
def writeln(self, data, check=1):
raise NotImplementedError('Function not implemented')
def writer(self, data):
self.writeln("file.writeline([==[" + data + "]==])\r")
def performcheck(self, expected):
line = ''
char = ''
while char != chr(62): # '>'
char = self.read(1)
if char == '':
raise Exception('No proper answer from MCU')
            if char == chr(13) or char == chr(10): # CR or LF
if line != '':
line = line.strip()
if line+'\r' == expected:
sys.stdout.write(" -> ok")
else:
if line[:4] == "lua:":
sys.stdout.write("\r\n\r\nLua ERROR: %s" % line)
raise Exception('ERROR from Lua interpreter\r\n\r\n')
else:
expected = expected.split("\r")[0]
sys.stdout.write("\r\n\r\nERROR")
sys.stdout.write("\r\n send string : '%s'" % data)
sys.stdout.write("\r\n expected echo : '%s'" % data)
sys.stdout.write("\r\n but got answer : '%s'" % line)
sys.stdout.write("\r\n\r\n")
raise Exception('Error sending data to MCU\r\n\r\n')
line = ''
else:
line += char
class SerialTransport(AbstractTransport):
def __init__(self, port, baud):
self.port = port
self.baud = baud
self.serial = None
try:
self.serial = serial.Serial(port, baud)
except serial.SerialException as e:
raise TransportError(e.strerror)
self.serial.timeout = 3
self.serial.interCharTimeout = 3
def writeln(self, data, check=1):
if self.serial.inWaiting() > 0:
self.serial.flushInput()
if len(data) > 0:
sys.stdout.write("\r\n->")
sys.stdout.write(data.split("\r")[0])
self.serial.write(data)
sleep(0.3)
if check > 0:
self.performcheck(data)
else:
sys.stdout.write(" -> send without check")
def read(self, length):
return self.serial.read(length)
def close(self):
self.serial.flush()
self.serial.close()
class TcpSocketTransport(AbstractTransport):
def __init__(self, host, port):
self.host = host
self.port = port
self.socket = None
try:
self.socket = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
except socket.error as e:
raise TransportError(e.strerror)
try:
self.socket.connect((host, port))
except socket.error as e:
raise TransportError(e.strerror)
# read intro from telnet server (see telnet_srv.lua)
self.socket.recv(50)
def writeln(self, data, check=1):
if len(data) > 0:
sys.stdout.write("\r\n->")
sys.stdout.write(data.split("\r")[0])
self.socket.sendall(data)
if check > 0:
self.performcheck(data)
else:
sys.stdout.write(" -> send without check")
def read(self, length):
return self.socket.recv(length)
def close(self):
self.socket.close()
def decidetransport(cliargs):
if cliargs.ip:
data = cliargs.ip.split(':')
host = data[0]
if len(data) == 2:
port = int(data[1])
else:
port = 23
return TcpSocketTransport(host, port)
else:
return SerialTransport(cliargs.port, cliargs.baud)
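# Illustrative sketch (not part of the original script): both transports expose
# the same AbstractTransport interface, so upload code only needs writeln(),
# read() and close().  Port and baudrate below are invented examples:
#
#     transport = SerialTransport('/dev/ttyUSB0', 9600)
#     transport.writeln('file.open("init.lua", "w")\r')
#     transport.writeln('file.close()\r')
#     transport.close()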
if __name__ == '__main__':
# parse arguments or use defaults
parser = argparse.ArgumentParser(description='ESP8266 Lua script uploader.')
parser.add_argument('-p', '--port', default='/dev/ttyUSB0', help='Device name, default /dev/ttyUSB0')
parser.add_argument('-b', '--baud', default=9600, help='Baudrate, default 9600')
parser.add_argument('-f', '--src', default='main.lua', help='Source file on computer, default main.lua')
parser.add_argument('-t', '--dest', default=None, help='Destination file on MCU, default to source file name')
parser.add_argument('-c', '--compile', action='store_true', help='Compile lua to lc after upload')
parser.add_argument('-r', '--restart', action='store_true', help='Restart MCU after upload')
parser.add_argument('-d', '--dofile', action='store_true', help='Run the Lua script after upload')
parser.add_argument('-v', '--verbose', action='store_true', help="Show progress messages.")
parser.add_argument('-a', '--append', action='store_true', help='Append source file to destination file.')
parser.add_argument('-l', '--list', action='store_true', help='List files on device')
parser.add_argument('-w', '--wipe', action='store_true', help='Delete all lua/lc files on device.')
parser.add_argument('-i', '--id', action='store_true', help='Query the modules chip id.')
parser.add_argument('--delete', default=None, help='Delete a lua/lc file from device.')
parser.add_argument('--ip', default=None, help='Connect to a telnet server on the device (--ip IP[:port])')
args = parser.parse_args()
transport = decidetransport(args)
if args.list:
transport.writeln("local l = file.list();for k,v in pairs(l) do print('name:'..k..', size:'..v)end\r", 0)
while True:
char = transport.read(1)
if char == '' or char == chr(62):
break
sys.stdout.write(char)
sys.exit(0)
if args.id:
transport.writeln("=node.chipid()\r", 0)
id=""
while True:
char = transport.read(1)
if char == '' or char == chr(62):
break
if char.isdigit():
id += char
print("\n"+id)
sys.exit(0)
if args.wipe:
transport.writeln("local l = file.list();for k,v in pairs(l) do print(k)end\r", 0)
file_list = []
fn = ""
while True:
char = transport.read(1)
if char == '' or char == chr(62):
break
if char not in ['\r', '\n']:
fn += char
else:
if fn:
file_list.append(fn.strip())
fn = ''
for fn in file_list[1:]: # first line is the list command sent to device
if args.verbose:
sys.stderr.write("Delete file {} from device.\r\n".format(fn))
transport.writeln("file.remove(\"" + fn + "\")\r")
sys.exit(0)
if args.delete:
transport.writeln("file.remove(\"" + args.delete + "\")\r")
sys.exit(0)
if args.dest is None:
args.dest = basename(args.src)
# open source file for reading
try:
f = open(args.src, "rt")
except:
sys.stderr.write("Could not open input file \"%s\"\n" % args.src)
sys.exit(1)
# Verify the selected file will not exceed the size of the serial buffer.
# The size of the buffer is 256. This script does not accept files with
# lines longer than 230 characters to have some room for command overhead.
for ln in f:
if len(ln) > 230:
sys.stderr.write("File \"%s\" contains a line with more than 240 "
"characters. This exceeds the size of the serial buffer.\n"
% args.src)
f.close()
sys.exit(1)
# Go back to the beginning of the file after verifying it has the correct
# line length
f.seek(0)
# set serial timeout
if args.verbose:
sys.stderr.write("Upload starting\r\n")
# remove existing file on device
if args.append==False:
if args.verbose:
sys.stderr.write("Stage 1. Deleting old file from flash memory")
transport.writeln("file.open(\"" + args.dest + "\", \"w\")\r")
transport.writeln("file.close()\r")
transport.writeln("file.remove(\"" + args.dest + "\")\r")
else:
if args.verbose:
sys.stderr.write("[SKIPPED] Stage 1. Deleting old file from flash memory [SKIPPED]")
# read source file line by line and write to device
if args.verbose:
sys.stderr.write("\r\nStage 2. Creating file in flash memory and write first line")
if args.append:
transport.writeln("file.open(\"" + args.dest + "\", \"a+\")\r")
else:
transport.writeln("file.open(\"" + args.dest + "\", \"w+\")\r")
line = f.readline()
if args.verbose:
sys.stderr.write("\r\nStage 3. Start writing data to flash memory...")
while line != '':
transport.writer(line.strip())
line = f.readline()
# close both files
f.close()
if args.verbose:
sys.stderr.write("\r\nStage 4. Flush data and closing file")
transport.writeln("file.flush()\r")
transport.writeln("file.close()\r")
# compile?
if args.compile:
if args.verbose:
sys.stderr.write("\r\nStage 5. Compiling")
transport.writeln("node.compile(\"" + args.dest + "\")\r")
transport.writeln("file.remove(\"" + args.dest + "\")\r")
# restart or dofile
if args.restart:
transport.writeln("node.restart()\r")
if args.dofile: # never exec if restart=1
transport.writeln("dofile(\"" + args.dest + "\")\r", 0)
# close serial port
transport.close()
# flush screen
sys.stdout.flush()
sys.stderr.flush()
sys.stderr.write("\r\n--->>> All done <<<---\r\n")
| mit | 2,816,334,924,600,987,000 | 35.028754 | 132 | 0.562118 | false |
anentropic/py-mysql2pgsql | mysql2pgsql/lib/postgres_writer.py | 1 | 11590 | from __future__ import absolute_import
import re
from cStringIO import StringIO
from datetime import datetime, date, timedelta
from psycopg2.extensions import QuotedString, Binary, AsIs
from .writer import Writer
class PostgresWriter(Writer):
"""Base class for :py:class:`mysql2pgsql.lib.postgres_file_writer.PostgresFileWriter`
and :py:class:`mysql2pgsql.lib.postgres_db_writer.PostgresDbWriter`.
"""
def column_description(self, column):
return '"%s" %s' % (column['name'], self.column_type_info(column))
def column_type(self, column):
return self.column_type_info(column).split(" ")[0]
def column_type_info(self, column):
"""
"""
if column.get('auto_increment', None):
return 'integer DEFAULT nextval(\'%s_%s_seq\'::regclass) NOT NULL' % (
column['table_name'], column['name'])
null = "" if column['null'] else " NOT NULL"
def get_type(column):
"""This in conjunction with :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader._convert_type`
determines the PostgreSQL data type. In my opinion this is way too fugly, will need
to refactor one day.
"""
def t(v): return not v == None
default = (' DEFAULT %s' % QuotedString(column['default']).getquoted()) if t(column['default']) else None
if column['type'] == 'char':
default = ('%s::char' % default) if t(default) else None
return default, 'character(%s)' % column['length']
elif column['type'] == 'varchar':
default = ('%s::character varying' % default) if t(default) else None
return default, 'character varying(%s)' % column['length']
elif column['type'] == 'integer':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'integer'
elif column['type'] == 'bigint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'bigint'
elif column['type'] == 'tinyint':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'smallint'
elif column['type'] == 'boolean':
default = (" DEFAULT %s" % ('true' if int(column['default']) == 1 else 'false')) if t(default) else None
return default, 'boolean'
elif column['type'] == 'float':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] == 'float unsigned':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'real'
elif column['type'] in ('numeric', 'decimal'):
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'numeric(%s, %s)' % (column['length'] or 20, column['decimals'] or 0)
elif column['type'] == 'double precision':
default = (" DEFAULT %s" % (column['default'] if t(column['default']) else 'NULL')) if t(default) else None
return default, 'double precision'
elif column['type'] == 'datetime':
default = None
return default, 'timestamp without time zone'
elif column['type'] == 'date':
default = None
return default, 'date'
elif column['type'] == 'timestamp':
if "CURRENT_TIMESTAMP" in column['default']:
default = ' DEFAULT CURRENT_TIMESTAMP'
if "0000-00-00 00:00" in column['default']:
default = " DEFAULT '1970-01-01 00:00'"
if "0000-00-00 00:00:00" in column['default']:
default = " DEFAULT '1970-01-01 00:00:00'"
return default, 'timestamp without time zone'
elif column['type'] == 'time':
default = " DEFAULT NOW()" if t(default) else None
return default, 'time without time zone'
elif 'blob' in column['type'] or 'binary' in column['type']:
return default, 'bytea'
elif column['type'] in ('tinytext', 'mediumtext', 'longtext', 'text'):
return default, 'text'
elif re.search(r'^enum', column['type']):
default = (' %s::character varying' % default) if t(default) else None
enum = re.sub(r'enum|\(|\)', '', column['type'])
max_enum_size = max([(len(e) - 2) for e in enum.split(',')])
return default, ' character varying(%s) check(%s in (%s))' % (max_enum_size, column['name'], enum)
elif 'bit(' in column['type']:
return ' DEFAULT %s' % column['default'].upper() if column['default'] else column['default'], 'varbit(%s)' % re.search(r'\((\d+)\)', column['type']).group(1)
elif 'set(' in column['type']:
if default:
default = ' DEFAULT ARRAY[%s]::text[]' % ','.join(QuotedString(v).getquoted() for v in re.search(r"'(.*)'", default).group(1).split(','))
return default, 'text[]'
else:
raise Exception('unknown %s' % column['type'])
default, column_type = get_type(column)
return '%s%s%s' % (column_type, (default if not default == None else ''), null)
def process_row(self, table, row):
"""Examines row data from MySQL and alters
the values when necessary to be compatible with
sending to PostgreSQL via the copy command
"""
for index, column in enumerate(table.columns):
column_type = self.column_type(column)
if row[index] == None and ('timestamp' not in column_type or not column['default']):
row[index] = '\N'
elif row[index] == None and column['default']:
row[index] = '1970-01-01 00:00:00'
elif 'bit' in column_type:
row[index] = bin(ord(row[index]))[2:]
elif row[index].__class__ in (str, unicode):
if column_type == 'bytea':
row[index] = Binary(row[index]).getquoted()[1:-8] if row[index] else row[index]
elif 'text[' in column_type:
row[index] = '{%s}' % ','.join('"%s"' % v.replace('"', r'\"') for v in row[index].split(','))
else:
row[index] = row[index].replace('\\', r'\\').replace('\n', r'\n').replace('\t', r'\t').replace('\r', r'\r').replace('\0', '')
elif column_type == 'boolean':
row[index] = 't' if row[index] == 1 else 'f' if row[index] == 0 else row[index]
elif row[index].__class__ in (date, datetime):
row[index] = row[index].isoformat()
elif row[index].__class__ is timedelta:
row[index] = datetime.utcfromtimestamp(row[index].total_seconds()).time().isoformat()
else:
row[index] = AsIs(row[index]).getquoted()
def table_attributes(self, table):
primary_keys = []
serial_key = None
maxval = None
columns = StringIO()
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
if column['primary_key']:
primary_keys.append(column['name'])
columns.write(' %s,\n' % self.column_description(column))
return primary_keys, serial_key, maxval, columns.getvalue()[:-2]
def truncate(self, table):
serial_key = None
maxval = None
for column in table.columns:
if column['auto_increment']:
serial_key = column['name']
maxval = 1 if column['maxval'] < 1 else column['maxval'] + 1
truncate_sql = 'TRUNCATE "%s" CASCADE;' % table.name
serial_key_sql = None
if serial_key:
serial_key_sql = "SELECT pg_catalog.setval(pg_get_serial_sequence(%(table_name)s, %(serial_key)s), %(maxval)s, true);" % {
'table_name': QuotedString(table.name).getquoted(),
'serial_key': QuotedString(serial_key).getquoted(),
'maxval': maxval}
return (truncate_sql, serial_key_sql)
def write_table(self, table):
primary_keys, serial_key, maxval, columns = self.table_attributes(table)
serial_key_sql = []
table_sql = []
if serial_key:
serial_key_seq = '%s_%s_seq' % (table.name, serial_key)
serial_key_sql.append('DROP SEQUENCE IF EXISTS %s CASCADE;' % serial_key_seq)
serial_key_sql.append("""CREATE SEQUENCE %s INCREMENT BY 1
NO MAXVALUE NO MINVALUE CACHE 1;""" % serial_key_seq)
serial_key_sql.append('SELECT pg_catalog.setval(%s, %s, true);' % (QuotedString(serial_key_seq).getquoted(), maxval))
table_sql.append('DROP TABLE IF EXISTS "%s" CASCADE;' % table.name)
table_sql.append('CREATE TABLE "%s" (\n%s\n)\nWITHOUT OIDS;' % (table.name, columns))
return (table_sql, serial_key_sql)
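    # Illustrative sketch (not part of the original class): for a hypothetical
    # table "users" with a serial primary key "id" and maxval 42, write_table()
    # yields SQL along these lines (all names invented for illustration):
    #
    #     DROP SEQUENCE IF EXISTS users_id_seq CASCADE;
    #     CREATE SEQUENCE users_id_seq INCREMENT BY 1 NO MAXVALUE NO MINVALUE CACHE 1;
    #     SELECT pg_catalog.setval('users_id_seq', 42, true);
    #     DROP TABLE IF EXISTS "users" CASCADE;
    #     CREATE TABLE "users" ( "id" integer ... NOT NULL, ... ) WITHOUT OIDS;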
def write_indexes(self, table):
index_sql = []
primary_index = [idx for idx in table.indexes if idx.get('primary', None)]
if primary_index:
index_sql.append('ALTER TABLE "%(table_name)s" ADD CONSTRAINT "%(index_name)s_pkey" PRIMARY KEY(%(column_names)s);' % {
'table_name': table.name,
'index_name': '%s_%s' % (table.name, '_'.join(re.sub('[\W]+', '', c) for c in primary_index[0]['columns'])),
'column_names': ', '.join('%s' % col for col in primary_index[0]['columns']),
})
for index in table.indexes:
if 'primary' in index:
continue
unique = 'UNIQUE ' if index.get('unique', None) else ''
index_name = '%s_%s' % (table.name, '_'.join(index['columns']))
index_sql.append('DROP INDEX IF EXISTS "%s" CASCADE;' % index_name)
index_sql.append('CREATE %(unique)sINDEX "%(index_name)s" ON "%(table_name)s" (%(column_names)s);' % {
'unique': unique,
'index_name': index_name,
'table_name': table.name,
'column_names': ', '.join('"%s"' % col for col in index['columns']),
})
return index_sql
def write_constraints(self, table):
constraint_sql = []
for key in table.foreign_keys:
constraint_sql.append("""ALTER TABLE "%(table_name)s" ADD FOREIGN KEY ("%(column_name)s")
REFERENCES "%(ref_table_name)s"(%(ref_column_name)s);""" % {
'table_name': table.name,
'column_name': key['column'],
'ref_table_name': key['ref_table'],
'ref_column_name': key['ref_column']})
return constraint_sql
def close(self):
raise NotImplementedError
def write_contents(self, table, reader):
raise NotImplementedError
| mit | 6,767,887,667,339,594,000 | 50.057269 | 173 | 0.534944 | false |
developerworks/horizon | horizon/dashboards/nova/instances_and_volumes/volumes/forms.py | 1 | 4640 | # vim: tabstop=4 shiftwidth=4 softtabstop=4
# Copyright 2012 Nebula, Inc.
# All rights reserved.
"""
Views for managing Nova volumes.
"""
import logging
from django import shortcuts
from django.contrib import messages
from django.utils.translation import ugettext as _
from horizon import api
from horizon import forms
from horizon import exceptions
from novaclient import exceptions as novaclient_exceptions
from .tables import ACTIVE_STATES
LOG = logging.getLogger(__name__)
class CreateForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label="Volume Name")
description = forms.CharField(widget=forms.Textarea,
label=_("Description"), required=False)
size = forms.IntegerField(min_value=1, label="Size (GB)")
def handle(self, request, data):
try:
api.volume_create(request, data['size'], data['name'],
data['description'])
message = 'Creating volume "%s"' % data['name']
LOG.info(message)
messages.info(request, message)
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in CreateVolume")
messages.error(request,
_('Error Creating Volume: %s') % e.message)
return shortcuts.redirect("horizon:nova:instances_and_volumes:index")
class AttachForm(forms.SelfHandlingForm):
instance = forms.ChoiceField(label="Attach to Instance",
help_text=_("Select an instance to "
"attach to."))
device = forms.CharField(label="Device Name", initial="/dev/vdc")
def __init__(self, *args, **kwargs):
super(AttachForm, self).__init__(*args, **kwargs)
# populate volume_id
volume_id = kwargs.get('initial', {}).get('volume_id', [])
self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
initial=volume_id)
# Populate instance choices
instance_list = kwargs.get('initial', {}).get('instances', [])
instances = [('', "Select an instance")]
for instance in instance_list:
if instance.status in ACTIVE_STATES:
instances.append((instance.id, '%s (%s)' % (instance.name,
instance.id)))
self.fields['instance'].choices = instances
def handle(self, request, data):
try:
api.volume_attach(request,
data['volume_id'],
data['instance'],
data['device'])
vol_name = api.volume_get(request, data['volume_id']).displayName
message = (_('Attaching volume %(vol)s to instance \
%(inst)s at %(dev)s') %
{"vol": vol_name, "inst": data['instance'],
"dev": data['device']})
LOG.info(message)
messages.info(request, message)
except novaclient_exceptions.ClientException, e:
LOG.exception("ClientException in AttachVolume")
messages.error(request,
_('Error attaching volume: %s') % e.message)
return shortcuts.redirect(
"horizon:nova:instances_and_volumes:index")
class CreateSnapshotForm(forms.SelfHandlingForm):
name = forms.CharField(max_length="255", label=_("Snapshot Name"))
description = forms.CharField(widget=forms.Textarea,
label=_("Description"), required=False)
def __init__(self, *args, **kwargs):
super(CreateSnapshotForm, self).__init__(*args, **kwargs)
# populate volume_id
volume_id = kwargs.get('initial', {}).get('volume_id', [])
self.fields['volume_id'] = forms.CharField(widget=forms.HiddenInput(),
initial=volume_id)
def handle(self, request, data):
try:
api.volume_snapshot_create(request,
data['volume_id'],
data['name'],
data['description'])
message = _('Creating volume snapshot "%s"') % data['name']
LOG.info(message)
messages.info(request, message)
except:
exceptions.handle(request,
_('Error Creating Volume Snapshot: %(exc)s'))
return shortcuts.redirect("horizon:nova:images_and_snapshots:index")
| apache-2.0 | -1,323,758,585,300,165,000 | 38.65812 | 78 | 0.55194 | false |
valkyriesavage/invenio | modules/bibsched/lib/bibtask.py | 1 | 38662 | # -*- coding: utf-8 -*-
##
## This file is part of Invenio.
## Copyright (C) 2007, 2008, 2009, 2010, 2011 CERN.
##
## Invenio is free software; you can redistribute it and/or
## modify it under the terms of the GNU General Public License as
## published by the Free Software Foundation; either version 2 of the
## License, or (at your option) any later version.
##
## Invenio is distributed in the hope that it will be useful, but
## WITHOUT ANY WARRANTY; without even the implied warranty of
## MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU
## General Public License for more details.
##
## You should have received a copy of the GNU General Public License
## along with Invenio; if not, write to the Free Software Foundation, Inc.,
## 59 Temple Place, Suite 330, Boston, MA 02111-1307, USA.
"""Invenio Bibliographic Task Class.
BibTask class.
A BibTask is an executable under CFG_BINDIR, whose name is stored in
bibtask_config.CFG_BIBTASK_VALID_TASKS.
A valid task must call the task_init function with the proper parameters.
Generic task-related parameters (user, sleeptime, runtime, task_id, task_name,
verbose)
go to the _TASK_PARAMS global dictionary, accessible through task_get_task_param.
Options specific to the particular BibTask go to the _OPTIONS global dictionary
and are accessible via task_get_option/task_set_option.
In order to log something properly, just use write_message(s) with the desired
verbose level.
task_update_status and task_update_progress can be used to update the status
of the task (DONE, FAILED, DONE WITH ERRORS...) and its progress
(1 out of 100...) within the bibsched monitor.
It is possible to enqueue a BibTask via API call by means of
task_low_level_submission.
"""
__revision__ = "$Id$"
import getopt
import getpass
import marshal
import os
import pwd
import re
import signal
import sys
import time
import datetime
import traceback
import logging
import logging.handlers
from invenio.dbquery import run_sql, _db_login
from invenio.access_control_engine import acc_authorize_action
from invenio.config import CFG_PREFIX, CFG_BINDIR, CFG_LOGDIR, \
CFG_BIBSCHED_PROCESS_USER, CFG_TMPDIR
from invenio.errorlib import register_exception
from invenio.access_control_config import CFG_EXTERNAL_AUTH_USING_SSO, \
CFG_EXTERNAL_AUTHENTICATION
from invenio.webuser import get_user_preferences, get_email
from invenio.bibtask_config import CFG_BIBTASK_VALID_TASKS, \
CFG_BIBTASK_DEFAULT_TASK_SETTINGS
from invenio.dateutils import parse_runtime_limit
# Global _TASK_PARAMS dictionary.
_TASK_PARAMS = {
'version': '',
'task_stop_helper_fnc': None,
'task_name': os.path.basename(sys.argv[0]),
'task_specific_name': '',
'user': '',
# If the task is not initialized (usually a developer debugging
# a single method), output all messages.
'verbose': 9,
'sleeptime': '',
'runtime': time.strftime("%Y-%m-%d %H:%M:%S"),
'priority': 0,
'runtime_limit': None,
'profile': [],
}
# Global _OPTIONS dictionary.
_OPTIONS = {}
# Which tasks don't need to ask the user for authorization?
CFG_VALID_PROCESSES_NO_AUTH_NEEDED = ("bibupload", )
CFG_TASK_IS_NOT_A_DEAMON = ("bibupload", )
def fix_argv_paths(paths, argv=None):
"""Given the argv vector of cli parameters, and a list of path that
can be relative and may have been specified within argv,
it substitute all the occurencies of these paths in argv.
argv is changed in place and returned.
"""
if argv is None:
argv = sys.argv
for path in paths:
for count in xrange(len(argv)):
if path == argv[count]:
argv[count] = os.path.abspath(path)
return argv
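# Illustrative sketch (not part of the original module): relative paths given on
# the command line are rewritten to absolute ones in place (values invented):
#
#     argv = ['bibupload', '-a', 'z.xml']
#     fix_argv_paths(['z.xml'], argv)
#     # argv is now ['bibupload', '-a', '/current/working/dir/z.xml']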
def task_low_level_submission(name, user, *argv):
"""Let special lowlevel enqueuing of a task on the bibsche queue.
@param name: is the name of the bibtask. It must be a valid executable under
C{CFG_BINDIR}.
@type name: string
@param user: is a string that will appear as the "user" submitting the task.
        Since tasks are submitted via API it makes sense to set the
user to the name of the module/function that called
task_low_level_submission.
@type user: string
@param argv: are all the additional CLI parameters that would have been
passed on the CLI (one parameter per variable).
e.g.:
>>> task_low_level_submission('bibupload', 'admin', '-a', '/tmp/z.xml')
@type: strings
@return: the task identifier when the task is correctly enqueued.
@rtype: int
@note: use absolute paths in argv
"""
def get_priority(argv):
"""Try to get the priority by analysing the arguments."""
priority = 0
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'P:', ['priority='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-P', '--priority'):
try:
priority = int(opt[1])
except ValueError:
pass
return priority
def get_special_name(argv):
"""Try to get the special name by analysing the arguments."""
special_name = ''
argv = list(argv)
while True:
try:
opts, args = getopt.gnu_getopt(argv, 'N:', ['name='])
except getopt.GetoptError, err:
## We remove one by one all the non recognized parameters
if len(err.opt) > 1:
argv = [arg for arg in argv if arg != '--%s' % err.opt and not arg.startswith('--%s=' % err.opt)]
else:
argv = [arg for arg in argv if not arg.startswith('-%s' % err.opt)]
else:
break
for opt in opts:
if opt[0] in ('-N', '--name'):
special_name = opt[1]
return special_name
task_id = None
try:
if not name in CFG_BIBTASK_VALID_TASKS:
raise StandardError('%s is not a valid task name' % name)
priority = get_priority(argv)
special_name = get_special_name(argv)
argv = tuple([os.path.join(CFG_BINDIR, name)] + list(argv))
if special_name:
name = '%s:%s' % (name, special_name)
## submit task:
task_id = run_sql("""INSERT INTO schTASK (proc,user,
runtime,sleeptime,status,progress,arguments,priority)
VALUES (%s,%s,NOW(),'','WAITING','',%s,%s)""",
(name, user, marshal.dumps(argv), priority))
except Exception:
register_exception(alert_admin=True)
if task_id:
run_sql("""DELETE FROM schTASK WHERE id=%s""", (task_id, ))
raise
return task_id
def setup_loggers(task_id=None):
"""Sets up the logging system."""
logger = logging.getLogger()
for handler in logger.handlers:
## Let's clean the handlers in case some piece of code has already
## fired any write_message, i.e. any call to debug, info, etc.
## which triggered a call to logging.basicConfig()
logger.removeHandler(handler)
formatter = logging.Formatter('%(asctime)s --> %(message)s', '%Y-%m-%d %H:%M:%S')
if task_id is not None:
err_logger = logging.handlers.RotatingFileHandler(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.err' % _TASK_PARAMS['task_id']), 'a', 1*1024*1024, 10)
log_logger = logging.handlers.RotatingFileHandler(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id']), 'a', 1*1024*1024, 10)
log_logger.setFormatter(formatter)
log_logger.setLevel(logging.DEBUG)
err_logger.setFormatter(formatter)
err_logger.setLevel(logging.WARNING)
logger.addHandler(err_logger)
logger.addHandler(log_logger)
stdout_logger = logging.StreamHandler(sys.stdout)
stdout_logger.setFormatter(formatter)
stdout_logger.setLevel(logging.DEBUG)
stderr_logger = logging.StreamHandler(sys.stderr)
stderr_logger.setFormatter(formatter)
stderr_logger.setLevel(logging.WARNING)
logger.addHandler(stderr_logger)
logger.addHandler(stdout_logger)
logger.setLevel(logging.INFO)
return logger
def task_init(
authorization_action="",
authorization_msg="",
description="",
help_specific_usage="",
version=__revision__,
specific_params=("", []),
task_stop_helper_fnc=None,
task_submit_elaborate_specific_parameter_fnc=None,
task_submit_check_options_fnc=None,
task_run_fnc=None):
""" Initialize a BibTask.
@param authorization_action: is the name of the authorization action
connected with this task;
@param authorization_msg: is the header printed when asking for an
authorization password;
@param description: is the generic description printed in the usage page;
@param help_specific_usage: is the specific parameter help
    @param task_stop_helper_fnc: is a function that will be called
whenever the task is stopped
@param task_submit_elaborate_specific_parameter_fnc: will be called passing
a key and a value, for parsing specific cli parameters. Must return True if
it has recognized the parameter. Must eventually update the options with
    task_set_option;
    @param task_submit_check_options_fnc: must check the validity of options (via
    task_get_option) once all the options were parsed;
@param task_run_fnc: will be called as the main core function. Must return
False in case of errors.
"""
global _TASK_PARAMS, _OPTIONS
_TASK_PARAMS = {
"version" : version,
"task_stop_helper_fnc" : task_stop_helper_fnc,
"task_name" : os.path.basename(sys.argv[0]),
"task_specific_name" : '',
"user" : '',
"verbose" : 1,
"sleeptime" : '',
"runtime" : time.strftime("%Y-%m-%d %H:%M:%S"),
"priority" : 0,
"runtime_limit" : None,
"profile" : [],
}
to_be_submitted = True
if len(sys.argv) == 2 and sys.argv[1].isdigit():
_TASK_PARAMS['task_id'] = int(sys.argv[1])
argv = _task_get_options(_TASK_PARAMS['task_id'], _TASK_PARAMS['task_name'])
to_be_submitted = False
else:
argv = sys.argv
setup_loggers(_TASK_PARAMS.get('task_id'))
if type(argv) is dict:
# FIXME: REMOVE AFTER MAJOR RELEASE 1.0
# This is needed for old task submitted before CLI parameters
# where stored in DB and _OPTIONS dictionary was stored instead.
_OPTIONS = argv
else:
try:
_task_build_params(_TASK_PARAMS['task_name'], argv, description,
help_specific_usage, version, specific_params,
task_submit_elaborate_specific_parameter_fnc,
task_submit_check_options_fnc)
except SystemExit:
raise
except Exception, e:
register_exception(alert_admin=True)
write_message("Error in parsing the parameters: %s." % e, sys.stderr)
write_message("Exiting.", sys.stderr)
if not to_be_submitted:
task_update_status("ERROR")
raise
write_message('argv=%s' % (argv, ), verbose=9)
write_message('_OPTIONS=%s' % (_OPTIONS, ), verbose=9)
write_message('_TASK_PARAMS=%s' % (_TASK_PARAMS, ), verbose=9)
if to_be_submitted:
_task_submit(argv, authorization_action, authorization_msg)
else:
try:
if task_get_task_param('profile'):
try:
from cStringIO import StringIO
import pstats
filename = os.path.join(CFG_TMPDIR, 'bibsched_task_%s.pyprof' % _TASK_PARAMS['task_id'])
existing_sorts = pstats.Stats.sort_arg_dict_default.keys()
required_sorts = []
profile_dump = []
for sort in task_get_task_param('profile'):
if sort not in existing_sorts:
sort = 'cumulative'
if sort not in required_sorts:
required_sorts.append(sort)
if sys.hexversion < 0x02050000:
import hotshot
import hotshot.stats
pr = hotshot.Profile(filename)
ret = pr.runcall(_task_run, task_run_fnc)
for sort_type in required_sorts:
tmp_out = sys.stdout
sys.stdout = StringIO()
hotshot.stats.load(filename).strip_dirs().sort_stats(sort_type).print_stats()
# pylint: disable=E1103
# This is a hack. sys.stdout is a StringIO in this case.
profile_dump.append(sys.stdout.getvalue())
# pylint: enable=E1103
sys.stdout = tmp_out
else:
import cProfile
pr = cProfile.Profile()
ret = pr.runcall(_task_run, task_run_fnc)
pr.dump_stats(filename)
for sort_type in required_sorts:
strstream = StringIO()
pstats.Stats(filename, stream=strstream).strip_dirs().sort_stats(sort_type).print_stats()
profile_dump.append(strstream.getvalue())
profile_dump = '\n'.join(profile_dump)
profile_dump += '\nYou can use profile=%s' % existing_sorts
open(os.path.join(CFG_LOGDIR, 'bibsched_task_%d.log' % _TASK_PARAMS['task_id']), 'a').write("%s" % profile_dump)
os.remove(filename)
except ImportError:
ret = _task_run(task_run_fnc)
write_message("ERROR: The Python Profiler is not installed!", stream=sys.stderr)
else:
ret = _task_run(task_run_fnc)
if not ret:
write_message("Error occurred. Exiting.", sys.stderr)
except Exception, e:
register_exception(alert_admin=True)
write_message("Unexpected error occurred: %s." % e, sys.stderr)
write_message("Traceback is:", sys.stderr)
write_messages(''.join(traceback.format_tb(sys.exc_info()[2])), sys.stderr)
write_message("Exiting.", sys.stderr)
task_update_status("ERROR")
logging.shutdown()
def _task_build_params(
task_name,
argv,
description="",
help_specific_usage="",
version=__revision__,
specific_params=("", []),
task_submit_elaborate_specific_parameter_fnc=None,
task_submit_check_options_fnc=None):
""" Build the BibTask params.
@param argv: a list of string as in sys.argv
@param description: is the generic description printed in the usage page;
@param help_specific_usage: is the specific parameter help
@param task_submit_elaborate_specific_parameter_fnc: will be called passing
a key and a value, for parsing specific cli parameters. Must return True if
it has recognized the parameter. Must eventually update the options with
    task_set_option;
    @param task_submit_check_options_fnc: must check the validity of options (via
    task_get_option) once all the options were parsed;
"""
global _OPTIONS
_OPTIONS = {}
if task_name in CFG_BIBTASK_DEFAULT_TASK_SETTINGS:
_OPTIONS.update(CFG_BIBTASK_DEFAULT_TASK_SETTINGS[task_name])
# set user-defined options:
try:
(short_params, long_params) = specific_params
opts, args = getopt.gnu_getopt(argv[1:], "hVv:u:s:t:P:N:L:" +
short_params, [
"help",
"version",
"verbose=",
"user=",
"sleep=",
"runtime=",
"priority=",
"name=",
"limit=",
"profile="
] + long_params)
except getopt.GetoptError, err:
_usage(1, err, help_specific_usage=help_specific_usage, description=description)
try:
for opt in opts:
if opt[0] in ("-h", "--help"):
_usage(0, help_specific_usage=help_specific_usage, description=description)
elif opt[0] in ("-V", "--version"):
print _TASK_PARAMS["version"]
sys.exit(0)
elif opt[0] in ("-u", "--user"):
_TASK_PARAMS["user"] = opt[1]
elif opt[0] in ("-v", "--verbose"):
_TASK_PARAMS["verbose"] = int(opt[1])
elif opt[0] in ("-s", "--sleeptime"):
if task_name not in CFG_TASK_IS_NOT_A_DEAMON:
get_datetime(opt[1]) # see if it is a valid shift
_TASK_PARAMS["sleeptime"] = opt[1]
elif opt[0] in ("-t", "--runtime"):
_TASK_PARAMS["runtime"] = get_datetime(opt[1])
elif opt[0] in ("-P", "--priority"):
_TASK_PARAMS["priority"] = int(opt[1])
elif opt[0] in ("-N", "--name"):
_TASK_PARAMS["task_specific_name"] = opt[1]
elif opt[0] in ("-L", "--limit"):
_TASK_PARAMS["runtime_limit"] = parse_runtime_limit(opt[1])
elif opt[0] in ("--profile", ):
_TASK_PARAMS["profile"] += opt[1].split(',')
elif not callable(task_submit_elaborate_specific_parameter_fnc) or \
not task_submit_elaborate_specific_parameter_fnc(opt[0],
opt[1], opts, args):
_usage(1, help_specific_usage=help_specific_usage, description=description)
except StandardError, e:
_usage(e, help_specific_usage=help_specific_usage, description=description)
if callable(task_submit_check_options_fnc):
if not task_submit_check_options_fnc():
_usage(1, help_specific_usage=help_specific_usage, description=description)
def task_set_option(key, value):
"""Set an value to key in the option dictionary of the task"""
global _OPTIONS
try:
_OPTIONS[key] = value
except NameError:
_OPTIONS = {key : value}
def task_get_option(key, default=None):
"""Returns the value corresponding to key in the option dictionary of the task"""
try:
return _OPTIONS.get(key, default)
except NameError:
return default
def task_has_option(key):
"""Map the has_key query to _OPTIONS"""
try:
return _OPTIONS.has_key(key)
except NameError:
return False
def task_get_task_param(key, default=None):
"""Returns the value corresponding to the particular task param"""
try:
return _TASK_PARAMS.get(key, default)
except NameError:
return default
def task_set_task_param(key, value):
"""Set the value corresponding to the particular task param"""
global _TASK_PARAMS
try:
_TASK_PARAMS[key] = value
except NameError:
_TASK_PARAMS = {key : value}
def task_update_progress(msg):
"""Updates progress information in the BibSched task table."""
write_message("Updating task progress to %s." % msg, verbose=9)
if "task_id" in _TASK_PARAMS:
return run_sql("UPDATE schTASK SET progress=%s where id=%s",
(msg, _TASK_PARAMS["task_id"]))
def task_update_status(val):
"""Updates status information in the BibSched task table."""
write_message("Updating task status to %s." % val, verbose=9)
if "task_id" in _TASK_PARAMS:
return run_sql("UPDATE schTASK SET status=%s where id=%s",
(val, _TASK_PARAMS["task_id"]))
def task_read_status():
"""Read status information in the BibSched task table."""
res = run_sql("SELECT status FROM schTASK where id=%s",
(_TASK_PARAMS['task_id'],), 1)
try:
out = res[0][0]
except:
out = 'UNKNOWN'
return out
def write_messages(msgs, stream=sys.stdout, verbose=1):
"""Write many messages through write_message"""
for msg in msgs.split('\n'):
write_message(msg, stream, verbose)
def write_message(msg, stream=sys.stdout, verbose=1):
"""Write message and flush output stream (may be sys.stdout or sys.stderr).
Useful for debugging stuff."""
if msg and _TASK_PARAMS['verbose'] >= verbose:
if stream == sys.stdout:
logging.info(msg)
elif stream == sys.stderr:
logging.error(msg)
else:
sys.stderr.write("Unknown stream %s. [must be sys.stdout or sys.stderr]\n" % stream)
else:
logging.debug(msg)
_RE_SHIFT = re.compile("([-\+]{0,1})([\d]+)([dhms])")
def get_datetime(var, format_string="%Y-%m-%d %H:%M:%S"):
"""Returns a date string according to the format string.
It can handle normal date strings and shifts with respect
to now."""
date = time.time()
factors = {"d":24*3600, "h":3600, "m":60, "s":1}
m = _RE_SHIFT.match(var)
if m:
sign = m.groups()[0] == "-" and -1 or 1
factor = factors[m.groups()[2]]
value = float(m.groups()[1])
date = time.localtime(date + sign * factor * value)
date = time.strftime(format_string, date)
else:
date = time.strptime(var, format_string)
date = time.strftime(format_string, date)
return date
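# Illustrative sketch (not part of the original module): get_datetime() accepts
# either an absolute date in the given format or a shift relative to now:
#
#     get_datetime('2011-12-31 23:59:59')   # parsed and reformatted as-is
#     get_datetime('+2h')                   # two hours from now
#     get_datetime('-30m')                  # thirty minutes ago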
def task_sleep_now_if_required(can_stop_too=False):
"""This function should be called during safe state of BibTask,
e.g. after flushing caches or outside of run_sql calls.
"""
status = task_read_status()
write_message('Entering task_sleep_now_if_required with status=%s' % status, verbose=9)
if status == 'ABOUT TO SLEEP':
write_message("sleeping...")
task_update_status("SLEEPING")
signal.signal(signal.SIGTSTP, _task_sig_dumb)
os.kill(os.getpid(), signal.SIGSTOP)
time.sleep(1)
task_update_status("CONTINUING")
write_message("... continuing...")
signal.signal(signal.SIGTSTP, _task_sig_sleep)
elif status == 'ABOUT TO STOP' and can_stop_too:
write_message("stopped")
task_update_status("STOPPED")
sys.exit(0)
runtime_limit = task_get_option("limit")
if runtime_limit is not None:
if not (runtime_limit[0] <= time.time() <= runtime_limit[1]):
if can_stop_too:
write_message("stopped (outside runtime limit)")
task_update_status("STOPPED")
sys.exit(0)
def authenticate(user, authorization_action, authorization_msg=""):
"""Authenticate the user against the user database.
Check for its password, if it exists.
Check for authorization_action access rights.
Return user name upon authorization success,
do system exit upon authorization failure.
"""
# With SSO it's impossible to check for pwd
if CFG_EXTERNAL_AUTH_USING_SSO or os.path.basename(sys.argv[0]) in CFG_VALID_PROCESSES_NO_AUTH_NEEDED:
return user
if authorization_msg:
print authorization_msg
print "=" * len(authorization_msg)
if user == "":
print >> sys.stdout, "\rUsername: ",
try:
user = sys.stdin.readline().lower().strip()
except EOFError:
sys.stderr.write("\n")
sys.exit(1)
except KeyboardInterrupt:
sys.stderr.write("\n")
sys.exit(1)
else:
print >> sys.stdout, "\rUsername:", user
## first check user:
# p_un passed may be an email or a nickname:
res = run_sql("select id from user where email=%s", (user,), 1) + \
run_sql("select id from user where nickname=%s", (user,), 1)
if not res:
print "Sorry, %s does not exist." % user
sys.exit(1)
else:
uid = res[0][0]
ok = False
login_method = get_user_preferences(uid)['login_method']
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
#Local authentication, let's see if we want passwords.
res = run_sql("select id from user where id=%s "
"and password=AES_ENCRYPT(email,'')",
(uid,), 1)
if res:
ok = True
if not ok:
try:
password_entered = getpass.getpass()
except EOFError:
sys.stderr.write("\n")
sys.exit(1)
except KeyboardInterrupt:
sys.stderr.write("\n")
sys.exit(1)
if not CFG_EXTERNAL_AUTHENTICATION[login_method]:
res = run_sql("select id from user where id=%s "
"and password=AES_ENCRYPT(email, %s)",
(uid, password_entered), 1)
if res:
ok = True
else:
if CFG_EXTERNAL_AUTHENTICATION[login_method].auth_user(get_email(uid), password_entered):
ok = True
if not ok:
print "Sorry, wrong credentials for %s." % user
sys.exit(1)
else:
## secondly check authorization for the authorization_action:
(auth_code, auth_message) = acc_authorize_action(uid, authorization_action)
if auth_code != 0:
print auth_message
sys.exit(1)
return user
def _task_submit(argv, authorization_action, authorization_msg):
"""Submits task to the BibSched task queue. This is what people will
be invoking via command line."""
## check as whom we want to submit?
check_running_process_user()
## sanity check: remove eventual "task" option:
## authenticate user:
_TASK_PARAMS['user'] = authenticate(_TASK_PARAMS["user"], authorization_action, authorization_msg)
## submit task:
if _TASK_PARAMS['task_specific_name']:
task_name = '%s:%s' % (_TASK_PARAMS['task_name'], _TASK_PARAMS['task_specific_name'])
else:
task_name = _TASK_PARAMS['task_name']
write_message("storing task options %s\n" % argv, verbose=9)
_TASK_PARAMS['task_id'] = run_sql("""INSERT INTO schTASK (proc,user,
runtime,sleeptime,status,progress,arguments,priority)
VALUES (%s,%s,%s,%s,'WAITING','',%s, %s)""",
(task_name, _TASK_PARAMS['user'], _TASK_PARAMS["runtime"],
_TASK_PARAMS["sleeptime"], marshal.dumps(argv), _TASK_PARAMS['priority']))
## update task number:
write_message("Task #%d submitted." % _TASK_PARAMS['task_id'])
return _TASK_PARAMS['task_id']
def _task_get_options(task_id, task_name):
"""Returns options for the task 'id' read from the BibSched task
queue table."""
out = {}
res = run_sql("SELECT arguments FROM schTASK WHERE id=%s AND proc LIKE %s",
(task_id, task_name+'%'))
try:
out = marshal.loads(res[0][0])
except:
write_message("Error: %s task %d does not seem to exist." \
% (task_name, task_id), sys.stderr)
task_update_status('ERROR')
sys.exit(1)
write_message('Options retrieved: %s' % (out, ), verbose=9)
return out
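# --- Illustrative sketch, not part of the original module -------------------
# Task arguments are stored in schTASK.arguments as a marshalled Python
# object, so what _task_submit() writes with marshal.dumps() is exactly what
# _task_get_options() reads back with marshal.loads().  The argv below is a
# hypothetical command line.
def _example_marshal_roundtrip():
    argv = ['bibtaskex', '-u', 'admin', '-v', '9']
    return marshal.loads(marshal.dumps(argv)) == argv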
def _task_run(task_run_fnc):
"""Runs the task by fetching arguments from the BibSched task queue.
This is what BibSched will be invoking via daemon call.
    The actual work is delegated to the given task_run_fnc callable.
@param task_run_fnc: will be called as the main core function. Must return
False in case of errors.
Return True in case of success and False in case of failure."""
## We prepare the pid file inside /prefix/var/run/taskname_id.pid
check_running_process_user()
try:
pidfile_name = os.path.join(CFG_PREFIX, 'var', 'run',
'bibsched_task_%d.pid' % _TASK_PARAMS['task_id'])
pidfile = open(pidfile_name, 'w')
pidfile.write(str(os.getpid()))
pidfile.close()
except OSError:
register_exception(alert_admin=True)
task_update_status("ERROR")
return False
## check task status:
task_status = task_read_status()
if task_status not in ("WAITING", "SCHEDULED"):
write_message("Error: The task #%d is %s. I expected WAITING or SCHEDULED." %
(_TASK_PARAMS['task_id'], task_status), sys.stderr)
return False
time_now = time.time()
if _TASK_PARAMS['runtime_limit'] is not None and os.environ.get('BIBSCHED_MODE', 'manual') != 'manual':
if not _TASK_PARAMS['runtime_limit'][0][0] <= time_now <= _TASK_PARAMS['runtime_limit'][0][1]:
if time_now <= _TASK_PARAMS['runtime_limit'][0][0]:
new_runtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(_TASK_PARAMS['runtime_limit'][0][0]))
else:
new_runtime = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime(_TASK_PARAMS['runtime_limit'][1][0]))
progress = run_sql("SELECT progress FROM schTASK WHERE id=%s", (_TASK_PARAMS['task_id'], ))
if progress:
progress = progress[0][0]
else:
progress = ''
g = re.match(r'Postponed (\d+) time\(s\)', progress)
if g:
postponed_times = int(g.group(1))
else:
postponed_times = 0
run_sql("UPDATE schTASK SET runtime=%s, status='WAITING', progress=%s WHERE id=%s", (new_runtime, 'Postponed %d time(s)' % (postponed_times + 1), _TASK_PARAMS['task_id']))
write_message("Task #%d postponed because outside of runtime limit" % _TASK_PARAMS['task_id'])
return True
## initialize signal handler:
signal.signal(signal.SIGUSR2, signal.SIG_IGN)
signal.signal(signal.SIGTSTP, _task_sig_sleep)
signal.signal(signal.SIGTERM, _task_sig_stop)
signal.signal(signal.SIGQUIT, _task_sig_stop)
signal.signal(signal.SIGABRT, _task_sig_suicide)
signal.signal(signal.SIGINT, _task_sig_stop)
## we can run the task now:
write_message("Task #%d started." % _TASK_PARAMS['task_id'])
task_update_status("RUNNING")
## run the task:
_TASK_PARAMS['task_starting_time'] = time.strftime("%Y-%m-%d %H:%M:%S", time.localtime())
sleeptime = _TASK_PARAMS['sleeptime']
try:
try:
if callable(task_run_fnc) and task_run_fnc():
task_update_status("DONE")
else:
task_update_status("DONE WITH ERRORS")
except SystemExit:
pass
except:
register_exception(alert_admin=True)
task_update_status("ERROR")
finally:
task_status = task_read_status()
if sleeptime:
new_runtime = get_datetime(sleeptime)
## The task is a daemon. We resubmit it
if task_status == 'DONE':
## It has finished in a good way. We recycle the database row
run_sql("UPDATE schTASK SET runtime=%s, status='WAITING', progress='' WHERE id=%s", (new_runtime, _TASK_PARAMS['task_id']))
write_message("Task #%d finished and resubmitted." % _TASK_PARAMS['task_id'])
elif task_status == 'STOPPED':
run_sql("UPDATE schTASK SET status='WAITING', progress='' WHERE id=%s", (_TASK_PARAMS['task_id'], ))
write_message("Task #%d stopped and resubmitted." % _TASK_PARAMS['task_id'])
else:
## We keep the bad result and we resubmit with another id.
#res = run_sql('SELECT proc,user,sleeptime,arguments,priority FROM schTASK WHERE id=%s', (_TASK_PARAMS['task_id'], ))
#proc, user, sleeptime, arguments, priority = res[0]
#run_sql("""INSERT INTO schTASK (proc,user,
#runtime,sleeptime,status,arguments,priority)
#VALUES (%s,%s,%s,%s,'WAITING',%s, %s)""",
#(proc, user, new_runtime, sleeptime, arguments, priority))
write_message("Task #%d finished but not resubmitted. [%s]" % (_TASK_PARAMS['task_id'], task_status))
else:
## we are done:
write_message("Task #%d finished. [%s]" % (_TASK_PARAMS['task_id'], task_status))
## Removing the pid
os.remove(pidfile_name)
return True
def _usage(exitcode=1, msg="", help_specific_usage="", description=""):
"""Prints usage info."""
if msg:
sys.stderr.write("Error: %s.\n" % msg)
sys.stderr.write("Usage: %s [options]\n" % sys.argv[0])
if help_specific_usage:
sys.stderr.write("Command options:\n")
sys.stderr.write(help_specific_usage)
sys.stderr.write("Scheduling options:\n")
sys.stderr.write(" -u, --user=USER\tUser name under which to submit this"
" task.\n")
sys.stderr.write(" -t, --runtime=TIME\tTime to execute the task. [default=now]\n"
"\t\t\tExamples: +15s, 5m, 3h, 2002-10-27 13:57:26.\n")
sys.stderr.write(" -s, --sleeptime=SLEEP\tSleeping frequency after"
" which to repeat the task.\n"
"\t\t\tExamples: 30m, 2h, 1d. [default=no]\n")
sys.stderr.write(" -L --limit=LIMIT\tTime limit when it is"
" allowed to execute the task.\n"
"\t\t\tExamples: 22:00-03:00, Sunday 01:00-05:00.\n"
"\t\t\tSyntax: [Wee[kday]] [hh[:mm][-hh[:mm]]].\n")
sys.stderr.write(" -P, --priority=PRI\tTask priority (0=default, 1=higher, etc).\n")
sys.stderr.write(" -N, --name=NAME\tTask specific name (advanced option).\n")
sys.stderr.write("General options:\n")
sys.stderr.write(" -h, --help\t\tPrint this help.\n")
sys.stderr.write(" -V, --version\t\tPrint version information.\n")
sys.stderr.write(" -v, --verbose=LEVEL\tVerbose level (0=min,"
" 1=default, 9=max).\n")
sys.stderr.write(" --profile=STATS\tPrint profile information. STATS is a comma-separated\n\t\t\tlist of desired output stats (calls, cumulative,\n\t\t\tfile, line, module, name, nfl, pcalls, stdname, time).\n")
if description:
sys.stderr.write(description)
sys.exit(exitcode)
def _task_sig_sleep(sig, frame):
"""Signal handler for the 'sleep' signal sent by BibSched."""
signal.signal(signal.SIGTSTP, signal.SIG_IGN)
write_message("task_sig_sleep(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("sleeping as soon as possible...")
_db_login(1)
task_update_status("ABOUT TO SLEEP")
def _task_sig_stop(sig, frame):
"""Signal handler for the 'stop' signal sent by BibSched."""
write_message("task_sig_stop(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("stopping as soon as possible...")
_db_login(1) # To avoid concurrency with an interrupted run_sql call
task_update_status("ABOUT TO STOP")
def _task_sig_suicide(sig, frame):
"""Signal handler for the 'suicide' signal sent by BibSched."""
write_message("task_sig_suicide(), got signal %s frame %s"
% (sig, frame), verbose=9)
write_message("suiciding myself now...")
task_update_status("SUICIDING")
write_message("suicided")
_db_login(1)
task_update_status("SUICIDED")
sys.exit(1)
def _task_sig_dumb(sig, frame):
"""Dumb signal handler."""
pass
_RE_PSLINE = re.compile('^\s*(.+?)\s+(.+?)\s*$')
def guess_apache_process_user_from_ps():
"""Guess Apache process user by parsing the list of running processes."""
apache_users = []
try:
# Tested on Linux, Sun and MacOS X
for line in os.popen('ps -A -o user,comm').readlines():
g = _RE_PSLINE.match(line)
if g:
                # 'ps -A -o user,comm' prints the user first, then the command
                username = g.group(1)
                process = os.path.basename(g.group(2))
if process in ('apache', 'apache2', 'httpd') :
if username not in apache_users and username != 'root':
apache_users.append(username)
except Exception, e:
print >> sys.stderr, "WARNING: %s" % e
return tuple(apache_users)
def guess_apache_process_user():
"""
Return the possible name of the user running the Apache server process.
(Look at running OS processes or look at OS users defined in /etc/passwd.)
"""
apache_users = guess_apache_process_user_from_ps() + ('apache2', 'apache', 'www-data')
for username in apache_users:
try:
userline = pwd.getpwnam(username)
return userline[0]
except KeyError:
pass
print >> sys.stderr, "ERROR: Cannot detect Apache server process user. Please set the correct value in CFG_BIBSCHED_PROCESS_USER."
sys.exit(1)
def check_running_process_user():
"""
Check that the user running this program is the same as the user
configured in CFG_BIBSCHED_PROCESS_USER or as the user running the
Apache webserver process.
"""
running_as_user = pwd.getpwuid(os.getuid())[0]
if CFG_BIBSCHED_PROCESS_USER:
# We have the expected bibsched process user defined in config,
# so check against her, not against Apache.
if running_as_user != CFG_BIBSCHED_PROCESS_USER:
print >> sys.stderr, """ERROR: You must run "%(x_proc)s" as the user set up in your
CFG_BIBSCHED_PROCESS_USER (seems to be "%(x_user)s").
You may want to do "sudo -u %(x_user)s %(x_proc)s ..." to do so.
If you think this is not right, please set CFG_BIBSCHED_PROCESS_USER
appropriately and rerun "inveniocfg --update-config-py".""" % \
{'x_proc': os.path.basename(sys.argv[0]), 'x_user': CFG_BIBSCHED_PROCESS_USER}
sys.exit(1)
elif running_as_user != guess_apache_process_user(): # not defined in config, check against Apache
print >> sys.stderr, """ERROR: You must run "%(x_proc)s" as the same user that runs your Apache server
process (seems to be "%(x_user)s").
You may want to do "sudo -u %(x_user)s %(x_proc)s ..." to do so.
If you think this is not right, please set CFG_BIBSCHED_PROCESS_USER
appropriately and rerun "inveniocfg --update-config-py".""" % \
{'x_proc': os.path.basename(sys.argv[0]), 'x_user': guess_apache_process_user()}
sys.exit(1)
return
| gpl-2.0 | 8,104,900,002,468,720,000 | 40.978284 | 220 | 0.594253 | false |
crscardellino/thesis | thesis/dataset/base.py | 1 | 9238 | # -*- coding: utf-8 -*-
from __future__ import absolute_import, unicode_literals
import numpy as np
import pickle
from collections import namedtuple
from gensim.models import Word2Vec
from scipy.sparse import csr_matrix
_InstanceId = namedtuple('InstanceId', 'corpus file sentence lemma idx')
class CorpusDataset(object):
def __init__(self, dataset, feature_dict_path=None, word_vector_model=None,
dataset_extra=None, dtype=np.float32):
if feature_dict_path is not None:
with open(feature_dict_path, 'rb') as f:
self._features_dicts = pickle.load(f)
else:
self._features_dicts = None
if word_vector_model is None or dataset_extra is not None:
self._data = csr_matrix((dataset['data'], dataset['indices'], dataset['indptr']),
shape=dataset['shape'], dtype=dtype)
self._input_vector_size = self._data.shape[1]
self._data_extra = None
self._word_vector_model = None
if dataset_extra is not None:
self._data_extra = dataset_extra['data']
self._word_vector_model = word_vector_model
try:
self._word_vector_size = self._word_vector_model.vector_size
except AttributeError:
self._word_vector_size = next(iter(self._word_vector_model.values())).shape[0]
self._input_vector_size += self._data_extra.shape[1] * self._word_vector_size
# FIXME: This is horrible!
if self._features_dicts is not None:
for wwidx, word_window in enumerate(self._data_extra):
for widx, word in enumerate(word_window):
for t in word:
if t in self._word_vector_model:
self._features_dicts[wwidx]['vector:word:%d' % widx] = t
break
else:
self._data = dataset['data']
self._data_extra = None
self._word_vector_model = word_vector_model
try:
self._word_vector_size = self._word_vector_model.vector_size
except AttributeError:
self._word_vector_size = next(iter(self._word_vector_model.values())).shape[0]
self._input_vector_size = self._data.shape[1] * self._word_vector_size
self._features_dicts = []
for word_window in self._data:
self._features_dicts.append({})
for widx, word in enumerate(word_window):
for t in word:
if t in self._word_vector_model:
self._features_dicts[-1]['vector:word:%d' % widx] = t
break
self._lemmas = None
self._unique_lemmas = None
self.dtype = dtype
def _word_window_to_vector(self, word_window):
vector = []
for word in word_window:
try:
vector.append(self._word_vector_model[next(t for t in word if t in self._word_vector_model)])
except StopIteration:
vector.append(np.zeros(self._word_vector_size, dtype=self.dtype))
return np.concatenate(vector)
def data(self, lemma=None, limit=0):
data = self._data if lemma is None else self._data[np.where(self._lemmas == lemma)[0], :]
extra_data = None
if self._word_vector_model is not None:
if self._data_extra is None:
data = np.array([self._word_window_to_vector(ww) for ww in data])
else:
extra_data = self._data_extra if lemma is None \
else self._data_extra[np.where(self._lemmas == lemma)[0], :]
extra_data = np.array([self._word_window_to_vector(ww) for ww in extra_data])
limit = min(limit, data.shape[0])
if limit > 0:
data = data[:limit, :]
if extra_data is not None:
extra_data = extra_data[:limit, :]
if extra_data is not None:
data = np.hstack((data.toarray(), extra_data))
assert data.shape[1] == self._input_vector_size
return data
def input_vector_size(self):
return self._input_vector_size
def num_examples(self, lemma=None):
return self.data(lemma).shape[0]
@property
def num_lemmas(self):
return self._unique_lemmas.shape[0]
@property
def word_vector_model(self):
return self._word_vector_model
def features_dictionaries(self, lemma=None, limit=0):
if lemma is None:
features_dict = self._features_dicts
else:
features_dict = [self._features_dicts[idx] for idx in np.where(self._lemmas == lemma)[0]]
limit = min(limit, len(features_dict))
if limit > 0:
features_dict = features_dict[:limit]
return features_dict
class SenseCorpusDataset(CorpusDataset):
def __init__(self, dataset_path, features_dict_path=None, word_vector_model=None,
dataset_extra=None, dtype=np.float32):
dataset = np.load(dataset_path)
dataset_extra = np.load(dataset_extra) if dataset_extra is not None else None
super(SenseCorpusDataset, self).__init__(dataset, features_dict_path, word_vector_model, dataset_extra, dtype)
self._lemmas = dataset['lemmas']
self._unique_lemmas = np.unique(self._lemmas)
self._target = dataset['target']
self._sentences = dataset['sentences']
self._train_classes = dataset['train_classes']
def target(self, lemma=None, limit=0):
if lemma is None:
target = self._target
else:
target = self._target[np.where(self._lemmas == lemma)[0]]
limit = min(limit, target.shape[0])
if limit > 0:
target = target[:limit, :]
return target
def traverse_dataset_by_lemma(self, return_features=False):
for lemma in self._unique_lemmas:
if return_features:
yield lemma, self.data(lemma), self.target(lemma), self.features_dictionaries(lemma)
else:
yield lemma, self.data(lemma), self.target(lemma)
def output_vector_size(self, lemma=None):
if lemma is None:
return self._train_classes.shape[0]
else:
return np.array([cls for cls in self._train_classes if lemma == cls.split('.')[1]]).shape[0]
def lemmas_index(self, lemma):
return np.where(self._lemmas == lemma)[0]
def num_examples(self, lemma=None):
return self.data(lemma).shape[0]
@property
def num_lemmas(self):
return self._unique_lemmas.shape[0]
def train_classes(self, lemma=None):
if lemma is None:
return self._train_classes
else:
return np.array([cls for cls in self._train_classes if cls.split('.')[1] == lemma])
class SenseCorpusDatasets(object):
def __init__(self, train_dataset_path, test_dataset_path, train_features_dict_path=None,
test_features_dict_path=None, word_vector_model_path=None,
train_dataset_extra=None, test_dataset_extra=None, dtype=np.float32):
try:
word_vector_model = Word2Vec.load_word2vec_format(word_vector_model_path, binary=True)\
if word_vector_model_path is not None else None
except UnicodeDecodeError:
with open(word_vector_model_path, 'rb') as fvectors:
word_vector_model = pickle.load(fvectors)
self.train_dataset = SenseCorpusDataset(train_dataset_path, train_features_dict_path,
word_vector_model, train_dataset_extra, dtype)
self.test_dataset = SenseCorpusDataset(test_dataset_path, test_features_dict_path,
word_vector_model, test_dataset_extra, dtype)
class UnlabeledCorpusDataset(CorpusDataset):
def __init__(self, dataset_path, features_dict_path=None, word_vector_model=None,
dataset_extra=None, dtype=np.float32):
dataset = np.load(dataset_path)
dataset_extra = np.load(dataset_extra) if dataset_extra is not None else None
super(UnlabeledCorpusDataset, self).__init__(dataset, features_dict_path, word_vector_model,
dataset_extra, dtype)
self._instances_id = [_InstanceId(*iid.split(':')) for iid in dataset['instances_id']]
self._lemmas = np.array([iid.lemma for iid in self._instances_id])
self._unique_lemmas = np.unique(self._lemmas)
def instances_id(self, lemma=None, limit=0):
if lemma is None:
instances_id = self._instances_id
else:
instances_id = [self._instances_id[idx] for idx in np.where(self._lemmas == lemma)[0]]
limit = min(limit, len(instances_id))
if limit > 0:
instances_id = instances_id[:limit]
return instances_id
def has_lemma(self, lemma):
return lemma in set(self._unique_lemmas)
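# --- Illustrative usage sketch, not part of the original module -------------
# The .npz paths below are hypothetical placeholders for the matrices built
# elsewhere in this project; the sketch only shows how the per-lemma iterator
# is meant to be consumed.
def _example_iterate_train_corpus(train_path='train_dataset.npz',
                                  test_path='test_dataset.npz'):
    datasets = SenseCorpusDatasets(train_path, test_path)
    instances_per_lemma = {}
    for lemma, data, target in datasets.train_dataset.traverse_dataset_by_lemma():
        instances_per_lemma[lemma] = data.shape[0]
    return instances_per_lemma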
| mit | 2,390,123,767,422,768,000 | 37.65272 | 118 | 0.575666 | false |
lsaffre/lino-welfare | lino_welfare/modlib/active_job_search/models.py | 1 | 1802 | # -*- coding: UTF-8 -*-
# Copyright 2014-2017 Rumma & Ko Ltd
# License: BSD (see file COPYING for details)
"""
The :xfile:`models.py` module for the
:mod:`lino_welfare.modlib.active_job_search` app.
"""
from django.db import models
from django.utils.translation import ugettext_lazy as _
from lino.api import dd
from lino_welfare.modlib.integ.roles import IntegrationStaff, IntegUser
class Proof(dd.Model):
class Meta:
app_label = 'active_job_search'
verbose_name = _("Proof of search")
verbose_name_plural = _("Proofs of search")
client = dd.ForeignKey('pcsw.Client')
date = models.DateField(_("Date"), blank=True, null=True)
company = dd.ForeignKey('contacts.Company', blank=True, null=True)
spontaneous = models.BooleanField(_("Spontaneous"), default=False)
response = models.BooleanField(_("Response to offer"), default=False)
remarks = dd.RichTextField(
_("Remarks"),
blank=True, null=True, format='plain')
class Proofs(dd.Table):
required_roles = dd.login_required(IntegrationStaff)
model = 'active_job_search.Proof'
detail_layout = """
date client company id
spontaneous response
remarks
"""
class ProofsByClient(Proofs):
required_roles = dd.login_required(IntegUser)
master_key = 'client'
column_names = "date company spontaneous response *"
auto_fit_column_widths = True
dd.inject_field(
'pcsw.Client', 'geographic_area',
models.CharField(
_("Geographic area"), blank=True, max_length=200,
help_text=_(
"The area for which we are seeking a job.")))
dd.inject_field(
'pcsw.Client', 'child_custody',
models.TextField(
_("Child custody"), blank=True,
help_text=_("Notes concerning child custody.")))
| agpl-3.0 | 3,020,308,845,371,457,500 | 25.5 | 73 | 0.660377 | false |
changkun/AugmentedTouch | src/script/new/KMMoment.py | 2 | 2415 | import time
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from sklearn.cluster import MiniBatchKMeans
from sklearn.cross_validation import train_test_split
from loaddata import loadUserData
from loaddata import splitMomentDataByFeature
from loaddata import splitMomentDataByLabel
from loaddata import splitMomentDataByFeatureAndLabel
userid=1
device=1
featureCondition=3
classificationCondition=1
offsetFeatureOn=True
batch_size = 45
my_test_size = 0.3
my_random_state = 42
data, label = splitMomentDataByFeatureAndLabel(userid, device, featureCondition, classificationCondition, offsetFeatureOn=offsetFeatureOn)
trainingData, testData, trainingLabel, testLabel = train_test_split(data, label, test_size=my_test_size, random_state=my_random_state)
def plot3DLabel(data, label, trainLabel):
print data.shape
print label
fig = plt.figure()
ax = fig.add_subplot(211, projection='3d')
x = [float(value)/736 for value in data[:,0]]
y = [float(value)/414 for value in data[:,1]]
z = [float(value) for value in data[:,2]]
label = [1 if value=='1' else 0 for value in label]
ax.scatter(x,y,z,c=label, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('roll')
ax = fig.add_subplot(212, projection='3d')
ax.scatter(x,y,z,c=trainLabel, marker='o')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('roll')
plt.show()
mbk = MiniBatchKMeans(init='k-means++', n_clusters=2,batch_size=batch_size,\
n_init=10, max_no_improvement=10, verbose=0)
t0 = time.time()
mbk.fit(trainingData)
t_mini_batch = time.time() - t0
mbk_means_labels = mbk.labels_
mbk_means_cluster_centers = mbk.cluster_centers_
mbk_means_labels_unique = np.unique(mbk_means_labels)
plot3DLabel(trainingData, trainingLabel, mbk_means_labels)
def testingWithModel(testData, testLabel, model):
error_count = 0.0
result = model.predict(testData)
for i, la in enumerate(result):
if la != testLabel[i]:
error_count += 1
return error_count/result.shape[0]
#print testingWithModel(testData, testLabel, mbk)
print mbk_means_labels_unique
# print trainingData[:,0], trainingData[:,1], trainingData[:,2]
# fig = plt.figure()
# ax = fig.add_subplot(111, projection='3d')
# ax.scatter(trainingData[:,0],trainingData[:,1],trainingData[:,2],color='red')
# plt.show()
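# --- Illustrative sketch, not part of the original script -------------------
# MiniBatchKMeans assigns arbitrary cluster ids (0/1), so comparing them
# directly against the '0'/'1' class labels (as the commented-out call above
# would do) can report almost 100% error even for a good clustering when the
# ids happen to be flipped.  A hypothetical helper that tries both mappings
# and keeps the better one:
def exampleErrorRateWithBestMapping(model, data, labels):
    predicted = model.predict(data)
    labels01 = [1 if value == '1' else 0 for value in labels]
    direct = sum(1 for p, l in zip(predicted, labels01) if p != l)
    flipped = sum(1 for p, l in zip(predicted, labels01) if (1 - p) != l)
    return float(min(direct, flipped)) / len(labels01)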
| gpl-2.0 | 7,702,797,521,755,485,000 | 28.096386 | 138 | 0.713043 | false |
chuan9/chromium-crosswalk | remoting/tools/remote_test_helper/jsonrpclib.py | 51 | 9346 | # Copyright 2014 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""Module to implement the JSON-RPC protocol.
This module uses xmlrpclib as the base and only overrides those
portions that implement the XML-RPC protocol. These portions are rewritten
to use the JSON-RPC protocol instead.
When large portions of code need to be rewritten the original code and
comments are preserved. The intention here is to keep the amount of code
change to a minimum.
This module only depends on default Python modules. No third party code is
required to use this module.
"""
import gzip  # checked by Transport.send_content before compressing request bodies
import json
import urllib
import xmlrpclib as _base
__version__ = '1.0.0'
gzip_encode = _base.gzip_encode
class Error(Exception):
def __str__(self):
return repr(self)
class ProtocolError(Error):
"""Indicates a JSON protocol error."""
  # Defaults allow the validation helpers below to raise ProtocolError with
  # only a message.
  def __init__(self, url, errcode=None, errmsg=None, headers=None):
Error.__init__(self)
self.url = url
self.errcode = errcode
self.errmsg = errmsg
self.headers = headers
def __repr__(self):
return (
'<ProtocolError for %s: %s %s>' %
(self.url, self.errcode, self.errmsg))
class ResponseError(Error):
"""Indicates a broken response package."""
pass
class Fault(Error):
"""Indicates an JSON-RPC fault package."""
def __init__(self, code, message):
Error.__init__(self)
if not isinstance(code, int):
raise ProtocolError('Fault code must be an integer.')
self.code = code
self.message = message
def __repr__(self):
return (
'<Fault %s: %s>' %
(self.code, repr(self.message))
)
def CreateRequest(methodname, params, ident=''):
"""Create a valid JSON-RPC request.
Args:
methodname: The name of the remote method to invoke.
params: The parameters to pass to the remote method. This should be a
        list or tuple and able to be encoded by the default JSON parser.
    ident: The request identifier.
Returns:
A valid JSON-RPC request object.
"""
request = {
'jsonrpc': '2.0',
'method': methodname,
'params': params,
'id': ident
}
return request
def CreateRequestString(methodname, params, ident=''):
"""Create a valid JSON-RPC request string.
Args:
methodname: The name of the remote method to invoke.
params: The parameters to pass to the remote method.
These parameters need to be encode-able by the default JSON parser.
ident: The request identifier.
Returns:
A valid JSON-RPC request string.
"""
return json.dumps(CreateRequest(methodname, params, ident))
def CreateResponse(data, ident):
"""Create a JSON-RPC response.
Args:
data: The data to return.
ident: The response identifier.
Returns:
A valid JSON-RPC response object.
"""
if isinstance(data, Fault):
response = {
'jsonrpc': '2.0',
'error': {
'code': data.code,
'message': data.message},
'id': ident
}
else:
response = {
'jsonrpc': '2.0',
'response': data,
'id': ident
}
return response
def CreateResponseString(data, ident):
"""Create a JSON-RPC response string.
Args:
data: The data to return.
ident: The response identifier.
Returns:
A valid JSON-RPC response object.
"""
return json.dumps(CreateResponse(data, ident))
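# --- Illustrative sketch, not part of the original module -------------------
# The method name, parameters and id below are hypothetical; the helper only
# shows the strings produced by the two builders above for one
# request/response pair.
def _ExampleRequestResponsePair():
  request = CreateRequestString('GetHostInfo', ['hostname'], ident=1)
  response = CreateResponseString({'hostname': 'example-host'}, ident=1)
  return request, response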
def ParseHTTPResponse(response):
"""Parse an HTTP response object and return the JSON object.
Args:
response: An HTTP response object.
Returns:
The returned JSON-RPC object.
Raises:
ProtocolError: if the object format is not correct.
Fault: If a Fault error is returned from the server.
"""
# Check for new http response object, else it is a file object
if hasattr(response, 'getheader'):
if response.getheader('Content-Encoding', '') == 'gzip':
stream = _base.GzipDecodedResponse(response)
else:
stream = response
else:
stream = response
data = ''
while 1:
chunk = stream.read(1024)
if not chunk:
break
data += chunk
response = json.loads(data)
ValidateBasicJSONRPCData(response)
if 'response' in response:
ValidateResponse(response)
return response['response']
elif 'error' in response:
ValidateError(response)
code = response['error']['code']
message = response['error']['message']
raise Fault(code, message)
else:
raise ProtocolError('No valid JSON returned')
def ValidateRequest(data):
"""Validate a JSON-RPC request object.
Args:
data: The JSON-RPC object (dict).
Raises:
ProtocolError: if the object format is not correct.
"""
ValidateBasicJSONRPCData(data)
if 'method' not in data or 'params' not in data:
raise ProtocolError('JSON is not a valid request')
def ValidateResponse(data):
"""Validate a JSON-RPC response object.
Args:
data: The JSON-RPC object (dict).
Raises:
ProtocolError: if the object format is not correct.
"""
ValidateBasicJSONRPCData(data)
if 'response' not in data:
raise ProtocolError('JSON is not a valid response')
def ValidateError(data):
"""Validate a JSON-RPC error object.
Args:
data: The JSON-RPC object (dict).
Raises:
ProtocolError: if the object format is not correct.
"""
ValidateBasicJSONRPCData(data)
if ('error' not in data or
'code' not in data['error'] or
'message' not in data['error']):
raise ProtocolError('JSON is not a valid error response')
def ValidateBasicJSONRPCData(data):
"""Validate a basic JSON-RPC object.
Args:
data: The JSON-RPC object (dict).
Raises:
ProtocolError: if the object format is not correct.
"""
error = None
if not isinstance(data, dict):
error = 'JSON data is not a dictionary'
elif 'jsonrpc' not in data or data['jsonrpc'] != '2.0':
error = 'JSON is not a valid JSON RPC 2.0 message'
elif 'id' not in data:
error = 'JSON data missing required id entry'
if error:
raise ProtocolError(error)
class Transport(_base.Transport):
"""RPC transport class.
This class extends the functionality of xmlrpclib.Transport and only
overrides the operations needed to change the protocol from XML-RPC to
JSON-RPC.
"""
user_agent = 'jsonrpclib.py/' + __version__
def send_content(self, connection, request_body):
"""Send the request."""
connection.putheader('Content-Type','application/json')
#optionally encode the request
if (self.encode_threshold is not None and
self.encode_threshold < len(request_body) and
gzip):
connection.putheader('Content-Encoding', 'gzip')
request_body = gzip_encode(request_body)
connection.putheader('Content-Length', str(len(request_body)))
connection.endheaders(request_body)
def single_request(self, host, handler, request_body, verbose=0):
"""Issue a single JSON-RPC request."""
h = self.make_connection(host)
if verbose:
h.set_debuglevel(1)
try:
self.send_request(h, handler, request_body)
self.send_host(h, host)
self.send_user_agent(h)
self.send_content(h, request_body)
response = h.getresponse(buffering=True)
if response.status == 200:
self.verbose = verbose
return self.parse_response(response)
except Fault:
raise
except Exception:
# All unexpected errors leave connection in
# a strange state, so we clear it.
self.close()
raise
# discard any response data and raise exception
if response.getheader('content-length', 0):
response.read()
raise ProtocolError(
host + handler,
response.status, response.reason,
response.msg,
)
def parse_response(self, response):
"""Parse the HTTP resoponse from the server."""
return ParseHTTPResponse(response)
class SafeTransport(_base.SafeTransport):
"""Transport class for HTTPS servers.
This class extends the functionality of xmlrpclib.SafeTransport and only
overrides the operations needed to change the protocol from XML-RPC to
JSON-RPC.
"""
def parse_response(self, response):
return ParseHTTPResponse(response)
class ServerProxy(_base.ServerProxy):
"""Proxy class to the RPC server.
This class extends the functionality of xmlrpclib.ServerProxy and only
overrides the operations needed to change the protocol from XML-RPC to
JSON-RPC.
"""
def __init__(self, uri, transport=None, encoding=None, verbose=0,
allow_none=0, use_datetime=0):
urltype, _ = urllib.splittype(uri)
if urltype not in ('http', 'https'):
raise IOError('unsupported JSON-RPC protocol')
_base.ServerProxy.__init__(self, uri, transport, encoding, verbose,
allow_none, use_datetime)
if transport is None:
      if urltype == 'https':
transport = SafeTransport(use_datetime=use_datetime)
else:
transport = Transport(use_datetime=use_datetime)
self.__transport = transport
def __request(self, methodname, params):
"""Call a method on the remote server."""
request = CreateRequestString(methodname, params)
response = self.__transport.request(
self.__host,
self.__handler,
request,
verbose=self.__verbose
)
return response
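# --- Illustrative usage sketch, not part of the original module -------------
# The URL and method name are hypothetical; as with xmlrpclib.ServerProxy,
# attribute access on the proxy object is turned into a remote call.
def _ExampleClientCall():
  proxy = ServerProxy('http://localhost:8000')
  return proxy.GetHostInfo('hostname')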
| bsd-3-clause | 5,556,331,560,828,983,000 | 24.675824 | 75 | 0.664455 | false |
mitdbg/modeldb | protos/gen/python/protos/public/modeldb/Job_pb2_grpc.py | 2 | 3840 | # Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
import grpc
from ..modeldb import Job_pb2 as modeldb_dot_Job__pb2
class JobServiceStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.createJob = channel.unary_unary(
'/ai.verta.modeldb.JobService/createJob',
request_serializer=modeldb_dot_Job__pb2.CreateJob.SerializeToString,
response_deserializer=modeldb_dot_Job__pb2.CreateJob.Response.FromString,
)
self.getJob = channel.unary_unary(
'/ai.verta.modeldb.JobService/getJob',
request_serializer=modeldb_dot_Job__pb2.GetJob.SerializeToString,
response_deserializer=modeldb_dot_Job__pb2.GetJob.Response.FromString,
)
self.updateJob = channel.unary_unary(
'/ai.verta.modeldb.JobService/updateJob',
request_serializer=modeldb_dot_Job__pb2.UpdateJob.SerializeToString,
response_deserializer=modeldb_dot_Job__pb2.UpdateJob.Response.FromString,
)
self.deleteJob = channel.unary_unary(
'/ai.verta.modeldb.JobService/deleteJob',
request_serializer=modeldb_dot_Job__pb2.DeleteJob.SerializeToString,
response_deserializer=modeldb_dot_Job__pb2.DeleteJob.Response.FromString,
)
class JobServiceServicer(object):
# missing associated documentation comment in .proto file
pass
def createJob(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def getJob(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def updateJob(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def deleteJob(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_JobServiceServicer_to_server(servicer, server):
rpc_method_handlers = {
'createJob': grpc.unary_unary_rpc_method_handler(
servicer.createJob,
request_deserializer=modeldb_dot_Job__pb2.CreateJob.FromString,
response_serializer=modeldb_dot_Job__pb2.CreateJob.Response.SerializeToString,
),
'getJob': grpc.unary_unary_rpc_method_handler(
servicer.getJob,
request_deserializer=modeldb_dot_Job__pb2.GetJob.FromString,
response_serializer=modeldb_dot_Job__pb2.GetJob.Response.SerializeToString,
),
'updateJob': grpc.unary_unary_rpc_method_handler(
servicer.updateJob,
request_deserializer=modeldb_dot_Job__pb2.UpdateJob.FromString,
response_serializer=modeldb_dot_Job__pb2.UpdateJob.Response.SerializeToString,
),
'deleteJob': grpc.unary_unary_rpc_method_handler(
servicer.deleteJob,
request_deserializer=modeldb_dot_Job__pb2.DeleteJob.FromString,
response_serializer=modeldb_dot_Job__pb2.DeleteJob.Response.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'ai.verta.modeldb.JobService', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
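# --- Illustrative usage sketch, not part of the generated code --------------
# The channel address below is hypothetical; a client wraps a gRPC channel in
# the stub above and calls one of the four unary RPCs with a request message
# from Job_pb2.
def _example_create_job(address='localhost:8085'):
  channel = grpc.insecure_channel(address)
  stub = JobServiceStub(channel)
  return stub.createJob(modeldb_dot_Job__pb2.CreateJob())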
| mit | 4,864,343,337,607,885,000 | 38.587629 | 88 | 0.715625 | false |
tu-darmstadt-ros-pkg/hector_flexbe_behavior | hector_flexbe_states/src/hector_flexbe_states/move_along_path.py | 1 | 2254 | #!/usr/bin/env python
import math
import rospy
import tf
from flexbe_core import EventState, Logger
from flexbe_core.proxy import ProxyActionClient, ProxyPublisher
from hector_move_base_msgs.msg import MoveBaseAction, MoveBaseGoal, MoveBaseActionPath
from rospy import Time
from visualization_msgs.msg import MarkerArray, Marker
'''
Created on 15.06.2015
@author: Philipp Schillinger
'''
class MoveAlongPath(EventState):
'''
Lets the robot move along a given path.
    ># path     Path           Path (header + poses) the robot should follow.
># speed float Speed of the robot
<= reached Robot is now located at the specified waypoint.
<= failed Failed to send a motion request to the action server.
'''
def __init__(self):
'''
Constructor
'''
super(MoveAlongPath, self).__init__(outcomes=['reached', 'failed'], input_keys=['path','speed'])
self._failed = False
self._reached = False
self._pathTopic = '/controller/path'
self._marker_topic = '/debug/path'
self._pub = ProxyPublisher({self._pathTopic: MoveBaseActionPath, self._marker_topic: MarkerArray})
def execute(self, userdata):
'''
Execute this state
'''
if self._failed:
return 'failed'
if self._reached:
return 'reached'
def on_enter(self, userdata):
ma = MarkerArray()
self._path = MoveBaseActionPath()
for i in range(len(userdata.path.poses)):
marker = Marker(type=Marker.ARROW)
marker.header = userdata.path.header
marker.pose = userdata.path.poses[i].pose
marker.scale.x = 0.2
marker.scale.y = 0.02
marker.scale.z = 0.02
marker.color.b = 1.0
marker.color.r = 0.9 - 0.7 * i / len(userdata.path.poses)
marker.color.g = 0.9 - 0.7 * i / len(userdata.path.poses)
marker.color.a = 0.8 - 0.5 * i / len(userdata.path.poses)
marker.id = i
ma.markers.append(marker)
self._failed = False
self._path.goal.target_path.poses = userdata.path.poses
self._path.goal.target_path.header.frame_id = 'map'
self._pub.publish(self._pathTopic, self._path)
self._pub.publish(self._marker_topic, ma)
self._reached = True
def on_stop(self):
pass
def on_exit(self, userdata):
pass
def on_pause(self):
pass
def on_resume(self, userdata):
self.on_enter(userdata)
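# --- Illustrative usage sketch, not part of the original state --------------
# Inside a generated FlexBE behavior this state is typically added to the
# state machine with its userdata keys remapped; the names below are
# hypothetical:
#
#   OperatableStateMachine.add('Move_Along_Path',
#                              MoveAlongPath(),
#                              transitions={'reached': 'finished',
#                                           'failed': 'failed'},
#                              autonomy={'reached': Autonomy.Off,
#                                        'failed': Autonomy.Off},
#                              remapping={'path': 'path', 'speed': 'speed'})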
| bsd-3-clause | -2,808,486,417,456,087,000 | 21.767677 | 100 | 0.680124 | false |
niun/pyoscope | tests/realtime_plot_demo.py | 2 | 1378 | #!/usr/bin/env python
#
# PyUSBtmc
# display_channel.py
#
# Copyright (c) 2011 Mike Hadmack
# Copyright (c) 2010 Matt Mets
# This code is distributed under the MIT license
''' realtime_plot_demo.py
Realtime plot of both channels
This is a fork of realtime_chart.py to use the newer RigolScope interface
NOTE: This code has not yet been adapted or tested with pyoscope
'''
import numpy
from matplotlib import pyplot
import sys
import os
sys.path.append(os.path.expanduser('.'))
from rigol import RigolScope
import time
# Initialize our scope
scope = RigolScope("/dev/usbtmc0")
# Turn on interactive plotting
pyplot.ion()
while 1: # How can this loop be broken other than ^C?
# introduce a delay so that a break is recognized?
time.sleep(0.1)
scope.grabData()
data1 = scope.getScaledWaveform(1)
data2 = scope.getScaledWaveform(2)
t = scope.getTimeAxis()
# Start data acquisition again, and put the scope back in local mode
scope.forceTrigger()
# Plot the data
pyplot.clf() # Clear current plot figure
pyplot.plot(t, data1)
pyplot.plot(t, data2)
pyplot.title("Oscilloscope data")
pyplot.ylabel("Voltage (V)")
pyplot.xlabel("Time (s)")
pyplot.xlim(t[0], t[599])
# need to somehow supress the vertical autoscaling.
# Maybe a button to autoscale on demand?
pyplot.draw()
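# --- Illustrative sketch, not part of the original script -------------------
# One way to answer the question above (leaving the loop cleanly instead of
# relying on ^C killing the process at an arbitrary point) would be to wrap
# the loop in a KeyboardInterrupt handler, e.g.:
#
# try:
#     while 1:
#         # ... acquire, plot and force-trigger as above ...
#         pass
# except KeyboardInterrupt:
#     print "exiting"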
| mit | 5,325,496,992,515,884,000 | 25.5 | 77 | 0.69521 | false |
psav/cfme_tests | cfme/tests/containers/test_start_page.py | 3 | 2702 | # -*- coding: utf-8 -*-
from collections import namedtuple
import pytest
from cfme.containers.overview import ContainersOverviewView
from cfme.containers.node import NodeAllView
from cfme.containers.pod import PodAllView
from cfme.containers.service import ServiceAllView
from cfme.containers.provider import ContainersProvider, ContainerProvidersView
from cfme.containers.project import ProjectAllView
from cfme.containers.image_registry import ImageRegistryAllView
from cfme.containers.template import TemplateAllView
from cfme.containers.replicator import ReplicatorAllView
from cfme.containers.route import RouteAllView
from cfme.containers.container import ContainerAllView
from cfme.utils.appliance.implementations.ui import navigate_to
from cfme.utils.version import current_version
from cfme.configure.settings import Visual
pytestmark = [
pytest.mark.uncollectif(lambda: current_version() < "5.8"),
pytest.mark.usefixtures("setup_provider"),
pytest.mark.provider([ContainersProvider], scope='function')
]
DataSet = namedtuple('DataSet', ['obj_view', 'page_name'])
data_sets = (
DataSet(ContainersOverviewView, 'Compute / Containers / Overview'),
DataSet(ContainerProvidersView, 'Compute / Containers / Providers'),
DataSet(NodeAllView, 'Compute / Containers / Container Nodes'),
DataSet(PodAllView, 'Compute / Containers / Pods'),
DataSet(ServiceAllView, 'Compute / Containers / Container Services'),
DataSet(ProjectAllView, 'Compute / Containers / Projects'),
DataSet(ImageRegistryAllView, 'Compute / Containers / Image Registries'),
DataSet(TemplateAllView, 'Compute / Containers / Container Templates'),
DataSet(ReplicatorAllView, 'Compute / Containers / Replicators'),
DataSet(RouteAllView, 'Compute / Containers / Routes'),
# https://bugzilla.redhat.com/show_bug.cgi?id=1510376
# from cfme.containers.volume import VolumeAllView
# DataSet(VolumeAllView, 'Compute / Containers / Volumes'),
# https://bugzilla.redhat.com/show_bug.cgi?id=1466350
DataSet(ContainerAllView, 'Compute / Containers / Containers')
)
@pytest.mark.polarion('CMP-10601')
def test_start_page(appliance, soft_assert):
for data_set in data_sets:
appliance.user.my_settings.visual.login_page = data_set.page_name
login_page = navigate_to(appliance.server, 'LoginScreen')
login_page.login_admin()
view = appliance.browser.create_view(data_set.obj_view)
soft_assert(
view.is_displayed,
'Configured start page is "{page_name}", but the start page now is "{cur_page}".'
.format(page_name=data_set.page_name, cur_page=view.navigation.currently_selected)
)
| gpl-2.0 | -7,240,210,812,952,324,000 | 43.295082 | 94 | 0.747964 | false |
qedi-r/home-assistant | homeassistant/components/plugwise/climate.py | 2 | 9762 | """Plugwise Climate component for HomeAssistant."""
import logging
import voluptuous as vol
import haanna
import homeassistant.helpers.config_validation as cv
from homeassistant.components.climate import PLATFORM_SCHEMA, ClimateDevice
from homeassistant.components.climate.const import (
CURRENT_HVAC_HEAT,
CURRENT_HVAC_COOL,
CURRENT_HVAC_IDLE,
HVAC_MODE_HEAT,
HVAC_MODE_HEAT_COOL,
HVAC_MODE_AUTO,
SUPPORT_PRESET_MODE,
SUPPORT_TARGET_TEMPERATURE,
)
from homeassistant.const import (
ATTR_TEMPERATURE,
CONF_HOST,
CONF_NAME,
CONF_PASSWORD,
CONF_PORT,
CONF_USERNAME,
TEMP_CELSIUS,
)
from homeassistant.exceptions import PlatformNotReady
SUPPORT_FLAGS = SUPPORT_TARGET_TEMPERATURE | SUPPORT_PRESET_MODE
_LOGGER = logging.getLogger(__name__)
# Configuration directives
CONF_MIN_TEMP = "min_temp"
CONF_MAX_TEMP = "max_temp"
CONF_LEGACY = "legacy_anna"
# Default directives
DEFAULT_NAME = "Plugwise Thermostat"
DEFAULT_USERNAME = "smile"
DEFAULT_TIMEOUT = 10
DEFAULT_PORT = 80
DEFAULT_ICON = "mdi:thermometer"
DEFAULT_MIN_TEMP = 4
DEFAULT_MAX_TEMP = 30
# HVAC modes
HVAC_MODES_1 = [HVAC_MODE_HEAT, HVAC_MODE_AUTO]
HVAC_MODES_2 = [HVAC_MODE_HEAT_COOL, HVAC_MODE_AUTO]
# Read platform configuration
PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend(
{
vol.Optional(CONF_NAME, default=DEFAULT_NAME): cv.string,
vol.Required(CONF_PASSWORD): cv.string,
vol.Required(CONF_HOST): cv.string,
vol.Optional(CONF_LEGACY, default=False): cv.boolean,
vol.Optional(CONF_PORT, default=DEFAULT_PORT): cv.port,
vol.Optional(CONF_USERNAME, default=DEFAULT_USERNAME): cv.string,
vol.Optional(CONF_MIN_TEMP, default=DEFAULT_MIN_TEMP): cv.positive_int,
vol.Optional(CONF_MAX_TEMP, default=DEFAULT_MAX_TEMP): cv.positive_int,
}
)
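# --- Illustrative configuration sketch, not part of the original platform ---
# A matching configuration.yaml entry could look like this; the host,
# password and optional values are placeholders:
#
# climate:
#   - platform: plugwise
#     name: Anna Thermostat
#     password: short_smile_id
#     host: 192.168.1.60
#     min_temp: 4
#     max_temp: 30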
def setup_platform(hass, config, add_entities, discovery_info=None):
"""Add the Plugwise (Anna) Thermostate."""
api = haanna.Haanna(
config[CONF_USERNAME],
config[CONF_PASSWORD],
config[CONF_HOST],
config[CONF_PORT],
config[CONF_LEGACY],
)
try:
api.ping_anna_thermostat()
except OSError:
_LOGGER.debug("Ping failed, retrying later", exc_info=True)
raise PlatformNotReady
devices = [
ThermostatDevice(
api, config[CONF_NAME], config[CONF_MIN_TEMP], config[CONF_MAX_TEMP]
)
]
add_entities(devices, True)
class ThermostatDevice(ClimateDevice):
"""Representation of an Plugwise thermostat."""
def __init__(self, api, name, min_temp, max_temp):
"""Set up the Plugwise API."""
self._api = api
self._min_temp = min_temp
self._max_temp = max_temp
self._name = name
self._domain_objects = None
self._outdoor_temperature = None
self._selected_schema = None
self._preset_mode = None
self._presets = None
self._presets_list = None
self._heating_status = None
self._cooling_status = None
self._schema_names = None
self._schema_status = None
self._current_temperature = None
self._thermostat_temperature = None
self._boiler_temperature = None
self._water_pressure = None
self._schedule_temperature = None
self._hvac_mode = None
@property
def hvac_action(self):
"""Return the current action."""
if self._heating_status:
return CURRENT_HVAC_HEAT
if self._cooling_status:
return CURRENT_HVAC_COOL
return CURRENT_HVAC_IDLE
@property
def name(self):
"""Return the name of the thermostat, if any."""
return self._name
@property
def icon(self):
"""Return the icon to use in the frontend."""
return DEFAULT_ICON
@property
def supported_features(self):
"""Return the list of supported features."""
return SUPPORT_FLAGS
@property
def device_state_attributes(self):
"""Return the device specific state attributes."""
attributes = {}
if self._outdoor_temperature:
attributes["outdoor_temperature"] = self._outdoor_temperature
attributes["available_schemas"] = self._schema_names
attributes["selected_schema"] = self._selected_schema
if self._boiler_temperature:
attributes["boiler_temperature"] = self._boiler_temperature
if self._water_pressure:
attributes["water_pressure"] = self._water_pressure
return attributes
@property
def preset_modes(self):
"""Return the available preset modes list.
And make the presets with their temperatures available.
"""
return self._presets_list
@property
def hvac_modes(self):
"""Return the available hvac modes list."""
if self._heating_status is not None:
if self._cooling_status is not None:
return HVAC_MODES_2
return HVAC_MODES_1
return None
@property
def hvac_mode(self):
"""Return current active hvac state."""
if self._schema_status:
return HVAC_MODE_AUTO
if self._heating_status:
if self._cooling_status:
return HVAC_MODE_HEAT_COOL
return HVAC_MODE_HEAT
return None
@property
def target_temperature(self):
"""Return the target_temperature.
From the XML the thermostat-value is used because it updates 'immediately'
compared to the target_temperature-value. This way the information on the card
is "immediately" updated after changing the preset, temperature, etc.
"""
return self._thermostat_temperature
@property
def preset_mode(self):
"""Return the active selected schedule-name.
Or return the active preset, or return Temporary in case of a manual change
in the set-temperature with a weekschedule active,
or return Manual in case of a manual change and no weekschedule active.
"""
if self._presets:
presets = self._presets
preset_temperature = presets.get(self._preset_mode, "none")
if self.hvac_mode == HVAC_MODE_AUTO:
if self._thermostat_temperature == self._schedule_temperature:
return "{}".format(self._selected_schema)
if self._thermostat_temperature == preset_temperature:
return self._preset_mode
return "Temporary"
if self._thermostat_temperature != preset_temperature:
return "Manual"
return self._preset_mode
return None
@property
def current_temperature(self):
"""Return the current room temperature."""
return self._current_temperature
@property
def min_temp(self):
"""Return the minimal temperature possible to set."""
return self._min_temp
@property
def max_temp(self):
"""Return the maximum temperature possible to set."""
return self._max_temp
@property
def temperature_unit(self):
"""Return the unit of measured temperature."""
return TEMP_CELSIUS
def set_temperature(self, **kwargs):
"""Set new target temperature."""
_LOGGER.debug("Adjusting temperature")
temperature = kwargs.get(ATTR_TEMPERATURE)
if temperature is not None and self._min_temp < temperature < self._max_temp:
_LOGGER.debug("Changing temporary temperature")
self._api.set_temperature(self._domain_objects, temperature)
else:
_LOGGER.error("Invalid temperature requested")
def set_hvac_mode(self, hvac_mode):
"""Set the hvac mode."""
_LOGGER.debug("Adjusting hvac_mode (i.e. schedule/schema)")
schema_mode = "false"
if hvac_mode == HVAC_MODE_AUTO:
schema_mode = "true"
self._api.set_schema_state(
self._domain_objects, self._selected_schema, schema_mode
)
def set_preset_mode(self, preset_mode):
"""Set the preset mode."""
_LOGGER.debug("Changing preset mode")
self._api.set_preset(self._domain_objects, preset_mode)
def update(self):
"""Update the data from the thermostat."""
_LOGGER.debug("Update called")
self._domain_objects = self._api.get_domain_objects()
self._outdoor_temperature = self._api.get_outdoor_temperature(
self._domain_objects
)
self._selected_schema = self._api.get_active_schema_name(self._domain_objects)
self._preset_mode = self._api.get_current_preset(self._domain_objects)
self._presets = self._api.get_presets(self._domain_objects)
self._presets_list = list(self._api.get_presets(self._domain_objects))
self._heating_status = self._api.get_heating_status(self._domain_objects)
self._cooling_status = self._api.get_cooling_status(self._domain_objects)
self._schema_names = self._api.get_schema_names(self._domain_objects)
self._schema_status = self._api.get_schema_state(self._domain_objects)
self._current_temperature = self._api.get_current_temperature(
self._domain_objects
)
self._thermostat_temperature = self._api.get_thermostat_temperature(
self._domain_objects
)
self._schedule_temperature = self._api.get_schedule_temperature(
self._domain_objects
)
self._boiler_temperature = self._api.get_boiler_temperature(
self._domain_objects
)
self._water_pressure = self._api.get_water_pressure(self._domain_objects)
| apache-2.0 | 2,685,025,198,586,849,300 | 33.132867 | 86 | 0.631121 | false |
BelledonneCommunications/linphone | wrappers/cpp/genwrapper.py | 1 | 21775 | #!/usr/bin/python
#
# Copyright (c) 2010-2019 Belledonne Communications SARL.
#
# This file is part of Liblinphone.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <http://www.gnu.org/licenses/>.
#
import pystache
import re
import argparse
import os
import os.path
import sys
import errno
import logging
sys.path.insert(0, os.path.join(os.path.dirname(__file__), '..', '..', 'tools'))
import genapixml as CApi
import abstractapi as AbsApi
import metadoc
import metaname
class CppTranslator:
sharedPtrTypeExtractor = re.compile('^(const )?std::shared_ptr<(.+)>( &)?$')
def __init__(self, rootNs=None):
self.nameTranslator = metaname.Translator.get('Cpp')
self.langTranslator = AbsApi.Translator.get('Cpp')
self.langTranslator.ambigousTypes.append('LinphonePayloadType')
self.docTranslator = metadoc.DoxygenTranslator('Cpp')
self.rootNs = rootNs
def translate_enum(self, enum):
enumDict = {}
enumDict['name'] = enum.name.translate(self.nameTranslator)
enumDict['doc'] = enum.briefDescription.translate(self.docTranslator)
enumDict['enumerators'] = []
for enumerator in enum.enumerators:
enumeratorDict = self.translate_enumerator(enumerator)
enumeratorDict['notLast'] = (enumerator is not enum.enumerators[-1])
enumDict['enumerators'].append(enumeratorDict)
return enumDict
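	# --- Illustrative sketch, not part of the original translator -----------
	# For a hypothetical enum 'LinphoneCallState' with an enumerator 'Idle',
	# translate_enum() returns roughly:
	#   {'name': 'CallState', 'doc': <translated brief description>,
	#    'enumerators': [{'name': 'Idle', 'value': <translated value>,
	#                     'doc': ..., 'notLast': True}, ...]}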
def translate_enumerator(self, enumerator):
enumeratorDict = {
'name' : enumerator.name.translate(self.nameTranslator),
'value' : enumerator.translate_value(self.langTranslator)
}
try:
enumeratorDict['doc'] = enumerator.briefDescription.translate(self.docTranslator)
except metadoc.TranslationError as e:
logging.error(e.msg())
return enumeratorDict
def translate_class(self, _class):
islistenable = _class.listenerInterface is not None
classDict = {
'islistenable' : islistenable,
'isnotlistenable' : not islistenable,
'isNotListener' : True,
'isListener' : False,
'isVcard' : (_class.name.to_c() == 'LinphoneVcard'),
'className' : _class.name.translate(self.nameTranslator),
'cClassName' : '::' + _class.name.to_c(),
'privCClassName' : '_' + _class.name.to_c(),
'parentClassName' : 'Object' if _class.refcountable else None,
'enums' : [],
'methods' : [],
'staticMethods' : [],
'wrapperCbs' : [],
'friendClasses' : []
}
if _class.name.to_c() == 'LinphoneCore':
classDict['friendClasses'].append({'name': 'Factory'});
try:
classDict['briefDoc'] = _class.briefDescription.translate(self.docTranslator, tagAsBrief=True)
classDict['detailedDoc'] = _class.detailedDescription.translate(self.docTranslator)
except metadoc.TranslationError as e:
logging.error(e.msg())
if islistenable:
classDict['listenerClassName'] = _class.listenerInterface.name.translate(self.nameTranslator)
classDict['cListenerName'] = _class.listenerInterface.name.to_c()
classDict['cppListenerName'] = _class.listenerInterface.name.translate(self.nameTranslator)
for method in _class.listenerInterface.instanceMethods:
if method.returnType.cDecl == 'void':
classDict['wrapperCbs'].append(self._generate_wrapper_callback(_class, method))
classDict['parentClassName'] = 'MultiListenableObject'
classDict['listenerCreator'] = 'linphone_factory_create_' + _class.listenerInterface.name.to_snake_case()[:-len('_listener')] + '_cbs'
classDict['callbacksAdder'] = _class.name.to_snake_case(fullName=True)+ '_add_callbacks'
classDict['callbacksRemover'] = _class.name.to_snake_case(fullName=True)+ '_remove_callbacks'
classDict['userDataSetter'] = _class.listenerInterface.name.to_snake_case(fullName=True)[:-len('_listener')] + '_cbs_set_user_data'
classDict['userDataGetter'] = _class.listenerInterface.name.to_snake_case(fullName=True)[:-len('_listener')] + '_cbs_get_user_data'
classDict['currentCallbacksGetter'] = _class.name.to_snake_case(fullName=True) + '_get_current_callbacks'
for enum in _class.enums:
classDict['enums'].append(self.translate_enum(enum))
for _property in _class.properties:
classDict['methods'] += self.translate_property(_property)
for method in _class.instanceMethods:
methodDict = self.translate_method(method)
classDict['methods'].append(methodDict)
for method in _class.classMethods:
methodDict = self.translate_method(method)
classDict['staticMethods'].append(methodDict)
return classDict
def _generate_wrapper_callback(self, listenedClass, method):
namespace = method.find_first_ancestor_by_type(AbsApi.Namespace)
listenedClass = method.find_first_ancestor_by_type(AbsApi.Interface).listenedClass
params = {}
params['name'] = method.name.to_snake_case(fullName=True) + '_cb'
args = []
wrappedArgs = []
for arg in method.args:
args.append(arg.type.cDecl + ' ' + arg.name.to_c())
wrappedArgs.append(self._wrap_c_expression_to_cpp(arg.name.to_c(), arg.type, usedNamespace=namespace))
params['params'] = ', '.join(args)
params['returnType'] = method.returnType.cDecl
wrapperCbDict = {}
wrapperCbDict['cbName'] = params['name']
wrapperCbDict['declArgs'] = params['params']
wrapperCbDict['firstArgName'] = method.args[0].name.to_c()
wrapperCbDict['returnType'] = params['returnType']
wrapperCbDict['callbackSetter'] = listenedClass.name.to_snake_case(fullName=True) + '_cbs_set_' + method.name.to_snake_case()[3:]
wrapperCbDict['cppMethodCallingLine'] = 'listener->{methodName}({wrappedArgs})'.format(
methodName=method.name.to_camel_case(lower=True),
wrappedArgs=', '.join(wrappedArgs))
wrapperCbDict['cppMethodCallingLine'] = self._wrap_cpp_expression_to_c(wrapperCbDict['cppMethodCallingLine'], method.returnType)
return wrapperCbDict
def translate_interface(self, interface):
intDict = {
'inheritFrom' : {'name': 'Listener'},
'className' : interface.name.translate(self.nameTranslator),
'constructor' : None,
'parentClassName' : 'Listener',
'isNotListener' : False,
'isListener' : True,
'methods' : []
}
for method in interface.instanceMethods:
methodDict = self.translate_method(method, genImpl=False)
intDict['methods'].append(methodDict)
return intDict
def translate_property(self, _property):
res = []
if _property.getter is not None:
res.append(self.translate_method(_property.getter))
if _property.setter is not None:
res.append(self.translate_method(_property.setter))
return res
def translate_method(self, method, genImpl=True):
namespace = method.find_first_ancestor_by_type(AbsApi.Class, AbsApi.Interface)
methodDict = {
'declPrototype': method.translate_as_prototype(self.langTranslator, namespace=namespace),
'implPrototype': method.translate_as_prototype(self.langTranslator, namespace=AbsApi.GlobalNs),
'deprecated': method.deprecated,
'suffix': '',
}
try:
methodDict['briefDoc'] = method.briefDescription.translate(self.docTranslator, tagAsBrief=True) if method.briefDescription is not None else None
methodDict['detailedDoc'] = method.detailedDescription.translate(self.docTranslator) if method.detailedDescription is not None else None
except metadoc.TranslationError as e:
logging.error(e.msg())
if type(method.parent) is AbsApi.Interface:
if isinstance(method.returnType, AbsApi.BaseType) and method.returnType.name == 'void':
methodDict['suffix'] = ' {}'
else:
methodDict['suffix'] = ' = 0'
if genImpl:
methodDict['sourceCode' ] = self._generate_source_code(method, usedNamespace=namespace)
return methodDict
def _generate_source_code(self, method, usedNamespace=None):
nsName = usedNamespace.name if usedNamespace is not None else None
params = {
'functionName': method.name.to_c(),
'args': self._generate_wrapped_arguments(method, usedNamespace=usedNamespace)
}
if method.name.to_camel_case(lower=True) != 'setListener':
cExpr = '{functionName}({args})'.format(**params)
cppExpr = self._wrap_c_expression_to_cpp(cExpr, method.returnType, usedNamespace=usedNamespace)
else:
cppExpr = 'ListenableObject::setListener(std::static_pointer_cast<Listener>({0}))'.format(method.args[0].name.to_snake_case())
if type(method.returnType) is AbsApi.BaseType and method.returnType.name == 'void' and not method.returnType.isref:
return cppExpr + ';'
else:
return 'return {0};'.format(cppExpr)
def _generate_wrapped_arguments(self, method, usedNamespace=None):
args = []
if method.type == AbsApi.Method.Type.Instance:
_class = method.find_first_ancestor_by_type(AbsApi.Class)
argStr = '(::{0} *)mPrivPtr'.format(_class.name.to_camel_case(fullName=True))
args.append(argStr)
for arg in method.args:
paramName = arg.name.to_camel_case(lower=True)
args.append(self._wrap_cpp_expression_to_c(paramName, arg.type, usedNamespace=usedNamespace))
return ', '.join(args)
def _wrap_cpp_expression_to_c(self, cppExpr, exprtype, usedNamespace=None):
if isinstance(exprtype, AbsApi.BaseType):
if exprtype.name == 'string':
cExpr = 'StringUtilities::cppStringToC({0})'.format(cppExpr)
else:
cExpr = cppExpr
elif isinstance(exprtype, AbsApi.EnumType):
cExpr = '(::{0}){1}'.format(exprtype.desc.name.to_c(), cppExpr)
elif isinstance(exprtype, AbsApi.ClassType):
cPtrType = exprtype.desc.name.to_c()
if exprtype.desc.refcountable:
ptrType = exprtype.translate(self.langTranslator, namespace=usedNamespace)
ptrType = CppTranslator.sharedPtrTypeExtractor.match(ptrType).group(2)
param = {
'ptrType' : ptrType,
'cPtrType': cPtrType,
'cppExpr' : cppExpr,
'object' : 'const Object' if exprtype.isconst else 'Object'
}
cExpr = '(::{cPtrType} *)Object::sharedPtrToCPtr(std::static_pointer_cast<{object},{ptrType}>({cppExpr}))'.format(**param)
else:
if exprtype.isref:
cExpr = '(const ::{_type} *)({expr}).c_struct()'.format(_type=cPtrType, expr=cppExpr)
else:
cExpr = '*(const ::{_type} *)({expr}).c_struct()'.format(_type=cPtrType, expr=cppExpr)
elif isinstance(exprtype, AbsApi.ListType):
if isinstance(exprtype.containedTypeDesc, AbsApi.BaseType) and exprtype.containedTypeDesc.name == 'string':
cExpr = 'StringBctbxListWrapper({0}).c_list()'.format(cppExpr)
elif isinstance(exprtype.containedTypeDesc, AbsApi.ClassType):
ptrType = exprtype.containedTypeDesc.translate(self.langTranslator, namespace=usedNamespace)
if exprtype.containedTypeDesc.desc.refcountable:
ptrType = CppTranslator.sharedPtrTypeExtractor.match(ptrType).group(2)
cExpr = 'ObjectBctbxListWrapper<{0}>({1}).c_list()'.format(ptrType, cppExpr)
else:
cType = exprtype.containedTypeDesc.desc.name.to_c()
if exprtype.isconst:
cExpr = 'StructBctbxListWrapper<{0},{1}>({2}).c_list()'.format(ptrType, cType, cppExpr)
else:
cExpr = 'StructBctbxListWrapper<{0},{1}>::cppListToBctbxList({2})'.format(ptrType, cType, cppExpr)
else:
raise AbsApi.Error('translation of bctbx_list_t of enums or basic C types is not supported')
return cExpr
def _wrap_c_expression_to_cpp(self, cExpr, exprtype, usedNamespace=None):
if isinstance(exprtype, AbsApi.BaseType):
if exprtype.name == 'string':
return 'StringUtilities::cStringToCpp({0})'.format(cExpr)
elif exprtype.name == 'string_array':
return 'StringUtilities::cStringArrayToCppList({0})'.format(cExpr)
elif exprtype.name == 'boolean':
return '({0} != FALSE)'.format(cExpr)
else:
return cExpr
elif isinstance(exprtype, AbsApi.EnumType):
cppEnumName = exprtype.translate(self.langTranslator, namespace=usedNamespace)
return '({0}){1}'.format(cppEnumName, cExpr)
elif isinstance(exprtype, AbsApi.ClassType):
cppReturnType = exprtype.translate(self.langTranslator, namespace=usedNamespace)
if exprtype.desc.refcountable:
cppReturnType = CppTranslator.sharedPtrTypeExtractor.match(cppReturnType).group(2)
if isinstance(exprtype.parent, AbsApi.Method) and exprtype.parent.returnAllocatedObject:
return 'Object::cPtrToSharedPtr<{0}>({1}, false)'.format(cppReturnType, cExpr)
else:
return 'Object::cPtrToSharedPtr<{0}>({1})'.format(cppReturnType, cExpr)
else:
if exprtype.isref:
return '{0}({1})'.format(exprtype.desc.name.to_camel_case(), cExpr)
else:
return '{0}(StructWrapper<::{1}>({2}).ptr())'.format(
exprtype.desc.name.to_camel_case(),
exprtype.desc.name.to_c(),
cExpr)
elif isinstance(exprtype, AbsApi.ListType):
if isinstance(exprtype.containedTypeDesc, AbsApi.BaseType) and exprtype.containedTypeDesc.name == 'string':
return 'StringBctbxListWrapper::bctbxListToCppList({0})'.format(cExpr)
elif isinstance(exprtype.containedTypeDesc, AbsApi.ClassType):
cppReturnType = exprtype.containedTypeDesc.translate(self.langTranslator, namespace=usedNamespace)
takeRef = 'false' if isinstance(exprtype.parent, AbsApi.Method) and exprtype.parent.returnAllocatedObject else 'true'
if exprtype.containedTypeDesc.desc.refcountable:
cppReturnType = CppTranslator.sharedPtrTypeExtractor.match(cppReturnType).group(2)
return 'ObjectBctbxListWrapper<{0}>::bctbxListToCppList({1}, {2})'.format(cppReturnType, cExpr, takeRef)
else:
cType = exprtype.containedTypeDesc.desc.name.to_c()
return 'StructBctbxListWrapper<{0},{1}>::bctbxListToCppList({2}, {3})'.format(cppReturnType, cType, cExpr, takeRef)
else:
raise AbsApi.Error('translation of bctbx_list_t of enums or basic C types is not supported')
else:
return cExpr
@staticmethod
def fail(obj):
raise AbsApi.Error('Cannot translate {0} type'.format(type(obj)))
class EnumsHeader:
def __init__(self, translator):
self.translator = translator
self.enums = []
def add_enum(self, enum):
self.enums.append(self.translator.translate_enum(enum))
class ClassHeader:
def __init__(self, _class, translator):
if type(_class) is AbsApi.Class:
self._class = translator.translate_class(_class)
else:
self._class = translator.translate_interface(_class)
self.rootNs = translator.rootNs
self.define = '_{0}_HH'.format(_class.name.to_snake_case(upper=True, fullName=True))
self.filename = '{0}.hh'.format(_class.name.to_snake_case())
self.priorDeclarations = []
self.private_type = _class.name.to_camel_case(fullName=True)
self.includes = {'internal': [], 'external': []}
self._populate_needed_includes(_class)
def _populate_needed_includes(self, _class):
if type(_class) is AbsApi.Class:
for _property in _class.properties:
if _property.setter is not None:
self._populate_needed_includes_from_method(_property.setter)
if _property.getter is not None:
self._populate_needed_includes_from_method(_property.getter)
if type(_class) is AbsApi.Class:
methods = _class.classMethods + _class.instanceMethods
else:
methods = _class.instanceMethods
for method in methods:
self._populate_needed_includes_from_type(method.returnType)
for arg in method.args:
self._populate_needed_includes_from_type(arg.type)
if isinstance(_class, AbsApi.Class) and _class.listenerInterface is not None:
decl = 'class ' + _class.listenerInterface.name.translate(metaname.Translator.get('Cpp'))
self._add_prior_declaration(decl)
currentClassInclude = _class.name.to_snake_case()
self.includes['internal'] = [x for x in self.includes['internal'] if x['name'] != currentClassInclude]
def _populate_needed_includes_from_method(self, method):
self._populate_needed_includes_from_type(method.returnType)
for arg in method.args:
self._populate_needed_includes_from_type(arg.type)
def _populate_needed_includes_from_type(self, type_):
translator = metaname.Translator.get('Cpp')
if isinstance(type_, AbsApi.ClassType):
class_ = type_.desc
if class_.parent == self.rootNs:
decl = 'class ' + class_.name.translate(translator)
self._add_prior_declaration(decl)
else:
rootClass = class_.find_first_ancestor_by_type(AbsApi.Namespace, priorAncestor=True)
self._add_include('internal', rootClass.name.to_snake_case())
elif isinstance(type_, AbsApi.EnumType):
enum = type_.desc
if enum.parent == self.rootNs:
headerFile = 'enums'
else:
rootClass = enum.find_first_ancestor_by_type(AbsApi.Namespace, priorAncestor=True)
headerFile = rootClass.name.to_snake_case()
self._add_include('internal', headerFile)
elif isinstance(type_, AbsApi.BaseType):
if type_.name == 'integer' and isinstance(type_.size, int):
self._add_include('external', 'cstdint')
elif type_.name == 'string':
self._add_include('external', 'string')
elif isinstance(type_, AbsApi.ListType):
self._add_include('external', 'list')
self._populate_needed_includes_from_type(type_.containedTypeDesc)
def _add_include(self, location, name):
if next((x for x in self.includes[location] if x['name']==name), None) is None:
self.includes[location].append({'name': name})
def _add_prior_declaration(self, decl):
if next((x for x in self.priorDeclarations if x['declaration']==decl), None) is None:
self.priorDeclarations.append({'declaration': decl})
class MainHeader:
def __init__(self):
self.includes = []
self.define = '_LINPHONE_HH'
def add_include(self, include):
self.includes.append({'name': include})
class ClassImpl:
def __init__(self):
self.classes = []
self.namespace = 'linphone'
class GenWrapper:
def __init__(self, includedir, srcdir, xmldir):
self.includedir = includedir
self.srcdir = srcdir
project = CApi.Project()
project.initFromDir(xmldir)
project.check()
self.parser = AbsApi.CParser(project)
self.parser.functionBl += [
'linphone_factory_create_shared_core',
'linphone_factory_create_shared_core_with_config',
'linphone_config_new_for_shared_core',
'linphone_push_notification_message_new',
'linphone_push_notification_message_ref',
'linphone_push_notification_message_unref',
'linphone_push_notification_message_is_using_user_defaults',
'linphone_push_notification_message_get_call_id',
'linphone_push_notification_message_is_text',
'linphone_push_notification_message_get_text_content',
'linphone_push_notification_message_get_subject',
'linphone_push_notification_message_get_from_addr',
'linphone_push_notification_message_get_local_addr',
'linphone_push_notification_message_get_peer_addr',
'linphone_core_get_new_message_from_callid',
'linphone_core_get_new_chat_room_from_conf_addr'
]
self.parser.parse_all()
self.translator = CppTranslator(self.parser.namespace)
self.renderer = pystache.Renderer()
self.mainHeader = MainHeader()
self.impl = ClassImpl()
def render_all(self):
header = EnumsHeader(self.translator)
for enum in self.parser.namespace.enums:
header.add_enum(enum)
self.render(header, self.includedir + '/enums.hh')
self.mainHeader.add_include('enums.hh')
for _class in self.parser.interfacesIndex.values():
self.render_header(_class)
for _class in self.parser.classesIndex.values():
self.render_header(_class)
self.render(self.mainHeader, self.includedir + '/linphone.hh')
self.render(self.impl, self.srcdir + '/linphone++.cc')
def render(self, item, path):
tmppath = path + '.tmp'
content = ''
with open(tmppath, mode='w') as f:
f.write(self.renderer.render(item))
with open(tmppath, mode='rU') as f:
content = f.read()
with open(path, mode='w') as f:
f.write(content)
os.unlink(tmppath)
def render_header(self, _class):
if _class is not None:
header = ClassHeader(_class, self.translator)
headerName = _class.name.to_snake_case() + '.hh'
self.mainHeader.add_include(headerName)
self.render(header, self.includedir + '/' + header.filename)
if type(_class) is not AbsApi.Interface:
self.impl.classes.append(header._class)
if __name__ == '__main__':
try:
argparser = argparse.ArgumentParser(description='Generate source files for the C++ wrapper')
argparser.add_argument('xmldir', type=str, help='Directory where the XML documentation of the Linphone\'s API generated by Doxygen is placed')
argparser.add_argument('-o', '--output', type=str, help='the directory where the source files will be generated', dest='outputdir', default='.')
argparser.add_argument('-v', '--verbose', help='Show warning and info traces.', action='store_true', default=False, dest='verbose_mode')
argparser.add_argument('-d', '--debug', help='Show all traces.', action='store_true', default=False, dest='debug_mode')
args = argparser.parse_args()
if args.debug_mode:
loglevel = logging.DEBUG
elif args.verbose_mode:
loglevel = logging.INFO
else:
loglevel = logging.ERROR
logging.basicConfig(format='%(levelname)s[%(name)s]: %(message)s', level=loglevel)
includedir = args.outputdir + '/include/linphone++'
srcdir = args.outputdir + '/src'
if not os.path.exists(includedir):
os.makedirs(includedir)
if not os.path.exists(srcdir):
os.makedirs(srcdir)
genwrapper = GenWrapper(includedir, srcdir, args.xmldir)
genwrapper.render_all()
except AbsApi.Error as e:
logging.critical(e)
| gpl-3.0 | -3,895,618,410,983,869,400 | 39.324074 | 147 | 0.712147 | false |
darkryder/django | tests/test_runner/test_parallel.py | 2 | 2056 | import unittest
from django.test import SimpleTestCase
from django.test.runner import RemoteTestResult
from django.utils import six
try:
import tblib
except ImportError:
tblib = None
class ParallelTestRunnerTest(SimpleTestCase):
"""
End-to-end tests of the parallel test runner.
These tests are only meaningful when running tests in parallel using
the --parallel option, though it doesn't hurt to run them not in
parallel.
"""
@unittest.skipUnless(six.PY3, 'subtests were added in Python 3.4')
def test_subtest(self):
"""
Check that passing subtests work.
"""
for i in range(2):
with self.subTest(index=i):
self.assertEqual(i, i)
class SampleFailingSubtest(SimpleTestCase):
# This method name doesn't begin with "test" to prevent test discovery
# from seeing it.
def dummy_test(self):
"""
A dummy test for testing subTest failures.
"""
for i in range(3):
with self.subTest(index=i):
self.assertEqual(i, 1)
class RemoteTestResultTest(SimpleTestCase):
@unittest.skipUnless(six.PY3 and tblib is not None, 'requires tblib to be installed')
def test_add_failing_subtests(self):
"""
Failing subtests are added correctly using addSubTest().
"""
# Manually run a test with failing subtests to prevent the failures
# from affecting the actual test run.
result = RemoteTestResult()
subtest_test = SampleFailingSubtest(methodName='dummy_test')
subtest_test.run(result=result)
events = result.events
self.assertEqual(len(events), 4)
event = events[1]
self.assertEqual(event[0], 'addSubTest')
self.assertEqual(str(event[2]), 'dummy_test (test_runner.test_parallel.SampleFailingSubtest) (index=0)')
self.assertEqual(repr(event[3][1]), "AssertionError('0 != 1',)")
event = events[2]
self.assertEqual(repr(event[3][1]), "AssertionError('2 != 1',)")
| bsd-3-clause | 1,228,747,512,798,652,200 | 29.686567 | 112 | 0.645428 | false |
TBillTech/domsocket | domsocket/data/test/app.py | 1 | 10245 | #!/usr/bin/python
"""Copyright (c) 2015 TBillTech.
This Source Code Form is subject to the terms of the Mozilla Public
License, v. 2.0. If a copy of the MPL was not distributed with this
file, You can obtain one at http://mozilla.org/MPL/2.0/.
"""
from domsocket.zmq_runner import ZMQRunner, domsocket_js_path
from domsocket.event import Event
from domsocket.basic_widgets.html_tag import HTMLTag
from domsocket.element import Element
from widgets.login_dialog import LoginDialog
from widgets.login_button import LoginButton
from widgets.increment_widget import IncrementWidget
from domsocket.text_node import TextNode
from domsocket.element_error import ElementError
from operator import index
theRunner = None
class App(Element):
_html_source_app_name = 'tester'
def __init__(self):
super(App, self).__init__()
self.tag = 'div'
def on_create(self, nodeid, ws, child_index):
super(App, self).on_create(nodeid, ws, child_index)
self.first_paragraph_show()
self.sub_body_show()
self.login_button_show()
self.login_dialog_show()
self.longlist_show()
self.increment_widget_show()
self.noop = lambda x: x
def first_paragraph_show(self):
self.create_first_paragraph()
self._attribute_removal_test()
self._text_node_test()
self._immutable_attribute_test()
def create_first_paragraph(self):
first_paragraph_kwargs = dict()
first_paragraph_kwargs['text_node'] = TextNode('Hello World!')
first_paragraph_kwargs['class'] = 'first'
first_paragraph_kwargs['useful'] = 'true'
first_paragraph_kwargs['toremove'] = 'remove_this'
self.first_paragraph = HTMLTag('p', first_paragraph_kwargs)
def _attribute_removal_test(self):
self.first_paragraph.toremove = 'remove_this_instead'
del self.first_paragraph.toremove
self.first_paragraph._toremove = 'hidden data'
del self.first_paragraph._toremove
try:
if self.first_paragraph._toremove == 'hidden data':
pass
raise ElementError('Could not remove hidden data _toremove')
except AttributeError:
pass
def _text_node_test(self):
self.first_paragraph.text_node = TextNode('Hello World!')
self.first_paragraph.text_node.text = 'Hello World! -- changed to!'
self.first_paragraph.text_node = TextNode('Hello World! -- changed to!')
self.first_paragraph.text_node = TextNode('Hello World! -- changed!')
self.first_paragraph += [TextNode('A'), TextNode('B'), TextNode('C')]
self.first_paragraph[-3:] = []
def _immutable_attribute_test(self):
try:
self.first_paragraph.useful = None
raise Exception('attribute useful should not be allowed to be set to None!')
except ElementError:
pass
try:
self.first_paragraph.id = 'wrong.name'
raise Exception('Should not be allowed to modify id!')
except ElementError:
pass
try:
del self.first_paragraph.id
raise Exception('Should not be allowed to delete id!')
except ElementError:
pass
def sub_body_show(self):
self.create_sub_body()
self._slice_tests()
self._item_tests()
def create_sub_body(self):
sub_body_kwargs = dict()
sub_body_kwargs['class'] = 'sub_body_class_tochange'
sub_body_kwargs['subp_child'] = self.sub_paragraph_child()
sub_body_kwargs['sub_body_divA'] = self.sub_body_divA_child()
sub_body_kwargs['sub_body_divB'] = self.sub_body_divA_child()
self.sub_body = HTMLTag('body', sub_body_kwargs)
if self.sub_body.getattr_class() != 'sub_body_class_tochange':
raise ElementError('getattr_class return is wrong')
self.sub_body.setattr_class('sub_body_class')
if not self.sub_body.get_html_source_app_name() == 'tester':
raise ElementError('source app name is not tester!')
def _slice_tests(self):
del self.sub_body.sub_body_divA[1:2]
self.sub_body.sub_body_divA[2:2] = [HTMLTag('span')]
self.sub_body.sub_body_divA[3] = [HTMLTag('li')]
self.sub_body.sub_body_divA[self.sub_body.sub_body_divA[4]] = [HTMLTag('span')]
self.sub_body.sub_body_divA[-1] = [HTMLTag('span')]
self.sub_body.sub_body_divA[7:13:2] = [HTMLTag('p'), HTMLTag('p'), HTMLTag('p')]
def _item_tests(self):
del self.sub_body.sub_body_divB
self.sub_body.sub_body_divB = self.sub_body_divA_child()
self.sub_body.sub_body_divB = self.sub_body_divA_child()
del self.sub_body[self.sub_body.sub_body_divB]
def sub_paragraph_child(self):
text_child = TextNode('Hello World! -- from the sub paragraph')
return HTMLTag('p', text_child)
def sub_body_divA_child(self):
return HTMLTag('div', HTMLTag('div'), HTMLTag('div'),
[HTMLTag('p'), HTMLTag('p'), HTMLTag('div'),
HTMLTag('div'), HTMLTag('div'), HTMLTag('div'),
HTMLTag('div'), HTMLTag('div'), HTMLTag('div'),
HTMLTag('div'), HTMLTag('div'), HTMLTag('div'),
HTMLTag('div'), HTMLTag('div'), HTMLTag('div')],
custom_class='custom_class_info',
keyword2='keyword2_info')
def login_button_show(self):
self.test_login_button = LoginButton()
on_focus = Event()
on_focus.add_observer(self, App.on_focus)
self.test_login_button.focus = on_focus
self.test_login_button.set_focus()
def on_focus(self, theLoginButton, msg):
if msg['event']['target'] != self.test_login_button.id:
raise Error('on_focus target "%s" != test_login_button id "%s"' %
(msg['event']['target'], self.test_login_button.id))
self.focus_found = HTMLTag('p', TextNode('on focus event returned'))
def login_dialog_show(self):
self.login = LoginDialog()
self._test_event_out_of_order()
def _test_event_out_of_order(self):
login_event = Event(client_no_bubble=True)
login_event.add_argument(self.login._username, 'value')
self.login._loginButton.click = login_event
login_event.add_observer(self, App.on_login)
login_event.add_argument(self.login._password, 'value')
def on_login(self, theLoginButton, msg):
authenticated = self.authenticate()
if authenticated:
self.on_authenticated()
else:
self.on_not_authenticated()
def on_authenticated(self):
if 'invalid' in self:
del self.invalid
self.valid = HTMLTag('p', TextNode('username and password is valid'))
self._test_replace_event()
def on_not_authenticated(self):
if 'valid' in self:
del self.valid
self.invalid = HTMLTag('p', TextNode('username and/or password is invalid'))
self._test_remove_event_argument()
def _test_remove_event_argument(self):
self.login._loginButton.click.remove_argument(self.login._password, 'value')
def _test_replace_event(self):
self.login._loginButton.click.remove_observer(self, App.on_login)
del self.login._loginButton.click
self.login._loginButton.click = Event()
self.login._loginButton.click.add_observer(self, App.colorize_valid)
def authenticate(self):
if self.login._username.value == "bad" or self.login._password.value == "bad":
return False
return True
def colorize_valid(self, theLoginButton, msg):
self.valid.style = 'color:green'
def longlist_show(self):
self.longlist = HTMLTag('ul')
for index in range(100):
self.longlist += [self.new_list_element()]
self.add_select(self.longlist[-1])
self.longlist[10:90] = []
for index in range(100):
self.longlist += [self.new_list_element()]
self.add_select(self.longlist[-1])
self.longlist[:] = []
for index in range(50):
self.longlist += [self.new_list_element()]
self.add_select(self.longlist[-1])
self.longlist[10:] = []
def new_list_element(self):
return HTMLTag('li', count=len(self.longlist))
def add_select(self, the_li):
if not the_li.find_parent(App) == self:
raise ElementError('Child the_li is not a descendant of self')
if not the_li.find_handler('on_select') == self.on_select:
raise ElementError('could not find on_select handler for the_li')
the_li.selector = HTMLTag('input', { 'type': 'checkbox' } )
select_click = Event()
select_click.add_observer(self, App.on_select)
the_li.selector.click = select_click
def on_select(self, the_checkbox, msg):
pass
def client_has_closed_ws(self, code, reason):
print('Test client has closed')
theRunner.stop()
def increment_widget_show(self):
self.incrementor = IncrementWidget(self.on_increment)
self._increment_expected_value = 3
self.incrementor.do_increment(3)
def on_increment(self, current_value):
if self._increment_expected_value != current_value:
raise ElementError('on increment expected %s != current %s' % \
(self._increment_expected_value, current_value))
self._increment_expected_value += 3
if current_value > 3:
del self.incrementor
if __name__ == '__main__':
manifest = {
('.','app.html') : ('.','app.html'),
('.','app.conf') : ('.','app.conf'),
'css' : (),
'scripts' : ('domsocket.js','increment_widget.js'),
('scripts', 'domsocket.js') : (domsocket_js_path, 'domsocket.js'),
('scripts', 'increment_widget.js') : ('widgets', 'increment_widget.js'),
'static' : (),
'style' : (),
'toolkits' : ()
}
with ZMQRunner(App, manifest) as runner:
theRunner = runner
runner.run()
| mpl-2.0 | 3,703,522,824,436,072,400 | 38.103053 | 88 | 0.599317 | false |
ericsnowcurrently/micropython | tests/pyb/rtc.py | 25 | 1430 | import pyb
from pyb import RTC
rtc = RTC()
print(rtc)
# make sure that 1 second passes correctly
rtc.datetime((2014, 1, 1, 1, 0, 0, 0, 0))
pyb.delay(1001)
print(rtc.datetime()[:7])
def set_and_print(datetime):
rtc.datetime(datetime)
print(rtc.datetime()[:7])
# make sure that setting works correctly
set_and_print((2000, 1, 1, 1, 0, 0, 0, 0))
set_and_print((2000, 1, 31, 1, 0, 0, 0, 0))
set_and_print((2000, 12, 31, 1, 0, 0, 0, 0))
set_and_print((2016, 12, 31, 1, 0, 0, 0, 0))
set_and_print((2016, 12, 31, 7, 0, 0, 0, 0))
set_and_print((2016, 12, 31, 7, 1, 0, 0, 0))
set_and_print((2016, 12, 31, 7, 12, 0, 0, 0))
set_and_print((2016, 12, 31, 7, 13, 0, 0, 0))
set_and_print((2016, 12, 31, 7, 23, 0, 0, 0))
set_and_print((2016, 12, 31, 7, 23, 1, 0, 0))
set_and_print((2016, 12, 31, 7, 23, 59, 0, 0))
set_and_print((2016, 12, 31, 7, 23, 59, 1, 0))
set_and_print((2016, 12, 31, 7, 23, 59, 59, 0))
set_and_print((2099, 12, 31, 7, 23, 59, 59, 0))
# check that calibration works correctly
# save existing calibration value:
cal_tmp = rtc.calibration()
def set_and_print_calib(cal):
rtc.calibration(cal)
print(rtc.calibration())
set_and_print_calib(512)
set_and_print_calib(511)
set_and_print_calib(345)
set_and_print_calib(1)
set_and_print_calib(0)
set_and_print_calib(-1)
set_and_print_calib(-123)
set_and_print_calib(-510)
set_and_print_calib(-511)
# restore existing calibration value
rtc.calibration(cal_tmp)
| mit | -3,832,771,854,199,834,600 | 27.039216 | 47 | 0.641958 | false |
CKehl/pylearn2 | pylearn2/training_algorithms/sgd.py | 6 | 48163 | """
Stochastic Gradient Descent and related functionality such as
learning rate adaptation, momentum, and Polyak averaging.
"""
from __future__ import division
__authors__ = "Ian Goodfellow"
__copyright__ = "Copyright 2010-2012, Universite de Montreal"
__credits__ = ["Ian Goodfellow, David Warde-Farley"]
__license__ = "3-clause BSD"
__maintainer__ = "David Warde-Farley"
__email__ = "pylearn-dev@googlegroups"
import logging
import warnings
import numpy as np
from theano.compat import six
from theano import config
from theano import function
from theano.gof.op import get_debug_values
from pylearn2.compat import OrderedDict, first_key
from pylearn2.monitor import Monitor
from pylearn2.space import CompositeSpace, NullSpace
from pylearn2.train_extensions import TrainExtension
from pylearn2.training_algorithms.training_algorithm import TrainingAlgorithm
from pylearn2.training_algorithms.learning_rule import Momentum
from pylearn2.training_algorithms.learning_rule import (
MomentumAdjustor as LRMomentumAdjustor)
from pylearn2.utils.iteration import is_stochastic, has_uniform_batch_size
from pylearn2.utils import py_integer_types, py_float_types
from pylearn2.utils import safe_zip
from pylearn2.utils import serial
from pylearn2.utils import sharedX
from pylearn2.utils import contains_nan
from pylearn2.utils import contains_inf
from pylearn2.utils import isfinite
from pylearn2.utils.data_specs import DataSpecsMapping
from pylearn2.utils.exc import reraise_as
from pylearn2.utils.timing import log_timing
from pylearn2.utils.rng import make_np_rng
log = logging.getLogger(__name__)
class SGD(TrainingAlgorithm):
"""
SGD = (Minibatch) Stochastic Gradient Descent.
A TrainingAlgorithm that does stochastic gradient descent on
minibatches of training examples.
For theoretical background on this algorithm, see Yoshua Bengio's
machine learning course notes on the subject:
http://www.iro.umontreal.ca/~pift6266/H10/notes/gradient.html
Parameters
----------
learning_rate : float
The learning rate to use. Train object callbacks can change the
learning rate after each epoch. SGD update_callbacks can change
it after each minibatch.
cost : pylearn2.costs.cost.Cost, optional
Cost object specifying the objective function to be minimized.
Optionally, may be None. In this case, SGD will call the model's
get_default_cost method to obtain the objective function.
batch_size : int, optional
The size of the batch to be used.
If not specified, the model will be asked for the batch size, so
you must have specified the batch size there.
(Some models are rigidly defined to only work with one batch size)
monitoring_batch_size : int, optional
The size of the monitoring batches.
monitoring_batches : int, optional
At the start of each epoch, we run "monitoring", to evaluate
quantities such as the validation set error.
monitoring_batches, if specified, determines the number of batches
to draw from the iterator for each monitoring dataset.
Unnecessary if not using monitoring or if `monitor_iteration_mode`
is 'sequential' and `batch_size` is specified (number of
batches will be calculated based on full dataset size).
TODO: make it possible to specify different monitoring_batches
for each monitoring dataset. The Monitor itself already supports
this.
monitoring_dataset : Dataset or dictionary, optional
If not specified, no monitoring is used.
If specified to be a Dataset, monitor on that Dataset.
If specified to be a dictionary, the keys should be string names
of datasets, and the values should be Datasets. All monitoring
channels will be computed for all monitoring Datasets and will
have the dataset name and an underscore prepended to them.
monitor_iteration_mode : str, optional
The iteration mode used to iterate over the examples in all
monitoring datasets. If not specified, defaults to 'sequential'.
TODO: make it possible to specify different modes for different
datasets.
termination_criterion : instance of \
pylearn2.termination_criteria.TerminationCriterion, optional
Used to determine when the algorithm should stop running.
If not specified, runs forever--or more realistically, until
external factors halt the python process (Kansas 1977).
update_callbacks : list, optional
If specified, each member of the list should be a callable that
accepts an SGD instance as its only argument.
All callbacks will be called with this SGD instance after each
SGD step.
learning_rule : training_algorithms.learning_rule.LearningRule, optional
A learning rule computes the new parameter values given old
parameters and first-order gradients. If learning_rule is None,
sgd.SGD will update parameters according to the standard SGD
learning rule:
.. code-block:: none
param := param - learning_rate * d cost / d param
This argument allows more sophisticated learning rules, such
as SGD with momentum.
set_batch_size : bool, optional
Defaults to False.
If True, and batch_size conflicts with model.force_batch_size,
will call model.set_batch_size(batch_size) in an attempt to
change model.force_batch_size
train_iteration_mode : str, optional
Defaults to 'shuffled_sequential'.
The iteration mode to use for iterating through training examples.
batches_per_iter : int, optional
The number of batches to draw from the iterator over training
examples.
If iteration mode is 'sequential' or 'shuffled_sequential', this
is unnecessary; when unspecified we will iterate over all examples.
theano_function_mode : a valid argument to theano.function's \
'mode' parameter, optional
The theano mode to compile the updates function with. Note that
pylearn2 includes some wraplinker modes that are not bundled with
theano. See pylearn2.devtools. These extra modes let you do
things like check for NaNs at every step, or record md5 digests
of all computations performed by the update function to help
isolate problems with nondeterminism.
monitoring_costs : OrderedDict, optional
A dictionary of Cost instances. Keys should be string containing
the name of the cost. The Monitor will also include all
channels defined by these Costs, even though we don't train
using them.
seed : valid argument to np.random.RandomState, optional
The seed used for the random number generator to be passed to the
training dataset iterator (if any)
"""
def __init__(self, learning_rate, cost=None, batch_size=None,
monitoring_batch_size=None, monitoring_batches=None,
monitoring_dataset=None,
monitor_iteration_mode='sequential',
termination_criterion=None, update_callbacks=None,
learning_rule=None, set_batch_size=False,
train_iteration_mode=None, batches_per_iter=None,
theano_function_mode=None, monitoring_costs=None,
seed=[2012, 10, 5]):
if isinstance(cost, (list, tuple, set)):
raise TypeError("SGD no longer supports using collections of " +
"Costs to represent a sum of Costs. Use " +
"pylearn2.costs.cost.SumOfCosts instead.")
self.learning_rule = learning_rule
self.learning_rate = sharedX(learning_rate, 'learning_rate')
self.cost = cost
self.batch_size = batch_size
self.set_batch_size = set_batch_size
self.batches_per_iter = batches_per_iter
self._set_monitoring_dataset(monitoring_dataset)
self.monitoring_batch_size = monitoring_batch_size
self.monitoring_batches = monitoring_batches
self.monitor_iteration_mode = monitor_iteration_mode
if monitoring_dataset is None:
if monitoring_batch_size is not None:
raise ValueError("Specified a monitoring batch size " +
"but not a monitoring dataset.")
if monitoring_batches is not None:
raise ValueError("Specified an amount of monitoring batches " +
"but not a monitoring dataset.")
self.termination_criterion = termination_criterion
self._register_update_callbacks(update_callbacks)
if train_iteration_mode is None:
train_iteration_mode = 'shuffled_sequential'
self.train_iteration_mode = train_iteration_mode
self.first = True
self.rng = make_np_rng(seed, which_method=["randn", "randint"])
self.theano_function_mode = theano_function_mode
self.monitoring_costs = monitoring_costs
def _setup_monitor(self):
"""
Set up monitor to model the objective value, learning rate,
momentum (if applicable), and extra channels defined by
the cost.
This method must be called after `learning_rule.get_updates`,
since it may have an effect on `learning_rule.add_channels_to_monitor`
(that is currently the case for `learning_rule.RMSProp`).
"""
if bool(self.monitoring_dataset):
if (self.monitoring_batch_size is None and
self.monitoring_batches is None):
self.monitoring_batch_size = self.batch_size
self.monitoring_batches = self.batches_per_iter
self.monitor.setup(dataset=self.monitoring_dataset,
cost=self.cost,
batch_size=self.monitoring_batch_size,
num_batches=self.monitoring_batches,
extra_costs=self.monitoring_costs,
mode=self.monitor_iteration_mode)
dataset_name = first_key(self.monitoring_dataset)
monitoring_dataset = self.monitoring_dataset[dataset_name]
# TODO: have Monitor support non-data-dependent channels
self.monitor.add_channel(name='learning_rate',
ipt=None,
val=self.learning_rate,
data_specs=(NullSpace(), ''),
dataset=monitoring_dataset)
if self.learning_rule:
self.learning_rule.add_channels_to_monitor(
self.monitor,
monitoring_dataset)
def setup(self, model, dataset):
"""
Compiles the theano functions needed for the train method.
Parameters
----------
model : a Model instance
dataset : Dataset
"""
if self.cost is None:
self.cost = model.get_default_cost()
inf_params = [param for param in model.get_params()
if contains_inf(param.get_value())]
if len(inf_params) > 0:
raise ValueError("These params are Inf: "+str(inf_params))
if any([contains_nan(param.get_value())
for param in model.get_params()]):
nan_params = [param for param in model.get_params()
if contains_nan(param.get_value())]
raise ValueError("These params are NaN: "+str(nan_params))
self.model = model
self._synchronize_batch_size(model)
model._test_batch_size = self.batch_size
self.monitor = Monitor.get_monitor(model)
self.monitor._sanity_check()
# test if force batch size and batch size
has_force_batch_size = getattr(model, "force_batch_size", False)
train_dataset_is_uneven = \
dataset.get_num_examples() % self.batch_size != 0
has_monitoring_datasets = bool(self.monitoring_dataset)
if has_monitoring_datasets:
monitoring_datasets_are_uneven = \
any(d.get_num_examples() % self.batch_size
!= 0 for d in self.monitoring_dataset.values())
else:
monitoring_datasets_are_uneven = False # or True it doesn't matter
if has_force_batch_size and train_dataset_is_uneven and \
not has_uniform_batch_size(self.train_iteration_mode):
raise ValueError("Dataset size is not a multiple of batch size."
"You should set train_iteration_mode (and "
"maybe monitor_iteration_mode) to "
"even_sequential, even_shuffled_sequential or "
"even_batchwise_shuffled_sequential")
if has_force_batch_size and has_monitoring_datasets and \
monitoring_datasets_are_uneven and \
not has_uniform_batch_size(self.monitor_iteration_mode):
raise ValueError("Dataset size is not a multiple of batch size."
"You should set monitor_iteration_mode to "
"even_sequential, even_shuffled_sequential or "
"even_batchwise_shuffled_sequential")
data_specs = self.cost.get_data_specs(self.model)
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
# Build a flat tuple of Theano Variables, one for each space.
# We want that so that if the same space/source is specified
# more than once in data_specs, only one Theano Variable
# is generated for it, and the corresponding value is passed
# only once to the compiled Theano function.
theano_args = []
for space, source in safe_zip(space_tuple, source_tuple):
name = '%s[%s]' % (self.__class__.__name__, source)
arg = space.make_theano_batch(name=name,
batch_size=self.batch_size)
theano_args.append(arg)
theano_args = tuple(theano_args)
# Methods of `self.cost` need args to be passed in a format compatible
# with data_specs
nested_args = mapping.nest(theano_args)
fixed_var_descr = self.cost.get_fixed_var_descr(model, nested_args)
self.on_load_batch = fixed_var_descr.on_load_batch
cost_value = self.cost.expr(model, nested_args,
** fixed_var_descr.fixed_vars)
if cost_value is not None and cost_value.name is None:
# Concatenate the name of all tensors in theano_args !?
cost_value.name = 'objective'
learning_rate = self.learning_rate
params = list(model.get_params())
assert len(params) > 0
for i, param in enumerate(params):
if param.name is None:
param.name = 'sgd_params[%d]' % i
grads, updates = self.cost.get_gradients(model, nested_args,
** fixed_var_descr.fixed_vars)
if not isinstance(grads, OrderedDict):
raise TypeError(str(type(self.cost)) + ".get_gradients returned " +
"something with" + str(type(grads)) + "as its " +
"first member. Expected OrderedDict.")
for param in grads:
assert param in params
for param in params:
assert param in grads
for param in grads:
if grads[param].name is None and cost_value is not None:
grads[param].name = ('grad(%(costname)s, %(paramname)s)' %
{'costname': cost_value.name,
'paramname': param.name})
assert grads[param].dtype == param.dtype
lr_scalers = model.get_lr_scalers()
for key in lr_scalers:
if key not in params:
raise ValueError(
"Tried to scale the learning rate on " +
str(key) + " which is not an optimization parameter.")
log.info('Parameter and initial learning rate summary:')
for param in params:
param_name = param.name
if param_name is None:
param_name = 'anon_param'
lr = learning_rate.get_value() * lr_scalers.get(param, 1.)
log.info('\t' + param_name + ': ' + str(lr))
if self.learning_rule:
updates.update(self.learning_rule.get_updates(
learning_rate, grads, lr_scalers))
else:
# Use standard SGD updates with fixed learning rate.
updates.update(dict(safe_zip(params, [param - learning_rate *
lr_scalers.get(param, 1.) * grads[param]
for param in params])))
for param in params:
if updates[param].name is None:
updates[param].name = 'sgd_update(' + param.name + ')'
model.modify_updates(updates)
for param in params:
update = updates[param]
if update.name is None:
update.name = 'censor(sgd_update(' + param.name + '))'
for update_val in get_debug_values(update):
if contains_inf(update_val):
raise ValueError("debug value of %s contains infs" %
update.name)
if contains_nan(update_val):
raise ValueError("debug value of %s contains nans" %
update.name)
# Set up monitor to model the objective value, learning rate,
# momentum (if applicable), and extra channels defined by
# the cost.
# We have to do that after learning_rule.get_updates has been
# called, since it may have an effect on
# learning_rule.add_channels_to_monitor (that is currently the case
# for AdaDelta and RMSProp).
self._setup_monitor()
with log_timing(log, 'Compiling sgd_update'):
self.sgd_update = function(theano_args,
updates=updates,
name='sgd_update',
on_unused_input='ignore',
mode=self.theano_function_mode)
self.params = params
def train(self, dataset):
"""
Runs one epoch of SGD training on the specified dataset.
Parameters
----------
dataset : Dataset
"""
if not hasattr(self, 'sgd_update'):
raise Exception("train called without first calling setup")
# Make sure none of the parameters have bad values
for param in self.params:
value = param.get_value(borrow=True)
if not isfinite(value):
raise Exception("NaN in " + param.name)
self.first = False
rng = self.rng
if not is_stochastic(self.train_iteration_mode):
rng = None
data_specs = self.cost.get_data_specs(self.model)
# The iterator should be built from flat data specs, so it returns
# flat, non-redundent tuples of data.
mapping = DataSpecsMapping(data_specs)
space_tuple = mapping.flatten(data_specs[0], return_tuple=True)
source_tuple = mapping.flatten(data_specs[1], return_tuple=True)
if len(space_tuple) == 0:
# No data will be returned by the iterator, and it is impossible
# to know the size of the actual batch.
# It is not decided yet what the right thing to do should be.
raise NotImplementedError(
"Unable to train with SGD, because "
"the cost does not actually use data from the data set. "
"data_specs: %s" % str(data_specs))
flat_data_specs = (CompositeSpace(space_tuple), source_tuple)
iterator = dataset.iterator(mode=self.train_iteration_mode,
batch_size=self.batch_size,
data_specs=flat_data_specs,
return_tuple=True, rng=rng,
num_batches=self.batches_per_iter)
on_load_batch = self.on_load_batch
for batch in iterator:
for callback in on_load_batch:
callback(*batch)
self.sgd_update(*batch)
# iterator might return a smaller batch if dataset size
# isn't divisible by batch_size
# Note: if data_specs[0] is a NullSpace, there is no way to know
# how many examples would actually have been in the batch,
# since it was empty, so actual_batch_size would be reported as 0.
actual_batch_size = flat_data_specs[0].np_batch_size(batch)
self.monitor.report_batch(actual_batch_size)
for callback in self.update_callbacks:
callback(self)
# Make sure none of the parameters have bad values
for param in self.params:
value = param.get_value(borrow=True)
if not isfinite(value):
raise Exception("NaN in " + param.name)
def continue_learning(self, model):
"""
Returns True if the algorithm should continue running, or False
if it has reached convergence / started overfitting and should
stop.
Parameters
----------
model : a Model instance
"""
if self.termination_criterion is None:
return True
else:
return self.termination_criterion.continue_learning(self.model)
class MonitorBasedLRAdjuster(TrainExtension):
"""
A TrainExtension that uses the on_monitor callback to adjust
the learning rate on each epoch. It pulls out a channel
from the model's monitor and adjusts the learning rate
based on what happened to the monitoring channel on the last
epoch. If the channel is greater than high_trigger times
its previous value, the learning rate will be scaled by
shrink_amt (which should be < 1 for this scheme to make
sense). The idea is that in this case the learning algorithm
is overshooting the bottom of the objective function.
If the objective is less than high_trigger but
greater than low_trigger times its previous value, the
learning rate will be scaled by grow_amt (which should be > 1
for this scheme to make sense). The idea is that the learning
algorithm is making progress but at too slow of a rate.
Parameters
----------
high_trigger : float, optional
See class-level docstring
low_trigger : float, optional
See class-level docstring
grow_amt : float, optional
See class-level docstring
min_lr : float, optional
All updates to the learning rate are clipped to be at least
this value.
max_lr : float, optional
All updates to the learning rate are clipped to be at most
this value.
dataset_name : str, optional
If specified, use dataset_name + "_objective" as the channel
to guide the learning rate adaptation.
channel_name : str, optional
If specified, use channel_name as the channel to guide the
learning rate adaptation. Conflicts with dataset_name.
If neither dataset_name nor channel_name is specified, uses
"objective"
"""
def __init__(self, high_trigger=1., shrink_amt=.99,
low_trigger=.99, grow_amt=1.01,
min_lr=1e-7, max_lr=1.,
dataset_name=None, channel_name=None):
self.high_trigger = high_trigger
self.shrink_amt = shrink_amt
self.low_trigger = low_trigger
self.grow_amt = grow_amt
self.min_lr = min_lr
self.max_lr = max_lr
self.dataset_name = None
if channel_name is not None:
self.channel_name = channel_name
else:
if dataset_name is not None:
self.channel_name = dataset_name + '_objective'
self.dataset_name = dataset_name
else:
self.channel_name = None
def on_monitor(self, model, dataset, algorithm):
"""
Adjusts the learning rate based on the contents of model.monitor
Parameters
----------
model : a Model instance
dataset : Dataset
algorithm : WRITEME
"""
model = algorithm.model
lr = algorithm.learning_rate
current_learning_rate = lr.get_value()
assert hasattr(model, 'monitor'), ("no monitor associated with "
+ str(model))
monitor = model.monitor
monitor_channel_specified = True
if self.channel_name is None:
monitor_channel_specified = False
channels = [elem for elem in monitor.channels
if elem.endswith("objective")]
if len(channels) < 1:
raise ValueError(
"There are no monitoring channels that end "
"with \"objective\". Please specify either "
"channel_name or dataset_name.")
elif len(channels) > 1:
datasets = algorithm.monitoring_dataset.keys()
raise ValueError(
"There are multiple monitoring channels that"
"end with \"_objective\". The list of available "
"datasets are: " +
str(datasets) + " . Please specify either "
"channel_name or dataset_name in the "
"MonitorBasedLRAdjuster constructor to "
'disambiguate.')
else:
self.channel_name = channels[0]
warnings.warn('The channel that has been chosen for '
'monitoring is: ' + str(self.channel_name) + '.')
try:
v = monitor.channels[self.channel_name].val_record
except KeyError:
err_input = ''
if monitor_channel_specified:
if self.dataset_name:
err_input = 'The dataset_name \'' + str(
self.dataset_name) + '\' is not valid.'
else:
err_input = 'The channel_name \'' + str(
self.channel_name) + '\' is not valid.'
err_message = 'There is no monitoring channel named \'' + \
str(self.channel_name) + '\'. You probably need to ' + \
'specify a valid monitoring channel by using either ' + \
'dataset_name or channel_name in the ' + \
'MonitorBasedLRAdjuster constructor. ' + err_input
reraise_as(ValueError(err_message))
if len(v) < 1:
if monitor.dataset is None:
assert len(v) == 0
raise ValueError(
"You're trying to use a monitor-based "
"learning rate adjustor but the monitor has no "
"entries because you didn't specify a "
"monitoring dataset.")
raise ValueError(
"For some reason there are no monitor entries"
"yet the MonitorBasedLRAdjuster has been "
"called. This should never happen. The Train"
" object should call the monitor once on "
"initialization, then call the callbacks. "
"It seems you are either calling the "
"callback manually rather than as part of a "
"training algorithm, or there is a problem "
"with the Train object.")
if len(v) == 1:
# only the initial monitoring has happened
# no learning has happened, so we can't adjust learning rate yet
# just do nothing
return
rval = current_learning_rate
log.info("monitoring channel is {0}".format(self.channel_name))
if v[-1] > self.high_trigger * v[-2]:
rval *= self.shrink_amt
log.info("shrinking learning rate to %f" % rval)
elif v[-1] > self.low_trigger * v[-2]:
rval *= self.grow_amt
log.info("growing learning rate to %f" % rval)
rval = max(self.min_lr, rval)
rval = min(self.max_lr, rval)
lr.set_value(np.cast[lr.dtype](rval))
class PatienceBasedTermCrit(object):
"""
A monitor-based termination criterion using a geometrically increasing
amount of patience. If the selected channel has decreased by a certain
proportion compared to the lowest value seen yet, the patience is
set to a multiple of the number of epochs seen so far, which by default
(patience_increase=2.) ensures the model runs for as many epochs as it
took to reach the lowest value before concluding a local optimum has
been reached.
Note: technically, the patience corresponds to a number of epochs, not
examples, so that it is independent of the size of the dataset; keep
that in mind when choosing initial_patience.
Parameters
----------
prop_decrease : float
The factor X in the (1 - X) * best_value threshold
initial_patience : int
Minimal number of epochs the model has to run before it can stop
patience_increase : float, optional
The factor X in the patience = X * n_iter update.
channel_name : string, optional
Name of the channel to examine. If None and the monitor
has only one channel, this channel will be used; otherwise, an
error will be raised.
"""
def __init__(self, prop_decrease, initial_patience,
patience_increase=2., channel_name=None):
self._channel_name = channel_name
self.prop_decrease = prop_decrease
self.patience = initial_patience
self.best_value = np.inf
self.patience_increase = patience_increase
def __call__(self, model):
"""
Returns True or False depending on whether the optimization should
stop or not. The optimization should stop if it has run for a number
of epochs superior to the patience without any improvement.
Parameters
----------
model : Model
The model used in the experiment and from which the monitor used
in the termination criterion will be extracted.
Returns
-------
bool
True or False, indicating if the optimization should stop or not.
"""
monitor = model.monitor
# In the case the monitor has only one channel, the channel_name can
# be omitted and the criterion will examine the only channel
# available. However, if the monitor has multiple channels, leaving
# the channel_name unspecified will raise an error.
if self._channel_name is None:
if len(monitor.channels) != 1:
raise ValueError("Only single-channel monitors are supported "
"for channel_name == None")
v = monitor.channels.values()[0].val_record
else:
v = monitor.channels[self._channel_name].val_record
# If the channel value decrease is higher than the threshold, we
# update the best value to this value and we update the patience.
if v[-1] < self.best_value * (1. - self.prop_decrease):
# Using the max between actual patience and updated patience
# ensures that the model will run for at least the initial
# patience and that it would behave correctly if the user
# chooses a dumb value (i.e. less than 1)
self.patience = max(self.patience, len(v) * self.patience_increase)
self.best_value = v[-1]
return len(v) < self.patience
class AnnealedLearningRate(object):
"""
This is a callback for the SGD algorithm rather than the Train object.
This anneals the learning rate to decrease as 1/t where t is the number
of gradient descent updates done so far. Use OneOverEpoch as Train object
callback if you would prefer 1/t where t is epochs.
Parameters
----------
anneal_start : int
The epoch on which to begin annealing
"""
def __init__(self, anneal_start):
self._initialized = False
self._count = 0
self._anneal_start = anneal_start
def __call__(self, algorithm):
"""
Updates the learning rate according to the annealing schedule.
Parameters
----------
algorithm : WRITEME
"""
if not self._initialized:
self._base = algorithm.learning_rate.get_value()
self._initialized = True
self._count += 1
algorithm.learning_rate.set_value(np.cast[config.floatX](
self.current_learning_rate()))
def current_learning_rate(self):
"""
Returns the current desired learning rate according to the
annealing schedule.
"""
return self._base * min(1, self._anneal_start / self._count)
class ExponentialDecay(object):
"""
This is a callback for the `SGD` algorithm rather than the `Train` object.
This anneals the learning rate by dividing by decay_factor after each
gradient descent step. It will not shrink the learning rate beyond
`min_lr`.
Parameters
----------
decay_factor : float
The learning rate at step t is given by
`init_learning_rate / (decay_factor ** t)`
min_lr : float
The learning rate will be clipped to be at least this value
"""
def __init__(self, decay_factor, min_lr):
if isinstance(decay_factor, str):
decay_factor = float(decay_factor)
if isinstance(min_lr, str):
min_lr = float(min_lr)
assert isinstance(decay_factor, float)
assert isinstance(min_lr, float)
self.__dict__.update(locals())
del self.self
self._count = 0
self._min_reached = False
def __call__(self, algorithm):
"""
Updates the learning rate according to the exponential decay schedule.
Parameters
----------
algorithm : SGD
The SGD instance whose `learning_rate` field should be modified.
"""
if self._count == 0:
self._base_lr = algorithm.learning_rate.get_value()
self._count += 1
if not self._min_reached:
# If we keep on executing the exponentiation on each mini-batch,
# we will eventually get an OverflowError. So make sure we
# only do the computation until min_lr is reached.
new_lr = self._base_lr / (self.decay_factor ** self._count)
if new_lr <= self.min_lr:
self._min_reached = True
new_lr = self.min_lr
else:
new_lr = self.min_lr
new_lr = np.cast[config.floatX](new_lr)
algorithm.learning_rate.set_value(new_lr)
class LinearDecay(object):
"""
This is a callback for the SGD algorithm rather than the Train object.
This anneals the learning rate, linearly, to decay_factor times the
initial value between step `start` and step `saturate`.
Parameters
----------
start : int
The step at which to start decreasing the learning rate
saturate : int
The step at which to stop decreasing the learning rate
decay_factor : float
`final learning rate = decay_factor * initial learning rate`
"""
def __init__(self, start, saturate, decay_factor):
if isinstance(decay_factor, str):
decay_factor = float(decay_factor)
if isinstance(start, str):
start = float(start)
if isinstance(saturate, str):
saturate = float(saturate)
assert isinstance(decay_factor, float)
assert isinstance(start, (py_integer_types, py_float_types))
assert isinstance(saturate, (py_integer_types, py_float_types))
assert saturate > start
assert start > 0
self.__dict__.update(locals())
del self.self
self._count = 0
def __call__(self, algorithm):
"""
Adjusts the learning rate according to the linear decay schedule
Parameters
----------
algorithm : WRITEME
"""
if self._count == 0:
self._base_lr = algorithm.learning_rate.get_value()
self._step = ((self._base_lr - self._base_lr * self.decay_factor) /
(self.saturate - self.start + 1))
self._count += 1
if self._count >= self.start:
if self._count < self.saturate:
new_lr = self._base_lr - self._step * (self._count
- self.start + 1)
else:
new_lr = self._base_lr * self.decay_factor
else:
new_lr = self._base_lr
assert new_lr > 0
new_lr = np.cast[config.floatX](new_lr)
algorithm.learning_rate.set_value(new_lr)
class EpochMonitor(object):
"""
This is a callback for the SGD algorithm rather than the Train object.
It can log one-line progress summaries and/or full monitor updates at
regular intervals within epochs, which can be useful for large datasets.
Note that each monitor update increases the calculation time of the epoch.
Parameters
----------
model : pylearn2 model instance
The model being monitored
tick_rate : int (optional)
Log one-line updates every `tick_rate` batches
monitor_rate : int (optional)
Call full monitor updates within epochs every `monitor_rate` batches
YAML usage
----------
model: &model !obj:pylearn2.models.mlp.MLP {
...
},
algorithm: !obj:pylearn2.training_algorithms.sgd.SGD {
update_callbacks: [
!obj:pylearn2.training_algorithms.sgd.EpochMonitor {
model: *model,
tick_rate: 20,
monitor_rate: 110 }],
...
}
"""
def __init__(self, model, tick_rate=None, monitor_rate=None):
self.model = model
self.tick_rate = tick_rate
self.monitor_rate = monitor_rate
self.batches = 0
self.epoch = 1
def __call__(self, algorithm):
if self.model.monitor.get_epochs_seen() == self.epoch:
self.epoch += 1
self.batches = 0
else:
self.batches += 1
if self.monitor_rate and self.batches and not (
self.batches % self.monitor_rate):
self.model.monitor.__call__()
elif self.tick_rate and not self.batches % self.tick_rate:
log.info('Epoch {}: {} batches seen'.format(
self.epoch, self.batches))
class OneOverEpoch(TrainExtension):
"""
Scales the learning rate like one over # epochs
Parameters
----------
start : int
The epoch on which to start shrinking the learning rate
half_life : int, optional
How many epochs after start it will take for the learning rate to lose
half its value for the first time (to lose the next half of its value
will take twice as long)
min_lr : float, optional
The minimum value the learning rate can take on
"""
def __init__(self, start, half_life=None, min_lr=1e-6):
self.__dict__.update(locals())
del self.self
self._initialized = False
self._count = 0
assert start >= 0
if half_life is None:
self.half_life = start + 1
else:
assert half_life > 0
def on_monitor(self, model, dataset, algorithm):
"""
Adjusts the learning rate according to the decay schedule.
Parameters
----------
model : a Model instance
dataset : Dataset
algorithm : WRITEME
"""
if not self._initialized:
self._init_lr = algorithm.learning_rate.get_value()
if self._init_lr < self.min_lr:
raise ValueError("The initial learning rate is smaller than " +
"the minimum allowed learning rate.")
self._initialized = True
self._count += 1
algorithm.learning_rate.set_value(np.cast[config.floatX](
self.current_lr()))
def current_lr(self):
"""
Returns the learning rate currently desired by the decay schedule.
"""
if self._count < self.start:
scale = 1
else:
scale = float(self.half_life) / float(self._count -
self.start + self.half_life)
lr = self._init_lr * scale
clipped = max(self.min_lr, lr)
return clipped
class LinearDecayOverEpoch(TrainExtension):
"""
Scales the learning rate linearly at each epoch
Parameters
----------
start : int
The epoch on which to start shrinking the learning rate
saturate : int
The epoch to saturate the shrinkage
decay_factor : float
        The final value will be the initial learning rate times decay_factor
"""
def __init__(self, start, saturate, decay_factor):
self.__dict__.update(locals())
del self.self
self._initialized = False
self._count = 0
assert isinstance(decay_factor, float)
assert isinstance(start, (py_integer_types, py_float_types))
assert isinstance(saturate, (py_integer_types, py_float_types))
assert saturate > start
assert start >= 0
assert saturate >= start
def setup(self, model, dataset, algorithm):
"""
Initializes the decay schedule based on epochs_seen.
Parameters
----------
model : pylearn2.models.Model
The model to which the training algorithm is applied.
dataset : pylearn2.datasets.Dataset
The dataset to which the model is applied.
algorithm : pylearn2.training_algorithms.TrainingAlgorithm
Describes how gradients should be updated.
"""
monitor = Monitor.get_monitor(model)
self._count = monitor.get_epochs_seen()
self._apply_learning_rate(algorithm)
def on_monitor(self, model, dataset, algorithm):
"""
Updates the learning rate based on the linear decay schedule.
Parameters
----------
model : a Model instance
dataset : Dataset
        algorithm : TrainingAlgorithm
            The training algorithm whose learning_rate is being decayed.
"""
self._count += 1
self._apply_learning_rate(algorithm)
def _apply_learning_rate(self, algorithm):
"""
Updates the learning rate on algorithm based on the epochs elapsed.
"""
if not self._initialized:
self._init_lr = algorithm.learning_rate.get_value()
self._step = ((self._init_lr - self._init_lr * self.decay_factor) /
(self.saturate - self.start + 1))
self._initialized = True
algorithm.learning_rate.set_value(np.cast[config.floatX](
self.current_lr()))
def current_lr(self):
"""
Returns the learning rate currently desired by the decay schedule.
"""
if self._count >= self.start:
if self._count < self.saturate:
new_lr = self._init_lr - self._step * (self._count
- self.start + 1)
else:
new_lr = self._init_lr * self.decay_factor
else:
new_lr = self._init_lr
assert new_lr > 0
return new_lr
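# --- Illustrative sketch (added for clarity; not part of the original class) ---
# The linear schedule computed by LinearDecayOverEpoch.current_lr above,
# traced with arbitrary example numbers: constant before `start`, linear
# between `start` and `saturate`, then pinned at init_lr * decay_factor.
def _linear_decay_over_epoch_sketch(init_lr=0.1, start=2, saturate=6, decay_factor=0.01):
    step = (init_lr - init_lr * decay_factor) / (saturate - start + 1)
    lrs = []
    for count in range(saturate + 3):
        if count < start:
            lrs.append(init_lr)
        elif count < saturate:
            lrs.append(init_lr - step * (count - start + 1))
        else:
            lrs.append(init_lr * decay_factor)
    assert all(lr > 0 for lr in lrs)
    return lrs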
class _PolyakWorker(object):
"""
Only to be used by the PolyakAveraging TrainingCallback below.
Do not use directly.
A callback for the SGD class.
Parameters
----------
model : a Model
The model whose parameters we want to train with Polyak averaging
"""
def __init__(self, model):
avg_updates = OrderedDict()
t = sharedX(1.)
self.param_to_mean = OrderedDict()
for param in model.get_params():
mean = sharedX(param.get_value())
assert type(mean) == type(param)
self.param_to_mean[param] = mean
avg_updates[mean] = mean - (mean - param) / t
avg_updates[t] = t + 1.
self.avg = function([], updates=avg_updates)
def __call__(self, algorithm):
"""
To be called after each SGD step.
Updates the Polyak averaged-parameters for this model
Parameters
----------
        algorithm : SGD
            The SGD instance this callback is attached to (not used here).
"""
self.avg()
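# --- Illustrative sketch (added for clarity; not part of the original class) ---
# The update `mean <- mean - (mean - param) / t` used by _PolyakWorker above
# is a running arithmetic mean of the parameter values seen at each call.
# Plain floats stand in for the shared variables here.
def _polyak_running_mean_sketch(params=(1.0, 2.0, 3.0, 4.0)):
    mean, t = 0.0, 1.0   # the worker starts t at 1; the initial mean is
                         # overwritten by the first update
    for p in params:
        mean = mean - (mean - p) / t
        t += 1.0
    assert abs(mean - sum(params) / len(params)) < 1e-12
    return mean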
class PolyakAveraging(TrainExtension):
"""
See "A Tutorial on Stochastic Approximation Algorithms
for Training Restricted Boltzmann Machines and
Deep Belief Nets" by Kevin Swersky et al
This functionality is still a work in progress. Currently,
your model needs to implement "add_polyak_channels" to
use it.
The problem is that Polyak averaging shouldn't modify
the model parameters. It should keep a second copy
that it averages in the background. This second copy
doesn't get to come back in and affect the learning process
though.
(IG tried having the second copy get pushed back into
the model once per epoch, but this turned out to be
harmful, at least in limited tests)
So we need a cleaner interface for monitoring the
averaged copy of the parameters, and we need to make
sure the saved model at the end uses the averaged
parameters, not the parameters used for computing
the gradients during training.
TODO: make use of the new on_save callback instead
of duplicating Train's save_freq flag
Parameters
----------
start : int
The epoch after which to start averaging (0 = start averaging
immediately)
    save_path : str, optional
        If given, the Polyak-averaged model is periodically saved to this path.
    save_freq : int, optional
        Save the averaged model every `save_freq` epochs after `start`.
Notes
-----
This is usually used with a fixed, rather than annealed learning
rate. It may be used in conjunction with momentum.
"""
def __init__(self, start, save_path=None, save_freq=1):
self.__dict__.update(locals())
del self.self
self._count = 0
assert isinstance(start, py_integer_types)
assert start >= 0
def on_monitor(self, model, dataset, algorithm):
"""
Make sure Polyak-averaged model gets monitored.
Save the model if necessary.
Parameters
----------
model : a Model instance
dataset : Dataset
        algorithm : TrainingAlgorithm
            The training algorithm whose update_callbacks receive the
            Polyak-averaging worker.
"""
if self._count == self.start:
self._worker = _PolyakWorker(model)
algorithm.update_callbacks.append(self._worker)
# HACK
try:
model.add_polyak_channels(self._worker.param_to_mean,
algorithm.monitoring_dataset)
except AttributeError:
pass
elif self.save_path is not None and self._count > self.start and \
self._count % self.save_freq == 0:
saved_params = OrderedDict()
for param in model.get_params():
saved_params[param] = param.get_value()
param.set_value(self._worker.param_to_mean[param].get_value())
serial.save(self.save_path, model)
for param in model.get_params():
param.set_value(saved_params[param])
self._count += 1
| bsd-3-clause | 7,701,738,028,510,791,000 | 39.202838 | 79 | 0.595935 | false |
ylatuya/Flumotion | flumotion/common/signals.py | 2 | 3177 | # -*- Mode: Python; test-case-name: flumotion.test.test_common_signals -*-
# vi:si:et:sw=4:sts=4:ts=4
#
# Flumotion - a streaming media server
# Copyright (C) 2007 Fluendo, S.L. (www.fluendo.com).
# All rights reserved.
# This file may be distributed and/or modified under the terms of
# the GNU General Public License version 2 as published by
# the Free Software Foundation.
# This file is distributed without any warranty; without even the implied
# warranty of merchantability or fitness for a particular purpose.
# See "LICENSE.GPL" in the source distribution for more information.
# Licensees having purchased or holding a valid Flumotion Advanced
# Streaming Server license may use this file in accordance with the
# Flumotion Advanced Streaming Server Commercial License Agreement.
# See "LICENSE.Flumotion" in the source distribution for more information.
# Headers in this file shall remain intact.
"""synchronous message passing between python objects
"""
import warnings
from flumotion.common import log
__version__ = "$Rev$"
class SignalMixin(object):
__signals__ = ()
__signalConnections = None
__signalId = 0
def __ensureSignals(self):
if self.__signalConnections is None:
self.__signalConnections = {}
def connect(self, signalName, proc, *args, **kwargs):
self.__ensureSignals()
if signalName not in self.__signals__:
raise ValueError('Unknown signal for object of type %r: %s'
% (type(self), signalName))
sid = self.__signalId
self.__signalConnections[sid] = (signalName, proc, args, kwargs)
self.__signalId += 1
return sid
def disconnect(self, signalId):
self.__ensureSignals()
if signalId not in self.__signalConnections:
raise ValueError('Unknown signal ID: %s' % (signalId, ))
del self.__signalConnections[signalId]
# F0.8
def disconnect_by_func(self, func):
warnings.warn("Please call disconnectByFunction instead",
DeprecationWarning, stacklevel=2)
self.disconnectByFunction(func)
def disconnectByFunction(self, function):
self.__ensureSignals()
for signalId, conn in self.__signalConnections.items():
name, proc, args, kwargs = conn
if proc == function:
break
else:
raise ValueError(
'No signal connected to function: %r' % (function, ))
del self.__signalConnections[signalId]
def emit(self, signalName, *args):
self.__ensureSignals()
if signalName not in self.__signals__:
raise ValueError('Emitting unknown signal %s' % signalName)
connections = self.__signalConnections
for name, proc, pargs, pkwargs in connections.values():
if name == signalName:
try:
proc(self, *(args + pargs), **pkwargs)
except Exception, e:
log.warning("signalmixin", "Exception calling "
"signal handler %r: %s", proc,
log.getExceptionMessage(e))
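# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# A minimal, hypothetical emitter built on SignalMixin; the class, signal
# name and handler below are invented purely for illustration.
def _signal_mixin_sketch():
    class Thermometer(SignalMixin):
        __signals__ = ('temperature-changed', )
        def setTemperature(self, value):
            self.emit('temperature-changed', value)
    readings = []
    def onChange(obj, value, unit='C'):
        # handlers get the emitting object first, then emit() args, then
        # any extra args/kwargs passed to connect()
        readings.append((value, unit))
    thermo = Thermometer()
    sid = thermo.connect('temperature-changed', onChange, unit='K')
    thermo.setTemperature(300)
    thermo.disconnect(sid)
    thermo.setTemperature(400)   # nothing connected any more
    assert readings == [(300, 'K')]
    return readings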
| gpl-2.0 | -509,771,151,639,042,200 | 32.442105 | 74 | 0.629839 | false |
SimVascular/VTK | Filters/Modeling/Testing/Python/TestBoxFunction.py | 20 | 1707 | #!/usr/bin/env python
import vtk
from vtk.test import Testing
from vtk.util.misc import vtkGetDataRoot
VTK_DATA_ROOT = vtkGetDataRoot()
box = vtk.vtkBox()
box.SetXMin(0,2,4)
box.SetXMax(2,4,6)
sample = vtk.vtkSampleFunction()
sample.SetSampleDimensions(30,30,30)
sample.SetImplicitFunction(box)
sample.SetModelBounds(0,1.5,1,5,2,8)
sample.ComputeNormalsOn()
contours = vtk.vtkContourFilter()
contours.SetInputConnection(sample.GetOutputPort())
contours.GenerateValues(5,-0.5,1.5)
w = vtk.vtkPolyDataWriter()
w.SetInputConnection(contours.GetOutputPort())
w.SetFileName("junk.vtk")
#w Write
contMapper = vtk.vtkPolyDataMapper()
contMapper.SetInputConnection(contours.GetOutputPort())
contMapper.SetScalarRange(-0.5,1.5)
contActor = vtk.vtkActor()
contActor.SetMapper(contMapper)
# We'll put a simple outline around the data.
outline = vtk.vtkOutlineFilter()
outline.SetInputConnection(sample.GetOutputPort())
outlineMapper = vtk.vtkPolyDataMapper()
outlineMapper.SetInputConnection(outline.GetOutputPort())
outlineActor = vtk.vtkActor()
outlineActor.SetMapper(outlineMapper)
outlineActor.GetProperty().SetColor(0,0,0)
# The usual rendering stuff.
ren1 = vtk.vtkRenderer()
renWin = vtk.vtkRenderWindow()
renWin.SetMultiSamples(0)
renWin.AddRenderer(ren1)
renWin.SetSize(500,500)
iren = vtk.vtkRenderWindowInteractor()
iren.SetRenderWindow(renWin)
ren1.SetBackground(1,1,1)
ren1.AddActor(contActor)
ren1.AddActor(outlineActor)
camera = vtk.vtkCamera()
camera.SetClippingRange(6.31875,20.689)
camera.SetFocalPoint(0.75,3,5)
camera.SetPosition(9.07114,-4.10065,-1.38712)
camera.SetViewAngle(30)
camera.SetViewUp(-0.580577,-0.802756,0.13606)
ren1.SetActiveCamera(camera)
iren.Initialize()
# --- end of script --
| bsd-3-clause | 6,586,355,180,249,756,000 | 30.611111 | 57 | 0.797891 | false |
udp3f/gemini | gemini/gim.py | 3 | 20905 | #!/usr/bin/env python
from __future__ import absolute_import, print_function
import sys
from collections import Counter, defaultdict
from . import GeminiQuery
from . import sql_utils
try:
from compiler import compile
except ImportError:
basestring = str
from .gemini_constants import *
from .gemini_bcolz import filter, NoGTIndexException
from .gemini_utils import to_str, PY3
from .mendelianerror import mendelian_error
import itertools as it
import operator as op
from inheritance import Family
from unidecode import unidecode
class GeminiInheritanceModel(object):
required_columns = ("gene", "family_id", "family_members",
"family_genotypes", "samples", "family_count")
def __init__(self, args):
self.args = args
self.gq = GeminiQuery.GeminiQuery(args.db, include_gt_cols=True)
self.added = []
self.gt_cols = self.gq.gt_cols
if not args.columns:
args.columns = "*," + ", ".join(self.gt_cols)
self.set_family_info()
@property
def query(self):
if self.args.columns is not None:
# the user only wants to report a subset of the columns
query = "SELECT " + str(self.args.columns) + " FROM variants "
else:
# report the kitchen sink
            query = ("SELECT chrom, start, end, *, %s "
                     "FROM variants") % ", ".join(self.gt_cols)
query = sql_utils.ensure_columns(query, ['variant_id', 'gene'])
# add any non-genotype column limits to the where clause
if self.args.filter:
query += " WHERE (" + self.args.filter + ")"
if hasattr(self.args, 'X'):
if self.args.X == []:
self.args.X = ['chrX', 'X']
part = "chrom IN (%s)" % ", ".join("'%s'" % x for x in self.args.X)
if " WHERE " in query:
query += " AND " + part
else:
query += " WHERE " + part
# auto_rec and auto_dom candidates should be limited to
# variants affecting genes.
if self.model in ("auto_rec", "auto_dom", "comp_het") or \
(self.model == "de_novo" and self.args.min_kindreds is not None):
# we require the "gene" column for the auto_* tools
if " WHERE " in query:
query += " AND gene is not NULL"
else:
query += " WHERE gene is not NULL"
# only need to order by gene for comp_het and when min_kindreds is used.
if self.model == "comp_het" or not (
self.args.min_kindreds in (None, 1)
and (self.args.families is None
or not "," in self.args.families)):
query += " ORDER by chrom, gene"
return query
def bcolz_candidates(self):
"""
Get all the variant ids that meet the genotype filter for any fam.
"""
variant_ids = set()
try:
for i, family_id in enumerate(self.family_ids):
gt_filter = self.family_masks[i]
if isinstance(gt_filter, dict):
for k, flt in gt_filter.items():
variant_ids.update(filter(self.args.db, flt, {}))
else:
if gt_filter == 'False': continue
# TODO: maybe we should just or these together and call filter once.
variant_ids.update(filter(self.args.db, gt_filter, {}))
return sorted(set(variant_ids))
except NoGTIndexException:
return None
def gen_candidates(self, group_key):
if isinstance(group_key, basestring):
group_key = op.itemgetter(group_key)
q = self.query
vids = self.bcolz_candidates()
if vids is None:
self.gq.run(q, needs_genotypes=True)
elif len(vids) > 0:
q = GeminiQuery.add_variant_ids_to_query(q, vids)
self.gq.run(q, needs_genotypes=True)
else:
# no variants met the criteria
            return
for grp_key, grp in it.groupby(self.gq, group_key):
yield grp_key, grp
def all_candidates(self):
_, candidates = self.gen_candidates(group_key=None)
for candidate in candidates:
yield candidate
def gene_candidates(self):
for gene, candidates in self.gen_candidates(group_key="gene"):
yield gene, candidates
def set_family_info(self):
"""
Extract the relevant genotype filters, as well all labels
for each family in the database.
"""
self.families = families = Family.from_cursor(self.gq.conn).values()
args = self.args
self.family_ids = []
self.family_masks = []
kwargs = {'only_affected': not getattr(self.args, "allow_unaffected", False),
'min_gq': args.min_gq}
if self.model == "mendel_violations":
kwargs = {'only_affected': self.args.only_affected}
if self.model != "comp_het" and self.model != "mendel_violations":
if hasattr(self.args, 'lenient'):
kwargs['strict'] = not self.args.lenient
elif self.model == "comp_het":
kwargs['pattern_only'] = self.args.pattern_only
if hasattr(self.args, 'gt_phred_ll'):
kwargs['gt_ll'] = self.args.gt_phred_ll
if self.model in ('x_rec', 'x_dom', 'x_denovo'):
kwargs.pop('only_affected')
requested_fams = None if not args.families else set(args.families.split(","))
for family in families:
if requested_fams is None or family.family_id in requested_fams:
# e.g. family.auto_rec(gt_ll, min_depth)
family_filter = getattr(family, self.model)(
min_depth=self.args.min_sample_depth,
**kwargs)
else:
family_filter = 'False'
self.family_masks.append(family_filter)
self.family_ids.append(family.family_id)
def report_candidates(self):
args = self.args
req_cols = ['gt_types', 'gts']
if args.min_sample_depth and self.args.min_sample_depth > 0:
req_cols.append('gt_depths')
if getattr(args, 'gt_phred_ll', False) or self.model == "mendel_violations":
for col in ['gt_phred_ll_homref', 'gt_phred_ll_het',
'gt_phred_ll_homalt']:
if col in self.gt_cols:
req_cols.append(col)
if args.min_gq != 0 and 'gt_quals' in self.gt_cols:
req_cols.append('gt_quals')
is_mendel = False
if any(isinstance(m, dict) for m in self.family_masks):
assert self.model == "mendel_violations"
is_mendel = True
masks = []
# mdict contains filter for 'de novo', 'LOH', etc.
for mdict in self.family_masks:
if isinstance(mdict, basestring):
masks.append(mdict)
continue
m = {}
for k, mask in mdict.items():
m[k] = 'False' if mask is None or mask.strip("(").strip(")") == 'False' else mask
if m[k] != 'False':
m[k] = compile(m[k], m[k], 'eval')
masks.append(m)
else:
# 1 mask per family
masks = ['False' if m is None or m is False or m.strip('(').strip(')') in
('empty', 'False') else m for m in self.family_masks]
masks = [compile(m, m, 'eval') if m != 'False' else 'False' for m in masks]
requested_fams = None if not args.families else set(args.families.split(","))
for gene, li in self.candidates():
kindreds = set()
to_yield = []
seen = set()
for row in li:
# comp_het sets a family_id that met the filter. so we use it
# for this check instead of checking all families.
cur_fam = row.print_fields.get('family_id')
for col in self.added:
try:
del row.print_fields[col]
except ValueError:
pass
cols = dict((col, row[col]) for col in req_cols)
fams_to_test = enumerate(self.families) if cur_fam is None \
else [(i, f) for i, f in enumerate(self.families) if f.family_id == cur_fam]
# limit to families requested by the user.
if requested_fams is not None:
fams_to_test = ((i, f) for i, f in fams_to_test if f.family_id in requested_fams)
fams, models = [], []
if is_mendel:
for i, f in fams_to_test:
mask_dict = masks[i]
for inh_model, mask in mask_dict.items():
if masks[i] != 'False' and eval(mask, cols):
if f in fams:
models[-1] += ";" + inh_model
else:
fams.append(f)
models.append(inh_model)
else:
fams = [f for i, f in fams_to_test
if masks[i] != 'False' and eval(masks[i], cols)]
kindreds.update(f.family_id for f in fams)
for fam in fams:
pdict = row.print_fields.copy()
kindreds.add(fam.family_id)
# get a *shallow* copy of the ordered dict.
# populate with the fields required by the tools.
pdict["family_id"] = fam.family_id
pdict["family_members"] = ",".join("%s" % m for m in fam.subjects)
if PY3:
pdict["family_genotypes"] = to_str(",".join(eval(str(to_str(s)), cols) for s in fam.gts))
else:
pdict["family_genotypes"] = ",".join(eval(str(s), cols) for s in fam.gts)
pdict["samples"] = ",".join(x.name or x.sample_id for x in fam.subjects if x.affected)
pdict["family_count"] = len(fams)
if is_mendel:
pdict["violation"] = ";".join(models)
# TODO: check args, may need fam.subjects_with_parent
pdict['samples'] = fam.affecteds_with_parent if args.only_affected else fam.samples_with_parent
vs = []
if all(c in self.gt_cols for c in ('gt_phred_ll_homref', 'gt_phred_ll_het', 'gt_phred_ll_homalt')):
pdict["violation_prob"] = ""
for s in pdict['samples']:
# mom, dad, kid
mdk = str(s.mom.genotype_lls + s.dad.genotype_lls + s.genotype_lls)
# get all 3 at once so we just do 1 eval.
vals = eval(mdk, cols)
vs.append(mendelian_error(vals[:3], vals[3:6], vals[6:], pls=True))
pdict["violation_prob"] = ",".join("%.5f" % v for v in vs if v is not None)
pdict['samples'] = ",".join(s.name or str(s.sample_id) for s in pdict['samples'])
s = str(pdict)
if s in seen:
continue
seen.add(s)
to_yield.append(pdict)
if 0 < len(kindreds) >= self.args.min_kindreds:
if 'comp_het_id' in to_yield[0]:
counts = Counter((item['comp_het_id'], item['family_id']) for item in to_yield)
# remove singletons.
to_yield = [item for item in to_yield if counts[(item['comp_het_id'], item['family_id'])] > 1]
# re-check min_kindreds
if len(set(item['family_id'] for item in to_yield)) < self.args.min_kindreds:
continue
for item in to_yield:
item['family_count'] = len(kindreds)
yield item
def run(self):
has_gts = False
from .gemini_bcolz import gt_cols_types
for i, s in enumerate(self.report_candidates()):
if i == 0:
has_gts = [x[0] for x in gt_cols_types if x[0] in s] or False
print("\t".join(s.keys()))
if has_gts:
for col in has_gts:
s[col] = str(s[col]).replace('\n', '')
try:
print("\t".join(map(str, s.values())))
except UnicodeEncodeError:
vals = []
for v in s.values():
if isinstance(v, unicode):
vals.append(unidecode(v))
else:
vals.append(str(v))
print("\t".join(vals))
class XRec(GeminiInheritanceModel):
model = "x_rec"
def candidates(self):
for g, li in self.gen_candidates('gene'):
yield g, li
class XDenovo(XRec):
model = "x_denovo"
class XDom(XRec):
model = "x_dom"
class AutoDom(GeminiInheritanceModel):
model = "auto_dom"
def candidates(self):
for g, li in self.gen_candidates('gene'):
yield g, li
class AutoRec(AutoDom):
model = "auto_rec"
class DeNovo(GeminiInheritanceModel):
model = "de_novo"
def candidates(self):
kins = self.args.min_kindreds
for g, li in self.gen_candidates('gene' if kins is not None else None):
yield g, li
class MendelViolations(GeminiInheritanceModel):
model = "mendel_violations"
def candidates(self):
for g, li in self.gen_candidates(None):
yield g, li
class CompoundHet(GeminiInheritanceModel):
model = "comp_het"
@property
def query(self):
args = self.args
if args.columns is not None:
custom_columns = self._add_necessary_columns(str(args.columns))
query = "SELECT " + custom_columns + \
" FROM variants " + \
" WHERE (%s) " % args.where
else:
# report the kitchen sink
query = "SELECT *" + \
", gts, gt_types, gt_phases, gt_depths, \
gt_ref_depths, gt_alt_depths, gt_quals" + \
" FROM variants " + \
" WHERE (%s) " % args.where
if args.filter: query += " AND (" + args.filter + ")"
# we need to order results by gene so that we can sweep through the results
return query + " ORDER BY chrom, gene"
def _add_necessary_columns(self, custom_columns):
"""
Convenience function to tack on columns that are necessary for
the functionality of the tool but yet have not been specifically
requested by the user.
"""
# we need to add the variant's chrom, start and gene if
# not already there.
custom_columns = [x.strip() for x in custom_columns.split(",")]
if "*" in custom_columns:
return ",".join(custom_columns)
self.added = []
for col in ("gene", "chrom", "start", "ref", "alt", "variant_id"):
if not col in custom_columns:
custom_columns.append(col)
if col != "variant_id":
self.added.append(col)
return ",".join(custom_columns)
def filter_candidates(self, candidates,
comp_het_counter=[0]):
"""
Refine candidate heterozygote pairs based on user's filters.
"""
args = self.args
# once we are in here, we know that we have a single gene.
requested_fams = None if args.families is None else set(args.families.split(","))
for idx, comp_het in enumerate(candidates):
comp_het_counter[0] += 1
for fam_ch in candidates[comp_het]:
if fam_ch['priority'] > args.max_priority:
continue
# when to use affected_unphased?
for subject in (fam_ch['candidates'] if args.pattern_only else fam_ch['affected_phased'] + fam_ch['affected_unphased']):
family_id = subject.family_id
if requested_fams is not None and not family_id in requested_fams:
continue
ch_id = str(comp_het_counter[0])
cid = "%s_%d_%d" % (ch_id, comp_het[0].row['variant_id'],
comp_het[1].row['variant_id'])
for i in (0, 1):
pdict = comp_het[i].row.print_fields.copy()
# set these to keep order in the ordered dict.
pdict["family_id"] = family_id
pdict["family_members"] = None
pdict["family_genotypes"] = None
pdict["samples"] = None
pdict["family_count"] = None
pdict["comp_het_id"] = cid
pdict['priority'] = fam_ch['priority']
comp_het[i].row.print_fields = pdict
# TODO: check this yield.
yield comp_het[i].row
def candidates(self):
args = self.args
self.gq._connect_to_database()
fams = self.fams = Family.from_cursor(self.gq.conn)
if args.families:
fams = {f: fam for f, fam in fams.items() if f in set(args.families.split(","))}
for grp, li in self.gen_candidates('gene'):
samples_w_hetpair = defaultdict(list)
sites, strs = [], []
for row in li:
gt_types, gt_bases, gt_phases = row['gt_types'], row['gts'], row['gt_phases']
site = Site(row)
site.gt_phases, site.gt_bases, site.gt_types = gt_phases, gt_bases, gt_types
sites.append((str(site), site))
for family_id, fam in fams.items():
# if a site has been deemed "impossible", we store and then
                # skip it to avoid computational overhead on it multiple times.
impossible_sites = {}
for i, (strsite1, site1) in enumerate(sites[:-1], start=1):
if strsite1 in impossible_sites:
continue
for (strsite2, site2) in sites[i:]:
if strsite2 in impossible_sites:
continue
ch = fam.comp_het_pair(site1.gt_types, site1.gt_bases,
site2.gt_types, site2.gt_bases,
site1.gt_phases, site2.gt_phases,
ref1=site1.row['ref'],
alt1=site1.row['alt'],
ref2=site2.row['ref'],
alt2=site2.row['alt'],
allow_unaffected=args.allow_unaffected,
fast_mode=True,
pattern_only=args.pattern_only)
if ch.get('impossible') == 'site1':
impossible_sites[strsite1] = True
break
if ch.get('impossible') == 'site2':
impossible_sites[strsite2] = True
if not ch['candidate']: continue
samples_w_hetpair[(site1, site2)].append(ch)
yield grp, self.filter_candidates(samples_w_hetpair)
class Site(object):
__slots__ = ('row', 'gt_phases', 'gt_bases', 'gt_types')
def __init__(self, row):
self.row = row
self.gt_phases = None
self.gt_bases = None
self.gt_types = None
def __eq__(self, other):
return self.row['chrom'] == other.row['chrom'] and \
self.row['start'] == other.row['start']
def __str__(self):
return ",".join((self.row['chrom'],
str(self.row['start']),
#str(self.row['end']),
self.row['ref'],
self.row['alt']))
__repr__ = __str__
def __hash__(self):
"hash the site based on chrom+start"
return sum(ord(c) for c in self.row['chrom']) + int(self.row['start'])
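# --- Illustrative sketch (added for clarity; not part of the original module) ---
# How the per-family masks compiled in report_candidates() above are applied:
# each mask is a Python expression over a row's gt_* columns, compiled once
# and then eval()'d against a dict of column values.  The expression and the
# values below are made up for illustration.
def _mask_eval_sketch():
    mask_src = "(gt_types[0] == 2) and (gt_depths[0] >= 10)"
    mask = compile(mask_src, mask_src, 'eval')
    cols = {'gt_types': [2, 1, 0], 'gt_depths': [25, 9, 14]}
    return eval(mask, cols)   # True for this made-up row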
| mit | 1,041,871,655,229,049,500 | 38.667932 | 136 | 0.492131 | false |
mano3m/CouchPotatoServer | libs/pyasn1/type/univ.py | 20 | 39555 | # ASN.1 "universal" data types
import operator, sys
from pyasn1.type import base, tag, constraint, namedtype, namedval, tagmap
from pyasn1.codec.ber import eoo
from pyasn1.compat import octets
from pyasn1 import error
# "Simple" ASN.1 types (yet incomplete)
class Integer(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x02)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def __and__(self, value): return self.clone(self._value & value)
def __rand__(self, value): return self.clone(value & self._value)
def __or__(self, value): return self.clone(self._value | value)
def __ror__(self, value): return self.clone(value | self._value)
def __xor__(self, value): return self.clone(self._value ^ value)
def __rxor__(self, value): return self.clone(value ^ self._value)
def __lshift__(self, value): return self.clone(self._value << value)
def __rshift__(self, value): return self.clone(self._value >> value)
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __sub__(self, value): return self.clone(self._value - value)
def __rsub__(self, value): return self.clone(value - self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self.clone(value * self._value)
def __mod__(self, value): return self.clone(self._value % value)
def __rmod__(self, value): return self.clone(value % self._value)
def __pow__(self, value, modulo=None): return self.clone(pow(self._value, value, modulo))
def __rpow__(self, value): return self.clone(pow(value, self._value))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(self._value // value)
def __rdiv__(self, value): return self.clone(value // self._value)
else:
def __truediv__(self, value): return self.clone(self._value / value)
def __rtruediv__(self, value): return self.clone(value / self._value)
def __divmod__(self, value): return self.clone(self._value // value)
def __rdivmod__(self, value): return self.clone(value // self._value)
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __int__(self): return int(self._value)
if sys.version_info[0] <= 2:
def __long__(self): return long(self._value)
def __float__(self): return float(self._value)
def __abs__(self): return abs(self._value)
def __index__(self): return int(self._value)
def __lt__(self, value): return self._value < value
def __le__(self, value): return self._value <= value
def __eq__(self, value): return self._value == value
def __ne__(self, value): return self._value != value
def __gt__(self, value): return self._value > value
def __ge__(self, value): return self._value >= value
def prettyIn(self, value):
if not isinstance(value, str):
return int(value)
r = self.__namedValues.getValue(value)
if r is not None:
return r
try:
return int(value)
except ValueError:
raise error.PyAsn1Error(
'Can\'t coerce %s into integer: %s' % (value, sys.exc_info()[1])
)
def prettyOut(self, value):
r = self.__namedValues.getName(value)
return r is None and str(value) or repr(r)
def getNamedValues(self): return self.__namedValues
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
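# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# Arithmetic on Integer yields new Integer objects via clone(); the numbers
# below are arbitrary.
def _integer_usage_sketch():
    answer = Integer(41) + 1
    assert isinstance(answer, Integer) and int(answer) == 42
    assert Integer('16') == 16    # prettyIn() also accepts decimal strings
    return answer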
class Boolean(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x01),
)
subtypeSpec = Integer.subtypeSpec+constraint.SingleValueConstraint(0,1)
namedValues = Integer.namedValues.clone(('False', 0), ('True', 1))
class BitString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x03)
)
namedValues = namedval.NamedValues()
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if namedValues is None:
self.__namedValues = self.namedValues
else:
self.__namedValues = namedValues
base.AbstractSimpleAsn1Item.__init__(
self, value, tagSet, subtypeSpec
)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
namedValues=None):
if value is None and tagSet is None and subtypeSpec is None \
and namedValues is None:
return self
if value is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def subtype(self, value=None, implicitTag=None, explicitTag=None,
subtypeSpec=None, namedValues=None):
if value is None:
value = self._value
if implicitTag is not None:
tagSet = self._tagSet.tagImplicitly(implicitTag)
elif explicitTag is not None:
tagSet = self._tagSet.tagExplicitly(explicitTag)
else:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
else:
subtypeSpec = subtypeSpec + self._subtypeSpec
if namedValues is None:
namedValues = self.__namedValues
else:
namedValues = namedValues + self.__namedValues
return self.__class__(value, tagSet, subtypeSpec, namedValues)
def __str__(self): return str(tuple(self))
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + value)
def __radd__(self, value): return self.clone(value + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
def prettyIn(self, value):
r = []
if not value:
return ()
elif isinstance(value, str):
if value[0] == '\'':
if value[-2:] == '\'B':
for v in value[1:-2]:
if v == '0':
r.append(0)
elif v == '1':
r.append(1)
else:
raise error.PyAsn1Error(
'Non-binary BIT STRING initializer %s' % (v,)
)
return tuple(r)
elif value[-2:] == '\'H':
for v in value[1:-2]:
i = 4
v = int(v, 16)
while i:
i = i - 1
r.append((v>>i)&0x01)
return tuple(r)
else:
raise error.PyAsn1Error(
'Bad BIT STRING value notation %s' % value
)
else:
for i in value.split(','):
j = self.__namedValues.getValue(i)
if j is None:
raise error.PyAsn1Error(
'Unknown bit identifier \'%s\'' % i
)
if j >= len(r):
r.extend([0]*(j-len(r)+1))
r[j] = 1
return tuple(r)
elif isinstance(value, (tuple, list)):
r = tuple(value)
for b in r:
if b and b != 1:
raise error.PyAsn1Error(
'Non-binary BitString initializer \'%s\'' % (r,)
)
return r
elif isinstance(value, BitString):
return tuple(value)
else:
raise error.PyAsn1Error(
'Bad BitString initializer type \'%s\'' % (value,)
)
def prettyOut(self, value):
return '\"\'%s\'B\"' % ''.join([str(x) for x in value])
class OctetString(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x04)
)
defaultBinValue = defaultHexValue = base.noValue
encoding = 'us-ascii'
def __init__(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if encoding is None:
self._encoding = self.encoding
else:
self._encoding = encoding
if binValue is not None:
value = self.fromBinaryString(binValue)
if hexValue is not None:
value = self.fromHexString(hexValue)
if value is None or value is base.noValue:
value = self.defaultHexValue
if value is None or value is base.noValue:
value = self.defaultBinValue
self.__intValue = None
base.AbstractSimpleAsn1Item.__init__(self, value, tagSet, subtypeSpec)
def clone(self, value=None, tagSet=None, subtypeSpec=None,
encoding=None, binValue=None, hexValue=None):
if value is None and tagSet is None and subtypeSpec is None and \
encoding is None and binValue is None and hexValue is None:
return self
if value is None and binValue is None and hexValue is None:
value = self._value
if tagSet is None:
tagSet = self._tagSet
if subtypeSpec is None:
subtypeSpec = self._subtypeSpec
if encoding is None:
encoding = self._encoding
return self.__class__(
value, tagSet, subtypeSpec, encoding, binValue, hexValue
)
if sys.version_info[0] <= 2:
def prettyIn(self, value):
if isinstance(value, str):
return value
elif isinstance(value, (tuple, list)):
try:
return ''.join([ chr(x) for x in value ])
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
return str(value)
else:
def prettyIn(self, value):
if isinstance(value, bytes):
return value
elif isinstance(value, OctetString):
return value.asOctets()
elif isinstance(value, (tuple, list, map)):
try:
return bytes(value)
except ValueError:
raise error.PyAsn1Error(
'Bad OctetString initializer \'%s\'' % (value,)
)
else:
try:
return str(value).encode(self._encoding)
except UnicodeEncodeError:
raise error.PyAsn1Error(
'Can\'t encode string \'%s\' with \'%s\' codec' % (value, self._encoding)
)
def fromBinaryString(self, value):
bitNo = 8; byte = 0; r = ()
for v in value:
if bitNo:
bitNo = bitNo - 1
else:
bitNo = 7
r = r + (byte,)
byte = 0
if v == '0':
v = 0
elif v == '1':
v = 1
else:
raise error.PyAsn1Error(
'Non-binary OCTET STRING initializer %s' % (v,)
)
byte = byte | (v << bitNo)
return octets.ints2octs(r + (byte,))
def fromHexString(self, value):
r = p = ()
for v in value:
if p:
r = r + (int(p+v, 16),)
p = ()
else:
p = v
if p:
r = r + (int(p+'0', 16),)
return octets.ints2octs(r)
def prettyOut(self, value):
if sys.version_info[0] <= 2:
numbers = tuple([ ord(x) for x in value ])
else:
numbers = tuple(value)
if [ x for x in numbers if x < 32 or x > 126 ]:
return '0x' + ''.join([ '%.2x' % x for x in numbers ])
else:
return str(value)
def __repr__(self):
if self._value is base.noValue:
return self.__class__.__name__ + '()'
if [ x for x in self.asNumbers() if x < 32 or x > 126 ]:
return self.__class__.__name__ + '(hexValue=\'' + ''.join([ '%.2x' % x for x in self.asNumbers() ])+'\')'
else:
return self.__class__.__name__ + '(\'' + self.prettyOut(self._value) + '\')'
if sys.version_info[0] <= 2:
def __str__(self): return str(self._value)
def __unicode__(self):
return self._value.decode(self._encoding, 'ignore')
def asOctets(self): return self._value
def asNumbers(self):
if self.__intValue is None:
self.__intValue = tuple([ ord(x) for x in self._value ])
return self.__intValue
else:
def __str__(self): return self._value.decode(self._encoding, 'ignore')
def __bytes__(self): return self._value
def asOctets(self): return self._value
def asNumbers(self):
if self.__intValue is None:
self.__intValue = tuple(self._value)
return self.__intValue
# Immutable sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(operator.getitem(self._value, i))
else:
return self._value[i]
def __add__(self, value): return self.clone(self._value + self.prettyIn(value))
def __radd__(self, value): return self.clone(self.prettyIn(value) + self._value)
def __mul__(self, value): return self.clone(self._value * value)
def __rmul__(self, value): return self * value
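# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# OctetString accepts text, hex or binary initializers; the literals below
# are arbitrary.
def _octet_string_usage_sketch():
    assert OctetString(hexValue='fa60').asNumbers() == (250, 96)
    joined = OctetString('ab') + 'cd'
    assert joined.asNumbers() == (97, 98, 99, 100)
    return joined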
class Null(OctetString):
defaultValue = ''.encode() # This is tightly constrained
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x05)
)
subtypeSpec = OctetString.subtypeSpec+constraint.SingleValueConstraint(''.encode())
if sys.version_info[0] <= 2:
intTypes = (int, long)
else:
intTypes = int
class ObjectIdentifier(base.AbstractSimpleAsn1Item):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x06)
)
def __add__(self, other): return self.clone(self._value + other)
def __radd__(self, other): return self.clone(other + self._value)
def asTuple(self): return self._value
# Sequence object protocol
def __len__(self):
if self._len is None:
self._len = len(self._value)
return self._len
def __getitem__(self, i):
if isinstance(i, slice):
return self.clone(
operator.getitem(self._value, i)
)
else:
return self._value[i]
def __str__(self): return self.prettyPrint()
def index(self, suboid): return self._value.index(suboid)
def isPrefixOf(self, value):
"""Returns true if argument OID resides deeper in the OID tree"""
l = len(self)
if l <= len(value):
if self._value[:l] == value[:l]:
return 1
return 0
def prettyIn(self, value):
"""Dotted -> tuple of numerics OID converter"""
if isinstance(value, tuple):
pass
elif isinstance(value, ObjectIdentifier):
return tuple(value)
elif isinstance(value, str):
r = []
for element in [ x for x in value.split('.') if x != '' ]:
try:
r.append(int(element, 0))
except ValueError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__, sys.exc_info()[1])
)
value = tuple(r)
else:
try:
value = tuple(value)
except TypeError:
raise error.PyAsn1Error(
'Malformed Object ID %s at %s: %s' %
(str(value), self.__class__.__name__,sys.exc_info()[1])
)
for x in value:
if not isinstance(x, intTypes) or x < 0:
raise error.PyAsn1Error(
'Invalid sub-ID in %s at %s' % (value, self.__class__.__name__)
)
return value
def prettyOut(self, value): return '.'.join([ str(x) for x in value ])
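# --- Illustrative usage sketch (added for clarity; not part of the original module) ---
# ObjectIdentifier behaves like an immutable sequence of sub-identifiers;
# the OID arcs below are the usual SNMP example values.
def _object_identifier_usage_sketch():
    internet = ObjectIdentifier('1.3.6.1')
    mib2 = internet + (2, 1)
    assert mib2.asTuple() == (1, 3, 6, 1, 2, 1)
    assert internet.isPrefixOf(mib2.asTuple())
    return mib2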
class Real(base.AbstractSimpleAsn1Item):
try:
_plusInf = float('inf')
_minusInf = float('-inf')
_inf = (_plusInf, _minusInf)
except ValueError:
# Infinity support is platform and Python dependent
_plusInf = _minusInf = None
_inf = ()
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x09)
)
def __normalizeBase10(self, value):
m, b, e = value
while m and m % 10 == 0:
            m = m // 10
e = e + 1
return m, b, e
def prettyIn(self, value):
if isinstance(value, tuple) and len(value) == 3:
for d in value:
if not isinstance(d, intTypes):
raise error.PyAsn1Error(
'Lame Real value syntax: %s' % (value,)
)
if value[1] not in (2, 10):
raise error.PyAsn1Error(
'Prohibited base for Real value: %s' % value[1]
)
if value[1] == 10:
value = self.__normalizeBase10(value)
return value
elif isinstance(value, intTypes):
return self.__normalizeBase10((value, 10, 0))
elif isinstance(value, float):
if self._inf and value in self._inf:
return value
else:
e = 0
while int(value) != value:
value = value * 10
e = e - 1
return self.__normalizeBase10((int(value), 10, e))
elif isinstance(value, Real):
return tuple(value)
elif isinstance(value, str): # handle infinite literal
try:
return float(value)
except ValueError:
pass
raise error.PyAsn1Error(
'Bad real value syntax: %s' % (value,)
)
def prettyOut(self, value):
if value in self._inf:
return '\'%s\'' % value
else:
return str(value)
def isPlusInfinity(self): return self._value == self._plusInf
def isMinusInfinity(self): return self._value == self._minusInf
def isInfinity(self): return self._value in self._inf
def __str__(self): return str(float(self))
def __add__(self, value): return self.clone(float(self) + value)
def __radd__(self, value): return self + value
def __mul__(self, value): return self.clone(float(self) * value)
def __rmul__(self, value): return self * value
def __sub__(self, value): return self.clone(float(self) - value)
def __rsub__(self, value): return self.clone(value - float(self))
def __mod__(self, value): return self.clone(float(self) % value)
def __rmod__(self, value): return self.clone(value % float(self))
def __pow__(self, value, modulo=None): return self.clone(pow(float(self), value, modulo))
def __rpow__(self, value): return self.clone(pow(value, float(self)))
if sys.version_info[0] <= 2:
def __div__(self, value): return self.clone(float(self) / value)
def __rdiv__(self, value): return self.clone(value / float(self))
else:
def __truediv__(self, value): return self.clone(float(self) / value)
def __rtruediv__(self, value): return self.clone(value / float(self))
def __divmod__(self, value): return self.clone(float(self) // value)
def __rdivmod__(self, value): return self.clone(value // float(self))
def __int__(self): return int(float(self))
if sys.version_info[0] <= 2:
def __long__(self): return long(float(self))
def __float__(self):
if self._value in self._inf:
return self._value
else:
return float(
self._value[0] * pow(self._value[1], self._value[2])
)
def __abs__(self): return abs(float(self))
def __lt__(self, value): return float(self) < value
def __le__(self, value): return float(self) <= value
def __eq__(self, value): return float(self) == value
def __ne__(self, value): return float(self) != value
def __gt__(self, value): return float(self) > value
def __ge__(self, value): return float(self) >= value
if sys.version_info[0] <= 2:
def __nonzero__(self): return bool(float(self))
else:
def __bool__(self): return bool(float(self))
__hash__ = base.AbstractSimpleAsn1Item.__hash__
def __getitem__(self, idx):
if self._value in self._inf:
raise error.PyAsn1Error('Invalid infinite value operation')
else:
return self._value[idx]
class Enumerated(Integer):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatSimple, 0x0A)
)
# "Structured" ASN.1 types
class SetOf(base.AbstractConstructedAsn1Item):
componentType = None
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 1
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if self._componentType is not None and \
not self._componentType.isSuperTypeOf(value):
raise error.PyAsn1Error('Component type error %s' % value)
def getComponentByPosition(self, idx): return self._componentValues[idx]
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
self._componentValues[idx] = self._componentType.clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
if self._componentType is None:
raise error.PyAsn1Error('Component type not defined')
if isinstance(self._componentType, base.AbstractSimpleAsn1Item):
value = self._componentType.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentType is not None:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getComponentTagMap(self):
if self._componentType is not None:
return self._componentType.getTagMap()
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
r = r + ' '*scope
if self._componentValues[idx] is None:
r = r + '<empty>'
else:
r = r + self._componentValues[idx].prettyPrint(scope)
return r
class SequenceOf(SetOf):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 2
class SequenceAndSetBase(base.AbstractConstructedAsn1Item):
componentType = namedtype.NamedTypes()
def __init__(self, componentType=None, tagSet=None,
subtypeSpec=None, sizeSpec=None):
base.AbstractConstructedAsn1Item.__init__(
self, componentType, tagSet, subtypeSpec, sizeSpec
)
if self._componentType is None:
self._componentTypeLen = 0
else:
self._componentTypeLen = len(self._componentType)
def __getitem__(self, idx):
if isinstance(idx, str):
return self.getComponentByName(idx)
else:
return base.AbstractConstructedAsn1Item.__getitem__(self, idx)
def __setitem__(self, idx, value):
if isinstance(idx, str):
self.setComponentByName(idx, value)
else:
base.AbstractConstructedAsn1Item.__setitem__(self, idx, value)
def _cloneComponentValues(self, myClone, cloneValueFlag):
idx = 0; l = len(self._componentValues)
while idx < l:
c = self._componentValues[idx]
if c is not None:
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByPosition(
idx, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByPosition(idx, c.clone())
idx = idx + 1
def _verifyComponent(self, idx, value):
if idx >= self._componentTypeLen:
raise error.PyAsn1Error(
'Component type error out of range'
)
t = self._componentType[idx].getType()
if not t.isSuperTypeOf(value):
raise error.PyAsn1Error('Component type error %r vs %r' % (t, value))
def getComponentByName(self, name):
return self.getComponentByPosition(
self._componentType.getPositionByName(name)
)
def setComponentByName(self, name, value=None, verifyConstraints=True):
return self.setComponentByPosition(
self._componentType.getPositionByName(name), value,
verifyConstraints
)
def getComponentByPosition(self, idx):
try:
return self._componentValues[idx]
except IndexError:
if idx < self._componentTypeLen:
return
raise
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = self._componentValuesSet + 1
return self
elif not isinstance(value, base.Asn1Item):
t = self._componentType.getTypeByPosition(idx)
if isinstance(t, base.AbstractSimpleAsn1Item):
value = t.clone(value=value)
else:
raise error.PyAsn1Error('Instance value required')
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
if self._componentValues[idx] is None:
self._componentValuesSet = self._componentValuesSet + 1
self._componentValues[idx] = value
return self
def getNameByPosition(self, idx):
if self._componentTypeLen:
return self._componentType.getNameByPosition(idx)
def getDefaultComponentByPosition(self, idx):
if self._componentTypeLen and self._componentType[idx].isDefaulted:
return self._componentType[idx].getType()
def getComponentType(self):
if self._componentTypeLen:
return self._componentType
def setDefaultComponents(self):
if self._componentTypeLen == self._componentValuesSet:
return
idx = self._componentTypeLen
while idx:
idx = idx - 1
if self._componentType[idx].isDefaulted:
if self.getComponentByPosition(idx) is None:
self.setComponentByPosition(idx)
elif not self._componentType[idx].isOptional:
if self.getComponentByPosition(idx) is None:
raise error.PyAsn1Error(
'Uninitialized component #%s at %r' % (idx, self)
)
def prettyPrint(self, scope=0):
scope = scope + 1
r = self.__class__.__name__ + ':\n'
for idx in range(len(self._componentValues)):
if self._componentValues[idx] is not None:
r = r + ' '*scope
componentType = self.getComponentType()
if componentType is None:
r = r + '<no-name>'
else:
r = r + componentType.getNameByPosition(idx)
r = '%s=%s\n' % (
r, self._componentValues[idx].prettyPrint(scope)
)
return r
class Sequence(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x10)
)
typeId = 3
def getComponentTagMapNearPosition(self, idx):
if self._componentType:
return self._componentType.getTagMapNearPosition(idx)
def getComponentPositionNearType(self, tagSet, idx):
if self._componentType:
return self._componentType.getPositionNearType(tagSet, idx)
else:
return idx
class Set(SequenceAndSetBase):
tagSet = baseTagSet = tag.initTagSet(
tag.Tag(tag.tagClassUniversal, tag.tagFormatConstructed, 0x11)
)
typeId = 4
def getComponent(self, innerFlag=0): return self
def getComponentByType(self, tagSet, innerFlag=0):
c = self.getComponentByPosition(
self._componentType.getPositionByType(tagSet)
)
if innerFlag and isinstance(c, Set):
# get inner component by inner tagSet
return c.getComponent(1)
else:
# get outer component by inner tagSet
return c
def setComponentByType(self, tagSet, value=None, innerFlag=0,
verifyConstraints=True):
idx = self._componentType.getPositionByType(tagSet)
t = self._componentType.getTypeByPosition(idx)
if innerFlag: # set inner component by inner tagSet
if t.getTagSet():
return self.setComponentByPosition(
idx, value, verifyConstraints
)
else:
t = self.setComponentByPosition(idx).getComponentByPosition(idx)
return t.setComponentByType(
tagSet, value, innerFlag, verifyConstraints
)
else: # set outer component by inner tagSet
return self.setComponentByPosition(
idx, value, verifyConstraints
)
def getComponentTagMap(self):
if self._componentType:
return self._componentType.getTagMap(True)
def getComponentPositionByType(self, tagSet):
if self._componentType:
return self._componentType.getPositionByType(tagSet)
class Choice(Set):
tagSet = baseTagSet = tag.TagSet() # untagged
sizeSpec = constraint.ConstraintsIntersection(
constraint.ValueSizeConstraint(1, 1)
)
typeId = 5
_currentIdx = None
def __eq__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] == other
return NotImplemented
def __ne__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] != other
return NotImplemented
def __lt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] < other
return NotImplemented
def __le__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] <= other
return NotImplemented
def __gt__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] > other
return NotImplemented
def __ge__(self, other):
if self._componentValues:
return self._componentValues[self._currentIdx] >= other
return NotImplemented
if sys.version_info[0] <= 2:
        def __nonzero__(self): return bool(self._componentValues)
    else:
        def __bool__(self): return bool(self._componentValues)
def __len__(self): return self._currentIdx is not None and 1 or 0
def verifySizeSpec(self):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
self._sizeSpec(' ')
def _cloneComponentValues(self, myClone, cloneValueFlag):
try:
c = self.getComponent()
except error.PyAsn1Error:
pass
else:
if isinstance(c, Choice):
tagSet = c.getEffectiveTagSet()
else:
tagSet = c.getTagSet()
if isinstance(c, base.AbstractConstructedAsn1Item):
myClone.setComponentByType(
tagSet, c.clone(cloneValueFlag=cloneValueFlag)
)
else:
myClone.setComponentByType(tagSet, c.clone())
def setComponentByPosition(self, idx, value=None, verifyConstraints=True):
l = len(self._componentValues)
if idx >= l:
self._componentValues = self._componentValues + (idx-l+1)*[None]
if self._currentIdx is not None:
self._componentValues[self._currentIdx] = None
if value is None:
if self._componentValues[idx] is None:
self._componentValues[idx] = self._componentType.getTypeByPosition(idx).clone()
self._componentValuesSet = 1
self._currentIdx = idx
return self
elif not isinstance(value, base.Asn1Item):
value = self._componentType.getTypeByPosition(idx).clone(
value=value
)
if verifyConstraints:
if self._componentTypeLen:
self._verifyComponent(idx, value)
self._verifySubtypeSpec(value, idx)
self._componentValues[idx] = value
self._currentIdx = idx
self._componentValuesSet = 1
return self
def getMinTagSet(self):
if self._tagSet:
return self._tagSet
else:
return self._componentType.genMinTagSet()
def getEffectiveTagSet(self):
if self._tagSet:
return self._tagSet
else:
c = self.getComponent()
if isinstance(c, Choice):
return c.getEffectiveTagSet()
else:
return c.getTagSet()
def getTagMap(self):
if self._tagSet:
return Set.getTagMap(self)
else:
return Set.getComponentTagMap(self)
def getComponent(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
c = self._componentValues[self._currentIdx]
if innerFlag and isinstance(c, Choice):
return c.getComponent(innerFlag)
else:
return c
def getName(self, innerFlag=0):
if self._currentIdx is None:
raise error.PyAsn1Error('Component not chosen')
else:
if innerFlag:
c = self._componentValues[self._currentIdx]
if isinstance(c, Choice):
return c.getName(innerFlag)
return self._componentType.getNameByPosition(self._currentIdx)
def setDefaultComponents(self): pass
class Any(OctetString):
tagSet = baseTagSet = tag.TagSet() # untagged
typeId = 6
def getTagMap(self):
return tagmap.TagMap(
{ self.getTagSet(): self },
{ eoo.endOfOctets.getTagSet(): eoo.endOfOctets },
self
)
# XXX
# coercion rules?
| gpl-3.0 | -8,862,421,050,925,229,000 | 37.143684 | 117 | 0.551182 | false |
vrbagalkote/avocado-misc-tests-1 | generic/nstress.py | 1 | 2316 | #!/usr/bin/env python
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
#
# See LICENSE for more details.
#
# Copyright: 2017 IBM
# Author: Pavithra <[email protected]>
import os
from avocado import Test
from avocado import main
from avocado.utils import distro, archive, process
class NStress(Test):
is_fail = 0
def run_cmd(self, cmd):
cmd_result = process.run(cmd, ignore_status=True, sudo=True,
shell=True)
if cmd_result.exit_status != 0:
self.log.info("%s test failed" % cmd)
self.is_fail += 1
return
def setUp(self):
dist = distro.detect()
if dist.name == 'Ubuntu':
tar_ball = self.params.get('tar_ball_ubuntu', default='nstress_Ubuntu1410_ppc64_Nov_2015.tar')
elif dist.name == 'rhel':
tar_ball = self.params.get('tar_ball_rhel', default='nstress_RHEL71_LE_ppc64_Nov_2015.tar')
elif dist.name == 'SuSE':
tar_ball = self.params.get('tar_ball_sles', default='nstress_SLES12_ppc64_Nov_2015.tar')
url = os.path.join('http://public.dhe.ibm.com/systems/power/community/wikifiles/PerfTools/',
tar_ball)
tarball = self.fetch_asset(url, expire='10d')
archive.extract(tarball, self.srcdir)
self.duration = self.params.get('duration', default=300)
def test(self):
os.chdir(self.srcdir)
self.run_cmd("./nmem -m 250 -s %s" % self.duration)
self.run_cmd("./nmem64 -m 2047 -s %s" % self.duration)
if self.is_fail >= 1:
self.fail("nstress test failed")
        ''' ncpu return code is 1 even after successful completion'''
ncpu_result = process.run("./ncpu -p 255 -s %s" % self.duration, ignore_status=True, sudo=True)
if ncpu_result.exit_status != 1:
self.log.info("ncpu test failed")
if __name__ == "__main__":
main()
| gpl-2.0 | -7,630,319,848,840,054,000 | 36.354839 | 106 | 0.62867 | false |
ruschelp/cortex-vfx | test/IECoreHoudini/ops/objectDebug/objectDebug-1.py | 12 | 1247 | from IECore import *
class objectDebug( Op ) :
def __init__( self ) :
Op.__init__( self,
"Op that prints out debug information about an object parameter.",
ObjectParameter(
name = "result",
description = "A pass through of the input object parameter.",
defaultValue = NullObject(),
type = TypeId.Object
)
)
self.parameters().addParameter(
ObjectParameter(
name = "input",
description = "The input object.",
defaultValue = NullObject(),
type = TypeId.Object
)
)
self.parameters().addParameter(
BoolParameter(
name = "quiet",
description = "Silences the debug output.",
defaultValue = False
)
)
def doOperation( self, args ) :
object = args['input']
quiet = args['quiet'].value
if not quiet:
# Print the objects name and its str() representation
print object.typeName(), object
# For meshes & points we can print out more verbose information
if object.typeId()==TypeId.MeshPrimitive or object.typeId()==TypeId.PointsPrimitive:
for k in object.keys():
primvar = object[k]
print "[%s]" % k, primvar.interpolation, primvar.data.typeName()
print "\t", primvar.data
return object
registerRunTimeTyped( objectDebug )
| bsd-3-clause | -1,610,881,850,505,618,000 | 22.980769 | 87 | 0.655172 | false |
zhounanshu/Flask-AppBuilder | examples/quickimages/app/views.py | 3 | 3138 | from models import Person, PersonGroup
from flask.ext.appbuilder.views import ModelView, BaseView
from flask.ext.appbuilder.charts.views import GroupByChartView
from flask.ext.appbuilder.models.group import aggregate_count
from flask.ext.appbuilder.models.sqla.interface import SQLAInterface
from flask.ext.appbuilder.widgets import ListThumbnail
from app import app, db, appbuilder
class PersonModelView(ModelView):
datamodel = SQLAInterface(Person, db.session)
list_title = 'List Contacts'
show_title = 'Show Contact'
add_title = 'Add Contact'
edit_title = 'Edit Contact'
#list_widget = ListThumbnail
label_columns = {'person_group_id': 'Group', 'photo_img': 'Photo', 'photo_img_thumbnail': 'Photo'}
list_columns = ['photo_img_thumbnail', 'name', 'personal_celphone', 'business_celphone', 'birthday', 'person_group']
show_fieldsets = [
('Summary', {'fields': ['photo_img', 'name', 'address', 'person_group']}),
('Personal Info',
{'fields': ['birthday', 'personal_phone', 'personal_celphone', 'personal_email'], 'expanded': False}),
('Professional Info',
{'fields': ['business_function', 'business_phone', 'business_celphone', 'business_email'], 'expanded': False}),
('Extra', {'fields': ['notes'], 'expanded': False}),
]
add_fieldsets = [
('Summary', {'fields': ['name', 'photo', 'address', 'person_group']}),
('Personal Info',
{'fields': ['birthday', 'personal_phone', 'personal_celphone', 'personal_email'], 'expanded': False}),
('Professional Info',
{'fields': ['business_function', 'business_phone', 'business_celphone', 'business_email'], 'expanded': False}),
('Extra', {'fields': ['notes'], 'expanded': False}),
]
edit_fieldsets = [
('Summary', {'fields': ['name', 'photo', 'address', 'person_group']}),
('Personal Info',
{'fields': ['birthday', 'personal_phone', 'personal_celphone', 'personal_email'], 'expanded': False}),
('Professional Info',
{'fields': ['business_function', 'business_phone', 'business_celphone', 'business_email'], 'expanded': False}),
('Extra', {'fields': ['notes'], 'expanded': False}),
]
class GroupModelView(ModelView):
datamodel = SQLAInterface(PersonGroup, db.session)
related_views = [PersonModelView]
label_columns = {'phone1': 'Phone (1)', 'phone2': 'Phone (2)', 'taxid': 'Tax ID'}
list_columns = ['name', 'notes']
class PersonChartView(GroupByChartView):
datamodel = SQLAInterface(Person)
chart_title = 'Grouped Persons'
label_columns = PersonModelView.label_columns
chart_type = 'PieChart'
definitions = [
{
'group': 'person_group',
'series': [(aggregate_count,'person_group')]
}
]
db.create_all()
appbuilder.add_view(GroupModelView(), "List Groups", icon="fa-folder-open-o", category="Contacts")
appbuilder.add_view(PersonModelView(), "List Contacts", icon="fa-envelope", category="Contacts")
appbuilder.add_view(PersonChartView(), "Contacts Chart", icon="fa-dashboard", category="Contacts")
| bsd-3-clause | 6,342,332,440,027,834,000 | 39.753247 | 120 | 0.641491 | false |
kidmaple/CoolWall | user/python/Lib/ihooks.py | 4 | 17026 | """Import hook support.
Consistent use of this module will make it possible to change the
different mechanisms involved in loading modules independently.
While the built-in module imp exports interfaces to the built-in
module searching and loading algorithm, and it is possible to replace
the built-in function __import__ in order to change the semantics of
the import statement, until now it has been difficult to combine the
effect of different __import__ hacks, like loading modules from URLs
by rimport.py, or restricted execution by rexec.py.
This module defines three new concepts:
1) A "file system hooks" class provides an interface to a filesystem.
One hooks class is defined (Hooks), which uses the interface provided
by standard modules os and os.path. It should be used as the base
class for other hooks classes.
2) A "module loader" class provides an interface to to search for a
module in a search path and to load it. It defines a method which
searches for a module in a single directory; by overriding this method
one can redefine the details of the search. If the directory is None,
built-in and frozen modules are searched instead.
Two module loader classes are defined, both implementing the search
strategy used by the built-in __import__ function: ModuleLoader uses
the imp module's find_module interface, while HookableModuleLoader
uses a file system hooks class to interact with the file system. Both
use the imp module's load_* interfaces to actually load the module.
3) A "module importer" class provides an interface to import a
module, as well as interfaces to reload and unload a module. It also
provides interfaces to install and uninstall itself instead of the
default __import__ and reload (and unload) functions.
One module importer class is defined (ModuleImporter), which uses a
module loader instance passed in (by default HookableModuleLoader is
instantiated).
The classes defined here should be used as base classes for extended
functionality along those lines.
If a module importer class supports dotted names, its import_module()
must return a different value depending on whether it is called on
behalf of a "from ... import ..." statement or not. (This is caused
by the way the __import__ hook is used by the Python interpreter.) It
would also be wise to install a different version of reload().
"""
import __builtin__
import imp
import os
import sys
import string
VERBOSE = 0
from imp import C_EXTENSION, PY_SOURCE, PY_COMPILED
from imp import C_BUILTIN, PY_FROZEN, PKG_DIRECTORY
BUILTIN_MODULE = C_BUILTIN
FROZEN_MODULE = PY_FROZEN
class _Verbose:
def __init__(self, verbose = VERBOSE):
self.verbose = verbose
def get_verbose(self):
return self.verbose
def set_verbose(self, verbose):
self.verbose = verbose
# XXX The following is an experimental interface
def note(self, *args):
if self.verbose:
apply(self.message, args)
def message(self, format, *args):
if args:
print format%args
else:
print format
class BasicModuleLoader(_Verbose):
"""Basic module loader.
This provides the same functionality as built-in import. It
doesn't deal with checking sys.modules -- all it provides is
find_module() and a load_module(), as well as find_module_in_dir()
which searches just one directory, and can be overridden by a
derived class to change the module search algorithm when the basic
dependency on sys.path is unchanged.
The interface is a little more convenient than imp's:
find_module(name, [path]) returns None or 'stuff', and
load_module(name, stuff) loads the module.
"""
def find_module(self, name, path = None):
if path is None:
path = [None] + self.default_path()
for dir in path:
stuff = self.find_module_in_dir(name, dir)
if stuff: return stuff
return None
def default_path(self):
return sys.path
def find_module_in_dir(self, name, dir):
if dir is None:
return self.find_builtin_module(name)
else:
try:
return imp.find_module(name, [dir])
except ImportError:
return None
def find_builtin_module(self, name):
# XXX frozen packages?
if imp.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if imp.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def load_module(self, name, stuff):
file, filename, info = stuff
try:
return imp.load_module(name, file, filename, info)
finally:
if file: file.close()
class Hooks(_Verbose):
"""Hooks into the filesystem and interpreter.
By deriving a subclass you can redefine your filesystem interface,
e.g. to merge it with the URL space.
This base class behaves just like the native filesystem.
"""
# imp interface
def get_suffixes(self): return imp.get_suffixes()
def new_module(self, name): return imp.new_module(name)
def is_builtin(self, name): return imp.is_builtin(name)
def init_builtin(self, name): return imp.init_builtin(name)
def is_frozen(self, name): return imp.is_frozen(name)
def init_frozen(self, name): return imp.init_frozen(name)
def get_frozen_object(self, name): return imp.get_frozen_object(name)
def load_source(self, name, filename, file=None):
return imp.load_source(name, filename, file)
def load_compiled(self, name, filename, file=None):
return imp.load_compiled(name, filename, file)
def load_dynamic(self, name, filename, file=None):
return imp.load_dynamic(name, filename, file)
def load_package(self, name, filename, file=None):
return imp.load_module(name, file, filename, ("", "", PKG_DIRECTORY))
def add_module(self, name):
d = self.modules_dict()
if d.has_key(name): return d[name]
d[name] = m = self.new_module(name)
return m
# sys interface
def modules_dict(self): return sys.modules
def default_path(self): return sys.path
def path_split(self, x): return os.path.split(x)
def path_join(self, x, y): return os.path.join(x, y)
def path_isabs(self, x): return os.path.isabs(x)
# etc.
def path_exists(self, x): return os.path.exists(x)
def path_isdir(self, x): return os.path.isdir(x)
def path_isfile(self, x): return os.path.isfile(x)
def path_islink(self, x): return os.path.islink(x)
# etc.
def openfile(self, *x): return apply(open, x)
openfile_error = IOError
def listdir(self, x): return os.listdir(x)
listdir_error = os.error
# etc.
class ModuleLoader(BasicModuleLoader):
"""Default module loader; uses file system hooks.
By defining suitable hooks, you might be able to load modules from
other sources than the file system, e.g. from compressed or
encrypted files, tar files or (if you're brave!) URLs.
"""
def __init__(self, hooks = None, verbose = VERBOSE):
BasicModuleLoader.__init__(self, verbose)
self.hooks = hooks or Hooks(verbose)
def default_path(self):
return self.hooks.default_path()
def modules_dict(self):
return self.hooks.modules_dict()
def get_hooks(self):
return self.hooks
def set_hooks(self, hooks):
self.hooks = hooks
def find_builtin_module(self, name):
# XXX frozen packages?
if self.hooks.is_builtin(name):
return None, '', ('', '', BUILTIN_MODULE)
if self.hooks.is_frozen(name):
return None, '', ('', '', FROZEN_MODULE)
return None
def find_module_in_dir(self, name, dir, allow_packages=1):
if dir is None:
return self.find_builtin_module(name)
if allow_packages:
fullname = self.hooks.path_join(dir, name)
if self.hooks.path_isdir(fullname):
stuff = self.find_module_in_dir("__init__", fullname, 0)
if stuff:
file = stuff[0]
if file: file.close()
return None, fullname, ('', '', PKG_DIRECTORY)
for info in self.hooks.get_suffixes():
suff, mode, type = info
fullname = self.hooks.path_join(dir, name+suff)
try:
fp = self.hooks.openfile(fullname, mode)
return fp, fullname, info
except self.hooks.openfile_error:
pass
return None
def load_module(self, name, stuff):
file, filename, info = stuff
(suff, mode, type) = info
try:
if type == BUILTIN_MODULE:
return self.hooks.init_builtin(name)
if type == FROZEN_MODULE:
return self.hooks.init_frozen(name)
if type == C_EXTENSION:
m = self.hooks.load_dynamic(name, filename, file)
elif type == PY_SOURCE:
m = self.hooks.load_source(name, filename, file)
elif type == PY_COMPILED:
m = self.hooks.load_compiled(name, filename, file)
elif type == PKG_DIRECTORY:
m = self.hooks.load_package(name, filename, file)
else:
raise ImportError, "Unrecognized module type (%s) for %s" % \
(`type`, name)
finally:
if file: file.close()
m.__file__ = filename
return m
class FancyModuleLoader(ModuleLoader):
"""Fancy module loader -- parses and execs the code itself."""
def load_module(self, name, stuff):
file, filename, (suff, mode, type) = stuff
realfilename = filename
path = None
if type == PKG_DIRECTORY:
initstuff = self.find_module_in_dir("__init__", filename, 0)
if not initstuff:
raise ImportError, "No __init__ module in package %s" % name
initfile, initfilename, initinfo = initstuff
initsuff, initmode, inittype = initinfo
if inittype not in (PY_COMPILED, PY_SOURCE):
if initfile: initfile.close()
raise ImportError, \
"Bad type (%s) for __init__ module in package %s" % (
`inittype`, name)
path = [filename]
file = initfile
realfilename = initfilename
type = inittype
if type == FROZEN_MODULE:
code = self.hooks.get_frozen_object(name)
elif type == PY_COMPILED:
import marshal
file.seek(8)
code = marshal.load(file)
elif type == PY_SOURCE:
data = file.read()
code = compile(data, realfilename, 'exec')
else:
return ModuleLoader.load_module(self, name, stuff)
m = self.hooks.add_module(name)
if path:
m.__path__ = path
m.__file__ = filename
exec code in m.__dict__
return m
class BasicModuleImporter(_Verbose):
"""Basic module importer; uses module loader.
This provides basic import facilities but no package imports.
"""
def __init__(self, loader = None, verbose = VERBOSE):
_Verbose.__init__(self, verbose)
self.loader = loader or ModuleLoader(None, verbose)
self.modules = self.loader.modules_dict()
def get_loader(self):
return self.loader
def set_loader(self, loader):
self.loader = loader
def get_hooks(self):
return self.loader.get_hooks()
def set_hooks(self, hooks):
return self.loader.set_hooks(hooks)
def import_module(self, name, globals={}, locals={}, fromlist=[]):
if self.modules.has_key(name):
return self.modules[name] # Fast path
stuff = self.loader.find_module(name)
if not stuff:
raise ImportError, "No module named %s" % name
return self.loader.load_module(name, stuff)
def reload(self, module, path = None):
name = module.__name__
stuff = self.loader.find_module(name, path)
if not stuff:
raise ImportError, "Module %s not found for reload" % name
return self.loader.load_module(name, stuff)
def unload(self, module):
del self.modules[module.__name__]
# XXX Should this try to clear the module's namespace?
def install(self):
self.save_import_module = __builtin__.__import__
self.save_reload = __builtin__.reload
if not hasattr(__builtin__, 'unload'):
__builtin__.unload = None
self.save_unload = __builtin__.unload
__builtin__.__import__ = self.import_module
__builtin__.reload = self.reload
__builtin__.unload = self.unload
def uninstall(self):
__builtin__.__import__ = self.save_import_module
__builtin__.reload = self.save_reload
__builtin__.unload = self.save_unload
if not __builtin__.unload:
del __builtin__.unload
class ModuleImporter(BasicModuleImporter):
"""A module importer that supports packages."""
def import_module(self, name, globals=None, locals=None, fromlist=None):
parent = self.determine_parent(globals)
q, tail = self.find_head_package(parent, name)
m = self.load_tail(q, tail)
if not fromlist:
return q
if hasattr(m, "__path__"):
self.ensure_fromlist(m, fromlist)
return m
def determine_parent(self, globals):
if not globals or not globals.has_key("__name__"):
return None
pname = globals['__name__']
if globals.has_key("__path__"):
parent = self.modules[pname]
assert globals is parent.__dict__
return parent
if '.' in pname:
i = string.rfind(pname, '.')
pname = pname[:i]
parent = self.modules[pname]
assert parent.__name__ == pname
return parent
return None
def find_head_package(self, parent, name):
if '.' in name:
i = string.find(name, '.')
head = name[:i]
tail = name[i+1:]
else:
head = name
tail = ""
if parent:
qname = "%s.%s" % (parent.__name__, head)
else:
qname = head
q = self.import_it(head, qname, parent)
if q: return q, tail
if parent:
qname = head
parent = None
q = self.import_it(head, qname, parent)
if q: return q, tail
raise ImportError, "No module named " + qname
def load_tail(self, q, tail):
m = q
while tail:
i = string.find(tail, '.')
if i < 0: i = len(tail)
head, tail = tail[:i], tail[i+1:]
mname = "%s.%s" % (m.__name__, head)
m = self.import_it(head, mname, m)
if not m:
raise ImportError, "No module named " + mname
return m
def ensure_fromlist(self, m, fromlist, recursive=0):
for sub in fromlist:
if sub == "*":
if not recursive:
try:
all = m.__all__
except AttributeError:
pass
else:
self.ensure_fromlist(m, all, 1)
continue
if sub != "*" and not hasattr(m, sub):
subname = "%s.%s" % (m.__name__, sub)
submod = self.import_it(sub, subname, m)
if not submod:
raise ImportError, "No module named " + subname
def import_it(self, partname, fqname, parent, force_load=0):
if not partname:
raise ValueError, "Empty module name"
if not force_load:
try:
return self.modules[fqname]
except KeyError:
pass
try:
path = parent and parent.__path__
except AttributeError:
return None
stuff = self.loader.find_module(partname, path)
if not stuff:
return None
m = self.loader.load_module(fqname, stuff)
if parent:
setattr(parent, partname, m)
return m
def reload(self, module):
name = module.__name__
if '.' not in name:
return self.import_it(name, name, None, force_load=1)
i = string.rfind(name, '.')
pname = name[:i]
parent = self.modules[pname]
return self.import_it(name[i+1:], name, parent, force_load=1)
default_importer = None
current_importer = None
def install(importer = None):
global current_importer
current_importer = importer or default_importer or ModuleImporter()
current_importer.install()
def uninstall():
global current_importer
current_importer.uninstall()
| gpl-2.0 | 2,783,969,446,145,869,300 | 32.384314 | 77 | 0.595912 | false |
teleshoes/n9-button-monitor | src/config.py | 1 | 9729 | #!/usr/bin/python
#N9 Button Monitor
#Copyright (C) 2013 Elliot Wolk
#Copyright (C) 2013 Lcferrum
#
#This program is free software: you can redistribute it and/or modify
#it under the terms of the GNU General Public License as published by
#the Free Software Foundation, either version 3 of the License, or
#(at your option) any later version.
#
#You should have received a copy of the GNU General Public License
#along with this program. If not, see <http://www.gnu.org/licenses/>.
from ledpattern import LedPattern
from clicktimer import getButtons
from dbusbtn import DbusButton
from prxbtn import ProximityButton
import sys
import os
import re
import subprocess
CAMERA_SOUND = "/usr/share/sounds/ui-tones/snd_camera_shutter.wav"
def getUserConfigFilePath():
return "/home/user/.config/n9-button-monitor.ini"
def getSystemConfigFilePath():
return "/opt/n9-button-monitor/data/default-config.ini"
class Config():
def __init__(self, actionDict, validButtonNames, validClickTypeNames):
self.actionDict = actionDict
self.validActionNames = actionDict.getActionLambdaDict().keys()
self.validConditionNames = actionDict.getConditionLambdaDict().keys()
self.validButtonNames = validButtonNames
self.validClickTypeNames = validClickTypeNames
self.dbusButton = DbusButton()
self.proximityButton = ProximityButton()
self.lastTimeStamp = None
self.resetConfig()
self.initRegex()
def checkConfigFile(self):
timestamp = self.getTimeStamp()
if self.lastTimeStamp == None or self.lastTimeStamp != timestamp:
print >> sys.stderr, "refreshing config"
self.ensureUserConf()
try:
self.parse(self.readConf(getUserConfigFilePath()))
except:
print >> sys.stderr, "INVALID CONFIG, ATTEMPTING TO USE DEFAULT"
self.parse(self.readConf(getSystemConfigFilePath()))
self.lastTimeStamp = timestamp
def getTimeStamp(self):
if os.path.isfile(getUserConfigFilePath()):
cmdArr = ["stat", "-t", getUserConfigFilePath()]
out, err = subprocess.Popen(cmdArr, stdout=subprocess.PIPE).communicate()
if err == None or len(err) == 0:
return out
else:
return "error"
else:
return "missing"
def resetConfig(self):
self.torchAutoShutOffTimeMs=300000
self.cameraDisabled=0
self.quickSnapShutterSound=CAMERA_SOUND
self.quickSnapSaveSound=''
self.quickSnapShutterLedPattern=LedPattern('blink')
self.quickSnapSaveLedPattern=LedPattern('doubleblink')
self.longClickDelayMs=400
self.doubleClickDelayMs=400
self.trebleClickDelayMs=600
self.dbusButton.setPatternDelayMs(1500)
self.dbusButton.clearButtonSyns()
self.actionMapSet = ActionMapSet([])
def initRegex(self):
self.intRe = re.compile(
"^\\s*(?P<key>[a-zA-Z0-9]+)" + "\\s*=\\s*" + "(?P<value>\d+)\\s*(#.*)?$")
self.strRe = re.compile(
"^\\s*(?P<key>[a-zA-Z0-9]+)" + "\\s*=\\s*" + "(?P<value>.*?)\\s*(#.*)?$")
self.commentRe = re.compile("^\\s*#")
self.emptyRe = re.compile("^\\s*$")
self.actionMapRe = re.compile(""
+ "^"
+ "\\s*action\\s*=\\s*"
+ "(?P<actionName>" + "|".join(self.validActionNames) + ")"
+ "(?:" + "\(" + "(?P<actionParam>[^)]*)" + "\)" + ")?"
+ "\\s*,\\s*"
+ "(?P<button>" + "|".join(self.validButtonNames) + ")"
+ "(?:" + "\(" + "(?P<buttonParam>[^)]*)" + "\)" + ")?"
+ "\\s*,\\s*"
+ "(?P<clickType>" + "|".join(self.validClickTypeNames) + ")"
+ "\\s*,\\s*"
+ "(?P<condName>" + "|".join(self.validConditionNames) + ")"
+ "(?:" + "\(" + "(?P<condParam>[^)]*)" + "\)" + ")?"
+ "\\s*(#.*)?"
+ "$"
)
self.dbusButtonSynRe = re.compile(""
+ "^"
+ "\\s*dbusButtonSyn\\s*=\\s*"
+ "(?P<syn>[a-zA-Z0-9_\\-]+)"
+ "\\s*(?:=>|,|->)\\s*"
+ "(?P<target>[a-zA-Z0-9_\\-]+)"
+ "$"
)
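    # Example lines these patterns accept (every identifier below is a
    # made-up placeholder; the real action/button/click-type/condition
    # names come from the ActionDict and key lists supplied at runtime):
    #
    #   action=torchOn(255),powerButton,singleClick,screenLocked
    #   dbusButtonSyn=headsetButton=>powerButton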
def readConf(self, confFile):
if os.path.isfile(confFile):
return open(confFile,"rb").read()
else:
return None
def ensureUserConf(self):
userConf = getUserConfigFilePath()
if not os.path.isfile(userConf):
print "WARNING: no config file at '" + userConf + "'"
print "Copying default config:\n"
os.system("cp " + getSystemConfigFilePath() + " " + userConf)
def parse(self, confText):
self.resetConfig()
actionMaps = []
for line in confText.splitlines():
actionMapMatch = self.actionMapRe.match(line)
dbusButtonSynMatch = self.dbusButtonSynRe.match(line)
intMatch = self.intRe.match(line)
strMatch = self.strRe.match(line)
commentMatch = self.commentRe.match(line)
emptyMatch = self.emptyRe.match(line)
intKey = None
if intMatch != None:
intKey = intMatch.group("key")
intVal = int(intMatch.group("value"))
strKey = None
if strMatch != None:
strKey = strMatch.group("key")
strVal = strMatch.group("value")
if actionMapMatch != None:
actionMaps.append(ActionMap(
self.actionDict,
actionName = actionMapMatch.group("actionName"),
actionParam = actionMapMatch.group("actionParam"),
condName = actionMapMatch.group("condName"),
condParam = actionMapMatch.group("condParam"),
button = actionMapMatch.group("button"),
buttonParam = actionMapMatch.group("buttonParam"),
clickType = actionMapMatch.group("clickType"),
))
elif dbusButtonSynMatch != None:
self.dbusButton.addButtonSyn(
dbusButtonSynMatch.group("syn"), dbusButtonSynMatch.group("target"))
elif intKey == "torchAutoShutOffTimeMs":
self.torchAutoShutOffTimeMs = intVal
elif intKey == "cameraDisabled":
self.cameraDisabled = intVal
elif intKey == "longClickDelayMs":
self.longClickDelayMs = intVal
elif intKey == "doubleClickDelayMs":
self.doubleClickDelayMs = intVal
elif intKey == "trebleClickDelayMs":
self.trebleClickDelayMs = intVal
elif intKey == "dbusPatternDelayMs":
self.dbusButton.setPatternDelayMs(intVal)
elif strKey == "quickSnapShutterSound":
self.quickSnapShutterSound = strVal
elif strKey == "quickSnapSaveSound":
self.quickSnapSaveSound = strVal
elif strKey == "quickSnapShutterLedPattern":
self.quickSnapShutterLedPattern = LedPattern(strVal)
elif strKey == "quickSnapSaveLedPattern":
self.quickSnapSaveLedPattern = LedPattern(strVal)
elif commentMatch == None and emptyMatch == None:
print >> sys.stderr, "Unparseable config entry: " + line
raise Exception("Unparseable config entry: " + line)
self.actionMapSet = ActionMapSet(actionMaps)
def getActionMapSet(self):
return self.actionMapSet
class ActionMapSet():
def __init__(self, actionMaps):
self.actionMaps = actionMaps
self.actionMapsByDbusButton = dict()
self.actionMapsByKeyByClickType = dict()
for a in actionMaps:
if a.key == a.clickType:
if a.buttonParam != None:
if not a.buttonParam in self.actionMapsByKeyByClickType:
self.actionMapsByKeyByClickType[a.buttonParam] = dict()
actionMapsByKey = self.actionMapsByKeyByClickType[a.buttonParam]
if not a.key in actionMapsByKey:
actionMapsByKey[a.key] = []
actionMapsByKey[a.key].append(a)
else:
if not a.clickType in self.actionMapsByKeyByClickType:
self.actionMapsByKeyByClickType[a.clickType] = dict()
actionMapsByKey = self.actionMapsByKeyByClickType[a.clickType]
if not a.key in actionMapsByKey:
actionMapsByKey[a.key] = []
actionMapsByKey[a.key].append(a)
def getActionMapsForDbus(self, button):
if not button in self.actionMapsByDbusButton:
return []
else:
return self.actionMapsByDbusButton[button]
def getActionMapsForKey(self, key, clickType):
if not clickType in self.actionMapsByKeyByClickType:
return []
elif not key in self.actionMapsByKeyByClickType[clickType]:
return []
else:
return self.actionMapsByKeyByClickType[clickType][key]
class ActionMap():
def __init__(self, actionDict,
actionName, actionParam,
condName, condParam,
button, buttonParam, clickType):
self.actionName = actionName
self.actionParam = actionParam
self.condName = condName
self.condParam = condParam
self.button = button
self.buttonParam = buttonParam
self.key = getButtons()[button]
self.clickType = clickType
self.actionLambda = self.getLambda(actionDict.getActionLambdaDict(),
self.actionName, self.actionParam)
self.condLambda = self.getLambda(actionDict.getConditionLambdaDict(),
self.condName, self.condParam)
def maybeRun(self):
if self.condLambda == None or self.condLambda():
self.actionLambda()
def __str__(self):
if self.actionParam == None:
param = ""
else:
param = "(" + self.actionParam + ")"
action = self.actionName + param
return (str(self.key) + "[" + self.clickType + "]: " + action)
def getLambda(self, lambdaDict, lambdaName, lambdaParam):
lam = lambdaDict[lambdaName]
assert self.isLambda(lam), "'" + lambdaName + "' not defined"
if lambdaParam != None:
try:
lam = lam(lambdaParam)
assert self.isLambda(lam)
except:
msg = ("'" + lambdaName + "' does not accept an argument\n" +
"{given: '" + lambdaParam + "'}")
print >> sys.stderr, msg
raise Exception(msg)
return lam
def isLambda(self, v):
return isinstance(v, type(lambda: None)) and v.__name__ == '<lambda>'
| gpl-3.0 | 6,974,518,348,037,700,000 | 36.419231 | 79 | 0.641895 | false |
huguesv/PTVS | Python/Product/Miniconda/Miniconda3-x64/Lib/idlelib/zoomheight.py | 5 | 1571 | "Zoom a window to maximum height."
import re
import sys
from idlelib import macosx
class ZoomHeight:
def __init__(self, editwin):
self.editwin = editwin
def zoom_height_event(self, event=None):
top = self.editwin.top
zoomed = zoom_height(top)
menu_status = 'Restore' if zoomed else 'Zoom'
self.editwin.update_menu_label(menu='options', index='* Height',
label=f'{menu_status} Height')
return "break"
def zoom_height(top):
geom = top.wm_geometry()
m = re.match(r"(\d+)x(\d+)\+(-?\d+)\+(-?\d+)", geom)
if not m:
top.bell()
return
width, height, x, y = map(int, m.groups())
newheight = top.winfo_screenheight()
if sys.platform == 'win32':
newy = 0
newheight = newheight - 72
elif macosx.isAquaTk():
# The '88' below is a magic number that avoids placing the bottom
# of the window below the panel on my machine. I don't know how
# to calculate the correct value for this with tkinter.
newy = 22
newheight = newheight - newy - 88
else:
#newy = 24
newy = 0
#newheight = newheight - 96
newheight = newheight - 88
if height >= newheight:
newgeom = ""
else:
newgeom = "%dx%d+%d+%d" % (width, newheight, x, newy)
top.wm_geometry(newgeom)
return newgeom != ""
if __name__ == "__main__":
from unittest import main
main('idlelib.idle_test.test_zoomheight', verbosity=2, exit=False)
# Add htest?
| apache-2.0 | -2,960,363,129,399,956,500 | 25.627119 | 73 | 0.569064 | false |
LukeCarrier/py3k-pyrg | pyrg/pyrg.py | 1 | 8423 | #!/usr/bin/env python
"""pyrg - colorized Python's UnitTest Result Tool"""
from configparser import ConfigParser
from subprocess import Popen, PIPE
from select import poll, POLLIN
from optparse import OptionParser
import sys
import re
import os
import pwd
__version__ = '0.2.6'
__author__ = 'Hideo Hattroi <[email protected]>'
__license__ = 'NewBSDLicense'
__all__ = ['get_color', 'parse_unittest_result_verbose',
'parse_unittest_result', 'set_configuration']
DEFAULT_CONFIG_PATH = "/home/%s/.pyrgrc" % (pwd.getpwuid(os.getuid())[0])
PRINT_COLOR_SET_DEFAULT = {
'ok': 'green',
'fail': 'red',
'error': 'yellow',
'function': 'cyan',
}
PRINT_COLOR_SET = PRINT_COLOR_SET_DEFAULT.copy()
COLOR_MAP = {
    'black': '\033[30m%s\033[0m',
    'gray': '\033[1;30m%s\033[0m',
    #'black ': '\033[2;30m%s\033[0m', ## not work
    'red': '\033[31m%s\033[0m',
    'pink': '\033[1;31m%s\033[0m',
    'darkred': '\033[2;31m%s\033[0m',
    'green': '\033[32m%s\033[0m',
    'yellowgreen': '\033[1;32m%s\033[0m',
    'darkgreen': '\033[2;32m%s\033[0m',
    'brown': '\033[33m%s\033[0m',
    'yellow': '\033[1;33m%s\033[0m',
    'gold': '\033[2;33m%s\033[0m',
    'blue': '\033[34m%s\033[0m',
    'lightblue': '\033[1;34m%s\033[0m',
    'darkblue': '\033[2;34m%s\033[0m',
    'magenta': '\033[35m%s\033[0m',
    'lightmagenta': '\033[1;35m%s\033[0m',
    'darkmagenta': '\033[2;35m%s\033[0m',
    'cyan': '\033[36m%s\033[0m',
    'lightcyan': '\033[1;36m%s\033[0m',
    'darkcyan': '\033[2;36m%s\033[0m',
    'silver': '\033[37m%s\033[0m',
    'white': '\033[1;37m%s\033[0m',
    'darksilver': '\033[2;37m%s\033[0m',
}
def get_color(key):
"""color name get from COLOR_MAP dict."""
return COLOR_MAP[PRINT_COLOR_SET[key]]
def parse_result_line(line):
"""parse to test result when fail tests"""
err = False
fail = False
if 'errors' in line:
err = True
if 'failures' in line:
fail = True
if err and fail:
f = line.split('=')[1].split(',')[0]
e = line.split('=')[2].split(')')[0]
result = "(%s=%s, " % (get_color('fail') % "failures",
get_color('fail') % f)
result += "%s=%s)" % (get_color('error') % "errors",
get_color('error') % e)
elif fail and not err:
l = line.split('=')[1].split(')')[0]
result = "(%s=%s)" % (get_color('fail') % "failures",
get_color('fail') % l)
elif err and not fail:
l = line.split('=')[1].split(')')[0]
result = "(%s=%s)" % (get_color('error') % "errors",
get_color('error') % l)
return get_color('fail') % "FAILED" + " %s" % result
def parse_lineone(line):
"""parse to test result line1"""
results = []
line = line.strip()
for char in line:
if '.' == char:
results.append(get_color('ok') % ".")
elif 'E' == char:
results.append(get_color('error') % "E")
elif 'F' == char:
results.append(get_color('fail') % "F")
else:
results.append(char)
return "".join(results)
def coloring_method(line):
"""colorized method line"""
return get_color('function') % line
def parse_unittest_result(lines):
"""parse test result"""
results = []
err_verbose = re.compile("ERROR:")
fail_verbose = re.compile("FAIL:")
unittests_ok = re.compile("OK")
unittests_failed = re.compile("FAILED")
if not lines:
return ""
results.append(parse_lineone(lines[0]) + '\n')
for line in lines[1:]:
if unittests_ok.match(line):
result = get_color('ok') % "OK"
elif unittests_failed.match(line):
result = parse_result_line(line)
elif fail_verbose.match(line):
result = "%s: %s\n" % (get_color('fail') % "FAIL",
coloring_method(line[6:-1]))
elif err_verbose.match(line):
result = "%s: %s\n" % (get_color('error') % "ERROR",
coloring_method(line[7:-1]))
else:
result = line
results.append(result)
return "".join(results)
def parse_unittest_result_verbose(lines):
"""parse test result, verbose print mode."""
ok = re.compile("ok$")
fail = re.compile("FAIL$")
err = re.compile("ERROR$")
fail_verbose = re.compile("FAIL:")
err_verbose = re.compile("ERROR:")
unittests_ok = re.compile("OK")
unittests_failed = re.compile("FAILED")
results = []
for line in lines:
if ok.search(line):
tmp = ok.split(line)
result = tmp[0] + get_color('ok') % "ok" + "\n"
elif fail.search(line):
tmp = fail.split(line)
result = tmp[0] + get_color('fail') % "FAIL" + "\n"
elif err.search(line):
tmp = err.split(line)
result = tmp[0] + get_color('error') % "ERROR" + "\n"
elif fail_verbose.match(line):
result = "%s: %s\n" % (get_color('fail') % "FAIL",
coloring_method(line[6:-1]))
elif err_verbose.match(line):
result = "%s: %s\n" % (get_color('error') % "ERROR",
coloring_method(line[7:-1]))
elif unittests_ok.match(line):
result = get_color('ok') % "OK"
elif unittests_failed.match(line):
result = parse_result_line(line)
else:
result = line
results.append(result)
return "".join(results)
def set_configuration(filename):
"""setting to printing color map"""
ret = PRINT_COLOR_SET_DEFAULT.copy()
if not os.path.exists(filename):
return ret
configure = ConfigParser()
configure.read(filename)
for setkey, color in configure.items('color'):
if not setkey in PRINT_COLOR_SET:
continue
if color in COLOR_MAP:
ret[setkey] = color
else:
ret[setkey] = PRINT_COLOR_SET_DEFAULT[setkey]
return ret
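# A ~/.pyrgrc picked up by set_configuration() could look like this (the
# section name and keys come from the code above; the color values are just
# an illustrative choice from COLOR_MAP):
#
#   [color]
#   ok = green
#   fail = pink
#   error = yellow
#   function = lightcyan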
def get_optionparser():
"""return to optparse's OptionParser object."""
parser = OptionParser(version="pyrg: %s" % __version__,
description=__doc__,
usage="Usage: pyrg [options] TEST_SCRIPT.py\n"\
" : python TEST_SCRIPT.py |& pyrg")
parser.add_option('-v', '--verbose', action='store_true',
dest='mode_verbose',
help='print to verbose result for unittest.')
parser.add_option('-d', '--default-color', action='store_true',
dest='mode_defaultcolor',
help='used to default color setting.')
parser.add_option('-f', '--config-file', dest='config_filename',
help='configuration file path')
return parser
def check_verbose(line):
verbose = re.compile("(ok$|ERROR$|FAIL$)")
return verbose.search(line)
def main():
"""execute command line tool"""
global PRINT_COLOR_SET
parser = get_optionparser()
(opts, args) = parser.parse_args()
if not opts.mode_defaultcolor:
if opts.config_filename:
PRINT_COLOR_SET = set_configuration(opts.config_filename)
else:
PRINT_COLOR_SET = set_configuration(DEFAULT_CONFIG_PATH)
if len(args):
if opts.mode_verbose:
cmdline = ['python', args[0], '-v']
if len(args) >= 2:
cmdline += [i for i in args[1:]]
proc = Popen(cmdline, stdout=PIPE, stderr=PIPE)
result = proc.communicate()[1]
print(parse_unittest_result_verbose(result.splitlines(1)))
else:
cmdline = ['python']
cmdline += [i for i in args]
proc = Popen(cmdline, stdout=PIPE, stderr=PIPE)
result = proc.communicate()[1]
print(parse_unittest_result(result.splitlines(1)))
else:
poller = poll()
poller.register(sys.stdin, POLLIN)
pollret = poller.poll(1)
if len(pollret) == 1 and pollret[0][1] & POLLIN:
lines = sys.stdin.readlines()
if check_verbose(lines[0]):
print(parse_unittest_result_verbose(lines))
else:
print(parse_unittest_result(lines))
else:
parser.print_help()
if __name__ == '__main__':
sys.exit(main())
| bsd-3-clause | 7,217,174,423,480,943,000 | 33.101215 | 73 | 0.51763 | false |
spulec/moto | moto/rds2/exceptions.py | 2 | 4182 | from __future__ import unicode_literals
from jinja2 import Template
from werkzeug.exceptions import BadRequest
class RDSClientError(BadRequest):
def __init__(self, code, message):
super(RDSClientError, self).__init__()
template = Template(
"""
<RDSClientError>
<Error>
<Code>{{ code }}</Code>
<Message>{{ message }}</Message>
<Type>Sender</Type>
</Error>
<RequestId>6876f774-7273-11e4-85dc-39e55ca848d1</RequestId>
</RDSClientError>"""
)
self.description = template.render(code=code, message=message)
class DBInstanceNotFoundError(RDSClientError):
def __init__(self, database_identifier):
super(DBInstanceNotFoundError, self).__init__(
"DBInstanceNotFound",
"DBInstance {0} not found.".format(database_identifier),
)
class DBSnapshotNotFoundError(RDSClientError):
def __init__(self, snapshot_identifier):
super(DBSnapshotNotFoundError, self).__init__(
"DBSnapshotNotFound",
"DBSnapshot {} not found.".format(snapshot_identifier),
)
class DBSecurityGroupNotFoundError(RDSClientError):
def __init__(self, security_group_name):
super(DBSecurityGroupNotFoundError, self).__init__(
"DBSecurityGroupNotFound",
"Security Group {0} not found.".format(security_group_name),
)
class DBSubnetGroupNotFoundError(RDSClientError):
def __init__(self, subnet_group_name):
super(DBSubnetGroupNotFoundError, self).__init__(
"DBSubnetGroupNotFound",
"Subnet Group {0} not found.".format(subnet_group_name),
)
class DBParameterGroupNotFoundError(RDSClientError):
def __init__(self, db_parameter_group_name):
super(DBParameterGroupNotFoundError, self).__init__(
"DBParameterGroupNotFound",
"DB Parameter Group {0} not found.".format(db_parameter_group_name),
)
class OptionGroupNotFoundFaultError(RDSClientError):
def __init__(self, option_group_name):
super(OptionGroupNotFoundFaultError, self).__init__(
"OptionGroupNotFoundFault",
"Specified OptionGroupName: {0} not found.".format(option_group_name),
)
class InvalidDBClusterStateFaultError(RDSClientError):
def __init__(self, database_identifier):
super(InvalidDBClusterStateFaultError, self).__init__(
"InvalidDBClusterStateFault",
"Invalid DB type, when trying to perform StopDBInstance on {0}e. See AWS RDS documentation on rds.stop_db_instance".format(
database_identifier
),
)
class InvalidDBInstanceStateError(RDSClientError):
def __init__(self, database_identifier, istate):
estate = (
"in available state"
if istate == "stop"
else "stopped, it cannot be started"
)
super(InvalidDBInstanceStateError, self).__init__(
"InvalidDBInstanceState",
"Instance {} is not {}.".format(database_identifier, estate),
)
class SnapshotQuotaExceededError(RDSClientError):
def __init__(self):
super(SnapshotQuotaExceededError, self).__init__(
"SnapshotQuotaExceeded",
"The request cannot be processed because it would exceed the maximum number of snapshots.",
)
class DBSnapshotAlreadyExistsError(RDSClientError):
def __init__(self, database_snapshot_identifier):
super(DBSnapshotAlreadyExistsError, self).__init__(
"DBSnapshotAlreadyExists",
"Cannot create the snapshot because a snapshot with the identifier {} already exists.".format(
database_snapshot_identifier
),
)
class InvalidParameterValue(RDSClientError):
def __init__(self, message):
super(InvalidParameterValue, self).__init__("InvalidParameterValue", message)
class InvalidParameterCombination(RDSClientError):
def __init__(self, message):
super(InvalidParameterCombination, self).__init__(
"InvalidParameterCombination", message
)
| apache-2.0 | -3,694,546,571,745,217,000 | 33.278689 | 135 | 0.636777 | false |
hurdlea/SimpleCV | SimpleCV/tests/test_stereovision.py | 12 | 5828 | # /usr/bin/python
# To run this test you need python nose tools installed
# Run test just use:
#    nosetests test_stereovision.py
#
import os, sys, pickle
from SimpleCV import *
from nose.tools import with_setup, nottest
VISUAL_TEST = True # if TRUE we save the images - otherwise we DIFF against them - the default is False
SHOW_WARNING_TESTS = False # show that warnings are working - tests will pass but warnings are generated.
#colors
black = Color.BLACK
white = Color.WHITE
red = Color.RED
green = Color.GREEN
blue = Color.BLUE
###############
# TODO -
# Examples of how to do profiling
# Examples of how to do a single test -
# UPDATE THE VISUAL TESTS WITH EXAMPLES.
# Fix exif data
# Turn off test warnings using decorators.
# Write a use the tests doc.
#images
pair1 = ("../sampleimages/stereo1_left.png" , "../sampleimages/stereo1_right.png")
pair2 = ("../sampleimages/stereo2_left.png" , "../sampleimages/stereo2_right.png")
pair3 = ("../sampleimages/stereo1_real_left.png" , "../sampleimages/stereo1_real_right.png")
pair4 = ("../sampleimages/stereo2_real_left.png" , "../sampleimages/stereo2_real_right.png")
pair5 = ("../sampleimages/stereo3_real_left.png" , "../sampleimages/stereo3_real_right.png")
correct_pairs = [pair1,pair2,pair3,pair4,pair5]
#standards path
standard_path = "./standard/"
#Given a set of images, a path, and a tolerance do the image diff.
def imgDiffs(test_imgs,name_stem,tolerance,path):
count = len(test_imgs)
for idx in range(0,count):
lhs = test_imgs[idx].applyLayers() # this catches drawing methods
fname = standard_path+name_stem+str(idx)+".jpg"
rhs = Image(fname)
if( lhs.width == rhs.width and lhs.height == rhs.height ):
diff = (lhs-rhs)
val = np.average(diff.getNumpy())
if( val > tolerance ):
print val
return True
return False
#Save a list of images to a standard path.
def imgSaves(test_imgs, name_stem, path=standard_path):
count = len(test_imgs)
for idx in range(0,count):
fname = standard_path+name_stem+str(idx)+".jpg"
test_imgs[idx].save(fname)#,quality=95)
#perform the actual image save and image diffs.
def perform_diff(result,name_stem,tolerance=2.0,path=standard_path):
if(VISUAL_TEST): # save the correct images for a visual test
imgSaves(result,name_stem,path)
else: # otherwise we test our output against the visual test
if( imgDiffs(result,name_stem,tolerance,path) ):
assert False
else:
pass
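# Example invocation (mirrors the calls made in the tests below; the image
# list and name stem are placeholders):
#   perform_diff([some_result_image], "test_disparitymapBM")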
#These function names are required by nose test, please leave them as is
def setup_context():
img = Image(pair1[0])
def destroy_context():
img = ""
@with_setup(setup_context, destroy_context)
def test_findFundamentalMat():
for pairs in correct_pairs :
img1 = Image(pairs[0])
img2 = Image(pairs[1])
StereoImg = StereoImage(img1,img2)
if ( not StereoImg.findFundamentalMat()):
assert False
def test_findHomography():
for pairs in correct_pairs :
img1 = Image(pairs[0])
img2 = Image(pairs[1])
StereoImg = StereoImage(img1,img2)
if (not StereoImg.findHomography()):
assert False
def test_findDisparityMap():
dips = []
for pairs in correct_pairs :
img1 = Image(pairs[0])
img2 = Image(pairs[1])
StereoImg = StereoImage(img1,img2)
dips.append(StereoImg.findDisparityMap(method="BM"))
name_stem = "test_disparitymapBM"
perform_diff(dips,name_stem)
dips = []
for pairs in correct_pairs :
img1 = Image(pairs[0])
img2 = Image(pairs[1])
StereoImg = StereoImage(img1,img2)
dips.append(StereoImg.findDisparityMap(method="SGBM"))
name_stem = "test_disparitymapSGBM"
perform_diff(dips,name_stem)
def test_eline():
for pairs in correct_pairs :
img1 = Image(pairs[0])
img2 = Image(pairs[1])
StereoImg = StereoImage(img1,img2)
F,ptsLeft,ptsRight = StereoImg.findFundamentalMat()
for pts in ptsLeft :
line = StereoImg.Eline(pts,F,2)
if (line == None):
assert False
def test_projectPoint():
for pairs in correct_pairs :
img1 = Image(pairs[0])
img2 = Image(pairs[1])
StereoImg = StereoImage(img1,img2)
H,ptsLeft,ptsRight = StereoImg.findHomography()
for pts in ptsLeft :
line = StereoImg.projectPoint(pts,H,2)
if (line == None):
assert False
def test_StereoCalibration():
cam = StereoCamera()
try :
cam1 = Camera(0)
cam2 = Camera(1)
cam1.getImage()
cam2.getImage()
try :
cam = StereoCamera()
calib = cam.StereoCalibration(0,1,nboards=1)
if (calib):
assert True
else :
assert False
except:
assert False
except :
assert True
def test_loadCalibration():
cam = StereoCamera()
calbib = cam.loadCalibration("Stereo","./StereoVision/")
if (calbib) :
assert True
else :
assert False
def test_StereoRectify():
cam = StereoCamera()
calib = cam.loadCalibration("Stereo","./StereoVision/")
rectify = cam.stereoRectify(calib)
if rectify :
assert True
else :
assert False
def test_getImagesUndistort():
img1 = Image(correct_pairs[0][0]).resize(352,288)
img2 = Image(correct_pairs[0][1]).resize(352,288)
cam = StereoCamera()
calib = cam.loadCalibration("Stereo","./StereoVision/")
rectify = cam.stereoRectify(calib)
rectLeft,rectRight = cam.getImagesUndistort(img1,img2,calib,rectify)
if rectLeft and rectRight :
assert True
else :
assert False
| bsd-3-clause | -3,789,183,460,749,299,700 | 30 | 106 | 0.630233 | false |
Daniel-CA/odoo-addons | account_invoice_temporal/tests/test_account_invoice_temporal.py | 4 | 1531 | # -*- coding: utf-8 -*-
# (c) 2015 Esther Martín - AvanzOSC
# License AGPL-3 - See http://www.gnu.org/licenses/agpl-3.0.html
import openerp.tests.common as common
from openerp import exceptions
class TestAccountInvoiceTemporal(common.TransactionCase):
def setUp(self):
super(TestAccountInvoiceTemporal, self).setUp()
self.invoice = self.env.ref('account.demo_invoice_0')
self.account = self.env.ref('account.a_expense')
self.wiz_model = self.env['account.invoice.confirm']
self.account.temporal = True
def test_temporal(self):
with self.assertRaises(exceptions.Warning):
self.invoice.check_temporal()
self.account.temporal = False
self.invoice.check_temporal()
self.assertNotEqual(self.invoice.state, 'draft')
def test_validate_invoices(self):
wiz = self.wiz_model.create({})
self.account.temporal = True
with self.assertRaises(exceptions.Warning):
wiz.with_context({
'active_ids': [self.invoice.id]}).invoice_confirm()
self.account.temporal = False
wiz.with_context({'active_ids': [self.invoice.id]}).invoice_confirm()
self.assertNotEqual(self.invoice.state, 'draft')
with self.assertRaises(exceptions.Warning):
wiz.with_context({
'active_ids': [self.invoice.id]}).invoice_confirm()
def test_temporal_invoice(self):
self.account.temporal = True
self.assertEqual(self.invoice.is_temporal, True)
| agpl-3.0 | -4,319,140,326,759,959,600 | 37.25 | 77 | 0.651634 | false |
awong1900/platformio | platformio/builder/scripts/nxplpc.py | 3 | 1364 | # Copyright (C) Ivan Kravets <[email protected]>
# See LICENSE for details.
"""
Builder for NXP LPC series ARM microcontrollers.
"""
from os.path import join
from shutil import copyfile
from SCons.Script import (COMMAND_LINE_TARGETS, AlwaysBuild, Default,
DefaultEnvironment, SConscript)
def UploadToDisk(target, source, env): # pylint: disable=W0613,W0621
env.AutodetectUploadPort()
copyfile(join(env.subst("$BUILD_DIR"), "firmware.bin"),
join(env.subst("$UPLOAD_PORT"), "firmware.bin"))
print ("Firmware has been successfully uploaded.\n"
"Please restart your board.")
env = DefaultEnvironment()
SConscript(env.subst(join("$PIOBUILDER_DIR", "scripts", "basearm.py")))
#
# Target: Build executable and linkable firmware
#
target_elf = env.BuildFirmware()
#
# Target: Build the .bin file
#
if "uploadlazy" in COMMAND_LINE_TARGETS:
target_firm = join("$BUILD_DIR", "firmware.bin")
else:
target_firm = env.ElfToBin(join("$BUILD_DIR", "firmware"), target_elf)
#
# Target: Print binary size
#
target_size = env.Alias("size", target_elf, "$SIZEPRINTCMD")
AlwaysBuild(target_size)
#
# Target: Upload by default .bin file
#
upload = env.Alias(["upload", "uploadlazy"], target_firm, UploadToDisk)
AlwaysBuild(upload)
#
# Target: Define targets
#
Default([target_firm, target_size])
| mit | 6,722,294,072,934,264,000 | 22.118644 | 74 | 0.68915 | false |
tomduijf/home-assistant | tests/components/sensor/test_mqtt.py | 3 | 1220 | """
tests.components.sensor.test_mqtt
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
Tests mqtt sensor.
"""
import unittest
import homeassistant.core as ha
import homeassistant.components.sensor as sensor
from tests.common import mock_mqtt_component, fire_mqtt_message
class TestSensorMQTT(unittest.TestCase):
""" Test the MQTT sensor. """
def setUp(self): # pylint: disable=invalid-name
self.hass = ha.HomeAssistant()
mock_mqtt_component(self.hass)
def tearDown(self): # pylint: disable=invalid-name
""" Stop down stuff we started. """
self.hass.stop()
def test_setting_sensor_value_via_mqtt_message(self):
self.assertTrue(sensor.setup(self.hass, {
'sensor': {
'platform': 'mqtt',
'name': 'test',
'state_topic': 'test-topic',
'unit_of_measurement': 'fav unit'
}
}))
fire_mqtt_message(self.hass, 'test-topic', '100')
self.hass.pool.block_till_done()
state = self.hass.states.get('sensor.test')
self.assertEqual('100', state.state)
self.assertEqual('fav unit',
state.attributes.get('unit_of_measurement'))
| mit | -6,412,931,878,990,657,000 | 28.756098 | 69 | 0.583607 | false |
syazdan25/SE17-Project | CrossVal.py | 1 | 4486 | # -*- coding: utf-8 -*-
"""
Created on Tue Mar 14 18:06:24 2017
@author: mahna
"""
from flask import Flask, render_template, flash, request
from wtforms import Form, TextField, TextAreaField, validators, StringField, SubmitField
import nltk
import numpy
from nltk.classify import SklearnClassifier
from sklearn.naive_bayes import BernoulliNB
from sklearn.svm import SVC
import webbrowser
from math import floor
from math import ceil
# App config.
DEBUG = True
app = Flask(__name__)
app.config.from_object(__name__)
app.config['SECRET_KEY'] = '7d441f27d441f27567d441f2b6176a'
class ReusableForm(Form):
name = TextField('Name:', validators=[validators.required()])
@app.route("/", methods=['GET', 'POST'])
def hello():
FoldCount = 10
f = open("amazon_data.txt")
AllDataList = list()
for line in f:
AllDataList.append(line.split("\t"))
f.close()
FoldCount = 10
L = list(range(0,len(AllDataList)-1)) # You can use this to find the number of lines in the file
ShuffledIndex = numpy.random.permutation(L) #Shuffling the range of indexes of the tweets for doing cross validation
partition = ceil((len(AllDataList)-1)/FoldCount)
CrossValidIndex = 0
Accuracy = []
Acc = 0
for iteration in range(10):
ctr = 0
TrainingFile = open("TrainingFile.txt", "w")
TestingFile = open("TestingFile.txt", "w")
f = open("amazon_data.txt")
for lines in f:
if CrossValidIndex <=ctr <= CrossValidIndex + partition:
TestingFile.write(lines)
ctr = ctr + 1
else:
TrainingFile.write(lines)
ctr = ctr + 1
TrainingFile.close()
TestingFile.close()
f.close()
CrossValidIndex = CrossValidIndex + partition + 1
TrainingFile = open("TrainingFile.txt")
pos_tweets = list()
neg_tweets = list()
for line in TrainingFile:
words = line.split("\t")
if words[1] == '0\n' or words[1] == '0':
neg_tweets.append(words)
else:
pos_tweets.append(words)
TrainingFile.close()
tweets = []
for (words, sentiment) in pos_tweets + neg_tweets:
words_filtered = [e.lower() for e in words.split() if len(e) >= 3]
tweets.append((words_filtered, sentiment))
def get_words_in_tweets(tweets):
all_words = []
for (words, sentiment) in tweets:
all_words.extend(words)
return all_words
def get_word_features(wordlist):
wordlist = nltk.FreqDist(wordlist)
word_features = wordlist.keys()
return word_features
word_features = get_word_features(get_words_in_tweets(tweets))
def extract_features(document):
document_words = set(document)
features = {}
for word in word_features:
features['contains(%s)' % word] = (word in document_words)
return features
training_set = nltk.classify.apply_features(extract_features, tweets)
classifie = nltk.NaiveBayesClassifier.train(training_set)
# classifie = SklearnClassifier(BernoulliNB()).train(training_set)
TestingFile = open("TestingFile.txt")
resfile = open("result_naive_bayes.txt", "w")
predicted = numpy.array([]);
actual = numpy.array([]);
index = 0
for line in TestingFile:
review = classifie.classify(extract_features(line.split()))
words = line.split("\t")
if len(words) >1 :
actual = numpy.insert(actual, index, int(words[1]))
predicted = numpy.insert(predicted, index, int(review))
#review+=words[1]
resfile.write(line)
resfile.write(review)
TestingFile.close()
confusion = actual - predicted
FP = numpy.count_nonzero(confusion==-1)
FN = numpy.count_nonzero(confusion==1)
Acc1 = numpy.count_nonzero(confusion==0)/(numpy.count_nonzero(confusion==0) + FP+ FN)
Accuracy.append(Acc1)
Acc = Acc + Acc1
#print (Accuracy)
resfile.write(str(Accuracy))
resfile.close()
MeanAcc = Acc/FoldCount
AccSTD = numpy.std(Accuracy)
print(MeanAcc)
print(AccSTD)
form = ReusableForm(request.form)
print (form.errors)
if request.method == 'POST':
name=request.form['name']
name = classifie.classify(extract_features(name.split()))
# name = str(predicted.size)
print (name)
if form.validate():
# Save the comment here.
flash(name)
else:
flash('Error: All the form fields are required. ')
return render_template('analysis.html', form=form)
if __name__ == "__main__":
url = 'http://127.0.0.1:5000'
webbrowser.open_new(url)
app.run()
| gpl-3.0 | 115,934,505,233,283,630 | 26.226415 | 117 | 0.654926 | false |
TJYee/Sandbox | yahtzee/yahtzee.py | 1 | 2567 | import random
class Die(object):
"""
A class used to represent a Die
...
Attributes
----------
__value : int
The current value of the Die (default is None)
side_min_value : int
The lowest side value (default is 1)
side_count : int
The number of sides on the Die (default is 6)
side_increment : int
The incremental side value between sides (default is 1)
Methods
-------
roll()
Rolls the Die to get a new random value of the Die
check_value()
Returns current value of Die
"""
__value = None
def __init__(self, side_min_value=1, side_count=6, side_increment=1):
"""
Parameters
----------
side_min_value : int, optional
The lowest side value (default is 1)
side_count : int, optional
The number of sides on the Die (default is 6)
side_increment : int, optional
The incremental side value between sides (default is 1)
"""
print("Called Die constructor")
self.side_min_value = side_min_value
self.side_count = side_count
self.side_increment = side_increment
def roll(self):
"""Rolls the Die to get a new random value of the Die"""
self.__value = random.randint(
self.side_min_value, self.side_count) * self.side_increment
def check_value(self):
"""Returns current value of Die"""
return self.__value
# Test Code
# die1 = Die(side_min_value=0, side_count=10, side_increment=10)
# print("Minimum side is: " + str(die1.side_min_value))
# print("Side count is: " + str(die1.side_count))
# print("Value is: " + str(die1.check_value()))
# die1.roll()
# print("Value is: " + str(die1.check_value()))
# die1.roll()
# print("Value is: " + str(die1.check_value()))
game_play = True
print("Beginning of Game Loop")
while(game_play):
print("Start of Game")
# Create empty list for dice
dice_list = []
for i in range(5):
dice_list.append(Die())
# Roll each Die in list to obtain a value
for die in dice_list:
die.roll()
print(die.check_value()) # Print value of Die
print("End of Game")
user_input = input("Would you like to continue to play? ")
while(not(user_input in ["yes", "no"])):
print("I do not understand that response.")
user_input = input("Would you like to continue to play? ")
if(user_input == "no"):
game_play = False
print("End of Game Loop.")
| mit | -6,398,496,092,662,879,000 | 27.208791 | 73 | 0.582392 | false |
GoogleChrome/big-rig | app/src/thirdparty/telemetry/internal/backends/mandoline/mandoline_browser_backend.py | 8 | 4613 | # Copyright 2015 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import re
from telemetry.core import exceptions
from telemetry.core import util
from telemetry.internal.backends import browser_backend
from telemetry.internal.backends.chrome import tab_list_backend
from telemetry.internal.backends.chrome_inspector import devtools_client_backend
from telemetry.util import wpr_modes
class MandolineBrowserBackend(browser_backend.BrowserBackend):
"""An abstract class for mandoline browser backends. Provides basic
functionality once a remote-debugger port has been established."""
# It is OK to have abstract methods. pylint: disable=W0223
def __init__(self, platform_backend, browser_options):
super(MandolineBrowserBackend, self).__init__(
platform_backend=platform_backend,
supports_extensions=False,
browser_options=browser_options,
tab_list_backend=tab_list_backend.TabListBackend)
self._port = None
self._devtools_client = None
# Some of the browser options are not supported by mandoline yet.
self._CheckUnsupportedBrowserOptions(browser_options)
@property
def devtools_client(self):
return self._devtools_client
def GetBrowserStartupArgs(self):
args = []
args.extend(self.browser_options.extra_browser_args)
return args
def HasBrowserFinishedLaunching(self):
assert self._port, 'No DevTools port info available.'
return devtools_client_backend.IsDevToolsAgentAvailable(self._port)
def _InitDevtoolsClientBackend(self, remote_devtools_port=None):
""" Initiates the devtool client backend which allows browser connection
through browser' devtool.
Args:
remote_devtools_port: The remote devtools port, if any. Otherwise assumed
to be the same as self._port.
"""
assert not self._devtools_client, (
'Devtool client backend cannot be init twice')
self._devtools_client = devtools_client_backend.DevToolsClientBackend(
self._port, remote_devtools_port or self._port, self)
def _WaitForBrowserToComeUp(self):
""" Waits for browser to come up. """
try:
timeout = self.browser_options.browser_startup_timeout
util.WaitFor(self.HasBrowserFinishedLaunching, timeout=timeout)
except (exceptions.TimeoutException, exceptions.ProcessGoneException) as e:
if not self.IsBrowserRunning():
raise exceptions.BrowserGoneException(self.browser, e)
raise exceptions.BrowserConnectionGoneException(self.browser, e)
@property
def browser_directory(self):
raise NotImplementedError()
@property
def profile_directory(self):
raise NotImplementedError()
@property
def supports_tab_control(self):
return False
@property
def supports_tracing(self):
return False
@property
def supports_system_info(self):
return False
def GetProcessName(self, cmd_line):
"""Returns a user-friendly name for the process of the given |cmd_line|."""
if not cmd_line:
return 'unknown'
m = re.search(r'\s--child-process(\s.*)?$', cmd_line)
if not m:
return 'browser'
return 'child-process'
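    # Illustrative mapping (assumed inputs): '' -> 'unknown',
    # 'mandoline app.mojo' -> 'browser',
    # 'mandoline --child-process --foo' -> 'child-process'.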
def Close(self):
if self._devtools_client:
self._devtools_client.Close()
self._devtools_client = None
def _CheckUnsupportedBrowserOptions(self, browser_options):
def _RaiseForUnsupportedOption(name):
raise Exception('BrowserOptions.%s is ignored. Value: %r'
% (name, getattr(browser_options, name)))
if browser_options.dont_override_profile:
_RaiseForUnsupportedOption('dont_override_profile')
if browser_options.profile_dir:
_RaiseForUnsupportedOption('profile_dir')
if browser_options.profile_type and browser_options.profile_type != 'clean':
_RaiseForUnsupportedOption('profile_type')
if browser_options.extra_wpr_args:
_RaiseForUnsupportedOption('extra_wpr_args')
if browser_options.wpr_mode != wpr_modes.WPR_OFF:
_RaiseForUnsupportedOption('wpr_mode')
if browser_options.netsim:
_RaiseForUnsupportedOption('netsim')
if not browser_options.disable_background_networking:
_RaiseForUnsupportedOption('disable_background_networking')
if browser_options.no_proxy_server:
_RaiseForUnsupportedOption('no_proxy_server')
if browser_options.browser_user_agent_type:
_RaiseForUnsupportedOption('browser_user_agent_type')
if browser_options.use_devtools_active_port:
_RaiseForUnsupportedOption('use_devtools_active_port')
| apache-2.0 | 1,090,366,271,570,663,000 | 33.17037 | 80 | 0.725992 | false |
luwensu/autotest-docker | subtests/docker_cli/top/top.py | 1 | 4045 | r"""
Summary
---------
Verify that output from docker top against a test-controlled container matches
content expectations.
Operational Summary
----------------------
#. start container
#. execute docker top against container
#. verify output
"""
from time import sleep
from time import time
from dockertest.subtest import SubSubtestCaller
from dockertest.subtest import SubSubtest
from dockertest.images import DockerImage
from dockertest.containers import DockerContainers
from dockertest.config import get_as_list
from dockertest.dockercmd import DockerCmd
from dockertest.output import DockerTime
class top(SubSubtestCaller):
pass
# This abstract base class is not referenced from this module
class base(SubSubtest): # pylint: disable=R0921
def init_run_dkrcmd(self):
# This should probably be non-blocking
raise NotImplementedError
def init_top_dkrcmd(self):
# This should probably be blocking
raise NotImplementedError
def get_run_name(self):
raise NotImplementedError
# TODO: Make cntnr_state part of container module?
def cntnr_state(self, name):
dc = self.sub_stuff['dc']
json = dc.json_by_name(name)[0]
state = json['State']
# Separate representation from implementation
# (really should use a named tuple)
return {'running': state['Running'],
'paused': state['Paused'],
'restarting': state['Restarting'],
'oom': state['OOMKilled'],
'exitcode': state['ExitCode'],
'error': state['Error'],
'finished': DockerTime(state['FinishedAt']),
'started': DockerTime(state['StartedAt']),
'pid': state['Pid']}
# TODO: Make is_running_cntnr part of container module?
def is_running_cntnr(self, name):
start = time()
end = start + self.config['docker_timeout']
while time() < end:
state = self.cntnr_state(name)
good = [state['running'] is True,
state['paused'] is False,
state['restarting'] is False,
state['oom'] is False,
state['exitcode'] <= 0,
state['error'] == "",
state['finished'] == DockerTime.UTC.EPOCH,
state['started'] > DockerTime.UTC.EPOCH,
state['pid'] > 0]
bad = [state['oom'] is True,
state['exitcode'] > 0,
state['error'] != "",
state['finished'] > DockerTime.UTC.EPOCH]
if all(good):
self.logdebug("Container %s confirmed running", name)
break
elif any(bad):
self.logdebug("Container %s has problems %s", name, state)
break
else:
# Don't busy-wait
sleep(0.1)
return all(good)
def initialize(self):
self.sub_stuff['dc'] = DockerContainers(self)
fqin = DockerImage.full_name_from_defaults(self.config)
self.sub_stuff['fqin'] = fqin
self.sub_stuff['run_options'] = (
get_as_list(self.config['run_options_csv']))
self.sub_stuff['run_options'] += ['--name', self.get_run_name()]
self.sub_stuff['top_options'] = (
get_as_list(self.config['top_options_csv']))
self.sub_stuff['containers'] = []
self.sub_stuff['run_dkrcmd'] = self.init_run_dkrcmd()
self.sub_stuff['top_dkrcmd'] = self.init_top_dkrcmd()
def run_once(self):
self.sub_stuff['run_dkrcmd'].execute()
self.failif(not self.is_running_cntnr(self.get_run_name()))
def postprocess(self):
raise NotImplementedError
def cleanup(self):
if self.config['remove_after_test']:
run_name = self.get_run_name()
DockerCmd(self, 'kill', [run_name]).execute()
sleep(1)
DockerCmd(self, 'rm', ['--force', run_name]).execute()
| gpl-2.0 | -3,137,200,714,642,721,300 | 33.57265 | 74 | 0.571075 | false |
jianghuaw/nova | nova/tests/unit/virt/hyperv/test_block_device_manager.py | 9 | 20392 | # Copyright (c) 2016 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import mock
from os_win import constants as os_win_const
from nova import exception
from nova.tests.unit.virt.hyperv import test_base
from nova.virt.hyperv import block_device_manager
from nova.virt.hyperv import constants
class BlockDeviceManagerTestCase(test_base.HyperVBaseTestCase):
"""Unit tests for the Hyper-V BlockDeviceInfoManager class."""
def setUp(self):
super(BlockDeviceManagerTestCase, self).setUp()
self._bdman = block_device_manager.BlockDeviceInfoManager()
def test_get_device_bus_scsi(self):
bdm = {'disk_bus': constants.CTRL_TYPE_SCSI,
'drive_addr': 0, 'ctrl_disk_addr': 2}
bus = self._bdman._get_device_bus(bdm)
self.assertEqual('0:0:0:2', bus.address)
def test_get_device_bus_ide(self):
bdm = {'disk_bus': constants.CTRL_TYPE_IDE,
'drive_addr': 0, 'ctrl_disk_addr': 1}
bus = self._bdman._get_device_bus(bdm)
self.assertEqual('0:1', bus.address)
@staticmethod
def _bdm_mock(**kwargs):
bdm = mock.MagicMock(**kwargs)
bdm.__contains__.side_effect = (
lambda attr: getattr(bdm, attr, None) is not None)
return bdm
@mock.patch.object(block_device_manager.objects, 'DiskMetadata')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_get_device_bus')
@mock.patch.object(block_device_manager.objects.BlockDeviceMappingList,
'get_by_instance_uuid')
def test_get_bdm_metadata(self, mock_get_by_inst_uuid, mock_get_device_bus,
mock_DiskMetadata):
mock_instance = mock.MagicMock()
root_disk = {'mount_device': mock.sentinel.dev0}
ephemeral = {'device_name': mock.sentinel.dev1}
block_device_info = {
'root_disk': root_disk,
'block_device_mapping': [
{'mount_device': mock.sentinel.dev2},
{'mount_device': mock.sentinel.dev3},
],
'ephemerals': [ephemeral],
}
bdm = self._bdm_mock(device_name=mock.sentinel.dev0, tag='taggy',
volume_id=mock.sentinel.uuid1)
eph = self._bdm_mock(device_name=mock.sentinel.dev1, tag='ephy',
volume_id=mock.sentinel.uuid2)
mock_get_by_inst_uuid.return_value = [
bdm, eph, self._bdm_mock(device_name=mock.sentinel.dev2, tag=None),
]
bdm_metadata = self._bdman.get_bdm_metadata(mock.sentinel.context,
mock_instance,
block_device_info)
mock_get_by_inst_uuid.assert_called_once_with(mock.sentinel.context,
mock_instance.uuid)
mock_get_device_bus.assert_has_calls(
[mock.call(root_disk), mock.call(ephemeral)], any_order=True)
mock_DiskMetadata.assert_has_calls(
[mock.call(bus=mock_get_device_bus.return_value,
serial=bdm.volume_id, tags=[bdm.tag]),
mock.call(bus=mock_get_device_bus.return_value,
serial=eph.volume_id, tags=[eph.tag])],
any_order=True)
self.assertEqual([mock_DiskMetadata.return_value] * 2, bdm_metadata)
@mock.patch('nova.virt.configdrive.required_by')
def test_init_controller_slot_counter_gen1_no_configdrive(
self, mock_cfg_drive_req):
mock_cfg_drive_req.return_value = False
slot_map = self._bdman._initialize_controller_slot_counter(
mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_1)
self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][0],
os_win_const.IDE_CONTROLLER_SLOTS_NUMBER)
self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][1],
os_win_const.IDE_CONTROLLER_SLOTS_NUMBER)
self.assertEqual(slot_map[constants.CTRL_TYPE_SCSI][0],
os_win_const.SCSI_CONTROLLER_SLOTS_NUMBER)
@mock.patch('nova.virt.configdrive.required_by')
def test_init_controller_slot_counter_gen1(self, mock_cfg_drive_req):
slot_map = self._bdman._initialize_controller_slot_counter(
mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_1)
self.assertEqual(slot_map[constants.CTRL_TYPE_IDE][1],
os_win_const.IDE_CONTROLLER_SLOTS_NUMBER - 1)
@mock.patch.object(block_device_manager.configdrive, 'required_by')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_initialize_controller_slot_counter')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_check_and_update_root_device')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_check_and_update_ephemerals')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_check_and_update_volumes')
def _check_validate_and_update_bdi(self, mock_check_and_update_vol,
mock_check_and_update_eph,
mock_check_and_update_root,
mock_init_ctrl_cntr,
mock_required_by, available_slots=1):
mock_required_by.return_value = True
slot_map = {constants.CTRL_TYPE_SCSI: [available_slots]}
mock_init_ctrl_cntr.return_value = slot_map
if available_slots:
self._bdman.validate_and_update_bdi(mock.sentinel.FAKE_INSTANCE,
mock.sentinel.IMAGE_META,
constants.VM_GEN_2,
mock.sentinel.BLOCK_DEV_INFO)
else:
self.assertRaises(exception.InvalidBDMFormat,
self._bdman.validate_and_update_bdi,
mock.sentinel.FAKE_INSTANCE,
mock.sentinel.IMAGE_META,
constants.VM_GEN_2,
mock.sentinel.BLOCK_DEV_INFO)
mock_init_ctrl_cntr.assert_called_once_with(
mock.sentinel.FAKE_INSTANCE, constants.VM_GEN_2)
mock_check_and_update_root.assert_called_once_with(
constants.VM_GEN_2, mock.sentinel.IMAGE_META,
mock.sentinel.BLOCK_DEV_INFO, slot_map)
mock_check_and_update_eph.assert_called_once_with(
constants.VM_GEN_2, mock.sentinel.BLOCK_DEV_INFO, slot_map)
mock_check_and_update_vol.assert_called_once_with(
constants.VM_GEN_2, mock.sentinel.BLOCK_DEV_INFO, slot_map)
mock_required_by.assert_called_once_with(mock.sentinel.FAKE_INSTANCE)
def test_validate_and_update_bdi(self):
self._check_validate_and_update_bdi()
def test_validate_and_update_bdi_insufficient_slots(self):
self._check_validate_and_update_bdi(available_slots=0)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_get_available_controller_slot')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'is_boot_from_volume')
def _test_check_and_update_root_device(self, mock_is_boot_from_vol,
mock_get_avail_ctrl_slot,
disk_format,
vm_gen=constants.VM_GEN_1,
boot_from_volume=False):
image_meta = mock.MagicMock(disk_format=disk_format)
bdi = {'root_device': '/dev/sda',
'block_device_mapping': [
{'mount_device': '/dev/sda',
'connection_info': mock.sentinel.FAKE_CONN_INFO}]}
mock_is_boot_from_vol.return_value = boot_from_volume
mock_get_avail_ctrl_slot.return_value = (0, 0)
self._bdman._check_and_update_root_device(vm_gen, image_meta, bdi,
mock.sentinel.SLOT_MAP)
root_disk = bdi['root_disk']
if boot_from_volume:
self.assertEqual(root_disk['type'], constants.VOLUME)
self.assertIsNone(root_disk['path'])
self.assertEqual(root_disk['connection_info'],
mock.sentinel.FAKE_CONN_INFO)
else:
image_type = self._bdman._TYPE_FOR_DISK_FORMAT.get(
image_meta.disk_format)
self.assertEqual(root_disk['type'], image_type)
self.assertIsNone(root_disk['path'])
self.assertIsNone(root_disk['connection_info'])
disk_bus = (constants.CTRL_TYPE_IDE if
vm_gen == constants.VM_GEN_1 else constants.CTRL_TYPE_SCSI)
self.assertEqual(root_disk['disk_bus'], disk_bus)
self.assertEqual(root_disk['drive_addr'], 0)
self.assertEqual(root_disk['ctrl_disk_addr'], 0)
self.assertEqual(root_disk['boot_index'], 0)
self.assertEqual(root_disk['mount_device'], bdi['root_device'])
mock_get_avail_ctrl_slot.assert_called_once_with(
root_disk['disk_bus'], mock.sentinel.SLOT_MAP)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'is_boot_from_volume', return_value=False)
def test_check_and_update_root_device_exception(self, mock_is_boot_vol):
bdi = {}
image_meta = mock.MagicMock(disk_format=mock.sentinel.fake_format)
self.assertRaises(exception.InvalidImageFormat,
self._bdman._check_and_update_root_device,
constants.VM_GEN_1, image_meta, bdi,
mock.sentinel.SLOT_MAP)
def test_check_and_update_root_device_gen1(self):
self._test_check_and_update_root_device(disk_format='vhd')
def test_check_and_update_root_device_gen1_vhdx(self):
self._test_check_and_update_root_device(disk_format='vhdx')
def test_check_and_update_root_device_gen1_iso(self):
self._test_check_and_update_root_device(disk_format='iso')
def test_check_and_update_root_device_gen2(self):
self._test_check_and_update_root_device(disk_format='vhd',
vm_gen=constants.VM_GEN_2)
def test_check_and_update_root_device_boot_from_vol_gen1(self):
self._test_check_and_update_root_device(disk_format='vhd',
boot_from_volume=True)
def test_check_and_update_root_device_boot_from_vol_gen2(self):
self._test_check_and_update_root_device(disk_format='vhd',
vm_gen=constants.VM_GEN_2,
boot_from_volume=True)
@mock.patch('nova.virt.configdrive.required_by', return_value=True)
def _test_get_available_controller_slot(self, mock_config_drive_req,
bus=constants.CTRL_TYPE_IDE,
fail=False):
slot_map = self._bdman._initialize_controller_slot_counter(
mock.sentinel.FAKE_VM, constants.VM_GEN_1)
if fail:
slot_map[constants.CTRL_TYPE_IDE][0] = 0
slot_map[constants.CTRL_TYPE_IDE][1] = 0
self.assertRaises(exception.InvalidBDMFormat,
self._bdman._get_available_controller_slot,
constants.CTRL_TYPE_IDE,
slot_map)
else:
(disk_addr,
ctrl_disk_addr) = self._bdman._get_available_controller_slot(
bus, slot_map)
self.assertEqual(0, disk_addr)
self.assertEqual(0, ctrl_disk_addr)
def test_get_available_controller_slot(self):
self._test_get_available_controller_slot()
def test_get_available_controller_slot_scsi_ctrl(self):
self._test_get_available_controller_slot(bus=constants.CTRL_TYPE_SCSI)
def test_get_available_controller_slot_exception(self):
self._test_get_available_controller_slot(fail=True)
def test_is_boot_from_volume_true(self):
vol = {'mount_device': self._bdman._DEFAULT_ROOT_DEVICE}
block_device_info = {'block_device_mapping': [vol]}
ret = self._bdman.is_boot_from_volume(block_device_info)
self.assertTrue(ret)
def test_is_boot_from_volume_false(self):
block_device_info = {'block_device_mapping': []}
ret = self._bdman.is_boot_from_volume(block_device_info)
self.assertFalse(ret)
def test_get_root_device_bdm(self):
mount_device = '/dev/sda'
bdm1 = {'mount_device': None}
bdm2 = {'mount_device': mount_device}
bdi = {'block_device_mapping': [bdm1, bdm2]}
ret = self._bdman._get_root_device_bdm(bdi, mount_device)
self.assertEqual(bdm2, ret)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_check_and_update_bdm')
def test_check_and_update_ephemerals(self, mock_check_and_update_bdm):
fake_ephemerals = [mock.sentinel.eph1, mock.sentinel.eph2,
mock.sentinel.eph3]
fake_bdi = {'ephemerals': fake_ephemerals}
expected_calls = []
for eph in fake_ephemerals:
expected_calls.append(mock.call(mock.sentinel.fake_slot_map,
mock.sentinel.fake_vm_gen,
eph))
self._bdman._check_and_update_ephemerals(mock.sentinel.fake_vm_gen,
fake_bdi,
mock.sentinel.fake_slot_map)
mock_check_and_update_bdm.assert_has_calls(expected_calls)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_check_and_update_bdm')
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_get_root_device_bdm')
def test_check_and_update_volumes(self, mock_get_root_dev_bdm,
mock_check_and_update_bdm):
fake_vol1 = {'mount_device': '/dev/sda'}
fake_vol2 = {'mount_device': '/dev/sdb'}
fake_volumes = [fake_vol1, fake_vol2]
fake_bdi = {'block_device_mapping': fake_volumes,
'root_disk': {'mount_device': '/dev/sda'}}
mock_get_root_dev_bdm.return_value = fake_vol1
self._bdman._check_and_update_volumes(mock.sentinel.fake_vm_gen,
fake_bdi,
mock.sentinel.fake_slot_map)
mock_get_root_dev_bdm.assert_called_once_with(fake_bdi, '/dev/sda')
mock_check_and_update_bdm.assert_called_once_with(
mock.sentinel.fake_slot_map, mock.sentinel.fake_vm_gen, fake_vol2)
self.assertNotIn(fake_vol1, fake_bdi)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_get_available_controller_slot')
def test_check_and_update_bdm_with_defaults(self, mock_get_ctrl_slot):
mock_get_ctrl_slot.return_value = ((mock.sentinel.DRIVE_ADDR,
mock.sentinel.CTRL_DISK_ADDR))
bdm = {'device_type': None,
'disk_bus': None,
'boot_index': None}
self._bdman._check_and_update_bdm(mock.sentinel.FAKE_SLOT_MAP,
constants.VM_GEN_1, bdm)
mock_get_ctrl_slot.assert_called_once_with(
bdm['disk_bus'], mock.sentinel.FAKE_SLOT_MAP)
self.assertEqual(mock.sentinel.DRIVE_ADDR, bdm['drive_addr'])
self.assertEqual(mock.sentinel.CTRL_DISK_ADDR, bdm['ctrl_disk_addr'])
self.assertEqual('disk', bdm['device_type'])
self.assertEqual(self._bdman._DEFAULT_BUS, bdm['disk_bus'])
self.assertIsNone(bdm['boot_index'])
def test_check_and_update_bdm_exception_device_type(self):
bdm = {'device_type': 'cdrom',
'disk_bus': 'IDE'}
self.assertRaises(exception.InvalidDiskInfo,
self._bdman._check_and_update_bdm,
mock.sentinel.FAKE_SLOT_MAP, constants.VM_GEN_1, bdm)
def test_check_and_update_bdm_exception_disk_bus(self):
bdm = {'device_type': 'disk',
'disk_bus': 'fake_bus'}
self.assertRaises(exception.InvalidDiskInfo,
self._bdman._check_and_update_bdm,
mock.sentinel.FAKE_SLOT_MAP, constants.VM_GEN_1, bdm)
def test_sort_by_boot_order(self):
original = [{'boot_index': 2}, {'boot_index': None}, {'boot_index': 1}]
expected = [original[2], original[0], original[1]]
self._bdman._sort_by_boot_order(original)
self.assertEqual(expected, original)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_get_boot_order_gen1')
def test_get_boot_order_gen1_vm(self, mock_get_boot_order):
self._bdman.get_boot_order(constants.VM_GEN_1,
mock.sentinel.BLOCK_DEV_INFO)
mock_get_boot_order.assert_called_once_with(
mock.sentinel.BLOCK_DEV_INFO)
@mock.patch.object(block_device_manager.BlockDeviceInfoManager,
'_get_boot_order_gen2')
def test_get_boot_order_gen2_vm(self, mock_get_boot_order):
self._bdman.get_boot_order(constants.VM_GEN_2,
mock.sentinel.BLOCK_DEV_INFO)
mock_get_boot_order.assert_called_once_with(
mock.sentinel.BLOCK_DEV_INFO)
def test_get_boot_order_gen1_iso(self):
fake_bdi = {'root_disk': {'type': 'iso'}}
expected = [os_win_const.BOOT_DEVICE_CDROM,
os_win_const.BOOT_DEVICE_HARDDISK,
os_win_const.BOOT_DEVICE_NETWORK,
os_win_const.BOOT_DEVICE_FLOPPY]
res = self._bdman._get_boot_order_gen1(fake_bdi)
self.assertEqual(expected, res)
def test_get_boot_order_gen1_vhd(self):
fake_bdi = {'root_disk': {'type': 'vhd'}}
expected = [os_win_const.BOOT_DEVICE_HARDDISK,
os_win_const.BOOT_DEVICE_CDROM,
os_win_const.BOOT_DEVICE_NETWORK,
os_win_const.BOOT_DEVICE_FLOPPY]
res = self._bdman._get_boot_order_gen1(fake_bdi)
self.assertEqual(expected, res)
@mock.patch('nova.virt.hyperv.volumeops.VolumeOps.get_disk_resource_path')
def test_get_boot_order_gen2(self, mock_get_disk_path):
fake_root_disk = {'boot_index': 0,
'path': mock.sentinel.FAKE_ROOT_PATH}
fake_eph1 = {'boot_index': 2,
'path': mock.sentinel.FAKE_EPH_PATH1}
fake_eph2 = {'boot_index': 3,
'path': mock.sentinel.FAKE_EPH_PATH2}
fake_bdm = {'boot_index': 1,
'connection_info': mock.sentinel.FAKE_CONN_INFO}
fake_bdi = {'root_disk': fake_root_disk,
'ephemerals': [fake_eph1,
fake_eph2],
'block_device_mapping': [fake_bdm]}
mock_get_disk_path.return_value = fake_bdm['connection_info']
expected_res = [mock.sentinel.FAKE_ROOT_PATH,
mock.sentinel.FAKE_CONN_INFO,
mock.sentinel.FAKE_EPH_PATH1,
mock.sentinel.FAKE_EPH_PATH2]
res = self._bdman._get_boot_order_gen2(fake_bdi)
self.assertEqual(expected_res, res)
| apache-2.0 | 8,594,211,752,975,120,000 | 45.557078 | 79 | 0.574539 | false |
JackDanger/luigi | luigi/contrib/hadoop.py | 4 | 34538 | # -*- coding: utf-8 -*-
#
# Copyright 2012-2015 Spotify AB
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
Run Hadoop Mapreduce jobs using Hadoop Streaming. To run a job, you need
to subclass :py:class:`luigi.contrib.hadoop.JobTask` and implement the
``mapper`` and ``reducer`` methods. See :doc:`/example_top_artists` for
an example of how to run a Hadoop job.
"""
from __future__ import print_function
import abc
import binascii
import datetime
import glob
import logging
import os
import pickle
import random
import re
import shutil
import signal
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import subprocess
import sys
import tempfile
import warnings
from hashlib import md5
from itertools import groupby
import cached_property
from luigi import six
from luigi import configuration
import luigi
import luigi.task
import luigi.contrib.hdfs
import luigi.s3
from luigi import mrrunner
if six.PY2:
from itertools import imap as map
try:
# See benchmark at https://gist.github.com/mvj3/02dca2bcc8b0ef1bbfb5
import ujson as json
except ImportError:
import json
logger = logging.getLogger('luigi-interface')
_attached_packages = [cached_property]
class hadoop(luigi.task.Config):
pool = luigi.Parameter(default=None,
                           description='Hadoop pool to use for Hadoop tasks. '
                                       'To specify pools per task, see '
'BaseHadoopJobTask.pool')
def attach(*packages):
"""
Attach a python package to hadoop map reduce tarballs to make those packages available
on the hadoop cluster.
"""
_attached_packages.extend(packages)
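# Usage sketch for attach() (the package name below is hypothetical): modules
# that your job imports but that are not installed on the cluster can be
# shipped explicitly:
#
#     import my_shared_lib
#     luigi.contrib.hadoop.attach(my_shared_lib)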
def dereference(f):
if os.path.islink(f):
# by joining with the dirname we are certain to get the absolute path
return dereference(os.path.join(os.path.dirname(f), os.readlink(f)))
else:
return f
def get_extra_files(extra_files):
result = []
for f in extra_files:
if isinstance(f, str):
src, dst = f, os.path.basename(f)
elif isinstance(f, tuple):
src, dst = f
else:
raise Exception()
if os.path.isdir(src):
src_prefix = os.path.join(src, '')
for base, dirs, files in os.walk(src):
for f in files:
f_src = os.path.join(base, f)
f_src_stripped = f_src[len(src_prefix):]
f_dst = os.path.join(dst, f_src_stripped)
result.append((f_src, f_dst))
else:
result.append((src, dst))
return result
def create_packages_archive(packages, filename):
"""
Create a tar archive which will contain the files for the packages listed in packages.
"""
import tarfile
tar = tarfile.open(filename, "w")
def add(src, dst):
logger.debug('adding to tar: %s -> %s', src, dst)
tar.add(src, dst)
def add_files_for_package(sub_package_path, root_package_path, root_package_name):
for root, dirs, files in os.walk(sub_package_path):
if '.svn' in dirs:
dirs.remove('.svn')
for f in files:
if not f.endswith(".pyc") and not f.startswith("."):
add(dereference(root + "/" + f), root.replace(root_package_path, root_package_name) + "/" + f)
for package in packages:
# Put a submodule's entire package in the archive. This is the
# magic that usually packages everything you need without
# having to attach packages/modules explicitly
if not getattr(package, "__path__", None) and '.' in package.__name__:
package = __import__(package.__name__.rpartition('.')[0], None, None, 'non_empty')
n = package.__name__.replace(".", "/")
if getattr(package, "__path__", None):
# TODO: (BUG) picking only the first path does not
# properly deal with namespaced packages in different
# directories
p = package.__path__[0]
if p.endswith('.egg') and os.path.isfile(p):
                raise Exception('egg files not supported!!!')
# Add the entire egg file
# p = p[:p.find('.egg') + 4]
# add(dereference(p), os.path.basename(p))
else:
# include __init__ files from parent projects
root = []
for parent in package.__name__.split('.')[0:-1]:
root.append(parent)
module_name = '.'.join(root)
directory = '/'.join(root)
add(dereference(__import__(module_name, None, None, 'non_empty').__path__[0] + "/__init__.py"),
directory + "/__init__.py")
add_files_for_package(p, p, n)
# include egg-info directories that are parallel:
for egg_info_path in glob.glob(p + '*.egg-info'):
logger.debug(
'Adding package metadata to archive for "%s" found at "%s"',
package.__name__,
egg_info_path
)
add_files_for_package(egg_info_path, p, n)
else:
f = package.__file__
if f.endswith("pyc"):
f = f[:-3] + "py"
if n.find(".") == -1:
add(dereference(f), os.path.basename(f))
else:
add(dereference(f), n + ".py")
tar.close()
def flatten(sequence):
"""
A simple generator which flattens a sequence.
Only one level is flattened.
.. code-block:: python
(1, (2, 3), 4) -> (1, 2, 3, 4)
"""
for item in sequence:
if hasattr(item, "__iter__") and not isinstance(item, str) and not isinstance(item, bytes):
for i in item:
yield i
else:
yield item
class HadoopRunContext(object):
def __init__(self):
self.job_id = None
def __enter__(self):
self.__old_signal = signal.getsignal(signal.SIGTERM)
signal.signal(signal.SIGTERM, self.kill_job)
return self
def kill_job(self, captured_signal=None, stack_frame=None):
if self.job_id:
logger.info('Job interrupted, killing job %s', self.job_id)
subprocess.call(['mapred', 'job', '-kill', self.job_id])
if captured_signal is not None:
# adding 128 gives the exit code corresponding to a signal
sys.exit(128 + captured_signal)
def __exit__(self, exc_type, exc_val, exc_tb):
if exc_type is KeyboardInterrupt:
self.kill_job()
signal.signal(signal.SIGTERM, self.__old_signal)
class HadoopJobError(RuntimeError):
def __init__(self, message, out=None, err=None):
super(HadoopJobError, self).__init__(message, out, err)
self.message = message
self.out = out
self.err = err
def run_and_track_hadoop_job(arglist, tracking_url_callback=None, env=None):
"""
Runs the job by invoking the command from the given arglist.
Finds tracking urls from the output and attempts to fetch errors using those urls if the job fails.
Throws HadoopJobError with information about the error
(including stdout and stderr from the process)
on failure and returns normally otherwise.
:param arglist:
:param tracking_url_callback:
:param env:
:return:
"""
logger.info('%s', ' '.join(arglist))
def write_luigi_history(arglist, history):
"""
Writes history to a file in the job's output directory in JSON format.
Currently just for tracking the job ID in a configuration where
no history is stored in the output directory by Hadoop.
"""
history_filename = configuration.get_config().get('core', 'history-filename', '')
if history_filename and '-output' in arglist:
output_dir = arglist[arglist.index('-output') + 1]
f = luigi.contrib.hdfs.HdfsTarget(os.path.join(output_dir, history_filename)).open('w')
f.write(json.dumps(history))
f.close()
def track_process(arglist, tracking_url_callback, env=None):
# Dump stdout to a temp file, poll stderr and log it
temp_stdout = tempfile.TemporaryFile('w+t')
proc = subprocess.Popen(arglist, stdout=temp_stdout, stderr=subprocess.PIPE, env=env, close_fds=True, universal_newlines=True)
# We parse the output to try to find the tracking URL.
# This URL is useful for fetching the logs of the job.
tracking_url = None
job_id = None
err_lines = []
with HadoopRunContext() as hadoop_context:
while proc.poll() is None:
err_line = proc.stderr.readline()
err_lines.append(err_line)
err_line = err_line.strip()
if err_line:
logger.info('%s', err_line)
err_line = err_line.lower()
if err_line.find('tracking url') != -1:
tracking_url = err_line.split('tracking url: ')[-1]
try:
tracking_url_callback(tracking_url)
except Exception as e:
logger.error("Error in tracking_url_callback, disabling! %s", e)
tracking_url_callback = lambda x: None
if err_line.find('running job') != -1:
# hadoop jar output
job_id = err_line.split('running job: ')[-1]
if err_line.find('submitted hadoop job:') != -1:
# scalding output
job_id = err_line.split('submitted hadoop job: ')[-1]
hadoop_context.job_id = job_id
# Read the rest + stdout
err = ''.join(err_lines + [err_line for err_line in proc.stderr])
temp_stdout.seek(0)
out = ''.join(temp_stdout.readlines())
if proc.returncode == 0:
write_luigi_history(arglist, {'job_id': job_id})
return (out, err)
# Try to fetch error logs if possible
message = 'Streaming job failed with exit code %d. ' % proc.returncode
if not tracking_url:
raise HadoopJobError(message + 'Also, no tracking url found.', out, err)
try:
task_failures = fetch_task_failures(tracking_url)
except Exception as e:
raise HadoopJobError(message + 'Additionally, an error occurred when fetching data from %s: %s' %
(tracking_url, e), out, err)
if not task_failures:
raise HadoopJobError(message + 'Also, could not fetch output from tasks.', out, err)
else:
raise HadoopJobError(message + 'Output from tasks below:\n%s' % task_failures, out, err)
if tracking_url_callback is None:
tracking_url_callback = lambda x: None
return track_process(arglist, tracking_url_callback, env)
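# Illustrative call (the argument list below is an assumption, not taken from
# this module); a streaming invocation might look like:
#
#     run_and_track_hadoop_job(
#         ['hadoop', 'jar', 'streaming.jar',
#          '-input', '/data/in', '-output', '/data/out'],
#         tracking_url_callback=lambda url: logger.info('tracking url: %s', url))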
def fetch_task_failures(tracking_url):
"""
Uses mechanize to fetch the actual task logs from the task tracker.
This is highly opportunistic, and we might not succeed.
So we set a low timeout and hope it works.
If it does not, it's not the end of the world.
TODO: Yarn has a REST API that we should probably use instead:
http://hadoop.apache.org/docs/current/hadoop-yarn/hadoop-yarn-site/WebServicesIntro.html
"""
import mechanize
timeout = 3.0
failures_url = tracking_url.replace('jobdetails.jsp', 'jobfailures.jsp') + '&cause=failed'
logger.debug('Fetching data from %s', failures_url)
b = mechanize.Browser()
b.open(failures_url, timeout=timeout)
links = list(b.links(text_regex='Last 4KB')) # For some reason text_regex='All' doesn't work... no idea why
links = random.sample(links, min(10, len(links))) # Fetch a random subset of all failed tasks, so not to be biased towards the early fails
error_text = []
for link in links:
task_url = link.url.replace('&start=-4097', '&start=-100000') # Increase the offset
logger.debug('Fetching data from %s', task_url)
b2 = mechanize.Browser()
try:
r = b2.open(task_url, timeout=timeout)
data = r.read()
except Exception as e:
logger.debug('Error fetching data from %s: %s', task_url, e)
continue
# Try to get the hex-encoded traceback back from the output
for exc in re.findall(r'luigi-exc-hex=[0-9a-f]+', data):
error_text.append('---------- %s:' % task_url)
error_text.append(exc.split('=')[-1].decode('hex'))
return '\n'.join(error_text)
class JobRunner(object):
run_job = NotImplemented
class HadoopJobRunner(JobRunner):
"""
Takes care of uploading & executing a Hadoop job using Hadoop streaming.
TODO: add code to support Elastic Mapreduce (using boto) and local execution.
"""
def __init__(self, streaming_jar, modules=None, streaming_args=None,
libjars=None, libjars_in_hdfs=None, jobconfs=None,
input_format=None, output_format=None,
end_job_with_atomic_move_dir=True):
def get(x, default):
return x is not None and x or default
self.streaming_jar = streaming_jar
self.modules = get(modules, [])
self.streaming_args = get(streaming_args, [])
self.libjars = get(libjars, [])
self.libjars_in_hdfs = get(libjars_in_hdfs, [])
self.jobconfs = get(jobconfs, {})
self.input_format = input_format
self.output_format = output_format
self.end_job_with_atomic_move_dir = end_job_with_atomic_move_dir
self.tmp_dir = False
def run_job(self, job):
packages = [luigi] + self.modules + job.extra_modules() + list(_attached_packages)
# find the module containing the job
packages.append(__import__(job.__module__, None, None, 'dummy'))
        # find the path to our runner.py
runner_path = mrrunner.__file__
# assume source is next to compiled
if runner_path.endswith("pyc"):
runner_path = runner_path[:-3] + "py"
base_tmp_dir = configuration.get_config().get('core', 'tmp-dir', None)
if base_tmp_dir:
warnings.warn("The core.tmp-dir configuration item is"
" deprecated, please use the TMPDIR"
" environment variable if you wish"
" to control where luigi.contrib.hadoop may"
" create temporary files and directories.")
self.tmp_dir = os.path.join(base_tmp_dir, 'hadoop_job_%016x' % random.getrandbits(64))
os.makedirs(self.tmp_dir)
else:
self.tmp_dir = tempfile.mkdtemp()
logger.debug("Tmp dir: %s", self.tmp_dir)
# build arguments
config = configuration.get_config()
python_executable = config.get('hadoop', 'python-executable', 'python')
map_cmd = '{0} mrrunner.py map'.format(python_executable)
cmb_cmd = '{0} mrrunner.py combiner'.format(python_executable)
red_cmd = '{0} mrrunner.py reduce'.format(python_executable)
output_final = job.output().path
# atomic output: replace output with a temporary work directory
if self.end_job_with_atomic_move_dir:
if isinstance(job.output(), luigi.s3.S3FlagTarget):
raise TypeError("end_job_with_atomic_move_dir is not supported"
" for S3FlagTarget")
output_hadoop = '{output}-temp-{time}'.format(
output=output_final,
time=datetime.datetime.now().isoformat().replace(':', '-'))
else:
output_hadoop = output_final
arglist = luigi.contrib.hdfs.load_hadoop_cmd() + ['jar', self.streaming_jar]
# 'libjars' is a generic option, so place it first
libjars = [libjar for libjar in self.libjars]
for libjar in self.libjars_in_hdfs:
run_cmd = luigi.contrib.hdfs.load_hadoop_cmd() + ['fs', '-get', libjar, self.tmp_dir]
logger.debug(' '.join(run_cmd))
subprocess.call(run_cmd)
libjars.append(os.path.join(self.tmp_dir, os.path.basename(libjar)))
if libjars:
arglist += ['-libjars', ','.join(libjars)]
# Add static files and directories
extra_files = get_extra_files(job.extra_files())
files = []
for src, dst in extra_files:
dst_tmp = '%s_%09d' % (dst.replace('/', '_'), random.randint(0, 999999999))
files += ['%s#%s' % (src, dst_tmp)]
# -files doesn't support subdirectories, so we need to create the dst_tmp -> dst manually
job.add_link(dst_tmp, dst)
if files:
arglist += ['-files', ','.join(files)]
jobconfs = job.jobconfs()
for k, v in six.iteritems(self.jobconfs):
jobconfs.append('%s=%s' % (k, v))
for conf in jobconfs:
arglist += ['-D', conf]
arglist += self.streaming_args
arglist += ['-mapper', map_cmd]
if job.combiner != NotImplemented:
arglist += ['-combiner', cmb_cmd]
if job.reducer != NotImplemented:
arglist += ['-reducer', red_cmd]
files = [runner_path, self.tmp_dir + '/packages.tar', self.tmp_dir + '/job-instance.pickle']
for f in files:
arglist += ['-file', f]
if self.output_format:
arglist += ['-outputformat', self.output_format]
if self.input_format:
arglist += ['-inputformat', self.input_format]
for target in luigi.task.flatten(job.input_hadoop()):
if not isinstance(target, luigi.contrib.hdfs.HdfsTarget) \
and not isinstance(target, luigi.s3.S3Target):
raise TypeError('target must be an HdfsTarget or S3Target')
arglist += ['-input', target.path]
if not isinstance(job.output(), luigi.contrib.hdfs.HdfsTarget) \
and not isinstance(job.output(), luigi.s3.S3FlagTarget):
raise TypeError('output must be an HdfsTarget or S3FlagTarget')
arglist += ['-output', output_hadoop]
# submit job
create_packages_archive(packages, self.tmp_dir + '/packages.tar')
job.dump(self.tmp_dir)
run_and_track_hadoop_job(arglist)
if self.end_job_with_atomic_move_dir:
luigi.contrib.hdfs.HdfsTarget(output_hadoop).move_dir(output_final)
self.finish()
def finish(self):
# FIXME: check for isdir?
if self.tmp_dir and os.path.exists(self.tmp_dir):
logger.debug('Removing directory %s', self.tmp_dir)
shutil.rmtree(self.tmp_dir)
def __del__(self):
self.finish()
class DefaultHadoopJobRunner(HadoopJobRunner):
"""
The default job runner just reads from config and sets stuff.
"""
def __init__(self):
config = configuration.get_config()
streaming_jar = config.get('hadoop', 'streaming-jar')
super(DefaultHadoopJobRunner, self).__init__(streaming_jar=streaming_jar)
# TODO: add more configurable options
class LocalJobRunner(JobRunner):
"""
Will run the job locally.
This is useful for debugging and also unit testing. Tries to mimic Hadoop Streaming.
TODO: integrate with JobTask
"""
def __init__(self, samplelines=None):
self.samplelines = samplelines
def sample(self, input_stream, n, output):
for i, line in enumerate(input_stream):
if n is not None and i >= n:
break
output.write(line)
def group(self, input_stream):
output = StringIO()
lines = []
for i, line in enumerate(input_stream):
parts = line.rstrip('\n').split('\t')
blob = md5(str(i).encode('ascii')).hexdigest() # pseudo-random blob to make sure the input isn't sorted
lines.append((parts[:-1], blob, line))
for _, _, line in sorted(lines):
output.write(line)
output.seek(0)
return output
def run_job(self, job):
map_input = StringIO()
for i in luigi.task.flatten(job.input_hadoop()):
self.sample(i.open('r'), self.samplelines, map_input)
map_input.seek(0)
if job.reducer == NotImplemented:
# Map only job; no combiner, no reducer
map_output = job.output().open('w')
job.run_mapper(map_input, map_output)
map_output.close()
return
job.init_mapper()
# run job now...
map_output = StringIO()
job.run_mapper(map_input, map_output)
map_output.seek(0)
if job.combiner == NotImplemented:
reduce_input = self.group(map_output)
else:
combine_input = self.group(map_output)
combine_output = StringIO()
job.run_combiner(combine_input, combine_output)
combine_output.seek(0)
reduce_input = self.group(combine_output)
job.init_reducer()
reduce_output = job.output().open('w')
job.run_reducer(reduce_input, reduce_output)
reduce_output.close()
class BaseHadoopJobTask(luigi.Task):
pool = luigi.Parameter(default=None, significant=False, positional=False)
# This value can be set to change the default batching increment. Default is 1 for backwards compatibility.
batch_counter_default = 1
final_mapper = NotImplemented
final_combiner = NotImplemented
final_reducer = NotImplemented
mr_priority = NotImplemented
_counter_dict = {}
task_id = None
def _get_pool(self):
""" Protected method """
if self.pool:
return self.pool
if hadoop().pool:
return hadoop().pool
@abc.abstractmethod
def job_runner(self):
pass
def jobconfs(self):
jcs = []
jcs.append('mapred.job.name="%s"' % self.task_id)
if self.mr_priority != NotImplemented:
jcs.append('mapred.job.priority=%s' % self.mr_priority())
pool = self._get_pool()
if pool is not None:
# Supporting two schedulers: fair (default) and capacity using the same option
scheduler_type = configuration.get_config().get('hadoop', 'scheduler', 'fair')
if scheduler_type == 'fair':
jcs.append('mapred.fairscheduler.pool=%s' % pool)
elif scheduler_type == 'capacity':
jcs.append('mapred.job.queue.name=%s' % pool)
return jcs
def init_local(self):
"""
Implement any work to setup any internal datastructure etc here.
You can add extra input using the requires_local/input_local methods.
Anything you set on the object will be pickled and available on the Hadoop nodes.
"""
pass
def init_hadoop(self):
pass
def run(self):
self.init_local()
self.job_runner().run_job(self)
def requires_local(self):
"""
Default impl - override this method if you need any local input to be accessible in init().
"""
return []
def requires_hadoop(self):
return self.requires() # default impl
def input_local(self):
return luigi.task.getpaths(self.requires_local())
def input_hadoop(self):
return luigi.task.getpaths(self.requires_hadoop())
def deps(self):
# Overrides the default implementation
return luigi.task.flatten(self.requires_hadoop()) + luigi.task.flatten(self.requires_local())
def on_failure(self, exception):
if isinstance(exception, HadoopJobError):
return """Hadoop job failed with message: {message}
stdout:
{stdout}
stderr:
{stderr}
""".format(message=exception.message, stdout=exception.out, stderr=exception.err)
else:
return super(BaseHadoopJobTask, self).on_failure(exception)
DataInterchange = {
"python": {"serialize": str,
"internal_serialize": repr,
"deserialize": eval},
"json": {"serialize": json.dumps,
"internal_serialize": json.dumps,
"deserialize": json.loads}
}
class JobTask(BaseHadoopJobTask):
n_reduce_tasks = 25
reducer = NotImplemented
# available formats are "python" and "json".
data_interchange_format = "python"
def jobconfs(self):
jcs = super(JobTask, self).jobconfs()
if self.reducer == NotImplemented:
jcs.append('mapred.reduce.tasks=0')
else:
jcs.append('mapred.reduce.tasks=%s' % self.n_reduce_tasks)
return jcs
@cached_property.cached_property
def serialize(self):
return DataInterchange[self.data_interchange_format]['serialize']
@cached_property.cached_property
def internal_serialize(self):
return DataInterchange[self.data_interchange_format]['internal_serialize']
@cached_property.cached_property
def deserialize(self):
return DataInterchange[self.data_interchange_format]['deserialize']
def init_mapper(self):
pass
def init_combiner(self):
pass
def init_reducer(self):
pass
def _setup_remote(self):
self._setup_links()
def job_runner(self):
# We recommend that you define a subclass, override this method and set up your own config
"""
Get the MapReduce runner for this job.
If all outputs are HdfsTargets, the DefaultHadoopJobRunner will be used.
Otherwise, the LocalJobRunner which streams all data through the local machine
will be used (great for testing).
"""
outputs = luigi.task.flatten(self.output())
for output in outputs:
if not isinstance(output, luigi.contrib.hdfs.HdfsTarget):
warnings.warn("Job is using one or more non-HdfsTarget outputs" +
" so it will be run in local mode")
return LocalJobRunner()
else:
return DefaultHadoopJobRunner()
def reader(self, input_stream):
"""
Reader is a method which iterates over input lines and outputs records.
The default implementation yields one argument containing the line for each line in the input."""
for line in input_stream:
yield line,
def writer(self, outputs, stdout, stderr=sys.stderr):
"""
        The writer is a method which iterates over the output records
from the reducer and formats them for output.
The default implementation outputs tab separated items.
"""
for output in outputs:
try:
output = flatten(output)
if self.data_interchange_format == "json":
# Only dump one json string, and skip another one, maybe key or value.
output = filter(lambda x: x, output)
else:
# JSON is already serialized, so we put `self.serialize` in a else statement.
output = map(self.serialize, output)
print("\t".join(output), file=stdout)
except:
print(output, file=stderr)
raise
def mapper(self, item):
"""
Re-define to process an input item (usually a line of input data).
Defaults to identity mapper that sends all lines to the same reducer.
"""
yield None, item
combiner = NotImplemented
def incr_counter(self, *args, **kwargs):
"""
Increments a Hadoop counter.
Since counters can be a bit slow to update, this batches the updates.
"""
threshold = kwargs.get("threshold", self.batch_counter_default)
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
key = (group_name,)
else:
group, name, count = args
key = (group, name)
ct = self._counter_dict.get(key, 0)
ct += count
if ct >= threshold:
new_arg = list(key) + [ct]
self._incr_counter(*new_arg)
ct = 0
self._counter_dict[key] = ct
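    # Usage sketch (hypothetical counter names): a mapper or reducer would
    # typically call
    #
    #     self.incr_counter('MyJob', 'lines_seen', 1)
    #
    # and the batched count is only reported once it reaches the threshold
    # (batch_counter_default by default) or when _flush_batch_incr_counter()
    # runs at the end of the map/reduce phase.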
def _flush_batch_incr_counter(self):
"""
Increments any unflushed counter values.
"""
for key, count in six.iteritems(self._counter_dict):
if count == 0:
continue
args = list(key) + [count]
self._incr_counter(*args)
def _incr_counter(self, *args):
"""
Increments a Hadoop counter.
Note that this seems to be a bit slow, ~1 ms
Don't overuse this function by updating very frequently.
"""
if len(args) == 2:
# backwards compatibility with existing hadoop jobs
group_name, count = args
print('reporter:counter:%s,%s' % (group_name, count), file=sys.stderr)
else:
group, name, count = args
print('reporter:counter:%s,%s,%s' % (group, name, count), file=sys.stderr)
def extra_modules(self):
return [] # can be overridden in subclass
def extra_files(self):
"""
        Can be overridden in a subclass.
Each element is either a string, or a pair of two strings (src, dst).
* `src` can be a directory (in which case everything will be copied recursively).
* `dst` can include subdirectories (foo/bar/baz.txt etc)
Uses Hadoop's -files option so that the same file is reused across tasks.
"""
return []
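    # A hedged example of an override (the paths below are placeholders only):
    #
    #     def extra_files(self):
    #         return [
    #             '/etc/my_job/lookup.txt',                      # shipped as lookup.txt
    #             ('/etc/my_job/settings.yml', 'conf/app.yml'),  # (src, dst) pair
    #         ]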
def add_link(self, src, dst):
if not hasattr(self, '_links'):
self._links = []
self._links.append((src, dst))
def _setup_links(self):
if hasattr(self, '_links'):
missing = []
for src, dst in self._links:
d = os.path.dirname(dst)
if d and not os.path.exists(d):
os.makedirs(d)
if not os.path.exists(src):
missing.append(src)
continue
if not os.path.exists(dst):
# If the combiner runs, the file might already exist,
# so no reason to create the link again
os.link(src, dst)
if missing:
raise HadoopJobError(
'Missing files for distributed cache: ' +
', '.join(missing))
def dump(self, directory=''):
"""
Dump instance to file.
"""
file_name = os.path.join(directory, 'job-instance.pickle')
if self.__module__ == '__main__':
d = pickle.dumps(self)
module_name = os.path.basename(sys.argv[0]).rsplit('.', 1)[0]
d = d.replace(b'(c__main__', "(c" + module_name)
open(file_name, "wb").write(d)
else:
pickle.dump(self, open(file_name, "wb"))
def _map_input(self, input_stream):
"""
Iterate over input and call the mapper for each item.
If the job has a parser defined, the return values from the parser will
be passed as arguments to the mapper.
If the input is coded output from a previous run,
        the arguments will be split into key and value.
"""
for record in self.reader(input_stream):
for output in self.mapper(*record):
yield output
if self.final_mapper != NotImplemented:
for output in self.final_mapper():
yield output
self._flush_batch_incr_counter()
def _reduce_input(self, inputs, reducer, final=NotImplemented):
"""
Iterate over input, collect values with the same key, and call the reducer for each unique key.
"""
for key, values in groupby(inputs, key=lambda x: self.internal_serialize(x[0])):
for output in reducer(self.deserialize(key), (v[1] for v in values)):
yield output
if final != NotImplemented:
for output in final():
yield output
self._flush_batch_incr_counter()
def run_mapper(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the mapper on the hadoop node.
"""
self.init_hadoop()
self.init_mapper()
outputs = self._map_input((line[:-1] for line in stdin))
if self.reducer == NotImplemented:
self.writer(outputs, stdout)
else:
self.internal_writer(outputs, stdout)
def run_reducer(self, stdin=sys.stdin, stdout=sys.stdout):
"""
Run the reducer on the hadoop node.
"""
self.init_hadoop()
self.init_reducer()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.reducer, self.final_reducer)
self.writer(outputs, stdout)
def run_combiner(self, stdin=sys.stdin, stdout=sys.stdout):
self.init_hadoop()
self.init_combiner()
outputs = self._reduce_input(self.internal_reader((line[:-1] for line in stdin)), self.combiner, self.final_combiner)
self.internal_writer(outputs, stdout)
def internal_reader(self, input_stream):
"""
Reader which uses python eval on each part of a tab separated string.
Yields a tuple of python objects.
"""
for input_line in input_stream:
yield list(map(self.deserialize, input_line.split("\t")))
def internal_writer(self, outputs, stdout):
"""
Writer which outputs the python repr for each item.
"""
for output in outputs:
print("\t".join(map(self.internal_serialize, output)), file=stdout)
| apache-2.0 | -1,559,398,363,594,478,000 | 34.242857 | 143 | 0.58411 | false |
canvasnetworks/canvas | website/drawquest/apps/palettes/tests.py | 2 | 1853 | from canvas.tests.tests_helpers import (CanvasTestCase, create_content, create_user, create_group, create_comment,
create_staff)
from drawquest.apps.drawquest_auth.models import AnonymousUser
from services import Services, override_service
class TestPalettes(CanvasTestCase):
def after_setUp(self):
self.user = create_user()
def _user_palettes(self):
resp = self.api_post('/api/palettes/user_palettes', user=self.user)
self.assertAPISuccess(resp)
return resp['palettes']
def _buyable(self):
resp = self.api_post('/api/palettes/purchasable_palettes', user=self.user)
self.assertAPISuccess(resp)
return resp['palettes']
def _user_has_palette(self, palette):
return palette['name'] in [p['name'] for p in self._user_palettes()]
def _purchase(self, palette):
return self.api_post('/api/palettes/purchase_palette', {
'palette_name': palette['name'],
'username': self.user.username,
}, user=self.user)
def test_purchase_with_insufficient_balance(self):
palette = self._buyable()[0]
self.assertAPIFailure(self._purchase(palette))
self.assertFalse(self._user_has_palette(palette))
def test_user_doesnt_have_other_palettes_before_purchasing(self):
palette = self._buyable()[0]
self.assertFalse(self._user_has_palette(palette))
def test_purchase(self):
palette = self._buyable()[0]
self.user.kv.stickers.currency.increment(palette['cost'])
self.assertAPISuccess(self._purchase(palette))
self.assertTrue(self._user_has_palette(palette))
def test_anonymous_palettes(self):
user = AnonymousUser()
palettes = self._user_palettes()
self.assertEqual(palettes[0]['name'], 'default')
| bsd-3-clause | 1,000,820,004,689,059,100 | 36.816327 | 114 | 0.651376 | false |
TomAugspurger/pandas | pandas/tests/indexes/datetimes/test_indexing.py | 1 | 23710 | from datetime import date, datetime, time, timedelta
import numpy as np
import pytest
import pandas as pd
from pandas import DatetimeIndex, Index, Timestamp, date_range, notna
import pandas._testing as tm
from pandas.core.indexes.base import InvalidIndexError
from pandas.tseries.offsets import BDay, CDay
START, END = datetime(2009, 1, 1), datetime(2010, 1, 1)
class TestGetItem:
def test_ellipsis(self):
# GH#21282
idx = pd.date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
result = idx[...]
assert result.equals(idx)
assert result is not idx
def test_getitem_slice_keeps_name(self):
# GH4226
st = pd.Timestamp("2013-07-01 00:00:00", tz="America/Los_Angeles")
et = pd.Timestamp("2013-07-02 00:00:00", tz="America/Los_Angeles")
dr = pd.date_range(st, et, freq="H", name="timebucket")
assert dr[1:].name == dr.name
def test_getitem(self):
idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx2 = pd.date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
for idx in [idx1, idx2]:
result = idx[0]
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx[0:5]
expected = pd.date_range(
"2011-01-01", "2011-01-05", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[0:10:2]
expected = pd.date_range(
"2011-01-01", "2011-01-09", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[-20:-5:3]
expected = pd.date_range(
"2011-01-12", "2011-01-24", freq="3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx[4::-1]
expected = DatetimeIndex(
["2011-01-05", "2011-01-04", "2011-01-03", "2011-01-02", "2011-01-01"],
freq="-1D",
tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
def test_dti_business_getitem(self):
rng = pd.bdate_range(START, END)
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="B")
tm.assert_index_equal(smaller, exp)
assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == BDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
def test_dti_business_getitem_matplotlib_hackaround(self):
rng = pd.bdate_range(START, END)
with tm.assert_produces_warning(DeprecationWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
expected = rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_dti_custom_getitem(self):
rng = pd.bdate_range(START, END, freq="C")
smaller = rng[:5]
exp = DatetimeIndex(rng.view(np.ndarray)[:5], freq="C")
tm.assert_index_equal(smaller, exp)
assert smaller.freq == exp.freq
assert smaller.freq == rng.freq
sliced = rng[::5]
assert sliced.freq == CDay() * 5
fancy_indexed = rng[[4, 3, 2, 1, 0]]
assert len(fancy_indexed) == 5
assert isinstance(fancy_indexed, DatetimeIndex)
assert fancy_indexed.freq is None
# 32-bit vs. 64-bit platforms
assert rng[4] == rng[np.int_(4)]
def test_dti_custom_getitem_matplotlib_hackaround(self):
rng = pd.bdate_range(START, END, freq="C")
with tm.assert_produces_warning(DeprecationWarning):
# GH#30588 multi-dimensional indexing deprecated
values = rng[:, None]
expected = rng.values[:, None]
tm.assert_numpy_array_equal(values, expected)
def test_getitem_int_list(self):
dti = date_range(start="1/1/2005", end="12/1/2005", freq="M")
dti2 = dti[[1, 3, 5]]
v1 = dti2[0]
v2 = dti2[1]
v3 = dti2[2]
assert v1 == Timestamp("2/28/2005")
assert v2 == Timestamp("4/30/2005")
assert v3 == Timestamp("6/30/2005")
# getitem with non-slice drops freq
assert dti2.freq is None
class TestWhere:
def test_where_doesnt_retain_freq(self):
dti = date_range("20130101", periods=3, freq="D", name="idx")
cond = [True, True, False]
expected = DatetimeIndex([dti[0], dti[1], dti[0]], freq=None, name="idx")
result = dti.where(cond, dti[::-1])
tm.assert_index_equal(result, expected)
def test_where_other(self):
# other is ndarray or Index
i = pd.date_range("20130101", periods=3, tz="US/Eastern")
for arr in [np.nan, pd.NaT]:
result = i.where(notna(i), other=np.nan)
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2)
tm.assert_index_equal(result, i2)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2), i2._values)
tm.assert_index_equal(result, i2)
def test_where_invalid_dtypes(self):
dti = pd.date_range("20130101", periods=3, tz="US/Eastern")
i2 = Index([pd.NaT, pd.NaT] + dti[2:].tolist())
with pytest.raises(TypeError, match="Where requires matching dtype"):
# passing tz-naive ndarray to tzaware DTI
dti.where(notna(i2), i2.values)
with pytest.raises(TypeError, match="Where requires matching dtype"):
# passing tz-aware DTI to tznaive DTI
dti.tz_localize(None).where(notna(i2), i2)
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.tz_localize(None).to_period("D"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.asi8.view("timedelta64[ns]"))
with pytest.raises(TypeError, match="Where requires matching dtype"):
dti.where(notna(i2), i2.asi8)
with pytest.raises(TypeError, match="Where requires matching dtype"):
# non-matching scalar
dti.where(notna(i2), pd.Timedelta(days=4))
def test_where_mismatched_nat(self, tz_aware_fixture):
tz = tz_aware_fixture
dti = pd.date_range("2013-01-01", periods=3, tz=tz)
cond = np.array([True, False, True])
msg = "Where requires matching dtype"
with pytest.raises(TypeError, match=msg):
# wrong-dtyped NaT
dti.where(cond, np.timedelta64("NaT", "ns"))
def test_where_tz(self):
i = pd.date_range("20130101", periods=3, tz="US/Eastern")
result = i.where(notna(i))
expected = i
tm.assert_index_equal(result, expected)
i2 = i.copy()
i2 = Index([pd.NaT, pd.NaT] + i[2:].tolist())
result = i.where(notna(i2))
expected = i2
tm.assert_index_equal(result, expected)
class TestTake:
def test_take(self):
# GH#10295
idx1 = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
idx2 = pd.date_range(
"2011-01-01", "2011-01-31", freq="D", tz="Asia/Tokyo", name="idx"
)
for idx in [idx1, idx2]:
result = idx.take([0])
assert result == Timestamp("2011-01-01", tz=idx.tz)
result = idx.take([0, 1, 2])
expected = pd.date_range(
"2011-01-01", "2011-01-03", freq="D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([0, 2, 4])
expected = pd.date_range(
"2011-01-01", "2011-01-05", freq="2D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([7, 4, 1])
expected = pd.date_range(
"2011-01-08", "2011-01-02", freq="-3D", tz=idx.tz, name="idx"
)
tm.assert_index_equal(result, expected)
assert result.freq == expected.freq
result = idx.take([3, 2, 5])
expected = DatetimeIndex(
["2011-01-04", "2011-01-03", "2011-01-06"],
freq=None,
tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq is None
result = idx.take([-3, 2, 5])
expected = DatetimeIndex(
["2011-01-29", "2011-01-03", "2011-01-06"],
freq=None,
tz=idx.tz,
name="idx",
)
tm.assert_index_equal(result, expected)
assert result.freq is None
def test_take_invalid_kwargs(self):
idx = pd.date_range("2011-01-01", "2011-01-31", freq="D", name="idx")
indices = [1, 6, 5, 9, 10, 13, 15, 3]
msg = r"take\(\) got an unexpected keyword argument 'foo'"
with pytest.raises(TypeError, match=msg):
idx.take(indices, foo=2)
msg = "the 'out' parameter is not supported"
with pytest.raises(ValueError, match=msg):
idx.take(indices, out=indices)
msg = "the 'mode' parameter is not supported"
with pytest.raises(ValueError, match=msg):
            idx.take(indices, mode="clip")

    # TODO: This method came from test_datetime; de-dup with version above
@pytest.mark.parametrize("tz", [None, "US/Eastern", "Asia/Tokyo"])
def test_take2(self, tz):
dates = [
datetime(2010, 1, 1, 14),
datetime(2010, 1, 1, 15),
datetime(2010, 1, 1, 17),
datetime(2010, 1, 1, 21),
]
idx = pd.date_range(
start="2010-01-01 09:00",
end="2010-02-01 09:00",
freq="H",
tz=tz,
name="idx",
)
expected = DatetimeIndex(dates, freq=None, name="idx", tz=tz)
taken1 = idx.take([5, 6, 8, 12])
taken2 = idx[[5, 6, 8, 12]]
for taken in [taken1, taken2]:
tm.assert_index_equal(taken, expected)
assert isinstance(taken, DatetimeIndex)
assert taken.freq is None
assert taken.tz == expected.tz
            assert taken.name == expected.name

    def test_take_fill_value(self):
# GH#12631
idx = pd.DatetimeIndex(["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx")
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(["2011-02-01", "2011-01-01", "NaT"], name="xxx")
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
            idx.take(np.array([1, -5]))

    def test_take_fill_value_with_timezone(self):
idx = pd.DatetimeIndex(
["2011-01-01", "2011-02-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
result = idx.take(np.array([1, 0, -1]))
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
# fill_value
result = idx.take(np.array([1, 0, -1]), fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "NaT"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
# allow_fill=False
result = idx.take(np.array([1, 0, -1]), allow_fill=False, fill_value=True)
expected = pd.DatetimeIndex(
["2011-02-01", "2011-01-01", "2011-03-01"], name="xxx", tz="US/Eastern"
)
tm.assert_index_equal(result, expected)
msg = (
"When allow_fill=True and fill_value is not None, "
"all indices must be >= -1"
)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -2]), fill_value=True)
with pytest.raises(ValueError, match=msg):
idx.take(np.array([1, 0, -5]), fill_value=True)
msg = "out of bounds"
with pytest.raises(IndexError, match=msg):
            idx.take(np.array([1, -5]))


class TestGetLoc:
@pytest.mark.parametrize("method", [None, "pad", "backfill", "nearest"])
def test_get_loc_method_exact_match(self, method):
idx = pd.date_range("2000-01-01", periods=3)
assert idx.get_loc(idx[1], method) == 1
assert idx.get_loc(idx[1].to_pydatetime(), method) == 1
assert idx.get_loc(str(idx[1]), method) == 1
if method is not None:
            assert idx.get_loc(idx[1], method, tolerance=pd.Timedelta("0 days")) == 1

    def test_get_loc(self):
idx = pd.date_range("2000-01-01", periods=3)
assert idx.get_loc("2000-01-01", method="nearest") == 0
assert idx.get_loc("2000-01-01T12", method="nearest") == 1
assert idx.get_loc("2000-01-01T12", method="nearest", tolerance="1 day") == 1
assert (
idx.get_loc("2000-01-01T12", method="nearest", tolerance=pd.Timedelta("1D"))
== 1
)
assert (
idx.get_loc(
"2000-01-01T12", method="nearest", tolerance=np.timedelta64(1, "D")
)
== 1
)
assert (
idx.get_loc("2000-01-01T12", method="nearest", tolerance=timedelta(1)) == 1
)
with pytest.raises(ValueError, match="unit abbreviation w/o a number"):
idx.get_loc("2000-01-01T12", method="nearest", tolerance="foo")
with pytest.raises(KeyError, match="'2000-01-01T03'"):
idx.get_loc("2000-01-01T03", method="nearest", tolerance="2 hours")
with pytest.raises(
ValueError, match="tolerance size must match target index size"
):
idx.get_loc(
"2000-01-01",
method="nearest",
tolerance=[
pd.Timedelta("1day").to_timedelta64(),
pd.Timedelta("1day").to_timedelta64(),
],
)
assert idx.get_loc("2000", method="nearest") == slice(0, 3)
assert idx.get_loc("2000-01", method="nearest") == slice(0, 3)
assert idx.get_loc("1999", method="nearest") == 0
assert idx.get_loc("2001", method="nearest") == 2
with pytest.raises(KeyError, match="'1999'"):
idx.get_loc("1999", method="pad")
with pytest.raises(KeyError, match="'2001'"):
idx.get_loc("2001", method="backfill")
with pytest.raises(KeyError, match="'foobar'"):
idx.get_loc("foobar")
with pytest.raises(InvalidIndexError, match=r"slice\(None, 2, None\)"):
idx.get_loc(slice(2))
idx = pd.to_datetime(["2000-01-01", "2000-01-04"])
assert idx.get_loc("2000-01-02", method="nearest") == 0
assert idx.get_loc("2000-01-03", method="nearest") == 1
assert idx.get_loc("2000-01", method="nearest") == slice(0, 2)
# time indexing
idx = pd.date_range("2000-01-01", periods=24, freq="H")
tm.assert_numpy_array_equal(
idx.get_loc(time(12)), np.array([12]), check_dtype=False
)
tm.assert_numpy_array_equal(
idx.get_loc(time(12, 30)), np.array([]), check_dtype=False
)
msg = "cannot yet lookup inexact labels when key is a time object"
with pytest.raises(NotImplementedError, match=msg):
            idx.get_loc(time(12, 30), method="pad")

    def test_get_loc_tz_aware(self):
# https://github.com/pandas-dev/pandas/issues/32140
dti = pd.date_range(
pd.Timestamp("2019-12-12 00:00:00", tz="US/Eastern"),
pd.Timestamp("2019-12-13 00:00:00", tz="US/Eastern"),
freq="5s",
)
key = pd.Timestamp("2019-12-12 10:19:25", tz="US/Eastern")
result = dti.get_loc(key, method="nearest")
        assert result == 7433

    def test_get_loc_nat(self):
# GH#20464
index = DatetimeIndex(["1/3/2000", "NaT"])
assert index.get_loc(pd.NaT) == 1
assert index.get_loc(None) == 1
assert index.get_loc(np.nan) == 1
assert index.get_loc(pd.NA) == 1
assert index.get_loc(np.datetime64("NaT")) == 1
with pytest.raises(KeyError, match="NaT"):
            index.get_loc(np.timedelta64("NaT"))

    @pytest.mark.parametrize("key", [pd.Timedelta(0), pd.Timedelta(1), timedelta(0)])
def test_get_loc_timedelta_invalid_key(self, key):
# GH#20464
dti = pd.date_range("1970-01-01", periods=10)
msg = "Cannot index DatetimeIndex with [Tt]imedelta"
with pytest.raises(TypeError, match=msg):
            dti.get_loc(key)

    def test_get_loc_reasonable_key_error(self):
# GH#1062
index = DatetimeIndex(["1/3/2000"])
with pytest.raises(KeyError, match="2000"):
            index.get_loc("1/1/2000")


class TestContains:
def test_dti_contains_with_duplicates(self):
d = datetime(2011, 12, 5, 20, 30)
ix = DatetimeIndex([d, d])
        assert d in ix

    @pytest.mark.parametrize(
"vals",
[
[0, 1, 0],
[0, 0, -1],
[0, -1, -1],
["2015", "2015", "2016"],
["2015", "2015", "2014"],
],
)
def test_contains_nonunique(self, vals):
# GH#9512
idx = DatetimeIndex(vals)
        assert idx[0] in idx


class TestGetIndexer:
def test_get_indexer(self):
idx = pd.date_range("2000-01-01", periods=3)
exp = np.array([0, 1, 2], dtype=np.intp)
tm.assert_numpy_array_equal(idx.get_indexer(idx), exp)
target = idx[0] + pd.to_timedelta(["-1 hour", "12 hours", "1 day 1 hour"])
tm.assert_numpy_array_equal(
idx.get_indexer(target, "pad"), np.array([-1, 0, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "backfill"), np.array([0, 1, 2], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest"), np.array([0, 1, 1], dtype=np.intp)
)
tm.assert_numpy_array_equal(
idx.get_indexer(target, "nearest", tolerance=pd.Timedelta("1 hour")),
np.array([0, -1, 1], dtype=np.intp),
)
tol_raw = [
pd.Timedelta("1 hour"),
pd.Timedelta("1 hour"),
pd.Timedelta("1 hour").to_timedelta64(),
]
tm.assert_numpy_array_equal(
idx.get_indexer(
target, "nearest", tolerance=[np.timedelta64(x) for x in tol_raw]
),
np.array([0, -1, 1], dtype=np.intp),
)
tol_bad = [
pd.Timedelta("2 hour").to_timedelta64(),
pd.Timedelta("1 hour").to_timedelta64(),
"foo",
]
with pytest.raises(ValueError, match="abbreviation w/o a number"):
idx.get_indexer(target, "nearest", tolerance=tol_bad)
with pytest.raises(ValueError, match="abbreviation w/o a number"):
            idx.get_indexer(idx[[0]], method="nearest", tolerance="foo")

    @pytest.mark.parametrize(
"target",
[
[date(2020, 1, 1), pd.Timestamp("2020-01-02")],
[pd.Timestamp("2020-01-01"), date(2020, 1, 2)],
],
)
def test_get_indexer_mixed_dtypes(self, target):
# https://github.com/pandas-dev/pandas/issues/33741
values = pd.DatetimeIndex(
[pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]
)
result = values.get_indexer(target)
expected = np.array([0, 1], dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)

    @pytest.mark.parametrize(
"target, positions",
[
([date(9999, 1, 1), pd.Timestamp("2020-01-01")], [-1, 0]),
([pd.Timestamp("2020-01-01"), date(9999, 1, 1)], [0, -1]),
([date(9999, 1, 1), date(9999, 1, 1)], [-1, -1]),
],
)
def test_get_indexer_out_of_bounds_date(self, target, positions):
values = pd.DatetimeIndex(
[pd.Timestamp("2020-01-01"), pd.Timestamp("2020-01-02")]
)
result = values.get_indexer(target)
expected = np.array(positions, dtype=np.intp)
        tm.assert_numpy_array_equal(result, expected)


class TestMaybeCastSliceBound:
def test_maybe_cast_slice_bounds_empty(self):
# GH#14354
empty_idx = date_range(freq="1H", periods=0, end="2015")
right = empty_idx._maybe_cast_slice_bound("2015-01-02", "right", "loc")
exp = Timestamp("2015-01-02 23:59:59.999999999")
assert right == exp
left = empty_idx._maybe_cast_slice_bound("2015-01-02", "left", "loc")
exp = Timestamp("2015-01-02 00:00:00")
        assert left == exp

    def test_maybe_cast_slice_duplicate_monotonic(self):
# https://github.com/pandas-dev/pandas/issues/16515
idx = DatetimeIndex(["2017", "2017"])
result = idx._maybe_cast_slice_bound("2017-01-01", "left", "loc")
expected = Timestamp("2017-01-01")
        assert result == expected


class TestDatetimeIndex:
def test_get_value(self):
# specifically make sure we have test for np.datetime64 key
dti = pd.date_range("2016-01-01", periods=3)
arr = np.arange(6, 9)
ser = pd.Series(arr, index=dti)
key = dti[1]
with pytest.raises(AttributeError, match="has no attribute '_values'"):
with tm.assert_produces_warning(FutureWarning):
dti.get_value(arr, key)
with tm.assert_produces_warning(FutureWarning):
result = dti.get_value(ser, key)
assert result == 7
with tm.assert_produces_warning(FutureWarning):
result = dti.get_value(ser, key.to_pydatetime())
assert result == 7
with tm.assert_produces_warning(FutureWarning):
result = dti.get_value(ser, key.to_datetime64())
assert result == 7
| bsd-3-clause | -7,807,238,278,642,369,000 | 35.143293 | 88 | 0.548714 | false |
TheMOOCAgency/edx-platform | common/djangoapps/third_party_auth/middleware.py | 10 | 2526 | """Middleware classes for third_party_auth."""
from social.apps.django_app.middleware import SocialAuthExceptionMiddleware

from . import pipeline


class ExceptionMiddleware(SocialAuthExceptionMiddleware):
"""Custom middleware that handles conditional redirection."""
def get_redirect_uri(self, request, exception):
# Fall back to django settings's SOCIAL_AUTH_LOGIN_ERROR_URL.
redirect_uri = super(ExceptionMiddleware, self).get_redirect_uri(request, exception)
# Safe because it's already been validated by
# pipeline.parse_query_params. If that pipeline step ever moves later
# in the pipeline stack, we'd need to validate this value because it
# would be an injection point for attacker data.
auth_entry = request.session.get(pipeline.AUTH_ENTRY_KEY)
# Check if we have an auth entry key we can use instead
if auth_entry and auth_entry in pipeline.AUTH_DISPATCH_URLS:
redirect_uri = pipeline.AUTH_DISPATCH_URLS[auth_entry]
        return redirect_uri


class PipelineQuarantineMiddleware(object):
"""
Middleware flushes the session if a user agent with a quarantined session
attempts to leave the quarantined set of views.
"""
def process_view(self, request, view_func, view_args, view_kwargs): # pylint: disable=unused-argument
"""
Check the session to see if we've quarantined the user to a particular
step of the authentication pipeline; if so, look up which modules the
user is allowed to browse to without breaking the pipeline. If the view
that's been requested is outside those modules, then flush the session.
In general, this middleware should be used in cases where allowing the
user to exit the running pipeline would be undesirable, and where it'd
be better to flush the session state rather than allow it. Pipeline
quarantining is utilized by the Enterprise application to enforce
collection of user consent for sharing data with a linked third-party
authentication provider.
"""
running_pipeline = request.session.get('partial_pipeline')
if not running_pipeline:
return
view_module = view_func.__module__
quarantined_modules = request.session.get('third_party_auth_quarantined_modules', None)
if quarantined_modules is not None and not any(view_module.startswith(mod) for mod in quarantined_modules):
request.session.flush()
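# Illustrative sketch (not part of the original module): a pipeline step can
# quarantine the session by listing the view-module prefixes that should remain
# reachable; a request routed to any other view is flushed by the middleware
# above. The module path shown here is hypothetical.
#
#     request.session['third_party_auth_quarantined_modules'] = (
#         'some_app.views.consent',
#     )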
| agpl-3.0 | -6,565,105,050,527,088,000 | 44.107143 | 115 | 0.706651 | false |
canwe/NewsBlur | apps/rss_feeds/migrations/0010_stories_per_month.py | 18 | 11962 | # encoding: utf-8
import datetime
from south.db import db
from south.v2 import SchemaMigration
from django.db import models


class Migration(SchemaMigration):
def forwards(self, orm):
# Adding model 'StoriesPerMonth'
db.create_table('rss_feeds_storiespermonth', (
('id', self.gf('django.db.models.fields.AutoField')(primary_key=True)),
('feed', self.gf('django.db.models.fields.related.ForeignKey')(related_name='stories_per_month', to=orm['rss_feeds.Feed'])),
('year', self.gf('django.db.models.fields.IntegerField')()),
('month', self.gf('django.db.models.fields.IntegerField')()),
('story_count', self.gf('django.db.models.fields.IntegerField')()),
('beginning_of_month', self.gf('django.db.models.fields.DateTimeField')(default=datetime.datetime.now)),
))
db.send_create_signal('rss_feeds', ['StoriesPerMonth'])
# Renaming field 'Feed.stories_per_month'
db.rename_column('feeds', 'stories_per_month', 'stories_last_month')
# Adding field 'Feed.average_stories_per_month'
db.add_column('feeds', 'average_stories_per_month', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# Adding field 'Feed.stories_last_year'
db.add_column('feeds', 'stories_last_year', self.gf('django.db.models.fields.CharField')(max_length=1024, null=True, blank=True), keep_default=False)
# Changing field 'Feed.feed_link'
db.alter_column('feeds', 'feed_link', self.gf('django.db.models.fields.URLField')(max_length=1000, null=True, blank=True))
# Changing field 'Story.story_tags'
db.alter_column('stories', 'story_tags', self.gf('django.db.models.fields.CharField')(max_length=2000, null=True, blank=True))
# Adding unique constraint on 'Story', fields ['story_feed', 'story_guid_hash']
        # db.create_unique('stories', ['story_feed_id', 'story_guid_hash'])

    def backwards(self, orm):
# Deleting model 'StoriesPerMonth'
db.delete_table('rss_feeds_storiespermonth')
# Adding field 'Feed.stories_per_month'
db.add_column('feeds', 'stories_per_month', self.gf('django.db.models.fields.IntegerField')(default=0), keep_default=False)
# rename field 'Feed.stories_last_month'
db.rename_column('feeds', 'stories_last_month', 'stories_per_month')
# Deleting field 'Feed.average_stories_per_month'
db.delete_column('feeds', 'average_stories_per_month')
# Deleting field 'Feed.stories_last_year'
db.delete_column('feeds', 'stories_last_year')
# Changing field 'Feed.feed_link'
db.alter_column('feeds', 'feed_link', self.gf('django.db.models.fields.URLField')(max_length=200, null=True, blank=True))
# Changing field 'Story.story_tags'
db.alter_column('stories', 'story_tags', self.gf('django.db.models.fields.CharField')(max_length=2000))
# Removing unique constraint on 'Story', fields ['story_feed', 'story_guid_hash']
        db.delete_unique('stories', ['story_feed_id', 'story_guid_hash'])

    models = {
'rss_feeds.feed': {
'Meta': {'object_name': 'Feed', 'db_table': "'feeds'"},
'active': ('django.db.models.fields.BooleanField', [], {'default': 'True', 'blank': 'True'}),
'average_stories_per_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'creation': ('django.db.models.fields.DateField', [], {'auto_now_add': 'True', 'blank': 'True'}),
'days_to_trim': ('django.db.models.fields.IntegerField', [], {'default': '90'}),
'etag': ('django.db.models.fields.CharField', [], {'max_length': '50', 'null': 'True', 'blank': 'True'}),
'feed_address': ('django.db.models.fields.URLField', [], {'unique': 'True', 'max_length': '255'}),
'feed_link': ('django.db.models.fields.URLField', [], {'default': "''", 'max_length': '1000', 'null': 'True', 'blank': 'True'}),
'feed_tagline': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'feed_title': ('django.db.models.fields.CharField', [], {'default': "''", 'max_length': '255', 'null': 'True', 'blank': 'True'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'last_load_time': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'last_modified': ('django.db.models.fields.DateTimeField', [], {'null': 'True', 'blank': 'True'}),
'last_update': ('django.db.models.fields.DateTimeField', [], {'default': '0', 'auto_now': 'True', 'blank': 'True'}),
'min_to_decay': ('django.db.models.fields.IntegerField', [], {'default': '15'}),
'next_scheduled_update': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'num_subscribers': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'popular_authors': ('django.db.models.fields.CharField', [], {'max_length': '2048', 'null': 'True', 'blank': 'True'}),
'popular_tags': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'}),
'stories_last_month': ('django.db.models.fields.IntegerField', [], {'default': '0'}),
'stories_last_year': ('django.db.models.fields.CharField', [], {'max_length': '1024', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedfetchhistory': {
'Meta': {'object_name': 'FeedFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'feed_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedpage': {
'Meta': {'object_name': 'FeedPage'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_page'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'page_data': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.feedupdatehistory': {
'Meta': {'object_name': 'FeedUpdateHistory'},
'average_per_feed': ('django.db.models.fields.DecimalField', [], {'max_digits': '4', 'decimal_places': '1'}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'number_of_feeds': ('django.db.models.fields.IntegerField', [], {}),
'seconds_taken': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.feedxml': {
'Meta': {'object_name': 'FeedXML'},
'feed': ('django.db.models.fields.related.OneToOneField', [], {'related_name': "'feed_xml'", 'unique': 'True', 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'rss_xml': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'})
},
'rss_feeds.pagefetchhistory': {
'Meta': {'object_name': 'PageFetchHistory'},
'exception': ('django.db.models.fields.TextField', [], {'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'page_fetch_history'", 'to': "orm['rss_feeds.Feed']"}),
'fetch_date': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'message': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'status_code': ('django.db.models.fields.CharField', [], {'max_length': '10', 'null': 'True', 'blank': 'True'})
},
'rss_feeds.storiespermonth': {
'Meta': {'object_name': 'StoriesPerMonth'},
'beginning_of_month': ('django.db.models.fields.DateTimeField', [], {'default': 'datetime.datetime.now'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories_per_month'", 'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'month': ('django.db.models.fields.IntegerField', [], {}),
'story_count': ('django.db.models.fields.IntegerField', [], {}),
'year': ('django.db.models.fields.IntegerField', [], {})
},
'rss_feeds.story': {
'Meta': {'unique_together': "(('story_feed', 'story_guid_hash'),)", 'object_name': 'Story', 'db_table': "'stories'"},
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'story_author': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.StoryAuthor']"}),
'story_author_name': ('django.db.models.fields.CharField', [], {'max_length': '500', 'null': 'True', 'blank': 'True'}),
'story_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_content_type': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'story_date': ('django.db.models.fields.DateTimeField', [], {}),
'story_feed': ('django.db.models.fields.related.ForeignKey', [], {'related_name': "'stories'", 'to': "orm['rss_feeds.Feed']"}),
'story_guid': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_guid_hash': ('django.db.models.fields.CharField', [], {'max_length': '40'}),
'story_original_content': ('utils.compressed_textfield.StoryField', [], {'null': 'True', 'blank': 'True'}),
'story_past_trim_date': ('django.db.models.fields.BooleanField', [], {'default': 'False', 'blank': 'True'}),
'story_permalink': ('django.db.models.fields.CharField', [], {'max_length': '1000'}),
'story_tags': ('django.db.models.fields.CharField', [], {'max_length': '2000', 'null': 'True', 'blank': 'True'}),
'story_title': ('django.db.models.fields.CharField', [], {'max_length': '255'}),
'tags': ('django.db.models.fields.related.ManyToManyField', [], {'to': "orm['rss_feeds.Tag']", 'symmetrical': 'False'})
},
'rss_feeds.storyauthor': {
'Meta': {'object_name': 'StoryAuthor'},
'author_name': ('django.db.models.fields.CharField', [], {'max_length': '255', 'null': 'True', 'blank': 'True'}),
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'})
},
'rss_feeds.tag': {
'Meta': {'object_name': 'Tag'},
'feed': ('django.db.models.fields.related.ForeignKey', [], {'to': "orm['rss_feeds.Feed']"}),
'id': ('django.db.models.fields.AutoField', [], {'primary_key': 'True'}),
'name': ('django.db.models.fields.CharField', [], {'max_length': '255'})
}
    }

    complete_apps = ['rss_feeds']
| mit | 2,089,193,026,125,378,000 | 68.953216 | 157 | 0.57248 | false |
klugjohannes/alembic-sqlite | alembic_sqlite/op.py | 1 | 1388 | from alembic import op
import sqlalchemy as sa


def drop_column_sqlite(tablename, columns):
""" column dropping functionality for SQLite """
# we need copy to make a deep copy of the column attributes
from copy import copy
# get the db engine and reflect database tables
engine = op.get_bind()
meta = sa.MetaData(bind=engine)
meta.reflect()
# create a select statement from the old table
old_table = meta.tables[tablename]
select = sa.sql.select([c for c in old_table.c if c.name not in columns])
# get remaining columns without table attribute attached
remaining_columns = [copy(c) for c in old_table.columns
if c.name not in columns]
for column in remaining_columns:
column.table = None
# create a temporary new table
new_tablename = '{0}_new'.format(tablename)
op.create_table(new_tablename, *remaining_columns)
meta.reflect()
new_table = meta.tables[new_tablename]
# copy data from old table
insert = sa.sql.insert(new_table).from_select(
[c.name for c in remaining_columns], select)
engine.execute(insert)
# drop the old table and rename the new table to take the old tables
# position
op.drop_table(tablename)
    op.rename_table(new_tablename, tablename)


def drop_column(tablename, columnname):
drop_column_sqlite(tablename, [columnname])
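# Illustrative usage inside an Alembic migration script (a sketch, not part of
# this module; the table and column names below are hypothetical):
#
#     from alembic_sqlite.op import drop_column
#
#     def upgrade():
#         drop_column('users', 'legacy_flag')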
| mit | 6,067,963,066,410,273,000 | 30.545455 | 77 | 0.68804 | false |