ext (stringclasses, 9 values) | sha (stringlengths 40-40) | content (stringlengths 3-1.04M) |
---|---|---|
py | b416fe94a66980eb73b1a0995743affb84d2220c | # -*- coding: utf-8 -*-
"""
Working with strings
Single quotes and double quotes both denote string literals.
A string opened with three double or single quotes may span multiple lines.
A backslash \ introduces an escape sequence.
Author: cjp
Version: 1.0
"""
s1 = 'hello,s1'
s2 = "hello,s2"
# Triple-quoted string spanning multiple lines
s3 = """
hello,
s3
"""
print(s1, s2, s3, end='-----------------\n')
"""
A backslash can be followed by an octal or hexadecimal value,
or by a Unicode code point.
"""
s5 = '\141\142\143\x61\x62\x63'
s6 = '\u9a86\u660a'
print(s5, s6, end='\n-----------------\n')
"""
Python provides a rich set of operators for strings: the + operator concatenates strings,
the * operator repeats a string,
in and not in test whether one string contains another (membership),
and [] and [:] extract single characters or substrings (indexing and slicing).
"""
s7 = 'hello ' * 3  # repeat the preceding string 3 times
print(s7)  # hello hello hello
s8 = 'world'
s7 += s8
print(s7)  # hello hello hello world
print('ll' in s7)  # True
print('good' not in s7)  # True
str2 = 'abc123456'
print(str2[:-1])  # abc12345 - everything except the last character
# Pick the character at a given position (indexing)
print(str2[2])  # c
# String slicing (from a start index up to, but not including, an end index)
print(str2[2:5])  # c12
print(str2[2:])  # c123456
# Starting at index 2, take every second character:
print(str2[2::2])  # c246
# Starting at index 0, take every second character:
print(str2[::2])  # ac246
print(str2[::-2])  # 642ca - every second character, walking backwards
# Step -1 takes every character in reverse
print(str2[::-1])  # 654321cba - the reversed string
print(''.join(reversed(str2)))  # reversal using reversed()
print(str2[-3:-1])  # 45 |
py | b416ff576814841e2839a2dba53143627e37723f | #
# --------------------------------------------------------------------------------------------------------------------
# <copyright company="Aspose" file="test_read_project.py">
# Copyright (c) 2020 Aspose.Tasks Cloud
# </copyright>
# <summary>
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# </summary>
# --------------------------------------------------------------------------------------------------------------------
#
from asposetaskscloud import GetProjectIdsRequest, ProjectIdsResponse, GetTaskDocumentRequest
from test.base_test_context import BaseTestContext
class TestReadProject(BaseTestContext):
def test_get_project_ids(self):
filename = 'p6_multiproject.xml'
self.upload_file(filename)
get_request = GetProjectIdsRequest(filename)
get_result = self.tasks_api.get_project_ids(get_request)
self.assertIsNotNone(get_result)
self.assertIsInstance(get_result, ProjectIdsResponse)
self.assertEqual(['1', '111'], get_result.project_ids)
def test_get_task_document(self):
filename = 'testXer.xer'
self.upload_file(filename)
get_request = GetTaskDocumentRequest(filename)
get_result = self.tasks_api.get_task_document(get_request)
self.assertIsNotNone(get_result)
with open(get_result) as f:
self.assertTrue(f.readable())
|
py | b416ff60f50d0143233a1e676c08c96674f0ceb2 | # Copyright 2014 Cloudbase Solutions Srl
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import functools
import os
import re
import tempfile
import uuid
from oslo_log import log as oslo_logging
from cloudbaseinit.osutils import factory as osutils_factory
from cloudbaseinit.plugins.common import base
LOG = oslo_logging.getLogger(__name__)
# used with ec2 config files (xmls)
SCRIPT_TAG = 1
POWERSHELL_TAG = 2
# regexp and temporary file extension for each tag
TAG_REGEX = {
SCRIPT_TAG: (
re.compile(br"<script>([\s\S]+?)</script>"),
"cmd"
),
POWERSHELL_TAG: (
re.compile(br"<powershell>([\s\S]+?)</powershell>"),
"ps1"
)
}
NO_REBOOT = 0
# important return values range
RET_START = 1001
RET_END = 1003
def _ec2_find_sections(data):
"""An intuitive script generator.
Is able to detect and extract code between:
- <script>...</script>
- <powershell>...</powershell>
tags. Yields data with each specific block of code.
Note that, regardless of data structure, all cmd scripts are
yielded before the rest of powershell scripts.
"""
# extract code blocks between the tags
blocks = {
SCRIPT_TAG: TAG_REGEX[SCRIPT_TAG][0].findall(data),
POWERSHELL_TAG: TAG_REGEX[POWERSHELL_TAG][0].findall(data)
}
# build and yield blocks (preserve order)
for script_type in (SCRIPT_TAG, POWERSHELL_TAG):
for code in blocks[script_type]:
code = code.strip()
if not code:
continue # skip the empty ones
yield code, script_type
def _split_sections(multicmd):
for code, stype in _ec2_find_sections(multicmd):
if stype == SCRIPT_TAG:
command = Shell.from_data(code)
else:
command = PowershellSysnative.from_data(code)
yield command
def get_plugin_return_value(ret_val):
plugin_status = base.PLUGIN_EXECUTION_DONE
reboot = False
try:
ret_val = int(ret_val)
except (ValueError, TypeError):
ret_val = 0
if ret_val and RET_START <= ret_val <= RET_END:
reboot = bool(ret_val & 1)
if ret_val & 2:
plugin_status = base.PLUGIN_EXECUTE_ON_NEXT_BOOT
return plugin_status, reboot
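# Illustrative note (not part of the original module): for the special return
# values 1001-1003 the two low bits select the follow-up behaviour, e.g.:
#   get_plugin_return_value(1001) -> (base.PLUGIN_EXECUTION_DONE, True)         # reboot now
#   get_plugin_return_value(1002) -> (base.PLUGIN_EXECUTE_ON_NEXT_BOOT, False)  # re-run on next boot
#   get_plugin_return_value(1003) -> (base.PLUGIN_EXECUTE_ON_NEXT_BOOT, True)   # reboot and re-run
# Any other value (or a non-numeric one) maps to (base.PLUGIN_EXECUTION_DONE, False).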
class BaseCommand(object):
"""Implements logic for executing an user command.
This is intended to be subclassed and each subclass should change the
attributes which controls the behaviour of the execution.
It must be instantiated with a file.
It can also execute string commands, by using the alternate
constructor :meth:`~from_data`.
The following attributes can control the behaviour of the command:
* shell: Run the command as a shell command.
* extension:
A string, which will be appended to a generated script file.
This is important for certain commands, e.g. Powershell,
which can't execute something without the `.ps1` extension.
* command:
A program which will execute the underlying command,
e.g. `python`, `bash` etc.
"""
shell = False
extension = None
command = None
def __init__(self, target_path, cleanup=None):
"""Instantiate the command.
The parameter *target_path* represents the file which will be
executed. The optional parameter *cleanup* can be a callable,
which will be called after executing a command, no matter if the
execution was successful or not.
"""
self._target_path = target_path
self._cleanup = cleanup
self._osutils = osutils_factory.get_os_utils()
@property
def args(self):
"""Return a list of commands.
The list will be passed to :meth:`~execute_process`.
"""
if not self.command:
# Then we can assume it's a shell command.
return [self._target_path]
else:
return [self.command, self._target_path]
def get_execute_method(self):
"""Return a callable, which will be called by :meth:`~execute`."""
return functools.partial(self._osutils.execute_process,
self.args, shell=self.shell)
def execute(self):
"""Execute the underlying command."""
try:
return self.get_execute_method()()
finally:
if self._cleanup:
self._cleanup()
__call__ = execute
@classmethod
def from_data(cls, command):
"""Create a new command class from the given command data."""
def safe_remove(target_path):
try:
os.remove(target_path)
except OSError: # pragma: no cover
pass
tmp = os.path.join(tempfile.gettempdir(), str(uuid.uuid4()))
if cls.extension:
tmp += cls.extension
with open(tmp, 'wb') as stream:
stream.write(command)
return cls(tmp, cleanup=functools.partial(safe_remove, tmp))
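# Illustrative usage sketch (not part of the original module); the command
# bytes are hypothetical:
#   cmd = Bash.from_data(b"echo hello")  # writes a temporary .sh script
#   out, err, ret_val = cmd()            # executes it, then removes the temporary file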
class Shell(BaseCommand):
shell = True
extension = '.cmd'
class Python(BaseCommand):
extension = '.py'
command = 'python'
class Bash(BaseCommand):
extension = '.sh'
command = 'bash'
class PowershellSysnative(BaseCommand):
extension = '.ps1'
sysnative = True
def get_execute_method(self):
return functools.partial(
self._osutils.execute_powershell_script,
self._target_path,
self.sysnative)
class Powershell(PowershellSysnative):
sysnative = False
class CommandExecutor(object):
"""Execute multiple commands and gather outputs."""
SEP = b"\n" # multistring separator
def __init__(self, commands):
self._commands = commands
def execute(self):
out_total = []
err_total = []
ret_total = 0
for command in self._commands:
out = err = b""
ret_val = 0
try:
out, err, ret_val = command()
except Exception as exc:
LOG.exception(
"An error occurred during part execution: %s",
exc
)
else:
out_total.append(out)
err_total.append(err)
ret_total += ret_val
return (
self.SEP.join(out_total),
self.SEP.join(err_total),
ret_total
)
__call__ = execute
class EC2Config(object):
@classmethod
def from_data(cls, multicmd):
"""Create multiple `CommandExecutor` objects.
These are created using data chunks
parsed from the given command data.
"""
return CommandExecutor(_split_sections(multicmd))
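# Illustrative end-to-end sketch (not part of the original module); the
# userdata payload is hypothetical:
#   userdata = (b"<script>echo hello</script>"
#               b"<powershell>Write-Host 'hi'</powershell>")
#   executor = EC2Config.from_data(userdata)
#   out, err, ret_total = executor()  # cmd sections run before powershell sections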
|
py | b417044bcbff039094e929f644700d4954f29e58 | """Test script for the gzip module.
"""
import unittest
from test import support
import os
import io
import struct
gzip = support.import_module('gzip')
data1 = b""" int length=DEFAULTALLOC, err = Z_OK;
PyObject *RetVal;
int flushmode = Z_FINISH;
unsigned long start_total_out;
"""
data2 = b"""/* zlibmodule.c -- gzip-compatible data compression */
/* See http://www.gzip.org/zlib/
/* See http://www.winimage.com/zLibDll for Windows */
"""
class UnseekableIO(io.BytesIO):
def seekable(self):
return False
def tell(self):
raise io.UnsupportedOperation
def seek(self, *args):
raise io.UnsupportedOperation
class BaseTest(unittest.TestCase):
filename = support.TESTFN
def setUp(self):
support.unlink(self.filename)
def tearDown(self):
support.unlink(self.filename)
class TestGzip(BaseTest):
def write_and_read_back(self, data, mode='b'):
b_data = bytes(data)
with gzip.GzipFile(self.filename, 'w'+mode) as f:
l = f.write(data)
self.assertEqual(l, len(b_data))
with gzip.GzipFile(self.filename, 'r'+mode) as f:
self.assertEqual(f.read(), b_data)
def test_write(self):
with gzip.GzipFile(self.filename, 'wb') as f:
f.write(data1 * 50)
# Try flush and fileno.
f.flush()
f.fileno()
if hasattr(os, 'fsync'):
os.fsync(f.fileno())
f.close()
# Test multiple close() calls.
f.close()
# The following test_write_xy methods test that write accepts
# the corresponding bytes-like object type as input
# and that the data written equals bytes(xy) in all cases.
def test_write_memoryview(self):
self.write_and_read_back(memoryview(data1 * 50))
m = memoryview(bytes(range(256)))
data = m.cast('B', shape=[8,8,4])
self.write_and_read_back(data)
def test_write_bytearray(self):
self.write_and_read_back(bytearray(data1 * 50))
def test_write_incompatible_type(self):
# Test that non-bytes-like types raise TypeError.
# Issue #21560: attempts to write incompatible types
# should not affect the state of the fileobject
with gzip.GzipFile(self.filename, 'wb') as f:
with self.assertRaises(TypeError):
f.write('a')
with self.assertRaises(TypeError):
f.write([1])
f.write(data1)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1)
def test_read(self):
self.test_write()
# Try reading.
with gzip.GzipFile(self.filename, 'r') as f:
d = f.read()
self.assertEqual(d, data1*50)
def test_read1(self):
self.test_write()
blocks = []
nread = 0
with gzip.GzipFile(self.filename, 'r') as f:
while True:
d = f.read1()
if not d:
break
blocks.append(d)
nread += len(d)
# Check that position was updated correctly (see issue10791).
self.assertEqual(f.tell(), nread)
self.assertEqual(b''.join(blocks), data1 * 50)
def test_io_on_closed_object(self):
# Test that I/O operations on closed GzipFile objects raise a
# ValueError, just like the corresponding functions on file objects.
# Write to a file, open it for reading, then close it.
self.test_write()
f = gzip.GzipFile(self.filename, 'r')
f.close()
with self.assertRaises(ValueError):
f.read(1)
with self.assertRaises(ValueError):
f.seek(0)
with self.assertRaises(ValueError):
f.tell()
# Open the file for writing, then close it.
f = gzip.GzipFile(self.filename, 'w')
f.close()
with self.assertRaises(ValueError):
f.write(b'')
with self.assertRaises(ValueError):
f.flush()
def test_append(self):
self.test_write()
# Append to the previous file
with gzip.GzipFile(self.filename, 'ab') as f:
f.write(data2 * 15)
with gzip.GzipFile(self.filename, 'rb') as f:
d = f.read()
self.assertEqual(d, (data1*50) + (data2*15))
def test_many_append(self):
# Bug #1074261 was triggered when reading a file that contained
# many, many members. Create such a file and verify that reading it
# works.
with gzip.GzipFile(self.filename, 'wb', 9) as f:
f.write(b'a')
for i in range(0, 200):
with gzip.GzipFile(self.filename, "ab", 9) as f: # append
f.write(b'a')
# Try reading the file
with gzip.GzipFile(self.filename, "rb") as zgfile:
contents = b""
while 1:
ztxt = zgfile.read(8192)
contents += ztxt
if not ztxt: break
self.assertEqual(contents, b'a'*201)
def test_exclusive_write(self):
with gzip.GzipFile(self.filename, 'xb') as f:
f.write(data1 * 50)
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertEqual(f.read(), data1 * 50)
with self.assertRaises(FileExistsError):
gzip.GzipFile(self.filename, 'xb')
def test_buffered_reader(self):
# Issue #7471: a GzipFile can be wrapped in a BufferedReader for
# performance.
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
with io.BufferedReader(f) as r:
lines = [line for line in r]
self.assertEqual(lines, 50 * data1.splitlines(keepends=True))
def test_readline(self):
self.test_write()
# Try .readline() with varying line lengths
with gzip.GzipFile(self.filename, 'rb') as f:
line_length = 0
while 1:
L = f.readline(line_length)
if not L and line_length != 0: break
self.assertTrue(len(L) <= line_length)
line_length = (line_length + 1) % 50
def test_readlines(self):
self.test_write()
# Try .readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
L = f.readlines()
with gzip.GzipFile(self.filename, 'rb') as f:
while 1:
L = f.readlines(150)
if L == []: break
def test_seek_read(self):
self.test_write()
# Try seek, read test
with gzip.GzipFile(self.filename) as f:
while 1:
oldpos = f.tell()
line1 = f.readline()
if not line1: break
newpos = f.tell()
f.seek(oldpos) # negative seek
if len(line1)>10:
amount = 10
else:
amount = len(line1)
line2 = f.read(amount)
self.assertEqual(line1[:amount], line2)
f.seek(newpos) # positive seek
def test_seek_whence(self):
self.test_write()
# Try seek(whence=1), read test
with gzip.GzipFile(self.filename) as f:
f.read(10)
f.seek(10, whence=1)
y = f.read(10)
self.assertEqual(y, data1[20:30])
def test_seek_write(self):
# Try seek, write test
with gzip.GzipFile(self.filename, 'w') as f:
for pos in range(0, 256, 16):
f.seek(pos)
f.write(b'GZ\n')
def test_mode(self):
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
self.assertEqual(f.myfileobj.mode, 'rb')
support.unlink(self.filename)
with gzip.GzipFile(self.filename, 'x') as f:
self.assertEqual(f.myfileobj.mode, 'xb')
def test_1647484(self):
for mode in ('wb', 'rb'):
with gzip.GzipFile(self.filename, mode) as f:
self.assertTrue(hasattr(f, "name"))
self.assertEqual(f.name, self.filename)
def test_paddedfile_getattr(self):
self.test_write()
with gzip.GzipFile(self.filename, 'rb') as f:
self.assertTrue(hasattr(f.fileobj, "name"))
self.assertEqual(f.fileobj.name, self.filename)
def test_mtime(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with gzip.GzipFile(self.filename) as fRead:
dataRead = fRead.read()
self.assertEqual(dataRead, data1)
self.assertTrue(hasattr(fRead, 'mtime'))
self.assertEqual(fRead.mtime, mtime)
def test_metadata(self):
mtime = 123456789
with gzip.GzipFile(self.filename, 'w', mtime = mtime) as fWrite:
fWrite.write(data1)
with open(self.filename, 'rb') as fRead:
# see RFC 1952: http://www.faqs.org/rfcs/rfc1952.html
idBytes = fRead.read(2)
self.assertEqual(idBytes, b'\x1f\x8b') # gzip ID
cmByte = fRead.read(1)
self.assertEqual(cmByte, b'\x08') # deflate
flagsByte = fRead.read(1)
self.assertEqual(flagsByte, b'\x08') # only the FNAME flag is set
mtimeBytes = fRead.read(4)
self.assertEqual(mtimeBytes, struct.pack('<i', mtime)) # little-endian
xflByte = fRead.read(1)
self.assertEqual(xflByte, b'\x02') # maximum compression
osByte = fRead.read(1)
self.assertEqual(osByte, b'\xff') # OS "unknown" (OS-independent)
# Since the FNAME flag is set, the zero-terminated filename follows.
# RFC 1952 specifies that this is the name of the input file, if any.
# However, the gzip module defaults to storing the name of the output
# file in this field.
expected = self.filename.encode('Latin-1') + b'\x00'
nameBytes = fRead.read(len(expected))
self.assertEqual(nameBytes, expected)
# Since no other flags were set, the header ends here.
# Rather than process the compressed data, let's seek to the trailer.
fRead.seek(os.stat(self.filename).st_size - 8)
crc32Bytes = fRead.read(4) # CRC32 of uncompressed data [data1]
self.assertEqual(crc32Bytes, b'\xaf\xd7d\x83')
isizeBytes = fRead.read(4)
self.assertEqual(isizeBytes, struct.pack('<i', len(data1)))
def test_with_open(self):
# GzipFile supports the context management protocol
with gzip.GzipFile(self.filename, "wb") as f:
f.write(b"xxx")
f = gzip.GzipFile(self.filename, "rb")
f.close()
try:
with f:
pass
except ValueError:
pass
else:
self.fail("__enter__ on a closed file didn't raise an exception")
try:
with gzip.GzipFile(self.filename, "wb") as f:
1/0
except ZeroDivisionError:
pass
else:
self.fail("1/0 didn't raise an exception")
def test_zero_padded_file(self):
with gzip.GzipFile(self.filename, "wb") as f:
f.write(data1 * 50)
# Pad the file with zeroes
with open(self.filename, "ab") as f:
f.write(b"\x00" * 50)
with gzip.GzipFile(self.filename, "rb") as f:
d = f.read()
self.assertEqual(d, data1 * 50, "Incorrect data in file")
def test_non_seekable_file(self):
uncompressed = data1 * 50
buf = UnseekableIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
f.write(uncompressed)
compressed = buf.getvalue()
buf = UnseekableIO(compressed)
with gzip.GzipFile(fileobj=buf, mode="rb") as f:
self.assertEqual(f.read(), uncompressed)
def test_peek(self):
uncompressed = data1 * 200
with gzip.GzipFile(self.filename, "wb") as f:
f.write(uncompressed)
def sizes():
while True:
for n in range(5, 50, 10):
yield n
with gzip.GzipFile(self.filename, "rb") as f:
f.max_read_chunk = 33
nread = 0
for n in sizes():
s = f.peek(n)
if s == b'':
break
self.assertEqual(f.read(len(s)), s)
nread += len(s)
self.assertEqual(f.read(100), b'')
self.assertEqual(nread, len(uncompressed))
def test_textio_readlines(self):
# Issue #10791: TextIOWrapper.readlines() fails when wrapping GzipFile.
lines = (data1 * 50).decode("ascii").splitlines(keepends=True)
self.test_write()
with gzip.GzipFile(self.filename, 'r') as f:
with io.TextIOWrapper(f, encoding="ascii") as t:
self.assertEqual(t.readlines(), lines)
def test_fileobj_from_fdopen(self):
# Issue #13781: Opening a GzipFile for writing fails when using a
# fileobj created with os.fdopen().
fd = os.open(self.filename, os.O_WRONLY | os.O_CREAT)
with os.fdopen(fd, "wb") as f:
with gzip.GzipFile(fileobj=f, mode="w") as g:
pass
def test_bytes_filename(self):
str_filename = self.filename
try:
bytes_filename = str_filename.encode("ascii")
except UnicodeEncodeError:
self.skipTest("Temporary file name needs to be ASCII")
with gzip.GzipFile(bytes_filename, "wb") as f:
f.write(data1 * 50)
with gzip.GzipFile(bytes_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Sanity check that we are actually operating on the right file.
with gzip.GzipFile(str_filename, "rb") as f:
self.assertEqual(f.read(), data1 * 50)
# Testing compress/decompress shortcut functions
def test_compress(self):
for data in [data1, data2]:
for args in [(), (1,), (6,), (9,)]:
datac = gzip.compress(data, *args)
self.assertEqual(type(datac), bytes)
with gzip.GzipFile(fileobj=io.BytesIO(datac), mode="rb") as f:
self.assertEqual(f.read(), data)
def test_decompress(self):
for data in (data1, data2):
buf = io.BytesIO()
with gzip.GzipFile(fileobj=buf, mode="wb") as f:
f.write(data)
self.assertEqual(gzip.decompress(buf.getvalue()), data)
# Roundtrip with compress
datac = gzip.compress(data)
self.assertEqual(gzip.decompress(datac), data)
def test_read_truncated(self):
data = data1*50
# Drop the CRC (4 bytes) and file size (4 bytes).
truncated = gzip.compress(data)[:-8]
with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
self.assertRaises(EOFError, f.read)
with gzip.GzipFile(fileobj=io.BytesIO(truncated)) as f:
self.assertEqual(f.read(len(data)), data)
self.assertRaises(EOFError, f.read, 1)
# Incomplete 10-byte header.
for i in range(2, 10):
with gzip.GzipFile(fileobj=io.BytesIO(truncated[:i])) as f:
self.assertRaises(EOFError, f.read, 1)
def test_read_with_extra(self):
# Gzip data with an extra field
gzdata = (b'\x1f\x8b\x08\x04\xb2\x17cQ\x02\xff'
b'\x05\x00Extra'
b'\x0bI-.\x01\x002\xd1Mx\x04\x00\x00\x00')
with gzip.GzipFile(fileobj=io.BytesIO(gzdata)) as f:
self.assertEqual(f.read(), b'Test')
def test_prepend_error(self):
# See issue #20875
with gzip.open(self.filename, "wb") as f:
f.write(data1)
with gzip.open(self.filename, "rb") as f:
f.fileobj.prepend()
class TestOpen(BaseTest):
def test_binary_modes(self):
uncompressed = data1 * 50
with gzip.open(self.filename, "wb") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
with gzip.open(self.filename, "rb") as f:
self.assertEqual(f.read(), uncompressed)
with gzip.open(self.filename, "ab") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed * 2)
with self.assertRaises(FileExistsError):
gzip.open(self.filename, "xb")
support.unlink(self.filename)
with gzip.open(self.filename, "xb") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
def test_implicit_binary_modes(self):
# Test implicit binary modes (no "b" or "t" in mode string).
uncompressed = data1 * 50
with gzip.open(self.filename, "w") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
with gzip.open(self.filename, "r") as f:
self.assertEqual(f.read(), uncompressed)
with gzip.open(self.filename, "a") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed * 2)
with self.assertRaises(FileExistsError):
gzip.open(self.filename, "x")
support.unlink(self.filename)
with gzip.open(self.filename, "x") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read())
self.assertEqual(file_data, uncompressed)
def test_text_modes(self):
uncompressed = data1.decode("ascii") * 50
uncompressed_raw = uncompressed.replace("\n", os.linesep)
with gzip.open(self.filename, "wt") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, uncompressed_raw)
with gzip.open(self.filename, "rt") as f:
self.assertEqual(f.read(), uncompressed)
with gzip.open(self.filename, "at") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read()).decode("ascii")
self.assertEqual(file_data, uncompressed_raw * 2)
def test_fileobj(self):
uncompressed_bytes = data1 * 50
uncompressed_str = uncompressed_bytes.decode("ascii")
compressed = gzip.compress(uncompressed_bytes)
with gzip.open(io.BytesIO(compressed), "r") as f:
self.assertEqual(f.read(), uncompressed_bytes)
with gzip.open(io.BytesIO(compressed), "rb") as f:
self.assertEqual(f.read(), uncompressed_bytes)
with gzip.open(io.BytesIO(compressed), "rt") as f:
self.assertEqual(f.read(), uncompressed_str)
def test_bad_params(self):
# Test invalid parameter combinations.
with self.assertRaises(TypeError):
gzip.open(123.456)
with self.assertRaises(ValueError):
gzip.open(self.filename, "wbt")
with self.assertRaises(ValueError):
gzip.open(self.filename, "xbt")
with self.assertRaises(ValueError):
gzip.open(self.filename, "rb", encoding="utf-8")
with self.assertRaises(ValueError):
gzip.open(self.filename, "rb", errors="ignore")
with self.assertRaises(ValueError):
gzip.open(self.filename, "rb", newline="\n")
def test_encoding(self):
# Test non-default encoding.
uncompressed = data1.decode("ascii") * 50
uncompressed_raw = uncompressed.replace("\n", os.linesep)
with gzip.open(self.filename, "wt", encoding="utf-16") as f:
f.write(uncompressed)
with open(self.filename, "rb") as f:
file_data = gzip.decompress(f.read()).decode("utf-16")
self.assertEqual(file_data, uncompressed_raw)
with gzip.open(self.filename, "rt", encoding="utf-16") as f:
self.assertEqual(f.read(), uncompressed)
def test_encoding_error_handler(self):
# Test with non-default encoding error handler.
with gzip.open(self.filename, "wb") as f:
f.write(b"foo\xffbar")
with gzip.open(self.filename, "rt", encoding="ascii", errors="ignore") \
as f:
self.assertEqual(f.read(), "foobar")
def test_newline(self):
# Test with explicit newline (universal newline mode disabled).
uncompressed = data1.decode("ascii") * 50
with gzip.open(self.filename, "wt", newline="\n") as f:
f.write(uncompressed)
with gzip.open(self.filename, "rt", newline="\r") as f:
self.assertEqual(f.readlines(), [uncompressed])
def test_main(verbose=None):
support.run_unittest(TestGzip, TestOpen)
if __name__ == "__main__":
test_main(verbose=True)
|
py | b41704b29741e2c6a36168ac21895855f74b2b53 | import numpy
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
try:
from itertools import ifilterfalse
except ImportError: # py3k
from itertools import filterfalse as ifilterfalse
def flatten_binary_scores(scores, labels, ignore=None):
"""
Flattens predictions in the batch (binary case)
Remove labels equal to 'ignore'
"""
scores = scores.view(-1)
labels = labels.view(-1)
if ignore is None:
return scores, labels
valid = (labels != ignore)
vscores = scores[valid]
vlabels = labels[valid]
return vscores, vlabels
def lovasz_grad(gt_sorted):
"""
Computes gradient of the Lovasz extension w.r.t sorted errors
See Alg. 1 in paper
"""
p = len(gt_sorted)
gts = gt_sorted.sum()
intersection = gts - gt_sorted.float().cumsum(0)
union = gts + (1 - gt_sorted).float().cumsum(0)
jaccard = 1. - intersection / union
if p > 1: # cover 1-pixel case
jaccard[1:p] = jaccard[1:p] - jaccard[0:-1]
return jaccard
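# Worked example (illustrative, not part of the original code): for
# gt_sorted = tensor([1., 1., 0.]) we get gts = 2,
# intersection = [1., 0., 0.], union = [2., 2., 3.],
# jaccard = [0.5, 1.0, 1.0], and after the difference step the
# returned gradient is [0.5, 0.5, 0.0].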
def lovasz_hinge(logits, labels, per_image=True, ignore=None):
"""
Binary Lovasz hinge loss
logits: [B, H, W] Variable, logits at each pixel (between -\infty and +\infty)
labels: [B, H, W] Tensor, binary ground truth masks (0 or 1)
per_image: compute the loss per image instead of per batch
ignore: void class id
"""
if per_image:
loss = mean(lovasz_hinge_flat(*flatten_binary_scores(log.unsqueeze(0), lab.unsqueeze(0), ignore))
for log, lab in zip(logits, labels))
else:
loss = lovasz_hinge_flat(*flatten_binary_scores(logits, labels, ignore))
return loss
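# Illustrative usage sketch (not part of the original code); shapes follow the
# docstring above and the values are arbitrary:
#   logits = torch.randn(2, 4, 4)                 # [B, H, W] raw scores
#   labels = (torch.rand(2, 4, 4) > 0.5).float()  # binary ground truth masks
#   loss = lovasz_hinge(logits, labels, per_image=True)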
def lovasz_hinge_flat(logits, labels):
"""
Binary Lovasz hinge loss
logits: [P] Variable, logits at each prediction (between -\infty and +\infty)
labels: [P] Tensor, binary ground truth labels (0 or 1)
ignore: label to ignore
"""
if len(labels) == 0:
# only void pixels, the gradients should be 0
return logits.sum() * 0.
# signs = 2. * labels.float() - 1.
errors = (1. - logits)*labels + logits*(1. - labels)
## Old error for output values in the range [-1 1]
# errors = (1. - logits*Variable(signs))
errors_sorted, perm = torch.sort(errors, dim=0, descending=True)
perm = perm.data
gt_sorted = labels[perm]
grad = lovasz_grad(gt_sorted)
loss = torch.dot(F.relu(errors_sorted), Variable(grad))
return loss
#=====
#Multi-class Lovasz loss
#=====
def lovasz_softmax(probas, labels, classes='present', per_image=False, ignore=None):
"""
Multi-class Lovasz-Softmax loss
probas: [B, C, H, W] Variable, class probabilities at each prediction (between 0 and 1).
Interpreted as binary (sigmoid) output with outputs of size [B, H, W].
labels: [B, H, W] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
per_image: compute the loss per image instead of per batch
ignore: void class labels
"""
if per_image:
loss = mean(lovasz_softmax_flat(*flatten_probas(prob.unsqueeze(0), lab.unsqueeze(0), ignore), classes=classes)
for prob, lab in zip(probas, labels))
else:
loss = lovasz_softmax_flat(*flatten_probas(probas, labels, ignore), classes=classes)
return loss
def lovasz_softmax_flat(probas, labels, classes='present'):
"""
Multi-class Lovasz-Softmax loss
probas: [P, C] Variable, class probabilities at each prediction (between 0 and 1)
labels: [P] Tensor, ground truth labels (between 0 and C - 1)
classes: 'all' for all, 'present' for classes present in labels, or a list of classes to average.
"""
if probas.numel() == 0:
# only void pixels, the gradients should be 0
return probas * 0.
C = probas.size(1)
losses = []
class_to_sum = list(range(C)) if classes in ['all', 'present'] else classes
for c in class_to_sum:
fg = (labels == c).float() # foreground for class c
if (classes == 'present' and fg.sum() == 0):
continue
if C == 1:
if len(classes) > 1:
raise ValueError('Sigmoid output possible only with 1 class')
class_pred = probas[:, 0]
else:
class_pred = probas[:, c]
errors = (Variable(fg) - class_pred).abs()
errors_sorted, perm = torch.sort(errors, 0, descending=True)
perm = perm.data
fg_sorted = fg[perm]
losses.append(torch.dot(errors_sorted, Variable(lovasz_grad(fg_sorted))))
return mean(losses)
def flatten_probas(probas, labels, ignore=None):
"""
Flattens predictions in the batch
"""
if probas.dim() == 3:
# assumes output of a sigmoid layer
B, H, W = probas.size()
probas = probas.view(B, 1, H, W)
B, C, H, W = probas.size()
probas = probas.permute(0, 2, 3, 1).contiguous().view(-1, C) # B * H * W, C = P, C
labels = labels.view(-1)
if ignore is None:
return probas, labels
valid = (labels != ignore)
vprobas = probas[valid.nonzero().squeeze()]
vlabels = labels[valid]
return vprobas, vlabels
def isnan(x):
return x != x
def mean(l, ignore_nan=False, empty=0):
"""
nanmean compatible with generators.
"""
l = iter(l)
if ignore_nan:
l = ifilterfalse(isnan, l)
try:
n = 1
acc = next(l)
except StopIteration:
if empty == 'raise':
raise ValueError('Empty mean')
return empty
for n, v in enumerate(l, 2):
acc += v
if n == 1:
return acc
return acc / n |
py | b41705c7d8013b2ea904a412e9d428fccfbe7358 | '''
Copyright (c) 2018 by Tobias Houska
This file is part of Statistical Parameter Optimization Tool for Python(SPOTPY).
:author: Tobias Houska
This file holds the standards for every algorithm.
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from spotpy import database
from spotpy import parameter
import numpy as np
import time
import threading
try:
from queue import Queue
except ImportError:
# On Python 2.* only the Queue module is available for this multiprocessing use case.
# There the whole main process has to sleep for one microsecond, otherwise the subprocess is not
# finished and the main process cannot access it and hand it over to the garbage collector.
# This slows down the whole simulation process and is an annoying bug; Python 3.x does not need this
# workaround.
from Queue import Queue
class _RunStatistic(object):
"""
This class checks for each run whether the objectivefunction got better and holds the
best parameter set.
Every _algorithm has an object of this class as status.
Usage:
status = _RunStatistic(repetitions, algorithm_name, optimization_direction, parnames)
status(like, params)
"""
def __init__(self, repetitions, algorithm_name, optimization_direction, parnames):
self.optimization_direction = optimization_direction  # grid, maximize, minimize
print('Initializing the ',algorithm_name,' with ',repetitions,' repetitions')
if optimization_direction == 'minimize':
self.compare = self.minimizer
print('The objective function will be minimized')
if optimization_direction == 'maximize':
self.compare = self.maximizer
print('The objective function will be maximized')
if optimization_direction == 'grid':
self.compare = self.grid
self.rep = 0
self.parnames = parnames
self.parameters= len(parnames)
self.params_min = [np.nan]*self.parameters
self.params_max = [np.nan]*self.parameters
self.objectivefunction_min = 1e308
self.objectivefunction_max = -1e308
self.starttime = time.time()
self.last_print = time.time()
self.repetitions = repetitions
self.stop = False
def minimizer(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
def maximizer(self, objval, params):
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def grid(self, objval, params):
if objval < self.objectivefunction_min:
self.objectivefunction_min = objval
self.params_min = list(params)
if objval > self.objectivefunction_max:
self.objectivefunction_max = objval
self.params_max = list(params)
def __call__(self, objectivefunction, params, block_print=False):
self.rep+=1
if type(objectivefunction) == type([]): #TODO: change to iterable
self.compare(objectivefunction[0], params)
elif type(objectivefunction) == type(np.array([])):
pass
else:
self.compare(objectivefunction, params)
if self.rep == self.repetitions:
self.stop = True
if not block_print:
self.print_status()
def print_status(self):
# get str showing approximate time left to end of simulation in H, M, S
acttime = time.time()
# Refresh progressbar every two seconds
if acttime - self.last_print >= 2:
avg_time_per_run = (acttime - self.starttime) / (self.rep + 1)
timestr = time.strftime("%H:%M:%S", time.gmtime(round(avg_time_per_run * (self.repetitions - (self.rep + 1)))))
if self.optimization_direction == 'minimize':
text = '%i of %i, minimal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, timestr)
if self.optimization_direction == 'maximize':
text = '%i of %i, maximal objective function=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_max, timestr)
if self.optimization_direction == 'grid':
text = '%i of %i, min objf=%g, max objf=%g, time remaining: %s' % (
self.rep, self.repetitions, self.objectivefunction_min, self.objectivefunction_max, timestr)
print(text)
self.last_print = time.time()
def print_status_final(self):
print('\n*** Final SPOTPY summary ***')
print('Total Duration: ' + str(round((time.time() - self.starttime), 2)) + ' seconds')
print('Total Repetitions:', self.rep)
if self.optimization_direction == 'minimize':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
if self.optimization_direction == 'maximize':
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
if self.optimization_direction == 'grid':
print('Minimal objective value: %g' % (self.objectivefunction_min))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_min[i])
print(text)
print('Maximal objective value: %g' % (self.objectivefunction_max))
print('Corresponding parameter setting:')
for i in range(self.parameters):
text = '%s: %g' % (self.parnames[i], self.params_max[i])
print(text)
print('******************************\n')
def __repr__(self):
return 'Min objectivefunction: %g \n Max objectivefunction: %g' % (
self.objectivefunction_min, self.objectivefunction_max)
class _algorithm(object):
"""
Implements an algorithm.
Input
----------
spot_setup: class
model: function
Should be callable with a parameter combination of the parameter-function
and return a list of simulation results (as long as the evaluation list)
parameter: function
When called, it should return a random parameter combination. Which can
be e.g. uniform or Gaussian
objectivefunction: function
Should return the objectivefunction for a given list of a model simulation and
observation.
evaluation: function
Should return the true values as returned by the model.
dbname: str
Name of the database where parameter, objectivefunction value and simulation
results will be saved.
dbformat: str
ram: fast, suited for short sampling time; no file will be created and results are saved in an array.
csv: A csv file will be created, which you can import afterwards.
parallel: str
seq: Sequential sampling (default): Normal iterations on one core of your cpu.
mpc: Multi processing: Iterations on all available cores on your (single) pc
mpi: Message Passing Interface: Parallel computing on high performance computing clusters, mpi4py needs to be installed
save_threshold: float or list
Compares the given value/list of values with return value/list of values from spot_setup.objectivefunction.
If the objectivefunction value is higher, the results are saved in the database. If not they are ignored (saves storage).
db_precision:np.float type
set np.float16, np.float32 or np.float64 for rounding of floats in the output database
Default is np.float16
sim_timeout: float, int or None, default: None
the defined model given in the spot_setup class can be controlled to break after 'sim_timeout' seconds if
sim_timeout is not None.
If the model run has been broken, simply '[nan]' will be returned.
random_state: int or None, default: None
the algorithm uses the number in random_state as seed for numpy. This way stochastic processes can be reproduced.
"""
_unaccepted_parameter_types = (parameter.List, )
def __init__(self, spot_setup, dbname=None, dbformat=None, dbinit=True,
dbappend=False, parallel='seq', save_sim=True, breakpoint=None,
backup_every_rep=100, save_threshold=-np.inf, db_precision=np.float16,
sim_timeout=None, random_state=None, optimization_direction='grid', algorithm_name=''):
# Initialize the user defined setup class
self.setup = spot_setup
param_info = parameter.get_parameters_array(self.setup, unaccepted_parameter_types=self._unaccepted_parameter_types)
self.all_params = param_info['random']
self.constant_positions = parameter.get_constant_indices(spot_setup)
if self.constant_positions:
self.non_constant_positions = []
for i, val in enumerate(self.all_params):
if self.all_params[i] not in self.constant_positions:
self.non_constant_positions.append(i)
else:
self.non_constant_positions = np.arange(0,len(self.all_params))
self.parameter = self.get_parameters
self.parnames = param_info['name']
self.algorithm_name = algorithm_name
# Create a type to hold the parameter values using a namedtuple
self.partype = parameter.ParameterSet(param_info)
self.evaluation = self.setup.evaluation()
self.save_sim = save_sim
self.optimization_direction = optimization_direction
self.dbname = dbname or 'customDb'
self.dbformat = dbformat or 'ram'
self.db_precision = db_precision
self.breakpoint = breakpoint
self.backup_every_rep = backup_every_rep
# Two parameters to control the data base handling
# 'dbinit' triggers the initial creation of the data base file
# 'dbappend' used to append to the existing data base, after restart
self.dbinit = dbinit
self.dbappend = dbappend
# Set the random state
if random_state is None:  # ToDo: Have to discuss if these 3 lines are necessary.
random_state = np.random.randint(low=0, high=2**30)
np.random.seed(random_state)
# If value is not None a timeout will set so that the simulation will break after sim_timeout seconds without return a value
self.sim_timeout = sim_timeout
self.save_threshold = save_threshold
if breakpoint == 'read' or breakpoint == 'readandwrite':
print('Reading backupfile')
try:
open(self.dbname+'.break')
except FileNotFoundError:
print('Backupfile not found')
self.dbappend = True
# Now a repeater (ForEach-object) is loaded
# A repeater is a convenient wrapper to repeat tasks
# We have the same interface for sequential and for parallel tasks
if parallel == 'seq':
from spotpy.parallel.sequential import ForEach
elif parallel == 'mpi':
from spotpy.parallel.mpi import ForEach
# MPC is based on pathos multiprocessing and uses an ordered map, so results are given back in the same
# order as the parameters
elif parallel == 'mpc':
from spotpy.parallel.mproc import ForEach
# UMPC is based on pathos multiprocessing and uses an unordered map, so results are given back in the order
# in which the subprocesses finish. This may speed up the whole simulation process but is not recommended if
# objective functions do their calculation based on the order of the data, because the order of the results is
# chaotic and randomized
elif parallel == 'umpc':
from spotpy.parallel.umproc import ForEach
else:
raise ValueError(
"'%s' is not a valid keyword for parallel processing" % parallel)
# This is the repeater for the model runs. The simulate method does the work
# If you need different tasks, the repeater can be pushed into a "phase" using the
# setphase function. The simulate method can check the current phase and dispatch work
# to other functions. This is introduced for sceua to differentiate between burn in and
# the normal work on the chains
self.repeat = ForEach(self.simulate)
# method "save" needs to know whether objective function result is list or float, default is float
self.like_struct_typ = type(1.1)
def __str__(self):
return '{type}({mtype}())->{dbname}'.format(
type=type(self).__name__,
mtype=type(self.setup).__name__,
dbname=self.dbname)
def __repr__(self):
return '{type}()'.format(type=type(self).__name__)
def get_parameters(self):
"""
Returns the parameter array from the setup
"""
pars = parameter.get_parameters_array(self.setup)
return pars[self.non_constant_positions]
def set_repetiton(self, repetitions):
self.status = _RunStatistic(repetitions, self.algorithm_name,
self.optimization_direction, self.parnames)
# In MPI, this command will do nothing on the master process
# but the worker processes are going to wait for jobs.
# Hence the workers will only receive parameters for the
# simulate function, new calculation phases and the termination
self.repeat.start()
def final_call(self):
self.repeat.terminate()
try:
self.datawriter.finalize()
except AttributeError: # Happens if no database was assigned
pass
self.status.print_status_final()
def _init_database(self, like, randompar, simulations):
if self.dbinit:
print('Initialize database...')
self.datawriter = database.get_datawriter(self.dbformat,
self.dbname, self.parnames, like, randompar, simulations,
save_sim=self.save_sim, dbappend=self.dbappend,
dbinit=self.dbinit, db_precision=self.db_precision,
setup=self.setup)
self.dbinit = False
def __is_list_type(self, data):
if type(data) == type:
return data == list or data == type(np.array([]))
else:
return type(data) == list or type(data) == type(np.array([]))
def save(self, like, randompar, simulations, chains=1):
# Initialize the database if no run was performed so far
self._init_database(like, randompar, simulations)
# Test if like and the save threshold are float/list and compare accordingly
if self.__is_list_type(like) and self.__is_list_type(self.save_threshold):
if all(i > j for i, j in zip(like, self.save_threshold)): #Compares list/list
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and (not self.__is_list_type(self.save_threshold)):
if like>self.save_threshold: #Compares float/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if self.__is_list_type(like) and (not self.__is_list_type(self.save_threshold)):
if like[0]>self.save_threshold: #Compares list/float
self.datawriter.save(like, randompar, simulations, chains=chains)
if (not self.__is_list_type(like)) and self.__is_list_type(self.save_threshold): #Compares float/list
if (like > self.save_threshold).all():
self.datawriter.save(like, randompar, simulations, chains=chains)
def read_breakdata(self, dbname):
''' Read data from a pickle file if a breakpoint is set.
Reason: In case of incomplete optimizations, old data can be restored. '''
import pickle
with open(dbname+'.break', 'rb') as breakfile:
work,backuptime,repos,obmin,obmax=pickle.load(breakfile)
self.status.starttime=self.status.starttime-backuptime
self.status.rep=repos
self.status.objectivefunction_min=obmin
self.status.objectivefunction_max=obmax
return work
def write_breakdata(self, dbname, work):
''' Write data to a pickle file if a breakpoint has been set.'''
import pickle
work=(work,self.status.last_print-self.status.starttime,self.status.rep,self.status.objectivefunction_min,self.status.objectivefunction_max)
with open(str(dbname)+'.break', 'wb') as breakfile:
pickle.dump(work, breakfile)
def getdata(self):
return self.datawriter.getdata()
def update_params(self, params):
#Add potential Constant parameters
self.all_params[self.non_constant_positions] = params
return self.all_params
def postprocessing(self, rep, params, simulation, chains=1, save_run=True, negativlike=False, block_print=False):  # TODO: rep not necessary
params = self.update_params(params)
if negativlike is True:
like = -self.getfitness(simulation=simulation, params=params)
else:
like = self.getfitness(simulation=simulation, params=params)
# Save everything in the database, if save is True
# This is needed as some algorithms just want to know the fitness,
# before they actually save the run in a database (e.g. sce-ua)
self.status(like,params,block_print=block_print)
if save_run is True and simulation is not None:
self.save(like, params, simulations=simulation, chains=chains)
if type(like)==type([]):
return like[0]
else:
return like
def getfitness(self, simulation, params):
"""
Calls the user defined spot_setup objectivefunction
"""
try:
#print('Using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation, params = (params,self.parnames))
except TypeError: # Happens if the user does not allow to pass parameter in the spot_setup.objectivefunction
#print('Not using parameters in fitness function')
return self.setup.objectivefunction(evaluation=self.evaluation, simulation=simulation)
def simulate(self, id_params_tuple):
"""This is a simple wrapper of the model, returning the result together with
the run id and the parameters. This is needed, because some parallel things
can mix up the ordering of runs
"""
id, params = id_params_tuple
self.all_params[self.non_constant_positions] = params #TODO: List parameters are not updated if not accepted for the algorithm, we may have to warn/error if list is given
all_params = self.all_params
# we need a layer to fetch returned data from a threaded process into a queue.
def model_layer(q,all_params):
# Call self.model with a namedtuple instead of another sequence
q.put(self.setup.simulation(self.partype(*all_params)))
# starting a queue, which in python2.7 is a multiprocessing class and can cause errors because of
# incompatibility with the main thread. Therefore a workaround is applied for older Python versions only
que = Queue()
sim_thread = threading.Thread(target=model_layer, args=(que, all_params))
sim_thread.daemon = True
sim_thread.start()
# If self.sim_timeout is not None, self.model will break after self.sim_timeout seconds; otherwise it runs as
# long as it needs to run
sim_thread.join(self.sim_timeout)
# If no result from the thread is given, i.e. the thread was killed by the watcher, the default result is
# '[nan]' and will not be saved. Otherwise get the result from the thread
model_result = None
if not que.empty():
model_result = que.get()
return id, params, model_result
|
py | b417068227374ee2c1ff155415945c7b6e1dd3d6 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
import os
extensions = [
'sphinx.ext.autodoc',
'sphinx.ext.autosummary',
'sphinx.ext.coverage',
'sphinx.ext.doctest',
'sphinx.ext.extlinks',
'sphinx.ext.ifconfig',
'sphinx.ext.napoleon',
'sphinx.ext.todo',
'sphinx.ext.viewcode',
]
if os.getenv('SPELLCHECK'):
extensions += 'sphinxcontrib.spelling',
spelling_show_suggestions = True
spelling_lang = 'en_US'
source_suffix = '.rst'
master_doc = 'index'
project = 'fuchur'
year = '2018'
author = 'Simon Hilpert'
copyright = '{0}, {1}'.format(year, author)
version = release = '0.0.0'
pygments_style = 'trac'
templates_path = ['.']
extlinks = {
'issue': ('https://github.com/znes/fuchur/issues/%s', '#'),
'pr': ('https://github.com/znes/fuchur/pull/%s', 'PR #'),
}
# on_rtd is whether we are on readthedocs.org
on_rtd = os.environ.get('READTHEDOCS', None) == 'True'
if not on_rtd: # only set the theme if we're building docs locally
html_theme = 'sphinx_rtd_theme'
html_use_smartypants = True
html_last_updated_fmt = '%b %d, %Y'
html_split_index = False
html_sidebars = {
'**': ['searchbox.html', 'globaltoc.html', 'sourcelink.html'],
}
html_short_title = '%s-%s' % (project, version)
napoleon_use_ivar = True
napoleon_use_rtype = False
napoleon_use_param = False
|
py | b417078547f2f590ba165db9b006be492a5abea2 | # -*- coding: utf-8 -*-
# Copyright 2017 Leo Moll and Dominik Schlösser
#
# -- Imports ------------------------------------------------
# -- Classes ------------------------------------------------
class Film( object ):
def __init__( self ):
self.id = 0
self.title = u''
self.show = u''
self.channel = u''
self.description = u''
self.seconds = 0
self.size = 0
self.aired = u''
self.url_sub = u''
self.url_video = u''
self.url_video_sd = u''
self.url_video_hd = u''
|
py | b41707943d57a4b751a98d8af985ebb7d864a875 | # coding: utf-8
# Copyright (c) 2016, 2022, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class Sddc(object):
"""
An `Oracle Cloud VMware Solution`__ software-defined data center (SDDC) contains the resources required for a
functional VMware environment. Instances in an SDDC
(see :class:`EsxiHost`) run in a virtual cloud network (VCN)
and are preconfigured with VMware and storage. Use the vCenter utility to manage
and deploy VMware virtual machines (VMs) in the SDDC.
The SDDC uses a single management subnet for provisioning the SDDC. It also uses a
set of VLANs for various components of the VMware environment (vSphere, vMotion,
vSAN, and so on). See the Core Services API for information about VCN subnets and VLANs.
__ https://docs.cloud.oracle.com/iaas/Content/VMware/Concepts/ocvsoverview.htm
"""
#: A constant which can be used with the initial_sku property of a Sddc.
#: This constant has a value of "HOUR"
INITIAL_SKU_HOUR = "HOUR"
#: A constant which can be used with the initial_sku property of a Sddc.
#: This constant has a value of "MONTH"
INITIAL_SKU_MONTH = "MONTH"
#: A constant which can be used with the initial_sku property of a Sddc.
#: This constant has a value of "ONE_YEAR"
INITIAL_SKU_ONE_YEAR = "ONE_YEAR"
#: A constant which can be used with the initial_sku property of a Sddc.
#: This constant has a value of "THREE_YEARS"
INITIAL_SKU_THREE_YEARS = "THREE_YEARS"
#: A constant which can be used with the lifecycle_state property of a Sddc.
#: This constant has a value of "CREATING"
LIFECYCLE_STATE_CREATING = "CREATING"
#: A constant which can be used with the lifecycle_state property of a Sddc.
#: This constant has a value of "UPDATING"
LIFECYCLE_STATE_UPDATING = "UPDATING"
#: A constant which can be used with the lifecycle_state property of a Sddc.
#: This constant has a value of "ACTIVE"
LIFECYCLE_STATE_ACTIVE = "ACTIVE"
#: A constant which can be used with the lifecycle_state property of a Sddc.
#: This constant has a value of "DELETING"
LIFECYCLE_STATE_DELETING = "DELETING"
#: A constant which can be used with the lifecycle_state property of a Sddc.
#: This constant has a value of "DELETED"
LIFECYCLE_STATE_DELETED = "DELETED"
#: A constant which can be used with the lifecycle_state property of a Sddc.
#: This constant has a value of "FAILED"
LIFECYCLE_STATE_FAILED = "FAILED"
def __init__(self, **kwargs):
"""
Initializes a new Sddc object with values from keyword arguments.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param id:
The value to assign to the id property of this Sddc.
:type id: str
:param compute_availability_domain:
The value to assign to the compute_availability_domain property of this Sddc.
:type compute_availability_domain: str
:param display_name:
The value to assign to the display_name property of this Sddc.
:type display_name: str
:param instance_display_name_prefix:
The value to assign to the instance_display_name_prefix property of this Sddc.
:type instance_display_name_prefix: str
:param vmware_software_version:
The value to assign to the vmware_software_version property of this Sddc.
:type vmware_software_version: str
:param compartment_id:
The value to assign to the compartment_id property of this Sddc.
:type compartment_id: str
:param esxi_hosts_count:
The value to assign to the esxi_hosts_count property of this Sddc.
:type esxi_hosts_count: int
:param initial_sku:
The value to assign to the initial_sku property of this Sddc.
Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type initial_sku: str
:param vcenter_fqdn:
The value to assign to the vcenter_fqdn property of this Sddc.
:type vcenter_fqdn: str
:param nsx_manager_fqdn:
The value to assign to the nsx_manager_fqdn property of this Sddc.
:type nsx_manager_fqdn: str
:param vcenter_private_ip_id:
The value to assign to the vcenter_private_ip_id property of this Sddc.
:type vcenter_private_ip_id: str
:param nsx_manager_private_ip_id:
The value to assign to the nsx_manager_private_ip_id property of this Sddc.
:type nsx_manager_private_ip_id: str
:param vcenter_initial_password:
The value to assign to the vcenter_initial_password property of this Sddc.
:type vcenter_initial_password: str
:param nsx_manager_initial_password:
The value to assign to the nsx_manager_initial_password property of this Sddc.
:type nsx_manager_initial_password: str
:param vcenter_username:
The value to assign to the vcenter_username property of this Sddc.
:type vcenter_username: str
:param nsx_manager_username:
The value to assign to the nsx_manager_username property of this Sddc.
:type nsx_manager_username: str
:param ssh_authorized_keys:
The value to assign to the ssh_authorized_keys property of this Sddc.
:type ssh_authorized_keys: str
:param workload_network_cidr:
The value to assign to the workload_network_cidr property of this Sddc.
:type workload_network_cidr: str
:param nsx_overlay_segment_name:
The value to assign to the nsx_overlay_segment_name property of this Sddc.
:type nsx_overlay_segment_name: str
:param nsx_edge_uplink_ip_id:
The value to assign to the nsx_edge_uplink_ip_id property of this Sddc.
:type nsx_edge_uplink_ip_id: str
:param provisioning_subnet_id:
The value to assign to the provisioning_subnet_id property of this Sddc.
:type provisioning_subnet_id: str
:param vsphere_vlan_id:
The value to assign to the vsphere_vlan_id property of this Sddc.
:type vsphere_vlan_id: str
:param vmotion_vlan_id:
The value to assign to the vmotion_vlan_id property of this Sddc.
:type vmotion_vlan_id: str
:param vsan_vlan_id:
The value to assign to the vsan_vlan_id property of this Sddc.
:type vsan_vlan_id: str
:param nsx_v_tep_vlan_id:
The value to assign to the nsx_v_tep_vlan_id property of this Sddc.
:type nsx_v_tep_vlan_id: str
:param nsx_edge_v_tep_vlan_id:
The value to assign to the nsx_edge_v_tep_vlan_id property of this Sddc.
:type nsx_edge_v_tep_vlan_id: str
:param nsx_edge_uplink1_vlan_id:
The value to assign to the nsx_edge_uplink1_vlan_id property of this Sddc.
:type nsx_edge_uplink1_vlan_id: str
:param nsx_edge_uplink2_vlan_id:
The value to assign to the nsx_edge_uplink2_vlan_id property of this Sddc.
:type nsx_edge_uplink2_vlan_id: str
:param replication_vlan_id:
The value to assign to the replication_vlan_id property of this Sddc.
:type replication_vlan_id: str
:param provisioning_vlan_id:
The value to assign to the provisioning_vlan_id property of this Sddc.
:type provisioning_vlan_id: str
:param hcx_private_ip_id:
The value to assign to the hcx_private_ip_id property of this Sddc.
:type hcx_private_ip_id: str
:param hcx_fqdn:
The value to assign to the hcx_fqdn property of this Sddc.
:type hcx_fqdn: str
:param hcx_initial_password:
The value to assign to the hcx_initial_password property of this Sddc.
:type hcx_initial_password: str
:param hcx_vlan_id:
The value to assign to the hcx_vlan_id property of this Sddc.
:type hcx_vlan_id: str
:param is_hcx_enabled:
The value to assign to the is_hcx_enabled property of this Sddc.
:type is_hcx_enabled: bool
:param hcx_on_prem_key:
The value to assign to the hcx_on_prem_key property of this Sddc.
:type hcx_on_prem_key: str
:param is_hcx_enterprise_enabled:
The value to assign to the is_hcx_enterprise_enabled property of this Sddc.
:type is_hcx_enterprise_enabled: bool
:param is_hcx_pending_downgrade:
The value to assign to the is_hcx_pending_downgrade property of this Sddc.
:type is_hcx_pending_downgrade: bool
:param hcx_on_prem_licenses:
The value to assign to the hcx_on_prem_licenses property of this Sddc.
:type hcx_on_prem_licenses: list[oci.ocvp.models.HcxLicenseSummary]
:param time_hcx_billing_cycle_end:
The value to assign to the time_hcx_billing_cycle_end property of this Sddc.
:type time_hcx_billing_cycle_end: datetime
:param time_hcx_license_status_updated:
The value to assign to the time_hcx_license_status_updated property of this Sddc.
:type time_hcx_license_status_updated: datetime
:param time_created:
The value to assign to the time_created property of this Sddc.
:type time_created: datetime
:param time_updated:
The value to assign to the time_updated property of this Sddc.
:type time_updated: datetime
:param lifecycle_state:
The value to assign to the lifecycle_state property of this Sddc.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:type lifecycle_state: str
:param initial_host_shape_name:
The value to assign to the initial_host_shape_name property of this Sddc.
:type initial_host_shape_name: str
:param initial_host_ocpu_count:
The value to assign to the initial_host_ocpu_count property of this Sddc.
:type initial_host_ocpu_count: float
:param is_shielded_instance_enabled:
The value to assign to the is_shielded_instance_enabled property of this Sddc.
:type is_shielded_instance_enabled: bool
:param capacity_reservation_id:
The value to assign to the capacity_reservation_id property of this Sddc.
:type capacity_reservation_id: str
:param freeform_tags:
The value to assign to the freeform_tags property of this Sddc.
:type freeform_tags: dict(str, str)
:param defined_tags:
The value to assign to the defined_tags property of this Sddc.
:type defined_tags: dict(str, dict(str, object))
"""
self.swagger_types = {
'id': 'str',
'compute_availability_domain': 'str',
'display_name': 'str',
'instance_display_name_prefix': 'str',
'vmware_software_version': 'str',
'compartment_id': 'str',
'esxi_hosts_count': 'int',
'initial_sku': 'str',
'vcenter_fqdn': 'str',
'nsx_manager_fqdn': 'str',
'vcenter_private_ip_id': 'str',
'nsx_manager_private_ip_id': 'str',
'vcenter_initial_password': 'str',
'nsx_manager_initial_password': 'str',
'vcenter_username': 'str',
'nsx_manager_username': 'str',
'ssh_authorized_keys': 'str',
'workload_network_cidr': 'str',
'nsx_overlay_segment_name': 'str',
'nsx_edge_uplink_ip_id': 'str',
'provisioning_subnet_id': 'str',
'vsphere_vlan_id': 'str',
'vmotion_vlan_id': 'str',
'vsan_vlan_id': 'str',
'nsx_v_tep_vlan_id': 'str',
'nsx_edge_v_tep_vlan_id': 'str',
'nsx_edge_uplink1_vlan_id': 'str',
'nsx_edge_uplink2_vlan_id': 'str',
'replication_vlan_id': 'str',
'provisioning_vlan_id': 'str',
'hcx_private_ip_id': 'str',
'hcx_fqdn': 'str',
'hcx_initial_password': 'str',
'hcx_vlan_id': 'str',
'is_hcx_enabled': 'bool',
'hcx_on_prem_key': 'str',
'is_hcx_enterprise_enabled': 'bool',
'is_hcx_pending_downgrade': 'bool',
'hcx_on_prem_licenses': 'list[HcxLicenseSummary]',
'time_hcx_billing_cycle_end': 'datetime',
'time_hcx_license_status_updated': 'datetime',
'time_created': 'datetime',
'time_updated': 'datetime',
'lifecycle_state': 'str',
'initial_host_shape_name': 'str',
'initial_host_ocpu_count': 'float',
'is_shielded_instance_enabled': 'bool',
'capacity_reservation_id': 'str',
'freeform_tags': 'dict(str, str)',
'defined_tags': 'dict(str, dict(str, object))'
}
self.attribute_map = {
'id': 'id',
'compute_availability_domain': 'computeAvailabilityDomain',
'display_name': 'displayName',
'instance_display_name_prefix': 'instanceDisplayNamePrefix',
'vmware_software_version': 'vmwareSoftwareVersion',
'compartment_id': 'compartmentId',
'esxi_hosts_count': 'esxiHostsCount',
'initial_sku': 'initialSku',
'vcenter_fqdn': 'vcenterFqdn',
'nsx_manager_fqdn': 'nsxManagerFqdn',
'vcenter_private_ip_id': 'vcenterPrivateIpId',
'nsx_manager_private_ip_id': 'nsxManagerPrivateIpId',
'vcenter_initial_password': 'vcenterInitialPassword',
'nsx_manager_initial_password': 'nsxManagerInitialPassword',
'vcenter_username': 'vcenterUsername',
'nsx_manager_username': 'nsxManagerUsername',
'ssh_authorized_keys': 'sshAuthorizedKeys',
'workload_network_cidr': 'workloadNetworkCidr',
'nsx_overlay_segment_name': 'nsxOverlaySegmentName',
'nsx_edge_uplink_ip_id': 'nsxEdgeUplinkIpId',
'provisioning_subnet_id': 'provisioningSubnetId',
'vsphere_vlan_id': 'vsphereVlanId',
'vmotion_vlan_id': 'vmotionVlanId',
'vsan_vlan_id': 'vsanVlanId',
'nsx_v_tep_vlan_id': 'nsxVTepVlanId',
'nsx_edge_v_tep_vlan_id': 'nsxEdgeVTepVlanId',
'nsx_edge_uplink1_vlan_id': 'nsxEdgeUplink1VlanId',
'nsx_edge_uplink2_vlan_id': 'nsxEdgeUplink2VlanId',
'replication_vlan_id': 'replicationVlanId',
'provisioning_vlan_id': 'provisioningVlanId',
'hcx_private_ip_id': 'hcxPrivateIpId',
'hcx_fqdn': 'hcxFqdn',
'hcx_initial_password': 'hcxInitialPassword',
'hcx_vlan_id': 'hcxVlanId',
'is_hcx_enabled': 'isHcxEnabled',
'hcx_on_prem_key': 'hcxOnPremKey',
'is_hcx_enterprise_enabled': 'isHcxEnterpriseEnabled',
'is_hcx_pending_downgrade': 'isHcxPendingDowngrade',
'hcx_on_prem_licenses': 'hcxOnPremLicenses',
'time_hcx_billing_cycle_end': 'timeHcxBillingCycleEnd',
'time_hcx_license_status_updated': 'timeHcxLicenseStatusUpdated',
'time_created': 'timeCreated',
'time_updated': 'timeUpdated',
'lifecycle_state': 'lifecycleState',
'initial_host_shape_name': 'initialHostShapeName',
'initial_host_ocpu_count': 'initialHostOcpuCount',
'is_shielded_instance_enabled': 'isShieldedInstanceEnabled',
'capacity_reservation_id': 'capacityReservationId',
'freeform_tags': 'freeformTags',
'defined_tags': 'definedTags'
}
self._id = None
self._compute_availability_domain = None
self._display_name = None
self._instance_display_name_prefix = None
self._vmware_software_version = None
self._compartment_id = None
self._esxi_hosts_count = None
self._initial_sku = None
self._vcenter_fqdn = None
self._nsx_manager_fqdn = None
self._vcenter_private_ip_id = None
self._nsx_manager_private_ip_id = None
self._vcenter_initial_password = None
self._nsx_manager_initial_password = None
self._vcenter_username = None
self._nsx_manager_username = None
self._ssh_authorized_keys = None
self._workload_network_cidr = None
self._nsx_overlay_segment_name = None
self._nsx_edge_uplink_ip_id = None
self._provisioning_subnet_id = None
self._vsphere_vlan_id = None
self._vmotion_vlan_id = None
self._vsan_vlan_id = None
self._nsx_v_tep_vlan_id = None
self._nsx_edge_v_tep_vlan_id = None
self._nsx_edge_uplink1_vlan_id = None
self._nsx_edge_uplink2_vlan_id = None
self._replication_vlan_id = None
self._provisioning_vlan_id = None
self._hcx_private_ip_id = None
self._hcx_fqdn = None
self._hcx_initial_password = None
self._hcx_vlan_id = None
self._is_hcx_enabled = None
self._hcx_on_prem_key = None
self._is_hcx_enterprise_enabled = None
self._is_hcx_pending_downgrade = None
self._hcx_on_prem_licenses = None
self._time_hcx_billing_cycle_end = None
self._time_hcx_license_status_updated = None
self._time_created = None
self._time_updated = None
self._lifecycle_state = None
self._initial_host_shape_name = None
self._initial_host_ocpu_count = None
self._is_shielded_instance_enabled = None
self._capacity_reservation_id = None
self._freeform_tags = None
self._defined_tags = None
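        # Note: the keyword arguments documented above correspond to the property
        # setters defined below; this constructor itself only initializes every
        # backing field to None. Values are populated through those setters (for
        # example, by the SDK's response deserializer).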
@property
def id(self):
"""
**[Required]** Gets the id of this Sddc.
The `OCID`__ of the SDDC.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The id of this Sddc.
:rtype: str
"""
return self._id
@id.setter
def id(self, id):
"""
Sets the id of this Sddc.
The `OCID`__ of the SDDC.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param id: The id of this Sddc.
:type: str
"""
self._id = id
@property
def compute_availability_domain(self):
"""
**[Required]** Gets the compute_availability_domain of this Sddc.
The availability domain the ESXi hosts are running in. For Multi-AD SDDC, it is `multi-AD`.
Example: `Uocm:PHX-AD-1`, `multi-AD`
:return: The compute_availability_domain of this Sddc.
:rtype: str
"""
return self._compute_availability_domain
@compute_availability_domain.setter
def compute_availability_domain(self, compute_availability_domain):
"""
Sets the compute_availability_domain of this Sddc.
The availability domain the ESXi hosts are running in. For Multi-AD SDDC, it is `multi-AD`.
Example: `Uocm:PHX-AD-1`, `multi-AD`
:param compute_availability_domain: The compute_availability_domain of this Sddc.
:type: str
"""
self._compute_availability_domain = compute_availability_domain
@property
def display_name(self):
"""
**[Required]** Gets the display_name of this Sddc.
A descriptive name for the SDDC. It must be unique, start with a letter, and contain only letters, digits,
whitespaces, dashes and underscores.
Avoid entering confidential information.
:return: The display_name of this Sddc.
:rtype: str
"""
return self._display_name
@display_name.setter
def display_name(self, display_name):
"""
Sets the display_name of this Sddc.
A descriptive name for the SDDC. It must be unique, start with a letter, and contain only letters, digits,
whitespaces, dashes and underscores.
Avoid entering confidential information.
:param display_name: The display_name of this Sddc.
:type: str
"""
self._display_name = display_name
@property
def instance_display_name_prefix(self):
"""
Gets the instance_display_name_prefix of this Sddc.
A prefix used in the name of each ESXi host and Compute instance in the SDDC.
If this isn't set, the SDDC's `displayName` is used as the prefix.
For example, if the value is `MySDDC`, the ESXi hosts are named `MySDDC-1`,
`MySDDC-2`, and so on.
:return: The instance_display_name_prefix of this Sddc.
:rtype: str
"""
return self._instance_display_name_prefix
@instance_display_name_prefix.setter
def instance_display_name_prefix(self, instance_display_name_prefix):
"""
Sets the instance_display_name_prefix of this Sddc.
A prefix used in the name of each ESXi host and Compute instance in the SDDC.
If this isn't set, the SDDC's `displayName` is used as the prefix.
For example, if the value is `MySDDC`, the ESXi hosts are named `MySDDC-1`,
`MySDDC-2`, and so on.
:param instance_display_name_prefix: The instance_display_name_prefix of this Sddc.
:type: str
"""
self._instance_display_name_prefix = instance_display_name_prefix
@property
def vmware_software_version(self):
"""
**[Required]** Gets the vmware_software_version of this Sddc.
In general, this is a specific version of bundled VMware software supported by
Oracle Cloud VMware Solution (see
:func:`list_supported_vmware_software_versions`).
This attribute is not guaranteed to reflect the version of
software currently installed on the ESXi hosts in the SDDC. The purpose
of this attribute is to show the version of software that the Oracle
Cloud VMware Solution will install on any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you upgrade the existing ESXi hosts in the SDDC to use a newer
version of bundled VMware software supported by the Oracle Cloud VMware Solution, you
should use :func:`update_sddc` to update the SDDC's
`vmwareSoftwareVersion` with that new version.
:return: The vmware_software_version of this Sddc.
:rtype: str
"""
return self._vmware_software_version
@vmware_software_version.setter
def vmware_software_version(self, vmware_software_version):
"""
Sets the vmware_software_version of this Sddc.
In general, this is a specific version of bundled VMware software supported by
Oracle Cloud VMware Solution (see
:func:`list_supported_vmware_software_versions`).
This attribute is not guaranteed to reflect the version of
software currently installed on the ESXi hosts in the SDDC. The purpose
of this attribute is to show the version of software that the Oracle
Cloud VMware Solution will install on any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you upgrade the existing ESXi hosts in the SDDC to use a newer
version of bundled VMware software supported by the Oracle Cloud VMware Solution, you
should use :func:`update_sddc` to update the SDDC's
`vmwareSoftwareVersion` with that new version.
:param vmware_software_version: The vmware_software_version of this Sddc.
:type: str
"""
self._vmware_software_version = vmware_software_version
@property
def compartment_id(self):
"""
**[Required]** Gets the compartment_id of this Sddc.
The `OCID`__ of the compartment that
contains the SDDC.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The compartment_id of this Sddc.
:rtype: str
"""
return self._compartment_id
@compartment_id.setter
def compartment_id(self, compartment_id):
"""
Sets the compartment_id of this Sddc.
The `OCID`__ of the compartment that
contains the SDDC.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param compartment_id: The compartment_id of this Sddc.
:type: str
"""
self._compartment_id = compartment_id
@property
def esxi_hosts_count(self):
"""
**[Required]** Gets the esxi_hosts_count of this Sddc.
The number of ESXi hosts in the SDDC.
:return: The esxi_hosts_count of this Sddc.
:rtype: int
"""
return self._esxi_hosts_count
@esxi_hosts_count.setter
def esxi_hosts_count(self, esxi_hosts_count):
"""
Sets the esxi_hosts_count of this Sddc.
The number of ESXi hosts in the SDDC.
:param esxi_hosts_count: The esxi_hosts_count of this Sddc.
:type: int
"""
self._esxi_hosts_count = esxi_hosts_count
@property
def initial_sku(self):
"""
Gets the initial_sku of this Sddc.
The billing option selected during SDDC creation.
See :func:`list_supported_skus`.
Allowed values for this property are: "HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The initial_sku of this Sddc.
:rtype: str
"""
return self._initial_sku
@initial_sku.setter
def initial_sku(self, initial_sku):
"""
Sets the initial_sku of this Sddc.
The billing option selected during SDDC creation.
See :func:`list_supported_skus`.
:param initial_sku: The initial_sku of this Sddc.
:type: str
"""
allowed_values = ["HOUR", "MONTH", "ONE_YEAR", "THREE_YEARS"]
if not value_allowed_none_or_none_sentinel(initial_sku, allowed_values):
initial_sku = 'UNKNOWN_ENUM_VALUE'
self._initial_sku = initial_sku
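    # As with the other enum-backed attributes on this model, values outside
    # `allowed_values` are coerced to the 'UNKNOWN_ENUM_VALUE' sentinel instead
    # of raising, so SKUs added by the service later do not break older clients.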
@property
def vcenter_fqdn(self):
"""
**[Required]** Gets the vcenter_fqdn of this Sddc.
The FQDN for vCenter.
Example: `vcenter-my-sddc.sddc.us-phoenix-1.oraclecloud.com`
:return: The vcenter_fqdn of this Sddc.
:rtype: str
"""
return self._vcenter_fqdn
@vcenter_fqdn.setter
def vcenter_fqdn(self, vcenter_fqdn):
"""
Sets the vcenter_fqdn of this Sddc.
The FQDN for vCenter.
Example: `vcenter-my-sddc.sddc.us-phoenix-1.oraclecloud.com`
:param vcenter_fqdn: The vcenter_fqdn of this Sddc.
:type: str
"""
self._vcenter_fqdn = vcenter_fqdn
@property
def nsx_manager_fqdn(self):
"""
**[Required]** Gets the nsx_manager_fqdn of this Sddc.
The FQDN for NSX Manager.
Example: `nsx-my-sddc.sddc.us-phoenix-1.oraclecloud.com`
:return: The nsx_manager_fqdn of this Sddc.
:rtype: str
"""
return self._nsx_manager_fqdn
@nsx_manager_fqdn.setter
def nsx_manager_fqdn(self, nsx_manager_fqdn):
"""
Sets the nsx_manager_fqdn of this Sddc.
The FQDN for NSX Manager.
Example: `nsx-my-sddc.sddc.us-phoenix-1.oraclecloud.com`
:param nsx_manager_fqdn: The nsx_manager_fqdn of this Sddc.
:type: str
"""
self._nsx_manager_fqdn = nsx_manager_fqdn
@property
def vcenter_private_ip_id(self):
"""
**[Required]** Gets the vcenter_private_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for vCenter. For information about `PrivateIp` objects, see the
Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The vcenter_private_ip_id of this Sddc.
:rtype: str
"""
return self._vcenter_private_ip_id
@vcenter_private_ip_id.setter
def vcenter_private_ip_id(self, vcenter_private_ip_id):
"""
Sets the vcenter_private_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for vCenter. For information about `PrivateIp` objects, see the
Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param vcenter_private_ip_id: The vcenter_private_ip_id of this Sddc.
:type: str
"""
self._vcenter_private_ip_id = vcenter_private_ip_id
@property
def nsx_manager_private_ip_id(self):
"""
**[Required]** Gets the nsx_manager_private_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for NSX Manager. For information about `PrivateIp` objects, see the
Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The nsx_manager_private_ip_id of this Sddc.
:rtype: str
"""
return self._nsx_manager_private_ip_id
@nsx_manager_private_ip_id.setter
def nsx_manager_private_ip_id(self, nsx_manager_private_ip_id):
"""
Sets the nsx_manager_private_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for NSX Manager. For information about `PrivateIp` objects, see the
Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param nsx_manager_private_ip_id: The nsx_manager_private_ip_id of this Sddc.
:type: str
"""
self._nsx_manager_private_ip_id = nsx_manager_private_ip_id
@property
def vcenter_initial_password(self):
"""
Gets the vcenter_initial_password of this Sddc.
The SDDC includes an administrator username and initial password for vCenter. Make sure
to change this initial vCenter password to a different value.
:return: The vcenter_initial_password of this Sddc.
:rtype: str
"""
return self._vcenter_initial_password
@vcenter_initial_password.setter
def vcenter_initial_password(self, vcenter_initial_password):
"""
Sets the vcenter_initial_password of this Sddc.
The SDDC includes an administrator username and initial password for vCenter. Make sure
to change this initial vCenter password to a different value.
:param vcenter_initial_password: The vcenter_initial_password of this Sddc.
:type: str
"""
self._vcenter_initial_password = vcenter_initial_password
@property
def nsx_manager_initial_password(self):
"""
Gets the nsx_manager_initial_password of this Sddc.
The SDDC includes an administrator username and initial password for NSX Manager. Make sure
to change this initial NSX Manager password to a different value.
:return: The nsx_manager_initial_password of this Sddc.
:rtype: str
"""
return self._nsx_manager_initial_password
@nsx_manager_initial_password.setter
def nsx_manager_initial_password(self, nsx_manager_initial_password):
"""
Sets the nsx_manager_initial_password of this Sddc.
The SDDC includes an administrator username and initial password for NSX Manager. Make sure
to change this initial NSX Manager password to a different value.
:param nsx_manager_initial_password: The nsx_manager_initial_password of this Sddc.
:type: str
"""
self._nsx_manager_initial_password = nsx_manager_initial_password
@property
def vcenter_username(self):
"""
Gets the vcenter_username of this Sddc.
The SDDC includes an administrator username and initial password for vCenter. You can
change this initial username to a different value in vCenter.
:return: The vcenter_username of this Sddc.
:rtype: str
"""
return self._vcenter_username
@vcenter_username.setter
def vcenter_username(self, vcenter_username):
"""
Sets the vcenter_username of this Sddc.
The SDDC includes an administrator username and initial password for vCenter. You can
change this initial username to a different value in vCenter.
:param vcenter_username: The vcenter_username of this Sddc.
:type: str
"""
self._vcenter_username = vcenter_username
@property
def nsx_manager_username(self):
"""
Gets the nsx_manager_username of this Sddc.
The SDDC includes an administrator username and initial password for NSX Manager. You
can change this initial username to a different value in NSX Manager.
:return: The nsx_manager_username of this Sddc.
:rtype: str
"""
return self._nsx_manager_username
@nsx_manager_username.setter
def nsx_manager_username(self, nsx_manager_username):
"""
Sets the nsx_manager_username of this Sddc.
The SDDC includes an administrator username and initial password for NSX Manager. You
can change this initial username to a different value in NSX Manager.
:param nsx_manager_username: The nsx_manager_username of this Sddc.
:type: str
"""
self._nsx_manager_username = nsx_manager_username
@property
def ssh_authorized_keys(self):
"""
**[Required]** Gets the ssh_authorized_keys of this Sddc.
One or more public SSH keys to be included in the `~/.ssh/authorized_keys` file for
the default user on each ESXi host. Use a newline character to separate multiple keys.
The SSH keys must be in the format required for the `authorized_keys` file.
This attribute is not guaranteed to reflect the public SSH keys
currently installed on the ESXi hosts in the SDDC. The purpose
of this attribute is to show the public SSH keys that Oracle
Cloud VMware Solution will install on any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you upgrade the existing ESXi hosts in the SDDC to use different
SSH keys, you should use :func:`update_sddc` to update
the SDDC's `sshAuthorizedKeys` with the new public keys.
:return: The ssh_authorized_keys of this Sddc.
:rtype: str
"""
return self._ssh_authorized_keys
@ssh_authorized_keys.setter
def ssh_authorized_keys(self, ssh_authorized_keys):
"""
Sets the ssh_authorized_keys of this Sddc.
One or more public SSH keys to be included in the `~/.ssh/authorized_keys` file for
the default user on each ESXi host. Use a newline character to separate multiple keys.
The SSH keys must be in the format required for the `authorized_keys` file.
This attribute is not guaranteed to reflect the public SSH keys
currently installed on the ESXi hosts in the SDDC. The purpose
of this attribute is to show the public SSH keys that Oracle
Cloud VMware Solution will install on any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you upgrade the existing ESXi hosts in the SDDC to use different
SSH keys, you should use :func:`update_sddc` to update
the SDDC's `sshAuthorizedKeys` with the new public keys.
:param ssh_authorized_keys: The ssh_authorized_keys of this Sddc.
:type: str
"""
self._ssh_authorized_keys = ssh_authorized_keys
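    # Illustrative format only: multiple keys are newline-separated entries in
    # standard authorized_keys format, e.g.
    #   "ssh-rsa AAAAB3... admin@example.com\nssh-rsa AAAAB3... ops@example.com"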
@property
def workload_network_cidr(self):
"""
Gets the workload_network_cidr of this Sddc.
The CIDR block for the IP addresses that VMware VMs in the SDDC use to run application
workloads.
:return: The workload_network_cidr of this Sddc.
:rtype: str
"""
return self._workload_network_cidr
@workload_network_cidr.setter
def workload_network_cidr(self, workload_network_cidr):
"""
Sets the workload_network_cidr of this Sddc.
The CIDR block for the IP addresses that VMware VMs in the SDDC use to run application
workloads.
:param workload_network_cidr: The workload_network_cidr of this Sddc.
:type: str
"""
self._workload_network_cidr = workload_network_cidr
@property
def nsx_overlay_segment_name(self):
"""
Gets the nsx_overlay_segment_name of this Sddc.
The VMware NSX overlay workload segment to host your application. Connect to workload
portgroup in vCenter to access this overlay segment.
:return: The nsx_overlay_segment_name of this Sddc.
:rtype: str
"""
return self._nsx_overlay_segment_name
@nsx_overlay_segment_name.setter
def nsx_overlay_segment_name(self, nsx_overlay_segment_name):
"""
Sets the nsx_overlay_segment_name of this Sddc.
The VMware NSX overlay workload segment to host your application. Connect to workload
portgroup in vCenter to access this overlay segment.
:param nsx_overlay_segment_name: The nsx_overlay_segment_name of this Sddc.
:type: str
"""
self._nsx_overlay_segment_name = nsx_overlay_segment_name
@property
def nsx_edge_uplink_ip_id(self):
"""
Gets the nsx_edge_uplink_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for the NSX Edge Uplink. Use this OCID as the route target for
route table rules when setting up connectivity between the SDDC and other networks.
For information about `PrivateIp` objects, see the Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The nsx_edge_uplink_ip_id of this Sddc.
:rtype: str
"""
return self._nsx_edge_uplink_ip_id
@nsx_edge_uplink_ip_id.setter
def nsx_edge_uplink_ip_id(self, nsx_edge_uplink_ip_id):
"""
Sets the nsx_edge_uplink_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for the NSX Edge Uplink. Use this OCID as the route target for
route table rules when setting up connectivity between the SDDC and other networks.
For information about `PrivateIp` objects, see the Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param nsx_edge_uplink_ip_id: The nsx_edge_uplink_ip_id of this Sddc.
:type: str
"""
self._nsx_edge_uplink_ip_id = nsx_edge_uplink_ip_id
@property
def provisioning_subnet_id(self):
"""
**[Required]** Gets the provisioning_subnet_id of this Sddc.
The `OCID`__ of the management subnet used
to provision the SDDC.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The provisioning_subnet_id of this Sddc.
:rtype: str
"""
return self._provisioning_subnet_id
@provisioning_subnet_id.setter
def provisioning_subnet_id(self, provisioning_subnet_id):
"""
Sets the provisioning_subnet_id of this Sddc.
The `OCID`__ of the management subnet used
to provision the SDDC.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param provisioning_subnet_id: The provisioning_subnet_id of this Sddc.
:type: str
"""
self._provisioning_subnet_id = provisioning_subnet_id
@property
def vsphere_vlan_id(self):
"""
**[Required]** Gets the vsphere_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vSphere component of the VMware environment.
This attribute is not guaranteed to reflect the vSphere VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the vSphere VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the vSphere component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`vsphereVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The vsphere_vlan_id of this Sddc.
:rtype: str
"""
return self._vsphere_vlan_id
@vsphere_vlan_id.setter
def vsphere_vlan_id(self, vsphere_vlan_id):
"""
Sets the vsphere_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vSphere component of the VMware environment.
This attribute is not guaranteed to reflect the vSphere VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the vSphere VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the vSphere component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`vsphereVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param vsphere_vlan_id: The vsphere_vlan_id of this Sddc.
:type: str
"""
self._vsphere_vlan_id = vsphere_vlan_id
@property
def vmotion_vlan_id(self):
"""
**[Required]** Gets the vmotion_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vMotion component of the VMware environment.
This attribute is not guaranteed to reflect the vMotion VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the vMotion VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the vMotion component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`vmotionVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The vmotion_vlan_id of this Sddc.
:rtype: str
"""
return self._vmotion_vlan_id
@vmotion_vlan_id.setter
def vmotion_vlan_id(self, vmotion_vlan_id):
"""
Sets the vmotion_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vMotion component of the VMware environment.
This attribute is not guaranteed to reflect the vMotion VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the vMotion VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the vMotion component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`vmotionVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param vmotion_vlan_id: The vmotion_vlan_id of this Sddc.
:type: str
"""
self._vmotion_vlan_id = vmotion_vlan_id
@property
def vsan_vlan_id(self):
"""
**[Required]** Gets the vsan_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vSAN component of the VMware environment.
This attribute is not guaranteed to reflect the vSAN VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the vSAN VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the vSAN component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`vsanVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The vsan_vlan_id of this Sddc.
:rtype: str
"""
return self._vsan_vlan_id
@vsan_vlan_id.setter
def vsan_vlan_id(self, vsan_vlan_id):
"""
Sets the vsan_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vSAN component of the VMware environment.
This attribute is not guaranteed to reflect the vSAN VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the vSAN VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the vSAN component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`vsanVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param vsan_vlan_id: The vsan_vlan_id of this Sddc.
:type: str
"""
self._vsan_vlan_id = vsan_vlan_id
@property
def nsx_v_tep_vlan_id(self):
"""
**[Required]** Gets the nsx_v_tep_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX VTEP component of the VMware environment.
This attribute is not guaranteed to reflect the NSX VTEP VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX VTEP VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX VTEP component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxVTepVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The nsx_v_tep_vlan_id of this Sddc.
:rtype: str
"""
return self._nsx_v_tep_vlan_id
@nsx_v_tep_vlan_id.setter
def nsx_v_tep_vlan_id(self, nsx_v_tep_vlan_id):
"""
Sets the nsx_v_tep_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX VTEP component of the VMware environment.
This attribute is not guaranteed to reflect the NSX VTEP VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX VTEP VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX VTEP component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxVTepVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param nsx_v_tep_vlan_id: The nsx_v_tep_vlan_id of this Sddc.
:type: str
"""
self._nsx_v_tep_vlan_id = nsx_v_tep_vlan_id
@property
def nsx_edge_v_tep_vlan_id(self):
"""
**[Required]** Gets the nsx_edge_v_tep_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX Edge VTEP component of the VMware environment.
This attribute is not guaranteed to reflect the NSX Edge VTEP VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX Edge VTEP VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX Edge VTEP component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxEdgeVTepVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The nsx_edge_v_tep_vlan_id of this Sddc.
:rtype: str
"""
return self._nsx_edge_v_tep_vlan_id
@nsx_edge_v_tep_vlan_id.setter
def nsx_edge_v_tep_vlan_id(self, nsx_edge_v_tep_vlan_id):
"""
Sets the nsx_edge_v_tep_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX Edge VTEP component of the VMware environment.
This attribute is not guaranteed to reflect the NSX Edge VTEP VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX Edge VTEP VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX Edge VTEP component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxEdgeVTepVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param nsx_edge_v_tep_vlan_id: The nsx_edge_v_tep_vlan_id of this Sddc.
:type: str
"""
self._nsx_edge_v_tep_vlan_id = nsx_edge_v_tep_vlan_id
@property
def nsx_edge_uplink1_vlan_id(self):
"""
**[Required]** Gets the nsx_edge_uplink1_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX Edge Uplink 1 component of the VMware environment.
This attribute is not guaranteed to reflect the NSX Edge Uplink 1 VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX Edge Uplink 1 VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX Edge Uplink 1 component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxEdgeUplink1VlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The nsx_edge_uplink1_vlan_id of this Sddc.
:rtype: str
"""
return self._nsx_edge_uplink1_vlan_id
@nsx_edge_uplink1_vlan_id.setter
def nsx_edge_uplink1_vlan_id(self, nsx_edge_uplink1_vlan_id):
"""
Sets the nsx_edge_uplink1_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX Edge Uplink 1 component of the VMware environment.
This attribute is not guaranteed to reflect the NSX Edge Uplink 1 VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX Edge Uplink 1 VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX Edge Uplink 1 component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxEdgeUplink1VlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param nsx_edge_uplink1_vlan_id: The nsx_edge_uplink1_vlan_id of this Sddc.
:type: str
"""
self._nsx_edge_uplink1_vlan_id = nsx_edge_uplink1_vlan_id
@property
def nsx_edge_uplink2_vlan_id(self):
"""
**[Required]** Gets the nsx_edge_uplink2_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX Edge Uplink 2 component of the VMware environment.
This attribute is not guaranteed to reflect the NSX Edge Uplink 2 VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX Edge Uplink 2 VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX Edge Uplink 2 component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxEdgeUplink2VlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The nsx_edge_uplink2_vlan_id of this Sddc.
:rtype: str
"""
return self._nsx_edge_uplink2_vlan_id
@nsx_edge_uplink2_vlan_id.setter
def nsx_edge_uplink2_vlan_id(self, nsx_edge_uplink2_vlan_id):
"""
Sets the nsx_edge_uplink2_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the NSX Edge Uplink 2 component of the VMware environment.
This attribute is not guaranteed to reflect the NSX Edge Uplink 2 VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the NSX Edge Uplink 2 VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the NSX Edge Uplink 2 component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`nsxEdgeUplink2VlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param nsx_edge_uplink2_vlan_id: The nsx_edge_uplink2_vlan_id of this Sddc.
:type: str
"""
self._nsx_edge_uplink2_vlan_id = nsx_edge_uplink2_vlan_id
@property
def replication_vlan_id(self):
"""
Gets the replication_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vSphere Replication component of the VMware environment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The replication_vlan_id of this Sddc.
:rtype: str
"""
return self._replication_vlan_id
@replication_vlan_id.setter
def replication_vlan_id(self, replication_vlan_id):
"""
Sets the replication_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the vSphere Replication component of the VMware environment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param replication_vlan_id: The replication_vlan_id of this Sddc.
:type: str
"""
self._replication_vlan_id = replication_vlan_id
@property
def provisioning_vlan_id(self):
"""
Gets the provisioning_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the Provisioning component of the VMware environment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The provisioning_vlan_id of this Sddc.
:rtype: str
"""
return self._provisioning_vlan_id
@provisioning_vlan_id.setter
def provisioning_vlan_id(self, provisioning_vlan_id):
"""
Sets the provisioning_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the Provisioning component of the VMware environment.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param provisioning_vlan_id: The provisioning_vlan_id of this Sddc.
:type: str
"""
self._provisioning_vlan_id = provisioning_vlan_id
@property
def hcx_private_ip_id(self):
"""
Gets the hcx_private_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for HCX Manager. For information about `PrivateIp` objects, see the
Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The hcx_private_ip_id of this Sddc.
:rtype: str
"""
return self._hcx_private_ip_id
@hcx_private_ip_id.setter
def hcx_private_ip_id(self, hcx_private_ip_id):
"""
Sets the hcx_private_ip_id of this Sddc.
The `OCID`__ of the `PrivateIp` object that is
the virtual IP (VIP) for HCX Manager. For information about `PrivateIp` objects, see the
Core Services API.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param hcx_private_ip_id: The hcx_private_ip_id of this Sddc.
:type: str
"""
self._hcx_private_ip_id = hcx_private_ip_id
@property
def hcx_fqdn(self):
"""
Gets the hcx_fqdn of this Sddc.
The FQDN for HCX Manager.
Example: `hcx-my-sddc.sddc.us-phoenix-1.oraclecloud.com`
:return: The hcx_fqdn of this Sddc.
:rtype: str
"""
return self._hcx_fqdn
@hcx_fqdn.setter
def hcx_fqdn(self, hcx_fqdn):
"""
Sets the hcx_fqdn of this Sddc.
The FQDN for HCX Manager.
Example: `hcx-my-sddc.sddc.us-phoenix-1.oraclecloud.com`
:param hcx_fqdn: The hcx_fqdn of this Sddc.
:type: str
"""
self._hcx_fqdn = hcx_fqdn
@property
def hcx_initial_password(self):
"""
Gets the hcx_initial_password of this Sddc.
The SDDC includes an administrator username and initial password for HCX Manager. Make sure
to change this initial HCX Manager password to a different value.
:return: The hcx_initial_password of this Sddc.
:rtype: str
"""
return self._hcx_initial_password
@hcx_initial_password.setter
def hcx_initial_password(self, hcx_initial_password):
"""
Sets the hcx_initial_password of this Sddc.
The SDDC includes an administrator username and initial password for HCX Manager. Make sure
to change this initial HCX Manager password to a different value.
:param hcx_initial_password: The hcx_initial_password of this Sddc.
:type: str
"""
self._hcx_initial_password = hcx_initial_password
@property
def hcx_vlan_id(self):
"""
Gets the hcx_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the HCX component of the VMware environment.
This attribute is not guaranteed to reflect the HCX VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the HCX VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the HCX component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`hcxVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The hcx_vlan_id of this Sddc.
:rtype: str
"""
return self._hcx_vlan_id
@hcx_vlan_id.setter
def hcx_vlan_id(self, hcx_vlan_id):
"""
Sets the hcx_vlan_id of this Sddc.
The `OCID`__ of the VLAN used by the SDDC
for the HCX component of the VMware environment.
This attribute is not guaranteed to reflect the HCX VLAN
currently used by the ESXi hosts in the SDDC. The purpose
of this attribute is to show the HCX VLAN that the Oracle
Cloud VMware Solution will use for any new ESXi hosts that you *add to this
SDDC in the future* with :func:`create_esxi_host`.
Therefore, if you change the existing ESXi hosts in the SDDC to use a different VLAN
for the HCX component of the VMware environment, you
should use :func:`update_sddc` to update the SDDC's
`hcxVlanId` with that new VLAN's OCID.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param hcx_vlan_id: The hcx_vlan_id of this Sddc.
:type: str
"""
self._hcx_vlan_id = hcx_vlan_id
@property
def is_hcx_enabled(self):
"""
Gets the is_hcx_enabled of this Sddc.
Indicates whether HCX is enabled for this SDDC.
:return: The is_hcx_enabled of this Sddc.
:rtype: bool
"""
return self._is_hcx_enabled
@is_hcx_enabled.setter
def is_hcx_enabled(self, is_hcx_enabled):
"""
Sets the is_hcx_enabled of this Sddc.
Indicates whether HCX is enabled for this SDDC.
:param is_hcx_enabled: The is_hcx_enabled of this Sddc.
:type: bool
"""
self._is_hcx_enabled = is_hcx_enabled
@property
def hcx_on_prem_key(self):
"""
Gets the hcx_on_prem_key of this Sddc.
The activation keys to use on the on-premises HCX Enterprise appliances you site pair with HCX Manager in your VMware Solution.
The number of keys provided depends on the HCX license type. HCX Advanced provides 3 activation keys.
HCX Enterprise provides 10 activation keys.
:return: The hcx_on_prem_key of this Sddc.
:rtype: str
"""
return self._hcx_on_prem_key
@hcx_on_prem_key.setter
def hcx_on_prem_key(self, hcx_on_prem_key):
"""
Sets the hcx_on_prem_key of this Sddc.
The activation keys to use on the on-premises HCX Enterprise appliances you site pair with HCX Manager in your VMware Solution.
The number of keys provided depends on the HCX license type. HCX Advanced provides 3 activation keys.
HCX Enterprise provides 10 activation keys.
:param hcx_on_prem_key: The hcx_on_prem_key of this Sddc.
:type: str
"""
self._hcx_on_prem_key = hcx_on_prem_key
@property
def is_hcx_enterprise_enabled(self):
"""
Gets the is_hcx_enterprise_enabled of this Sddc.
Indicates whether HCX Enterprise is enabled for this SDDC.
:return: The is_hcx_enterprise_enabled of this Sddc.
:rtype: bool
"""
return self._is_hcx_enterprise_enabled
@is_hcx_enterprise_enabled.setter
def is_hcx_enterprise_enabled(self, is_hcx_enterprise_enabled):
"""
Sets the is_hcx_enterprise_enabled of this Sddc.
Indicates whether HCX Enterprise is enabled for this SDDC.
:param is_hcx_enterprise_enabled: The is_hcx_enterprise_enabled of this Sddc.
:type: bool
"""
self._is_hcx_enterprise_enabled = is_hcx_enterprise_enabled
@property
def is_hcx_pending_downgrade(self):
"""
Gets the is_hcx_pending_downgrade of this Sddc.
Indicates whether SDDC is pending downgrade from HCX Enterprise to HCX Advanced.
:return: The is_hcx_pending_downgrade of this Sddc.
:rtype: bool
"""
return self._is_hcx_pending_downgrade
@is_hcx_pending_downgrade.setter
def is_hcx_pending_downgrade(self, is_hcx_pending_downgrade):
"""
Sets the is_hcx_pending_downgrade of this Sddc.
Indicates whether SDDC is pending downgrade from HCX Enterprise to HCX Advanced.
:param is_hcx_pending_downgrade: The is_hcx_pending_downgrade of this Sddc.
:type: bool
"""
self._is_hcx_pending_downgrade = is_hcx_pending_downgrade
@property
def hcx_on_prem_licenses(self):
"""
Gets the hcx_on_prem_licenses of this Sddc.
The activation licenses to use on the on-premises HCX Enterprise appliance you site pair with HCX Manager in your VMware Solution.
:return: The hcx_on_prem_licenses of this Sddc.
:rtype: list[oci.ocvp.models.HcxLicenseSummary]
"""
return self._hcx_on_prem_licenses
@hcx_on_prem_licenses.setter
def hcx_on_prem_licenses(self, hcx_on_prem_licenses):
"""
Sets the hcx_on_prem_licenses of this Sddc.
The activation licenses to use on the on-premises HCX Enterprise appliance you site pair with HCX Manager in your VMware Solution.
:param hcx_on_prem_licenses: The hcx_on_prem_licenses of this Sddc.
:type: list[oci.ocvp.models.HcxLicenseSummary]
"""
self._hcx_on_prem_licenses = hcx_on_prem_licenses
@property
def time_hcx_billing_cycle_end(self):
"""
Gets the time_hcx_billing_cycle_end of this Sddc.
The date and time current HCX Enterprise billing cycle ends, in the format defined by `RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_hcx_billing_cycle_end of this Sddc.
:rtype: datetime
"""
return self._time_hcx_billing_cycle_end
@time_hcx_billing_cycle_end.setter
def time_hcx_billing_cycle_end(self, time_hcx_billing_cycle_end):
"""
Sets the time_hcx_billing_cycle_end of this Sddc.
The date and time current HCX Enterprise billing cycle ends, in the format defined by `RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_hcx_billing_cycle_end: The time_hcx_billing_cycle_end of this Sddc.
:type: datetime
"""
self._time_hcx_billing_cycle_end = time_hcx_billing_cycle_end
@property
def time_hcx_license_status_updated(self):
"""
Gets the time_hcx_license_status_updated of this Sddc.
The date and time the SDDC's HCX on-premises license status was updated, in the format defined by
`RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_hcx_license_status_updated of this Sddc.
:rtype: datetime
"""
return self._time_hcx_license_status_updated
@time_hcx_license_status_updated.setter
def time_hcx_license_status_updated(self, time_hcx_license_status_updated):
"""
Sets the time_hcx_license_status_updated of this Sddc.
The date and time the SDDC's HCX on-premises license status was updated, in the format defined by
`RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_hcx_license_status_updated: The time_hcx_license_status_updated of this Sddc.
:type: datetime
"""
self._time_hcx_license_status_updated = time_hcx_license_status_updated
@property
def time_created(self):
"""
**[Required]** Gets the time_created of this Sddc.
The date and time the SDDC was created, in the format defined by
`RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:return: The time_created of this Sddc.
:rtype: datetime
"""
return self._time_created
@time_created.setter
def time_created(self, time_created):
"""
Sets the time_created of this Sddc.
The date and time the SDDC was created, in the format defined by
`RFC3339`__.
Example: `2016-08-25T21:10:29.600Z`
__ https://tools.ietf.org/html/rfc3339
:param time_created: The time_created of this Sddc.
:type: datetime
"""
self._time_created = time_created
@property
def time_updated(self):
"""
Gets the time_updated of this Sddc.
The date and time the SDDC was updated, in the format defined by
`RFC3339`__.
__ https://tools.ietf.org/html/rfc3339
:return: The time_updated of this Sddc.
:rtype: datetime
"""
return self._time_updated
@time_updated.setter
def time_updated(self, time_updated):
"""
Sets the time_updated of this Sddc.
The date and time the SDDC was updated, in the format defined by
`RFC3339`__.
__ https://tools.ietf.org/html/rfc3339
:param time_updated: The time_updated of this Sddc.
:type: datetime
"""
self._time_updated = time_updated
@property
def lifecycle_state(self):
"""
Gets the lifecycle_state of this Sddc.
The current state of the SDDC.
Allowed values for this property are: "CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED", 'UNKNOWN_ENUM_VALUE'.
Any unrecognized values returned by a service will be mapped to 'UNKNOWN_ENUM_VALUE'.
:return: The lifecycle_state of this Sddc.
:rtype: str
"""
return self._lifecycle_state
@lifecycle_state.setter
def lifecycle_state(self, lifecycle_state):
"""
Sets the lifecycle_state of this Sddc.
The current state of the SDDC.
:param lifecycle_state: The lifecycle_state of this Sddc.
:type: str
"""
allowed_values = ["CREATING", "UPDATING", "ACTIVE", "DELETING", "DELETED", "FAILED"]
if not value_allowed_none_or_none_sentinel(lifecycle_state, allowed_values):
lifecycle_state = 'UNKNOWN_ENUM_VALUE'
self._lifecycle_state = lifecycle_state
@property
def initial_host_shape_name(self):
"""
**[Required]** Gets the initial_host_shape_name of this Sddc.
The initial compute shape of the SDDC's ESXi hosts.
See :func:`list_supported_host_shapes`.
:return: The initial_host_shape_name of this Sddc.
:rtype: str
"""
return self._initial_host_shape_name
@initial_host_shape_name.setter
def initial_host_shape_name(self, initial_host_shape_name):
"""
Sets the initial_host_shape_name of this Sddc.
The initial compute shape of the SDDC's ESXi hosts.
See :func:`list_supported_host_shapes`.
:param initial_host_shape_name: The initial_host_shape_name of this Sddc.
:type: str
"""
self._initial_host_shape_name = initial_host_shape_name
@property
def initial_host_ocpu_count(self):
"""
Gets the initial_host_ocpu_count of this Sddc.
The initial OCPU count of the SDDC's ESXi hosts.
:return: The initial_host_ocpu_count of this Sddc.
:rtype: float
"""
return self._initial_host_ocpu_count
@initial_host_ocpu_count.setter
def initial_host_ocpu_count(self, initial_host_ocpu_count):
"""
Sets the initial_host_ocpu_count of this Sddc.
The initial OCPU count of the SDDC's ESXi hosts.
:param initial_host_ocpu_count: The initial_host_ocpu_count of this Sddc.
:type: float
"""
self._initial_host_ocpu_count = initial_host_ocpu_count
@property
def is_shielded_instance_enabled(self):
"""
Gets the is_shielded_instance_enabled of this Sddc.
Indicates whether shielded instance is enabled at the SDDC level.
:return: The is_shielded_instance_enabled of this Sddc.
:rtype: bool
"""
return self._is_shielded_instance_enabled
@is_shielded_instance_enabled.setter
def is_shielded_instance_enabled(self, is_shielded_instance_enabled):
"""
Sets the is_shielded_instance_enabled of this Sddc.
Indicates whether shielded instance is enabled at the SDDC level.
:param is_shielded_instance_enabled: The is_shielded_instance_enabled of this Sddc.
:type: bool
"""
self._is_shielded_instance_enabled = is_shielded_instance_enabled
@property
def capacity_reservation_id(self):
"""
Gets the capacity_reservation_id of this Sddc.
The `OCID`__ of the Capacity Reservation.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:return: The capacity_reservation_id of this Sddc.
:rtype: str
"""
return self._capacity_reservation_id
@capacity_reservation_id.setter
def capacity_reservation_id(self, capacity_reservation_id):
"""
Sets the capacity_reservation_id of this Sddc.
The `OCID`__ of the Capacity Reservation.
__ https://docs.cloud.oracle.com/Content/General/Concepts/identifiers.htm
:param capacity_reservation_id: The capacity_reservation_id of this Sddc.
:type: str
"""
self._capacity_reservation_id = capacity_reservation_id
@property
def freeform_tags(self):
"""
**[Required]** Gets the freeform_tags of this Sddc.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The freeform_tags of this Sddc.
:rtype: dict(str, str)
"""
return self._freeform_tags
@freeform_tags.setter
def freeform_tags(self, freeform_tags):
"""
Sets the freeform_tags of this Sddc.
Free-form tags for this resource. Each tag is a simple key-value pair with no
predefined name, type, or namespace. For more information, see `Resource Tags`__.
Example: `{\"Department\": \"Finance\"}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param freeform_tags: The freeform_tags of this Sddc.
:type: dict(str, str)
"""
self._freeform_tags = freeform_tags
@property
def defined_tags(self):
"""
**[Required]** Gets the defined_tags of this Sddc.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:return: The defined_tags of this Sddc.
:rtype: dict(str, dict(str, object))
"""
return self._defined_tags
@defined_tags.setter
def defined_tags(self, defined_tags):
"""
Sets the defined_tags of this Sddc.
Defined tags for this resource. Each key is predefined and scoped to a
namespace. For more information, see `Resource Tags`__.
Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`
__ https://docs.cloud.oracle.com/Content/General/Concepts/resourcetags.htm
:param defined_tags: The defined_tags of this Sddc.
:type: dict(str, dict(str, object))
"""
self._defined_tags = defined_tags
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
|
py | b4170811fb9531a6ff49f2cf883e8f419a85a9b7 | # coding=utf-8
# Copyright 2019 The Google Research Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Implementations of different layers."""
import inits
import tensorflow.compat.v1 as tf
# global unique layer ID dictionary for layer name assignment
_LAYER_UIDS = {}
def get_layer_uid(layer_name=''):
"""Helper function, assigns unique layer IDs."""
if layer_name not in _LAYER_UIDS:
_LAYER_UIDS[layer_name] = 1
return 1
else:
_LAYER_UIDS[layer_name] += 1
return _LAYER_UIDS[layer_name]
def sparse_dropout(x, keep_prob, noise_shape):
  """Dropout for sparse tensors."""
  # floor(keep_prob + U(0, 1)) is 1 with probability keep_prob and 0 otherwise,
  # which yields a boolean keep-mask over the nonzero entries of the sparse tensor.
  random_tensor = keep_prob
  random_tensor += tf.random_uniform(noise_shape)
  dropout_mask = tf.cast(tf.floor(random_tensor), dtype=tf.bool)
  pre_out = tf.sparse_retain(x, dropout_mask)
  # Rescale the surviving entries so the expected value matches the input.
  return pre_out * (1. / keep_prob)
def dot(x, y, sparse=False):
"""Wrapper for tf.matmul (sparse vs dense)."""
if sparse:
res = tf.sparse_tensor_dense_matmul(x, y)
else:
res = tf.matmul(x, y)
return res
def layernorm(x, offset, scale):
mean, variance = tf.nn.moments(x, axes=[1], keep_dims=True)
return tf.nn.batch_normalization(x, mean, variance, offset, scale, 1e-9)
class Layer(object):
"""Base layer class.
Defines basic API for all layer objects.
Implementation inspired by keras (http://keras.io).
# Properties
name: String, defines the variable scope of the layer.
logging: Boolean, switches Tensorflow histogram logging on/off
# Methods
_call(inputs): Defines computation graph of layer
(i.e. takes input, returns output)
__call__(inputs): Wrapper for _call()
_log_vars(): Log all variables
"""
def __init__(self, **kwargs):
allowed_kwargs = {'name', 'logging'}
for kwarg, _ in kwargs.items():
assert kwarg in allowed_kwargs, 'Invalid keyword argument: ' + kwarg
name = kwargs.get('name')
if not name:
layer = self.__class__.__name__.lower()
name = layer + '_' + str(get_layer_uid(layer))
self.name = name
self.vars = {}
logging = kwargs.get('logging', False)
self.logging = logging
self.sparse_inputs = False
def _call(self, inputs):
return inputs
def __call__(self, inputs):
with tf.name_scope(self.name):
if self.logging and not self.sparse_inputs:
tf.summary.histogram(self.name + '/inputs', inputs)
outputs = self._call(inputs)
if self.logging:
tf.summary.histogram(self.name + '/outputs', outputs)
return outputs
def _log_vars(self):
for var in self.vars:
tf.summary.histogram(self.name + '/vars/' + var, self.vars[var])
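# Illustrative sketch (not part of the original module): the smallest possible
# subclass of `Layer`. The class name `PassThrough` is an assumption used only
# to show the contract: subclasses override `_call`, while the base class
# `__call__` wraps it with the name scope and optional histogram logging.
class PassThrough(Layer):
  """Example layer that returns its inputs unchanged."""
  def _call(self, inputs):
    return inputs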
class Dense(Layer):
"""Dense layer."""
def __init__(self,
input_dim,
output_dim,
placeholders,
dropout=0.,
sparse_inputs=False,
act=tf.nn.relu,
bias=False,
featureless=False,
norm=False,
**kwargs):
super(Dense, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
self.norm = norm
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = inits.glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = inits.zeros([output_dim], name='bias')
if self.norm:
self.vars['offset'] = inits.zeros([1, output_dim], name='offset')
self.vars['scale'] = inits.ones([1, output_dim], name='scale')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# dropout
if self.sparse_inputs:
x = sparse_dropout(x, 1 - self.dropout, self.num_features_nonzero)
else:
x = tf.nn.dropout(x, 1 - self.dropout)
# transform
output = dot(x, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
with tf.variable_scope(self.name + '_vars'):
if self.norm:
output = layernorm(output, self.vars['offset'], self.vars['scale'])
return self.act(output)
class GraphConvolution(Layer):
"""Graph convolution layer."""
def __init__(self,
input_dim,
output_dim,
placeholders,
dropout=0.,
sparse_inputs=False,
act=tf.nn.relu,
bias=False,
featureless=False,
norm=False,
precalc=False,
**kwargs):
super(GraphConvolution, self).__init__(**kwargs)
if dropout:
self.dropout = placeholders['dropout']
else:
self.dropout = 0.
self.act = act
self.support = placeholders['support']
self.sparse_inputs = sparse_inputs
self.featureless = featureless
self.bias = bias
self.norm = norm
self.precalc = precalc
# helper variable for sparse dropout
self.num_features_nonzero = placeholders['num_features_nonzero']
with tf.variable_scope(self.name + '_vars'):
self.vars['weights'] = inits.glorot([input_dim, output_dim],
name='weights')
if self.bias:
self.vars['bias'] = inits.zeros([output_dim], name='bias')
if self.norm:
self.vars['offset'] = inits.zeros([1, output_dim], name='offset')
self.vars['scale'] = inits.ones([1, output_dim], name='scale')
if self.logging:
self._log_vars()
def _call(self, inputs):
x = inputs
# convolve
if self.precalc:
support = x
else:
support = dot(self.support, x, sparse=True)
support = tf.concat((support, x), axis=1)
# dropout
support = tf.nn.dropout(support, 1 - self.dropout)
output = dot(support, self.vars['weights'], sparse=self.sparse_inputs)
# bias
if self.bias:
output += self.vars['bias']
with tf.variable_scope(self.name + '_vars'):
if self.norm:
output = layernorm(output, self.vars['offset'], self.vars['scale'])
return self.act(output)
|
py | b41708c84a7299f84616dd78b3dda3e04bf7a45f | #!/usr/bin/env python
import petl as etl
from datetime import datetime
print("PULSE DATA")
pulse_tab = (
etl
.fromcsv('measurements.csv')
.convert('value', float)
.convert('value', int)
.convert('timestamp', int)
.convert('timestamp', lambda t: datetime.fromtimestamp(int(t/1000.0)))
)
print(pulse_tab.lookall())
print("PANAMA DATA")
src_tab = (
etl
.fromjson('monitor.json', header=['timestamp', 'metric', 'source', 'measure'])
.convert('timestamp', lambda t: datetime.fromtimestamp(int(t/1000.0)))
.select('source', lambda v: v == 'panama-scheduler')
.rename('measure', 'value')
)
print(src_tab.lookall())
|
py | b417094bcb95bcb1121fa34c2232ffeaa69f08ba |
import uuid
import json
import os.path as path
import IPython
web_assets_dir = path.join(path.dirname(path.realpath(__file__)), 'web_assets')
class TextualHeatmap:
"""Create interactive textual heatmaps for Jupiter notebooks.
This is useful for PyTorch or pure TensorFlow. You should properly use
`KerasLearningCurve` if you use keras.
Line description: dict with the properties `name` and `color`.
Axis description:
Example:
heatmap = TextualHeatmap(
show_meta = True,
facet_titles = ['LSTM', 'GRU']
)
heatmap.set_data([
[{
"token": 'a',
"meta": ['and', 'africa', 'america'],
"heat": [1, 0, 0]
}, {
"token": 'n',
"meta": ['and', 'anecdote', 'antelope'],
"heat": [0.3, 0.7, 0]
}, {
"token": 'd',
"meta": ['and', 'andante', 'andosol'],
"heat": [0.2, 0.3, 0.5]
}],
[{
"token": 'a',
"meta": ['and', 'africa', 'america'],
"heat": [1, 0, 0]
}, {
"token": 'n',
"meta": ['and', 'anecdote', 'antelope'],
"heat": [0.1, 0.9, 0]
}, {
"token": 'd',
"meta": ['and', 'andante', 'andosol'],
"heat": [0.1, 0.1, 0.8]
}]
])
heatmap.highlight(1)
Arguments:
show_meta: The meta texts on top of each facet (Default: False).
facet_titles: The title on each facet (Default: ['Heatmap']).
rotate_facet_titles: If true, the facet titles will be rotated 90deg (Default: False).
width: The width of the heatmap (Default: 600).
interactive: Should the heatmap be interactive on mouseover. (Default: True)
debug: Depending on the notebook, a JavaScript evaluation does not provide
a stack trace in the developer console. Setting this to `true` works
around that by injecting `<script>` tags instead.
"""
def __init__(self,
show_meta = False,
facet_titles = ['Heatmap'],
rotate_facet_titles = False,
width = 600,
interactive = True,
display_fn=IPython.display.display,
debug=False
):
if not isinstance(width, int) or width <= 0:
raise ValueError(f'width must be a positive number, was {width}')
if not isinstance(show_meta, bool):
raise ValueError('show_meta must be a boolean')
if not isinstance(interactive, bool):
raise ValueError('interactive must be a boolean')
if not isinstance(facet_titles, list):
raise ValueError('facet_titles must be a list')
for facet_title_i, facet_title in enumerate(facet_titles):
if not isinstance(facet_title, str):
raise ValueError(f'facet_title["{facet_title_i}"] must a string')
if not isinstance(rotate_facet_titles, bool):
raise ValueError('rotate_facet_titles must be a boolean')
# Store settings
self._debug = debug
self._display = display_fn
self._settings = {
'id': str(uuid.uuid4()),
'width': width,
'showMeta': show_meta,
'facetTitles': facet_titles,
'rotateFacetTitles': rotate_facet_titles,
'interactive': interactive
}
# Prepear data containers
self._data = []
self._display(self._create_inital_html())
self._data_element = self._display(
IPython.display.Javascript('void(0);'),
display_id=True
)
self._highlight_element = self._display(
IPython.display.Javascript('void(0);'),
display_id=True
)
def _create_inital_html(self):
with open(path.join(web_assets_dir, 'textual_heatmap.css')) as css_fp, \
open(path.join(web_assets_dir, 'textual_heatmap.js')) as js_fp:
return IPython.display.HTML(
f'<style>{css_fp.read()}</style>'
f'<script>{js_fp.read()}</script>'
f'<div id="{self._settings["id"]}" class="textual-heatmap"></div>'
f'<script>'
f' window.setupTextualHeatmap({json.dumps(self._settings)});'
f'</script>'
)
def set_data(self, data):
"""Sets the data and render the heatmap.
`data` is a list of `FacetData`. Each `FacetData` is a
list of `TokenData`.
TokenData = {"token": str, "meta": List[str], "heat": List[float], "format": bool}
* The `token` is what is displayed in the main text window.
* The `meta` is used if `show_meta` is set to `True` in `TextualHeatmap`. This is
displayed above the main text window.
        * The `heat` is a ratio from 0 to 1 that maps to a color: 0 means there
          is no heat, and 1 is full heat. The `heat` values do not have to sum to 1.
        * The `format` is used to indicate tokens that are not seen by the model, for example
          space characters if word or sub-word tokenization is used. In this case,
`meta` and `heat` have no meaning.
Examples:
data = [[
{ "token": "context", "meta": ["content", "concise", "context"], "heat": [0, 0.2] },
{ "token": " ", "format": True },
{ "token": "is", "meta": ["are", "that", "is"], "heat": [0.7, 0] }
]]
Arguments:
data: List[List[TokenData]] - Heatmap data.
"""
disp = IPython.display.HTML(
f'<script>'
f' window.setDataTextualHeatmap({json.dumps(self._settings)}, {json.dumps(data)});'
f'</script>'
)
self._data_element.update(disp)
def highlight(self, index):
"""Select a token index to be highlighted on the heatmap.
This will affect all facets in the heatmap.
Arguments:
index: integer - The token index to highlight.
"""
disp = IPython.display.HTML(
f'<script>'
f' window.highlightTextualHeatmap({json.dumps(self._settings)}, {index});'
f'</script>'
)
self._highlight_element.update(disp)
|
py | b41709761686507e588f6a73af28407166575753 | from sys import argv
print("How old are you?", end=' ')
# age = input()
age = 1
print("How tall are you?", end=' ')
# height = input()
height = 1
print("How much do you weigh?", end=' ')
# weight = input()
weight = 1
print(f"So, you're {age} old, {height} tall and {weight} heavy.")
script, filename = argv
txt = open(filename)
print("Here's your file {filename}:")
print(txt.read())
print("Type the filename again:")
file_again = input()
txt_again = open(file_again)
print(txt_again.read())
print("Let's practice everything.")
print("You'd need to know 'bout escapes with \\\\ that do \\n newlines and \\t tabs.")
poem = """
\tThe lovely world with logic so firmly planted cannot discern
\tthe needs of love nor comprehend passion from intuition
\tand requires an explanation where there is none.
"""
print("--------------")
print(poem)
print("--------------")
five = 10 - 2 + 3 - 6
print(f"This should be five: {five}")
def secret_formula(started):
jelly_beans = started * 500
jars = jelly_beans / 1000
crates = jars * 100
return jelly_beans, jars, crates
start_point = 10000
jelly_beans, jars, crates = secret_formula(start_point)
# remember that this is another way to format a string
print("With a starting point of: {}".format(start_point))
# it's just like with an f"" string
print(f"We'd have {jelly_beans} beans, {jars} jars, and {crates} crates.")
start_point = start_point / 10
print("We can also do that this way:")
formula = secret_formula(start_point)
# this is an easy way to apply a list to a format string
print("We'd have {} beans, {} jars, and {} crates.".format(*formula))
people = 20
cats = 30
dogs = 15
if people < cats:
print("Too many cats! The world is doomed!")
if people > cats:
print("Not many cats! The world is saved!")
if people < dogs:
print("The world is drooled on!")
if people > dogs:
print("The world is dry!")
dogs += 5
if people >= dogs:
print("People are greater than or equal to dogs.")
if people <= dogs:
print("People are less than or equal to dogs.")
if people == dogs:
print("People are dogs.")
|
py | b41709bd9be8aec27777503466be9563845c49a8 | # Copyright 2014 Intel Corp.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""UserCreds object."""
from oslo_versionedobjects import base
from oslo_versionedobjects import fields
from heat.db import api as db_api
from heat.objects import base as heat_base
@base.VersionedObjectRegistry.register
class UserCreds(
heat_base.HeatObject,
base.VersionedObjectDictCompat,
base.ComparableVersionedObject,
):
fields = {
'id': fields.StringField(),
'created_at': fields.DateTimeField(read_only=True),
'updated_at': fields.DateTimeField(nullable=True),
'username': fields.StringField(nullable=True),
'password': fields.StringField(nullable=True),
'tenant': fields.StringField(nullable=True),
'tenant_id': fields.StringField(nullable=True),
'trustor_user_id': fields.StringField(nullable=True),
'trust_id': fields.StringField(nullable=True),
'region_name': fields.StringField(nullable=True),
'auth_url': fields.StringField(nullable=True),
'decrypt_method': fields.StringField(nullable=True)
}
@staticmethod
def _from_db_object(ucreds, db_ucreds, context=None):
if db_ucreds is None:
return db_ucreds
ucreds._context = context
for field in ucreds.fields:
# TODO(Shao HE Feng), now the db layer delete the decrypt_method
# field, just skip it here. and will add an encrypted_field later.
if field == "decrypt_method":
continue
ucreds[field] = db_ucreds[field]
ucreds.obj_reset_changes()
return ucreds
@classmethod
def create(cls, context):
user_creds_db = db_api.user_creds_create(context)
return cls._from_db_object(cls(), user_creds_db)
@classmethod
def delete(cls, context, user_creds_id):
db_api.user_creds_delete(context, user_creds_id)
@classmethod
def get_by_id(cls, context, user_creds_id):
user_creds_db = db_api.user_creds_get(context, user_creds_id)
user_creds = cls._from_db_object(cls(), user_creds_db)
return user_creds
|
py | b4170bb027cd2628e4035e6d1ad514054d47ce98 | # Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# pylint: disable=comparison-with-callable, line-too-long, too-many-branches
import logging
import re
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, Callable, cast, Dict, List, Optional, Union
import os
from urllib import parse
from superset import security_manager
from superset.custom_auth import use_ip_auth
import backoff
import humanize
import pandas as pd
import simplejson as json
from flask import abort, flash, g, Markup, redirect, render_template, request, Response
from flask_appbuilder import expose
from flask_appbuilder.models.sqla.interface import SQLAInterface
from flask_appbuilder.security.decorators import (
has_access,
has_access_api,
permission_name,
)
from superset.peak.decorators import has_superset_api_access, check_access_and_create_session
from flask_appbuilder.security.sqla import models as ab_models
from flask_babel import gettext as __, lazy_gettext as _, ngettext
from jinja2.exceptions import TemplateError
from jinja2.meta import find_undeclared_variables
from sqlalchemy import and_, or_
from sqlalchemy.engine.url import make_url
from sqlalchemy.exc import ArgumentError, DBAPIError, NoSuchModuleError, SQLAlchemyError
from sqlalchemy.orm.session import Session
from sqlalchemy.sql import functions as func
from werkzeug.urls import Href
import boto3
from superset import (
app,
appbuilder,
conf,
db,
event_logger,
get_feature_flags,
is_feature_enabled,
results_backend,
results_backend_use_msgpack,
security_manager,
sql_lab,
viz,
)
from superset.charts.dao import ChartDAO
from superset.connectors.base.models import BaseDatasource
from superset.connectors.connector_registry import ConnectorRegistry
from superset.connectors.sqla.models import (
AnnotationDatasource,
SqlaTable,
SqlMetric,
TableColumn,
)
from superset.dashboards.commands.importers.v0 import ImportDashboardsCommand
from superset.dashboards.dao import DashboardDAO
from superset.databases.dao import DatabaseDAO
from superset.databases.filters import DatabaseFilter
from superset.datasets.commands.exceptions import DatasetNotFoundError
from superset.exceptions import (
CacheLoadError,
CertificateException,
DatabaseNotFound,
SerializationError,
SupersetException,
SupersetGenericDBErrorException,
SupersetSecurityException,
SupersetTemplateParamsErrorException,
SupersetTimeoutException,
)
from superset.extensions import async_query_manager, cache_manager
from superset.jinja_context import get_template_processor
from superset.models.core import Database, FavStar, Log
from superset.models.dashboard import Dashboard
from superset.models.datasource_access_request import DatasourceAccessRequest
from superset.models.slice import Slice
from superset.models.sql_lab import LimitingFactor, Query, TabState
from superset.models.user_attributes import UserAttribute
from superset.queries.dao import QueryDAO
from superset.security.analytics_db_safety import check_sqlalchemy_uri
from superset.sql_parse import CtasMethod, ParsedQuery, Table
from superset.sql_validators import get_validator_by_name
from superset.tasks.async_queries import load_explore_json_into_cache
from superset.typing import FlaskResponse
from superset.utils import core as utils, csv
from superset.utils.async_query_manager import AsyncQueryTokenException
from superset.utils.cache import etag_cache
from superset.utils.core import ReservedUrlParameters
from superset.utils.dates import now_as_float
from superset.utils.decorators import check_dashboard_access
from superset.views.base import (
api,
BaseSupersetView,
check_ownership,
common_bootstrap_payload,
create_table_permissions,
CsvResponse,
data_payload_response,
generate_download_headers,
get_error_msg,
get_user_roles,
handle_api_exception,
json_error_response,
json_errors_response,
json_success,
validate_sqlatable,
)
from superset.views.utils import (
_deserialize_results_payload,
apply_display_max_row_limit,
bootstrap_user_data,
check_datasource_perms,
check_explore_cache_perms,
check_resource_permissions,
check_slice_perms,
get_cta_schema_name,
get_dashboard_extra_filters,
get_datasource_info,
get_form_data,
get_viz,
is_owner,
)
from superset.viz import BaseViz
STAGE = os.environ['STAGE']
TENANT = os.environ['TENANT']
config = app.config
SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT = config["SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT"]
stats_logger = config["STATS_LOGGER"]
DAR = DatasourceAccessRequest
QueryStatus = utils.QueryStatus
logger = logging.getLogger(__name__)
DATABASE_KEYS = [
"allow_csv_upload",
"allow_ctas",
"allow_cvas",
"allow_dml",
"allow_multi_schema_metadata_fetch",
"allow_run_async",
"allows_subquery",
"backend",
"database_name",
"expose_in_sqllab",
"force_ctas_schema",
"id",
]
lambda_client = boto3.client('lambda')
ALL_DATASOURCE_ACCESS_ERR = __(
"This endpoint requires the `all_datasource_access` permission"
)
DATASOURCE_MISSING_ERR = __("The data source seems to have been deleted")
USER_MISSING_ERR = __("The user seems to have been deleted")
PARAMETER_MISSING_ERR = (
"Please check your template parameters for syntax errors and make sure "
"they match across your SQL query and Set Parameters. Then, try running "
"your query again.")
class Superset(BaseSupersetView): # pylint: disable=too-many-public-methods
"""The base views for Superset!"""
logger = logging.getLogger(__name__)
@has_access_api
@event_logger.log_this
@expose("/datasources/")
def datasources(self) -> FlaskResponse:
return self.json_response(
sorted(
[
datasource.short_data
for datasource in ConnectorRegistry.get_all_datasources(db.session)
if datasource.short_data.get("name")
],
key=lambda datasource: datasource["name"],
)
)
@has_access_api
@event_logger.log_this
@expose("/override_role_permissions/", methods=["POST"])
def override_role_permissions(self) -> FlaskResponse:
"""Updates the role with the give datasource permissions.
Permissions not in the request will be revoked. This endpoint should
be available to admins only. Expects JSON in the format:
{
'role_name': '{role_name}',
'database': [{
'datasource_type': '{table|druid}',
'name': '{database_name}',
'schema': [{
'name': '{schema_name}',
'datasources': ['{datasource name}, {datasource name}']
}]
}]
}
"""
data = request.get_json(force=True)
role_name = data["role_name"]
databases = data["database"]
db_ds_names = set()
for dbs in databases:
for schema in dbs["schema"]:
for ds_name in schema["datasources"]:
fullname = utils.get_datasource_full_name(
dbs["name"], ds_name, schema=schema["name"]
)
db_ds_names.add(fullname)
existing_datasources = ConnectorRegistry.get_all_datasources(db.session)
datasources = [d for d in existing_datasources if d.full_name in db_ds_names]
role = security_manager.find_role(role_name)
# remove all permissions
role.permissions = []
# grant permissions to the list of datasources
granted_perms = []
for datasource in datasources:
view_menu_perm = security_manager.find_permission_view_menu(
view_menu_name=datasource.perm, permission_name="datasource_access"
)
# prevent creating empty permissions
if view_menu_perm and view_menu_perm.view_menu:
role.permissions.append(view_menu_perm)
granted_perms.append(view_menu_perm.view_menu.name)
db.session.commit()
return self.json_response(
{"granted": granted_perms, "requested": list(db_ds_names)}, status=201
)
@has_access
@event_logger.log_this
@expose("/request_access/")
def request_access(self) -> FlaskResponse:
datasources = set()
dashboard_id = request.args.get("dashboard_id")
if dashboard_id:
dash = db.session.query(Dashboard).filter_by(id=int(dashboard_id)).one()
datasources |= dash.datasources
datasource_id = request.args.get("datasource_id")
datasource_type = request.args.get("datasource_type")
if datasource_id and datasource_type:
ds_class = ConnectorRegistry.sources.get(datasource_type)
datasource = (
db.session.query(ds_class).filter_by(id=int(datasource_id)).one()
)
datasources.add(datasource)
has_access_ = all(
(
datasource and security_manager.can_access_datasource(datasource)
for datasource in datasources
)
)
if has_access_:
return redirect("/superset/dashboard/{}".format(dashboard_id))
if request.args.get("action") == "go":
for datasource in datasources:
access_request = DAR(
datasource_id=datasource.id, datasource_type=datasource.type
)
db.session.add(access_request)
db.session.commit()
flash(__("Access was requested"), "info")
return redirect("/")
return self.render_template(
"superset/request_access.html",
datasources=datasources,
datasource_names=", ".join([o.name for o in datasources]),
)
@has_access
@event_logger.log_this
@expose("/approve")
def approve(self) -> FlaskResponse: # pylint: disable=too-many-locals,no-self-use
def clean_fulfilled_requests(session: Session) -> None:
for dar in session.query(DAR).all():
datasource = ConnectorRegistry.get_datasource(
dar.datasource_type, dar.datasource_id, session,
)
if not datasource or security_manager.can_access_datasource(datasource):
# Dataset does not exist anymore
session.delete(dar)
session.commit()
datasource_type = request.args["datasource_type"]
datasource_id = request.args["datasource_id"]
created_by_username = request.args.get("created_by")
role_to_grant = request.args.get("role_to_grant")
role_to_extend = request.args.get("role_to_extend")
session = db.session
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, session
)
if not datasource:
flash(DATASOURCE_MISSING_ERR, "alert")
return json_error_response(DATASOURCE_MISSING_ERR)
requested_by = security_manager.find_user(username=created_by_username)
if not requested_by:
flash(USER_MISSING_ERR, "alert")
return json_error_response(USER_MISSING_ERR)
requests = (
session.query(DAR)
.filter(
DAR.datasource_id == datasource_id,
DAR.datasource_type == datasource_type,
DAR.created_by_fk == requested_by.id,
)
.all()
)
if not requests:
err = __("The access requests seem to have been deleted")
flash(err, "alert")
return json_error_response(err)
# check if you can approve
if security_manager.can_access_all_datasources() or check_ownership(
datasource, raise_if_false=False
):
# can by done by admin only
if role_to_grant:
role = security_manager.find_role(role_to_grant)
requested_by.roles.append(role)
msg = __(
"%(user)s was granted the role %(role)s that gives access "
"to the %(datasource)s",
user=requested_by.username,
role=role_to_grant,
datasource=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_granted.txt",
app.config,
)
flash(msg, "info")
if role_to_extend:
perm_view = security_manager.find_permission_view_menu(
"email/datasource_access", datasource.perm
)
role = security_manager.find_role(role_to_extend)
security_manager.add_permission_role(role, perm_view)
msg = __(
"Role %(r)s was extended to provide the access to "
"the datasource %(ds)s",
r=role_to_extend,
ds=datasource.full_name,
)
utils.notify_user_about_perm_udate(
g.user,
requested_by,
role,
datasource,
"email/role_extended.txt",
app.config,
)
flash(msg, "info")
clean_fulfilled_requests(session)
else:
flash(__("You have no permission to approve this request"), "danger")
return redirect("/accessrequestsmodelview/list/")
for request_ in requests:
session.delete(request_)
session.commit()
return redirect("/accessrequestsmodelview/list/")
@has_access
@event_logger.log_this
@expose("/slice/<int:slice_id>/")
def slice(self, slice_id: int) -> FlaskResponse: # pylint: disable=no-self-use
_, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
abort(404)
endpoint = "/superset/explore/?form_data={}".format(
parse.quote(json.dumps({"slice_id": slice_id}))
)
is_standalone_mode = ReservedUrlParameters.is_standalone_mode()
if is_standalone_mode:
endpoint += f"&{ReservedUrlParameters.STANDALONE}={is_standalone_mode}"
return redirect(endpoint)
def get_query_string_response(self, viz_obj: BaseViz) -> FlaskResponse:
query = None
try:
query_obj = viz_obj.query_obj()
if query_obj:
query = viz_obj.datasource.get_query_str(query_obj)
except Exception as ex: # pylint: disable=broad-except
err_msg = utils.error_msg_from_exception(ex)
logger.exception(err_msg)
return json_error_response(err_msg)
if not query:
query = "No query."
return self.json_response(
{"query": query, "language": viz_obj.datasource.query_language}
)
def get_raw_results(self, viz_obj: BaseViz) -> FlaskResponse:
payload = viz_obj.get_df_payload()
if viz_obj.has_error(payload):
return json_error_response(payload=payload, status=400)
return self.json_response({"data": payload["df"].to_dict("records")})
def get_samples(self, viz_obj: BaseViz) -> FlaskResponse:
return self.json_response({"data": viz_obj.get_samples()})
def generate_json(
self, viz_obj: BaseViz, response_type: Optional[str] = None
) -> FlaskResponse:
if response_type == utils.ChartDataResultFormat.CSV:
return CsvResponse(
viz_obj.get_csv(), headers=generate_download_headers("csv")
)
if response_type == utils.ChartDataResultType.QUERY:
return self.get_query_string_response(viz_obj)
if response_type == utils.ChartDataResultType.RESULTS:
return self.get_raw_results(viz_obj)
if response_type == utils.ChartDataResultType.SAMPLES:
return self.get_samples(viz_obj)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
@event_logger.log_this
@api
@has_access_api
@expose("/slice_json/<int:slice_id>")
@etag_cache()
@check_resource_permissions(check_slice_perms)
def slice_json(self, slice_id: int) -> FlaskResponse:
form_data, slc = get_form_data(slice_id, use_slice_data=True)
if not slc:
return json_error_response("The slice does not exist")
try:
viz_obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=False,
)
return self.generate_json(viz_obj)
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex))
@api
@has_access_api
@event_logger.log_this
@expose("/annotation_json/<int:layer_id>")
def annotation_json( # pylint: disable=no-self-use
self, layer_id: int
) -> FlaskResponse:
form_data = get_form_data()[0]
force = utils.parse_boolean_string(request.args.get("force"))
form_data["layer_id"] = layer_id
form_data["filters"] = [{"col": "layer_id", "op": "==", "val": layer_id}]
# Set all_columns to ensure the TableViz returns the necessary columns to the
# frontend.
form_data["all_columns"] = [
"created_on",
"changed_on",
"id",
"start_dttm",
"end_dttm",
"layer_id",
"short_descr",
"long_descr",
"json_metadata",
"created_by_fk",
"changed_by_fk",
]
datasource = AnnotationDatasource()
viz_obj = viz.viz_types["table"](datasource, form_data=form_data, force=force)
payload = viz_obj.get_payload()
return data_payload_response(*viz_obj.payload_json_and_has_error(payload))
@event_logger.log_this
@api
@has_access_api
@handle_api_exception
@permission_name("explore_json")
@expose("/explore_json/data/<cache_key>", methods=["GET"])
@check_resource_permissions(check_explore_cache_perms)
def explore_json_data(self, cache_key: str) -> FlaskResponse:
"""Serves cached result data for async explore_json calls
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: form_data should not be loaded twice from cache
(also loaded in `check_explore_cache_perms`)
"""
try:
cached = cache_manager.cache.get(cache_key)
if not cached:
raise CacheLoadError("Cached data not found")
form_data = cached.get("form_data")
response_type = cached.get("response_type")
datasource_id, datasource_type = get_datasource_info(None, None, form_data)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force_cached=True,
)
return self.generate_json(viz_obj, response_type)
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex), 400)
EXPLORE_JSON_METHODS = ["POST"]
if not is_feature_enabled("ENABLE_EXPLORE_JSON_CSRF_PROTECTION"):
EXPLORE_JSON_METHODS.append("GET")
@api
@has_access_api
@handle_api_exception
@event_logger.log_this
@expose(
"/explore_json/<datasource_type>/<int:datasource_id>/",
methods=EXPLORE_JSON_METHODS,
)
@expose("/explore_json/", methods=EXPLORE_JSON_METHODS)
@etag_cache()
@check_resource_permissions(check_datasource_perms)
def explore_json(
self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> FlaskResponse:
"""Serves all request that GET or POST form_data
This endpoint evolved to be the entry point of many different
requests that GETs or POSTs a form_data.
`self.generate_json` receives this input and returns different
payloads based on the request args in the first block
TODO: break into one endpoint for each return shape"""
response_type = utils.ChartDataResultFormat.JSON.value
responses: List[
Union[utils.ChartDataResultFormat, utils.ChartDataResultType]
] = list(utils.ChartDataResultFormat)
responses.extend(list(utils.ChartDataResultType))
for response_option in responses:
if request.args.get(response_option) == "true":
response_type = response_option
break
form_data = get_form_data()[0]
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
force = request.args.get("force") == "true"
# TODO: support CSV, SQL query and other non-JSON types
if (
is_feature_enabled("GLOBAL_ASYNC_QUERIES")
and response_type == utils.ChartDataResultFormat.JSON
):
try:
async_channel_id = async_query_manager.parse_jwt_from_request(
request
)["channel"]
job_metadata = async_query_manager.init_job(async_channel_id)
load_explore_json_into_cache.delay(
job_metadata, form_data, response_type, force
)
except AsyncQueryTokenException:
return json_error_response("Not authorized", 401)
return json_success(json.dumps(job_metadata), status=202)
viz_obj = get_viz(
datasource_type=cast(str, datasource_type),
datasource_id=datasource_id,
form_data=form_data,
force=force,
)
return self.generate_json(viz_obj, response_type)
except SupersetException as ex:
return json_error_response(utils.error_msg_from_exception(ex), 400)
@has_access
@event_logger.log_this
@expose("/import_dashboards/", methods=["GET", "POST"])
def import_dashboards(self) -> FlaskResponse:
"""Overrides the dashboards using json instances from the file."""
import_file = request.files.get("file")
if request.method == "POST" and import_file:
success = False
database_id = request.form.get("db_id")
try:
ImportDashboardsCommand(
{import_file.filename: import_file.read()}, database_id
).run()
success = True
except DatabaseNotFound as ex:
logger.exception(ex)
flash(
_(
"Cannot import dashboard: %(db_error)s.\n"
"Make sure to create the database before "
"importing the dashboard.",
db_error=ex,
),
"danger",
)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
flash(
_(
"An unknown error occurred. "
"Please contact your Superset administrator"
),
"danger",
)
if success:
flash("Dashboard(s) have been imported", "success")
return redirect("/dashboard/list/")
databases = db.session.query(Database).all()
return self.render_template(
"superset/import_dashboards.html", databases=databases
)
@check_access_and_create_session
@event_logger.log_this
@expose("/explore/<datasource_type>/<int:datasource_id>/", methods=["GET", "POST"])
@expose("/explore/", methods=["GET", "POST"])
def explore( # pylint: disable=too-many-locals,too-many-return-statements,too-many-statements
self, datasource_type: Optional[str] = None, datasource_id: Optional[int] = None
) -> FlaskResponse:
user_id = g.user.get_id() if g.user else None
form_data, slc = get_form_data(use_slice_data=True)
# Flash the SIP-15 message if the slice is owned by the current user and has not
# been updated, i.e., is not using the [start, end) interval.
if (
config["SIP_15_ENABLED"]
and slc
and g.user in slc.owners
and (
not form_data.get("time_range_endpoints")
or form_data["time_range_endpoints"]
!= (
utils.TimeRangeEndpoint.INCLUSIVE,
utils.TimeRangeEndpoint.EXCLUSIVE,
)
)
):
url = Href("/superset/explore/")(
{
"form_data": json.dumps(
{
"slice_id": slc.id,
"time_range_endpoints": (
utils.TimeRangeEndpoint.INCLUSIVE.value,
utils.TimeRangeEndpoint.EXCLUSIVE.value,
),
}
)
}
)
# flash(Markup(config["SIP_15_TOAST_MESSAGE"].format(url=url)))
try:
datasource_id, datasource_type = get_datasource_info(
datasource_id, datasource_type, form_data
)
except SupersetException:
datasource_id = None
            # fall back unknown datasource to the table type
datasource_type = SqlaTable.type
datasource: Optional[BaseDatasource] = None
if datasource_id is not None:
try:
datasource = ConnectorRegistry.get_datasource(
cast(str, datasource_type), datasource_id, db.session
)
except DatasetNotFoundError:
pass
datasource_name = datasource.name if datasource else _("[Missing Dataset]")
if datasource:
if config["ENABLE_ACCESS_REQUEST"] and (
not security_manager.can_access_datasource(datasource)
):
flash(
__(security_manager.get_datasource_access_error_msg(datasource)),
"danger",
)
return redirect(
"superset/request_access/?"
f"datasource_type={datasource_type}&"
f"datasource_id={datasource_id}&"
)
viz_type = form_data.get("viz_type")
if not viz_type and datasource and datasource.default_endpoint:
return redirect(datasource.default_endpoint)
# CHECK - Permissions got changed
# slc perms
slice_add_perm = security_manager.can_access("can_write", "Chart")
slice_overwrite_perm = is_owner(slc, g.user) if slc else False
slice_download_perm = security_manager.can_access("can_read", "Chart")
form_data["datasource"] = str(datasource_id) + "__" + cast(str, datasource_type)
# On explore, merge legacy and extra filters into the form data
utils.convert_legacy_filters_into_adhoc(form_data)
utils.merge_extra_filters(form_data)
# merge request url params
if request.method == "GET":
utils.merge_request_params(form_data, request.args)
# handle save or overwrite
action = request.args.get("action")
if action == "overwrite" and not slice_overwrite_perm:
return json_error_response(
_("You don't have the rights to ") + _("alter this ") + _("chart"),
status=403,
)
if action == "saveas" and not slice_add_perm:
return json_error_response(
_("You don't have the rights to ") + _("create a ") + _("chart"),
status=403,
)
if action in ("saveas", "overwrite") and datasource:
return self.save_or_overwrite_slice(
slc,
slice_add_perm,
slice_overwrite_perm,
slice_download_perm,
datasource.id,
datasource.type,
datasource.name,
)
standalone_mode = ReservedUrlParameters.is_standalone_mode()
dummy_datasource_data: Dict[str, Any] = {
"type": datasource_type,
"name": datasource_name,
"columns": [],
"metrics": [],
"database": {"id": 0, "backend": "",},
}
try:
datasource_data = datasource.data if datasource else dummy_datasource_data
datasource_database = datasource_data.get("database")
if datasource_database:
datasource_database["parameters"] = {}
except (SupersetException, SQLAlchemyError):
datasource_data = dummy_datasource_data
bootstrap_data = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": slice_overwrite_perm,
"datasource": datasource_data,
"form_data": form_data,
"datasource_id": datasource_id,
"datasource_type": datasource_type,
"slice": slc.data if slc else None,
"standalone": standalone_mode,
"user_id": user_id,
"user": bootstrap_user_data(g.user, include_perms=True),
"forced_height": request.args.get("height"),
"common": common_bootstrap_payload(),
}
if slc:
title = slc.slice_name
elif datasource:
table_name = (
datasource.table_name
if datasource_type == "table"
else datasource.datasource_name
)
title = _("Explore - %(table)s", table=table_name)
else:
title = _("Explore")
return self.render_template(
"superset/basic.html",
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
entry="explore",
title=title.__str__(),
standalone_mode=standalone_mode,
)
@api
@handle_api_exception
@has_access_api
@event_logger.log_this
@expose("/filter/<datasource_type>/<int:datasource_id>/<column>/")
def filter( # pylint: disable=no-self-use
self, datasource_type: str, datasource_id: int, column: str
) -> FlaskResponse:
"""
Endpoint to retrieve values for specified column.
:param datasource_type: Type of datasource e.g. table
:param datasource_id: Datasource id
:param column: Column name to retrieve values for
:returns: The Flask response
:raises SupersetSecurityException: If the user cannot access the resource
"""
# TODO: Cache endpoint by user, datasource and column
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session,
)
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
datasource.raise_for_access()
payload = json.dumps(
datasource.values_for_column(column, config["FILTER_SELECT_ROW_LIMIT"]),
default=utils.json_int_dttm_ser,
ignore_nan=True,
)
return json_success(payload)
@staticmethod
def remove_extra_filters(filters: List[Dict[str, Any]]) -> List[Dict[str, Any]]:
"""Extra filters are ones inherited from the dashboard's temporary context
Those should not be saved when saving the chart"""
return [f for f in filters if not f.get("isExtra")]
def save_or_overwrite_slice(
# pylint: disable=too-many-arguments,too-many-locals,no-self-use
self,
slc: Optional[Slice],
slice_add_perm: bool,
slice_overwrite_perm: bool,
slice_download_perm: bool,
datasource_id: int,
datasource_type: str,
datasource_name: str,
) -> FlaskResponse:
"""Save or overwrite a slice"""
slice_name = request.args.get("slice_name")
action = request.args.get("action")
form_data = get_form_data()[0]
if action == "saveas":
if "slice_id" in form_data:
form_data.pop("slice_id") # don't save old slice_id
slc = Slice(owners=[g.user] if g.user else [])
form_data["adhoc_filters"] = self.remove_extra_filters(
form_data.get("adhoc_filters", [])
)
assert slc
slc.params = json.dumps(form_data, indent=2, sort_keys=True)
slc.datasource_name = datasource_name
slc.viz_type = form_data["viz_type"]
slc.datasource_type = datasource_type
slc.datasource_id = datasource_id
slc.slice_name = slice_name
if action == "saveas" and slice_add_perm:
ChartDAO.save(slc)
msg = _("Chart [{}] has been saved").format(slc.slice_name)
flash(msg, "info")
elif action == "overwrite" and slice_overwrite_perm:
ChartDAO.overwrite(slc)
msg = _("Chart [{}] has been overwritten").format(slc.slice_name)
flash(msg, "info")
# Adding slice to a dashboard if requested
dash: Optional[Dashboard] = None
save_to_dashboard_id = request.args.get("save_to_dashboard_id")
new_dashboard_name = request.args.get("new_dashboard_name")
if save_to_dashboard_id:
# Adding the chart to an existing dashboard
dash = cast(
Dashboard,
db.session.query(Dashboard)
.filter_by(id=int(save_to_dashboard_id))
.one(),
)
# check edit dashboard permissions
dash_overwrite_perm = check_ownership(dash, raise_if_false=False)
if not dash_overwrite_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("alter this ")
+ _("dashboard"),
status=403,
)
flash(
_("Chart [{}] was added to dashboard [{}]").format(
slc.slice_name, dash.dashboard_title
),
"info",
)
elif new_dashboard_name:
# Creating and adding to a new dashboard
# check create dashboard permissions
dash_add_perm = security_manager.can_access("can_write", "Dashboard")
if not dash_add_perm:
return json_error_response(
_("You don't have the rights to ")
+ _("create a ")
+ _("dashboard"),
status=403,
)
dash = Dashboard(
dashboard_title=request.args.get("new_dashboard_name"),
owners=[g.user] if g.user else [],
)
flash(
_(
"Dashboard [{}] just got created and chart [{}] was added " "to it"
).format(dash.dashboard_title, slc.slice_name),
"info",
)
if dash and slc not in dash.slices:
dash.slices.append(slc)
db.session.commit()
response = {
"can_add": slice_add_perm,
"can_download": slice_download_perm,
"can_overwrite": is_owner(slc, g.user),
"form_data": slc.form_data,
"slice": slc.data,
"dashboard_url": dash.url if dash else None,
"dashboard_id": dash.id if dash else None,
}
if dash and request.args.get("goto_dash") == "true":
response.update({"dashboard": dash.url})
return json_success(json.dumps(response))
@api
@has_access_api
@event_logger.log_this
@expose("/schemas/<int:db_id>/")
@expose("/schemas/<int:db_id>/<force_refresh>/")
def schemas( # pylint: disable=no-self-use
self, db_id: int, force_refresh: str = "false"
) -> FlaskResponse:
logger.warning(
"This API endpoint is deprecated and will be removed in version 2.0.0"
)
db_id = int(db_id)
database = db.session.query(Database).get(db_id)
if database:
schemas = database.get_all_schema_names(
cache=database.schema_cache_enabled,
cache_timeout=database.schema_cache_timeout,
force=force_refresh.lower() == "true",
)
schemas = security_manager.get_schemas_accessible_by_user(database, schemas)
else:
schemas = []
return Response(json.dumps({"schemas": schemas}), mimetype="application/json")
@api
@has_access_api
@event_logger.log_this
@expose("/tables/<int:db_id>/<schema>/<substr>/")
@expose("/tables/<int:db_id>/<schema>/<substr>/<force_refresh>/")
def tables( # pylint: disable=too-many-locals,no-self-use
self, db_id: int, schema: str, substr: str, force_refresh: str = "false"
) -> FlaskResponse:
"""Endpoint to fetch the list of tables for given database"""
# Guarantees database filtering by security access
query = db.session.query(Database)
query = DatabaseFilter("id", SQLAInterface(Database, db.session)).apply(
query, None
)
database = query.filter_by(id=db_id).one_or_none()
if not database:
return json_error_response("Not found", 404)
force_refresh_parsed = force_refresh.lower() == "true"
schema_parsed = utils.parse_js_uri_path_item(schema, eval_undefined=True)
substr_parsed = utils.parse_js_uri_path_item(substr, eval_undefined=True)
if schema_parsed:
tables = (
database.get_all_table_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
or []
)
views = (
database.get_all_view_names_in_schema(
schema=schema_parsed,
force=force_refresh_parsed,
cache=database.table_cache_enabled,
cache_timeout=database.table_cache_timeout,
)
or []
)
else:
tables = database.get_all_table_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
views = database.get_all_view_names_in_database(
cache=True, force=False, cache_timeout=24 * 60 * 60
)
tables = security_manager.get_datasources_accessible_by_user(
database, tables, schema_parsed
)
views = security_manager.get_datasources_accessible_by_user(
database, views, schema_parsed
)
def get_datasource_label(ds_name: utils.DatasourceName) -> str:
return (
ds_name.table if schema_parsed else f"{ds_name.schema}.{ds_name.table}"
)
if substr_parsed:
tables = [tn for tn in tables if substr_parsed in get_datasource_label(tn)]
views = [vn for vn in views if substr_parsed in get_datasource_label(vn)]
if not schema_parsed and database.default_schemas:
user_schemas = (
[g.user.email.split("@")[0]] if hasattr(g.user, "email") else []
)
valid_schemas = set(database.default_schemas + user_schemas)
tables = [tn for tn in tables if tn.schema in valid_schemas]
views = [vn for vn in views if vn.schema in valid_schemas]
max_items = config["MAX_TABLE_NAMES"] or len(tables)
total_items = len(tables) + len(views)
max_tables = len(tables)
max_views = len(views)
if total_items and substr_parsed:
max_tables = max_items * len(tables) // total_items
max_views = max_items * len(views) // total_items
dataset_tables = {table.name: table for table in database.tables}
table_options = [
{
"value": tn.table,
"schema": tn.schema,
"label": get_datasource_label(tn),
"title": get_datasource_label(tn),
"type": "table",
"extra": dataset_tables[f"{tn.schema}.{tn.table}"].extra_dict
if (f"{tn.schema}.{tn.table}" in dataset_tables)
else None,
}
for tn in tables[:max_tables]
]
table_options.extend(
[
{
"value": vn.table,
"schema": vn.schema,
"label": get_datasource_label(vn),
"title": get_datasource_label(vn),
"type": "view",
}
for vn in views[:max_views]
]
)
table_options.sort(key=lambda value: value["label"])
payload = {"tableLength": len(tables) + len(views), "options": table_options}
return json_success(json.dumps(payload))
@api
@has_access_api
@event_logger.log_this
@expose("/copy_dash/<int:dashboard_id>/", methods=["GET", "POST"])
def copy_dash( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Copy dashboard"""
session = db.session()
data = json.loads(request.form["data"])
        # The client side sends back last_modified_time, which was set when the
        # dashboard was opened; it is used to avoid mid-air collisions.
        # Remove it here to avoid confusion.
data.pop("last_modified_time", None)
dash = Dashboard()
original_dash = session.query(Dashboard).get(dashboard_id)
dash.owners = [g.user] if g.user else []
dash.dashboard_title = data["dashboard_title"]
old_to_new_slice_ids: Dict[int, int] = {}
if data["duplicate_slices"]:
# Duplicating slices as well, mapping old ids to new ones
for slc in original_dash.slices:
new_slice = slc.clone()
new_slice.owners = [g.user] if g.user else []
session.add(new_slice)
session.flush()
new_slice.dashboards.append(dash)
old_to_new_slice_ids[slc.id] = new_slice.id
# update chartId of layout entities
for value in data["positions"].values():
if isinstance(value, dict) and value.get("meta", {}).get("chartId"):
old_id = value["meta"]["chartId"]
new_id = old_to_new_slice_ids.get(old_id)
value["meta"]["chartId"] = new_id
else:
dash.slices = original_dash.slices
dash.params = original_dash.params
DashboardDAO.set_dash_metadata(dash, data, old_to_new_slice_ids)
session.add(dash)
session.commit()
dash_json = json.dumps(dash.data)
session.close()
return json_success(dash_json)
@api
@has_access_api
@event_logger.log_this
@expose("/save_dash/<int:dashboard_id>/", methods=["GET", "POST"])
def save_dash( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Save a dashboard's metadata"""
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
data = json.loads(request.form["data"])
        # The client side sends back last_modified_time, which was set when the
        # dashboard was opened; it is used to avoid mid-air collisions.
remote_last_modified_time = data.get("last_modified_time")
current_last_modified_time = dash.changed_on.replace(microsecond=0).timestamp()
if (
remote_last_modified_time
and remote_last_modified_time < current_last_modified_time
):
return json_error_response(
__(
"This dashboard was changed recently. "
"Please reload dashboard to get latest version."
),
412,
)
# remove to avoid confusion.
data.pop("last_modified_time", None)
DashboardDAO.set_dash_metadata(dash, data)
session.merge(dash)
session.commit()
# get updated changed_on
dash = session.query(Dashboard).get(dashboard_id)
last_modified_time = dash.changed_on.replace(microsecond=0).timestamp()
session.close()
return json_success(
json.dumps({"status": "SUCCESS", "last_modified_time": last_modified_time,})
)
@api
@has_access_api
@event_logger.log_this
@expose("/add_slices/<int:dashboard_id>/", methods=["POST"])
def add_slices( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Add and save slices to a dashboard"""
data = json.loads(request.form["data"])
session = db.session()
dash = session.query(Dashboard).get(dashboard_id)
check_ownership(dash, raise_if_false=True)
new_slices = session.query(Slice).filter(Slice.id.in_(data["slice_ids"]))
dash.slices += new_slices
session.merge(dash)
session.commit()
session.close()
return "SLICES ADDED"
@api
@has_access_api
@event_logger.log_this
@expose("/testconn", methods=["POST", "GET"])
def testconn( # pylint: disable=too-many-return-statements,no-self-use
self,
) -> FlaskResponse:
"""Tests a sqla connection"""
db_name = request.json.get("name")
uri = request.json.get("uri")
try:
if app.config["PREVENT_UNSAFE_DB_CONNECTIONS"]:
check_sqlalchemy_uri(make_url(uri))
# if the database already exists in the database, only its safe
# (password-masked) URI would be shown in the UI and would be passed in the
# form data so if the database already exists and the form was submitted
# with the safe URI, we assume we should retrieve the decrypted URI to test
# the connection.
if db_name:
existing_database = (
db.session.query(Database)
.filter_by(database_name=db_name)
.one_or_none()
)
if existing_database and uri == existing_database.safe_sqlalchemy_uri():
uri = existing_database.sqlalchemy_uri_decrypted
# This is the database instance that will be tested. Note the extra fields
# are represented as JSON encoded strings in the model.
database = Database(
server_cert=request.json.get("server_cert"),
extra=json.dumps(request.json.get("extra", {})),
impersonate_user=request.json.get("impersonate_user"),
encrypted_extra=json.dumps(request.json.get("encrypted_extra", {})),
)
database.set_sqlalchemy_uri(uri)
database.db_engine_spec.mutate_db_for_connection_test(database)
username = (
g.user.username if g.user and hasattr(g.user, "username") else None
)
engine = database.get_sqla_engine(user_name=username)
with closing(engine.raw_connection()) as conn:
if engine.dialect.do_ping(conn):
return json_success('"OK"')
raise DBAPIError(None, None, None)
except CertificateException as ex:
logger.info("Certificate exception")
return json_error_response(ex.message)
except (NoSuchModuleError, ModuleNotFoundError):
logger.info("Invalid driver")
driver_name = make_url(uri).drivername
return json_error_response(
_(
"Could not load database driver: %(driver_name)s",
driver_name=driver_name,
),
400,
)
except ArgumentError:
logger.info("Invalid URI")
return json_error_response(
_(
"Invalid connection string, a valid string usually follows:\n"
"'DRIVER://USER:PASSWORD@DB-HOST/DATABASE-NAME'"
)
)
except DBAPIError:
logger.warning("Connection failed")
return json_error_response(
_("Connection failed, please check your connection settings"), 400
)
except SupersetSecurityException as ex:
logger.warning("Stopped an unsafe database connection")
return json_error_response(_(str(ex)), 400)
except Exception as ex: # pylint: disable=broad-except
logger.warning("Unexpected error %s", type(ex).__name__)
return json_error_response(
_("Unexpected error occurred, please check your logs for details"), 400
)
@api
@has_access_api
@event_logger.log_this
@expose("/recent_activity/<int:user_id>/", methods=["GET"])
def recent_activity( # pylint: disable=no-self-use
self, user_id: int
) -> FlaskResponse:
"""Recent activity (actions) for a given user"""
limit = request.args.get("limit")
limit = int(limit) if limit and limit.isdigit() else 100
actions = request.args.get("actions", "explore,dashboard").split(",")
# whether to get distinct subjects
distinct = request.args.get("distinct") != "false"
has_subject_title = or_(
and_(
                # .isnot(None) keeps the NULL check in the generated SQL; a Python
                # `is not None` on a Column expression is always truthy
                Dashboard.dashboard_title.isnot(None),
                Dashboard.dashboard_title != "",
            ),
            and_(Slice.slice_name.isnot(None), Slice.slice_name != ""),
)
if distinct:
one_year_ago = datetime.today() - timedelta(days=365)
subqry = (
db.session.query(
Log.dashboard_id,
Log.slice_id,
Log.action,
func.max(Log.dttm).label("dttm"),
)
.group_by(Log.dashboard_id, Log.slice_id, Log.action)
.filter(
and_(
Log.action.in_(actions),
Log.user_id == user_id,
# limit to one year of data to improve performance
Log.dttm > one_year_ago,
or_(Log.dashboard_id.isnot(None), Log.slice_id.isnot(None)),
)
)
.subquery()
)
qry = (
db.session.query(
subqry,
Dashboard.slug.label("dashboard_slug"),
Dashboard.dashboard_title,
Slice.slice_name,
)
.outerjoin(Dashboard, Dashboard.id == subqry.c.dashboard_id)
.outerjoin(Slice, Slice.id == subqry.c.slice_id,)
.filter(has_subject_title)
.order_by(subqry.c.dttm.desc())
.limit(limit)
)
else:
qry = (
db.session.query(
Log.dttm,
Log.action,
Log.dashboard_id,
Log.slice_id,
Dashboard.slug.label("dashboard_slug"),
Dashboard.dashboard_title,
Slice.slice_name,
)
.outerjoin(Dashboard, Dashboard.id == Log.dashboard_id)
.outerjoin(Slice, Slice.id == Log.slice_id)
.filter(has_subject_title)
.order_by(Log.dttm.desc())
.limit(limit)
)
payload = []
for log in qry.all():
item_url = None
item_title = None
item_type = None
if log.dashboard_id:
item_type = "dashboard"
item_url = Dashboard(id=log.dashboard_id, slug=log.dashboard_slug).url
item_title = log.dashboard_title
elif log.slice_id:
slc = Slice(id=log.slice_id, slice_name=log.slice_name)
item_type = "slice"
item_url = slc.slice_url
item_title = slc.chart
payload.append(
{
"action": log.action,
"item_type": item_type,
"item_url": item_url,
"item_title": item_title,
"time": log.dttm,
"time_delta_humanized": humanize.naturaltime(
datetime.now() - log.dttm
),
}
)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/csrf_token/", methods=["GET"])
def csrf_token(self) -> FlaskResponse:
logger.warning(
"This API endpoint is deprecated and will be removed in version 2.0.0"
)
return Response(
self.render_template("superset/csrf_token.json"), mimetype="text/json"
)
@api
@has_access_api
@event_logger.log_this
@expose("/available_domains/", methods=["GET"])
def available_domains(self) -> FlaskResponse: # pylint: disable=no-self-use
"""
Returns the list of available Superset Webserver domains (if any)
defined in config. This enables charts embedded in other apps to
leverage domain sharding if appropriately configured.
"""
return Response(
json.dumps(conf.get("SUPERSET_WEBSERVER_DOMAINS")), mimetype="text/json"
)
@api
@has_access_api
@event_logger.log_this
@expose("/fave_dashboards_by_username/<username>/", methods=["GET"])
def fave_dashboards_by_username(self, username: str) -> FlaskResponse:
"""This lets us use a user's username to pull favourite dashboards"""
user = security_manager.find_user(username=username)
return self.fave_dashboards(user.get_id())
@api
@has_access_api
@event_logger.log_this
@expose("/fave_dashboards/<int:user_id>/", methods=["GET"])
def fave_dashboards( # pylint: disable=no-self-use
self, user_id: int
) -> FlaskResponse:
qry = (
db.session.query(Dashboard, FavStar.dttm)
.join(
FavStar,
and_(
FavStar.user_id == int(user_id),
FavStar.class_name == "Dashboard",
Dashboard.id == FavStar.obj_id,
),
)
.order_by(FavStar.dttm.desc())
)
payload = []
for o in qry.all():
dash = {
"id": o.Dashboard.id,
"dashboard": o.Dashboard.dashboard_link(),
"title": o.Dashboard.dashboard_title,
"url": o.Dashboard.url,
"dttm": o.dttm,
}
if o.Dashboard.created_by:
user = o.Dashboard.created_by
dash["creator"] = str(user)
dash["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(dash)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/created_dashboards/<int:user_id>/", methods=["GET"])
def created_dashboards( # pylint: disable=no-self-use
self, user_id: int
) -> FlaskResponse:
Dash = Dashboard
qry = (
db.session.query(Dash)
.filter(
or_( # pylint: disable=comparison-with-callable
Dash.created_by_fk == user_id, Dash.changed_by_fk == user_id
)
)
.order_by(Dash.changed_on.desc())
)
payload = [
{
"id": o.id,
"dashboard": o.dashboard_link(),
"title": o.dashboard_title,
"url": o.url,
"dttm": o.changed_on,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/user_slices", methods=["GET"])
@expose("/user_slices/<int:user_id>/", methods=["GET"])
def user_slices( # pylint: disable=no-self-use
self, user_id: Optional[int] = None
) -> FlaskResponse:
"""List of slices a user owns, created, modified or faved"""
if not user_id:
user_id = g.user.get_id()
owner_ids_query = (
db.session.query(Slice.id)
.join(Slice.owners)
.filter(security_manager.user_model.id == user_id)
)
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
FavStar,
and_(
FavStar.user_id == user_id,
FavStar.class_name == "slice",
Slice.id == FavStar.obj_id,
),
isouter=True,
)
.filter(
or_(
Slice.id.in_(owner_ids_query),
Slice.created_by_fk == user_id,
Slice.changed_by_fk == user_id,
FavStar.user_id == user_id,
)
)
.order_by(Slice.slice_name.asc())
)
payload = [
{
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"data": o.Slice.form_data,
"dttm": o.dttm if o.dttm else o.Slice.changed_on,
"viz_type": o.Slice.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/created_slices", methods=["GET"])
@expose("/created_slices/<int:user_id>/", methods=["GET"])
def created_slices( # pylint: disable=no-self-use
self, user_id: Optional[int] = None
) -> FlaskResponse:
"""List of slices created by this user"""
if not user_id:
user_id = g.user.get_id()
qry = (
db.session.query(Slice)
.filter(or_(Slice.created_by_fk == user_id, Slice.changed_by_fk == user_id))
.order_by(Slice.changed_on.desc())
)
payload = [
{
"id": o.id,
"title": o.slice_name,
"url": o.slice_url,
"dttm": o.changed_on,
"viz_type": o.viz_type,
}
for o in qry.all()
]
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@api
@has_access_api
@event_logger.log_this
@expose("/fave_slices", methods=["GET"])
@expose("/fave_slices/<int:user_id>/", methods=["GET"])
def fave_slices( # pylint: disable=no-self-use
self, user_id: Optional[int] = None
) -> FlaskResponse:
"""Favorite slices for a user"""
if not user_id:
user_id = g.user.get_id()
qry = (
db.session.query(Slice, FavStar.dttm)
.join(
FavStar,
and_(
FavStar.user_id == user_id,
FavStar.class_name == "slice",
Slice.id == FavStar.obj_id,
),
)
.order_by(FavStar.dttm.desc())
)
payload = []
for o in qry.all():
dash = {
"id": o.Slice.id,
"title": o.Slice.slice_name,
"url": o.Slice.slice_url,
"dttm": o.dttm,
"viz_type": o.Slice.viz_type,
}
if o.Slice.created_by:
user = o.Slice.created_by
dash["creator"] = str(user)
dash["creator_url"] = "/superset/profile/{}/".format(user.username)
payload.append(dash)
return json_success(json.dumps(payload, default=utils.json_int_dttm_ser))
@event_logger.log_this
@api
@has_access_api
@expose("/warm_up_cache/", methods=["GET"])
def warm_up_cache( # pylint: disable=too-many-locals,no-self-use
self,
) -> FlaskResponse:
"""Warms up the cache for the slice or table.
        Note that for slices a force refresh occurs.
In terms of the `extra_filters` these can be obtained from records in the JSON
encoded `logs.json` column associated with the `explore_json` action.
"""
session = db.session()
slice_id = request.args.get("slice_id")
dashboard_id = request.args.get("dashboard_id")
table_name = request.args.get("table_name")
db_name = request.args.get("db_name")
extra_filters = request.args.get("extra_filters")
slices: List[Slice] = []
if not slice_id and not (table_name and db_name):
return json_error_response(
__(
"Malformed request. slice_id or table_name and db_name "
"arguments are expected"
),
status=400,
)
if slice_id:
slices = session.query(Slice).filter_by(id=slice_id).all()
if not slices:
return json_error_response(
__("Chart %(id)s not found", id=slice_id), status=404
)
elif table_name and db_name:
table = (
session.query(SqlaTable)
.join(Database)
.filter(
                    # both conditions must hold to find the table in the given
                    # database; a Python `or` here would not produce the intended SQL
                    and_(
                        Database.database_name == db_name,
                        SqlaTable.table_name == table_name,
                    )
)
).one_or_none()
if not table:
return json_error_response(
__(
"Table %(table)s wasn't found in the database %(db)s",
table=table_name,
db=db_name,
),
status=404,
)
slices = (
session.query(Slice)
.filter_by(datasource_id=table.id, datasource_type=table.type)
.all()
)
result = []
for slc in slices:
try:
form_data = get_form_data(slc.id, use_slice_data=True)[0]
if dashboard_id:
form_data["extra_filters"] = (
json.loads(extra_filters)
if extra_filters
else get_dashboard_extra_filters(slc.id, dashboard_id)
)
obj = get_viz(
datasource_type=slc.datasource.type,
datasource_id=slc.datasource.id,
form_data=form_data,
force=True,
)
g.form_data = form_data
payload = obj.get_payload()
delattr(g, "form_data")
error = payload["errors"] or None
status = payload["status"]
except Exception as ex: # pylint: disable=broad-except
error = utils.error_msg_from_exception(ex)
status = None
result.append(
{"slice_id": slc.id, "viz_error": error, "viz_status": status}
)
return json_success(json.dumps(result))
@has_access_api
@event_logger.log_this
@expose("/favstar/<class_name>/<int:obj_id>/<action>/")
def favstar( # pylint: disable=no-self-use
self, class_name: str, obj_id: int, action: str
) -> FlaskResponse:
"""Toggle favorite stars on Slices and Dashboard"""
session = db.session()
count = 0
favs = (
session.query(FavStar)
.filter_by(class_name=class_name, obj_id=obj_id, user_id=g.user.get_id())
.all()
)
if action == "select":
if not favs:
session.add(
FavStar(
class_name=class_name,
obj_id=obj_id,
user_id=g.user.get_id(),
dttm=datetime.now(),
)
)
count = 1
elif action == "unselect":
for fav in favs:
session.delete(fav)
else:
count = len(favs)
session.commit()
return json_success(json.dumps({"count": count}))
@api
@has_access_api
@event_logger.log_this
@expose("/dashboard/<int:dashboard_id>/published/", methods=("GET", "POST"))
def publish( # pylint: disable=no-self-use
self, dashboard_id: int
) -> FlaskResponse:
"""Gets and toggles published status on dashboards"""
logger.warning(
"This API endpoint is deprecated and will be removed in version 2.0.0"
)
session = db.session()
Role = ab_models.Role
dash = (
session.query(Dashboard).filter(Dashboard.id == dashboard_id).one_or_none()
)
admin_role = session.query(Role).filter(Role.name == "Admin").one_or_none()
if request.method == "GET":
if dash:
return json_success(json.dumps({"published": dash.published}))
return json_error_response(
f"ERROR: cannot find dashboard {dashboard_id}", status=404
)
edit_perm = is_owner(dash, g.user) or admin_role in get_user_roles()
if not edit_perm:
username = g.user.username if hasattr(g.user, "username") else "user"
return json_error_response(
f'ERROR: "{username}" cannot alter '
f'dashboard "{dash.dashboard_title}"',
status=403,
)
dash.published = str(request.form["published"]).lower() == "true"
session.commit()
return json_success(json.dumps({"published": dash.published}))
@has_access
@expose("/dashboard/<dashboard_id_or_slug>/")
@event_logger.log_this_with_extra_payload
@check_dashboard_access(
on_error=lambda self, ex: Response(
utils.error_msg_from_exception(ex), status=403
)
)
def dashboard( # pylint: disable=too-many-locals
self, # pylint: disable=no-self-use
dashboard_id_or_slug: str, # pylint: disable=unused-argument
add_extra_log_payload: Callable[..., None] = lambda **kwargs: None,
dashboard: Optional[Dashboard] = None,
) -> FlaskResponse:
"""
Server side rendering for a dashboard
:param dashboard_id_or_slug: identifier for dashboard. used in the decorators
        :param add_extra_log_payload: added by `log_this_with_extra_payload`, set a default value to appease pylint
:param dashboard: added by `check_dashboard_access`
"""
if not dashboard:
abort(404)
if config["ENABLE_ACCESS_REQUEST"]:
for datasource in dashboard.datasources:
datasource = ConnectorRegistry.get_datasource(
datasource_type=datasource.type,
datasource_id=datasource.id,
session=db.session(),
)
if datasource and not security_manager.can_access_datasource(
datasource=datasource
):
flash(
__(
security_manager.get_datasource_access_error_msg(datasource)
),
"danger",
)
return redirect(
f"/superset/request_access/?dashboard_id={dashboard.id}"
)
dash_edit_perm = check_ownership(
dashboard, raise_if_false=False
) and security_manager.can_access("can_save_dash", "Superset")
charts_only_mode = request.args.get("chartsOnly") == "true"
standalone_mode = ReservedUrlParameters.is_standalone_mode() or charts_only_mode
edit_mode = (
request.args.get(utils.ReservedUrlParameters.EDIT_MODE.value) == "true"
)
add_extra_log_payload(
dashboard_id=dashboard.id,
dashboard_version="v2",
dash_edit_perm=dash_edit_perm,
edit_mode=edit_mode,
)
bootstrap_data = {
"user": bootstrap_user_data(g.user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/dashboard.html",
entry="dashboard",
standalone_mode=standalone_mode,
title=dashboard.dashboard_title,
custom_css=dashboard.css,
bootstrap_data=json.dumps(
bootstrap_data, default=utils.pessimistic_json_iso_dttm_ser
),
)
@api
@has_access
@event_logger.log_this
@expose("/log/", methods=["POST"])
def log(self) -> FlaskResponse: # pylint: disable=no-self-use
return Response(status=200)
@has_access
@expose("/sync_druid/", methods=["POST"])
@event_logger.log_this
def sync_druid_source(self) -> FlaskResponse: # pylint: disable=no-self-use
"""Syncs the druid datasource in main db with the provided config.
The endpoint takes 3 arguments:
user - user name to perform the operation as
cluster - name of the druid cluster
config - configuration stored in json that contains:
name: druid datasource name
dimensions: list of the dimensions, they become druid columns
with the type STRING
metrics_spec: list of metrics (dictionary). Metric consists of
2 attributes: type and name. Type can be count,
etc. `count` type is stored internally as longSum
other fields will be ignored.
Example: {
'name': 'test_click',
'metrics_spec': [{'type': 'count', 'name': 'count'}],
'dimensions': ['affiliate_id', 'campaign', 'first_seen']
}
"""
payload = request.get_json(force=True)
druid_config = payload["config"]
user_name = payload["user"]
cluster_name = payload["cluster"]
user = security_manager.find_user(username=user_name)
DruidDatasource = ConnectorRegistry.sources[ # pylint: disable=invalid-name
"druid"
]
DruidCluster = DruidDatasource.cluster_class # pylint: disable=invalid-name
if not user:
err_msg = __(
"Can't find User '%(name)s', please ask your admin " "to create one.",
name=user_name,
)
logger.error(err_msg)
return json_error_response(err_msg)
cluster = (
db.session.query(DruidCluster)
.filter_by(cluster_name=cluster_name)
.one_or_none()
)
if not cluster:
err_msg = __(
"Can't find DruidCluster with cluster_name = " "'%(name)s'",
name=cluster_name,
)
logger.error(err_msg)
return json_error_response(err_msg)
try:
DruidDatasource.sync_to_db_from_config(druid_config, user, cluster)
except Exception as ex: # pylint: disable=broad-except
err_msg = utils.error_msg_from_exception(ex)
logger.exception(err_msg)
return json_error_response(err_msg)
return Response(status=201)
@has_access
@expose("/get_or_create_table/", methods=["POST"])
@event_logger.log_this
def sqllab_table_viz(self) -> FlaskResponse: # pylint: disable=no-self-use
"""Gets or creates a table object with attributes passed to the API.
It expects the json with params:
* datasourceName - e.g. table name, required
* dbId - database id, required
* schema - table schema, optional
* templateParams - params for the Jinja templating syntax, optional
:return: Response
"""
data = json.loads(request.form["data"])
table_name = data["datasourceName"]
database_id = data["dbId"]
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
# Create table if doesn't exist.
with db.session.no_autoflush:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database_id = database_id
table.database = (
db.session.query(Database).filter_by(id=database_id).one()
)
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
# needed for the table validation.
validate_sqlatable(table)
db.session.add(table)
table.fetch_metadata()
create_table_permissions(table)
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
@expose("/version/", methods=["GET"])
def version(self) -> FlaskResponse:
return json_success(json.dumps({"version": config.get("SUPERSET_VERSION")}))
@has_superset_api_access
@expose("/sqllab_viz/", methods=["POST"])
@event_logger.log_this
def sqllab_viz(self) -> FlaskResponse: # pylint: disable=no-self-use
data = json.loads(request.form["data"])
try:
table_name = data["datasourceName"]
database_id = data["dbId"]
except KeyError:
return json_error_response("Missing required fields", status=400)
database = db.session.query(Database).get(database_id)
if not database:
return json_error_response("Database not found", status=400)
table = (
db.session.query(SqlaTable)
.filter_by(database_id=database_id, table_name=table_name)
.one_or_none()
)
if not table:
table = SqlaTable(table_name=table_name, owners=[g.user])
table.database = database
table.schema = data.get("schema")
table.template_params = data.get("templateParams")
table.is_sqllab_view = True
table.sql = ParsedQuery(data.get("sql")).stripped()
db.session.add(table)
cols = []
for config_ in data.get("columns"):
column_name = config_.get("name")
col = TableColumn(
column_name=column_name,
filterable=True,
groupby=True,
is_dttm=config_.get("is_date", False),
type=config_.get("type", False),
)
cols.append(col)
table.columns = cols
table.metrics = [SqlMetric(metric_name="count", expression="count(*)")]
db.session.commit()
return json_success(json.dumps({"table_id": table.id}))
@has_access
@expose("/extra_table_metadata/<int:database_id>/<table_name>/<schema>/")
@event_logger.log_this
def extra_table_metadata( # pylint: disable=no-self-use
self, database_id: int, table_name: str, schema: str
) -> FlaskResponse:
parsed_schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name) # type: ignore
mydb = db.session.query(Database).filter_by(id=database_id).one()
payload = mydb.db_engine_spec.extra_table_metadata(
mydb, table_name, parsed_schema
)
return json_success(json.dumps(payload))
@has_access
@expose("/select_star/<int:database_id>/<table_name>")
@expose("/select_star/<int:database_id>/<table_name>/<schema>")
@event_logger.log_this
def select_star(
self, database_id: int, table_name: str, schema: Optional[str] = None
) -> FlaskResponse:
logging.warning(
"%s.select_star "
"This API endpoint is deprecated and will be removed in version 2.0.0",
self.__class__.__name__,
)
stats_logger.incr(f"{self.__class__.__name__}.select_star.init")
database = db.session.query(Database).get(database_id)
if not database:
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.database_not_found"
)
return json_error_response("Not found", 404)
schema = utils.parse_js_uri_path_item(schema, eval_undefined=True)
table_name = utils.parse_js_uri_path_item(table_name) # type: ignore
if not self.appbuilder.sm.can_access_table(database, Table(table_name, schema)):
stats_logger.incr(
f"deprecated.{self.__class__.__name__}.select_star.permission_denied"
)
logging.warning(
"Permission denied for user %s on table: %s schema: %s",
str(g.user),
table_name,
schema,
)
return json_error_response("Not found", 404)
stats_logger.incr(f"deprecated.{self.__class__.__name__}.select_star.success")
return json_success(
database.select_star(
table_name, schema, latest_partition=True, show_cols=True
)
)
@has_access_api
@expose("/estimate_query_cost/<int:database_id>/", methods=["POST"])
@expose("/estimate_query_cost/<int:database_id>/<schema>/", methods=["POST"])
@event_logger.log_this
def estimate_query_cost( # pylint: disable=no-self-use
self, database_id: int, schema: Optional[str] = None
) -> FlaskResponse:
mydb = db.session.query(Database).get(database_id)
sql = json.loads(request.form.get("sql", '""'))
template_params = json.loads(request.form.get("templateParams") or "{}")
if template_params:
template_processor = get_template_processor(mydb)
sql = template_processor.process_template(sql, **template_params)
timeout = SQLLAB_QUERY_COST_ESTIMATE_TIMEOUT
timeout_msg = f"The estimation exceeded the {timeout} seconds timeout."
try:
with utils.timeout(seconds=timeout, error_message=timeout_msg):
cost = mydb.db_engine_spec.estimate_query_cost(
mydb, schema, sql, utils.QuerySource.SQL_LAB
)
except SupersetTimeoutException as ex:
logger.exception(ex)
return json_errors_response([ex.error])
except Exception as ex: # pylint: disable=broad-except
return json_error_response(utils.error_msg_from_exception(ex))
spec = mydb.db_engine_spec
query_cost_formatters: Dict[str, Any] = get_feature_flags().get(
"QUERY_COST_FORMATTERS_BY_ENGINE", {}
)
query_cost_formatter = query_cost_formatters.get(
spec.engine, spec.query_cost_formatter
)
cost = query_cost_formatter(cost)
return json_success(json.dumps(cost))
@expose("/theme/")
def theme(self) -> FlaskResponse:
return self.render_template("superset/theme.html")
@has_access_api
@expose("/results/<key>/")
@event_logger.log_this
def results(self, key: str) -> FlaskResponse:
return self.results_exec(key)
@staticmethod
def results_exec( # pylint: disable=too-many-return-statements
key: str,
) -> FlaskResponse:
"""Serves a key off of the results backend
It is possible to pass the `rows` query argument to limit the number
of rows returned.
"""
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
"sqllab.query.results_backend_read",
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
"Data could not be retrieved. " "You may want to re-run the query.",
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one_or_none()
if query is None:
return json_error_response(
"Data could not be retrieved. You may want to re-run the query.",
status=404,
)
try:
query.raise_for_access()
except SupersetSecurityException as ex:
return json_errors_response([ex.error], status=403)
payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
try:
obj = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
except SerializationError:
return json_error_response(
__("Data could not be deserialized. You may want to re-run the query."),
status=404,
)
if "rows" in request.args:
try:
rows = int(request.args["rows"])
except ValueError:
return json_error_response("Invalid `rows` argument", status=400)
obj = apply_display_max_row_limit(obj, rows)
# Trigger sql editor api if api got result for sql explorer query
return json_success(
json.dumps(
obj, default=utils.json_iso_dttm_ser, ignore_nan=True, encoding=None
)
)
@has_access_api
@expose("/stop_query/", methods=["POST"])
@event_logger.log_this
@backoff.on_exception(
backoff.constant,
Exception,
interval=1,
on_backoff=lambda details: db.session.rollback(),
on_giveup=lambda details: db.session.rollback(),
max_tries=5,
)
def stop_query(self) -> FlaskResponse:
client_id = request.form.get("client_id")
query = db.session.query(Query).filter_by(client_id=client_id).one()
if query.status in [
QueryStatus.FAILED,
QueryStatus.SUCCESS,
QueryStatus.TIMED_OUT,
]:
logger.warning(
"Query with client_id %s could not be stopped: "
"query already complete",
str(client_id),
)
return self.json_response("OK")
query.status = QueryStatus.STOPPED
db.session.commit()
return self.json_response("OK")
@has_access_api
@event_logger.log_this
@expose("/validate_sql_json/", methods=["POST", "GET"])
def validate_sql_json(
# pylint: disable=too-many-locals,too-many-return-statements,no-self-use
self,
) -> FlaskResponse:
"""Validates that arbitrary sql is acceptable for the given database.
Returns a list of error/warning annotations as json.
"""
sql = request.form["sql"]
database_id = request.form["database_id"]
schema = request.form.get("schema") or None
template_params = json.loads(request.form.get("templateParams") or "{}")
if len(template_params) > 0:
# TODO: factor the Database object out of template rendering
# or provide it as mydb so we can render template params
# without having to also persist a Query ORM object.
return json_error_response(
"SQL validation does not support template parameters", status=400
)
session = db.session()
mydb = session.query(Database).filter_by(id=database_id).one_or_none()
if not mydb:
return json_error_response(
"Database with id {} is missing.".format(database_id), status=400
)
spec = mydb.db_engine_spec
validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE")
if not validators_by_engine or spec.engine not in validators_by_engine:
return json_error_response(
"no SQL validator is configured for {}".format(spec.engine), status=400
)
validator_name = validators_by_engine[spec.engine]
validator = get_validator_by_name(validator_name)
if not validator:
return json_error_response(
"No validator named {} found (configured for the {} engine)".format(
validator_name, spec.engine
)
)
try:
timeout = config["SQLLAB_VALIDATION_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
with utils.timeout(seconds=timeout, error_message=timeout_msg):
errors = validator.validate(sql, schema, mydb)
payload = json.dumps(
[err.to_dict() for err in errors],
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
return json_success(payload)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
msg = _(
"%(validator)s was unable to check your query.\n"
"Please recheck your query.\n"
"Exception: %(ex)s",
validator=validator.name,
ex=ex,
)
# Return as a 400 if the database error message says we got a 4xx error
if re.search(r"([\W]|^)4\d{2}([\W]|$)", str(ex)):
return json_error_response(f"{msg}", status=400)
return json_error_response(f"{msg}")
@staticmethod
def _sql_json_async( # pylint: disable=too-many-arguments
session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> FlaskResponse:
"""
Send SQL JSON query to celery workers.
:param session: SQLAlchemy session object
:param rendered_query: the rendered query to perform by workers
:param query: The query (SQLAlchemy) object
:return: A Flask Response
"""
logger.info("Query %i: Running query on a Celery worker", query.id)
        # Ignore the celery future object; waiting on it could cause the request to time out.
query_id = query.id
try:
task = sql_lab.get_sql_results.delay(
query.id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
user_name=g.user.username
if g.user and hasattr(g.user, "username")
else None,
start_time=now_as_float(),
expand_data=expand_data,
log_params=log_params,
)
# Explicitly forget the task to ensure the task metadata is removed from the
# Celery results backend in a timely manner.
try:
task.forget()
except NotImplementedError:
logger.warning(
"Unable to forget Celery task as backend"
"does not support this operation"
)
except Exception as ex: # pylint: disable=broad-except
logger.exception("Query %i: %s", query.id, str(ex))
msg = _(
"Failed to start remote query on a worker. "
"Tell your administrator to verify the availability of "
"the message queue."
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
return json_error_response("{}".format(msg))
# Update saved query with execution info from the query execution
QueryDAO.update_saved_query_exec_info(query_id)
resp = json_success(
json.dumps(
{"query": query.to_dict()},
default=utils.json_int_dttm_ser,
ignore_nan=True,
),
status=202,
)
session.commit()
return resp
@staticmethod
def _sql_json_sync(
_session: Session,
rendered_query: str,
query: Query,
expand_data: bool,
log_params: Optional[Dict[str, Any]] = None,
) -> FlaskResponse:
"""
Execute SQL query (sql json).
:param rendered_query: The rendered query (included templates)
:param query: The query SQL (SQLAlchemy) object
:return: A Flask Response
:raises: SupersetTimeoutException
"""
try:
timeout = config["SQLLAB_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
store_results = (
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE")
and not query.select_as_cta
)
query_id = query.id
with utils.timeout(seconds=timeout, error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query.id,
rendered_query,
return_results=True,
store_results=store_results,
user_name=g.user.username
if g.user and hasattr(g.user, "username")
else None,
expand_data=expand_data,
log_params=log_params,
)
# Update saved query if needed
QueryDAO.update_saved_query_exec_info(query_id)
# TODO: set LimitingFactor to display?
payload = json.dumps(
apply_display_max_row_limit(data),
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except SupersetTimeoutException as ex:
# re-raise exception for api exception handler
raise ex
except Exception as ex: # pylint: disable=broad-except
logger.exception("Query %i failed unexpectedly", query.id)
raise SupersetGenericDBErrorException(utils.error_msg_from_exception(ex))
if data.get("status") == QueryStatus.FAILED:
raise SupersetGenericDBErrorException(data["error"])
return json_success(payload)
@has_access_api
@handle_api_exception
@event_logger.log_this
@expose("/sql_json/", methods=["POST"])
def sql_json(self) -> FlaskResponse:
log_params = {
"user_agent": cast(Optional[str], request.headers.get("USER_AGENT"))
}
return self.sql_json_exec(request.json, log_params)
def sql_json_exec( # pylint: disable=too-many-statements,too-many-locals
self, query_params: Dict[str, Any], log_params: Optional[Dict[str, Any]] = None
) -> FlaskResponse:
"""Runs arbitrary sql and returns data as json"""
# Collect Values
database_id: int = cast(int, query_params.get("database_id"))
schema: str = cast(str, query_params.get("schema"))
sql: str = cast(str, query_params.get("sql"))
try:
template_params = json.loads(query_params.get("templateParams") or "{}")
except json.JSONDecodeError:
logger.warning(
"Invalid template parameter %s" " specified. Defaulting to empty dict",
str(query_params.get("templateParams")),
)
template_params = {}
limit: int = query_params.get("queryLimit") or app.config["SQL_MAX_ROW"]
async_flag: bool = cast(bool, query_params.get("runAsync"))
if limit < 0:
logger.warning(
"Invalid limit of %i specified. Defaulting to max limit.", limit
)
limit = 0
select_as_cta: bool = cast(bool, query_params.get("select_as_cta"))
ctas_method: CtasMethod = cast(
CtasMethod, query_params.get("ctas_method", CtasMethod.TABLE)
)
tmp_table_name: str = cast(str, query_params.get("tmp_table_name"))
client_id: str = cast(
str, query_params.get("client_id") or utils.shortid()[:10]
)
sql_editor_id: str = cast(str, query_params.get("sql_editor_id"))
tab_name: str = cast(str, query_params.get("tab"))
status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING
session = db.session()
mydb = session.query(Database).get(database_id)
if not mydb:
return json_error_response("Database with id %i is missing.", database_id)
# Set tmp_schema_name for CTA
# TODO(bkyryliuk): consider parsing, splitting tmp_schema_name from
# tmp_table_name if user enters
# <schema_name>.<table_name>
tmp_schema_name: Optional[str] = schema
if select_as_cta and mydb.force_ctas_schema:
tmp_schema_name = mydb.force_ctas_schema
elif select_as_cta:
tmp_schema_name = get_cta_schema_name(mydb, g.user, schema, sql)
# Save current query
query = Query(
database_id=database_id,
sql=sql,
schema=schema,
select_as_cta=select_as_cta,
ctas_method=ctas_method,
start_time=now_as_float(),
tab_name=tab_name,
status=status,
sql_editor_id=sql_editor_id,
tmp_table_name=tmp_table_name,
tmp_schema_name=tmp_schema_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
try:
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
except SQLAlchemyError as ex:
logger.error("Errors saving query details %s", str(ex))
session.rollback()
raise Exception(_("Query record was not created as expected."))
if not query_id:
raise Exception(_("Query record was not created as expected."))
logger.info("Triggering query_id: %i", query_id)
try:
query.raise_for_access()
except SupersetSecurityException as ex:
query.status = QueryStatus.FAILED
session.commit()
return json_errors_response([ex.error], status=403)
try:
template_processor = get_template_processor(
database=query.database, query=query
)
rendered_query = template_processor.process_template(
query.sql, **template_params
)
except TemplateError as ex:
query.status = QueryStatus.FAILED
session.commit()
raise SupersetTemplateParamsErrorException(
utils.error_msg_from_exception(ex)
)
if is_feature_enabled("ENABLE_TEMPLATE_PROCESSING"):
# pylint: disable=protected-access
ast = template_processor._env.parse(rendered_query)
undefined_parameters = find_undeclared_variables(ast) # type: ignore
if undefined_parameters:
query.status = QueryStatus.FAILED
session.commit()
raise SupersetTemplateParamsErrorException(
message=ngettext(
"The parameter %(parameters)s in your query is undefined.",
"The following parameters in your query are undefined: %(parameters)s.",
len(undefined_parameters),
parameters=utils.format_list(undefined_parameters),
)
+ " "
+ PARAMETER_MISSING_ERR,
extra={
"undefined_parameters": list(undefined_parameters),
"template_parameters": template_params,
},
)
# Limit is not applied to the CTA queries if SQLLAB_CTAS_NO_LIMIT flag is set
# to True.
if not (config.get("SQLLAB_CTAS_NO_LIMIT") and select_as_cta):
# set LIMIT after template processing
limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
if limits[0] is None or limits[0] > limits[1]:
query.limiting_factor = LimitingFactor.DROPDOWN
elif limits[1] > limits[0]:
query.limiting_factor = LimitingFactor.QUERY
else: # limits[0] == limits[1]
query.limiting_factor = LimitingFactor.QUERY_AND_DROPDOWN
query.limit = min(lim for lim in limits if lim is not None)
# Flag for whether or not to expand data
# (feature that will expand Presto row objects and arrays)
expand_data: bool = cast(
bool,
is_feature_enabled("PRESTO_EXPAND_DATA")
and query_params.get("expand_data"),
)
# Async request.
if async_flag:
return self._sql_json_async(
session, rendered_query, query, expand_data, log_params
)
# Sync request.
return self._sql_json_sync(
session, rendered_query, query, expand_data, log_params
)
@has_access
@event_logger.log_this
@expose("/csv/<client_id>")
def csv( # pylint: disable=no-self-use,too-many-locals
self, client_id: str
) -> FlaskResponse:
"""Download the query results as csv."""
logger.info("Exporting CSV file [%s]", client_id)
query = db.session.query(Query).filter_by(client_id=client_id).one()
try:
query.raise_for_access()
except SupersetSecurityException as ex:
flash(ex.error.message)
return redirect("/")
blob = None
if results_backend and query.results_key:
logger.info("Fetching CSV from results backend [%s]", query.results_key)
blob = results_backend.get(query.results_key)
if blob:
logger.info("Decompressing")
payload = utils.zlib_decompress(
blob, decode=not results_backend_use_msgpack
)
obj = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
columns = [c["name"] for c in obj["columns"]]
df = pd.DataFrame.from_records(obj["data"], columns=columns)
logger.info("Using pandas to convert to CSV")
else:
logger.info("Running a query to turn into CSV")
if query.select_sql:
sql = query.select_sql
limit = None
else:
sql = query.executed_sql
limit = ParsedQuery(sql).limit
if limit is not None and query.limiting_factor in {
LimitingFactor.QUERY,
LimitingFactor.DROPDOWN,
LimitingFactor.QUERY_AND_DROPDOWN,
}:
# remove extra row from `increased_limit`
limit -= 1
df = query.database.get_df(sql, query.schema)[:limit]
csv_data = csv.df_to_escaped_csv(df, index=False, **config["CSV_EXPORT"])
quoted_csv_name = parse.quote(query.name)
response = CsvResponse(
csv_data, headers=generate_download_headers("csv", quoted_csv_name)
)
event_info = {
"event_type": "data_export",
"client_id": client_id,
"row_count": len(df.index),
"database": query.database.name,
"schema": query.schema,
"sql": query.sql,
"exported_format": "csv",
}
event_rep = repr(event_info)
logger.info("CSV exported: %s", event_rep, extra={"superset_event": event_info})
return response
# -------------------------------------------------------------------------------------------
# SQL Explorer API
@use_ip_auth
@api
@handle_api_exception
@event_logger.log_this
@expose("/run_query/", methods=["POST", "GET"])
def run_query(self) -> FlaskResponse:
log_params = {
"user_agent": cast(
Optional[str], request.headers.get("USER_AGENT")
)
}
return self.run_query_exec(request.form, log_params)
def run_query_exec( # pylint: disable=too-many-statements,too-many-locals
self, query_params, log_params: Optional[Dict[str, Any]] = None
) -> FlaskResponse:
"""Runs arbitrary sql and returns and json"""
logger.info("Request Received for query execution", query_params)
logger.info("runAsync value: {}".format(query_params.get("runAsync")))
# Collect Values
# Haven't added type in here
g.user = security_manager.find_user(username="admin")
database_id: int = cast(int, int(query_params.get("database_id")))
schema: str = cast(str, (query_params.get("schema") or None))
sql: str = cast(str, query_params.get("sql"))
try:
template_params = json.loads(
query_params.get("templateParams") or "{}"
)
except json.JSONDecodeError:
logger.warning(
"Invalid template parameter %s" " specified. Defaulting to empty dict",
str(query_params.get("templateParams")),
)
template_params = {}
limit: int = int(query_params.get("queryLimit", 0))
async_flag: bool = cast(
bool, bool(query_params.get("runAsync") == "true")
)
if limit < 0:
logger.warning(
"Invalid limit of %i specified. Defaulting to max limit.", limit
)
limit = 0
select_as_cta: bool = cast(
bool, bool(query_params.get("select_as_cta") == "true")
)
ctas_method: CtasMethod = cast(
CtasMethod, query_params.get("ctas_method", CtasMethod.TABLE)
)
tmp_table_name: str = cast(str, query_params.get("tmp_table_name"))
client_id: str = cast(
str, query_params.get("client_id") or utils.shortid()[:10]
)
sql_editor_id: str = cast(str, query_params.get("sql_editor_id"))
tab_name: str = cast(str, query_params.get("tab"))
status: str = QueryStatus.PENDING if async_flag else QueryStatus.RUNNING
session = db.session()
mydb = session.query(Database).get(database_id)
if not mydb:
            return json_error_response(
                "Database with id {} is missing.".format(database_id), status=400
            )
# Set tmp_schema_name for CTA
# TODO(bkyryliuk): consider parsing, splitting tmp_schema_name from
# tmp_table_name if user enters
# <schema_name>.<table_name>
tmp_schema_name: Optional[str] = schema
if select_as_cta and mydb.force_ctas_schema:
tmp_schema_name = mydb.force_ctas_schema
elif select_as_cta:
tmp_schema_name = get_cta_schema_name(mydb, g.user, schema, sql)
if select_as_cta and mydb.force_ctas_schema:
tmp_table_name = "{}.{}".format(mydb.force_ctas_schema, tmp_table_name)
# Save current query
query = Query(
database_id=database_id,
sql=sql,
schema=schema,
select_as_cta=select_as_cta,
ctas_method=ctas_method,
start_time=now_as_float(),
tab_name=tab_name,
status=status,
sql_editor_id=sql_editor_id,
tmp_table_name=tmp_table_name,
tmp_schema_name=tmp_schema_name,
user_id=g.user.get_id() if g.user else None,
client_id=client_id,
)
try:
session.add(query)
session.flush()
query_id = query.id
session.commit() # shouldn't be necessary
except SQLAlchemyError as ex:
logger.error("Errors saving query details %s", str(ex))
session.rollback()
raise Exception(_("Query record was not created as expected."))
if not query_id:
raise Exception(_("Query record was not created as expected."))
logger.info("Triggering query_id: %i", query_id)
        # CHECK - Is this a breaking change for us?
try:
query.raise_for_access()
except SupersetSecurityException as ex:
query.status = QueryStatus.FAILED
session.commit()
return json_errors_response([ex.error], status=403)
try:
template_processor = get_template_processor(
database=query.database, query=query
)
rendered_query = template_processor.process_template(
query.sql, **template_params
)
except TemplateError as ex:
query.status = QueryStatus.FAILED
session.commit()
raise SupersetTemplateParamsErrorException(
utils.error_msg_from_exception(ex)
)
if is_feature_enabled("ENABLE_TEMPLATE_PROCESSING"):
# pylint: disable=protected-access
ast = template_processor._env.parse(rendered_query)
undefined_parameters = find_undeclared_variables(ast) # type: ignore
if undefined_parameters:
query.status = QueryStatus.FAILED
session.commit()
raise SupersetTemplateParamsErrorException(
message=ngettext(
"The parameter %(parameters)s in your query is undefined.",
"The following parameters in your query are undefined: %(parameters)s.",
len(undefined_parameters),
parameters=utils.format_list(undefined_parameters),
)
+ " "
+ PARAMETER_MISSING_ERR,
extra={
"undefined_parameters": list(undefined_parameters),
"template_parameters": template_params,
},
)
# Limit is not applied to the CTA queries
# if SQLLAB_CTAS_NO_LIMIT flag is set to True
if not (config.get("SQLLAB_CTAS_NO_LIMIT") and select_as_cta):
# set LIMIT after template processing
limits = [mydb.db_engine_spec.get_limit_from_sql(rendered_query), limit]
if limits[0] is None or limits[0] > limits[1]:
query.limiting_factor = LimitingFactor.DROPDOWN
elif limits[1] > limits[0]:
query.limiting_factor = LimitingFactor.QUERY
else: # limits[0] == limits[1]
query.limiting_factor = LimitingFactor.QUERY_AND_DROPDOWN
query.limit = min(lim for lim in limits if lim is not None)
# Flag for whether or not to expand data
# (feature that will expand Presto row objects and arrays)
expand_data: bool = cast(
bool,
is_feature_enabled("PRESTO_EXPAND_DATA")
and query_params.get("expand_data"),
)
logger.info("Query Limit to run async query: {}".format(query.limit))
logger.info("Triggering async: {}".format(async_flag))
# Async request.
if async_flag:
# Send SQL JSON query to celery workers.
logger.info("Query %i: Running query on a Celery worker", query.id)
            # Ignore the celery future object; waiting on it could cause the request to time out.
query_id = query.id
try:
task = sql_lab.get_sql_results.delay(
query_id,
rendered_query,
return_results=False,
store_results=not query.select_as_cta,
# CHECK - AIS is not used anymore
# user_name=g.user.username
user_name='AIS',
start_time=now_as_float(),
expand_data=expand_data,
log_params=log_params,
)
# Explicitly forget the task to ensure the task metadata
# is removed from the
# Celery results backend in a timely manner.
try:
task.forget()
except NotImplementedError:
logger.warning(
"Unable to forget Celery task as backend"
"does not support this operation"
)
except Exception as ex: # pylint: disable=broad-except
logger.exception("Query %i: %s", query.id, str(ex))
msg = _(
"Failed to start remote query on a worker. "
"Tell your administrator to verify the availability of "
"the message queue."
)
query.status = QueryStatus.FAILED
query.error_message = msg
session.commit()
                logger.info(
                    "Calling sql editor service lambda function for query_id: %s",
                    query_id,
                )
lambda_client.invoke(
FunctionName='ais-service-sql-editor-{}-getSupersetResponse'.format(os.environ['STAGE']),
InvocationType='Event',
Payload=json.dumps({
'error': msg,
'status': 'failed',
'queryId': query_id,
'tenant': TENANT,
}))
                logger.info(
                    "SQL editor function called successfully for query_id: %s",
                    query_id,
                )
return json_error_response("{}".format(msg))
# Update saved query with execution info from the query execution
QueryDAO.update_saved_query_exec_info(query_id)
resp = json_success(
json.dumps(
{"query": query.to_dict()},
default=utils.json_int_dttm_ser,
ignore_nan=True,
),
status=202,
)
session.commit()
return resp
# Sync request.
try:
logger.info("Running query without sync response")
timeout = config["SQLLAB_TIMEOUT"]
timeout_msg = f"The query exceeded the {timeout} seconds timeout."
store_results = (
is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE")
and not query.select_as_cta
)
query_id = query.id
with utils.timeout(seconds=timeout, error_message=timeout_msg):
# pylint: disable=no-value-for-parameter
data = sql_lab.get_sql_results(
query.id,
rendered_query,
return_results=True,
store_results=store_results,
user_name=g.user.username
if g.user and hasattr(g.user, "username")
else None,
expand_data=expand_data,
log_params=log_params,
)
# Update saved query if needed
QueryDAO.update_saved_query_exec_info(query_id)
# TODO: set LimitingFactor to display?
payload = json.dumps(
apply_display_max_row_limit(data),
default=utils.pessimistic_json_iso_dttm_ser,
ignore_nan=True,
encoding=None,
)
except SupersetTimeoutException as ex:
# re-raise exception for api exception handler
raise ex
except Exception as ex: # pylint: disable=broad-except
logger.exception("Query %i failed unexpectedly", query.id)
raise SupersetGenericDBErrorException(utils.error_msg_from_exception(ex))
if data.get("status") == QueryStatus.FAILED:
raise SupersetGenericDBErrorException(data["error"])
return json_success(payload)
@use_ip_auth
@api
@handle_api_exception
@expose("/check_cache_key/<key>/")
@event_logger.log_this
    def check_cache_key(self, key: str) -> FlaskResponse:
        """Returns whether a key exists in the cache"""
logger.info("Request Received to check cache key")
g.user = security_manager.find_user(username="admin")
        key_exist = bool(cache_manager.cache.get(key))
status = 200 if key_exist else 404
return json_success(json.dumps({"key_exist": key_exist}), status=status)
@use_ip_auth
@api
@handle_api_exception
@expose("/fetch_data/<key>/")
@event_logger.log_this
def fetch_data(self, key: str) -> FlaskResponse:
return self.fetch_data_exec(key)
@staticmethod
def fetch_data_exec( # pylint: disable=too-many-return-statements
key: str,
) -> FlaskResponse:
"""Serves a key off of the results backend
It is possible to pass the `rows` query argument to limit the number
of rows returned.
"""
logger.info("Request Received to fetch data")
g.user = security_manager.find_user(username="admin")
if not results_backend:
return json_error_response("Results backend isn't configured")
read_from_results_backend_start = now_as_float()
blob = results_backend.get(key)
stats_logger.timing(
"sqllab.query.results_backend_read",
now_as_float() - read_from_results_backend_start,
)
if not blob:
return json_error_response(
"Data could not be retrieved. " "You may want to re-run the query.",
status=410,
)
query = db.session.query(Query).filter_by(results_key=key).one_or_none()
if query is None:
return json_error_response(
"Data could not be retrieved. You may want to re-run the query.",
status=404,
)
try:
query.raise_for_access()
except SupersetSecurityException as ex:
return json_errors_response([ex.error], status=403)
payload = utils.zlib_decompress(blob, decode=not results_backend_use_msgpack)
try:
obj = _deserialize_results_payload(
payload, query, cast(bool, results_backend_use_msgpack)
)
except SerializationError:
return json_error_response(
__("Data could not be deserialized. You may want to re-run the query."),
status=404,
)
if "rows" in request.args:
try:
rows = int(request.args["rows"])
except ValueError:
return json_error_response("Invalid `rows` argument", status=400)
obj = apply_display_max_row_limit(obj, rows)
# Trigger sql editor api if api got result for sql explorer query
return json_success(
json.dumps(
obj, default=utils.json_iso_dttm_ser, ignore_nan=True, encoding=None
)
)
@use_ip_auth
@api
@handle_api_exception
@expose("/stop_sql_query/", methods=["POST"])
@event_logger.log_this
@backoff.on_exception(
backoff.constant,
Exception,
interval=1,
on_backoff=lambda details: db.session.rollback(),
on_giveup=lambda details: db.session.rollback(),
max_tries=5,
)
def stop_sql_query(self) -> FlaskResponse:
logger.info("Request Received to stop query")
g.user = security_manager.find_user(username="admin")
client_id = request.form.get("client_id")
query = db.session.query(Query).filter_by(client_id=client_id).one()
if query.status in [
QueryStatus.FAILED,
QueryStatus.SUCCESS,
QueryStatus.TIMED_OUT,
]:
logger.warning(
"Query with client_id %s could not be stopped: "
"query already complete",
str(client_id),
)
return self.json_response("OK")
query.status = QueryStatus.STOPPED
db.session.commit()
return self.json_response("OK")
# @use_ip_auth
# @api
# @handle_api_exception
# @expose("/validate_sql_query/", methods=["POST", "GET"])
# @event_logger.log_this
# def validate_sql_query(self):
# """Validates that arbitrary sql is acceptable for the given database.
# Returns a list of error/warning annotations as json.
# """
# logger.info("Request Received to validate query")
# g.user = security_manager.find_user(username="admin")
# sql = request.form.get("sql")
# database_id = request.form.get("database_id")
# schema = request.form.get("schema") or None
# template_params = json.loads(request.form.get("templateParams") or "{}")
#
# if len(template_params) > 0:
# # TODO: factor the Database object out of template rendering
# # or provide it as mydb so we can render template params
# # without having to also persist a Query ORM object.
# return json_error_response(
# "SQL validation does not support template parameters", status=400
# )
#
# session = db.session()
# mydb = session.query(models.Database).filter_by(id=database_id).first()
# if not mydb:
# json_error_response(
# "Database with id {} is missing.".format(database_id), status=400
# )
#
# spec = mydb.db_engine_spec
# validators_by_engine = get_feature_flags().get("SQL_VALIDATORS_BY_ENGINE")
# if not validators_by_engine or spec.engine not in validators_by_engine:
# return json_error_response(
# "no SQL validator is configured for {}".format(spec.engine), status=400
# )
# validator_name = validators_by_engine[spec.engine]
# validator = get_validator_by_name(validator_name)
# if not validator:
# return json_error_response(
# "No validator named {} found (configured for the {} engine)".format(
# validator_name, spec.engine
# )
# )
#
# try:
# timeout = config.get("SQLLAB_VALIDATION_TIMEOUT")
# timeout_msg = f"The query exceeded the {timeout} seconds timeout."
# with utils.timeout(seconds=timeout, error_message=timeout_msg):
# errors = validator.validate(sql, schema, mydb)
# payload = json.dumps(
# [err.to_dict() for err in errors],
# default=utils.pessimistic_json_iso_dttm_ser,
# ignore_nan=True,
# encoding=None,
# )
# return json_success(payload)
# except Exception as e:
# logger.exception(e)
# msg = _(
# f"{validator.name} was unable to check your query.\nPlease "
# "make sure that any services it depends on are available\n"
# f"Exception: {e}"
# )
# return json_error_response(f"{msg}")
# -----------------------------------------------------------------------------------------
@api
@handle_api_exception
@has_access
@event_logger.log_this
@expose("/fetch_datasource_metadata")
def fetch_datasource_metadata(self) -> FlaskResponse: # pylint: disable=no-self-use
"""
Fetch the datasource metadata.
:returns: The Flask response
:raises SupersetSecurityException: If the user cannot access the resource
"""
datasource_id, datasource_type = request.args["datasourceKey"].split("__")
datasource = ConnectorRegistry.get_datasource(
datasource_type, datasource_id, db.session,
)
# Check if datasource exists
if not datasource:
return json_error_response(DATASOURCE_MISSING_ERR)
datasource.raise_for_access()
return json_success(json.dumps(datasource.data))
@has_access_api
@event_logger.log_this
@expose("/queries/<float:last_updated_ms>")
@expose("/queries/<int:last_updated_ms>")
def queries(self, last_updated_ms: Union[float, int]) -> FlaskResponse:
"""
Get the updated queries.
:param last_updated_ms: Unix time (milliseconds)
"""
return self.queries_exec(last_updated_ms)
@staticmethod
def queries_exec(last_updated_ms: Union[float, int]) -> FlaskResponse:
stats_logger.incr("queries")
if not g.user.get_id():
return json_error_response(
"Please login to access the queries.", status=403
)
# UTC date time, same that is stored in the DB.
last_updated_dt = datetime.utcfromtimestamp(last_updated_ms / 1000)
sql_queries = (
db.session.query(Query)
.filter(
Query.user_id == g.user.get_id(), Query.changed_on >= last_updated_dt
)
.all()
)
dict_queries = {q.client_id: q.to_dict() for q in sql_queries}
return json_success(json.dumps(dict_queries, default=utils.json_int_dttm_ser))
@has_access
@event_logger.log_this
@expose("/search_queries")
def search_queries(self) -> FlaskResponse: # pylint: disable=no-self-use
"""
Search for previously run sqllab queries. Used for Sqllab Query Search
page /superset/sqllab#search.
Custom permission can_only_search_queries_owned restricts queries
to only queries run by current user.
:returns: Response with list of sql query dicts
"""
if security_manager.can_access_all_queries():
search_user_id = request.args.get("user_id")
elif request.args.get("user_id") is not None:
try:
search_user_id = int(cast(int, request.args.get("user_id")))
except ValueError:
return Response(status=400, mimetype="application/json")
if search_user_id != g.user.get_user_id():
return Response(status=403, mimetype="application/json")
else:
search_user_id = g.user.get_user_id()
database_id = request.args.get("database_id")
search_text = request.args.get("search_text")
status = request.args.get("status")
# From and To time stamp should be Epoch timestamp in seconds
from_time = request.args.get("from")
to_time = request.args.get("to")
query = db.session.query(Query)
if search_user_id:
# Filter on user_id
query = query.filter(Query.user_id == search_user_id)
if database_id:
# Filter on db Id
query = query.filter(Query.database_id == database_id)
if status:
# Filter on status
query = query.filter(Query.status == status)
if search_text:
# Filter on search text
query = query.filter(Query.sql.like(f"%{search_text}%"))
if from_time:
query = query.filter(Query.start_time > int(from_time))
if to_time:
query = query.filter(Query.start_time < int(to_time))
query_limit = config["QUERY_SEARCH_LIMIT"]
sql_queries = query.order_by(Query.start_time.asc()).limit(query_limit).all()
dict_queries = [q.to_dict() for q in sql_queries]
return Response(
json.dumps(dict_queries, default=utils.json_int_dttm_ser),
status=200,
mimetype="application/json",
)
@app.errorhandler(500)
def show_traceback(self) -> FlaskResponse: # pylint: disable=no-self-use
return (
render_template("superset/traceback.html", error_msg=get_error_msg()),
500,
)
@event_logger.log_this
@expose("/welcome/")
def welcome(self) -> FlaskResponse:
"""Personalized welcome page"""
if not g.user or not g.user.get_id():
if conf.get("PUBLIC_ROLE_LIKE_GAMMA", False) or conf["PUBLIC_ROLE_LIKE"]:
return self.render_template("superset/public_welcome.html")
return redirect(appbuilder.get_url_for_login)
welcome_dashboard_id = (
db.session.query(UserAttribute.welcome_dashboard_id)
.filter_by(user_id=g.user.get_id())
.scalar()
)
if welcome_dashboard_id:
return self.dashboard(dashboard_id_or_slug=str(welcome_dashboard_id))
payload = {
"user": bootstrap_user_data(g.user),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/crud_views.html",
entry="crudViews",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@has_access
@event_logger.log_this
@expose("/profile/<username>/")
def profile(self, username: str) -> FlaskResponse:
"""User profile page"""
user = (
db.session.query(ab_models.User).filter_by(username=username).one_or_none()
)
if not user:
abort(404, description=f"User: {username} does not exist.")
payload = {
"user": bootstrap_user_data(user, include_perms=True),
"common": common_bootstrap_payload(),
}
return self.render_template(
"superset/basic.html",
title=_("%(user)s's profile", user=username).__str__(),
entry="profile",
bootstrap_data=json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
),
)
@staticmethod
def _get_sqllab_tabs(user_id: int) -> Dict[str, Any]:
# send list of tab state ids
tabs_state = (
db.session.query(TabState.id, TabState.label)
.filter_by(user_id=user_id)
.all()
)
tab_state_ids = [str(tab_state[0]) for tab_state in tabs_state]
# return first active tab, or fallback to another one if no tab is active
active_tab = (
db.session.query(TabState)
.filter_by(user_id=user_id)
.order_by(TabState.active.desc())
.first()
)
databases: Dict[int, Any] = {
database.id: {
k: v for k, v in database.to_json().items() if k in DATABASE_KEYS
}
for database in DatabaseDAO.find_all()
}
queries: Dict[str, Any] = {}
# These are unnecessary if sqllab backend persistence is disabled
if is_feature_enabled("SQLLAB_BACKEND_PERSISTENCE"):
# return all user queries associated with existing SQL editors
user_queries = (
db.session.query(Query)
.filter_by(user_id=user_id)
.filter(Query.sql_editor_id.in_(tab_state_ids))
.all()
)
queries = {
query.client_id: dict(query.to_dict().items()) for query in user_queries
}
return {
"tab_state_ids": tabs_state,
"active_tab": active_tab.to_dict() if active_tab else None,
"databases": databases,
"queries": queries,
}
@has_access
@event_logger.log_this
@expose("/sqllab/", methods=["GET", "POST"])
def sqllab(self) -> FlaskResponse:
"""SQL Editor"""
payload = {
"defaultDbId": config["SQLLAB_DEFAULT_DBID"],
"common": common_bootstrap_payload(),
**self._get_sqllab_tabs(g.user.get_id()),
}
form_data = request.form.get("form_data")
if form_data:
try:
payload["requested_query"] = json.loads(form_data)
except json.JSONDecodeError:
pass
payload["user"] = bootstrap_user_data(g.user)
bootstrap_data = json.dumps(
payload, default=utils.pessimistic_json_iso_dttm_ser
)
return self.render_template(
"superset/basic.html", entry="sqllab", bootstrap_data=bootstrap_data
)
@has_access
@event_logger.log_this
@expose("/sqllab/history/", methods=["GET"])
@event_logger.log_this
def sqllab_history(self) -> FlaskResponse:
if not is_feature_enabled("ENABLE_REACT_CRUD_VIEWS"):
return redirect("/superset/sqllab#search", code=307)
return super().render_app_template()
@api
@has_access_api
@event_logger.log_this
@expose("/schemas_access_for_csv_upload")
def schemas_access_for_csv_upload(self) -> FlaskResponse:
"""
This method exposes an API endpoint to
get the schema access control settings for csv upload in this database
"""
if not request.args.get("db_id"):
return json_error_response("No database is allowed for your csv upload")
db_id = int(request.args["db_id"])
database = db.session.query(Database).filter_by(id=db_id).one()
try:
schemas_allowed = database.get_schema_access_for_csv_upload()
if security_manager.can_access_database(database):
return self.json_response(schemas_allowed)
# the list schemas_allowed should not be empty here
# and the list schemas_allowed_processed returned from security_manager
# should not be empty either,
# otherwise the database should have been filtered out
# in CsvToDatabaseForm
schemas_allowed_processed = security_manager.get_schemas_accessible_by_user(
database, schemas_allowed, False
)
return self.json_response(schemas_allowed_processed)
except Exception as ex: # pylint: disable=broad-except
logger.exception(ex)
return json_error_response(
"Failed to fetch schemas allowed for csv upload in this database! "
"Please contact your Superset Admin!"
)
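    # Illustrative call to the endpoint above (the db_id value is a placeholder):
    #
    #   GET /superset/schemas_access_for_csv_upload?db_id=1
    #
    # On success this returns a JSON list of schema names the current user is
    # allowed to upload CSV files into for that database.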
|
py | b4170bcb30f5a8c4ca42a7bb21eb3a681df72f3e | import pandas as pd
import re
from os.path import join
re_desc = re.compile(r"(?:\//' @describeIn\s(\w+)\s([^\n]+)\n)")
re_func_name = re.compile(r"\n*\w+\s(\w+)\(\)\s\{\n*")
re_func_val = re.compile(r"\n*return\s(\w+);\n*")
re_list = [re_desc, re_func_name, re_func_val]
dir_path = r'C:\Users\nich980\code\hector\src'
f_name = 'rcpp_constants.cpp'
f_out = 'rcpp_constant_names.csv'
abs_path = join(dir_path, f_name)
# Dictionary to hold the variable/constant definitions
var_defs = {}
# Open & read the file line-by-line
with open(abs_path, 'r') as fh:
block_component = ''
block_desc = ''
func_name = ''
func_val = ''
# for line_idx, line in enumerate(fh):
file_str = fh.read()
block_arr = file_str.split("\n\n")
for block in block_arr:
match = re.search(re_desc, block)
if (match):
block_component = match.group(1)
block_desc = match.group(2)
match = re.search(re_func_name, block)
if (match):
func_name = match.group(1)
match = re.search(re_func_val, block)
if (match):
func_val = match.group(1)
if (func_val != ''):
var_defs[func_val] = [func_name, block_component, block_desc]
# Create a Pandas DataFrame from the dictionary we just created
df = pd.DataFrame.from_dict(var_defs, orient='index')
# The DataFrame is initialized with the 'Constant' column as the index,
# so here we copy the index column to a new column named 'Constant'
df['Constant'] = df.index
# Specify the desired order of the DataFrame colummns
cols_rearr = ['Constant', 0, 1, 2]
# Rearrange the columns
df = df[cols_rearr]
# Rename the columns as the DataFrame is initialized with integer column names
df = df.rename(columns={0: "Function", 1: "Type", 2: "Desc"})
# change the DataFrame index column from the function constant values to the
# default integer index values
df = df.reset_index(drop=True)
print(df)
print('\nWriting DataFrame to {}...'.format(f_out))
df.to_csv(f_out, sep=',', header=True, index=False)
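# Illustrative input/output for the regexes above (the C++ block below is a
# made-up example in the expected format, not copied from rcpp_constants.cpp):
#
#   //' @describeIn msgtype Message type for data
#   String GETDATA() {
#       return M_GETDATA;
#   }
#
# would produce the CSV row:
#   Constant=M_GETDATA, Function=GETDATA, Type=msgtype, Desc=Message type for data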
|
py | b4170d6a1b797b0efbbeaf389431d9b0c9b51e10 | import torch
import torch.nn as nn
import torch.nn.init as init
from functools import reduce
class Net(nn.Module):
def __init__(self, blocks, rate):
super(Net, self).__init__()
self.convt_I1 = nn.ConvTranspose2d(1, 1, kernel_size=int(4*rate//2), stride=rate, padding=rate//2, bias=False)
self.conv_input = nn.Conv2d(in_channels=1, out_channels=64, kernel_size=3, stride=1, padding=1, bias=False)
self.relu = nn.PReLU()
self.convt_F1 = self._make_layer(SKBlock(64), blocks)
self.Transpose = nn.ConvTranspose2d(64, 64, kernel_size=int(4*rate//2), stride=rate, padding=rate//2, bias=False)
self.relu_transpose = nn.PReLU()
self.convt_R1 = nn.Conv2d(in_channels=64, out_channels=1, kernel_size=3, stride=1, padding=1, bias=False)
for m in self.modules():
if isinstance(m, nn.Conv2d):
init.orthogonal(m.weight)
if m.bias is not None:
m.bias.data.zero_()
if isinstance(m, nn.ConvTranspose2d):
init.orthogonal(m.weight)
if m.bias is not None:
m.bias.data.zero_()
def _make_layer(self, block, blocks):
layers = []
for _ in range(blocks):
layers.append(block)
return nn.Sequential(*layers)
def forward(self, x):
convt_I1 = self.convt_I1(x)
out = self.relu(self.conv_input(x))
convt_F1 = self.convt_F1(out)
convt_out = self.relu_transpose(self.Transpose(convt_F1))
convt_R1 = self.convt_R1(convt_out)
HR = convt_I1 + convt_R1
return HR
class L1_Charbonnier_loss(nn.Module):
def __init__(self):
super(L1_Charbonnier_loss, self).__init__()
self.eps = 1e-6
def forward(self, X, Y):
diff = torch.add(X, -Y)
error = torch.sqrt(diff * diff + self.eps)
loss = torch.sum(error)
return loss
class SKBlock(nn.Module):
def __init__(self, planes, stride=1, use_sk=True):
super(SKBlock, self).__init__()
self.use_sk = use_sk
self.conv1 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.relu1 = nn.PReLU()
self.conv2 = nn.Conv2d(planes, planes, kernel_size=3, stride=stride, padding=1, bias=False)
self.relu2 = nn.PReLU()
self.sk = SKLayer(planes)
self.channelAttention = channelAttention(planes, planes)
def forward(self, x):
residual = x
out = self.relu1(self.conv1(x))
out = self.relu2(self.conv2(out))
if self.use_sk:
out = self.sk(out)
out = self.channelAttention(out)
out += residual
return out
class SKLayer(nn.Module):
def __init__(self, channel, stride=1, M=2, r=16, L=32):
super(SKLayer, self).__init__()
self.M = M
self.out_channels = channel
d = max(channel//r, L)
self.conv = nn.ModuleList()
for i in range(M):
self.conv.append(nn.Sequential(nn.Conv2d(channel, channel, 3, stride, padding=1+i, dilation=1+i, groups=32, bias=False),
nn.PReLU()))
self.global_pool = nn.AdaptiveAvgPool2d(1)
self.fc1 = nn.Sequential(nn.Conv2d(channel, d, 1, bias=False),
nn.PReLU())
self.fc2 = nn.Conv2d(d, channel*M, 1, 1, bias=False)
self.softmax = nn.Softmax(dim=1)
self.channelAttention = channelAttention(channel, channel)
def forward(self, xx):
batch_size = xx.size(0)
output = []
# split
for i, conv in enumerate(self.conv):
output.append(conv(xx))
# fusion
U = reduce(lambda x, y: x+y, output)
s = self.global_pool(U)
z = self.fc1(s)
a_b = self.fc2(z)
a_b = a_b.reshape(batch_size, self.M, self.out_channels, -1)
a_b = self.softmax(a_b)
# the part of selection
a_b = list(a_b.chunk(self.M, dim=1)) # split to a and b
a_b = list(map(lambda x: x.reshape(batch_size, self.out_channels, 1, 1), a_b))
V = list(map(lambda x, y: x*y, output, a_b))
V = reduce(lambda x, y: x+y, V)
V = self.channelAttention(V)
return V
class channelAttention(nn.Module):
def __init__(self, inChannels, outChannels):
super(channelAttention, self).__init__()
self.swish = nn.Sigmoid()
self.channel_squeeze = nn.AdaptiveAvgPool2d(1)
self.conv_down = nn.Conv2d(inChannels * 4, inChannels // 4, kernel_size=1, bias=False)
self.conv_up = nn.Conv2d(inChannels // 4, inChannels * 4, kernel_size=1, bias=False)
self.sig = nn.Sigmoid()
self.trans1 = nn.Sequential(
nn.Conv2d(in_channels=inChannels, out_channels=inChannels * 4, kernel_size=1, stride=1, padding=0, bias=False),
nn.PReLU(),
)
self.trans2 = nn.Sequential(
nn.Conv2d(in_channels=inChannels * 4, out_channels=outChannels, kernel_size=1, stride=1, padding=0, bias=False),
nn.PReLU(),
)
def forward(self, x):
ex = self.trans1(x)
out1 = self.channel_squeeze(ex)
out1 = self.conv_down(out1)
out1 = out1*self.swish(out1)
out1 = self.conv_up(out1)
weight = self.sig(out1)
out = ex*weight
out = self.trans2(out)
return out
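if __name__ == '__main__':
    # Minimal smoke test -- an added illustration, not part of the original file.
    # With rate=2 the ConvTranspose2d layers use kernel_size=4, stride=2, padding=1,
    # so the network upscales the input 2x. Note that init.orthogonal() is
    # deprecated; recent PyTorch versions may require init.orthogonal_ instead.
    net = Net(blocks=2, rate=2)
    lr_image = torch.randn(1, 1, 32, 32)   # batch of one grayscale patch
    sr_image = net(lr_image)
    print(sr_image.shape)                  # expected: torch.Size([1, 1, 64, 64])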
|
py | b4170f9535cc42000e2e1f0993e2fb929e1c7f5e | import os
from operator import itemgetter
import pytest
from funcy import raiser
from dvc.dvcfile import PIPELINE_FILE, FileIsGitIgnored
from dvc.exceptions import NoOutputOrStageError
from dvc.repo import Repo
from dvc.stage.exceptions import (
StageFileDoesNotExistError,
StageNameUnspecified,
StageNotFound,
)
from dvc.utils import relpath
from dvc.utils.fs import remove
from dvc.utils.strictyaml import YAMLValidationError
def test_collect(tmp_dir, scm, dvc, run_copy):
def collect_outs(*args, **kwargs):
return {
str(out)
for stage in dvc.stage.collect(*args, **kwargs)
for out in stage.outs
}
tmp_dir.dvc_gen("foo", "foo")
run_copy("foo", "bar", single_stage=True)
scm.add([".gitignore", "foo.dvc", "bar.dvc"])
scm.commit("Add foo and bar")
scm.checkout("new-branch", create_new=True)
run_copy("bar", "buzz", single_stage=True)
scm.add([".gitignore", "buzz.dvc"])
scm.commit("Add buzz")
assert collect_outs("bar.dvc", with_deps=True) == {"foo", "bar"}
assert collect_outs("buzz.dvc", with_deps=True) == {"foo", "bar", "buzz"}
assert collect_outs("buzz.dvc", with_deps=False) == {"buzz"}
run_copy("foo", "foobar", name="copy-foo-foobar")
assert collect_outs(":copy-foo-foobar") == {"foobar"}
assert collect_outs(":copy-foo-foobar", with_deps=True) == {
"foobar",
"foo",
}
assert collect_outs("dvc.yaml:copy-foo-foobar", recursive=True) == {
"foobar"
}
assert collect_outs("copy-foo-foobar") == {"foobar"}
assert collect_outs("copy-foo-foobar", with_deps=True) == {"foobar", "foo"}
assert collect_outs("copy-foo-foobar", recursive=True) == {"foobar"}
run_copy("foobar", "baz", name="copy-foobar-baz")
assert collect_outs("dvc.yaml") == {"foobar", "baz"}
assert collect_outs("dvc.yaml", with_deps=True) == {"foobar", "baz", "foo"}
def test_collect_dir_recursive(tmp_dir, dvc, run_head):
tmp_dir.gen({"dir": {"foo": "foo"}})
(stage1,) = dvc.add("dir", recursive=True)
with (tmp_dir / "dir").chdir():
stage2 = run_head("foo", name="copy-foo-bar")
stage3 = run_head("foo-1", single_stage=True)
assert set(dvc.stage.collect("dir", recursive=True)) == {
stage1,
stage2,
stage3,
}
def test_collect_with_not_existing_output_or_stage_name(
tmp_dir, dvc, run_copy
):
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.collect("some_file")
tmp_dir.dvc_gen("foo", "foo")
run_copy("foo", "bar", name="copy-foo-bar")
with pytest.raises(StageNotFound):
dvc.stage.collect("some_file")
def test_stages(tmp_dir, dvc):
def collect_stages():
return {
stage.relpath for stage in Repo(os.fspath(tmp_dir)).index.stages
}
tmp_dir.dvc_gen({"file": "a", "dir/file": "b", "dir/subdir/file": "c"})
assert collect_stages() == {
"file.dvc",
os.path.join("dir", "file.dvc"),
os.path.join("dir", "subdir", "file.dvc"),
}
tmp_dir.gen(".dvcignore", "dir")
assert collect_stages() == {"file.dvc"}
@pytest.fixture
def stages(tmp_dir, run_copy):
stage1, stage2 = tmp_dir.dvc_gen({"foo": "foo", "lorem": "lorem"})
return {
"foo-generate": stage1,
"lorem-generate": stage2,
"copy-foo-bar": run_copy("foo", "bar", single_stage=True),
"copy-bar-foobar": run_copy("bar", "foobar", name="copy-bar-foobar"),
"copy-lorem-ipsum": run_copy(
"lorem", "ipsum", name="copy-lorem-ipsum"
),
}
def test_collect_not_a_group_stage_with_group_flag(tmp_dir, dvc, stages):
assert set(dvc.stage.collect("copy-bar-foobar", accept_group=True)) == {
stages["copy-bar-foobar"]
}
assert set(
dvc.stage.collect("copy-bar-foobar", accept_group=True, with_deps=True)
) == {
stages["copy-bar-foobar"],
stages["copy-foo-bar"],
stages["foo-generate"],
}
assert set(
dvc.stage.collect_granular("copy-bar-foobar", accept_group=True)
) == {(stages["copy-bar-foobar"], None)}
assert set(
dvc.stage.collect_granular(
"copy-bar-foobar", accept_group=True, with_deps=True
)
) == {
(stages["copy-bar-foobar"], None),
(stages["copy-foo-bar"], None),
(stages["foo-generate"], None),
}
def test_collect_generated(tmp_dir, dvc):
d = {
"vars": [{"vars": [1, 2, 3, 4, 5]}],
"stages": {
"build": {"foreach": "${vars}", "do": {"cmd": "echo ${item}"}}
},
}
(tmp_dir / "dvc.yaml").dump(d)
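    # The foreach/do template above is rendered into one stage per list item
    # (typically named "build@1" ... "build@5"), which is why the group and
    # glob lookups below are expected to match all five generated stages.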
all_stages = set(dvc.index.stages)
assert len(all_stages) == 5
assert set(dvc.stage.collect()) == all_stages
assert set(dvc.stage.collect("build", accept_group=True)) == all_stages
assert (
set(dvc.stage.collect("build", accept_group=True, with_deps=True))
== all_stages
)
assert set(dvc.stage.collect("build*", glob=True)) == all_stages
assert (
set(dvc.stage.collect("build*", glob=True, with_deps=True))
== all_stages
)
stages_info = {(stage, None) for stage in all_stages}
assert (
set(dvc.stage.collect_granular("build", accept_group=True))
== stages_info
)
assert (
set(
dvc.stage.collect_granular(
"build", accept_group=True, with_deps=True
)
)
== stages_info
)
def test_collect_glob(tmp_dir, dvc, stages):
assert set(dvc.stage.collect("copy*", glob=True)) == {
stages[key] for key in ["copy-bar-foobar", "copy-lorem-ipsum"]
}
assert set(
dvc.stage.collect("copy-lorem*", glob=True, with_deps=True)
) == {stages[key] for key in ["copy-lorem-ipsum", "lorem-generate"]}
def test_collect_granular_with_no_target(tmp_dir, dvc, stages):
assert set(map(itemgetter(0), dvc.stage.collect_granular())) == set(
stages.values()
)
assert list(map(itemgetter(1), dvc.stage.collect_granular())) == [
None
] * len(stages)
def test_collect_granular_with_target(tmp_dir, dvc, stages):
assert dvc.stage.collect_granular("bar.dvc") == [
(stages["copy-foo-bar"], None)
]
assert dvc.stage.collect_granular(PIPELINE_FILE) == [
(stages["copy-bar-foobar"], None),
(stages["copy-lorem-ipsum"], None),
]
assert dvc.stage.collect_granular(":") == [
(stages["copy-bar-foobar"], None),
(stages["copy-lorem-ipsum"], None),
]
assert dvc.stage.collect_granular("copy-bar-foobar") == [
(stages["copy-bar-foobar"], None)
]
assert dvc.stage.collect_granular(":copy-bar-foobar") == [
(stages["copy-bar-foobar"], None)
]
assert dvc.stage.collect_granular("dvc.yaml:copy-bar-foobar") == [
(stages["copy-bar-foobar"], None)
]
with (tmp_dir / dvc.DVC_DIR).chdir():
assert dvc.stage.collect_granular(
relpath(tmp_dir / PIPELINE_FILE) + ":copy-bar-foobar"
) == [(stages["copy-bar-foobar"], None)]
assert dvc.stage.collect_granular("foobar") == [
(stages["copy-bar-foobar"], os.path.join(tmp_dir, "foobar"))
]
@pytest.mark.parametrize(
"target",
[
"not_existing.dvc",
"not_existing.dvc:stage_name",
"not_existing/dvc.yaml",
"not_existing/dvc.yaml:stage_name",
],
)
def test_collect_with_not_existing_dvcfile(tmp_dir, dvc, target):
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.collect_granular(target)
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.collect(target)
def test_collect_granular_with_not_existing_output_or_stage_name(tmp_dir, dvc):
with pytest.raises(NoOutputOrStageError):
dvc.stage.collect_granular("some_file")
with pytest.raises(NoOutputOrStageError):
dvc.stage.collect_granular("some_file", recursive=True)
def test_collect_granular_with_deps(tmp_dir, dvc, stages):
assert set(
map(
itemgetter(0),
dvc.stage.collect_granular("bar.dvc", with_deps=True),
)
) == {stages["copy-foo-bar"], stages["foo-generate"]}
assert set(
map(
itemgetter(0),
dvc.stage.collect_granular("copy-bar-foobar", with_deps=True),
)
) == {
stages["copy-bar-foobar"],
stages["copy-foo-bar"],
stages["foo-generate"],
}
assert set(
map(
itemgetter(0),
dvc.stage.collect_granular(PIPELINE_FILE, with_deps=True),
)
) == set(stages.values())
def test_collect_granular_same_output_name_stage_name(tmp_dir, dvc, run_copy):
(stage1,) = tmp_dir.dvc_gen("foo", "foo")
(stage2,) = tmp_dir.dvc_gen("copy-foo-bar", "copy-foo-bar")
stage3 = run_copy("foo", "bar", name="copy-foo-bar")
assert dvc.stage.collect_granular("copy-foo-bar") == [(stage3, None)]
coll = dvc.stage.collect_granular("copy-foo-bar", with_deps=True)
assert set(map(itemgetter(0), coll)) == {stage3, stage1}
assert list(map(itemgetter(1), coll)) == [None] * 2
assert dvc.stage.collect_granular("./copy-foo-bar") == [
(stage2, os.path.join(tmp_dir / "copy-foo-bar"))
]
assert dvc.stage.collect_granular("./copy-foo-bar", with_deps=True) == [
(stage2, os.path.join(tmp_dir / "copy-foo-bar"))
]
def test_collect_granular_priority_on_collision(tmp_dir, dvc, run_copy):
tmp_dir.gen({"dir": {"foo": "foo"}, "foo": "foo"})
(stage1,) = dvc.add("dir", recursive=True)
stage2 = run_copy("foo", "bar", name="dir")
assert dvc.stage.collect_granular("dir") == [(stage2, None)]
assert dvc.stage.collect_granular("dir", recursive=True) == [
(stage1, None)
]
remove(tmp_dir / "dir")
assert dvc.stage.collect_granular("dir") == [(stage2, None)]
assert dvc.stage.collect_granular("dir", recursive=True) == [
(stage2, None)
]
def test_collect_granular_collision_output_dir_stage_name(
tmp_dir, dvc, run_copy
):
stage1, *_ = tmp_dir.dvc_gen({"dir": {"foo": "foo"}, "foo": "foo"})
stage3 = run_copy("foo", "bar", name="dir")
assert dvc.stage.collect_granular("dir") == [(stage3, None)]
assert not dvc.stage.collect_granular("dir", recursive=True)
assert dvc.stage.collect_granular("./dir") == [
(stage1, os.path.join(tmp_dir / "dir"))
]
def test_collect_granular_not_existing_stage_name(tmp_dir, dvc, run_copy):
tmp_dir.dvc_gen("foo", "foo")
(stage,) = tmp_dir.dvc_gen("copy-foo-bar", "copy-foo-bar")
run_copy("foo", "bar", name="copy-foo-bar")
assert dvc.stage.collect_granular(
"copy-foo-bar.dvc:stage_name_not_needed"
) == [(stage, None)]
with pytest.raises(StageNotFound):
dvc.stage.collect_granular("dvc.yaml:does-not-exist")
def test_get_stages(tmp_dir, dvc, run_copy):
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.load_all()
tmp_dir.gen("foo", "foo")
stage1 = run_copy("foo", "bar", name="copy-foo-bar")
stage2 = run_copy("bar", "foobar", name="copy-bar-foobar")
assert set(dvc.stage.load_all()) == {stage1, stage2}
assert set(dvc.stage.load_all(path=PIPELINE_FILE)) == {stage1, stage2}
assert set(dvc.stage.load_all(name="copy-bar-foobar")) == {stage2}
assert set(
dvc.stage.load_all(path=PIPELINE_FILE, name="copy-bar-foobar")
) == {stage2}
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.load_all(path=relpath(tmp_dir / ".." / PIPELINE_FILE))
with pytest.raises(StageNotFound):
dvc.stage.load_all(path=PIPELINE_FILE, name="copy")
def test_get_stages_old_dvcfile(tmp_dir, dvc):
(stage1,) = tmp_dir.dvc_gen("foo", "foo")
assert set(dvc.stage.load_all("foo.dvc")) == {stage1}
assert set(dvc.stage.load_all("foo.dvc", name="foo-generate")) == {stage1}
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.load_all(path=relpath(tmp_dir / ".." / "foo.dvc"))
def test_get_stage(tmp_dir, dvc, run_copy):
tmp_dir.gen("foo", "foo")
stage1 = run_copy("foo", "bar", name="copy-foo-bar")
with pytest.raises(StageNameUnspecified):
dvc.stage.load_one()
with pytest.raises(StageNameUnspecified):
dvc.stage.load_one(path=PIPELINE_FILE)
assert (
dvc.stage.load_one(path=PIPELINE_FILE, name="copy-foo-bar") == stage1
)
assert dvc.stage.load_one(name="copy-foo-bar") == stage1
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.load_one(path="something.yaml", name="name")
with pytest.raises(StageNotFound):
dvc.stage.load_one(name="random_name")
def test_get_stage_single_stage_dvcfile(tmp_dir, dvc):
(stage1,) = tmp_dir.dvc_gen("foo", "foo")
assert dvc.stage.load_one("foo.dvc") == stage1
assert dvc.stage.load_one("foo.dvc", name="jpt") == stage1
with pytest.raises(StageFileDoesNotExistError):
dvc.stage.load_one(path="bar.dvc", name="name")
def test_collect_optimization(tmp_dir, dvc, mocker):
(stage,) = tmp_dir.dvc_gen("foo", "foo text")
# Forget cached stages and graph and error out on collection
dvc._reset()
mocker.patch(
"dvc.repo.index.Index.stages",
property(raiser(Exception("Should not collect"))),
)
# Should read stage directly instead of collecting the whole graph
dvc.stage.collect(stage.path)
dvc.stage.collect_granular(stage.path)
def test_collect_optimization_on_stage_name(tmp_dir, dvc, mocker, run_copy):
tmp_dir.dvc_gen("foo", "foo")
stage = run_copy("foo", "bar", name="copy-foo-bar")
# Forget cached stages and graph and error out on collection
dvc._reset()
mocker.patch(
"dvc.repo.index.Index.stages",
property(raiser(Exception("Should not collect"))),
)
# Should read stage directly instead of collecting the whole graph
assert dvc.stage.collect("copy-foo-bar") == [stage]
assert dvc.stage.collect_granular("copy-foo-bar") == [(stage, None)]
def test_collect_repo_callback(tmp_dir, dvc, mocker):
mock = mocker.Mock()
dvc.stage_collection_error_handler = mock
(stage,) = tmp_dir.dvc_gen("foo", "foo")
(tmp_dir / PIPELINE_FILE).dump({"stages": {"cmd": "echo hello world"}})
dvc._reset()
assert dvc.index.stages == [stage]
mock.assert_called_once()
file_path, exc = mock.call_args[0]
assert file_path == PIPELINE_FILE
assert isinstance(exc, YAMLValidationError)
def test_gitignored_collect_repo(tmp_dir, dvc, scm):
(stage,) = tmp_dir.dvc_gen({"data": {"foo": "foo", "bar": "bar"}})
assert dvc.stage.collect_repo() == [stage]
scm.ignore(stage.path)
scm._reset()
assert not dvc.stage.collect_repo()
def test_gitignored_file_try_collect_granular_for_data_files(
tmp_dir, dvc, scm
):
(stage,) = tmp_dir.dvc_gen({"data": {"foo": "foo", "bar": "bar"}})
path = os.path.join("data", "foo")
assert dvc.stage.collect_granular(path) == [
(stage, os.path.join(tmp_dir, path))
]
scm.ignore(stage.path)
dvc._reset()
with pytest.raises(NoOutputOrStageError):
dvc.stage.collect_granular(path)
def test_gitignored_file_try_collect_granular_for_dvc_yaml_files(
tmp_dir, dvc, scm, stages
):
assert dvc.stage.collect_granular("bar") == [
(stages["copy-foo-bar"], os.path.join(tmp_dir, "bar"))
]
scm.ignore(tmp_dir / "dvc.yaml")
scm._reset()
with pytest.raises(FileIsGitIgnored):
dvc.stage.collect_granular("bar")
|
py | b41710129d4a1b7a0c3e3013fa36ac3f0ac62be2 | from async_helpers.managers import AsyncEnabledManager
from typing import TypeVar, TYPE_CHECKING
if TYPE_CHECKING:
from .models import DiscordUser
_T = TypeVar("_T", bound="DiscordUser")
class DiscordUserManager(AsyncEnabledManager[_T]):
async def lookup_user(self, discord_id: int) -> _T:
return (await self.async_get_or_create(discord_id=discord_id))[0]
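# Illustrative usage -- an added sketch, assuming `objects` is set to this
# manager on the DiscordUser model and using a placeholder snowflake ID:
#
#   user = await DiscordUser.objects.lookup_user(123456789012345678)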
|
py | b417105be0c0984ed43f292b5ddd927ca37e2ccf | """
Pairs Trading with Kalman Filters
Credit: Quantopian
"""
import numpy as np
import pandas as pd
from pykalman import KalmanFilter
import statsmodels.api as sm
def initialize(context):
# Quantopian backtester specific variables
set_slippage(slippage.FixedSlippage(spread=0))
set_commission(commission.PerShare(cost=0.01, min_trade_cost=1.0))
context.pairs = [
KalmanPairTrade(sid(5885), sid(4283),
initial_bars=300, freq='1m', delta=1e-3, maxlen=300),
]
context.security_list = [sid(5885), sid(4283)]
weight = 1.8 / len(context.pairs)
for pair in context.pairs:
pair.leverage = weight
for minute in range(10, 390, 90):
for pair in context.pairs:
schedule_function(pair.trading_logic,
time_rule=time_rules.market_open(minutes=minute))
class KalmanPairTrade(object):
def __init__(self, y, x, leverage=1.0, initial_bars=10,
freq='1d', delta=1e-3, maxlen=3000):
self._y = y
self._x = x
self.maxlen = maxlen
self.initial_bars = initial_bars
self.freq = freq
self.delta = delta
self.leverage = leverage
self.Y = KalmanMovingAverage(self._y, maxlen=self.maxlen)
self.X = KalmanMovingAverage(self._x, maxlen=self.maxlen)
self.kf = None
self.entry_dt = pd.Timestamp('1900-01-01', tz='utc')
@property
def name(self):
return "{}~{}".format(self._y.symbol, self._x.symbol)
def trading_logic(self, context, data):
try:
if self.kf is None:
self.initialize_filters(context, data)
return
self.update(context, data)
if get_open_orders(self._x) or get_open_orders(self._y):
return
spreads = self.mean_spread()
zscore = (spreads[-1] - spreads.mean()) / spreads.std()
reference_pos = context.portfolio.positions[self._y].amount
now = get_datetime()
if reference_pos:
if (now - self.entry_dt).days > 20:
order_target(self._y, 0.0)
order_target(self._x, 0.0)
return
# Do a PNL check to make sure a reversion at least covered trading costs
# I do this because parameter drift often causes trades to be exited
# before the original spread has become profitable.
pnl = self.get_pnl(context, data)
if zscore > -0.0 and reference_pos > 0 and pnl > 0:
order_target(self._y, 0.0)
order_target(self._x, 0.0)
elif zscore < 0.0 and reference_pos < 0 and pnl > 0:
order_target(self._y, 0.0)
order_target(self._x, 0.0)
else:
if zscore > 1.5:
order_target_percent(self._y, -self.leverage / 2.)
order_target_percent(self._x, self.leverage / 2.)
self.entry_dt = now
if zscore < -1.5:
order_target_percent(self._y, self.leverage / 2.)
order_target_percent(self._x, -self.leverage / 2.)
self.entry_dt = now
except Exception as e:
log.debug("[{}] {}".format(self.name, str(e)))
def update(self, context, data):
prices = np.log(data.history(context.security_list, 'price', 1, '1m'))
self.X.update(prices)
self.Y.update(prices)
self.kf.update(self.means_frame().iloc[-1])
def mean_spread(self):
means = self.means_frame()
beta, alpha = self.kf.state_mean
return means[self._y] - (beta * means[self._x] + alpha)
def means_frame(self):
mu_Y = self.Y.state_means
mu_X = self.X.state_means
return pd.DataFrame([mu_Y, mu_X]).T
def initialize_filters(self, context, data):
prices = np.log(data.history(context.security_list, 'price', self.initial_bars, self.freq))
self.X.update(prices)
self.Y.update(prices)
# Drops the initial 0 mean value from the kalman filter
self.X.state_means = self.X.state_means.iloc[-self.initial_bars:]
self.Y.state_means = self.Y.state_means.iloc[-self.initial_bars:]
self.kf = KalmanRegression(self.Y.state_means, self.X.state_means,
delta=self.delta, maxlen=self.maxlen)
def get_pnl(self, context, data):
x = self._x
y = self._y
prices = data.history(context.security_list, 'price', 1, '1d').iloc[-1]
positions = context.portfolio.positions
dx = prices[x] - positions[x].cost_basis
dy = prices[y] - positions[y].cost_basis
return (positions[x].amount * dx +
positions[y].amount * dy)
def handle_data(context, data):
record(market_exposure=context.account.net_leverage,
leverage=context.account.leverage)
class KalmanMovingAverage(object):
"""
Estimates the moving average of a price process
via Kalman Filtering.
See http://pykalman.github.io/ for docs on the
filtering process.
"""
def __init__(self, asset, observation_covariance=1.0, initial_value=0,
initial_state_covariance=1.0, transition_covariance=0.05,
initial_window=20, maxlen=3000, freq='1d'):
self.asset = asset
self.freq = freq
self.initial_window = initial_window
self.maxlen = maxlen
self.kf = KalmanFilter(transition_matrices=[1],
observation_matrices=[1],
initial_state_mean=initial_value,
initial_state_covariance=initial_state_covariance,
observation_covariance=observation_covariance,
transition_covariance=transition_covariance)
self.state_means = pd.Series([self.kf.initial_state_mean], name=self.asset)
self.state_vars = pd.Series([self.kf.initial_state_covariance], name=self.asset)
def update(self, observations):
for dt, observation in observations[self.asset].iteritems():
self._update(dt, observation)
def _update(self, dt, observation):
mu, cov = self.kf.filter_update(self.state_means.iloc[-1],
self.state_vars.iloc[-1],
observation)
self.state_means[dt] = mu.flatten()[0]
self.state_vars[dt] = cov.flatten()[0]
if self.state_means.shape[0] > self.maxlen:
self.state_means = self.state_means.iloc[-self.maxlen:]
if self.state_vars.shape[0] > self.maxlen:
self.state_vars = self.state_vars.iloc[-self.maxlen:]
class KalmanRegression(object):
"""
Uses a Kalman Filter to estimate regression parameters
in an online fashion.
Estimated model: y ~ beta * x + alpha
"""
def __init__(self, initial_y, initial_x, delta=1e-5, maxlen=3000):
self._x = initial_x.name
self._y = initial_y.name
self.maxlen = maxlen
trans_cov = delta / (1 - delta) * np.eye(2)
obs_mat = np.expand_dims(
np.vstack([[initial_x], [np.ones(initial_x.shape[0])]]).T, axis=1)
self.kf = KalmanFilter(n_dim_obs=1, n_dim_state=2,
initial_state_mean=np.zeros(2),
initial_state_covariance=np.ones((2, 2)),
transition_matrices=np.eye(2),
observation_matrices=obs_mat,
observation_covariance=1.0,
transition_covariance=trans_cov)
state_means, state_covs = self.kf.filter(initial_y.values)
self.means = pd.DataFrame(state_means,
index=initial_y.index,
columns=['beta', 'alpha'])
self.state_cov = state_covs[-1]
def update(self, observations):
x = observations[self._x]
y = observations[self._y]
mu, self.state_cov = self.kf.filter_update(
self.state_mean, self.state_cov, y,
observation_matrix=np.array([[x, 1.0]]))
mu = pd.Series(mu, index=['beta', 'alpha'],
name=observations.name)
self.means = self.means.append(mu)
if self.means.shape[0] > self.maxlen:
self.means = self.means.iloc[-self.maxlen:]
def get_spread(self, observations):
x = observations[self._x]
y = observations[self._y]
return y - (self.means.beta[-1] * x + self.means.alpha[-1])
@property
def state_mean(self):
return self.means.iloc[-1]
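if __name__ == '__main__':
    # Standalone sketch -- an added illustration, not part of the original
    # Quantopian algorithm (which only runs inside the backtester). It fits the
    # online regression on two synthetic, cointegrated-looking price series.
    idx = pd.date_range('2020-01-01', periods=50, freq='D')
    x = pd.Series(np.cumsum(np.random.randn(50)) + 100.0, index=idx, name='X')
    y = pd.Series(0.8 * x.values + 5.0 + 0.1 * np.random.randn(50), index=idx, name='Y')
    kr = KalmanRegression(y, x)
    print(kr.state_mean)  # latest (beta, alpha) state estimate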
|
py | b41710eb7c439ea936868f7dd88ec62ac4cd7d04 | #! python
#
# This module implements an RFC 2217 compatible client. RFC 2217 describes a
# protocol to access serial ports over TCP/IP and allows setting the baud rate,
# modem control lines etc.
#
# This file is part of pySerial. https://github.com/pyserial/pyserial
# (C) 2001-2015 Chris Liechti <[email protected]>
#
# SPDX-License-Identifier: BSD-3-Clause
# TODO:
# - setting control line -> answer is not checked (had problems with one of the
#   servers). consider implementing a compatibility mode flag to make check
# conditional
# - write timeout not implemented at all
# ###########################################################################
# observations and issues with servers
# ===========================================================================
# sredird V2.2.1
# - http://www.ibiblio.org/pub/Linux/system/serial/ sredird-2.2.2.tar.gz
# - does not acknowledge SET_CONTROL (RTS/DTR) correctly, always responding
# [105 1] instead of the actual value.
# - SET_BAUDRATE answer contains 4 extra null bytes -> probably for larger
# numbers than 2**32?
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: while true; do nc -l -p 7000 -c "sredird debug /dev/ttyUSB0 /var/lock/sredir"; done
# ===========================================================================
# telnetcpcd (untested)
# - http://ftp.wayne.edu/kermit/sredird/telnetcpcd-1.09.tar.gz
# - To get the signature [COM_PORT_OPTION] w/o data has to be sent.
# ===========================================================================
# ser2net
# - does not negotiate BINARY or COM_PORT_OPTION for his side but at least
# acknowledges that the client activates these options
# - The configuration may be that the server prints a banner. As this client
# implementation does a flushInput on connect, this banner is hidden from
# the user application.
# - NOTIFY_MODEMSTATE: the poll interval of the server seems to be one
# second.
# - To get the signature [COM_PORT_OPTION 0] has to be sent.
# - run a server: run ser2net daemon, in /etc/ser2net.conf:
# 2000:telnet:0:/dev/ttyS0:9600 remctl banner
# ###########################################################################
# How to identify ports? pySerial might want to support other protocols in the
# future, so lets use an URL scheme.
# for RFC2217 compliant servers we will use this:
# rfc2217://<host>:<port>[?option[&option...]]
#
# options:
# - "logging" set log level print diagnostic messages (e.g. "logging=debug")
# - "ign_set_control": do not look at the answers to SET_CONTROL
# - "poll_modem": issue NOTIFY_MODEMSTATE requests when CTS/DTR/RI/CD is read.
# Without this option it expects that the server sends notifications
# automatically on change (which most servers do and is according to the
# RFC).
# the order of the options is not relevant
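#
# Illustrative client usage (an added example; host, port and options below are
# placeholders, not values taken from this module):
#
#   import serial
#   ser = serial.serial_for_url('rfc2217://localhost:7000?logging=debug', timeout=1)
#   ser.write(b'hello')
#   print(ser.read(5))
#   ser.close()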
from __future__ import absolute_import
import logging
import socket
import struct
import threading
import time
try:
import urlparse
except ImportError:
import urllib.parse as urlparse
try:
import Queue
except ImportError:
import queue as Queue
import serial
from serial.serialutil import SerialBase, SerialException, to_bytes, \
iterbytes, portNotOpenError, Timeout
# port string is expected to be something like this:
# rfc2217://host:port
# host may be an IP or including domain, whatever.
# port is 0...65535
# map log level names to constants. used in from_url()
LOGGER_LEVELS = {
'debug': logging.DEBUG,
'info': logging.INFO,
'warning': logging.WARNING,
'error': logging.ERROR,
}
# telnet protocol characters
SE = b'\xf0' # Subnegotiation End
NOP = b'\xf1' # No Operation
DM = b'\xf2' # Data Mark
BRK = b'\xf3' # Break
IP = b'\xf4' # Interrupt process
AO = b'\xf5' # Abort output
AYT = b'\xf6' # Are You There
EC = b'\xf7' # Erase Character
EL = b'\xf8' # Erase Line
GA = b'\xf9' # Go Ahead
SB = b'\xfa' # Subnegotiation Begin
WILL = b'\xfb'
WONT = b'\xfc'
DO = b'\xfd'
DONT = b'\xfe'
IAC = b'\xff' # Interpret As Command
IAC_DOUBLED = b'\xff\xff'
# selected telnet options
BINARY = b'\x00' # 8-bit data path
ECHO = b'\x01' # echo
SGA = b'\x03' # suppress go ahead
# RFC2217
COM_PORT_OPTION = b'\x2c'
# Client to Access Server
SET_BAUDRATE = b'\x01'
SET_DATASIZE = b'\x02'
SET_PARITY = b'\x03'
SET_STOPSIZE = b'\x04'
SET_CONTROL = b'\x05'
NOTIFY_LINESTATE = b'\x06'
NOTIFY_MODEMSTATE = b'\x07'
FLOWCONTROL_SUSPEND = b'\x08'
FLOWCONTROL_RESUME = b'\x09'
SET_LINESTATE_MASK = b'\x0a'
SET_MODEMSTATE_MASK = b'\x0b'
PURGE_DATA = b'\x0c'
SERVER_SET_BAUDRATE = b'\x65'
SERVER_SET_DATASIZE = b'\x66'
SERVER_SET_PARITY = b'\x67'
SERVER_SET_STOPSIZE = b'\x68'
SERVER_SET_CONTROL = b'\x69'
SERVER_NOTIFY_LINESTATE = b'\x6a'
SERVER_NOTIFY_MODEMSTATE = b'\x6b'
SERVER_FLOWCONTROL_SUSPEND = b'\x6c'
SERVER_FLOWCONTROL_RESUME = b'\x6d'
SERVER_SET_LINESTATE_MASK = b'\x6e'
SERVER_SET_MODEMSTATE_MASK = b'\x6f'
SERVER_PURGE_DATA = b'\x70'
RFC2217_ANSWER_MAP = {
SET_BAUDRATE: SERVER_SET_BAUDRATE,
SET_DATASIZE: SERVER_SET_DATASIZE,
SET_PARITY: SERVER_SET_PARITY,
SET_STOPSIZE: SERVER_SET_STOPSIZE,
SET_CONTROL: SERVER_SET_CONTROL,
NOTIFY_LINESTATE: SERVER_NOTIFY_LINESTATE,
NOTIFY_MODEMSTATE: SERVER_NOTIFY_MODEMSTATE,
FLOWCONTROL_SUSPEND: SERVER_FLOWCONTROL_SUSPEND,
FLOWCONTROL_RESUME: SERVER_FLOWCONTROL_RESUME,
SET_LINESTATE_MASK: SERVER_SET_LINESTATE_MASK,
SET_MODEMSTATE_MASK: SERVER_SET_MODEMSTATE_MASK,
PURGE_DATA: SERVER_PURGE_DATA,
}
SET_CONTROL_REQ_FLOW_SETTING = b'\x00' # Request Com Port Flow Control Setting (outbound/both)
SET_CONTROL_USE_NO_FLOW_CONTROL = b'\x01' # Use No Flow Control (outbound/both)
SET_CONTROL_USE_SW_FLOW_CONTROL = b'\x02' # Use XON/XOFF Flow Control (outbound/both)
SET_CONTROL_USE_HW_FLOW_CONTROL = b'\x03' # Use HARDWARE Flow Control (outbound/both)
SET_CONTROL_REQ_BREAK_STATE = b'\x04' # Request BREAK State
SET_CONTROL_BREAK_ON = b'\x05' # Set BREAK State ON
SET_CONTROL_BREAK_OFF = b'\x06' # Set BREAK State OFF
SET_CONTROL_REQ_DTR = b'\x07' # Request DTR Signal State
SET_CONTROL_DTR_ON = b'\x08' # Set DTR Signal State ON
SET_CONTROL_DTR_OFF = b'\x09' # Set DTR Signal State OFF
SET_CONTROL_REQ_RTS = b'\x0a' # Request RTS Signal State
SET_CONTROL_RTS_ON = b'\x0b' # Set RTS Signal State ON
SET_CONTROL_RTS_OFF = b'\x0c' # Set RTS Signal State OFF
SET_CONTROL_REQ_FLOW_SETTING_IN = b'\x0d' # Request Com Port Flow Control Setting (inbound)
SET_CONTROL_USE_NO_FLOW_CONTROL_IN = b'\x0e' # Use No Flow Control (inbound)
SET_CONTROL_USE_SW_FLOW_CONTOL_IN = b'\x0f' # Use XON/XOFF Flow Control (inbound)
SET_CONTROL_USE_HW_FLOW_CONTOL_IN = b'\x10' # Use HARDWARE Flow Control (inbound)
SET_CONTROL_USE_DCD_FLOW_CONTROL = b'\x11' # Use DCD Flow Control (outbound/both)
SET_CONTROL_USE_DTR_FLOW_CONTROL = b'\x12' # Use DTR Flow Control (inbound)
SET_CONTROL_USE_DSR_FLOW_CONTROL = b'\x13' # Use DSR Flow Control (outbound/both)
LINESTATE_MASK_TIMEOUT = 128 # Time-out Error
LINESTATE_MASK_SHIFTREG_EMPTY = 64 # Transfer Shift Register Empty
LINESTATE_MASK_TRANSREG_EMPTY = 32 # Transfer Holding Register Empty
LINESTATE_MASK_BREAK_DETECT = 16 # Break-detect Error
LINESTATE_MASK_FRAMING_ERROR = 8 # Framing Error
LINESTATE_MASK_PARTIY_ERROR = 4 # Parity Error
LINESTATE_MASK_OVERRUN_ERROR = 2 # Overrun Error
LINESTATE_MASK_DATA_READY = 1 # Data Ready
MODEMSTATE_MASK_CD = 128 # Receive Line Signal Detect (also known as Carrier Detect)
MODEMSTATE_MASK_RI = 64 # Ring Indicator
MODEMSTATE_MASK_DSR = 32 # Data-Set-Ready Signal State
MODEMSTATE_MASK_CTS = 16 # Clear-To-Send Signal State
MODEMSTATE_MASK_CD_CHANGE = 8 # Delta Receive Line Signal Detect
MODEMSTATE_MASK_RI_CHANGE = 4 # Trailing-edge Ring Detector
MODEMSTATE_MASK_DSR_CHANGE = 2 # Delta Data-Set-Ready
MODEMSTATE_MASK_CTS_CHANGE = 1 # Delta Clear-To-Send
PURGE_RECEIVE_BUFFER = b'\x01' # Purge access server receive data buffer
PURGE_TRANSMIT_BUFFER = b'\x02' # Purge access server transmit data buffer
PURGE_BOTH_BUFFERS = b'\x03' # Purge both the access server receive data
# buffer and the access server transmit data buffer
RFC2217_PARITY_MAP = {
serial.PARITY_NONE: 1,
serial.PARITY_ODD: 2,
serial.PARITY_EVEN: 3,
serial.PARITY_MARK: 4,
serial.PARITY_SPACE: 5,
}
RFC2217_REVERSE_PARITY_MAP = dict((v, k) for k, v in RFC2217_PARITY_MAP.items())
RFC2217_STOPBIT_MAP = {
serial.STOPBITS_ONE: 1,
serial.STOPBITS_ONE_POINT_FIVE: 3,
serial.STOPBITS_TWO: 2,
}
RFC2217_REVERSE_STOPBIT_MAP = dict((v, k) for k, v in RFC2217_STOPBIT_MAP.items())
# Telnet filter states
M_NORMAL = 0
M_IAC_SEEN = 1
M_NEGOTIATE = 2
# TelnetOption and TelnetSubnegotiation states
REQUESTED = 'REQUESTED'
ACTIVE = 'ACTIVE'
INACTIVE = 'INACTIVE'
REALLY_INACTIVE = 'REALLY_INACTIVE'
class TelnetOption(object):
"""Manage a single telnet option, keeps track of DO/DONT WILL/WONT."""
def __init__(self, connection, name, option, send_yes, send_no, ack_yes,
ack_no, initial_state, activation_callback=None):
"""\
Initialize option.
:param connection: connection used to transmit answers
:param name: a readable name for debug outputs
:param send_yes: what to send when option is to be enabled.
:param send_no: what to send when option is to be disabled.
:param ack_yes: what to expect when remote agrees on option.
:param ack_no: what to expect when remote disagrees on option.
:param initial_state: options initialized with REQUESTED are tried to
be enabled on startup. use INACTIVE for all others.
"""
self.connection = connection
self.name = name
self.option = option
self.send_yes = send_yes
self.send_no = send_no
self.ack_yes = ack_yes
self.ack_no = ack_no
self.state = initial_state
self.active = False
self.activation_callback = activation_callback
def __repr__(self):
"""String for debug outputs"""
return "{o.name}:{o.active}({o.state})".format(o=self)
def process_incoming(self, command):
"""\
A DO/DONT/WILL/WONT was received for this option, update state and
answer when needed.
"""
if command == self.ack_yes:
if self.state is REQUESTED:
self.state = ACTIVE
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is ACTIVE:
pass
elif self.state is INACTIVE:
self.state = ACTIVE
self.connection.telnet_send_option(self.send_yes, self.option)
self.active = True
if self.activation_callback is not None:
self.activation_callback()
elif self.state is REALLY_INACTIVE:
self.connection.telnet_send_option(self.send_no, self.option)
else:
raise ValueError('option in illegal state {!r}'.format(self))
elif command == self.ack_no:
if self.state is REQUESTED:
self.state = INACTIVE
self.active = False
elif self.state is ACTIVE:
self.state = INACTIVE
self.connection.telnet_send_option(self.send_no, self.option)
self.active = False
elif self.state is INACTIVE:
pass
elif self.state is REALLY_INACTIVE:
pass
else:
raise ValueError('option in illegal state {!r}'.format(self))
class TelnetSubnegotiation(object):
"""\
    An object to handle subnegotiation of options. In this case actually
sub-sub options for RFC 2217. It is used to track com port options.
"""
def __init__(self, connection, name, option, ack_option=None):
if ack_option is None:
ack_option = option
self.connection = connection
self.name = name
self.option = option
self.value = None
self.ack_option = ack_option
self.state = INACTIVE
def __repr__(self):
"""String for debug outputs."""
return "{sn.name}:{sn.state}".format(sn=self)
def set(self, value):
"""\
        Request a change of the value. A request is sent to the server. If
        the client needs to know whether the change was performed, it has to
        check the state of this object.
"""
if value != self.value:
self.value = value
self.state = REQUESTED
self.connection.rfc2217_send_subnegotiation(self.option, self.value)
if self.connection.logger:
self.connection.logger.debug("SB Requesting {} -> {!r}".format(self.name, self.value))
else:
if self.connection.logger:
self.connection.logger.debug("SB Requesting {} -> {!r} (skipped)".format(self.name, self.value))
def is_ready(self):
"""\
        Check if the answer from the server has been received. When the server
        rejects the change, raise a ValueError.
"""
if self.state == REALLY_INACTIVE:
raise ValueError("remote rejected value for option {!r}".format(self.name))
return self.state == ACTIVE
# add property to have a similar interface as TelnetOption
active = property(is_ready)
def wait(self, timeout=3):
"""\
        Wait until the subnegotiation has been acknowledged or the timeout
        expires. It can also raise a ValueError when the answer from the server
        does not match the value sent.
"""
timeout_timer = Timeout(timeout)
while not timeout_timer.expired():
time.sleep(0.05) # prevent 100% CPU load
if self.is_ready():
break
else:
raise SerialException("timeout while waiting for option {!r}".format(self.name))
def check_answer(self, suboption):
"""\
Check an incoming subnegotiation block. The parameter already has
cut off the header like sub option number and com port option value.
"""
if self.value == suboption[:len(self.value)]:
self.state = ACTIVE
else:
# error propagation done in is_ready
self.state = REALLY_INACTIVE
if self.connection.logger:
self.connection.logger.debug("SB Answer {} -> {!r} -> {}".format(self.name, suboption, self.state))
class Serial(SerialBase):
"""Serial port implementation for RFC 2217 remote serial ports."""
BAUDRATES = (50, 75, 110, 134, 150, 200, 300, 600, 1200, 1800, 2400, 4800,
9600, 19200, 38400, 57600, 115200)
def __init__(self, *args, **kwargs):
self._thread = None
self._socket = None
self._linestate = 0
self._modemstate = None
self._modemstate_timeout = Timeout(-1)
self._remote_suspend_flow = False
self._write_lock = None
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
self._telnet_options = None
self._rfc2217_port_settings = None
self._rfc2217_options = None
self._read_buffer = None
super(Serial, self).__init__(*args, **kwargs) # must be last call in case of auto-open
def open(self):
"""\
Open port with current settings. This may throw a SerialException
if the port cannot be opened.
"""
self.logger = None
self._ignore_set_control_answer = False
self._poll_modem_state = False
self._network_timeout = 3
if self._port is None:
raise SerialException("Port must be configured before it can be used.")
if self.is_open:
raise SerialException("Port is already open.")
try:
self._socket = socket.create_connection(self.from_url(self.portstr), timeout=5) # XXX good value?
self._socket.setsockopt(socket.IPPROTO_TCP, socket.TCP_NODELAY, 1)
except Exception as msg:
self._socket = None
raise SerialException("Could not open port {}: {}".format(self.portstr, msg))
        # use a thread safe queue as buffer. it also simplifies implementing
# the read timeout
self._read_buffer = Queue.Queue()
# to ensure that user writes does not interfere with internal
# telnet/rfc2217 options establish a lock
self._write_lock = threading.Lock()
# name the following separately so that, below, a check can be easily done
mandadory_options = [
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED),
]
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, REQUESTED),
] + mandadory_options
# RFC 2217 specific states
# COM port settings
self._rfc2217_port_settings = {
'baudrate': TelnetSubnegotiation(self, 'baudrate', SET_BAUDRATE, SERVER_SET_BAUDRATE),
'datasize': TelnetSubnegotiation(self, 'datasize', SET_DATASIZE, SERVER_SET_DATASIZE),
'parity': TelnetSubnegotiation(self, 'parity', SET_PARITY, SERVER_SET_PARITY),
'stopsize': TelnetSubnegotiation(self, 'stopsize', SET_STOPSIZE, SERVER_SET_STOPSIZE),
}
# There are more subnegotiation objects, combine all in one dictionary
# for easy access
self._rfc2217_options = {
'purge': TelnetSubnegotiation(self, 'purge', PURGE_DATA, SERVER_PURGE_DATA),
'control': TelnetSubnegotiation(self, 'control', SET_CONTROL, SERVER_SET_CONTROL),
}
self._rfc2217_options.update(self._rfc2217_port_settings)
# cache for line and modem states that the server sends to us
self._linestate = 0
self._modemstate = None
self._modemstate_timeout = Timeout(-1)
# RFC 2217 flow control between server and client
self._remote_suspend_flow = False
self.is_open = True
self._thread = threading.Thread(target=self._telnet_read_loop)
self._thread.setDaemon(True)
self._thread.setName('pySerial RFC 2217 reader thread for {}'.format(self._port))
self._thread.start()
try: # must clean-up if open fails
# negotiate Telnet/RFC 2217 -> send initial requests
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnet_send_option(option.send_yes, option.option)
# now wait until important options are negotiated
timeout = Timeout(self._network_timeout)
while not timeout.expired():
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in mandadory_options) == sum(o.state != INACTIVE for o in mandadory_options):
break
else:
raise SerialException(
"Remote does not seem to support RFC2217 or BINARY mode {!r}".format(mandadory_options))
if self.logger:
self.logger.info("Negotiated options: {}".format(self._telnet_options))
# fine, go on, set RFC 2271 specific things
self._reconfigure_port()
# all things set up get, now a clean start
if not self._dsrdtr:
self._update_dtr_state()
if not self._rtscts:
self._update_rts_state()
self.reset_input_buffer()
self.reset_output_buffer()
except:
self.close()
raise
def _reconfigure_port(self):
"""Set communication parameters on opened port."""
if self._socket is None:
raise SerialException("Can only operate on open ports")
# if self._timeout != 0 and self._interCharTimeout is not None:
# XXX
if self._write_timeout is not None:
raise NotImplementedError('write_timeout is currently not supported')
# XXX
# Setup the connection
# to get good performance, all parameter changes are sent first...
if not 0 < self._baudrate < 2 ** 32:
raise ValueError("invalid baudrate: {!r}".format(self._baudrate))
self._rfc2217_port_settings['baudrate'].set(struct.pack(b'!I', self._baudrate))
self._rfc2217_port_settings['datasize'].set(struct.pack(b'!B', self._bytesize))
self._rfc2217_port_settings['parity'].set(struct.pack(b'!B', RFC2217_PARITY_MAP[self._parity]))
self._rfc2217_port_settings['stopsize'].set(struct.pack(b'!B', RFC2217_STOPBIT_MAP[self._stopbits]))
# and now wait until parameters are active
items = self._rfc2217_port_settings.values()
if self.logger:
self.logger.debug("Negotiating settings: {}".format(items))
timeout = Timeout(self._network_timeout)
while not timeout.expired():
time.sleep(0.05) # prevent 100% CPU load
if sum(o.active for o in items) == len(items):
break
else:
raise SerialException("Remote does not accept parameter change (RFC2217): {!r}".format(items))
if self.logger:
self.logger.info("Negotiated settings: {}".format(items))
if self._rtscts and self._xonxoff:
raise ValueError('xonxoff and rtscts together are not supported')
elif self._rtscts:
self.rfc2217_set_control(SET_CONTROL_USE_HW_FLOW_CONTROL)
elif self._xonxoff:
self.rfc2217_set_control(SET_CONTROL_USE_SW_FLOW_CONTROL)
else:
self.rfc2217_set_control(SET_CONTROL_USE_NO_FLOW_CONTROL)
def close(self):
"""Close port"""
self.is_open = False
if self._socket:
try:
self._socket.shutdown(socket.SHUT_RDWR)
self._socket.close()
except:
# ignore errors.
pass
if self._thread:
self._thread.join(7) # XXX more than socket timeout
self._thread = None
# in case of quick reconnects, give the server some time
time.sleep(0.3)
self._socket = None
def from_url(self, url):
"""\
        extract host and port from a URL string; other settings are extracted
        and stored in the instance
"""
parts = urlparse.urlsplit(url)
if parts.scheme != "rfc2217":
raise SerialException(
'expected a string in the form '
'"rfc2217://<host>:<port>[?option[&option...]]": '
'not starting with rfc2217:// ({!r})'.format(parts.scheme))
try:
# process options now, directly altering self
for option, values in urlparse.parse_qs(parts.query, True).items():
if option == 'logging':
logging.basicConfig() # XXX is that good to call it here?
self.logger = logging.getLogger('pySerial.rfc2217')
self.logger.setLevel(LOGGER_LEVELS[values[0]])
self.logger.debug('enabled logging')
elif option == 'ign_set_control':
self._ignore_set_control_answer = True
elif option == 'poll_modem':
self._poll_modem_state = True
elif option == 'timeout':
self._network_timeout = float(values[0])
else:
raise ValueError('unknown option: {!r}'.format(option))
if not 0 <= parts.port < 65536:
raise ValueError("port not in range 0...65535")
except ValueError as e:
raise SerialException(
'expected a string in the form '
'"rfc2217://<host>:<port>[?option[&option...]]": {}'.format(e))
return (parts.hostname, parts.port)
# - - - - - - - - - - - - - - - - - - - - - - - -
@property
def in_waiting(self):
"""Return the number of bytes currently in the input buffer."""
if not self.is_open:
raise portNotOpenError
return self._read_buffer.qsize()
def read(self, size=1):
"""\
Read size bytes from the serial port. If a timeout is set it may
        return fewer characters than requested. With no timeout it will block
until the requested number of bytes is read.
"""
if not self.is_open:
raise portNotOpenError
data = bytearray()
try:
timeout = Timeout(self._timeout)
while len(data) < size:
if self._thread is None:
raise SerialException('connection failed (reader thread died)')
buf = self._read_buffer.get(True, timeout.time_left())
if buf is None:
return bytes(data)
data += buf
if timeout.expired():
break
except Queue.Empty: # -> timeout
pass
return bytes(data)
def write(self, data):
"""\
Output the given byte string over the serial port. Can block if the
connection is blocked. May raise SerialException if the connection is
closed.
"""
if not self.is_open:
raise portNotOpenError
with self._write_lock:
try:
self._socket.sendall(to_bytes(data).replace(IAC, IAC_DOUBLED))
except socket.error as e:
raise SerialException("connection failed (socket error): {}".format(e))
return len(data)
def reset_input_buffer(self):
"""Clear input buffer, discarding all that is in the buffer."""
if not self.is_open:
raise portNotOpenError
self.rfc2217_send_purge(PURGE_RECEIVE_BUFFER)
# empty read buffer
while self._read_buffer.qsize():
self._read_buffer.get(False)
def reset_output_buffer(self):
"""\
Clear output buffer, aborting the current output and
discarding all that is in the buffer.
"""
if not self.is_open:
raise portNotOpenError
self.rfc2217_send_purge(PURGE_TRANSMIT_BUFFER)
def _update_break_state(self):
"""\
        Set break: Controls TXD. When active, no transmitting is
possible.
"""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('set BREAK to {}'.format('active' if self._break_state else 'inactive'))
if self._break_state:
self.rfc2217_set_control(SET_CONTROL_BREAK_ON)
else:
self.rfc2217_set_control(SET_CONTROL_BREAK_OFF)
def _update_rts_state(self):
"""Set terminal status line: Request To Send."""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('set RTS to {}'.format('active' if self._rts_state else 'inactive'))
if self._rts_state:
self.rfc2217_set_control(SET_CONTROL_RTS_ON)
else:
self.rfc2217_set_control(SET_CONTROL_RTS_OFF)
def _update_dtr_state(self):
"""Set terminal status line: Data Terminal Ready."""
if not self.is_open:
raise portNotOpenError
if self.logger:
self.logger.info('set DTR to {}'.format('active' if self._dtr_state else 'inactive'))
if self._dtr_state:
self.rfc2217_set_control(SET_CONTROL_DTR_ON)
else:
self.rfc2217_set_control(SET_CONTROL_DTR_OFF)
@property
def cts(self):
"""Read terminal status line: Clear To Send."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_CTS)
@property
def dsr(self):
"""Read terminal status line: Data Set Ready."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_DSR)
@property
def ri(self):
"""Read terminal status line: Ring Indicator."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_RI)
@property
def cd(self):
"""Read terminal status line: Carrier Detect."""
if not self.is_open:
raise portNotOpenError
return bool(self.get_modem_state() & MODEMSTATE_MASK_CD)
@property
def timeout(self):
"""Get the current timeout setting."""
return self._timeout
@timeout.setter
def timeout(self, timeout):
"""Change timeout setting."""
if timeout is not None:
try:
timeout + 1 # test if it's a number, will throw a TypeError if not...
except TypeError:
raise ValueError("Not a valid timeout: {!r}".format(timeout))
if timeout < 0:
raise ValueError("Not a valid timeout: {!r}".format(timeout))
self._timeout = timeout
# - - - platform specific - - -
# None so far
# - - - RFC2217 specific - - -
def _telnet_read_loop(self):
"""Read loop for the socket."""
mode = M_NORMAL
suboption = None
try:
while self.is_open:
try:
data = self._socket.recv(1024)
except socket.timeout:
                    # just need to get out of recv from time to time to check if
# still alive
continue
except socket.error as e:
# connection fails -> terminate loop
if self.logger:
self.logger.debug("socket error in reader thread: {}".format(e))
self._read_buffer.put(None)
break
if not data:
self._read_buffer.put(None)
break # lost connection
for byte in iterbytes(data):
if mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
mode = M_IAC_SEEN
else:
# store data in read buffer or sub option buffer
# depending on state
if suboption is not None:
suboption += byte
else:
self._read_buffer.put(byte)
elif mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if suboption is not None:
suboption += IAC
else:
self._read_buffer.put(IAC)
mode = M_NORMAL
elif byte == SB:
# sub option start
suboption = bytearray()
mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnet_process_subnegotiation(bytes(suboption))
suboption = None
mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
telnet_command = byte
mode = M_NEGOTIATE
else:
# other telnet commands
self._telnet_process_command(byte)
mode = M_NORMAL
elif mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnet_negotiate_option(telnet_command, byte)
mode = M_NORMAL
finally:
self._thread = None
if self.logger:
self.logger.debug("read thread terminated")
# - incoming telnet commands and options
def _telnet_process_command(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: {!r}".format(command))
def _telnet_negotiate_option(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnet_send_option((DONT if command == WILL else WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: {!r}".format(option))
def _telnet_process_subnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if suboption[1:2] == SERVER_NOTIFY_LINESTATE and len(suboption) >= 3:
self._linestate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_LINESTATE: {}".format(self._linestate))
elif suboption[1:2] == SERVER_NOTIFY_MODEMSTATE and len(suboption) >= 3:
self._modemstate = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: {}".format(self._modemstate))
# update time when we think that a poll would make sense
self._modemstate_timeout.restart(0.3)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
self._remote_suspend_flow = False
else:
for item in self._rfc2217_options.values():
if item.ack_option == suboption[1:2]:
#~ print "processing COM_PORT_OPTION: %r" % list(suboption[1:])
item.check_answer(bytes(suboption[2:]))
break
else:
if self.logger:
self.logger.warning("ignoring COM_PORT_OPTION: {!r}".format(suboption))
else:
if self.logger:
self.logger.warning("ignoring subnegotiation: {!r}".format(suboption))
# - outgoing telnet commands and options
def _internal_raw_write(self, data):
"""internal socket write with no data escaping. used to send telnet stuff."""
with self._write_lock:
self._socket.sendall(data)
def telnet_send_option(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self._internal_raw_write(IAC + action + option)
def rfc2217_send_subnegotiation(self, option, value=b''):
"""Subnegotiation of RFC2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self._internal_raw_write(IAC + SB + COM_PORT_OPTION + option + value + IAC + SE)
def rfc2217_send_purge(self, value):
"""\
Send purge request to the remote.
(PURGE_RECEIVE_BUFFER / PURGE_TRANSMIT_BUFFER / PURGE_BOTH_BUFFERS)
"""
item = self._rfc2217_options['purge']
item.set(value) # transmit desired purge type
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217_set_control(self, value):
"""transmit change of control line to remote"""
item = self._rfc2217_options['control']
item.set(value) # transmit desired control type
if self._ignore_set_control_answer:
# answers are ignored when option is set. compatibility mode for
# servers that answer, but not the expected one... (or no answer
# at all) i.e. sredird
time.sleep(0.1) # this helps getting the unit tests passed
else:
item.wait(self._network_timeout) # wait for acknowledge from the server
def rfc2217_flow_server_ready(self):
"""\
Check if the server is ready to receive data. Block for some time when
it is not.
"""
#~ if self._remote_suspend_flow:
#~ wait---
def get_modem_state(self):
"""\
Get the last known modem state (cached value). If the value is "old",
request a new one. The cache avoids issuing too many requests when, e.g.,
all status lines (CTS, DSR etc.) are queried by the user one after the
other.
"""
# active modem state polling enabled? is the value fresh enough?
if self._poll_modem_state and self._modemstate_timeout.expired():
if self.logger:
self.logger.debug('polling modem state')
# when it is older, request an update
self.rfc2217_send_subnegotiation(NOTIFY_MODEMSTATE)
timeout = Timeout(self._network_timeout)
while not timeout.expired():
time.sleep(0.05) # prevent 100% CPU load
# when expiration time is updated, it means that there is a new
# value
if not self._modemstate_timeout.expired():
break
else:
if self.logger:
self.logger.warning('poll for modem state failed')
# even when there is a timeout, do not generate an error just
# return the last known value. this way we can support buggy
# servers that do not respond to polls, but send automatic
# updates.
if self._modemstate is not None:
if self.logger:
self.logger.debug('using cached modem state')
return self._modemstate
else:
# never received a notification from the server
raise SerialException("remote sends no NOTIFY_MODEMSTATE")
#############################################################################
# The following is code that helps implementing an RFC 2217 server.
class PortManager(object):
"""\
This class manages the state of Telnet and RFC 2217. It needs a serial
instance and a connection to work with. Connection is expected to implement
a (thread safe) write function that writes the string to the network.
"""
def __init__(self, serial_port, connection, logger=None):
self.serial = serial_port
self.connection = connection
self.logger = logger
self._client_is_rfc2217 = False
# filter state machine
self.mode = M_NORMAL
self.suboption = None
self.telnet_command = None
# states for modem/line control events
self.modemstate_mask = 255
self.last_modemstate = None
self.linstate_mask = 0
# all supported telnet options
self._telnet_options = [
TelnetOption(self, 'ECHO', ECHO, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'we-SGA', SGA, WILL, WONT, DO, DONT, REQUESTED),
TelnetOption(self, 'they-SGA', SGA, DO, DONT, WILL, WONT, INACTIVE),
TelnetOption(self, 'we-BINARY', BINARY, WILL, WONT, DO, DONT, INACTIVE),
TelnetOption(self, 'they-BINARY', BINARY, DO, DONT, WILL, WONT, REQUESTED),
TelnetOption(self, 'we-RFC2217', COM_PORT_OPTION, WILL, WONT, DO, DONT, REQUESTED, self._client_ok),
TelnetOption(self, 'they-RFC2217', COM_PORT_OPTION, DO, DONT, WILL, WONT, INACTIVE, self._client_ok),
]
# negotiate Telnet/RFC2217 -> send initial requests
if self.logger:
self.logger.debug("requesting initial Telnet/RFC 2217 options")
for option in self._telnet_options:
if option.state is REQUESTED:
self.telnet_send_option(option.send_yes, option.option)
# issue 1st modem state notification
def _client_ok(self):
"""\
callback of telnet option. It gets called when option is activated.
This one here is used to detect when the client agrees on RFC 2217. A
flag is set so that other functions like check_modem_lines know if the
client is OK.
"""
# The callback is used for 'we' and 'they', so if one party agrees we're
# already happy. It seems not all servers do the negotiation correctly
# and there are probably incorrect clients too, so be happy if the client
# answers either one positively.
self._client_is_rfc2217 = True
if self.logger:
self.logger.info("client accepts RFC 2217")
# this is to ensure that the client gets a notification, even if there
# was no change
self.check_modem_lines(force_notification=True)
# - outgoing telnet commands and options
def telnet_send_option(self, action, option):
"""Send DO, DONT, WILL, WONT."""
self.connection.write(IAC + action + option)
def rfc2217_send_subnegotiation(self, option, value=b''):
"""Subnegotiation of RFC 2217 parameters."""
value = value.replace(IAC, IAC_DOUBLED)
self.connection.write(IAC + SB + COM_PORT_OPTION + option + value + IAC + SE)
# - check modem lines, needs to be called periodically from user to
# establish polling
def check_modem_lines(self, force_notification=False):
"""\
Read control lines from the serial port and compare with the last value sent to the remote.
Send updates on changes.
"""
modemstate = (
(self.serial.cts and MODEMSTATE_MASK_CTS) |
(self.serial.dsr and MODEMSTATE_MASK_DSR) |
(self.serial.ri and MODEMSTATE_MASK_RI) |
(self.serial.cd and MODEMSTATE_MASK_CD))
# check what has changed
deltas = modemstate ^ (self.last_modemstate or 0) # when last is None -> 0
if deltas & MODEMSTATE_MASK_CTS:
modemstate |= MODEMSTATE_MASK_CTS_CHANGE
if deltas & MODEMSTATE_MASK_DSR:
modemstate |= MODEMSTATE_MASK_DSR_CHANGE
if deltas & MODEMSTATE_MASK_RI:
modemstate |= MODEMSTATE_MASK_RI_CHANGE
if deltas & MODEMSTATE_MASK_CD:
modemstate |= MODEMSTATE_MASK_CD_CHANGE
# if new state is different and the mask allows this change, send
# notification. suppress notifications when client is not rfc2217
if modemstate != self.last_modemstate or force_notification:
if (self._client_is_rfc2217 and (modemstate & self.modemstate_mask)) or force_notification:
self.rfc2217_send_subnegotiation(
SERVER_NOTIFY_MODEMSTATE,
to_bytes([modemstate & self.modemstate_mask]))
if self.logger:
self.logger.info("NOTIFY_MODEMSTATE: {}".format(modemstate))
# save last state, but forget about deltas.
# otherwise it would also notify about changing deltas which is
# probably not very useful
self.last_modemstate = modemstate & 0xf0
# - outgoing data escaping
def escape(self, data):
"""\
This generator function is for the user. All outgoing data has to be
properly escaped, so that no IAC character in the data stream messes up
the Telnet state machine in the server.
socket.sendall(escape(data))
"""
for byte in iterbytes(data):
if byte == IAC:
yield IAC
yield IAC
else:
yield byte
# - incoming data filter
def filter(self, data):
"""\
Handle a bunch of incoming bytes. This is a generator. It will yield
all characters not of interest for Telnet/RFC 2217.
The idea is that the reader thread pushes data from the socket through
this filter:
for byte in filter(socket.recv(1024)):
# do things like CR/LF conversion/whatever
# and write data to the serial port
serial.write(byte)
(socket error handling code left as exercise for the reader)
"""
for byte in iterbytes(data):
if self.mode == M_NORMAL:
# interpret as command or as data
if byte == IAC:
self.mode = M_IAC_SEEN
else:
# store data in sub option buffer or pass it to our
# consumer depending on state
if self.suboption is not None:
self.suboption += byte
else:
yield byte
elif self.mode == M_IAC_SEEN:
if byte == IAC:
# interpret as command doubled -> insert character
# itself
if self.suboption is not None:
self.suboption += byte
else:
yield byte
self.mode = M_NORMAL
elif byte == SB:
# sub option start
self.suboption = bytearray()
self.mode = M_NORMAL
elif byte == SE:
# sub option end -> process it now
self._telnet_process_subnegotiation(bytes(self.suboption))
self.suboption = None
self.mode = M_NORMAL
elif byte in (DO, DONT, WILL, WONT):
# negotiation
self.telnet_command = byte
self.mode = M_NEGOTIATE
else:
# other telnet commands
self._telnet_process_command(byte)
self.mode = M_NORMAL
elif self.mode == M_NEGOTIATE: # DO, DONT, WILL, WONT was received, option now following
self._telnet_negotiate_option(self.telnet_command, byte)
self.mode = M_NORMAL
# - incoming telnet commands and options
def _telnet_process_command(self, command):
"""Process commands other than DO, DONT, WILL, WONT."""
# Currently none. RFC2217 only uses negotiation and subnegotiation.
if self.logger:
self.logger.warning("ignoring Telnet command: {!r}".format(command))
def _telnet_negotiate_option(self, command, option):
"""Process incoming DO, DONT, WILL, WONT."""
# check our registered telnet options and forward command to them
# they know themselves if they have to answer or not
known = False
for item in self._telnet_options:
# can have more than one match! as some options are duplicated for
# 'us' and 'them'
if item.option == option:
item.process_incoming(command)
known = True
if not known:
# handle unknown options
# only answer to positive requests and deny them
if command == WILL or command == DO:
self.telnet_send_option((DONT if command == WILL else WONT), option)
if self.logger:
self.logger.warning("rejected Telnet option: {!r}".format(option))
def _telnet_process_subnegotiation(self, suboption):
"""Process subnegotiation, the data between IAC SB and IAC SE."""
if suboption[0:1] == COM_PORT_OPTION:
if self.logger:
self.logger.debug('received COM_PORT_OPTION: {!r}'.format(suboption))
if suboption[1:2] == SET_BAUDRATE:
backup = self.serial.baudrate
try:
(baudrate,) = struct.unpack(b"!I", suboption[2:6])
if baudrate != 0:
self.serial.baudrate = baudrate
except ValueError as e:
if self.logger:
self.logger.error("failed to set baud rate: {}".format(e))
self.serial.baudrate = backup
else:
if self.logger:
self.logger.info("{} baud rate: {}".format('set' if baudrate else 'get', self.serial.baudrate))
self.rfc2217_send_subnegotiation(SERVER_SET_BAUDRATE, struct.pack(b"!I", self.serial.baudrate))
elif suboption[1:2] == SET_DATASIZE:
backup = self.serial.bytesize
try:
(datasize,) = struct.unpack(b"!B", suboption[2:3])
if datasize != 0:
self.serial.bytesize = datasize
except ValueError as e:
if self.logger:
self.logger.error("failed to set data size: {}".format(e))
self.serial.bytesize = backup
else:
if self.logger:
self.logger.info("{} data size: {}".format('set' if datasize else 'get', self.serial.bytesize))
self.rfc2217_send_subnegotiation(SERVER_SET_DATASIZE, struct.pack(b"!B", self.serial.bytesize))
elif suboption[1:2] == SET_PARITY:
backup = self.serial.parity
try:
parity = struct.unpack(b"!B", suboption[2:3])[0]
if parity != 0:
self.serial.parity = RFC2217_REVERSE_PARITY_MAP[parity]
except ValueError as e:
if self.logger:
self.logger.error("failed to set parity: {}".format(e))
self.serial.parity = backup
else:
if self.logger:
self.logger.info("{} parity: {}".format('set' if parity else 'get', self.serial.parity))
self.rfc2217_send_subnegotiation(
SERVER_SET_PARITY,
struct.pack(b"!B", RFC2217_PARITY_MAP[self.serial.parity]))
elif suboption[1:2] == SET_STOPSIZE:
backup = self.serial.stopbits
try:
stopbits = struct.unpack(b"!B", suboption[2:3])[0]
if stopbits != 0:
self.serial.stopbits = RFC2217_REVERSE_STOPBIT_MAP[stopbits]
except ValueError as e:
if self.logger:
self.logger.error("failed to set stop bits: {}".format(e))
self.serial.stopbits = backup
else:
if self.logger:
self.logger.info("{} stop bits: {}".format('set' if stopbits else 'get', self.serial.stopbits))
self.rfc2217_send_subnegotiation(
SERVER_SET_STOPSIZE,
struct.pack(b"!B", RFC2217_STOPBIT_MAP[self.serial.stopbits]))
elif suboption[1:2] == SET_CONTROL:
if suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING:
if self.serial.xonxoff:
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif self.serial.rtscts:
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
else:
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL:
self.serial.xonxoff = False
self.serial.rtscts = False
if self.logger:
self.logger.info("changed flow control to None")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_NO_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTROL:
self.serial.xonxoff = True
if self.logger:
self.logger.info("changed flow control to XON/XOFF")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_SW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTROL:
self.serial.rtscts = True
if self.logger:
self.logger.info("changed flow control to RTS/CTS")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_USE_HW_FLOW_CONTROL)
elif suboption[2:3] == SET_CONTROL_REQ_BREAK_STATE:
if self.logger:
self.logger.warning("requested break state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_BREAK_ON:
self.serial.break_condition = True
if self.logger:
self.logger.info("changed BREAK to active")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_ON)
elif suboption[2:3] == SET_CONTROL_BREAK_OFF:
self.serial.break_condition = False
if self.logger:
self.logger.info("changed BREAK to inactive")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_BREAK_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_DTR:
if self.logger:
self.logger.warning("requested DTR state - not implemented")
pass # XXX needs cached value
elif suboption[2:3] == SET_CONTROL_DTR_ON:
self.serial.dtr = True
if self.logger:
self.logger.info("changed DTR to active")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_ON)
elif suboption[2:3] == SET_CONTROL_DTR_OFF:
self.serial.dtr = False
if self.logger:
self.logger.info("changed DTR to inactive")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_DTR_OFF)
elif suboption[2:3] == SET_CONTROL_REQ_RTS:
if self.logger:
self.logger.warning("requested RTS state - not implemented")
pass # XXX needs cached value
#~ self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_ON:
self.serial.rts = True
if self.logger:
self.logger.info("changed RTS to active")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_ON)
elif suboption[2:3] == SET_CONTROL_RTS_OFF:
self.serial.rts = False
if self.logger:
self.logger.info("changed RTS to inactive")
self.rfc2217_send_subnegotiation(SERVER_SET_CONTROL, SET_CONTROL_RTS_OFF)
#~ elif suboption[2:3] == SET_CONTROL_REQ_FLOW_SETTING_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_NO_FLOW_CONTROL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_SW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_HW_FLOW_CONTOL_IN:
#~ elif suboption[2:3] == SET_CONTROL_USE_DCD_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DTR_FLOW_CONTROL:
#~ elif suboption[2:3] == SET_CONTROL_USE_DSR_FLOW_CONTROL:
elif suboption[1:2] == NOTIFY_LINESTATE:
# client polls for current state
self.rfc2217_send_subnegotiation(
SERVER_NOTIFY_LINESTATE,
to_bytes([0])) # sorry, nothing like that implemented
elif suboption[1:2] == NOTIFY_MODEMSTATE:
if self.logger:
self.logger.info("request for modem state")
# client polls for current state
self.check_modem_lines(force_notification=True)
elif suboption[1:2] == FLOWCONTROL_SUSPEND:
if self.logger:
self.logger.info("suspend")
self._remote_suspend_flow = True
elif suboption[1:2] == FLOWCONTROL_RESUME:
if self.logger:
self.logger.info("resume")
self._remote_suspend_flow = False
elif suboption[1:2] == SET_LINESTATE_MASK:
self.linstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("line state mask: 0x{:02x}".format(self.linstate_mask))
elif suboption[1:2] == SET_MODEMSTATE_MASK:
self.modemstate_mask = ord(suboption[2:3]) # ensure it is a number
if self.logger:
self.logger.info("modem state mask: 0x{:02x}".format(self.modemstate_mask))
elif suboption[1:2] == PURGE_DATA:
if suboption[2:3] == PURGE_RECEIVE_BUFFER:
self.serial.reset_input_buffer()
if self.logger:
self.logger.info("purge in")
self.rfc2217_send_subnegotiation(SERVER_PURGE_DATA, PURGE_RECEIVE_BUFFER)
elif suboption[2:3] == PURGE_TRANSMIT_BUFFER:
self.serial.reset_output_buffer()
if self.logger:
self.logger.info("purge out")
self.rfc2217_send_subnegotiation(SERVER_PURGE_DATA, PURGE_TRANSMIT_BUFFER)
elif suboption[2:3] == PURGE_BOTH_BUFFERS:
self.serial.reset_input_buffer()
self.serial.reset_output_buffer()
if self.logger:
self.logger.info("purge both")
self.rfc2217_send_subnegotiation(SERVER_PURGE_DATA, PURGE_BOTH_BUFFERS)
else:
if self.logger:
self.logger.error("undefined PURGE_DATA: {!r}".format(list(suboption[2:])))
else:
if self.logger:
self.logger.error("undefined COM_PORT_OPTION: {!r}".format(list(suboption[1:])))
else:
if self.logger:
self.logger.warning("unknown subnegotiation: {!r}".format(suboption))
# simple client test
if __name__ == '__main__':
import sys
s = Serial('rfc2217://localhost:7000', 115200)
sys.stdout.write('{}\n'.format(s))
sys.stdout.write("write...\n")
s.write(b"hello\n")
s.flush()
sys.stdout.write("read: {}\n".format(s.read(5)))
s.close()
|
py | b41710f3b16b136d8329bf440cddbe4e95cdeb03 | """
Module with network code
"""
import warnings
import numpy as np
import random
import json
import os
import collections
import configobj
import shelve
import net.utilities
warnings.filterwarnings('error')
class NetHyperparameters:
"""
A very simple structure bundling together net hyperparameters
"""
def __init__(self, epochs, learning_rate, regularization_coefficient, batch_size):
self.epochs = epochs
self.learning_rate = learning_rate
self.regularization_coefficient = regularization_coefficient
self.batch_size = batch_size
class Net:
"""
A simple neural network
"""
def __init__(self, layers):
self.layers = layers
self.biases = [np.zeros(shape=[nodes_out, 1], dtype=np.float32) for nodes_out in layers[1:]]
self.weights = []
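# Weights use He initialization: zero-mean Gaussians with standard deviation
# sqrt(2 / fan_in), a common choice for the ReLU activations used in
# verbose_feedforward.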
for index, (nodes_in, nodes_out) in enumerate(zip(layers[:-1], layers[1:])):
standard_deviation = np.sqrt(2 / nodes_in)
weights = np.random.normal(0, standard_deviation, size=[nodes_out, nodes_in])
self.weights.append(weights)
@staticmethod
def from_file(path):
"""
Constructor for loading a net from a file
:param path: path to a JSON file previously written by save()
:return: Net instance with layers, weights and biases restored
"""
with open(path, "r") as file:
data = json.load(file)
network = Net(data["layers"])
network.weights = [np.array(w) for w in data["weights"]]
network.biases = [np.array(b) for b in data["biases"]]
return network
def feedforward(self, x):
_, activations = self.verbose_feedforward(x)
return activations[-1]
def verbose_feedforward(self, x):
"""
Feedforward that returns a list of preactivations and activations
:param x: input
:return: a tuple (preactivations list, activations list)
"""
zs = []
activations = [x]
a = x
for w, b in zip(self.weights, self.biases):
z = np.dot(w, a) + b
zs.append(z)
a = net.utilities.relu(z)
activations.append(a)
# Use softmax output
activations[-1] = net.utilities.softmax(zs[-1])
return zs, activations
def get_output_layer_error(self, y, prediction):
"""
Get output layer error for cross entropy cost
:param y: one-hot encoded correct labels
:param prediction: softmax output of the network
:return: output layer error, prediction - y
"""
return prediction - y
def get_accuracy(self, data):
is_correct = []
for x, y in data:
prediction = self.feedforward(x)
prediction_saturated = np.zeros(prediction.shape)
prediction_saturated[np.argmax(prediction)] = 1
is_correct.append(int(np.all(prediction_saturated == y)))
return np.sum(is_correct) / len(is_correct)
def save(self, output_path):
os.makedirs(os.path.dirname(output_path), exist_ok=True)
data = {
"layers": self.layers,
"weights": [w.tolist() for w in self.weights],
"biases": [b.tolist() for b in self.biases]
}
with open(output_path, "w") as file:
json.dump(data, file)
def get_prediction_error_cost(self, x, y):
"""
Given an input x and the correct labels y, return the
cost of the predictions the network makes about x
:param x: matrix with columns containing input vectors
:param y: column vector of correct labels
:return: scalar cost of average prediction error
"""
predictions = self.feedforward(x)
indices = np.argmax(y, axis=0)
labels_predictions = predictions[indices, range(len(indices))]
cost = np.mean(-np.log(labels_predictions + 1e-10))
return cost
class Trainer:
"""
Class training a network
"""
def __init__(self, hyperparameters):
self.hyperparameters = hyperparameters
def train(self, network, data, test_data, output_path):
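# Training loop: shuffle every epoch, log test accuracy and checkpoint the
# best model seen so far, multiply the learning rate by 0.25 every 20 epochs,
# then run mini-batch gradient descent over the shuffled data.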
logger = Logger(network, self, data)
best_accuracy = 0
for epoch in range(self.hyperparameters.epochs):
random.shuffle(data)
if epoch % 1 == 0:
print("Epoch {}".format(epoch))
accuracy = network.get_accuracy(test_data)
print(accuracy)
logger.log_training_progress(epoch, accuracy)
if best_accuracy < accuracy:
best_accuracy = accuracy
network.save(output_path)
if epoch % 20 == 0:
self.hyperparameters.learning_rate *= 0.25
batched_data = net.utilities.get_data_batches(data, self.hyperparameters.batch_size)
for batch in batched_data:
x_batch, y_batch = net.utilities.data_tuples_to_matrices(batch)
self._update(network, x_batch, y_batch, self.hyperparameters.learning_rate)
def _update(self, network, x, y, learning_rate):
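# One mini-batch update via backpropagation: forward pass, output error for
# softmax + cross-entropy (prediction - y), backpropagate through the ReLU
# layers collecting bias/weight gradients (hidden-layer weight gradients also
# carry an L2 regularization term), then take a gradient descent step.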
zs, activations = network.verbose_feedforward(x)
bias_gradients = [None] * len(network.biases)
weights_gradients = [None] * len(network.weights)
error = network.get_output_layer_error(y, activations[-1])
bias_gradients[-1] = np.mean(error, axis=1).reshape(error.shape[0], 1)
weights_gradients[-1] = np.dot(error, activations[-2].T) / self.hyperparameters.batch_size
indices = range(len(network.weights) - 2, -1, -1)
for index in indices:
error = np.dot(network.weights[index + 1].T, error) * net.utilities.relu_prime(zs[index])
bias_gradients[index] = np.mean(error, axis=1).reshape(error.shape[0], 1)
regularization_derivative = \
self.hyperparameters.regularization_coefficient * network.weights[index] /\
self.hyperparameters.batch_size
weights_gradients[index] = \
(np.dot(error, activations[index].T) / self.hyperparameters.batch_size) + \
regularization_derivative
network.weights = [w - (learning_rate * w_grad)
for w, w_grad in zip(network.weights, weights_gradients)]
network.biases = [b - (learning_rate * b_grad)
for b, b_grad in zip(network.biases, bias_gradients)]
def get_regularization_cost(self, network):
"""
Return average regularization cost per training element for the network
:param network:
:return:
"""
squared_weights_sum = np.sum([np.sum(np.square(w)) for w in network.weights])
cost = self.hyperparameters.regularization_coefficient * squared_weights_sum / \
self.hyperparameters.batch_size
return cost
class Debugger:
"""
Class for debugging networks.
In particular it offers insight into which classification mistakes the network makes
"""
def __init__(self, network, encoder):
self.network = network
self.encoder = encoder
def get_mistakes(self, data, mininmum_count=1):
mistakes_list_dictionary = self._get_mistakes_list_dictionary(data)
mistakes_counting_dictionary = {}
for true_label, wrong_labels in mistakes_list_dictionary.items():
mistakes_counter = collections.Counter(wrong_labels)
filtered_mistakes_counter = {
wrong_label: count for wrong_label, count in mistakes_counter.items()
if count >= mininmum_count}
mistakes_counting_dictionary[true_label] = filtered_mistakes_counter
return mistakes_counting_dictionary
def _get_mistakes_list_dictionary(self, data):
mistakes_list_dictionary = collections.defaultdict(list)
for x, y in data:
prediction = self.network.feedforward(x)
correct_label = self.encoder.decode(y)
predicted_label = self.encoder.decode(prediction)
if correct_label != predicted_label:
mistakes_list_dictionary[correct_label].append(predicted_label)
return mistakes_list_dictionary
class Logger:
"""
Class for logging performance of a network as it is being trained
"""
def __init__(self, network, trainer, data):
self.network = network
self.trainer = trainer
batched_data = net.utilities.get_data_batches(data, trainer.hyperparameters.batch_size)
self.x_batches = []
self.y_batches = []
for batch in batched_data:
x_batch, y_batch = net.utilities.data_tuples_to_matrices(batch)
self.x_batches.append(x_batch)
self.y_batches.append(y_batch)
network_parameters = {
'topology': [l for l in self.network.layers]
}
hyperparameters = {
'initial_learning_rate': trainer.hyperparameters.learning_rate,
'regularization_coefficient': trainer.hyperparameters.regularization_coefficient,
'batch_size': trainer.hyperparameters.batch_size
}
self.database_path = configobj.ConfigObj('configuration.ini')['database_path']
if os.path.exists(self.database_path):
os.remove(self.database_path)
shelf = shelve.open(self.database_path, writeback=True)
shelf['network_parameters'] = network_parameters
shelf['hyperparameters'] = hyperparameters
shelf['training'] = {}
shelf.sync()
shelf.close()
def log_training_progress(self, epoch, accuracy):
weights_percentiles = [np.percentile(w, [0, 25, 50, 75, 100]) for w in self.network.weights]
error_cost = np.mean([self.network.get_prediction_error_cost(x_batch, y_batch)
for x_batch, y_batch in zip(self.x_batches, self.y_batches)])
regularization_cost = self.trainer.get_regularization_cost(self.network)
epoch_summary = {
'accuracy': accuracy,
'weights_percentiles': weights_percentiles,
'error_cost': error_cost,
'regularization_cost': regularization_cost
}
shelf = shelve.open(self.database_path, writeback=True)
training = shelf['training']
training[epoch] = epoch_summary
shelf.sync()
shelf.close()
|
py | b4171178069f9a9c6f8406df04763c12f3df88d2 | # -*- coding: utf-8 -*-
"""
Tencent is pleased to support the open source community by making 蓝鲸智云(BlueKing) available.
Copyright (C) 2017 THL A29 Limited, a Tencent company. All rights reserved.
Licensed under the MIT License (the "License"); you may not use this file except in compliance with the License.
You may obtain a copy of the License at http://opensource.org/licenses/MIT
Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on
an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and limitations under the License.
Global configuration for the local development environment
"""
from settings import APP_ID
# ===============================================================================
# Database settings for local development
# ===============================================================================
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.mysql',  # MySQL is used by default
'NAME': APP_ID,  # database name (defaults to APP_ID)
'USER': 'root',  # your database user
'PASSWORD': 'szh0823',  # your database password
'HOST': '127.0.0.1',  # use localhost during development
'PORT': '3306',  # default is 3306
},
}
|
py | b41711c1ea233b504c40bce79d0e204c6c279ffe | # -*- coding: utf-8 -*-
"""
Created on Wed Mar 20 22:33:50 2019
@author: Titus
"""
import numpy as np
import shapefile
from matplotlib.path import Path
import matplotlib.pyplot as plt
from matplotlib import patches
import pandas as pd
shpFilePath = r"C:\Users\tq220\Documents\Tits things\2018-2019\Data Science\Final-project-data\Data\Roosevelt Hot Springs FORGE Site Outline\FORGE_Outline.shp"
listx=[]
listy=[]
test = shapefile.Reader(shpFilePath)
for sr in test.shapeRecords():
for xNew,yNew in sr.shape.points:
listx.append(xNew)
listy.append(yNew)
xlist=np.array(listx)
ylist=np.array(listy)
tupVerts=[]
for ind,xval in enumerate(xlist):
tupVerts.append((xval,ylist[ind]))
x, y = np.meshgrid(np.linspace(min(xlist),max(xlist),1000), np.linspace(min(ylist),max(ylist),1000)) # make a canvas with coordinates
x, y = x.flatten(), y.flatten()
#points = np.vstack((x,y)).T
points=np.array([[333594,4264000]])
p = Path(tupVerts) # make a polygon
grid = p.contains_points(points)
#mask = grid.reshape(1000,1000) # now you have a mask with points inside a polygon
#
#grid=np.array(grid)
#grid=grid.astype(int)
#
#forge_classifier=pd.DataFrame({'x':points[:,0],'y':points[:,1],'in':grid})
#forge_classifier.to_csv('forge_classifier.csv',index=False)
#fig = plt.figure()
#ax = fig.add_subplot(111)
#patch = patches.PathPatch(p, facecolor='orange', lw=2)
#ax.add_patch(patch)
#ax.set_xlim(min(xlist),max(xlist))
#ax.set_ylim(min(ylist),max(ylist))
#plt.show()
#plt.figure()
#plt.scatter(points[:,0],points[:,1],c=grid)
#plt.show() |
py | b417126172e36cfb3563cb92c4e90913c1a97d6d | import ecc_constants
import sike_core_utils
def generate_fpga_constants(prime, elliptic_curve_order, elliptic_curve_const_a, elliptic_curve_const_b, elliptic_curve_generator_point_x, elliptic_curve_generator_point_y, base_word_size, extended_word_size, number_of_bits_added):
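# Builds the 26-entry constants table consumed by the ECC FPGA core: parameter
# name, word sizes, the prime with its derived Montgomery values (prime',
# r mod p, r^2 mod p), the curve order, the curve constants a, a^2 and 3*b
# (raw and in the Montgomery domain), the generator point and the stack
# starting address.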
ecc_fpga_constants = [0 for i in range(26)]
accumulator_word_size = 2 * extended_word_size + 32
prime_size_bits = int(prime).bit_length()
arithmetic_parameters = sike_core_utils.generate_arithmetic_parameters(base_word_size, extended_word_size, prime_size_bits, number_of_bits_added, accumulator_word_size, prime)
prime_plus_one = arithmetic_parameters[5]
prime_plus_one_list = arithmetic_parameters[6]
prime_line = arithmetic_parameters[17]
prime2 = arithmetic_parameters[24]
r_mod_prime = arithmetic_parameters[12]
r2_mod_prime = arithmetic_parameters[14]
number_of_words = arithmetic_parameters[9]
prime_line_equal_one = arithmetic_parameters[19]
prime_plus_one_number_of_zeroes = 0
for i in range(0, number_of_words):
if(prime_plus_one_list[i] != 0):
break
prime_plus_one_number_of_zeroes = prime_plus_one_number_of_zeroes + 1
if((extended_word_size == 256) and (prime_plus_one_number_of_zeroes > 1)):
prime_plus_one_number_of_zeroes = 1
if((extended_word_size == 128) and (prime_plus_one_number_of_zeroes > 3)):
prime_plus_one_number_of_zeroes = 3
ecc_fpga_constants[0] = "p" + str(prime_size_bits)
ecc_fpga_constants[1] = base_word_size
ecc_fpga_constants[2] = extended_word_size
ecc_fpga_constants[3] = number_of_bits_added
ecc_fpga_constants[4] = number_of_words
ecc_fpga_constants[5] = prime
ecc_fpga_constants[6] = prime_size_bits
ecc_fpga_constants[7] = prime_plus_one
ecc_fpga_constants[8] = prime_line
ecc_fpga_constants[9] = prime_plus_one_number_of_zeroes
ecc_fpga_constants[10] = prime2
ecc_fpga_constants[11] = r_mod_prime
ecc_fpga_constants[12] = r2_mod_prime
ecc_fpga_constants[13] = int(1)
ecc_fpga_constants[14] = elliptic_curve_order
ecc_fpga_constants[15] = int(elliptic_curve_order).bit_length()
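# The curve constants a, a^2 and 3*b are stored both raw and in the Montgomery
# domain (multiplied by r mod prime), so the FPGA can feed them straight into
# Montgomery multiplications.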
const_a = elliptic_curve_const_a
const_a2 = (elliptic_curve_const_a*elliptic_curve_const_a) % prime
const_b3 = (elliptic_curve_const_b*3) % prime
ecc_fpga_constants[16] = sike_core_utils.enter_montgomery_domain(arithmetic_parameters, const_a)
ecc_fpga_constants[17] = sike_core_utils.enter_montgomery_domain(arithmetic_parameters, const_a2)
ecc_fpga_constants[18] = sike_core_utils.enter_montgomery_domain(arithmetic_parameters, const_b3)
ecc_fpga_constants[19] = const_a
ecc_fpga_constants[20] = const_a2
ecc_fpga_constants[21] = const_b3
ecc_fpga_constants[22] = elliptic_curve_generator_point_x
ecc_fpga_constants[23] = elliptic_curve_generator_point_y
ecc_fpga_constants[24] = 1
ecc_fpga_constants[25] = 224*(1024//extended_word_size)
return ecc_fpga_constants
def print_fpga_constants(filename_constants, ecc_fpga_constants_all_parameters):
with open(filename_constants, 'w') as file_constants:
file_constants.write(filename_constants[0:-3] + ' = [\n')
for ecc_fpga_constants in ecc_fpga_constants_all_parameters:
file_constants.write('[\n')
file_constants.write('# ' + ' Parameter name\n')
file_constants.write('"' + ecc_fpga_constants[0] + '"' + ',\n')
file_constants.write('# ' + ' base word size\n')
file_constants.write(str(ecc_fpga_constants[1]) + ',\n')
file_constants.write('# ' + ' extended word size\n')
file_constants.write(str(ecc_fpga_constants[2]) + ',\n')
file_constants.write('# ' + ' number of bits added\n')
file_constants.write(str(ecc_fpga_constants[3]) + ',\n')
file_constants.write('# ' + ' number of words\n')
file_constants.write(str(ecc_fpga_constants[4]) + ',\n')
file_constants.write('# ' + ' prime\n')
file_constants.write(str(ecc_fpga_constants[5]) + ',\n')
file_constants.write('# ' + ' prime size in bits\n')
file_constants.write(str(ecc_fpga_constants[6]) + ',\n')
file_constants.write('# ' + ' prime+1\n')
file_constants.write(str(ecc_fpga_constants[7]) + ',\n')
file_constants.write('# ' + " prime' = -1/prime mod r\n")
file_constants.write(str(ecc_fpga_constants[8]) + ',\n')
file_constants.write('# ' + ' prime plus one number of zeroes\n')
file_constants.write(str(ecc_fpga_constants[9]) + ',\n')
file_constants.write('# ' + ' 2*prime\n')
file_constants.write(str(ecc_fpga_constants[10]) + ',\n')
file_constants.write('# ' + ' r mod prime\n')
file_constants.write(str(ecc_fpga_constants[11]) + ',\n')
file_constants.write('# ' + ' r^2 mod prime\n')
file_constants.write(str(ecc_fpga_constants[12]) + ',\n')
file_constants.write('# ' + ' value 1\n')
file_constants.write(str(ecc_fpga_constants[13]) + ',\n')
file_constants.write('# ' + ' ECC curve order\n')
file_constants.write(str(ecc_fpga_constants[14]) + ',\n')
file_constants.write('# ' + ' ECC curve order bit length\n')
file_constants.write(str(ecc_fpga_constants[15]) + ',\n')
file_constants.write('# ' + ' ECC curve constant a in Montgomery domain (*r mod prime)\n')
file_constants.write(str(ecc_fpga_constants[16]) + ',\n')
file_constants.write('# ' + ' ECC curve constant a^2 in Montgomery domain (*r mod prime) \n')
file_constants.write(str(ecc_fpga_constants[17]) + ',\n')
file_constants.write('# ' + ' ECC curve constant 3*b in Montgomery domain (*r mod prime) \n')
file_constants.write(str(ecc_fpga_constants[18]) + ',\n')
file_constants.write('# ' + ' ECC curve constant a original\n')
file_constants.write(str(ecc_fpga_constants[19]) + ',\n')
file_constants.write('# ' + ' ECC curve constant a^2 original\n')
file_constants.write(str(ecc_fpga_constants[20]) + ',\n')
file_constants.write('# ' + ' ECC curve constant 3*b original\n')
file_constants.write(str(ecc_fpga_constants[21]) + ',\n')
file_constants.write('# ' + ' ECC curve generator point x original\n')
file_constants.write(str(ecc_fpga_constants[22]) + ',\n')
file_constants.write('# ' + ' ECC curve generator point y original\n')
file_constants.write(str(ecc_fpga_constants[23]) + ',\n')
file_constants.write('# ' + ' ECC curve generator point z original\n')
file_constants.write(str(ecc_fpga_constants[24]) + ',\n')
file_constants.write('# ' + ' ECC stack starting address\n')
file_constants.write(str(ecc_fpga_constants[25]) + ',\n')
file_constants.write('],\n')
file_constants.write(']\n')
def generate_and_print_parameters(base_word_size, extended_word_size, number_of_bits_added, filename_constants):
ecc_fpga_constants_all_parameters = []
for param in ecc_constants.ecc_constants:
prime = param[1]
elliptic_curve_order = param[2]
elliptic_curve_const_a = param[3]
elliptic_curve_const_b = param[4]
elliptic_curve_generator_point_x = param[5]
elliptic_curve_generator_point_y = param[6]
ecc_fpga_constants_all_parameters += [generate_fpga_constants(prime, elliptic_curve_order, elliptic_curve_const_a, elliptic_curve_const_b, elliptic_curve_generator_point_x, elliptic_curve_generator_point_y, base_word_size, extended_word_size, number_of_bits_added)]
print_fpga_constants(filename_constants, ecc_fpga_constants_all_parameters)
generate_and_print_parameters(base_word_size = 16, extended_word_size = 256, number_of_bits_added = 9, filename_constants = 'ecc_fpga_constants_v256.py')
generate_and_print_parameters(base_word_size = 16, extended_word_size = 128, number_of_bits_added = 9, filename_constants = 'ecc_fpga_constants_v128.py')
|
py | b41712ae5507f20cd496bb5484178345e8848814 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
from typing import Any, AsyncIterable, Callable, Dict, Generic, Optional, TypeVar, Union
import warnings
from azure.core.async_paging import AsyncItemPaged, AsyncList
from azure.core.exceptions import ClientAuthenticationError, HttpResponseError, ResourceExistsError, ResourceNotFoundError, map_error
from azure.core.pipeline import PipelineResponse
from azure.core.pipeline.transport import AsyncHttpResponse, HttpRequest
from azure.core.polling import AsyncLROPoller, AsyncNoPolling, AsyncPollingMethod
from azure.mgmt.core.exceptions import ARMErrorFormat
from azure.mgmt.core.polling.async_arm_polling import AsyncARMPolling
from ... import models
T = TypeVar('T')
ClsType = Optional[Callable[[PipelineResponse[HttpRequest, AsyncHttpResponse], T, Dict[str, Any]], Any]]
class PrivateEndpointConnectionsOperations:
"""PrivateEndpointConnectionsOperations async operations.
You should not instantiate this class directly. Instead, you should create a Client instance that
instantiates it for you and attaches it as an attribute.
:ivar models: Alias to model classes used in this operation group.
:type models: ~$(python-base-namespace).v2019_10_17.models
:param client: Client for service requests.
:param config: Configuration of service client.
:param serializer: An object model serializer.
:param deserializer: An object model deserializer.
"""
models = models
def __init__(self, client, config, serializer, deserializer) -> None:
self._client = client
self._serialize = serializer
self._deserialize = deserializer
self._config = config
async def get(
self,
resource_group_name: str,
scope_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> "models.PrivateEndpointConnection":
"""Gets a private endpoint connection.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: PrivateEndpointConnection, or the result of cls(response)
:rtype: ~$(python-base-namespace).v2019_10_17.models.PrivateEndpointConnection
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
accept = "application/json"
# Construct URL
url = self.get.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
request = self._client.get(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
get.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _create_or_update_initial(
self,
resource_group_name: str,
scope_name: str,
private_endpoint_connection_name: str,
parameters: "models.PrivateEndpointConnection",
**kwargs
) -> Optional["models.PrivateEndpointConnection"]:
cls = kwargs.pop('cls', None) # type: ClsType[Optional["models.PrivateEndpointConnection"]]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
content_type = kwargs.pop("content_type", "application/json")
accept = "application/json"
# Construct URL
url = self._create_or_update_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Content-Type'] = self._serialize.header("content_type", content_type, 'str')
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
body_content_kwargs = {} # type: Dict[str, Any]
body_content = self._serialize.body(parameters, 'PrivateEndpointConnection')
body_content_kwargs['content'] = body_content
request = self._client.put(url, query_parameters, header_parameters, **body_content_kwargs)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
deserialized = None
if response.status_code == 200:
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
_create_or_update_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_create_or_update(
self,
resource_group_name: str,
scope_name: str,
private_endpoint_connection_name: str,
parameters: "models.PrivateEndpointConnection",
**kwargs
) -> AsyncLROPoller["models.PrivateEndpointConnection"]:
"""Approve or reject a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:param parameters:
:type parameters: ~$(python-base-namespace).v2019_10_17.models.PrivateEndpointConnection
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either PrivateEndpointConnection or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[~$(python-base-namespace).v2019_10_17.models.PrivateEndpointConnection]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnection"]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._create_or_update_initial(
resource_group_name=resource_group_name,
scope_name=scope_name,
private_endpoint_connection_name=private_endpoint_connection_name,
parameters=parameters,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnection', pipeline_response)
if cls:
return cls(pipeline_response, deserialized, {})
return deserialized
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_create_or_update.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def _delete_initial(
self,
resource_group_name: str,
scope_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> None:
cls = kwargs.pop('cls', None) # type: ClsType[None]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
# Construct URL
url = self._delete_initial.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
'privateEndpointConnectionName': self._serialize.url("private_endpoint_connection_name", private_endpoint_connection_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
# Construct headers
header_parameters = {} # type: Dict[str, Any]
request = self._client.delete(url, query_parameters, header_parameters)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200, 202, 204]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
if cls:
return cls(pipeline_response, None, {})
_delete_initial.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
async def begin_delete(
self,
resource_group_name: str,
scope_name: str,
private_endpoint_connection_name: str,
**kwargs
) -> AsyncLROPoller[None]:
"""Deletes a private endpoint connection with a given name.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:param private_endpoint_connection_name: The name of the private endpoint connection.
:type private_endpoint_connection_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:keyword str continuation_token: A continuation token to restart a poller from a saved state.
:keyword polling: True for ARMPolling, False for no polling, or a
polling object for personal polling strategy
:paramtype polling: bool or ~azure.core.polling.AsyncPollingMethod
:keyword int polling_interval: Default waiting time between two polls for LRO operations if no Retry-After header is present.
:return: An instance of AsyncLROPoller that returns either None or the result of cls(response)
:rtype: ~azure.core.polling.AsyncLROPoller[None]
:raises ~azure.core.exceptions.HttpResponseError:
"""
polling = kwargs.pop('polling', True) # type: Union[bool, AsyncPollingMethod]
cls = kwargs.pop('cls', None) # type: ClsType[None]
lro_delay = kwargs.pop(
'polling_interval',
self._config.polling_interval
)
cont_token = kwargs.pop('continuation_token', None) # type: Optional[str]
if cont_token is None:
raw_result = await self._delete_initial(
resource_group_name=resource_group_name,
scope_name=scope_name,
private_endpoint_connection_name=private_endpoint_connection_name,
cls=lambda x,y,z: x,
**kwargs
)
kwargs.pop('error_map', None)
kwargs.pop('content_type', None)
def get_long_running_output(pipeline_response):
if cls:
return cls(pipeline_response, None, {})
if polling is True: polling_method = AsyncARMPolling(lro_delay, **kwargs)
elif polling is False: polling_method = AsyncNoPolling()
else: polling_method = polling
if cont_token:
return AsyncLROPoller.from_continuation_token(
polling_method=polling_method,
continuation_token=cont_token,
client=self._client,
deserialization_callback=get_long_running_output
)
else:
return AsyncLROPoller(self._client, raw_result, get_long_running_output, polling_method)
begin_delete.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections/{privateEndpointConnectionName}'} # type: ignore
def list_by_private_link_scope(
self,
resource_group_name: str,
scope_name: str,
**kwargs
) -> AsyncIterable["models.PrivateEndpointConnectionListResult"]:
"""Gets all private endpoint connections on a private link scope.
:param resource_group_name: The name of the resource group.
:type resource_group_name: str
:param scope_name: The name of the Azure Monitor PrivateLinkScope resource.
:type scope_name: str
:keyword callable cls: A custom type or function that will be passed the direct response
:return: An iterator like instance of either PrivateEndpointConnectionListResult or the result of cls(response)
:rtype: ~azure.core.async_paging.AsyncItemPaged[~$(python-base-namespace).v2019_10_17.models.PrivateEndpointConnectionListResult]
:raises: ~azure.core.exceptions.HttpResponseError
"""
cls = kwargs.pop('cls', None) # type: ClsType["models.PrivateEndpointConnectionListResult"]
error_map = {
401: ClientAuthenticationError, 404: ResourceNotFoundError, 409: ResourceExistsError
}
error_map.update(kwargs.pop('error_map', {}))
api_version = "2019-10-17-preview"
accept = "application/json"
def prepare_request(next_link=None):
# Construct headers
header_parameters = {} # type: Dict[str, Any]
header_parameters['Accept'] = self._serialize.header("accept", accept, 'str')
if not next_link:
# Construct URL
url = self.list_by_private_link_scope.metadata['url'] # type: ignore
path_format_arguments = {
'subscriptionId': self._serialize.url("self._config.subscription_id", self._config.subscription_id, 'str'),
'resourceGroupName': self._serialize.url("resource_group_name", resource_group_name, 'str'),
'scopeName': self._serialize.url("scope_name", scope_name, 'str'),
}
url = self._client.format_url(url, **path_format_arguments)
# Construct parameters
query_parameters = {} # type: Dict[str, Any]
query_parameters['api-version'] = self._serialize.query("api_version", api_version, 'str')
request = self._client.get(url, query_parameters, header_parameters)
else:
url = next_link
query_parameters = {} # type: Dict[str, Any]
request = self._client.get(url, query_parameters, header_parameters)
return request
async def extract_data(pipeline_response):
deserialized = self._deserialize('PrivateEndpointConnectionListResult', pipeline_response)
list_of_elem = deserialized.value
if cls:
list_of_elem = cls(list_of_elem)
return deserialized.next_link or None, AsyncList(list_of_elem)
async def get_next(next_link=None):
request = prepare_request(next_link)
pipeline_response = await self._client._pipeline.run(request, stream=False, **kwargs)
response = pipeline_response.http_response
if response.status_code not in [200]:
map_error(status_code=response.status_code, response=response, error_map=error_map)
raise HttpResponseError(response=response, error_format=ARMErrorFormat)
return pipeline_response
return AsyncItemPaged(
get_next, extract_data
)
list_by_private_link_scope.metadata = {'url': '/subscriptions/{subscriptionId}/resourceGroups/{resourceGroupName}/providers/Microsoft.Insights/privateLinkScopes/{scopeName}/privateEndpointConnections'} # type: ignore
|
py | b41712b39af0b0fc5dc14ded4f27644542dbcd05 | #!/usr/bin/python2
# -*-coding:utf-8 -*
# Copyright (c) 2011-2015, Intel Corporation
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without modification,
# are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation and/or
# other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR
# ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON
# ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
Integer parameter type testcases - UINT8
List of tested functions :
--------------------------
- [setParameter] function
- [getParameter] function
Initial Settings :
------------------
UINT8 :
- unsigned
- size = 8
- range : [0, 100]
Test cases :
------------
- UINT8 parameter min value = 0
- UINT8 parameter min value out of bounds = -1
- UINT8 parameter max value = 100
- UINT8 parameter max value out of bounds = 101
- UINT8 parameter in nominal case = 50
"""
import commands
from Util.PfwUnitTestLib import PfwTestCase
from Util import ACTLogging
log=ACTLogging.Logger()
# Test of type UINT8 - range [0, 100]
class TestCases(PfwTestCase):
def setUp(self):
self.param_name = "/Test/Test/TEST_DIR/UINT8"
self.pfw.sendCmd("setTuningMode", "on")
def tearDown(self):
self.pfw.sendCmd("setTuningMode", "off")
def test_Nominal_Case(self):
"""
Testing UINT8 in nominal case = 50
----------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8 parameter in nominal case = 50
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT8 parameter set to 50
- Blackboard and filesystem values checked
"""
log.D(self.test_Nominal_Case.__doc__)
log.I("UINT8 parameter in nominal case = 50")
value = "50"
hex_value = "0x32"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT8') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin(self):
"""
Testing UINT8 minimal value = 0
-------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8 parameter min value = 0
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT8 parameter set to 0
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin.__doc__)
log.I("UINT8 parameter min value = 0")
value = "0"
hex_value = "0x0"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT8') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMin_Overflow(self):
"""
Testing UINT8 parameter value out of negative range
---------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8 to -1
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- UINT8 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMin_Overflow.__doc__)
log.I("UINT8 parameter min value out of bounds = -1")
value = "-1"
param_check = commands.getoutput('cat $PFW_RESULT/UINT8')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/UINT8') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
def test_TypeMax(self):
"""
Testing UINT8 parameter maximum value
-------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8 to 100
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- UINT8 parameter set to 100
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax.__doc__)
log.I("UINT8 parameter max value = 100")
value = "100"
hex_value = "0x64"
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == "Done", log.F("when setting parameter %s : %s"
% (self.param_name, out))
#Check parameter value on blackboard
out, err = self.pfw.sendCmd("getParameter", self.param_name, "")
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out == value, log.F("BLACKBOARD : Incorrect value for %s, expected: %s, found: %s"
% (self.param_name, value, out))
#Check parameter value on filesystem
assert commands.getoutput('cat $PFW_RESULT/UINT8') == hex_value, log.F("FILESYSTEM : parameter update error")
log.I("test OK")
def test_TypeMax_Overflow(self):
"""
Testing UINT8 parameter value out of positive range
---------------------------------------------------
Test case description :
~~~~~~~~~~~~~~~~~~~~~~~
- set UINT8 to 101
Tested commands :
~~~~~~~~~~~~~~~~~
- [setParameter] function
Used commands :
~~~~~~~~~~~~~~~
- [getParameter] function
Expected result :
~~~~~~~~~~~~~~~~~
- error detected
- UINT8 parameter not updated
- Blackboard and filesystem values checked
"""
log.D(self.test_TypeMax_Overflow.__doc__)
log.I("UINT8 parameter max value out of bounds = 101")
value = "101"
param_check = commands.getoutput('cat $PFW_RESULT/UINT8')
#Set parameter value
out, err = self.pfw.sendCmd("setParameter", self.param_name, value)
assert err == None, log.E("when setting parameter %s : %s"
% (self.param_name, err))
assert out != "Done", log.F("PFW : Error not detected when setting parameter %s out of bounds"
% (self.param_name))
#Check parameter value on filesystem
        assert commands.getoutput('cat $PFW_RESULT/UINT8') == param_check, log.F("FILESYSTEM : Forbidden parameter change")
log.I("test OK")
|
py | b41712b950c16e802760d2a7cd917a96081da3a6 | '''# Conditional structures
"Simple and compound"
-- Every method call ends with ().
"Sequential structure"
A structure that runs in sequence, without conditions.
Indentation -> organization using spaces.
'''
tempo = int(input('How old is your car? '))
if tempo <= 3:
    print('New car.')
else:
    print('Old car.')
print('---- END ----')
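# --- Added illustration (not part of the original exercise) ---
# The docstring above mentions "simple and compound" conditional structures,
# but the script only demonstrates the simple if/else form. A compound chain
# adds extra conditions with elif; the age thresholds below are arbitrary
# example values, reusing the `tempo` value read earlier.
if tempo <= 3:
    print('New car (compound check).')
elif tempo <= 10:
    print('Used car.')
else:
    print('Old car.')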
|
py | b4171323a83971ce7aabca7b8d8f0e29f820200d | import unittest
import doctest
from Testing import ZopeTestCase
from plonetheme.das.tests.base import layer, FunctionalTestCase
optionflags = (doctest.NORMALIZE_WHITESPACE|
doctest.ELLIPSIS|
doctest.REPORT_NDIFF)
def test_suite():
suite = ZopeTestCase.FunctionalDocFileSuite(
'README.txt', package='plonetheme.das',
optionflags=optionflags,
test_class=FunctionalTestCase)
suite.layer = layer
return suite
if __name__ == '__main__':
unittest.main(defaultTest='test_suite')
|
py | b417138b5a8a764bf45c5c36396825e92786245c | # Ledger interaction script
from ..hwwclient import HardwareWalletClient
from ..errors import ActionCanceledError, BadArgumentError, DeviceConnectionError, DeviceFailureError, HWWError, UnavailableActionError, UNKNOWN_ERROR, common_err_msgs, handle_errors
from .btchip.btchip import *
from .btchip.btchipUtils import *
import base64
import hid
import json
import struct
from .. import base58
from ..base58 import get_xpub_fingerprint_hex
from ..serializations import hash256, hash160, ser_uint256, PSBT, CTransaction, HexToBase64
import binascii
import logging
import re
LEDGER_VENDOR_ID = 0x2c97
LEDGER_DEVICE_IDS = [
0x0001, # Ledger Nano S
0x0004, # Ledger Nano X
]
# minimal checking of string keypath
def check_keypath(key_path):
parts = re.split("/", key_path)
if parts[0] != "m":
return False
# strip hardening chars
for index in parts[1:]:
index_int = re.sub('[hH\']', '', index)
if not index_int.isdigit():
return False
if int(index_int) > 0x80000000:
return False
return True
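# Quick sanity examples for check_keypath (illustrative comments only, not part
# of the upstream module):
#   check_keypath("m/44'/0'/0'/0/0")  -> True
#   check_keypath("m/49h/1h/0h")      -> True
#   check_keypath("44'/0'/0'")        -> False  (must start with "m")
#   check_keypath("m/abc")            -> False  (indices must be numeric)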
bad_args = [
0x6700, # BTCHIP_SW_INCORRECT_LENGTH
0x6A80, # BTCHIP_SW_INCORRECT_DATA
0x6B00, # BTCHIP_SW_INCORRECT_P1_P2
0x6D00, # BTCHIP_SW_INS_NOT_SUPPORTED
]
cancels = [
0x6982, # BTCHIP_SW_SECURITY_STATUS_NOT_SATISFIED
0x6985, # BTCHIP_SW_CONDITIONS_OF_USE_NOT_SATISFIED
]
def ledger_exception(f):
def func(*args, **kwargs):
try:
return f(*args, **kwargs)
except ValueError as e:
raise BadArgumentError(str(e))
except BTChipException as e:
if e.sw in bad_args:
raise BadArgumentError('Bad argument')
elif e.sw == 0x6F00: # BTCHIP_SW_TECHNICAL_PROBLEM
raise DeviceFailureError(e.message)
elif e.sw == 0x6FAA: # BTCHIP_SW_HALTED
raise DeviceConnectionError('Device is asleep')
elif e.sw in cancels:
raise ActionCanceledError('{} canceled'.format(f.__name__))
else:
raise e
return func
# This class extends the HardwareWalletClient for Ledger Nano S and Nano X specific things
class LedgerClient(HardwareWalletClient):
def __init__(self, path, password=''):
super(LedgerClient, self).__init__(path, password)
device = hid.device()
device.open_path(path.encode())
device.set_nonblocking(True)
self.dongle = HIDDongleHIDAPI(device, True, logging.getLogger().getEffectiveLevel() == logging.DEBUG)
self.app = btchip(self.dongle)
# Must return a dict with the xpub
# Retrieves the public key at the specified BIP 32 derivation path
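    # Illustrative expectation (comment only, based on the code below): calling
    # get_pubkey_at_path('m/44h/0h/0h') returns a dict such as
    # {'xpub': 'xpub6...'} (a 'tpub...'-prefixed key when is_testnet is set).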
@ledger_exception
def get_pubkey_at_path(self, path):
if not check_keypath(path):
raise BadArgumentError("Invalid keypath")
path = path[2:]
path = path.replace('h', '\'')
path = path.replace('H', '\'')
# This call returns raw uncompressed pubkey, chaincode
pubkey = self.app.getWalletPublicKey(path)
if path != "":
parent_path = ""
for ind in path.split("/")[:-1]:
parent_path += ind+"/"
parent_path = parent_path[:-1]
# Get parent key fingerprint
parent = self.app.getWalletPublicKey(parent_path)
fpr = hash160(compress_public_key(parent["publicKey"]))[:4]
# Compute child info
childstr = path.split("/")[-1]
hard = 0
if childstr[-1] == "'" or childstr[-1] == "h" or childstr[-1] == "H":
childstr = childstr[:-1]
hard = 0x80000000
child = struct.pack(">I", int(childstr)+hard)
# Special case for m
else:
child = bytearray.fromhex("00000000")
fpr = child
chainCode = pubkey["chainCode"]
publicKey = compress_public_key(pubkey["publicKey"])
depth = len(path.split("/")) if len(path) > 0 else 0
depth = struct.pack("B", depth)
if self.is_testnet:
version = bytearray.fromhex("043587CF")
else:
version = bytearray.fromhex("0488B21E")
extkey = version+depth+fpr+child+chainCode+publicKey
checksum = hash256(extkey)[:4]
return {"xpub":base58.encode(extkey+checksum)}
# Must return a hex string with the signed transaction
# The tx must be in the combined unsigned transaction format
    # Currently only supports segwit signing
@ledger_exception
def sign_tx(self, tx):
c_tx = CTransaction(tx.tx)
tx_bytes = c_tx.serialize_with_witness()
# Master key fingerprint
master_fpr = hash160(compress_public_key(self.app.getWalletPublicKey('')["publicKey"]))[:4]
# An entry per input, each with 0 to many keys to sign with
all_signature_attempts = [[]]*len(c_tx.vin)
# NOTE: We only support signing Segwit inputs, where we can skip over non-segwit
# inputs, or non-segwit inputs, where *all* inputs are non-segwit. This is due
# to Ledger's mutually exclusive signing steps for each type.
segwit_inputs = []
# Legacy style inputs
legacy_inputs = []
has_segwit = False
has_legacy = False
script_codes = [[]]*len(c_tx.vin)
# Detect changepath, (p2sh-)p2(w)pkh only
change_path = ''
for txout, i_num in zip(c_tx.vout, range(len(c_tx.vout))):
# Find which wallet key could be change based on hdsplit: m/.../1/k
# Wallets shouldn't be sending to change address as user action
# otherwise this will get confused
for pubkey, path in tx.outputs[i_num].hd_keypaths.items():
if struct.pack("<I", path[0]) == master_fpr and len(path) > 2 and path[-2] == 1:
# For possible matches, check if pubkey matches possible template
if hash160(pubkey) in txout.scriptPubKey or hash160(bytearray.fromhex("0014")+hash160(pubkey)) in txout.scriptPubKey:
change_path = ''
for index in path[1:]:
change_path += str(index)+"/"
change_path = change_path[:-1]
for txin, psbt_in, i_num in zip(c_tx.vin, tx.inputs, range(len(c_tx.vin))):
seq = format(txin.nSequence, 'x')
seq = seq.zfill(8)
seq = bytearray.fromhex(seq)
seq.reverse()
seq_hex = ''.join('{:02x}'.format(x) for x in seq)
if psbt_in.non_witness_utxo:
segwit_inputs.append({"value":txin.prevout.serialize()+struct.pack("<Q", psbt_in.non_witness_utxo.vout[txin.prevout.n].nValue), "witness":True, "sequence":seq_hex})
# We only need legacy inputs in the case where all inputs are legacy, we check
# later
ledger_prevtx = bitcoinTransaction(psbt_in.non_witness_utxo.serialize())
legacy_inputs.append(self.app.getTrustedInput(ledger_prevtx, txin.prevout.n))
legacy_inputs[-1]["sequence"] = seq_hex
has_legacy = True
else:
segwit_inputs.append({"value":txin.prevout.serialize()+struct.pack("<Q", psbt_in.witness_utxo.nValue), "witness":True, "sequence":seq_hex})
has_segwit = True
pubkeys = []
signature_attempts = []
scriptCode = b""
witness_program = b""
if psbt_in.witness_utxo is not None and psbt_in.witness_utxo.is_p2sh():
redeemscript = psbt_in.redeem_script
witness_program += redeemscript
elif psbt_in.non_witness_utxo is not None and psbt_in.non_witness_utxo.vout[txin.prevout.n].is_p2sh():
redeemscript = psbt_in.redeem_script
elif psbt_in.witness_utxo is not None:
witness_program += psbt_in.witness_utxo.scriptPubKey
elif psbt_in.non_witness_utxo is not None:
# No-op
redeemscript = b""
witness_program = b""
else:
raise Exception("PSBT is missing input utxo information, cannot sign")
# Check if witness_program is script hash
if len(witness_program) == 34 and witness_program[0] == 0x00 and witness_program[1] == 0x20:
# look up witnessscript and set as scriptCode
witnessscript = psbt_in.witness_script
scriptCode += witnessscript
elif len(witness_program) > 0:
# p2wpkh
scriptCode += b"\x76\xa9\x14"
scriptCode += witness_program[2:]
scriptCode += b"\x88\xac"
elif len(witness_program) == 0:
if len(redeemscript) > 0:
scriptCode = redeemscript
else:
scriptCode = psbt_in.non_witness_utxo.vout[txin.prevout.n].scriptPubKey
# Save scriptcode for later signing
script_codes[i_num] = scriptCode
# Find which pubkeys could sign this input (should be all?)
for pubkey in psbt_in.hd_keypaths.keys():
if hash160(pubkey) in scriptCode or pubkey in scriptCode:
pubkeys.append(pubkey)
# Figure out which keys in inputs are from our wallet
for pubkey in pubkeys:
keypath = psbt_in.hd_keypaths[pubkey]
if master_fpr == struct.pack("<I", keypath[0]):
# Add the keypath strings
keypath_str = ''
for index in keypath[1:]:
keypath_str += str(index) + "/"
keypath_str = keypath_str[:-1]
signature_attempts.append([keypath_str, pubkey])
all_signature_attempts[i_num] = signature_attempts
# Sign any segwit inputs
if has_segwit:
# Process them up front with all scriptcodes blank
blank_script_code = bytearray()
for i in range(len(segwit_inputs)):
self.app.startUntrustedTransaction(i==0, i, segwit_inputs, blank_script_code, c_tx.nVersion)
# Number of unused fields for Nano S, only changepath and transaction in bytes req
outputData = self.app.finalizeInput(b"DUMMY", -1, -1, change_path, tx_bytes)
# For each input we control do segwit signature
for i in range(len(segwit_inputs)):
# Don't try to sign legacy inputs
if tx.inputs[i].non_witness_utxo is not None:
continue
for signature_attempt in all_signature_attempts[i]:
self.app.startUntrustedTransaction(False, 0, [segwit_inputs[i]], script_codes[i], c_tx.nVersion)
tx.inputs[i].partial_sigs[signature_attempt[1]] = self.app.untrustedHashSign(signature_attempt[0], "", c_tx.nLockTime, 0x01)
elif has_legacy:
first_input = True
# Legacy signing if all inputs are legacy
for i in range(len(legacy_inputs)):
for signature_attempt in all_signature_attempts[i]:
assert(tx.inputs[i].non_witness_utxo is not None)
self.app.startUntrustedTransaction(first_input, i, legacy_inputs, script_codes[i], c_tx.nVersion)
outputData = self.app.finalizeInput(b"DUMMY", -1, -1, change_path, tx_bytes)
tx.inputs[i].partial_sigs[signature_attempt[1]] = self.app.untrustedHashSign(signature_attempt[0], "", c_tx.nLockTime, 0x01)
first_input = False
# Send PSBT back
return {'psbt':tx.serialize()}
# Must return a base64 encoded string with the signed message
# The message can be any string
@ledger_exception
def sign_message(self, message, keypath):
if not check_keypath(keypath):
raise BadArgumentError("Invalid keypath")
message = bytearray(message, 'utf-8')
keypath = keypath[2:]
# First display on screen what address you're signing for
self.app.getWalletPublicKey(keypath, True)
self.app.signMessagePrepare(keypath, message)
signature = self.app.signMessageSign()
# Make signature into standard bitcoin format
rLength = signature[3]
r = signature[4 : 4 + rLength]
sLength = signature[4 + rLength + 1]
s = signature[4 + rLength + 2:]
if rLength == 33:
r = r[1:]
if sLength == 33:
s = s[1:]
sig = bytearray(chr(27 + 4 + (signature[0] & 0x01)), 'utf8') + r + s
return {"signature":base64.b64encode(sig).decode('utf-8')}
@ledger_exception
def display_address(self, keypath, p2sh_p2wpkh, bech32):
if not check_keypath(keypath):
raise BadArgumentError("Invalid keypath")
output = self.app.getWalletPublicKey(keypath[2:], True, (p2sh_p2wpkh or bech32), bech32)
return {'address': output['address'][12:-2]} # HACK: A bug in getWalletPublicKey results in the address being returned as the string "bytearray(b'<address>')". This extracts the actual address to work around this.
# Setup a new device
def setup_device(self, label='', passphrase=''):
raise UnavailableActionError('The Ledger Nano S and X do not support software setup')
# Wipe this device
def wipe_device(self):
raise UnavailableActionError('The Ledger Nano S and X do not support wiping via software')
# Restore device from mnemonic or xprv
def restore_device(self, label=''):
raise UnavailableActionError('The Ledger Nano S and X do not support restoring via software')
# Begin backup process
def backup_device(self, label='', passphrase=''):
raise UnavailableActionError('The Ledger Nano S and X do not support creating a backup via software')
# Close the device
def close(self):
self.dongle.close()
# Prompt pin
def prompt_pin(self):
raise UnavailableActionError('The Ledger Nano S and X do not need a PIN sent from the host')
# Send pin
def send_pin(self, pin):
raise UnavailableActionError('The Ledger Nano S and X do not need a PIN sent from the host')
def enumerate(password=''):
results = []
for device_id in LEDGER_DEVICE_IDS:
for d in hid.enumerate(LEDGER_VENDOR_ID, device_id):
if ('interface_number' in d and d['interface_number'] == 0 \
or ('usage_page' in d and d['usage_page'] == 0xffa0)):
d_data = {}
path = d['path'].decode()
d_data['type'] = 'ledger'
d_data['model'] = 'ledger_nano_x' if device_id == 0x0004 else 'ledger_nano_s'
d_data['path'] = path
client = None
with handle_errors(common_err_msgs["enumerate"], d_data):
client = LedgerClient(path, password)
master_xpub = client.get_pubkey_at_path('m/0h')['xpub']
d_data['fingerprint'] = get_xpub_fingerprint_hex(master_xpub)
d_data['needs_pin_sent'] = False
d_data['needs_passphrase_sent'] = False
if client:
client.close()
results.append(d_data)
return results
|
py | b41713ebaf1e73aa3a4a762b03da514e8f3e687c | import math
exp_name = 'basicvsrpp_wn_4x_adam_cctv_degradation'
# model settings
model = dict(
type='BasicVSR',
generator=dict(
type='BasicVSRPlusPlusNorm',
mid_channels=64,
num_blocks=7,
max_residue_magnitude=10,
is_low_res_input=True,
spynet_pretrained='https://download.openmmlab.com/mmediting/restorers/'
'basicvsr/spynet_20210409-c6c1bd09.pth',
cpu_cache_length=50,
weight_norm=True),
pixel_loss=dict(type='CharbonnierLoss', loss_weight=1.0, reduction='mean'))
# model training and testing settings
train_cfg = dict(fix_iter=5000)
test_cfg = dict(metrics=['PSNR', 'SSIM'], crop_border=0)
# dataset settings
scale = 4
gt_size = (256, 256)
lq_size = (64, 64)
ksizes = [7, 9, 11, 13, 15, 17, 19, 21]
iso_blur_sigma = (0.1, 2.8)
aniso_blur_sigma = (0.5, 8)
aniso_blur_angle = (0, math.pi)
multivar_noise_cov = [
[683.56881, 594.85845375, 666.11675025],
[594.85845375, 801.167823, 823.71264075],
[666.11675025, 823.71264075, 767.7930915]]
gaussian_noise_sigma = (1, 25)
jpeg_quality = (30, 95)
train_pipeline = [
dict(
type='GenerateSegmentIndices',
interval_list=[1],
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='bgr'),
dict(
type='SequenceRandomCrop',
keys=['gt'],
crop_size=gt_size,
offset=(140, 0)),
dict(
type='Flip',
keys=['gt'],
flip_ratio=0.5,
direction='horizontal'),
dict(type='Flip',
keys=['gt'],
flip_ratio=0.5,
direction='vertical'),
dict(type='RandomTransposeHW', keys=['gt'], transpose_ratio=0.5),
dict(type='Copy', from_key='gt', to_key='lq'),
dict(
type='Shuffle',
keep_order_indices=[2, 3],
transforms=[
dict(
type='RandomChoice',
transforms=[
dict(
type='ISOGaussianBlur',
keys=['lq'],
ksizes=ksizes,
sigma_range=iso_blur_sigma),
dict(
type='ANISOGaussianBlur',
keys=['lq'],
ksizes=ksizes,
sigma_x_range=aniso_blur_sigma,
sigma_y_range=aniso_blur_sigma,
angle_range=aniso_blur_angle)]),
dict(
type='RandomChoice',
p=0.8,
transforms=[
dict(
type='ISOGaussianBlur',
keys=['lq'],
ksizes=ksizes,
sigma_range=iso_blur_sigma),
dict(
type='ANISOGaussianBlur',
keys=['lq'],
ksizes=ksizes,
sigma_x_range=aniso_blur_sigma,
sigma_y_range=aniso_blur_sigma,
angle_range=aniso_blur_angle)]),
dict(
type='RandomChoice',
transforms=[
dict(
type='Rescale',
keys=['lq'],
size=lq_size,
interpolation=['bilinear', 'bicubic', 'area']),
dict(
type='RandomRescale',
keys=['lq'],
scale_range=(0.5 / scale, 1),
size_factor=2,
interpolation=['bilinear', 'bicubic', 'area']), ],
weights=[0.75, 0.25]),
dict(
type='Rescale',
keys=['lq'],
size=lq_size,
interpolation=['bilinear', 'bicubic', 'area']),
dict(
type='RandomChoice',
transforms=[
dict(
type='MultivariateGaussianNoise',
keys=['lq'],
mean=[0, 0, 0],
cov=multivar_noise_cov),
dict(
type='AdditiveWhiteGaussianNoise',
keys=['lq'],
sigma_range=gaussian_noise_sigma),
dict(
type='GrayAdditiveWhiteGaussianNoise',
keys=['lq'],
sigma_range=gaussian_noise_sigma)],
weights=[0.2, 0.4, 0.4]),
dict(
type='JPEGCompression',
keys=['lq'],
quality_range=jpeg_quality,
p=0.75),
dict(type='CameraSensorNoise', keys=['lq'], p=0.25)]),
dict(
type='JPEGCompression',
keys=['lq'],
quality_range=jpeg_quality),
dict(
type='Normalize',
keys=['lq', 'gt'],
mean=(0., 0., 0.),
std=(255., 255., 255.),
to_rgb=True),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path'])
]
test_pipeline = [
dict(
type='GenerateConstantSegmentIndices',
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='gt',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq', 'gt']),
dict(type='FramesToTensor', keys=['lq', 'gt']),
dict(
type='Collect',
keys=['lq', 'gt'],
meta_keys=['lq_path', 'gt_path', 'key'])
]
demo_pipeline = [
dict(
type='GenerateConstantSegmentIndices',
filename_tmpl='{:05d}.png'),
dict(
type='LoadImageFromFileList',
io_backend='disk',
key='lq',
channel_order='rgb'),
dict(type='RescaleToZeroOne', keys=['lq']),
dict(type='FramesToTensor', keys=['lq']),
dict(type='Collect', keys=['lq'], meta_keys=['lq_path', 'key'])
]
data = dict(
workers_per_gpu=6,
train_dataloader=dict(samples_per_gpu=4, drop_last=True),
val_dataloader=dict(samples_per_gpu=1),
test_dataloader=dict(samples_per_gpu=1, workers_per_gpu=1),
# train
train=dict(
type='SRCCTVMultipleGTDataset',
lq_folder='/131-data/cctv-data/sjtu/super_resolution/4KHDR/train_4k',
gt_folder='/131-data/cctv-data/sjtu/super_resolution/4KHDR/train_4k',
num_input_frames=30,
pipeline=train_pipeline,
scale=scale,
test_mode=False),
# val
val=dict(
type='SRCCTVMultipleGTDataset',
lq_folder='/131-data/cctv-data/sjtu/super_resolution/4KHDR/val_SDR_540p',
gt_folder='/131-data/cctv-data/sjtu/super_resolution/4KHDR/val_4k',
num_input_frames=30,
pipeline=test_pipeline,
scale=scale,
test_mode=True),
test=dict(
type='SRCCTVMultipleGTDataset',
lq_folder='/131-data/cctv-data/sjtu/super_resolution/4KHDR/val_SDR_540p',
gt_folder='/131-data/cctv-data/sjtu/super_resolution/4KHDR/val_4k',
num_input_frames=30,
pipeline=test_pipeline,
scale=scale,
test_mode=True),
)
# optimizer
optimizers = dict(
generator=dict(
type='Adam',
lr=1e-4,
betas=(0.9, 0.99),
paramwise_cfg=dict(custom_keys={'spynet': dict(lr_mult=0.25)})))
# learning policy
total_iters = 100000
lr_config = dict(
policy='CosineRestart',
by_epoch=False,
periods=[25000, 25000, 25000, 25000],
restart_weights=[1, 1, 1, 1],
min_lr=1e-7)
checkpoint_config = dict(interval=5000, save_optimizer=True, by_epoch=False)
# remove gpu_collect=True in non distributed training
# evaluation = dict(interval=5000, save_image=False, gpu_collect=True)
log_config = dict(
interval=100,
hooks=[
dict(type='TextLoggerHook', by_epoch=False),
# dict(type='TensorboardLoggerHook'),
])
visual_config = None
# runtime settings
dist_params = dict(backend='nccl')
log_level = 'INFO'
work_dir = f'./work_dirs/{exp_name}'
load_from = None
resume_from = None
workflow = [('train', 1)]
find_unused_parameters = True
seed = 0
deterministic = True
|
py | b41714bede5bf27ebc69d812cae46ba8d88d4c35 | from click import Option
from versio.version import Version
from unleash import log, opts, issues, commit, info
from .utils_assign import find_assign, replace_assign
from .utils_tree import require_file
PLUGIN_NAME = 'versions'
def require_setup_py():
return require_file(
'setup.py', 'No setup.py found',
'The version could not determined because no setup.py file was found. '
'Either supply a release version explicity or make sure setup.py '
'exists in the root of the repository.')
def setup(cli):
cli.commands['release'].params.append(Option(
['--dev-version', '-d'],
help='Set new development version to this. If not given, '
'auto-increment the release-version.'))
cli.commands['release'].params.append(Option(
['--release-version', '-v'],
help='Set the release version to this. If not given, will be '
'auto-detected from setup.py.'))
cli.params.append(Option(
['--package-dir', '-p'],
multiple=True,
help='Directories in which packages can be found (used to update '
'__version__ variables. Can be given multiple times.'))
def _shorten_version(version):
v = Version(str(version))
v.parts = [v.parts[0]] + [None] * 4
return v
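# Rough illustration of _shorten_version (assuming versio's Version keeps the
# release segment in parts[0]): a version string like '1.2.3.dev4' collapses
# to '1.2.3', i.e. any pre/post/dev components are dropped.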
def _set_commit_version(version):
# Steps
# 1. Replace commit message
# 2. Replace version in setup.py
# 3. Replace version in PKGNAME/__init__.py
setup_py = require_setup_py()
log.info('Updating setup.py and package version ({})'.format(version))
# update setup.py
commit.set_path_data('setup.py', replace_assign(setup_py,
'version',
version, ))
# update PKGNAME/__init__.py files
for fn in info['init_files']:
# replace version info
commit.set_path_data(fn, replace_assign(
commit.get_path_data(fn),
'__version__',
version, ))
def collect_info():
release_version = opts.get('release_version')
dev_version = opts.get('dev_version')
setup_py = require_setup_py()
try:
if release_version is None:
# try extracting version info
try:
release_version = find_assign(setup_py, 'version')
except ValueError as e:
issues.error(
e, 'There was an issue extracting the version number from '
'setup.py. Please make sure there is only a single '
'version= assignment in that file.')
log.debug('Release version automatically determined from setup.py')
else:
log.debug('Release version given on commandline.')
# parse release version string
release_version = _shorten_version(release_version)
if dev_version is None:
# if we're given no dev version, we try to create one by
# incrementing the release version
dev_version = Version(str(release_version))
dev_version.bump('release')
dev_version.bump('dev')
else:
# parse dev version string
dev_version = Version(dev_version)
except TypeError as e:
issues.error(
'Bad version number: {}'.format(e),
'The version number "{}" is not a version number that can be '
'understood by distutils.\n\n'
'Please correct the different version number and try again.'
.format(e))
# get package name
try:
pkg_name = find_assign(setup_py, 'name')
except ValueError as e:
issues.error(
e,
'Could not extract package name from setup.py. Please make sure '
'there is only a single name= expression in that file.')
info['pkg_name'] = pkg_name
info['release_version'] = str(release_version)
info['dev_version'] = str(dev_version)
# create the short versions
info['release_version_short'] = str(_shorten_version(release_version))
info['dev_version_short'] = str(_shorten_version(dev_version))
# use provided package dirs or auto-detected one from setup.py
pkg_paths = set(opts['package_dir'])
if not pkg_paths:
pkg_paths = set([info['pkg_name'], info['pkg_name'].replace('-', '_')])
log.debug('Package paths: {}'.format(pkg_paths))
init_files = [path + '/__init__.py' for path in pkg_paths]
init_files = filter(commit.path_exists, init_files)
if not init_files:
issues.warn(
'No __init__.py files found for packages.',
'While looking for package __init__.py files to update version '
'information in, none were found. This most often happens if your '
'package contains only modules or is not named after its primary '
'Python package.')
info['init_files'] = init_files
def prepare_release():
# update commit message
commit.message = u'Release version {}'.format(info['release_version'])
_set_commit_version(info['release_version'])
def prepare_dev():
commit.message = (u'Start developing version {} (after release of {})'
.format(info['dev_version'], info['release_version']))
_set_commit_version(info['dev_version'])
|
py | b41714eb53e94111da3e0e0d47e00e4b39f1682b | from django.contrib import admin
from .models import Question, Answer
# Register your models here.
admin.site.register(Question)
admin.site.register(Answer)
|
py | b417152244e4e54ef5e7a78c3d76d7429ce907c1 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
'''
QSDsan: Quantitative Sustainable Design for sanitation and resource recovery systems
This module is developed by:
Yalin Li <[email protected]>
This module is under the University of Illinois/NCSA Open Source License.
Please refer to https://github.com/QSD-Group/QSDsan/blob/main/LICENSE.txt
for license details.
'''
# %%
__all__ = ('copy_attr', 'AttrSetter', 'AttrFuncSetter', 'DictAttrSetter')
def copy_attr(new, original, skip=(), same=()):
'''
Set the attributes of a new object based on an original one:
- If one attribute is in `skip`, it will not be copied to the new object.
- If one attribute is in `same`, the attribute of the new object will be \
the same as the original object.
- For remaining attributes, if it has :func:`copy`, then the attribute \
of the new object will be set as the copy of the original one; otherwise, \
it will be the same as the original one.
Parameters
----------
new : obj
The new object.
origin : obj
The original object.
skip : Iterable
Attributes that will not be copied.
same : Iterable
Attributes that will be the same for the original one and the copy.
'''
for slot in original.__slots__:
if slot in skip:
continue
else:
value = getattr(original, slot)
if slot in same:
                setattr(new, slot, value)
else:
if hasattr(value, 'copy'):
new_value = value.copy()
else:
new_value = value
setattr(new, slot, new_value)
return new
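# Minimal usage sketch for copy_attr (illustrative comments only; ``Dummy`` is
# a made-up class, not part of QSDsan):
#
#     class Dummy:
#         __slots__ = ('ID', 'data')
#
#     a = Dummy(); a.ID = 'a1'; a.data = [1, 2]
#     b = Dummy()
#     copy_attr(b, a, same=('ID',))
#     # b.ID is the same object as a.ID; b.data is a copy of a.data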
class AttrSetter:
__slots__ = ('obj', 'attrs')
def __init__(self, obj, attrs):
self.obj = obj
if isinstance(attrs, str):
attrs = (attrs,)
self.attrs = attrs
def __call__(self, value):
for attr in self.attrs:
setattr(self.obj, attr, value)
class AttrFuncSetter:
__slots__ = ('obj', 'attrs', 'funcs')
def __init__(self, obj, attrs, funcs):
self.obj = obj
if isinstance(attrs, str):
attrs = (attrs,)
if callable(funcs):
funcs = (funcs,)
self.attrs = attrs
self.funcs = funcs
def __call__(self, value):
attrs = self.attrs
funcs = self.funcs
obj = self.obj
if len(funcs) == 1:
func = funcs[0]
for attr in attrs:
setattr(obj, attr, func(value))
elif len(funcs) == len(attrs):
for num, func in enumerate(funcs):
setattr(obj, attrs[num], func(value))
else:
raise ValueError('Number of functions does not match number of attributes.')
class DictAttrSetter:
__slots__ = ('obj', 'dict_attr', 'keys')
def __init__(self, obj, dict_attr, keys):
self.dict_attr = getattr(obj, dict_attr)
if isinstance(keys, str):
keys = (keys,)
self.keys = keys
def __call__(self, value):
for key in self.keys:
            self.dict_attr[key] = value
|
py | b4171707cb36913d74810fe314f5166216883d2a | import os
from typing import Callable, Iterable, List, Type
import aiohttp_jinja2
import aiohttp_security
import aiohttp_session
import jinja2
import socketio
from aiohttp import web
from aiohttp_security import SessionIdentityPolicy
from aiohttp_session.cookie_storage import EncryptedCookieStorage
import auth
from config import config
runners: List[web.AppRunner] = []
def setup_app(middlewares: Iterable[Callable] = ()) -> web.Application:
app = web.Application(middlewares=middlewares)
app["AuthzPolicy"] = auth.AuthPolicy()
aiohttp_security.setup(app, SessionIdentityPolicy(), app["AuthzPolicy"])
aiohttp_session.setup(app, EncryptedCookieStorage(auth.get_secret_token()))
return app
async def setup_runner(app: web.Application, site: Type[web.BaseSite], **kwargs):
runner = web.AppRunner(app)
runners.append(runner)
await runner.setup()
s = site(runner, **kwargs)
await s.start()
# MAIN APP
sio = socketio.AsyncServer(
async_mode="aiohttp",
engineio_logger=False,
cors_allowed_origins=config.get("Webserver", "cors_allowed_origins", fallback=None),
)
app = setup_app()
aiohttp_jinja2.setup(app, loader=jinja2.FileSystemLoader("templates"))
basepath = os.environ.get("PA_BASEPATH", "/")[1:]
socketio_path = basepath + "socket.io"
sio.attach(app, socketio_path=socketio_path)
app["state"] = {}
# API APP
admin_app = web.Application()
api_app = setup_app([auth.token_middleware])
|
py | b41717829eb5c77837d7bf3acb7a9e0df7ad204c | from typing import Callable, List, Tuple, Union
from ignite.engine import Engine, Events
class EpochOutputStore:
"""EpochOutputStore handler to save output prediction and target history
after every epoch, could be useful for e.g., visualization purposes.
Note:
This can potentially lead to a memory error if the output data is
larger than available RAM.
Args:
output_transform: a callable that is used to
transform the :class:`~ignite.engine.engine.Engine`'s
``process_function``'s output , e.g., lambda x: x[0]
Examples::
eos = EpochOutputStore()
trainer = create_supervised_trainer(model, optimizer, loss)
train_evaluator = create_supervised_evaluator(model, metrics)
eos.attach(train_evaluator)
@trainer.on(Events.EPOCH_COMPLETED)
def log_training_results(engine):
train_evaluator.run(train_loader)
output = eos.data
# do something with output, e.g., plotting
.. versionadded:: 0.4.2
"""
def __init__(self, output_transform: Callable = lambda x: x):
self.data = [] # type: List[Union[int, Tuple[int, int]]]
self.output_transform = output_transform
def reset(self) -> None:
"""Reset the attribute data to empty list."""
self.data = []
def update(self, engine: Engine) -> None:
"""Append the output of Engine to attribute data."""
output = self.output_transform(engine.state.output)
self.data.append(output)
def attach(self, engine: Engine) -> None:
"""Attaching `reset` method at EPOCH_STARTED and
`update` method at ITERATION_COMPLETED."""
engine.add_event_handler(Events.EPOCH_STARTED, self.reset)
engine.add_event_handler(Events.ITERATION_COMPLETED, self.update)
|
py | b417186466552033d13f2728e0c73331756a41ff | #!/usr/bin/env python
#Copyright 2016 Open Platform for NFV Project, Inc. and its contributors
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys, os, glob, threading
import getopt, socket
import logging, errno
import uuid
sys.path.insert(0, glob.glob('./lib')[0])
from dominoRPC import Communication
from dominoRPC.ttypes import *
from dominoRPC.constants import *
from dominoCLI import DominoClientCLI
from dominoCLI.ttypes import *
from dominoCLI.constants import *
from thrift import Thrift
from thrift.transport import TSocket
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol
from thrift.server import TServer
from util import *
#Load configuration parameters
from domino_conf import *
class CommunicationHandler:
def __init__(self):
self.log = {}
def __init__(self, dominoclient):
self.log = {}
self.dominoClient = dominoclient
self.transport = None
self.protocol = None
self.sender = None
# Template Push from Domino Server is received
# Actions:
# - Depending on Controller Domain, call API
# - Respond Back with Push Response
def d_push(self, push_msg):
logging.info('%s Received Template File', self.dominoClient.UDID)
# Retrieve the template file
try:
os.makedirs(TOSCA_RX_DIR+str(self.dominoClient.UDID))
except OSError as exception:
if exception.errno == errno.EEXIST:
logging.debug('IGNORING error: ERRNO %d; %s exists.', exception.errno, TOSCA_RX_DIR+str(self.dominoClient.UDID))
else:
        logging.error('IGNORING error in creating %s. Err no: %d', TOSCA_RX_DIR+str(self.dominoClient.UDID), exception.errno)
try:
miscutil.write_templatefile(TOSCA_RX_DIR+str(self.dominoClient.UDID)+'/'+str(push_msg.template_UUID)+'.yaml' , push_msg.template)
except:
logging.error('FAILED to write the pushed file: %s', sys.exc_info()[0])
push_r = PushResponseMessage()
# Fill response message fields
push_r.domino_udid = self.dominoClient.UDID
push_r.seq_no = self.dominoClient.seqno
push_r.responseCode = FAILED
self.dominoClient.seqno = self.dominoClient.seqno + 1
      return push_r
    # Any inspection code goes here
## End of inspection
# Call NB API
# If heat client, call heat command
# If ONOS client, run as shell script
## End of NB API call
# Marshall the response message for the Domino Server Fill
push_r = PushResponseMessage()
# Fill response message fields
push_r.domino_udid = self.dominoClient.UDID
push_r.seq_no = self.dominoClient.seqno
push_r.responseCode = SUCCESS
## End of filling
self.dominoClient.seqno = self.dominoClient.seqno + 1
return push_r
def openconnection(self):
try:
# Make socket
transport = TSocket.TSocket(self.dominoClient.dominoserver_IP, DOMINO_SERVER_PORT)
transport.setTimeout(THRIFT_RPC_TIMEOUT_MS)
# Add buffering to compensate for slow raw sockets
self.transport = TTransport.TBufferedTransport(transport)
# Wrap in a protocol
self.protocol = TBinaryProtocol.TBinaryProtocol(self.transport)
# Create a client to use the protocol encoder
self.sender = Communication.Client(self.protocol)
self.transport.open()
except Thrift.TException, tx:
logging.error('%s' , tx.message)
  def closeconnection(self):
self.transport.close()
class CLIHandler:
def __init__(self):
self.log = {}
def __init__(self, dominoclient, CLIservice):
self.log = {}
self.dominoClient = dominoclient
self.CLIservice = CLIservice
def d_CLI(self, msg):
#logging.info('Received CLI %s', msg.CLI_input) #breaks testing due to random TUIDs
CLIrespmsg = CLIResponse()
CLIrespmsg.CLI_response = self.CLIservice.process_input(msg.CLI_input)
return CLIrespmsg
class DominoClientCLIService(threading.Thread):
def __init__(self, dominoclient, communicationhandler, interactive):
threading.Thread.__init__(self)
self.dominoclient = dominoclient
self.communicationhandler = communicationhandler
self.interactive = interactive
def process_input(self, args):
if len(args) == 0:
return 'Empty API body'
try:
if args[0] == 'heartbeat':
self.dominoclient.heartbeat()
elif args[0] == 'publish':
opts, args = getopt.getopt(args[1:],"t:k:",["tosca-file=","tuid"])
if len(opts) == 0:
print '\nUsage: publish -t <toscafile> -k <TUID>'
return
template_UUID = None
toscafile = None
for opt, arg in opts:
if opt in ('-t', '--tosca-file'):
toscafile = arg
elif opt in ('-k', '--tuid'):
template_UUID = arg
if toscafile is not None:
self.dominoclient.publish(toscafile,template_UUID)
else:
print '\nUsage: publish -t <toscafile> -k <TUID>'
elif args[0] == 'subscribe':
labels = []
templateTypes = []
labelop = APPEND
templateop = APPEND
opts, args = getopt.getopt(args[1:],"l:t:",["labels=","ttype=","lop=","top="])
for opt, arg in opts:
if opt in ('-l', '--labels'):
labels = labels + arg.split(',')
elif opt in ('-t', '--ttype'):
templateTypes = templateTypes + arg.split(',')
elif opt in ('--lop'):
try:
labelop = str2enum[arg.upper()]
except KeyError as ex:
print '\nInvalid label option, pick one of: APPEND, OVERWRITE, DELETE'
return
elif opt in ('--top'):
try:
templateop = str2enum[arg.upper()]
except KeyError as ex:
print '\nInvalid label option, pick one of: APPEND, OVERWRITE, DELETE'
return
#check if labels or supported templates are nonempty
if labels != [] or templateTypes != []:
self.dominoclient.subscribe(labels, templateTypes, labelop, templateop)
elif args[0] == 'register':
self.dominoclient.start()
elif args[0] == 'list-tuids':
return self.dominoclient.query(['list-tuids'])
else:
        return 'Command is mistyped or not supported!'
except getopt.GetoptError:
      print 'Command is mistyped or not supported!'
def run(self):
global DEFAULT_TOSCA_PUBFILE
if self.interactive == "TRUE":
flag = True
else:
flag = False
if flag: #interactive CLI, loop in while until killed
while True:
sys.stdout.write('>>')
input_string = raw_input()
args = input_string.split()
if len(args) == 0:
continue
sys.stdout.write('>>')
#process input arguments
resp_msg = self.process_input(args)
if resp_msg is not None:
print resp_msg
else: #domino cli-client is used, listen for the CLI rpc calls
cliHandler = CLIHandler(self.dominoclient, self)
processor = DominoClientCLI.Processor(cliHandler)
transport = TSocket.TServerSocket(port=self.dominoclient.CLIport)
tfactory = TTransport.TBufferedTransportFactory()
pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#Use TThreadedServer or TThreadPoolServer for a multithreaded server
CLIServer = TServer.TSimpleServer(processor, transport, tfactory, pfactory)
logging.debug('RPC service for CLI is starting...')
CLIServer.serve()
class DominoClient:
def __init__(self):
self.communicationHandler = CommunicationHandler(self)
self.processor = None
self.transport = None
self.tfactory = None
self.pfactory = None
self.communicationServer = None
self.CLIservice = None
self.serviceport = DOMINO_CLIENT_PORT
self.dominoserver_IP = DOMINO_SERVER_IP
self.CLIport = DOMINO_CLI_PORT
#Start from UNREGISTERED STATE
#TO BE DONE: initialize from a saved state
self.state = 'UNREGISTERED'
self.seqno = 0
self.UDID = 1
def start_communicationService(self):
self.processor = Communication.Processor(self.communicationHandler)
self.transport = TSocket.TServerSocket(port=int(self.serviceport))
self.tfactory = TTransport.TBufferedTransportFactory()
self.pfactory = TBinaryProtocol.TBinaryProtocolFactory()
#Use TThreadedServer or TThreadPoolServer for a multithreaded server
#self.communicationServer = TServer.TThreadedServer(self.processor, self.transport, self.tfactory, self.pfactory)
self.communicationServer = TServer.TThreadPoolServer(self.processor, self.transport, self.tfactory, self.pfactory)
self.communicationServer.serve()
def start(self):
try:
self.communicationHandler.openconnection()
self.register()
except Thrift.TException, tx:
print '%s' % (tx.message)
def register(self):
if self.state == 'UNREGISTERED':
logging.info('%d Sending Registration', self.UDID)
#prepare registration message
reg_msg = RegisterMessage()
reg_msg.domino_udid_desired = UDID_DESIRED
reg_msg.seq_no = self.seqno
reg_msg.ipaddr = netutil.get_ip()
reg_msg.tcpport = self.serviceport
reg_msg.supported_templates = LIST_SUPPORTED_TEMPLATES
try:
reg_msg_r = self.sender().d_register(reg_msg)
logging.info('Registration Response: Response Code: %d' , reg_msg_r.responseCode)
if reg_msg_r.comments:
logging.debug('Response Comments: %s' , reg_msg_r.comments)
if reg_msg_r.responseCode == SUCCESS:
self.state = 'REGISTERED'
self.UDID = reg_msg_r.domino_udid_assigned
else:
          #Handle registration failure here (possibly based on response comments)
pass
except (Thrift.TException, TSocket.TTransportException) as tx:
logging.error('%s' , tx.message)
except (socket.timeout) as tx:
self.handle_RPC_timeout(reg_msg)
except (socket.error) as tx:
logging.error('%s' , tx.message)
self.seqno = self.seqno + 1
def heartbeat(self):
if self.state == 'UNREGISTERED':
self.start()
logging.info('%s Sending heartbeat', self.UDID)
hbm = HeartBeatMessage()
hbm.domino_udid = self.UDID
hbm.seq_no = self.seqno
try:
hbm_r = self.sender().d_heartbeat(hbm)
logging.info('heart beat received from: %s ,sequence number: %d' , hbm_r.domino_udid, hbm_r.seq_no)
except (Thrift.TException, TSocket.TTransportException) as tx:
logging.error('%s' , tx.message)
except (socket.timeout) as tx:
self.handle_RPC_timeout(hbm)
except:
logging.error('Unexpected error: %s', sys.exc_info()[0])
self.seqno = self.seqno + 1
def publish(self, toscafile, template_UUID=None):
if self.state == 'UNREGISTERED':
self.start()
logging.info('Publishing the template file: ' + toscafile)
pub_msg = PublishMessage()
pub_msg.domino_udid = self.UDID
pub_msg.seq_no = self.seqno
pub_msg.template_type = 'tosca-nfv-v1.0'
if template_UUID is not None:
pub_msg.template_UUID = template_UUID
try:
pub_msg.template = miscutil.read_templatefile(toscafile)
except IOError as e:
logging.error('I/O error(%d): %s' , e.errno, e.strerror)
return
try:
pub_msg_r = self.sender().d_publish(pub_msg)
logging.info('Publish Response is received from: %s ,sequence number: %d Status: %d', pub_msg_r.domino_udid, pub_msg_r.seq_no, pub_msg_r.responseCode)
except (Thrift.TException, TSocket.TTransportException) as tx:
print '%s' % (tx.message)
except (socket.timeout) as tx:
self.handle_RPC_timeout(pub_msg)
self.seqno = self.seqno + 1
def subscribe(self, labels, templateTypes, label_op, template_op):
if self.state == 'UNREGISTERED':
self.start()
logging.info('subscribing labels %s and templates %s', labels, templateTypes)
#send subscription message
sub_msg = SubscribeMessage()
sub_msg.domino_udid = self.UDID
sub_msg.seq_no = self.seqno
sub_msg.template_op = template_op
sub_msg.supported_template_types = templateTypes
sub_msg.label_op = label_op
sub_msg.labels = labels
try:
sub_msg_r = self.sender().d_subscribe(sub_msg)
logging.info('Subscribe Response is received from: %s ,sequence number: %d', sub_msg_r.domino_udid,sub_msg_r.seq_no)
except (Thrift.TException, TSocket.TTransportException) as tx:
logging.error('%s' , tx.message)
except (socket.timeout) as tx:
self.handle_RPC_timeout(sub_msg)
self.seqno = self.seqno + 1
def query(self, queryString, template_UUID=None):
logging.info('querying Domino Server: %s', queryString)
query_msg = QueryMessage()
query_msg.domino_udid = self.UDID
query_msg.seq_no = self.seqno
query_msg.queryString = queryString
query_msg.template_UUID = template_UUID
self.seqno = self.seqno + 1
try:
query_msg_r = self.sender().d_query(query_msg)
logging.info('Query Response is received from: %s ,sequence number: %d', query_msg_r.domino_udid,query_msg_r.seq_no)
if (query_msg_r.queryResponse is not None) and (len(query_msg_r.queryResponse)>0):
return query_msg_r.queryResponse
except (Thrift.TException, TSocket.TTransportException) as tx:
logging.error('%s' , tx.message)
except (socket.timeout) as tx:
self.handle_RPC_timeout(query_msg)
def stop(self):
try:
self.communicationHandler.closeconnection()
except Thrift.TException, tx:
logging.error('%s' , tx.message)
def sender(self):
return self.communicationHandler.sender
def startCLI(self, interactive):
self.CLIservice = DominoClientCLIService(self, self.communicationHandler, interactive)
logging.info('CLI Service is starting')
self.CLIservice.start()
#to wait until CLI service is finished
#self.CLIservice.join()
def set_serviceport(self, port):
self.serviceport = port
def set_CLIport(self, cliport):
self.CLIport = cliport
def set_dominoserver_ipaddr(self, ipaddr):
self.dominoserver_IP = ipaddr
def handle_RPC_timeout(self, RPCmessage):
# TBD: handle each RPC timeout separately
if RPCmessage.messageType == HEART_BEAT:
logging.debug('RPC Timeout for message type: HEART_BEAT')
elif RPCmessage.messageType == PUBLISH:
logging.debug('RPC Timeout for message type: PUBLISH')
elif RPCmessage.messageType == SUBSCRIBE:
logging.debug('RPC Timeout for message type: SUBSCRIBE')
elif RPCmessage.messageType == REGISTER:
logging.debug('RPC Timeout for message type: REGISTER')
elif RPCmessage.messageType == QUERY:
logging.debug('RPC Timeout for message type: QUERY')
def main():
client = DominoClient()
loglevel = LOGLEVEL
interactive = INTERACTIVE
#process input arguments
try:
opts, args = getopt.getopt(sys.argv[1:],"hc:p:i:l:",["conf=","port=","ipaddr=","log=","iac=","cliport=","uuid=","regmod="])
except getopt.GetoptError:
print 'DominoClient.py -c/--conf <configfile> -p/--port <socketport> -i/--ipaddr <IPaddr> -l/--log <loglevel> --iac=true/false --cliport <cliport>'
sys.exit(2)
for opt, arg in opts:
if opt == '-h':
print 'DominoClient.py -c/--conf <configfile> -p/--port <socketport> -i/--ipaddr <IPaddr> -l/--log <loglevel> --iac=true/false --cliport <cliport>'
sys.exit()
elif opt in ("-c", "--conf"):
configfile = arg
elif opt in ("-p", "--port"):
client.set_serviceport(int(arg))
elif opt in ("-i", "--ipaddr"):
client.set_dominoserver_ipaddr(arg)
elif opt in ("-l", "--log"):
loglevel = arg
elif opt in ("--iac"):
interactive = arg.upper()
elif opt in ("--cliport"):
client.set_CLIport(int(arg))
elif opt in ("--uuid"):
client.UDID = arg
elif opt in ("--regmod"):
if arg.upper() == 'REGISTERED':
client.state = 'REGISTERED'
#Set logging level
numeric_level = getattr(logging, loglevel.upper(), None)
try:
if not isinstance(numeric_level, int):
raise ValueError('Invalid log level: %s' % loglevel)
logging.basicConfig(filename=logfile,level=numeric_level, format='%(asctime)s %(message)s', datefmt='%m/%d/%Y %I:%M:%S %p')
except ValueError, ex:
print ex.message
exit()
#The client is starting
logging.debug('Domino Client Starting...')
client.start()
client.startCLI(interactive)
client.start_communicationService()
if __name__ == "__main__":
sys.exit(main())
|
py | b417196a13d8c0cce9bbc386f54a6e8e61164075 | # -*- coding: utf-8 -*-
'''
Management of user accounts
===========================
The user module is used to create and manage user settings, users can be set
as either absent or present
.. code-block:: yaml
fred:
user.present:
- fullname: Fred Jones
- shell: /bin/zsh
- home: /home/fred
- uid: 4000
- gid: 4000
- groups:
- wheel
- storage
- games
testuser:
user.absent
'''
from __future__ import absolute_import
# Import python libs
import logging
import os
# Import salt libs
import salt.utils
log = logging.getLogger(__name__)
def _group_changes(cur, wanted, remove=False):
'''
Determine if the groups need to be changed
'''
old = set(cur)
new = set(wanted)
if (remove and old != new) or (not remove and not new.issubset(old)):
return True
return False
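# Illustrative behaviour of _group_changes (comments only):
#   _group_changes(['wheel', 'games'], ['wheel'], remove=True)  -> True
#   _group_changes(['wheel', 'games'], ['wheel'], remove=False) -> False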
def _changes(name,
uid=None,
gid=None,
groups=None,
optional_groups=None,
remove_groups=True,
home=None,
createhome=True,
password=None,
enforce_password=True,
empty_password=False,
shell=None,
fullname='',
roomnumber='',
workphone='',
homephone='',
loginclass=None,
date=0,
mindays=0,
maxdays=999999,
inactdays=0,
warndays=7,
expire=-1):
'''
Return a dict of the changes required for a user if the user is present,
otherwise return False.
Updated in 2014.7.0 to include support for shadow attributes, all
attributes supported as integers only.
'''
if 'shadow.info' in __salt__:
lshad = __salt__['shadow.info'](name)
lusr = __salt__['user.info'](name)
if not lusr:
return False
change = {}
wanted_groups = sorted(set((groups or []) + (optional_groups or [])))
if uid:
if lusr['uid'] != uid:
change['uid'] = uid
if gid is not None:
if lusr['gid'] not in (gid, __salt__['file.group_to_gid'](gid)):
change['gid'] = gid
default_grp = __salt__['file.gid_to_group'](
gid if gid is not None else lusr['gid']
)
# remove the default group from the list for comparison purposes
if default_grp in lusr['groups']:
lusr['groups'].remove(default_grp)
if name in lusr['groups'] and name not in wanted_groups:
lusr['groups'].remove(name)
# remove default group from wanted_groups, as this requirement is
# already met
if default_grp in wanted_groups:
wanted_groups.remove(default_grp)
if _group_changes(lusr['groups'], wanted_groups, remove_groups):
change['groups'] = wanted_groups
if home:
if lusr['home'] != home:
change['home'] = home
if createhome and not os.path.isdir(home):
change['homeDoesNotExist'] = home
if shell:
if lusr['shell'] != shell:
change['shell'] = shell
if 'shadow.info' in __salt__ and 'shadow.default_hash' in __salt__:
if password:
default_hash = __salt__['shadow.default_hash']()
if lshad['passwd'] == default_hash \
or lshad['passwd'] != default_hash and enforce_password:
if lshad['passwd'] != password:
change['passwd'] = password
        if date and date != 0 and lshad['lstchg'] != date:
            change['date'] = date
        if mindays and mindays != 0 and lshad['min'] != mindays:
            change['mindays'] = mindays
        if maxdays and maxdays != 999999 and lshad['max'] != maxdays:
            change['maxdays'] = maxdays
        if inactdays and inactdays != 0 and lshad['inact'] != inactdays:
            change['inactdays'] = inactdays
        if warndays and warndays != 7 and lshad['warn'] != warndays:
            change['warndays'] = warndays
        if expire and expire != -1 and lshad['expire'] != expire:
            change['expire'] = expire
# GECOS fields
if fullname is not None and lusr['fullname'] != fullname:
change['fullname'] = fullname
# MacOS doesn't have full GECOS support, so check for the "ch" functions
# and ignore these parameters if these functions do not exist.
if 'user.chroomnumber' in __salt__:
if roomnumber is not None and lusr['roomnumber'] != roomnumber:
change['roomnumber'] = roomnumber
if 'user.chworkphone' in __salt__:
if workphone is not None and lusr['workphone'] != workphone:
change['workphone'] = workphone
if 'user.chhomephone' in __salt__:
if homephone is not None and lusr['homephone'] != homephone:
change['homephone'] = homephone
# OpenBSD login class
if __grains__['kernel'] == 'OpenBSD':
if not loginclass:
loginclass = '""'
if __salt__['user.get_loginclass'](name)['loginclass'] != loginclass:
change['loginclass'] = loginclass
return change
def present(name,
uid=None,
gid=None,
gid_from_name=False,
groups=None,
optional_groups=None,
remove_groups=True,
home=None,
createhome=True,
password=None,
enforce_password=True,
empty_password=False,
shell=None,
unique=True,
system=False,
fullname=None,
roomnumber=None,
workphone=None,
homephone=None,
loginclass=None,
date=None,
mindays=None,
maxdays=None,
inactdays=None,
warndays=None,
expire=None):
'''
Ensure that the named user is present with the specified properties
name
The name of the user to manage
uid
The user id to assign, if left empty then the next available user id
will be assigned
gid
The default group id
gid_from_name
If True, the default group id will be set to the id of the group with
the same name as the user, Default is ``False``.
groups
A list of groups to assign the user to, pass a list object. If a group
specified here does not exist on the minion, the state will fail.
If set to the empty list, the user will be removed from all groups
except the default group.
optional_groups
A list of groups to assign the user to, pass a list object. If a group
specified here does not exist on the minion, the state will silently
ignore it.
NOTE: If the same group is specified in both "groups" and
"optional_groups", then it will be assumed to be required and not optional.
remove_groups
Remove groups that the user is a member of that weren't specified in
the state, Default is ``True``.
home
The location of the home directory to manage
createhome
If False, the home directory will not be created if it doesn't exist.
Please note that directories leading up to the home directory
will NOT be created, Default is ``True``.
password
A password hash to set for the user. This field is only supported on
Linux, FreeBSD, NetBSD, OpenBSD, and Solaris.
.. versionchanged:: 0.16.0
BSD support added.
enforce_password
Set to False to keep the password from being changed if it has already
been set and the password hash differs from what is specified in the
"password" field. This option will be ignored if "password" is not
specified, Default is ``True``.
empty_password
Set to True to enable password-less login for user, Default is ``False``.
shell
The login shell, defaults to the system default shell
unique
Require a unique UID, Default is ``True``.
system
Choose UID in the range of FIRST_SYSTEM_UID and LAST_SYSTEM_UID, Default is
``False``.
loginclass
The login class, defaults to empty
(BSD only)
User comment field (GECOS) support (currently Linux, BSD, and MacOS
only):
The below values should be specified as strings to avoid ambiguities when
the values are loaded. (Especially the phone and room number fields which
are likely to contain numeric data)
fullname
The user's full name
roomnumber
The user's room number (not supported in MacOS)
workphone
The user's work phone number (not supported in MacOS)
homephone
The user's home phone number (not supported in MacOS)
.. versionchanged:: 2014.7.0
Shadow attribute support added.
Shadow attributes support (currently Linux only):
The below values should be specified as integers.
date
Date of last change of password, represented in days since epoch
(January 1, 1970).
mindays
The minimum number of days between password changes.
maxdays
The maximum number of days between password changes.
inactdays
The number of days after a password expires before an account is
locked.
warndays
Number of days prior to maxdays to warn users.
expire
Date that account expires, represented in days since epoch (January 1,
1970).
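
    A minimal usage sketch (illustrative only; the user name and values below
    are placeholders, not taken from this module):

    .. code-block:: yaml

        fred:
          user.present:
            - fullname: Fred Jones
            - shell: /bin/zsh
            - home: /home/fred
            - groups:
              - wheel
              - storage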
'''
fullname = salt.utils.sdecode(fullname) if fullname is not None else fullname
roomnumber = salt.utils.sdecode(roomnumber) if roomnumber is not None else roomnumber
workphone = salt.utils.sdecode(workphone) if workphone is not None else workphone
homephone = salt.utils.sdecode(homephone) if homephone is not None else homephone
ret = {'name': name,
'changes': {},
'result': True,
'comment': 'User {0} is present and up to date'.format(name)}
if groups:
missing_groups = [x for x in groups if not __salt__['group.info'](x)]
if missing_groups:
ret['comment'] = 'The following group(s) are not present: ' \
'{0}'.format(','.join(missing_groups))
ret['result'] = False
return ret
if optional_groups:
present_optgroups = [x for x in optional_groups
if __salt__['group.info'](x)]
for missing_optgroup in [x for x in optional_groups
if x not in present_optgroups]:
log.debug('Optional group "{0}" for user "{1}" is not '
'present'.format(missing_optgroup, name))
else:
present_optgroups = None
# Log a warning for all groups specified in both "groups" and
# "optional_groups" lists.
if groups and optional_groups:
for isected in set(groups).intersection(optional_groups):
log.warning('Group "{0}" specified in both groups and '
'optional_groups for user {1}'.format(isected, name))
if gid_from_name:
gid = __salt__['file.group_to_gid'](name)
if empty_password:
__salt__['shadow.del_password'](name)
changes = _changes(name,
uid,
gid,
groups,
present_optgroups,
remove_groups,
home,
createhome,
password,
enforce_password,
empty_password,
shell,
fullname,
roomnumber,
workphone,
homephone,
date,
mindays,
maxdays,
inactdays,
warndays,
expire)
if changes:
if __opts__['test']:
ret['result'] = None
ret['comment'] = ('The following user attributes are set to be '
'changed:\n')
for key, val in changes.items():
ret['comment'] += '{0}: {1}\n'.format(key, val)
return ret
# The user is present
if 'shadow.info' in __salt__:
lshad = __salt__['shadow.info'](name)
if __grains__['kernel'] == 'OpenBSD':
lcpre = __salt__['user.get_loginclass'](name)
pre = __salt__['user.info'](name)
for key, val in changes.items():
if key == 'passwd' and not empty_password:
__salt__['shadow.set_password'](name, password)
continue
if key == 'date':
__salt__['shadow.set_date'](name, date)
continue
if key == 'home' or key == 'homeDoesNotExist':
if createhome:
__salt__['user.chhome'](name, val, True)
else:
__salt__['user.chhome'](name, val, False)
continue
if key == 'mindays':
__salt__['shadow.set_mindays'](name, mindays)
continue
if key == 'maxdays':
__salt__['shadow.set_maxdays'](name, maxdays)
continue
if key == 'inactdays':
__salt__['shadow.set_inactdays'](name, inactdays)
continue
if key == 'warndays':
__salt__['shadow.set_warndays'](name, warndays)
continue
if key == 'expire':
__salt__['shadow.set_expire'](name, expire)
continue
if key == 'groups':
__salt__['user.ch{0}'.format(key)](
name, val, not remove_groups
)
else:
__salt__['user.ch{0}'.format(key)](name, val)
post = __salt__['user.info'](name)
spost = {}
if 'shadow.info' in __salt__:
if lshad['passwd'] != password:
spost = __salt__['shadow.info'](name)
if __grains__['kernel'] == 'OpenBSD':
lcpost = __salt__['user.get_loginclass'](name)
# See if anything changed
for key in post:
if post[key] != pre[key]:
ret['changes'][key] = post[key]
if 'shadow.info' in __salt__:
for key in spost:
if lshad[key] != spost[key]:
ret['changes'][key] = spost[key]
if __grains__['kernel'] == 'OpenBSD':
if lcpost['loginclass'] != lcpre['loginclass']:
ret['changes']['loginclass'] = lcpost['loginclass']
if ret['changes']:
ret['comment'] = 'Updated user {0}'.format(name)
changes = _changes(name,
uid,
gid,
groups,
present_optgroups,
remove_groups,
home,
createhome,
password,
enforce_password,
empty_password,
shell,
fullname,
roomnumber,
workphone,
homephone,
loginclass,
date,
mindays,
maxdays,
inactdays,
warndays,
expire)
if changes:
ret['comment'] = 'These values could not be changed: {0}'.format(
changes
)
ret['result'] = False
return ret
if changes is False:
# The user is not present, make it!
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} set to be added'.format(name)
return ret
if groups and present_optgroups:
groups.extend(present_optgroups)
elif present_optgroups:
groups = present_optgroups[:]
if __salt__['user.add'](name,
uid=uid,
gid=gid,
groups=groups,
home=home,
shell=shell,
unique=unique,
system=system,
fullname=fullname,
roomnumber=roomnumber,
workphone=workphone,
homephone=homephone,
loginclass=loginclass,
createhome=createhome):
ret['comment'] = 'New user {0} created'.format(name)
ret['changes'] = __salt__['user.info'](name)
if 'shadow.info' in __salt__ and not salt.utils.is_windows():
if password and not empty_password:
__salt__['shadow.set_password'](name, password)
spost = __salt__['shadow.info'](name)
if spost['passwd'] != password:
ret['comment'] = 'User {0} created but failed to set' \
' password to' \
' {1}'.format(name, password)
ret['result'] = False
ret['changes']['password'] = password
if date:
__salt__['shadow.set_date'](name, date)
spost = __salt__['shadow.info'](name)
if spost['lstchg'] != date:
ret['comment'] = 'User {0} created but failed to set' \
' last change date to' \
' {1}'.format(name, date)
ret['result'] = False
ret['changes']['date'] = date
if mindays:
__salt__['shadow.set_mindays'](name, mindays)
spost = __salt__['shadow.info'](name)
if spost['min'] != mindays:
ret['comment'] = 'User {0} created but failed to set' \
' minimum days to' \
' {1}'.format(name, mindays)
ret['result'] = False
ret['changes']['mindays'] = mindays
if maxdays:
__salt__['shadow.set_maxdays'](name, maxdays)
spost = __salt__['shadow.info'](name)
if spost['max'] != maxdays:
ret['comment'] = 'User {0} created but failed to set' \
' maximum days to' \
' {1}'.format(name, maxdays)
ret['result'] = False
ret['changes']['maxdays'] = maxdays
if inactdays:
__salt__['shadow.set_inactdays'](name, inactdays)
spost = __salt__['shadow.info'](name)
if spost['inact'] != inactdays:
ret['comment'] = 'User {0} created but failed to set' \
' inactive days to' \
' {1}'.format(name, inactdays)
ret['result'] = False
ret['changes']['inactdays'] = inactdays
if warndays:
__salt__['shadow.set_warndays'](name, warndays)
spost = __salt__['shadow.info'](name)
if spost['warn'] != warndays:
ret['comment'] = 'User {0} created but failed to set' \
' warn days to' \
' {1}'.format(name, warndays)
ret['result'] = False
ret['changes']['warndays'] = warndays
if expire:
__salt__['shadow.set_expire'](name, expire)
spost = __salt__['shadow.info'](name)
if spost['expire'] != expire:
ret['comment'] = 'User {0} created but failed to set' \
' expire days to' \
' {1}'.format(name, expire)
ret['result'] = False
ret['changes']['expire'] = expire
else:
ret['comment'] = 'Failed to create new user {0}'.format(name)
ret['result'] = False
return ret
def absent(name, purge=False, force=False):
'''
Ensure that the named user is absent
name
The name of the user to remove
purge
Set purge to True to delete all of the user's files as well as the user,
Default is ``False``.
force
If the user is logged in, the absent state will fail. Set the force
option to True to remove the user even if they are logged in. Not
supported in FreeBSD and Solaris, Default is ``False``.
'''
ret = {'name': name,
'changes': {},
'result': True,
'comment': ''}
lusr = __salt__['user.info'](name)
if lusr:
# The user is present, make it not present
if __opts__['test']:
ret['result'] = None
ret['comment'] = 'User {0} set for removal'.format(name)
return ret
beforegroups = set(salt.utils.get_group_list(name))
ret['result'] = __salt__['user.delete'](name, purge, force)
aftergroups = set([g for g in beforegroups if __salt__['group.info'](g)])
if ret['result']:
ret['changes'] = {}
for g in beforegroups - aftergroups:
ret['changes']['{0} group'.format(g)] = 'removed'
ret['changes'][name] = 'removed'
ret['comment'] = 'Removed user {0}'.format(name)
else:
ret['result'] = False
ret['comment'] = 'Failed to remove user {0}'.format(name)
return ret
ret['comment'] = 'User {0} is not present'.format(name)
return ret
|
py | b4171986127c186e9e41206e618acecc2394da45 | """Command line interface for ccollab2eeplatform."""
from ccollab2eeplatform.cli import main
if __name__ == "__main__":
main()
|
py | b4171997b2eada0e4c49306edd4df97ef21a65d8 | import logging
from asyncio import sleep
from random import randint
from src.helpers.queue import SCRAPPING_LINK_TOPIC_KAFKA, app
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
@app.agent(SCRAPPING_LINK_TOPIC_KAFKA)
async def execute_job_agent(stream):
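    # noack() disables automatic acknowledgement; each event is acked manually
    # below only after simulate_scrapping() has finished, so unprocessed links
    # are not marked as consumed.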
async for event in stream.noack().events():
await simulate_scrapping(event.value)
event.ack()
async def simulate_scrapping(link_obj):
time_to_sleep = randint(10, 30) / 10
await sleep(time_to_sleep)
logger.info(f"The link {link_obj['link']} was visited and scrapped!")
|
py | b4171a47cef91454d77349c34e182b8dfee8bdc8 | # exported from PySB model 'model'
from pysb import Model, Monomer, Parameter, Expression, Compartment, Rule, Observable, Initial, MatchOnce, Annotation, ANY, WILD
Model()
Monomer('Ligand', ['Receptor'])
Monomer('ParpU', ['C3A'])
Monomer('C8A', ['BidU', 'C3pro'])
Monomer('SmacM', ['BaxA'])
Monomer('BaxM', ['BidM', 'BaxA'])
Monomer('Apop', ['C3pro', 'Xiap'])
Monomer('Fadd', ['Receptor', 'C8pro'])
Monomer('SmacC', ['Xiap'])
Monomer('ParpC')
Monomer('Xiap', ['SmacC', 'Apop', 'C3A'])
Monomer('C9')
Monomer('C3ub')
Monomer('C8pro', ['Fadd', 'C6A'])
Monomer('Bcl2', ['BidM', 'BaxA'])
Monomer('C3pro', ['Apop', 'C8A'])
Monomer('CytoCM', ['BaxA'])
Monomer('CytoCC')
Monomer('BaxA', ['BaxM', 'Bcl2', 'BaxA_1', 'BaxA_2', 'SmacM', 'CytoCM'])
Monomer('ApafI')
Monomer('BidU', ['C8A'])
Monomer('BidT')
Monomer('C3A', ['Xiap', 'ParpU', 'C6pro'])
Monomer('ApafA')
Monomer('BidM', ['BaxM', 'Bcl2'])
Monomer('Receptor', ['Ligand', 'Fadd'])
Monomer('C6A', ['C8pro'])
Monomer('C6pro', ['C3A'])
Parameter('bind_0_Ligand_binder_Receptor_binder_target_2kf', 1.0)
Parameter('bind_0_Ligand_binder_Receptor_binder_target_1kr', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_2kf', 1.0)
Parameter('bind_0_Receptor_binder_Fadd_binder_target_1kr', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf', 1.0)
Parameter('substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr', 1.0)
Parameter('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf', 1.0)
Parameter('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf', 1.0)
Parameter('inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf', 1.0)
Parameter('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf', 1.0)
Parameter('inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf', 1.0)
Parameter('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr', 1.0)
Parameter('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kf', 1.0)
Parameter('equilibration_0_BidT_equil_a_BidM_equil_b_1kr', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf', 1.0)
Parameter('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr', 1.0)
Parameter('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf', 1.0)
Parameter('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr', 1.0)
Parameter('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf', 1.0)
Parameter('inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr', 1.0)
Parameter('pore_formation_0_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_0_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_1_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_1_BaxA_pore_1kr', 1.0)
Parameter('pore_formation_2_BaxA_pore_2kf', 1.0)
Parameter('pore_formation_2_BaxA_pore_1kr', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf', 1.0)
Parameter('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr', 1.0)
Parameter('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf', 1.0)
Parameter('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr', 1.0)
Parameter('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf', 1.0)
Parameter('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr', 1.0)
Parameter('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf', 1.0)
Parameter('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr', 1.0)
Parameter('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc', 1.0)
Parameter('Ligand_0', 1000.0)
Parameter('ParpU_0', 1000000.0)
Parameter('C8A_0', 0.0)
Parameter('SmacM_0', 100000.0)
Parameter('BaxM_0', 40000.0)
Parameter('Apop_0', 0.0)
Parameter('Fadd_0', 130000.0)
Parameter('SmacC_0', 0.0)
Parameter('ParpC_0', 0.0)
Parameter('Xiap_0', 7500.0)
Parameter('C9_0', 100000.0)
Parameter('C3ub_0', 0.0)
Parameter('C8pro_0', 130000.0)
Parameter('Bcl2_0', 70000.0)
Parameter('C3pro_0', 21000.0)
Parameter('CytoCM_0', 500000.0)
Parameter('CytoCC_0', 0.0)
Parameter('BaxA_0', 0.0)
Parameter('ApafI_0', 100000.0)
Parameter('BidU_0', 171000.0)
Parameter('BidT_0', 0.0)
Parameter('C3A_0', 0.0)
Parameter('ApafA_0', 0.0)
Parameter('BidM_0', 0.0)
Parameter('Receptor_0', 100.0)
Parameter('C6A_0', 0.0)
Parameter('C6pro_0', 100.0)
Observable('Ligand_obs', Ligand())
Observable('ParpU_obs', ParpU())
Observable('C8A_obs', C8A())
Observable('SmacM_obs', SmacM())
Observable('BaxM_obs', BaxM())
Observable('Apop_obs', Apop())
Observable('Fadd_obs', Fadd())
Observable('SmacC_obs', SmacC())
Observable('ParpC_obs', ParpC())
Observable('Xiap_obs', Xiap())
Observable('C9_obs', C9())
Observable('C3ub_obs', C3ub())
Observable('C8pro_obs', C8pro())
Observable('Bcl2_obs', Bcl2())
Observable('C3pro_obs', C3pro())
Observable('CytoCM_obs', CytoCM())
Observable('CytoCC_obs', CytoCC())
Observable('BaxA_obs', BaxA())
Observable('ApafI_obs', ApafI())
Observable('BidU_obs', BidU())
Observable('BidT_obs', BidT())
Observable('C3A_obs', C3A())
Observable('ApafA_obs', ApafA())
Observable('BidM_obs', BidM())
Observable('Receptor_obs', Receptor())
Observable('C6A_obs', C6A())
Observable('C6pro_obs', C6pro())
Rule('bind_0_Ligand_binder_Receptor_binder_target', Ligand(Receptor=None) + Receptor(Ligand=None, Fadd=None) | Ligand(Receptor=1) % Receptor(Ligand=1, Fadd=None), bind_0_Ligand_binder_Receptor_binder_target_2kf, bind_0_Ligand_binder_Receptor_binder_target_1kr)
Rule('bind_0_Receptor_binder_Fadd_binder_target', Receptor(Ligand=ANY, Fadd=None) + Fadd(Receptor=None, C8pro=None) | Receptor(Ligand=ANY, Fadd=1) % Fadd(Receptor=1, C8pro=None), bind_0_Receptor_binder_Fadd_binder_target_2kf, bind_0_Receptor_binder_Fadd_binder_target_1kr)
Rule('substrate_binding_0_Fadd_catalyzer_C8pro_substrate', Fadd(Receptor=ANY, C8pro=None) + C8pro(Fadd=None, C6A=None) | Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None), substrate_binding_0_Fadd_catalyzer_C8pro_substrate_2kf, substrate_binding_0_Fadd_catalyzer_C8pro_substrate_1kr)
Rule('catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product', Fadd(Receptor=ANY, C8pro=1) % C8pro(Fadd=1, C6A=None) >> Fadd(Receptor=ANY, C8pro=None) + C8A(BidU=None, C3pro=None), catalytic_step_0_Fadd_catalyzer_C8pro_substrate_C8A_product_1kc)
Rule('catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=None, C3pro=None) + BidU(C8A=None) | C8A(BidU=1, C3pro=None) % BidU(C8A=1), catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_2kf, catalysis_0_C8A_catalyzer_BidU_substrate_BidT_product_1kr)
Rule('catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product', C8A(BidU=1, C3pro=None) % BidU(C8A=1) >> C8A(BidU=None, C3pro=None) + BidT(), catalysis_1_C8A_catalyzer_BidU_substrate_BidT_product_1kc)
Rule('conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex', ApafI() + CytoCC() | ApafA(), conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_2kf, conversion_0_CytoCC_subunit_d_ApafI_subunit_c_ApafA_complex_1kr)
Rule('inhibition_0_SmacC_inhibitor_Xiap_inh_target', SmacC(Xiap=None) + Xiap(SmacC=None, Apop=None, C3A=None) | SmacC(Xiap=1) % Xiap(SmacC=1, Apop=None, C3A=None), inhibition_0_SmacC_inhibitor_Xiap_inh_target_2kf, inhibition_0_SmacC_inhibitor_Xiap_inh_target_1kr)
Rule('conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex', ApafA() + C9() | Apop(C3pro=None, Xiap=None), conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_2kf, conversion_0_C9_subunit_d_ApafA_subunit_c_Apop_complex_1kr)
Rule('catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=None, Xiap=None) + C3pro(Apop=None, C8A=None) | Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None), catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_Apop_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product', Apop(C3pro=1, Xiap=None) % C3pro(Apop=1, C8A=None) >> Apop(C3pro=None, Xiap=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_Apop_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('inhibition_0_Xiap_inhibitor_Apop_inh_target', Xiap(SmacC=None, Apop=None, C3A=None) + Apop(C3pro=None, Xiap=None) | Xiap(SmacC=None, Apop=1, C3A=None) % Apop(C3pro=None, Xiap=1), inhibition_0_Xiap_inhibitor_Apop_inh_target_2kf, inhibition_0_Xiap_inhibitor_Apop_inh_target_1kr)
Rule('catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=None) + C3A(Xiap=None, ParpU=None, C6pro=None) | Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None), catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_2kf, catalysis_0_Xiap_catalyzer_C3A_substrate_C3ub_product_1kr)
Rule('catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product', Xiap(SmacC=None, Apop=None, C3A=1) % C3A(Xiap=1, ParpU=None, C6pro=None) >> Xiap(SmacC=None, Apop=None, C3A=None) + C3ub(), catalysis_1_Xiap_catalyzer_C3A_substrate_C3ub_product_1kc)
Rule('catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=None, C6pro=None) + ParpU(C3A=None) | C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1), catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_2kf, catalysis_0_C3A_catalyzer_ParpU_substrate_ParpC_product_1kr)
Rule('catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product', C3A(Xiap=None, ParpU=1, C6pro=None) % ParpU(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + ParpC(), catalysis_1_C3A_catalyzer_ParpU_substrate_ParpC_product_1kc)
Rule('equilibration_0_BidT_equil_a_BidM_equil_b', BidT() | BidM(BaxM=None, Bcl2=None), equilibration_0_BidT_equil_a_BidM_equil_b_1kf, equilibration_0_BidT_equil_a_BidM_equil_b_1kr)
Rule('catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=None, Bcl2=None) + BaxM(BidM=None, BaxA=None) | BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None), catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_2kf, catalysis_0_BidM_catalyzer_BaxM_substrate_BaxA_product_1kr)
Rule('catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product', BidM(BaxM=1, Bcl2=None) % BaxM(BidM=1, BaxA=None) >> BidM(BaxM=None, Bcl2=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), catalysis_1_BidM_catalyzer_BaxM_substrate_BaxA_product_1kc)
Rule('self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxM(BidM=None, BaxA=None) | BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1), self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_2kf, self_catalyze_0_BaxA_self_catalyzer_BaxM_self_substrate_1kr)
Rule('self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate', BaxA(BaxM=1, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) % BaxM(BidM=None, BaxA=1) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), self_catalyze_1_BaxA_self_catalyzer_BaxM_self_substrate_1kc)
Rule('inhibition_0_Bcl2_inhibitor_BidM_inh_target', Bcl2(BidM=None, BaxA=None) + BidM(BaxM=None, Bcl2=None) | Bcl2(BidM=1, BaxA=None) % BidM(BaxM=None, Bcl2=1), inhibition_0_Bcl2_inhibitor_BidM_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BidM_inh_target_1kr)
Rule('inhibition_0_Bcl2_inhibitor_BaxA_inh_target', Bcl2(BidM=None, BaxA=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | Bcl2(BidM=None, BaxA=1) % BaxA(BaxM=None, Bcl2=1, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), inhibition_0_Bcl2_inhibitor_BaxA_inh_target_2kf, inhibition_0_Bcl2_inhibitor_BaxA_inh_target_1kr)
Rule('pore_formation_0_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None), pore_formation_0_BaxA_pore_2kf, pore_formation_0_BaxA_pore_1kr)
Rule('pore_formation_1_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=None, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None), pore_formation_1_BaxA_pore_2kf, pore_formation_1_BaxA_pore_1kr)
Rule('pore_formation_2_BaxA_pore', BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None) + BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None), pore_formation_2_BaxA_pore_2kf, pore_formation_2_BaxA_pore_1kr)
Rule('transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5), transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_2kf, transport_0_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=5, CytoCM=None) % SmacM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + SmacC(Xiap=None), transport_1_BaxA_pore_SmacM_cargo_M_SmacC_cargo_C_1kc)
Rule('transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCM(BaxA=None) | BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5), transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_2kf, transport_0_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kr)
Rule('transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C', BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=5) % CytoCM(BaxA=5) >> BaxA(BaxM=None, Bcl2=None, BaxA_1=4, BaxA_2=1, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=1, BaxA_2=2, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=2, BaxA_2=3, SmacM=None, CytoCM=None) % BaxA(BaxM=None, Bcl2=None, BaxA_1=3, BaxA_2=4, SmacM=None, CytoCM=None) + CytoCC(), transport_1_BaxA_pore_CytoCM_cargo_M_CytoCC_cargo_C_1kc)
Rule('catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=None) + C3pro(Apop=None, C8A=None) | C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1), catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_2kf, catalysis_0_C8A_catalyzer_C3pro_substrate_C3A_product_1kr)
Rule('catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product', C8A(BidU=None, C3pro=1) % C3pro(Apop=None, C8A=1) >> C8A(BidU=None, C3pro=None) + C3A(Xiap=None, ParpU=None, C6pro=None), catalysis_1_C8A_catalyzer_C3pro_substrate_C3A_product_1kc)
Rule('catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=None) + C6pro(C3A=None) | C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1), catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_2kf, catalysis_0_C3A_catalyzer_C6pro_substrate_C6A_product_1kr)
Rule('catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product', C3A(Xiap=None, ParpU=None, C6pro=1) % C6pro(C3A=1) >> C3A(Xiap=None, ParpU=None, C6pro=None) + C6A(C8pro=None), catalysis_1_C3A_catalyzer_C6pro_substrate_C6A_product_1kc)
Rule('catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=None) + C8pro(Fadd=None, C6A=None) | C6A(C8pro=1) % C8pro(Fadd=None, C6A=1), catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_2kf, catalysis_0_C6A_catalyzer_C8pro_substrate_C8A_product_1kr)
Rule('catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product', C6A(C8pro=1) % C8pro(Fadd=None, C6A=1) >> C6A(C8pro=None) + C8A(BidU=None, C3pro=None), catalysis_1_C6A_catalyzer_C8pro_substrate_C8A_product_1kc)
Initial(Ligand(Receptor=None), Ligand_0)
Initial(ParpU(C3A=None), ParpU_0)
Initial(C8A(BidU=None, C3pro=None), C8A_0)
Initial(SmacM(BaxA=None), SmacM_0)
Initial(BaxM(BidM=None, BaxA=None), BaxM_0)
Initial(Apop(C3pro=None, Xiap=None), Apop_0)
Initial(Fadd(Receptor=None, C8pro=None), Fadd_0)
Initial(SmacC(Xiap=None), SmacC_0)
Initial(ParpC(), ParpC_0)
Initial(Xiap(SmacC=None, Apop=None, C3A=None), Xiap_0)
Initial(C9(), C9_0)
Initial(C3ub(), C3ub_0)
Initial(C8pro(Fadd=None, C6A=None), C8pro_0)
Initial(Bcl2(BidM=None, BaxA=None), Bcl2_0)
Initial(C3pro(Apop=None, C8A=None), C3pro_0)
Initial(CytoCM(BaxA=None), CytoCM_0)
Initial(CytoCC(), CytoCC_0)
Initial(BaxA(BaxM=None, Bcl2=None, BaxA_1=None, BaxA_2=None, SmacM=None, CytoCM=None), BaxA_0)
Initial(ApafI(), ApafI_0)
Initial(BidU(C8A=None), BidU_0)
Initial(BidT(), BidT_0)
Initial(C3A(Xiap=None, ParpU=None, C6pro=None), C3A_0)
Initial(ApafA(), ApafA_0)
Initial(BidM(BaxM=None, Bcl2=None), BidM_0)
Initial(Receptor(Ligand=None, Fadd=None), Receptor_0)
Initial(C6A(C8pro=None), C6A_0)
Initial(C6pro(C3A=None), C6pro_0)
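
# A minimal simulation sketch (not part of the exported model; assumes the
# SciPy-backed PySB simulator is installed). Every rate constant above is a
# placeholder value of 1.0, so any trajectory obtained this way is purely
# illustrative.
if __name__ == '__main__':
    import numpy as np
    from pysb.simulator import ScipyOdeSimulator

    tspan = np.linspace(0, 20000, 201)
    result = ScipyOdeSimulator(model, tspan=tspan).run()
    # Observables declared above are exposed as named columns of the result.
    print(result.observables['ParpC_obs'][-1])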
|
py | b4171ab166c16a2a45205bdfe599ca63ae4e46fa | # -*- coding: utf-8 -*-
# Generated by Django 1.11.27 on 2020-01-20 07:55
from __future__ import unicode_literals
from django.db import migrations, models
class Migration(migrations.Migration):
dependencies = [("flowcells", "0016_auto_20190926_1337")]
operations = [
migrations.AddIndex(
model_name="flowcell",
index=models.Index(
fields=["project", "status_sequencing"], name="flowcells_f_project_646598_idx"
),
),
migrations.AddIndex(
model_name="flowcell",
index=models.Index(
fields=["project", "status_conversion"], name="flowcells_f_project_d318bc_idx"
),
),
migrations.AddIndex(
model_name="flowcell",
index=models.Index(
fields=["project", "status_delivery"], name="flowcells_f_project_9bc0a6_idx"
),
),
]
|
py | b4171ac145a35bf4fdd8f0c64351dc148cd43eb6 | from flask import Flask, request
from PIL import Image
from io import BytesIO
from concurrent.futures import ThreadPoolExecutor
import pdf
import sender
app = Flask(__name__)
executor = ThreadPoolExecutor(2)
@app.route('/webhooks/tk', methods=["POST"])
def receive_tk():
data = request.values.to_dict()
sign = request.files['Sign']
in_memory_file = BytesIO()
sign.save(in_memory_file)
in_memory_file.seek(0)
signimg = Image.open(in_memory_file)
executor.submit(fill_and_send_tk, data, signimg)
return 'Hello, World!'
def fill_and_send_tk(data, signimg):
pdf.fill_tk(data, signimg)
sender.sender_service(data["Email"] + "/tk_filled.pdf")
@app.route('/webhooks/tk_sepa', methods=["POST"])
def receive_tk_sepa():
data = request.values.to_dict()
sign = request.files['Sign']
in_memory_file = BytesIO()
sign.save(in_memory_file)
in_memory_file.seek(0)
signimg = Image.open(in_memory_file)
executor.submit(fill_and_send_tk_sepa, data, signimg)
return 'Hello, World!'
def fill_and_send_tk_sepa(data, signimg):
pdf.fill_tk_sepa(data, signimg)
sender.sender_service(data["Email"] + "/tk_filled.pdf")
@app.route('/webhooks/dak', methods=["POST"])
def receive_dak():
data = request.values.to_dict()
executor.submit(fill_and_send_dak, data)
return 'Hello, World!'
def fill_and_send_dak(data):
pdf.fill_dak(data)
sender.sender_service(data["Email"] + "/tk_filled.pdf")
@app.route('/webhooks/dak_sepa', methods=["POST"])
def receive_dak_sepa():
data = request.values.to_dict()
sign = request.files['Sign']
in_memory_file = BytesIO()
sign.save(in_memory_file)
in_memory_file.seek(0)
signimg = Image.open(in_memory_file)
executor.submit(fill_and_send_dak_sepa, data, signimg)
return 'Hello, World!'
def fill_and_send_dak_sepa(data, signimg):
pdf.fill_dak_sepa(data, signimg)
sender.sender_service(data["Email"] + "/tk_filled.pdf")
if __name__ == '__main__':
app.run(host='0.0.0.0', port=666)
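
# Local smoke-test sketch (illustrative; only the 'Email' field is shown here,
# the real forms post additional fields consumed by pdf.fill_*):
#
#   import requests
#   requests.post("http://localhost:666/webhooks/dak",
#                 data={"Email": "someone@example.com"})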
|
py | b4171af1d9fa567a21a44bc9b2bb02c9a1af637d | from flarestack.cosmo.icecube_diffuse_flux import (
get_diffuse_flux_at_100TeV,
get_diffuse_flux_at_1GeV,
get_diffuse_flux_contour,
plot_diffuse_flux,
contours,
get_diffuse_flux,
)
from flarestack.cosmo.rates import get_rate
from flarestack.cosmo.neutrino_cosmology import (
calculate_transient_cosmology,
define_cosmology_functions,
cumulative_z,
)
from flarestack.cosmo.simulate_catalogue import simulate_transient_catalogue
|
py | b4171ebc6cb9378ba2a432d2bd4e230e84f85d2e | import os, sys, csv, matplotlib
import numpy as np
from matplotlib import pyplot as plt
from gstools import vario_estimate_unstructured, vario_estimate_structured, Exponential, SRF
from gstools.covmodel.plot import plot_variogram
from datetime import datetime
matplotlib.use('Agg')
import webbrowser
from PyQt5.QtCore import *
from PyQt5.QtGui import *
from PyQt5.QtWidgets import *
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.backends.backend_qt5agg import FigureCanvasQTAgg as FigureCanvas
from matplotlib.backends.backend_qt5agg import NavigationToolbar2QT as NavigationToolbar
from lib.Canvas import *
from lib.channels import channel_splitter
from lib.cropping import crop_data
from lib.rotation import rotate_data
from lib.downsample import downsample_data
from lib.variogram import *
from lib.SRF import *
from gstools import vario_estimate_unstructured, vario_estimate_structured, Exponential, SRF
from gstools.covmodel.plot import plot_variogram
def help_button_vario():
webbrowser.open("https://github.com/MaxxHolmes/HetKit")
def loadVTK_button(self, mainUI):
vtkReply = QMessageBox.question(self, 'Message - VTK', 'Are you sure you would like to load a .vtk file?\n'
+ 'This will overwrite any current datafiles.')
if vtkReply == QMessageBox.Yes:
vtkOptions = QFileDialog.Options()
vtkOptions |= QFileDialog.DontUseNativeDialog
vtkfname, _ = QFileDialog.getOpenFileName(self, "Open File", "", "All Files (*)", options = vtkOptions)
mainUI.LoadVTKPath = vtkfname
mainUI.vtkData, mainUI.vtkGrid = vtk2numpy(mainUI.LoadVTKPath)
mainUI.vtkMode = 1
else:
pass
def prepare_data(mainUI):
# Channel Splitting & Outputs
mainUI.dataChannel = channel_splitter(mainUI.dataTemp, mainUI.DB[mainUI.ID]["Channel"])
# Rotation & Outputs
mainUI.dataRotated = rotate_data(mainUI.dataChannel, mainUI.DB[mainUI.ID]["Rotation Angle"])
if mainUI.OPRotate == 1:
output_slice_data(mainUI, mainUI.dataRotated,
"Analysis/" + mainUI.ID + "/Slices/Rotated_" + str(mainUI.processCount) + ".vtk",
"Analysis/" + mainUI.ID + "/Slices/Rotated_" + str(mainUI.processCount) + ".dat")
if mainUI.OPHist == 1:
output_hist_data(mainUI, mainUI.dataRotated,
"Analysis/" + mainUI.ID + "/Histograms/Rotated_" + str(mainUI.processCount) + ".png",
"Analysis/" + mainUI.ID + "/Histograms/Rotated_" + str(mainUI.processCount) + ".dat")
# Cropping & Outputs
mainUI.dataCropped = crop_data(mainUI.dataRotated, mainUI.DB[mainUI.ID]["Crop X1"], mainUI.DB[mainUI.ID]["Crop X2"],
mainUI.DB[mainUI.ID]["Crop Y1"], mainUI.DB[mainUI.ID]["Crop Y2"])
if mainUI.OPCropped == 1:
output_slice_data(mainUI, mainUI.dataCropped,
"Analysis/" + mainUI.ID + "/Slices/Cropped_" + str(mainUI.processCount) + ".vtk",
"Analysis/" + mainUI.ID + "/Slices/Cropped_" + str(mainUI.processCount) + ".dat")
if mainUI.OPHist == 1:
output_hist_data(mainUI, mainUI.dataCropped,
"Analysis/" + mainUI.ID + "/Histograms/Cropped_" + str(mainUI.processCount) + ".png",
"Analysis/" + mainUI.ID + "/Histograms/Cropped_" + str(mainUI.processCount) + ".dat")
# Downsampling & Outputs
mainUI.dataDownsampled = downsample_data(mainUI.dataCropped, mainUI.DB[mainUI.ID]["Downsample X"],
mainUI.DB[mainUI.ID]["Downsample Y"],
mainUI.DB[mainUI.ID]["Scale X"],
mainUI.DB[mainUI.ID]["Scale Y"])
if mainUI.OPDownsample == 1:
output_slice_data(mainUI, mainUI.dataDownsampled,
"Analysis/" + mainUI.ID + "/Slices/Downsampled_" + str(mainUI.processCount) + ".vtk",
"Analysis/" + mainUI.ID + "/Slices/Downsampled_" + str(mainUI.processCount) + ".dat")
if mainUI.OPHist == 1:
output_hist_data(mainUI, mainUI.dataDownsampled,
"Analysis/" + mainUI.ID + "/Histograms/Downsampled_" + str(mainUI.processCount) + ".png",
"Analysis/" + mainUI.ID + "/Histograms/Downsampled_" + str(mainUI.processCount) + ".dat")
def output_slice_data(mainUI, data, vtkname, datname):
X = data.shape[0]
Y = data.shape[1]
try:
Z = data.shape[2]
except IndexError:
data = np.reshape(data, (data.shape[0], data.shape[1], 1))
Z = data.shape[2]
with open(vtkname, "w") as vtkfile:
with open(datname, "w") as datfile:
vtkfile.write("# vtk DataFile Version 3.0\n"
+ "vtk output\n"
+ "ASCII\n"
+ "DATASET STRUCTURED_POINTS\n"
+ "DIMENSIONS {} {} {}\n".format(X, Y, Z)
+ "SPACING 1 1 1\n"
+ "ORIGIN 0 0 0\n"
+ "POINT_DATA {}\n".format(X*Y*Z)
+ "SCALARS Image float 1\n"
+ "LOOKUP_TABLE default\n")
for k in range(Z):
for j in range(Y):
for i in range(X):
vtkfile.write(str(data[i][j][k]) + " ")
datfile.write(str(data[i][j][k]) + " ")
vtkfile.write("\n")
datfile.write("\n")
vtkfile.write("\n")
datfile.write("\n")
vtkfile.close()
datfile.close()
def output_hist_data(mainUI, data, hist_name, hist_dat_name):
data = np.asarray(data)
hist, bins = np.histogram(data.flatten(), 20)
figure = plt.figure(figsize = (10, 10))
plt.bar(bins[:-1], hist, width = (bins[-1]-bins[-2]), align = "edge")
plt.xlabel("Pixel Value (Intensity)")
plt.ylabel("Count")
plt.savefig(str(hist_name))
plt.close()
for q in range(len(bins)-1):
if q == 0:
hist_data = [bins[q+1], hist[q]]
else:
hist_new = [bins[q+1], hist[q]]
hist_data = np.vstack([hist_data, hist_new])
hist_data = np.asarray(hist_data)
with open(str(hist_dat_name), "w") as histfile:
for y in range(hist_data.shape[1]):
for x in range(hist_data.shape[0]):
histfile.write(str(hist_data[x][y]) + " ")
histfile.write("\n")
histfile.close()
def output_checkboxes(mainUI):
mainUI.OPRotate = 0
mainUI.OPCropped = 0
mainUI.OPDownsample = 0
mainUI.OPVario = 0
mainUI.OPHist = 0
if mainUI.checkOPDefault.isChecked() == True:
mainUI.OPRotate = 1
mainUI.OPCropped = 1
mainUI.OPDownsample = 1
mainUI.OPVario = 1
mainUI.OPHist = 1
if mainUI.checkOPRotate.isChecked() == True:
mainUI.OPRotate = 1
if mainUI.checkOPCropped.isChecked() == True:
mainUI.OPCropped = 1
if mainUI.checkOPDownsample.isChecked() == True:
mainUI.OPDownsample = 1
if mainUI.checkOPVario.isChecked() == True:
mainUI.OPVario = 1
if mainUI.checkOPHist.isChecked() == True:
mainUI.OPHist = 1
def analysis_button(self, mainUI, canvasVario):
mainUI.processCount = 0
mainUI.progressVario.setValue(0)
output_checkboxes(mainUI)
# Check for loaded VTK settings
tempArray = []
if mainUI.vtkMode == 1:
mainUI.dataArray = mainUI.vtkData
else:
for image in mainUI.dataPathList:
img = Image.open(image)
img.load()
mainUI.dataTemp = np.asarray(img, dtype = "int32")
prepare_data(mainUI)
tempArray.append(mainUI.dataDownsampled)
mainUI.dataArray = np.dstack(tempArray)
mainUI.processCount += 1
mainUI.dataArray = normalise_dataset(mainUI.dataArray)
mainUI.dataArray = zeroth_filter(mainUI.dataArray)
if mainUI.dataArray.ndim < 3:
mainUI.dataArray = np.reshape(mainUI.dataArray, (mainUI.dataArray[0], mainUI.dataArray[1], 1))
# Variogram Preparation
if mainUI.vtkMode == 1:
mainUI.x_s, mainUI.y_s, mainUI.z_s = create_structured_grid_vtk(mainUI.vtkGrid)
else:
mainUI.x_s, mainUI.y_s, mainUI.z_s = create_structured_grid(mainUI.dataArray)
X, Y, Z = len(mainUI.x_s), len(mainUI.y_s), len(mainUI.z_s)
mainUI.dataArrayLog = integration3D(mainUI)
x_u, y_u = create_unstructured_grid(mainUI.x_s, mainUI.y_s)
bin_space, bin_no = set_bins(mainUI)
total_cells = len(mainUI.x_s) * len(mainUI.y_s)
total_itrs = len(bin_space) * len(bin_no)
progress = 10
progress_full = 10 + (total_itrs * 3) + 1
mainUI.progressVario.setValue(np.rint((progress/progress_full) * 100))
# Variogram Fitting Loop
progress_full = 10 + (total_itrs * 3) + 1
count = 0
for i in range(len(bin_no)):
for j in range(len(bin_space)):
bins = np.linspace(0, bin_space[j], bin_no[i])
# Unstructured Fit
mainUI.bin_center, mainUI.gamma = vario_estimate_unstructured((x_u, y_u),
mainUI.dataArrayLog.flatten(),
bins,
sampling_size = total_cells)
mainUI.fit_model = Exponential(dim = 2)
mainUI.fit_model.fit_variogram(mainUI.bin_center, mainUI.gamma, nugget = False)
# Update progress bar
progress += 1
mainUI.progressVario.setValue(np.rint((progress/progress_full) * 100))
# Structured Fit
mainUI.gamma_x = vario_estimate_structured(mainUI.dataArrayLog, direction = 'x')
mainUI.gamma_y = vario_estimate_structured(mainUI.dataArrayLog, direction = 'y')
mainUI.fit_model_x = Exponential(dim = 2)
mainUI.fit_model_y = Exponential(dim = 2)
mainUI.fit_model_x.fit_variogram(mainUI.x_s, mainUI.gamma_x, nugget = False)
mainUI.fit_model_y.fit_variogram(mainUI.y_s, mainUI.gamma_y, nugget = False)
# Update progress bar
progress += 1
mainUI.progressVario.setValue(np.rint((progress/progress_full) * 100))
# Output Variograms
if mainUI.OPVario == 1:
output_variogram(mainUI, "Analysis/" + mainUI.ID + "/Variograms/Plot_BS_" + str(bin_space[j]) + "_BC_" + str(bin_no[i]) + ".png")
if count == 0:
mainUI.varioParams = extract_variogram_params(mainUI, bin_space[j], bin_no[i])
varioTemp = extract_variogram_params(mainUI, bin_space[j], bin_no[i])
mainUI.varioParams = np.vstack([mainUI.varioParams, varioTemp])
for x in range(len(mainUI.bin_center)):
if x == 0:
out_data = [mainUI.bin_center[x], mainUI.gamma[x]]
else:
out_new = [mainUI.bin_center[x], mainUI.gamma[x]]
out_data = np.vstack([out_data, out_new])
for x in range(len(mainUI.x_s)):
if x == 0:
outx_data = [mainUI.x_s[x], mainUI.gamma_x[x]]
else:
outx_new = [mainUI.x_s[x], mainUI.gamma_x[x]]
outx_data = np.vstack([outx_data, outx_new])
for x in range(len(mainUI.y_s)):
if x == 0:
outy_data = [mainUI.y_s[x], mainUI.gamma_y[x]]
else:
outy_new = [mainUI.y_s[x], mainUI.gamma_y[x]]
outy_data = np.vstack([outy_data, outy_new])
structured_outname = "Analysis/" + mainUI.ID + "/Variograms/Data_BS_" + str(bin_space[j]) + "_BC_" + str(bin_no[i]) + ".dat"
with open(structured_outname, 'w') as outfile:
for m in range(len(mainUI.bin_center)):
for n in range(len(out_data[m])):
outfile.write(str(out_data[m][n]) + " ")
outfile.write("\n")
outfile.close()
structured_y = "Analysis/" + mainUI.ID + "/Variograms/Data_X.dat"
structured_x = "Analysis/" + mainUI.ID + "/Variograms/Data_Y.dat"
with open(structured_x, "w") as outfile:
for m in range(len(mainUI.x_s)):
for n in range(len(outx_data[m])):
outfile.write(str(outx_data[m][n]) + " ")
outfile.write("\n")
outfile.close()
with open(structured_y, "w") as outfile:
for m in range(len(mainUI.y_s)):
for n in range(len(outy_data[m])):
outfile.write(str(outy_data[m][n]) + " ")
outfile.write("\n")
outfile.close()
params_outname = "Analysis/" + mainUI.ID + "Variogram_Results.dat"
with open(params_outname, "w") as outfile:
for m in range(len(mainUI.varioParams)):
for n in range(len(mainUI.varioParams[0])):
outfile.write(str(mainUI.varioParams[m][n]) + " ")
outfile.write("\n")
outfile.close()
# Update progress bar
progress += 1
mainUI.progressVario.setValue(np.rint((progress/progress_full) * 100))
count += 1
calculate_best_params(mainUI)
srf = SRF(mainUI.fit_model)
testSRF = srf((mainUI.x_s, mainUI.y_s), mesh_type = "structured")
testSRF = np.reshape(testSRF, (len(mainUI.x_s), len(mainUI.y_s), 1))
output_SRF(testSRF, "SRFs/" + str(mainUI.ID) + "_TestSRF_", len(mainUI.x_s), len(mainUI.y_s), 1)
mainUI.progressVario.setValue(100)
def calculate_best_params(mainUI):
results_output = "Analysis/" + mainUI.ID + "/Full_Results.dat"
with open(results_output, "w") as resultfile:
for m in range(len(mainUI.varioParams)):
for n in range(len(mainUI.varioParams[0])):
resultfile.write(str(mainUI.varioParams[m][n]) + " ")
resultfile.write("\n")
resultfile.close()
fit_var = mainUI.varioParams.T[3].astype(float)
fit_len = mainUI.varioParams.T[4].astype(float)
fit_var_x = mainUI.varioParams.T[5].astype(float)
fit_len_x = mainUI.varioParams.T[6].astype(float)
fit_var_y = mainUI.varioParams.T[7].astype(float)
fit_len_y = mainUI.varioParams.T[8].astype(float)
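    # Average only fits that converged to plausible values: variances above 20
    # and correlation lengths above 50 are treated as failed fits and excluded
    # (hard-coded heuristic thresholds).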
mean_fit_var = np.mean(fit_var[fit_var <= 20])
mean_fit_len = np.mean(fit_len[fit_len <= 50])
mean_fit_var_x = np.mean(fit_var_x[fit_var_x <= 20])
mean_fit_len_x = np.mean(fit_len_x[fit_len_x <= 50])
mean_fit_var_y = np.mean(fit_var_y[fit_var_y <= 20])
mean_fit_len_y = np.mean(fit_len_y[fit_len_y <= 50])
if mainUI.vtkMode == 1:
scaled_fit_var = mean_fit_var
scaled_fit_len = mean_fit_len
        scaled_fit_var_x = mean_fit_var_x
scaled_fit_len_x = mean_fit_len_x
scaled_fit_var_y = mean_fit_var_y
scaled_fit_len_y = mean_fit_len_y
else:
scaled_fit_var = mean_fit_var * np.mean([float(mainUI.DB[mainUI.ID]["Downsample X"]),
float(mainUI.DB[mainUI.ID]["Downsample Y"])])
scaled_fit_len = mean_fit_len * np.mean([float(mainUI.DB[mainUI.ID]["Downsample X"]),
float(mainUI.DB[mainUI.ID]["Downsample Y"])])
scaled_fit_var_x = mean_fit_var_x * float(mainUI.DB[mainUI.ID]["Downsample X"])
scaled_fit_len_x = mean_fit_len_x * float(mainUI.DB[mainUI.ID]["Downsample X"])
scaled_fit_var_y = mean_fit_var_y * float(mainUI.DB[mainUI.ID]["Downsample Y"])
        scaled_fit_len_y = mean_fit_len_y * float(mainUI.DB[mainUI.ID]["Downsample Y"])
output_matrix = [
["Type", "Mean Fit Var", "Mean Fit Len", "Mean Fit Var X", "Mean Fit Len X", "Mean Fit Var Y", "Mean Fit Len Y"],
["Mean Outputs", float(mean_fit_var), float(mean_fit_len), float(mean_fit_var_x), float(mean_fit_len_x),
float(mean_fit_var_y), float(mean_fit_len_y)],
["Scaled Outputs", float(scaled_fit_var), float(scaled_fit_len), float(scaled_fit_var_x), float(scaled_fit_len_x),
float(scaled_fit_var_y), float(scaled_fit_len_y)]]
results_output = "Analysis/" + mainUI.ID + "/Final_Results.dat"
with open(results_output, "w") as resultfile:
for m in range(len(output_matrix)):
for n in range(len(output_matrix[0])):
resultfile.write(str(output_matrix[m][n]) + " ")
resultfile.write("\n")
resultfile.close()
mainUI.AV_Isotropic = scaled_fit_len
mainUI.AV_Transverse = scaled_fit_len_x
mainUI.AV_Longitudinal = scaled_fit_len_y
def analysis_results_button(self, mainUI):
path = os.getcwd()
webbrowser.open('file://' + str(path) + '/Analysis/' + str(mainUI.ID))
    print('file://' + str(path) + '/Analysis/' + str(mainUI.ID))
def vario_continue_button(self, mainUI):
# ------- Incomplete
mainUI.tabWidget.setCurrentIndex(4)
def output_variogram(mainUI, varioname):
plt.figure(figsize = (10, 10))
line, = plt.plot(mainUI.bin_center, mainUI.gamma, label = 'Estimated Variogram (ISO)')
plt.plot(mainUI.bin_center, mainUI.fit_model.variogram(mainUI.bin_center), color = line.get_color(), linestyle = "--", label = 'Exp. Variogram (ISO)')
line, = plt.plot(mainUI.x_s, mainUI.gamma_x, label = 'Estimated Variogram (X)')
plt.plot(mainUI.x_s, mainUI.fit_model_x.variogram(mainUI.x_s), color = line.get_color(), linestyle = "--",
label = 'Exp. Variogram (X)')
line, = plt.plot(mainUI.y_s, mainUI.gamma_y, label = 'Estimated Variogram (Y)')
plt.plot(mainUI.y_s, mainUI.fit_model_y.variogram(mainUI.y_s), color = line.get_color(), linestyle = "--",
label = 'Exp. Variogram (Y)')
plt.legend()
plt.savefig(varioname)
plt.close()
def extract_variogram_params(mainUI, bin_space, bin_no):
info = [mainUI.ID, bin_space, bin_no, float(np.round(mainUI.fit_model.var, 3)),
float(np.round(mainUI.fit_model.len_scale, 3)), float(np.round(mainUI.fit_model_x.var, 3)),
float(np.round(mainUI.fit_model_x.len_scale, 3)), float(np.round(mainUI.fit_model_y.var, 3)),
float(np.round(mainUI.fit_model_y.len_scale, 3))]
return info
|
py | b4171f74bd53beb79ce53837fb3d732d93d38631 | import os
import sys
import struct
import pprint
import matplotlib.pyplot as plt
import pickle
import math
import time
import statistics
import numpy as np
from TraceInc import AutoDict
def timer():
now = time.time()
return now
def comRegularity(tmap):
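    # For each sender kernel, record the set of receiver CTAs that every
    # sending CTA talks to; the score appended per entry is
    # 1 / (number of distinct receiver sets), so 1.0 means all CTAs of that
    # kernel communicate with exactly the same peers.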
kSet = AutoDict()
reg = AutoDict()
recvSet = []
for sk in tmap:
for sit in tmap[sk]:
for cta in tmap[sk][sit]:
if not kSet[sk][sit][cta]:
kSet[sk][sit][cta] = []
for rk in tmap[sk][sit][cta]:
for rcta in sorted(tmap[sk][sit][cta][rk]):
recvSet.append(rcta)
a = frozenset(recvSet.copy())
kSet[sk][sit][cta] = a
recvSet.clear()
for sk in kSet:
reg[sk] = []
for scta in kSet[sk]:
reg[sk].append(1/len(set(kSet[sk][scta].values())))
return reg
pp = pprint.PrettyPrinter(indent=2)
tmap = pickle.load( open(sys.argv[1], "rb"))
reg = comRegularity(tmap)
plt.style.use('ggplot')
for c in reg:
plt.plot(reg[c], alpha=0.75, label=c)
plt.xlabel('CTA')
plt.legend()
plt.ylabel('1/unique sets')
plt.title('Communication Regularity')
path = 'plots/com-regularity/'
file = (sys.argv[1].split('/'))[-1]
file = file.split('.')[0] + '.pdf'
plt.savefig(path+file, papertype='a4', bbox_inches='tight', orientation='landscape')
plt.show()
|
py | b41720aab4678a539c23db9ae973ec4b1632a0ea | from uuid import uuid4
from django.core.validators import MaxValueValidator, MinValueValidator
from django.db import models
from thenewboston.constants.network import ACCEPTED_FEE_CHOICES, MAX_POINT_VALUE, MIN_POINT_VALUE, VERIFY_KEY_LENGTH
class NetworkTransaction(models.Model):
id = models.UUIDField(default=uuid4, editable=False, primary_key=True) # noqa: A003
amount = models.PositiveBigIntegerField(
validators=[
MaxValueValidator(MAX_POINT_VALUE),
MinValueValidator(MIN_POINT_VALUE),
]
)
fee = models.CharField(blank=True, choices=ACCEPTED_FEE_CHOICES, max_length=17)
recipient = models.CharField(max_length=VERIFY_KEY_LENGTH)
class Meta:
abstract = True
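
# NetworkTransaction is abstract, so Django creates no table for it; concrete
# transaction models are expected to subclass it, e.g. (hypothetical name and
# extra field, for illustration only):
#
#   class ExampleTransaction(NetworkTransaction):
#       memo = models.CharField(blank=True, max_length=64)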
|
py | b4172184ec961e59c649bbd00a7a8dda013a6569 | import os
API_KEY = os.environ['AK']
print('python app/joincliSetup.py -ak ' + API_KEY)
os.system('python app/joincliSetup.py -ak {}'.format(API_KEY))
os.system('python app/joincliSetup.py -re')
os.system('python app/joincliServer.py')
|
py | b41721cbc8ae2771a920dc2feb20082d7b2ab145 | from __future__ import division
from math import log, ceil, floor
import os
import re
from subprocess import Popen, PIPE
import sys
from tempfile import TemporaryFile
from warnings import warn
try:
import audioop
except ImportError:
import pyaudioop as audioop
if sys.version_info >= (3, 0):
basestring = str
FRAME_WIDTHS = {
8: 1,
16: 2,
32: 4,
}
ARRAY_TYPES = {
8: "b",
16: "h",
32: "i",
}
ARRAY_RANGES = {
8: (-0x80, 0x7f),
16: (-0x8000, 0x7fff),
32: (-0x80000000, 0x7fffffff),
}
def get_frame_width(bit_depth):
return FRAME_WIDTHS[bit_depth]
def get_array_type(bit_depth, signed=True):
t = ARRAY_TYPES[bit_depth]
if not signed:
t = t.upper()
return t
def get_min_max_value(bit_depth):
return ARRAY_RANGES[bit_depth]
def _fd_or_path_or_tempfile(fd, mode='w+b', tempfile=True):
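    """
    Return a file-like object for fd: open file objects are passed through,
    string (or os.PathLike) paths are opened with the given mode, and a
    TemporaryFile is created when fd is None and tempfile is True.
    """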
if fd is None and tempfile:
fd = TemporaryFile(mode=mode)
if isinstance(fd, basestring):
fd = open(fd, mode=mode)
try:
if isinstance(fd, os.PathLike):
fd = open(fd, mode=mode)
except AttributeError:
# module os has no attribute PathLike, so we're on python < 3.6.
# The protocol we're trying to support doesn't exist, so just pass.
pass
return fd
def db_to_float(db, using_amplitude=True):
"""
    Converts the input db to a float, which represents the equivalent
    ratio in amplitude (the default) or in power when using_amplitude=False.
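
    For example, db_to_float(6.0) is about 1.9953 (i.e. 10 ** (6 / 20)), while
    db_to_float(10.0, using_amplitude=False) is exactly 10.0.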
"""
db = float(db)
if using_amplitude:
return 10 ** (db / 20)
else: # using power
return 10 ** (db / 10)
def ratio_to_db(ratio, val2=None, using_amplitude=True):
"""
    Converts the input ratio to dB, which represents the equivalent level
    difference for the amplitude (default) or power ratio passed in.
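
    For example, ratio_to_db(2.0) is about 6.02 dB (20 * log10(2)), and
    ratio_to_db(2.0, using_amplitude=False) is about 3.01 dB.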
"""
ratio = float(ratio)
# accept 2 values and use the ratio of val1 to val2
if val2 is not None:
ratio = ratio / val2
# special case for multiply-by-zero (convert to silence)
if ratio == 0:
return -float('inf')
if using_amplitude:
return 20 * log(ratio, 10)
else: # using power
return 10 * log(ratio, 10)
def register_pydub_effect(fn, name=None):
"""
decorator for adding pydub effects to the AudioSegment objects.
example use:
@register_pydub_effect
def normalize(audio_segment):
...
or you can specify a name:
@register_pydub_effect("normalize")
def normalize_audio_segment(audio_segment):
...
"""
if isinstance(fn, basestring):
name = fn
return lambda fn: register_pydub_effect(fn, name)
if name is None:
name = fn.__name__
from .audio_segment import AudioSegment
setattr(AudioSegment, name, fn)
return fn
def make_chunks(audio_segment, chunk_length):
"""
Breaks an AudioSegment into chunks that are <chunk_length> milliseconds
long.
if chunk_length is 50 then you'll get a list of 50 millisecond long audio
segments back (except the last one, which can be shorter)
"""
number_of_chunks = ceil(len(audio_segment) / float(chunk_length))
return [audio_segment[i * chunk_length:(i + 1) * chunk_length]
for i in range(int(number_of_chunks))]
def which(program):
"""
Mimics behavior of UNIX which command.
"""
#Add .exe program extension for windows support
if os.name == "nt" and not program.endswith(".exe"):
program += ".exe"
envdir_list = [os.curdir] + os.environ["PATH"].split(os.pathsep)
for envdir in envdir_list:
program_path = os.path.join(envdir, program)
if os.path.isfile(program_path) and os.access(program_path, os.X_OK):
return program_path
def get_encoder_name():
"""
    Return the default encoder application for the system, either avconv or ffmpeg
"""
if which("avconv"):
return "avconv"
elif which("ffmpeg"):
return "ffmpeg"
else:
# should raise exception
warn("Couldn't find ffmpeg or avconv - defaulting to ffmpeg, but may not work", RuntimeWarning)
return "ffmpeg"
def get_player_name():
"""
    Return the default player application for the system, either avplay or ffplay
"""
if which("avplay"):
return "avplay"
elif which("ffplay"):
return "ffplay"
else:
# should raise exception
warn("Couldn't find ffplay or avplay - defaulting to ffplay, but may not work", RuntimeWarning)
return "ffplay"
def get_prober_name():
"""
    Return the probe application for the system, either avprobe or ffprobe
"""
if which("avprobe"):
return "avprobe"
elif which("ffprobe"):
return "ffprobe"
else:
# should raise exception
warn("Couldn't find ffprobe or avprobe - defaulting to ffprobe, but may not work", RuntimeWarning)
return "ffprobe"
def mediainfo(filepath):
"""Return dictionary with media info(codec, duration, size, bitrate...) from filepath
"""
from .audio_segment import AudioSegment
prober = get_prober_name()
command_args = [
"-v", "quiet",
"-show_format",
"-show_streams",
filepath
]
command = [prober, '-of', 'old'] + command_args
res = Popen(command, stdout=PIPE)
output = res.communicate()[0].decode("utf-8")
if res.returncode != 0:
command = [prober] + command_args
output = Popen(command, stdout=PIPE).communicate()[0].decode("utf-8")
rgx = re.compile(r"(?:(?P<inner_dict>.*?):)?(?P<key>.*?)\=(?P<value>.*?)$")
info = {}
if sys.platform == 'win32':
output = output.replace("\r", "")
for line in output.split("\n"):
# print(line)
mobj = rgx.match(line)
if mobj:
# print(mobj.groups())
inner_dict, key, value = mobj.groups()
if inner_dict:
try:
info[inner_dict]
except KeyError:
info[inner_dict] = {}
info[inner_dict][key] = value
else:
info[key] = value
return info |
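# Editor's note: hedged usage sketch for mediainfo(); the key names below depend
# on the prober's output format and are shown as an assumption, not a guarantee.
def _mediainfo_example(filepath="example.mp3"):  # hypothetical helper
    info = mediainfo(filepath)
    # "duration" and "bit_rate" are typical format-section fields
    return info.get("duration"), info.get("bit_rate")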
py | b417241e81b8d060bf7f52563a07bb69deeeba11 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
############################################################
# POSTGRESQL
############################################################
# A dictionary with the settings about connection
__PGSQL_CONNECTION_SETTINGS__ = {
"HOSTNAME": "",
"USERNAME": "",
"PASSWORD": "",
"DATABASE": "",
"PORT": 0
}
# A dictionary with the Test/Debug connection settings
__DEBUG_PGSQL_CONNECTION_SETTINGS__ = {
"HOSTNAME": "",
"USERNAME": "",
"PASSWORD": "",
"DATABASE": "",
"PORT": 0
}
############################################################
# GEOSERVER
############################################################
# A dictionary with the connection settings
__GEOSERVER_CONNECTION_SETTINGS__ = {
"HOSTNAME": "",
"PORT": 0,
"WORKSPACE": "",
"DATASTORE": "",
}
# A dictionary with the Test/Debug connection settings
__DEBUG_GEOSERVER_CONNECTION_SETTINGS__ = {
"HOSTNAME": "",
"PORT": 0,
"WORKSPACE": "",
"DATASTORE": "",
}
############################################################
# GEOSERVER-REST
############################################################
# A dictionary with the connection settings
__GEOSERVER_REST_CONNECTION_SETTINGS__ = {
"HOSTNAME": "",
"PORT": 0,
}
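# Editor's note: hedged usage sketch, not part of the original settings module.
# Shows how the PostgreSQL settings could be consumed once filled in; psycopg2
# is an assumed dependency.
def _connect_postgres(settings=__PGSQL_CONNECTION_SETTINGS__):
    import psycopg2  # assumed to be installed
    return psycopg2.connect(
        host=settings["HOSTNAME"],
        user=settings["USERNAME"],
        password=settings["PASSWORD"],
        dbname=settings["DATABASE"],
        port=settings["PORT"],
    )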
|
py | b4172503b6bb7b651247858f389e0e861fc9dcf7 | #-*-coding:utf-8-*-
import sys
class BaidubaikeException(Exception):
""" Base Baidubaike exception class. """
def __init__(self, error):
self.error = error
def __unicode__(self):
return "An unknown error occured: \"{0}\". Please report it on GitHub!".format(self.error)
if sys.version_info > (3, 0):
def __str__(self):
return self.__unicode__()
else:
def __str__(self):
return self.__unicode__().encode('utf-8')
class PageError(BaidubaikeException):
""" Exception raised when a page does not exist. """
def __init__(self, page_title):
self.title = page_title
def __unicode__(self):
return u"\"{0}\" does not match any pages.".format(self.title)
class DisambiguationError(BaidubaikeException):
""" Exception raised when a page resolves to a Disambiguation page. """
def __init__(self, title, may_refer_to):
self.title = title
self.options = [' -- '.join(item) for item in may_refer_to.items()]
def __unicode__(self):
return u"\"{0}\" may refer to: \n{1}".format(self.title, '\n'.join(self.options))
class VerifyError(BaidubaikeException):
""" Exception raised when a verify-code appears. """
def __init__(self, title):
self.title = title
def __unicode__(self):
return u"The page \"{0}\" requires verifying. Query may be too frequent".format(self.title)
|
py | b41725088b5815f4e8b308e23a3ae4e5f2ed9d60 | """A common abstract class for attractor based models.
Author: Mengye Ren ([email protected])
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import numpy as np
import tensorflow as tf
from fewshot.models.nnlib import weight_variable
from fewshot.utils.logger import get as get_logger
from fewshot.models.attractors.attractor import get_attractor
from fewshot.models.attractors.static_attr import StaticAttractor # NOQA
from fewshot.models.attractors.proto_attn_attr import ProtoAttentionAttractor # NOQA
from fewshot.models.attractors.static_attr_resmlp import StaticResMLPAttractor # NOQA
from fewshot.models.attractors.proto_attn_attr_resmlp import ProtoAttentionResMLPAttractor # NOQA
log = get_logger()
class AttractorModelBase(object):
"""Base class for attractor models."""
def build_task_a(self, x, y, is_training, ext_wts=None):
"""Build task A branch.
Args:
x: Tensor. [N, H, W, C]. Inputs tensor.
y: Tensor. [N]. Labels tensor.
is_training: Bool. Whether in training mode.
ext_wts: Dict. External weights dictionary.
opt: Optimizer object.
"""
config = self.config
global_step = self.global_step
if config.backbone_class == 'resnet_backbone':
bb_config = config.resnet_config
else:
assert False, 'Not supported'
proto_config = config.protonet_config
opt_config = config.optimizer_config
num_classes_a = self._num_classes_a
# Classification branch for task A.
h_a = self._run_backbone(x, is_training=is_training, ext_wts=ext_wts)
self._h_a = h_a
h_shape = h_a.get_shape()
h_size = 1
for ss in h_shape[1:]:
h_size *= int(ss)
self._h_size = h_size
if ext_wts is not None:
w_class_a = weight_variable(
[h_size, num_classes_a],
init_method='numpy',
dtype=self.dtype,
init_param={'val': np.transpose(ext_wts['w_class_a'])},
wd=bb_config.wd,
name='w_class_a')
b_class_a = weight_variable([],
init_method='numpy',
dtype=self.dtype,
init_param={'val': ext_wts['b_class_a']},
wd=0e0,
name='b_class_a')
else:
w_class_a = weight_variable([h_size, num_classes_a],
init_method='truncated_normal',
dtype=self.dtype,
init_param={'stddev': 0.01},
wd=bb_config.wd,
name='w_class_a')
b_class_a = weight_variable([num_classes_a],
dtype=self.dtype,
init_method='constant',
init_param={'val': 0.0},
name='b_class_a')
self._w_class_a = w_class_a
self._b_class_a = b_class_a
num_classes_a_dyn = tf.cast(tf.shape(b_class_a)[0], tf.int64)
num_classes_a_dyn32 = tf.shape(b_class_a)[0]
if proto_config.cosine_a:
if proto_config.cosine_tau:
if ext_wts is None:
tau_init_val = 10.0
else:
tau_init_val = ext_wts['tau'][0]
tau = weight_variable([],
dtype=self.dtype,
init_method='constant',
init_param={'val': tau_init_val},
name='tau')
else:
tau = tf.constant(1.0)
w_class_a_norm = self._normalize(w_class_a, axis=0)
h_a_norm = self._normalize(h_a, axis=1)
dot = tf.matmul(h_a_norm, w_class_a_norm)
if ext_wts is not None:
dot += b_class_a
logits_a = tau * dot
else:
logits_a = tf.matmul(h_a, w_class_a) + b_class_a
self._prediction_a = logits_a
self._prediction_a_all = self._prediction_a
y_dense = tf.one_hot(y, num_classes_a)
xent_a = tf.nn.softmax_cross_entropy_with_logits(
logits=logits_a, labels=y_dense)
xent_a = tf.reduce_mean(xent_a, name='xent')
cost_a = xent_a
self._cost_a = cost_a
cost_a += self._decay()
self._prediction_a = logits_a
return logits_a
def build_task_a_grad(self):
# Gradients for Task A for all trainable variables.
cost_a = self._cost_a
var_list_a = tf.trainable_variables()
grads_and_vars_a = list(zip(tf.gradients(cost_a, var_list_a), var_list_a))
return grads_and_vars_a
def _run_backbone(self, x, ext_wts=None, reuse=None, is_training=True):
if self.config.backbone_class.startswith('resnet'):
return self.backbone(
x,
is_training=is_training,
ext_wts=ext_wts,
reuse=reuse,
slow_bn=ext_wts is not None)
else:
return self.backbone(x, is_training=is_training, ext_wts=ext_wts)
def initialize(self, sess):
# sess.run(self._initializer, feed_dict=self._init_fdict)
sess.run(self._initializer)
def _compute_protos(self, nclasses, h_train, y_train):
"""Computes the prototypes, cluster centers.
Args:
nclasses: Int. Number of classes.
      h_train: [N, D], Train features.
      y_train: [N], Train class labels.
    Returns:
      protos: [K, D], Class prototypes (cluster centers), one per class.
"""
protos = [None] * nclasses
for kk in range(nclasses):
# [N, 1]
ksel = tf.expand_dims(tf.cast(tf.equal(y_train, kk), h_train.dtype), 1)
# [N, D]
protos[kk] = tf.reduce_sum(h_train * ksel, [0], keep_dims=True)
protos[kk] /= (tf.reduce_sum(ksel, [0, 1], keep_dims=True) + 1e-7)
protos = tf.concat(protos, axis=0) # [K, D]
return protos
def _merge_var_list(self, vdict, vlist_old, vlist_new):
if vdict is None:
return None
vdict_new = dict(list(vdict.items()))
for vvo, vvn in zip(vlist_old, vlist_new):
vname = vvo.name.split(':')[0]
assert vname in vdict, '{} not found'.format(vname)
vdict_new[vname] = vvn
return vdict_new
def _aggregate_grads_and_vars(self, grads_and_vars_list, weights=None):
"""Aggregates two sets of gradients by doing an weighted sum."""
aggregated = {}
log.info('Number of grads and vars to aggregate: {}'.format(
len(grads_and_vars_list)))
if weights is None:
assert False, 'Equally aggregated, debug point'
weights = [None] * len(grads_and_vars_list)
for gv_list, wt in zip(grads_and_vars_list, weights):
for g, v in gv_list:
if g is not None:
if v in aggregated:
log.info('Variable matched in the dictionary: {}'.format(v.name))
if wt is None:
aggregated[v].append(g)
log.info('Applied default weight 1.0')
else:
aggregated[v].append(g * wt)
log.info('Applied weight {}'.format(wt))
else:
log.info('Variable created in the dictionary: {}'.format(v.name))
if wt is None:
aggregated[v] = [g]
log.info('Applied default weight 1.0')
else:
aggregated[v] = [g * wt]
log.info('Applied weight {}'.format(wt))
result = []
for v in aggregated.keys():
log.info('Variable {} Count {}'.format(v.name, len(aggregated[v])))
aggregated[v] = tf.add_n(aggregated[v])
result.append((aggregated[v], v))
return result
def _get_mask_fn(self, w, num_classes_a):
bin_mask = tf.reduce_sum(
tf.one_hot(self._y_sel, num_classes_a, dtype=self.dtype),
0,
keep_dims=True)
def mask_fn():
w_m = w * (1.0 - bin_mask) + 1e-7 * bin_mask
return w_m
return mask_fn
def _apply_transfer_loss(self, fast_weights, reuse=None, **kwargs):
"""Apply fast loss.
Args:
fast_weights: Fast weights to optimize in the inner loop.
reuse: Bool. Whether to reuse variables.
Returns:
loss: Scalar. Fast weights loss.
"""
config = self.config
tconfig = self.config.transfer_config
bb_config = self.config.resnet_config
loss_type = tconfig.transfer_loss_type
def get_arg_or_default(key, default):
return kwargs[key] if key in kwargs else default
h_b = get_arg_or_default('h_b', None)
y_b = get_arg_or_default('y_b', None)
w_class_a = get_arg_or_default('w_class_a', None)
b_class_a = get_arg_or_default('b_class_a', None)
    kwargs['mask'] = self._mask  # TODO: consider injecting this elsewhere.
kwargs['y_sel'] = self._y_sel
def combine_wb(w, b):
return tf.concat([w, tf.expand_dims(b, 0)], axis=0)
attractor = get_attractor(loss_type, tconfig)
self._attractor = attractor
if attractor is not None:
return attractor(
fast_weights, is_training=self._is_training, reuse=reuse, **kwargs)
else:
assert False, loss_type
def _normalize(self, x, axis, eps=1e-5):
"""Normalize a vector (for calculating cosine similarity."""
return x / (
        tf.sqrt(tf.reduce_sum(tf.square(x), axis=axis, keep_dims=True)) + eps)
def _decay(self):
"""Weight decay for slow weights."""
wd_losses = tf.get_collection(tf.GraphKeys.REGULARIZATION_LOSSES)
log.info('Weight decay variables')
[log.info(x) for x in wd_losses]
log.info('Total length: {}'.format(len(wd_losses)))
if len(wd_losses) > 0:
return tf.add_n(wd_losses)
else:
log.warning('No weight decay variables!')
return 0.0
def _decay_list(self, var_list):
if len(var_list) > 0:
wd = self.config.transfer_config.wd
wd_losses = list(
map(lambda x: 0.5 * wd * tf.reduce_sum(tf.square(x)), var_list))
log.info('Weight decay variables')
[log.info(x) for x in wd_losses]
return tf.add_n(wd_losses)
else:
log.warning('No weight decay variables!')
return 0.0
def _ft_decay(self, var_list):
"""Weight decay for fast weights."""
if len(var_list) > 0:
wd = self.config.transfer_config.finetune_wd
wd_losses = list(
map(lambda x: 0.5 * wd * tf.reduce_sum(tf.square(x)), var_list))
log.info('Fast weight decay variables')
[log.info(x) for x in wd_losses]
return tf.add_n(wd_losses)
else:
log.warning('No fast weight decay variables!')
return 0.0
def get_slow_weights(self):
"""Returns a set of slow weights."""
var_list = tf.trainable_variables()
var_list = list(filter(lambda x: 'phi' in x.name, var_list))
layers = self.config.transfer_config.meta_layers
if layers == "all":
pass
elif layers == "4":
keywords = ['TaskB', 'unit_4_']
filter_fn = lambda x: any([kw in x.name for kw in keywords])
var_list = list(filter(filter_fn, var_list))
else:
raise ValueError('Unknown finetune layers {}'.format(layers))
[log.info('Slow weights {}'.format(v.name)) for v in var_list]
return var_list
def get_transfer_loss_weights(self, name='transfer_loss'):
var_list = tf.trainable_variables()
var_list = list(filter(lambda x: name in x.name, var_list))
return var_list
def get_meta_weights(self):
"""Returns a set of weights that belongs to the meta-learner."""
var_list = self.get_transfer_loss_weights(name=self.transfer_loss_name)
var_list += self.get_transfer_loss_weights(name=self.new_loss_name)
proto_config = self.config.protonet_config
transfer_config = self.config.transfer_config
if proto_config.cosine_softmax_tau:
var_list += [self._tau_b]
if proto_config.protos_phi:
var_list += [self._w_p1]
if transfer_config.train_wclass_a:
var_list += [self.w_class_a]
if not proto_config.cosine_softmax:
var_list += [self.b_class_a]
return var_list
def get_optimizer(self, optname, learn_rate):
"""Gets an optimizer."""
if optname == 'adam':
opt = tf.train.AdamOptimizer(learn_rate)
elif optname == 'momentum':
opt = tf.train.MomentumOptimizer(learn_rate, 0.9)
elif optname == 'nesterov':
opt = tf.train.MomentumOptimizer(learn_rate, 0.9, use_nesterov=True)
elif optname == 'sgd':
opt = tf.train.GradientDescentOptimizer(learn_rate)
else:
raise ValueError('Unknown optimizer')
return opt
def get_fdict(self, task_a_data=None, task_b_data=None):
"""Make a feed dict."""
fdict = {}
if task_a_data is not None:
x_a, y_a = task_a_data
fdict[self.inputs] = x_a
fdict[self.labels] = y_a
if task_b_data is not None:
fdict[self.inputs_b] = task_b_data.x_train
fdict[self.labels_b] = task_b_data.y_train
fdict[self.inputs_b_v] = task_b_data.x_test
fdict[self.labels_b_v] = task_b_data.y_test
if task_b_data.y_sel is not None:
fdict[self._y_sel] = task_b_data.y_sel
fdict[self._mask] = True
# print('adding ysel', task_b_data.y_sel)
else:
fdict[self._y_sel] = np.zeros([self._num_classes_b], dtype=np.int64)
fdict[self._mask] = False
# print('not adding ysel')
return fdict
def train_step_a(self, sess, task_a_data):
"""Train a single step on task A."""
x_a, y_a = task_a_data
fdict = self.get_fdict(task_a_data=task_a_data)
cost_a, _ = sess.run([self.cost_a, self.train_op_a], feed_dict=fdict)
return cost_a
def eval_step(self, sess, task_a_data, task_b_data):
"""Evaluate one step."""
prediction_a, y_a = self.eval_step_a(sess, task_a_data)
prediction_b, y_b = self.eval_step_b(sess, task_b_data)
return prediction_a, prediction_b
def eval_step_a(self, sess, task_a_data):
"""Evaluate one step on task A."""
x_a, y_a = task_a_data
fdict = self.get_fdict(task_a_data=task_a_data)
prediction_a, y_a = sess.run([self.prediction_a_all, self.labels_all],
feed_dict=fdict)
return prediction_a, y_a
def eval_step_b(self, sess, task_b_data):
"""Evaluate one step on task B."""
    raise NotImplementedError()
def eval_step_b_old_and_new(self, sess, task_b_data):
"""Evaluate one step when there is both old and new data."""
    raise NotImplementedError()
def train_step(self, sess, task_a_data):
"""Train a single step."""
    raise NotImplementedError()
@property
def transfer_loss_name(self):
return "transfer_loss"
@property
def new_loss_name(self):
return "new_loss"
@property
def global_step(self):
return tf.contrib.framework.get_or_create_global_step()
@property
def inputs(self):
"""Input images on task A."""
return self._inputs
@property
def labels(self):
"""Labels on task A."""
return self._labels
@property
def labels_all(self):
"""All labels on task A."""
return self._labels_all
@property
def inputs_b(self):
"""Input images on task B."""
return self._inputs_b
@property
def labels_b(self):
"""Labels on task B."""
return self._labels_b
@property
def inputs_b_v(self):
"""Input images on task B query."""
return self._inputs_b_v
@property
def labels_b_v(self):
"""All labels on task B support."""
return self._labels_b_v
@property
def labels_b_v_all(self):
"""All labels on task B query."""
return self._labels_b_v_all
@property
def cost_a(self):
"""Loss on task A."""
return self._cost_a
@property
def cost_b(self):
"""Loss on task B support."""
return self._cost_b
@property
def cost_b_v(self):
"""Loss on task B query."""
return self._cost_b_v
@property
def acc_a(self):
"""Accuracy on task A."""
return self._acc_a
@property
def acc_b(self):
"""Accuracy on task B."""
return self._acc_b
@property
def w_class_a(self):
"""Weights for task A classifier."""
return self._w_class_a
@property
def b_class_a(self):
"""Bias for task A classifier."""
return self._b_class_a
@property
def h_a(self):
"""Hidden state for task A."""
return self._h_a
@property
def prediction_a(self):
"""Prediction on task A."""
return self._prediction_a
@property
def prediction_a_all(self):
"""All prediction on task A."""
return self._prediction_a_all
@property
def dtype(self):
"""Data type."""
if self.config.dtype == 'float32':
return tf.float32
elif self.config.dtype == 'float64':
return tf.float64
@property
def attractor(self):
"""Attractor module."""
return self._attractor
|
py | b4172597f51349ca6f0b237c268dee9eb72d259a | # coding: utf-8
"""
Minihub.py
The Clear BSD License
Copyright (c) – 2016, NetApp, Inc. All rights reserved.
Redistribution and use in source and binary forms, with or without modification, are permitted (subject to the limitations in the disclaimer below) provided that the following conditions are met:
* Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.
* Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
* Neither the name of NetApp, Inc. nor the names of its contributors may be used to endorse or promote products derived from this software without specific prior written permission.
NO EXPRESS OR IMPLIED LICENSES TO ANY PARTY'S PATENT RIGHTS ARE GRANTED BY THIS LICENSE. THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
"""
from pprint import pformat
from six import iteritems
class Minihub(object):
"""
NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
def __init__(self):
"""
Minihub - a model defined in Swagger
:param dict swaggerTypes: The key is attribute name
and the value is attribute type.
:param dict attributeMap: The key is attribute name
and the value is json key in definition.
"""
self.swagger_types = {
'minihub_ref': 'str', # (required parameter)
'status': 'str', # (required parameter)
'physical_location': 'Location', # (required parameter)
'minihub_data': 'MinihubTypeData', # (required parameter)
'current_speed': 'str', # (required parameter)
'max_speed': 'str', # (required parameter)
'channel': 'int', # (required parameter)
'port_list': 'PortList', # (required parameter)
'vendor_name': 'str', # (required parameter)
'part_number': 'str', # (required parameter)
'serial_number': 'str', # (required parameter)
'fru_type': 'str', # (required parameter)
'manufacturer_date': 'int', # (required parameter)
'reserved1': 'str',
'reserved2': 'str',
'rtr_attributes': 'RTRAttributes'
}
self.attribute_map = {
'minihub_ref': 'minihubRef', # (required parameter)
'status': 'status', # (required parameter)
'physical_location': 'physicalLocation', # (required parameter)
'minihub_data': 'minihubData', # (required parameter)
'current_speed': 'currentSpeed', # (required parameter)
'max_speed': 'maxSpeed', # (required parameter)
'channel': 'channel', # (required parameter)
'port_list': 'portList', # (required parameter)
'vendor_name': 'vendorName', # (required parameter)
'part_number': 'partNumber', # (required parameter)
'serial_number': 'serialNumber', # (required parameter)
'fru_type': 'fruType', # (required parameter)
'manufacturer_date': 'manufacturerDate', # (required parameter)
'reserved1': 'reserved1',
'reserved2': 'reserved2',
'rtr_attributes': 'rtrAttributes'
}
self._minihub_ref = None
self._status = None
self._physical_location = None
self._minihub_data = None
self._current_speed = None
self._max_speed = None
self._channel = None
self._port_list = None
self._vendor_name = None
self._part_number = None
self._serial_number = None
self._fru_type = None
self._manufacturer_date = None
self._reserved1 = None
self._reserved2 = None
self._rtr_attributes = None
@property
def minihub_ref(self):
"""
Gets the minihub_ref of this Minihub.
The reference for this physical minihub.
:return: The minihub_ref of this Minihub.
:rtype: str
:required/optional: required
"""
return self._minihub_ref
@minihub_ref.setter
def minihub_ref(self, minihub_ref):
"""
Sets the minihub_ref of this Minihub.
The reference for this physical minihub.
:param minihub_ref: The minihub_ref of this Minihub.
:type: str
"""
self._minihub_ref = minihub_ref
@property
def status(self):
"""
Gets the status of this Minihub.
The operational status of the minihub.
:return: The status of this Minihub.
:rtype: str
:required/optional: required
"""
return self._status
@status.setter
def status(self, status):
"""
Sets the status of this Minihub.
The operational status of the minihub.
:param status: The status of this Minihub.
:type: str
"""
allowed_values = ["optimal", "failed", "unsupported", "unknown", "__UNDEFINED"]
if status not in allowed_values:
raise ValueError(
"Invalid value for `status`, must be one of {0}"
.format(allowed_values)
)
self._status = status
@property
def physical_location(self):
"""
Gets the physical_location of this Minihub.
The physical location of the minihub. Note that the tray reference identifies the enclosure containing the minihub, but the slot information does not apply to this component.
:return: The physical_location of this Minihub.
:rtype: Location
:required/optional: required
"""
return self._physical_location
@physical_location.setter
def physical_location(self, physical_location):
"""
Sets the physical_location of this Minihub.
The physical location of the minihub. Note that the tray reference identifies the enclosure containing the minihub, but the slot information does not apply to this component.
:param physical_location: The physical_location of this Minihub.
:type: Location
"""
self._physical_location = physical_location
@property
def minihub_data(self):
"""
Gets the minihub_data of this Minihub.
Information returned is based on the minihub type.
:return: The minihub_data of this Minihub.
:rtype: MinihubTypeData
:required/optional: required
"""
return self._minihub_data
@minihub_data.setter
def minihub_data(self, minihub_data):
"""
Sets the minihub_data of this Minihub.
Information returned is based on the minihub type.
:param minihub_data: The minihub_data of this Minihub.
:type: MinihubTypeData
"""
self._minihub_data = minihub_data
@property
def current_speed(self):
"""
Gets the current_speed of this Minihub.
The current speed of the minihub.
:return: The current_speed of this Minihub.
:rtype: str
:required/optional: required
"""
return self._current_speed
@current_speed.setter
def current_speed(self, current_speed):
"""
Sets the current_speed of this Minihub.
The current speed of the minihub.
:param current_speed: The current_speed of this Minihub.
:type: str
"""
allowed_values = ["speedUnknown", "speed1gig", "speed2gig", "speed4gig", "speed10gig", "speed15gig", "speed3gig", "speed10meg", "speed100meg", "speed2pt5Gig", "speed5gig", "speed20gig", "speed30gig", "speed60gig", "speed8gig", "speed6gig", "speed40gig", "speed16gig", "speed56gig", "speed12gig", "speed25gig", "speed32gig", "speed100gig", "__UNDEFINED"]
if current_speed not in allowed_values:
raise ValueError(
"Invalid value for `current_speed`, must be one of {0}"
.format(allowed_values)
)
self._current_speed = current_speed
@property
def max_speed(self):
"""
Gets the max_speed of this Minihub.
The maximum speed of the minihub.
:return: The max_speed of this Minihub.
:rtype: str
:required/optional: required
"""
return self._max_speed
@max_speed.setter
def max_speed(self, max_speed):
"""
Sets the max_speed of this Minihub.
The maximum speed of the minihub.
:param max_speed: The max_speed of this Minihub.
:type: str
"""
allowed_values = ["speedUnknown", "speed1gig", "speed2gig", "speed4gig", "speed10gig", "speed15gig", "speed3gig", "speed10meg", "speed100meg", "speed2pt5Gig", "speed5gig", "speed20gig", "speed30gig", "speed60gig", "speed8gig", "speed6gig", "speed40gig", "speed16gig", "speed56gig", "speed12gig", "speed25gig", "speed32gig", "speed100gig", "__UNDEFINED"]
if max_speed not in allowed_values:
raise ValueError(
"Invalid value for `max_speed`, must be one of {0}"
.format(allowed_values)
)
self._max_speed = max_speed
@property
def channel(self):
"""
Gets the channel of this Minihub.
The channel number that this minihub is associated with.
:return: The channel of this Minihub.
:rtype: int
:required/optional: required
"""
return self._channel
@channel.setter
def channel(self, channel):
"""
Sets the channel of this Minihub.
The channel number that this minihub is associated with.
:param channel: The channel of this Minihub.
:type: int
"""
self._channel = channel
@property
def port_list(self):
"""
Gets the port_list of this Minihub.
Detailed information for each port of the minihub. This field is deprecated.
:return: The port_list of this Minihub.
:rtype: PortList
:required/optional: required
"""
return self._port_list
@port_list.setter
def port_list(self, port_list):
"""
Sets the port_list of this Minihub.
Detailed information for each port of the minihub. This field is deprecated.
:param port_list: The port_list of this Minihub.
:type: PortList
"""
self._port_list = port_list
@property
def vendor_name(self):
"""
Gets the vendor_name of this Minihub.
The vendor name of the minihub.
:return: The vendor_name of this Minihub.
:rtype: str
:required/optional: required
"""
return self._vendor_name
@vendor_name.setter
def vendor_name(self, vendor_name):
"""
Sets the vendor_name of this Minihub.
The vendor name of the minihub.
:param vendor_name: The vendor_name of this Minihub.
:type: str
"""
self._vendor_name = vendor_name
@property
def part_number(self):
"""
Gets the part_number of this Minihub.
The part number of the minihub.
:return: The part_number of this Minihub.
:rtype: str
:required/optional: required
"""
return self._part_number
@part_number.setter
def part_number(self, part_number):
"""
Sets the part_number of this Minihub.
The part number of the minihub.
:param part_number: The part_number of this Minihub.
:type: str
"""
self._part_number = part_number
@property
def serial_number(self):
"""
Gets the serial_number of this Minihub.
The serial number of the minihub.
:return: The serial_number of this Minihub.
:rtype: str
:required/optional: required
"""
return self._serial_number
@serial_number.setter
def serial_number(self, serial_number):
"""
Sets the serial_number of this Minihub.
The serial number of the minihub.
:param serial_number: The serial_number of this Minihub.
:type: str
"""
self._serial_number = serial_number
@property
def fru_type(self):
"""
Gets the fru_type of this Minihub.
The field replaceable unit type of the minihub.
:return: The fru_type of this Minihub.
:rtype: str
:required/optional: required
"""
return self._fru_type
@fru_type.setter
def fru_type(self, fru_type):
"""
Sets the fru_type of this Minihub.
The field replaceable unit type of the minihub.
:param fru_type: The fru_type of this Minihub.
:type: str
"""
self._fru_type = fru_type
@property
def manufacturer_date(self):
"""
Gets the manufacturer_date of this Minihub.
The date the minihub was manufactured.
:return: The manufacturer_date of this Minihub.
:rtype: int
:required/optional: required
"""
return self._manufacturer_date
@manufacturer_date.setter
def manufacturer_date(self, manufacturer_date):
"""
Sets the manufacturer_date of this Minihub.
The date the minihub was manufactured.
:param manufacturer_date: The manufacturer_date of this Minihub.
:type: int
"""
self._manufacturer_date = manufacturer_date
@property
def reserved1(self):
"""
Gets the reserved1 of this Minihub.
:return: The reserved1 of this Minihub.
:rtype: str
:required/optional: optional
"""
return self._reserved1
@reserved1.setter
def reserved1(self, reserved1):
"""
Sets the reserved1 of this Minihub.
:param reserved1: The reserved1 of this Minihub.
:type: str
"""
self._reserved1 = reserved1
@property
def reserved2(self):
"""
Gets the reserved2 of this Minihub.
:return: The reserved2 of this Minihub.
:rtype: str
:required/optional: optional
"""
return self._reserved2
@reserved2.setter
def reserved2(self, reserved2):
"""
Sets the reserved2 of this Minihub.
:param reserved2: The reserved2 of this Minihub.
:type: str
"""
self._reserved2 = reserved2
@property
def rtr_attributes(self):
"""
Gets the rtr_attributes of this Minihub.
The CRU type of the minihub plus its ready-to-remove attributes, which are based on the CRU type
:return: The rtr_attributes of this Minihub.
:rtype: RTRAttributes
:required/optional: required
"""
return self._rtr_attributes
@rtr_attributes.setter
def rtr_attributes(self, rtr_attributes):
"""
Sets the rtr_attributes of this Minihub.
The CRU type of the minihub plus its ready-to-remove attributes, which are based on the CRU type
:param rtr_attributes: The rtr_attributes of this Minihub.
:type: RTRAttributes
"""
self._rtr_attributes = rtr_attributes
def to_dict(self):
"""
Returns the model properties as a dict
"""
result = {}
for attr, _ in iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
return result
def to_str(self):
"""
Returns the string representation of the model
"""
return pformat(self.to_dict())
def __repr__(self):
"""
For `print` and `pprint`
"""
if self is None:
return None
return self.to_str()
def __eq__(self, other):
"""
Returns true if both objects are equal
"""
if self is None or other is None:
return None
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""
Returns true if both objects are not equal
"""
return not self == other
|
py | b41725b6c60eacac30a5e0f75547313d8491b055 | # -*- coding: utf-8 -*-
from mailtrigger.trigger.helper import Helper
from mailtrigger.trigger.trigger import TriggerException
def test_init():
try:
_ = Helper(None)
except TriggerException as e:
assert str(e) == 'invalid helper configuration'
def test_helper():
helper = None
try:
helper = Helper(None)
except TriggerException as e:
assert str(e) == 'invalid helper configuration'
assert len(Helper.help()) == 0
msg, status = helper.run(None)
assert len(msg) != 0
assert status is False
event = {
'content': '',
'date': '',
'from': '[email protected]',
'subject': '',
'to': ''
}
msg, status = helper.run(event)
assert len(msg) != 0
assert status is False
event = {
'content': '',
'date': '',
'from': '[email protected]',
'subject': '',
'to': ''
}
msg, status = helper.run(event)
assert len(msg) != 0
assert status is False
event = {
'content': '',
'date': '',
'from': '[email protected]',
'subject': '[trigger]',
'to': ''
}
msg, status = helper.run(event)
assert len(msg) != 0
assert status is False
event = {
'content': '@help',
'date': '',
'from': '[email protected]',
'subject': '[trigger]',
'to': ''
}
msg, status = helper.run(event)
assert len(msg) != 0
assert status is True
|
py | b41726a0341cfe442395a2bed76ca90c14e5707f | """
Utility functions for testing
"""
import numpy as np
def make_test_sine(size, hz, rate=44100):
samples = np.zeros(size)
phaseIncrement = (2*np.pi) / (float(rate) / float(hz))
phase = 0.0
for i in range(size):
samples[i] = np.sin(phase)
phase = phase + phaseIncrement
return samples
def make_test_cosine(size, hz, rate=44100):
samples = np.zeros(size)
phaseIncrement = (2*np.pi) / (float(rate) / float(hz))
phase = 0.0
for i in range(size):
samples[i] = np.cos(phase)
phase = phase + phaseIncrement
return samples
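# Editor's note: small usage sketch (assumes numpy as imported above); generates
# one second of a 440 Hz test tone and checks that its peak is close to 1.0.
def _example_test_sine():  # hypothetical helper, not used by the test suite
    tone = make_test_sine(44100, 440)
    assert abs(np.max(tone) - 1.0) < 1e-3
    return tone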
|
py | b4172822945a4f4a93c2bac34e86c9c7b5cd065c | # Copyright 2019-2020 ETH Zurich and the DaCe authors. All rights reserved.
import dace
import numpy as np
import sys
import os
# Ensure files from the same directory can be imported (for pytest)
sys.path.insert(0, os.path.abspath(os.path.dirname(__file__)))
import external_module
W = external_module.W
H = external_module.H
@dace.program
def extmodtest(A: dace.float32[W, H], result: dace.float32[1]):
tmp = np.ndarray([H, W], dace.float32)
external_module.transpose(A, tmp)
with dace.tasklet:
a << tmp[1, 2]
b >> result[0]
b = a
if __name__ == '__main__':
W.set(12)
H.set(12)
A = np.random.rand(W.get(), H.get()).astype(np.float32)
res = np.zeros([1], np.float32)
extmodtest(A, res)
assert res[0] == A[2, 1]
print('TEST PASSED')
|
py | b4172a18badbbef588ff2f4a5bf1d36005c072d1 | from enum import Enum
class ModifierType(Enum):
Increase = 1
Decrease = 2 |
py | b4172a1a23dd114eb47f071aa5c092c4842e5360 | #!/usr/bin/env python3
import os
import torch
import torch.nn as nn
import torchvision
import numpy as np
from PIL import Image
from torch.utils.data import Dataset
from torch.utils.data import DataLoader
from torchvision import transforms
# transforms of input
tsfm = transforms.Compose([
transforms.Resize((224, 224)),
transforms.ToTensor(),
])
# custom Dataset class for the images and their (T, C) labels
class MyTDataset(Dataset):
def __init__(self, images_dir=os.path.join('dataset', 'images'), labels_dir=os.path.join('dataset', 'labels'), transform=tsfm):
self.images_dir = images_dir
self.T_labels = np.loadtxt(os.path.join(labels_dir, 'T_labels.csv'), delimiter='\n', dtype=np.float32)
self.C_labels = np.loadtxt(os.path.join(labels_dir, 'C_labels.csv'), delimiter='\n', dtype=np.float32)
self.transform = transform
super().__init__()
def __len__(self):
return len(os.listdir(self.images_dir))
def __getitem__(self, index):
image_name = 'image' + str(index) +'.png'
image_path = os.path.join(self.images_dir, image_name)
image = Image.open(image_path)
image = self.transform(image)
label = (self.T_labels[index], self.C_labels[index])
return image, label
model = nn.Sequential(
torchvision.models.resnet18(pretrained=True),
nn.BatchNorm1d(1000),
nn.ReLU(),
nn.Linear(1000, 512),
nn.BatchNorm1d(512),
nn.ReLU(),
nn.Linear(512, 128),
nn.BatchNorm1d(128),
nn.ReLU(),
nn.Linear(128, 2),
nn.Tanh(),
)
if __name__ == "__main__":
# need cuda to train the model.
model = model.cuda()
dataset = MyTDataset(transform=tsfm)
dataloader = DataLoader(dataset, 64, shuffle=True, num_workers=4)
L1_loss = nn.L1Loss().cuda()
optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
losses_T = []
losses_C = []
for epoch in range(0, 100):
loss_T = torch.Tensor([0.0]).cuda()
loss_C = torch.Tensor([0.0]).cuda()
for batch_id, (images, (labels_T, labels_C)) in enumerate(dataloader):
batch_size = images.shape[0]
optimizer.zero_grad()
images = images.cuda()
labels_C = labels_C.cuda()
labels_T = labels_T.cuda()
predicts = model(images).reshape(-1, 2)
            # split predictions into the T and C outputs
predicts_T = predicts[:,0]
predicts_C = predicts[:,1]
# get each loss
batch_loss_T = L1_loss(predicts_T, labels_T)
batch_loss_C = L1_loss(predicts_C, labels_C)
batch_loss_C.backward(retain_graph=True)
batch_loss_T.backward()
optimizer.step()
# record
loss_T += batch_loss_T.detach() * batch_size
loss_C += batch_loss_C.detach() * batch_size
print('epoch {}, batch {}...\r'.format(epoch, batch_id), end='')
print('epoch {}, loss_T is {}, loss_C is {}'.format(epoch, loss_T, loss_C))
losses_T.append(loss_T)
losses_C.append(loss_C)
# save model state
torch.save(model.state_dict(), 'model_state')
print(losses_T)
print(losses_C)
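# Editor's note: hedged inference sketch, not part of the original script. It
# reloads the saved weights on CPU for a single image; the image path below is
# a placeholder assumption and the image is assumed to be a 3-channel PNG like
# the training data.
def _predict_single_image(image_path='dataset/images/image0.png'):
    model.load_state_dict(torch.load('model_state', map_location='cpu'))
    model.eval()
    image = tsfm(Image.open(image_path)).unsqueeze(0)  # shape [1, 3, 224, 224]
    with torch.no_grad():
        t_pred, c_pred = model(image)[0]
    return float(t_pred), float(c_pred)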
|
py | b4172a75f8dd95b60a035142a6519a5ff1e5e8a8 | from __future__ import annotations
from collections.abc import Iterable
from collections import defaultdict, OrderedDict
import pdb
import itertools
import functools
import multiprocessing
import sympy
from sympy.solvers.solveset import linsolve
import helpers.vcommon as CM
import settings
from typing import Type, TypeVar, Union, Optional, Callable
from typing import List, Iterable, Any, Tuple, Dict, Sequence, Set
DBG = pdb.set_trace
mlog = CM.getLogger(__name__, settings.LOGGER_LEVEL)
class Miscs:
@staticmethod
def is_expr(x: Any) -> bool:
return isinstance(x, sympy.Expr)
@classmethod
def get_vars(cls: Type[Miscs], props: Any) -> List[sympy.Symbol]:
"""
Returns a list of uniq variables from a list of properties
>>> a,b,c,x = sympy.symbols('a b c x')
>>> assert [a, b, c, x] == Miscs.get_vars([x**(a*b) + a**2+b+2, sympy.Eq(c**2-b,100), sympy.Gt(b**2 + c**2 + a**3,1)])
>>> assert Miscs.get_vars(a**2+b+5*c+2) == [a, b, c]
>>> assert Miscs.get_vars(x+x**2) == [x]
>>> assert Miscs.get_vars([3]) == []
>>> assert Miscs.get_vars((3,'x + c',x+b)) == [b, x]
"""
props = props if isinstance(props, Iterable) else [props]
props = (p for p in props if isinstance(p, (sympy.Expr, sympy.Rel)))
vs = (v for p in props for v in p.free_symbols)
return sorted(set(vs), key=str)
str2rat_cache: Dict[str, sympy.Rational] = {}
@staticmethod
def str2list(s: str) -> Tuple:
assert isinstance(s, str), s
rs = tuple(eval(s))
return rs
@staticmethod
@functools.cache
def str2rat(s: str) -> sympy.Rational:
"""
Convert the input 's' to a rational number if possible.
Examples:
>>> print(Miscs.str2rat('.3333333'))
3333333/10000000
>>> print(Miscs.str2rat('3/7'))
3/7
>>> print(Miscs.str2rat('1.'))
1
>>> print(Miscs.str2rat('1.2'))
6/5
>>> print(Miscs.str2rat('.333'))
333/1000
>>> print(Miscs.str2rat('-.333'))
-333/1000
>>> print(Miscs.str2rat('-12.13'))
-1213/100
"""
return sympy.Rational(s)
@staticmethod
def create_uks(ts: List[Any], prefix: str = "uk") -> List[sympy.Symbol]:
uks = [sympy.Symbol(f"{prefix}_{i}") for i in range(len(ts))]
assert not set(ts).intersection(set(uks)), "name conflict"
return uks
@classmethod
def init_terms(cls: Type[Miscs], vs: List[str], deg: int, rate: float) -> Tuple[List[Any], List[sympy.Symbol], int]:
assert vs, vs
assert deg >= 1, deg
assert rate >= 0.1, rate
symbols = [sympy.Symbol(v) for v in vs]
terms = cls.get_terms(symbols, deg)
uks = cls.create_uks(terms)
assert not set(terms).intersection(set(uks)), "name conflict"
n_eqts_needed = int(rate * len(uks))
return terms, uks, n_eqts_needed
@staticmethod
def get_terms(symbols: List[sympy.Symbol], deg: int) -> List[Any]:
"""
get a list of terms from the given list of vars and deg
the number of terms is len(rs) == binomial(len(symbols)+d, d)
>>> a,b,c,d,e,f = sympy.symbols('a b c d e f')
>>> ts = Miscs.get_terms([a, b], 3)
>>> assert ts == [1, a, b, a**2, a*b, b**2, a**3, a**2*b, a*b**2, b**3]
>>> Miscs.get_terms([a,b,c,d,e,f], 3)
[1, a, b, c, d, e, f, a**2, a*b, a*c, a*d, a*e, a*f, b**2, b*c, b*d, b*e, b*f, c**2, c*d, c*e, c*f, d**2, d*e, d*f, e**2, e*f, f**2, a**3, a**2*b, a**2*c, a**2*d, a**2*e, a**2*f, a*b**2, a*b*c, a*b*d, a*b*e, a*b*f, a*c**2, a*c*d, a*c*e, a*c*f, a*d**2, a*d*e, a*d*f, a*e**2,
a*e*f, a*f**2, b**3, b**2*c, b**2*d, b**2*e, b**2*f, b*c**2, b*c*d, b*c*e, b*c*f, b*d**2, b*d*e, b*d*f, b*e**2, b*e*f, b*f**2, c**3, c**2*d, c**2*e, c**2*f, c*d**2, c*d*e, c*d*f, c*e**2, c*e*f, c*f**2, d**3, d**2*e, d**2*f, d*e**2, d*e*f, d*f**2, e**3, e**2*f, e*f**2, f**3]
"""
assert deg >= 0, deg
assert (symbols and
all(isinstance(s, sympy.Symbol) for s in symbols)), symbols
# ss_ = ([1] if ss else (1,)) + ss
symbols_ = [1] + symbols
combs = itertools.combinations_with_replacement(symbols_, deg)
terms = [sympy.prod(c) for c in combs]
return terms
    @classmethod
def get_max_deg(cls: Type[Miscs], p: Union[int, sympy.Expr]) -> int:
"""
get the max degree of a polynomial
>>> x, y, z = sympy.symbols('x y z')
>>> p = 3*x**2*y + x*y**4 + z*x
>>> assert(Miscs.get_max_deg(p) == 5)
>>> assert(Miscs.get_max_deg(x) == 1)
>>> assert(Miscs.get_max_deg(x**3) == 3)
>>> assert(Miscs.get_max_deg(-100) == 0)
>>> assert(Miscs.get_max_deg(x*y-100) == 2)
>>> assert(Miscs.get_max_deg(x*y**2 + 3*y) == 3)
"""
assert isinstance(p, (int, sympy.Expr)), p
if isinstance(p, (int, sympy.core.numbers.Integer)):
return 0
elif p.is_Symbol or p.is_Mul or p.is_Pow: # x, x*y, x**3
return sum(sympy.degree_list(p))
elif isinstance(p, sympy.Add):
return max(cls.get_max_deg(a) for a in p.args)
else:
mlog.warning(f"cannot handle {p} of type {type(p)}")
return 0
@classmethod
def get_deg(cls: Type[Miscs], nvs: int, nts: int, max_deg: int = 7) -> int:
"""
        Guess a max degree w.r.t. a (maximum) number of terms (nts)
>>> assert(Miscs.get_deg(3, 4, 5) == 1)
>>> Miscs.get_deg(3, 1, 5)
Traceback (most recent call last):
...
AssertionError: (1, 3)
"""
assert nvs >= 1, nvs
assert nts >= nvs, (nts, nvs)
assert max_deg >= 1, max_deg
for d in range(1, max_deg + 1):
if d == max_deg:
return d
# look ahead
nterms: int = sympy.binomial(nvs + d + 1, d + 1)
if nterms > nts:
return d
return max_deg
@classmethod
def get_auto_deg(cls: Type[Miscs], maxdeg: int, nvars: int, maxterm: int) -> int:
if maxdeg:
deg = maxdeg
mlog.debug(f"using deg {deg}")
else:
deg = cls.get_deg(nvars, maxterm)
mlog.debug(f"autodeg {deg}")
return deg
@staticmethod
def get_terms_fixed_coefs(ss, subset_siz, icoef, do_create_terms=True):
"""
if do_create_terms = True, then return x*y, otherwise, return (x,y)
>>> x, y, z, t, s, u = sympy.symbols('x y z t s u')
>>> sorted(Miscs.get_terms_fixed_coefs([x,y], 2, 1), key=lambda x: str(x))
[-x, -x + y, -x - y, -y, x, x + y, x - y, y]
>>> sorted(Miscs.get_terms_fixed_coefs([x,y**2], 2, 1), key=lambda x: str(x))
[-x, -x + y**2, -x - y**2, -y**2, x, x + y**2, x - y**2, y**2]
>>> assert len(Miscs.get_terms_fixed_coefs([x,y,z], 2, 1)) == 18
>>> assert len(Miscs.get_terms_fixed_coefs([x,y,z], 3, 1)) == 26
>>> assert len(Miscs.get_terms_fixed_coefs([x,y,z], 2, 3)) == 126
"""
assert icoef >= 1, icoef
if len(ss) < subset_siz:
subset_siz = len(ss)
coefs = list(range(-icoef, icoef + 1))
rs = []
for ssSubset in itertools.combinations(ss, subset_siz):
css = itertools.product(*([coefs] * len(ssSubset)))
rs_ = [
tuple((t, c) for t, c in zip(ssSubset, cs) if c != 0)
for cs in css
if not all(c_ == 0 for c_ in cs)
]
if do_create_terms:
rs_ = [sum(t * c for t, c in tc) for tc in rs_]
rs.extend(rs_)
return set(rs)
@classmethod
def reduce_eqts(cls: Type[Miscs], ps: List[Union[sympy.Expr, sympy.Rel]]) -> List[Union[sympy.Expr, sympy.Rel]]:
"""
        Return the basis (i.e., a minimal subset of ps that implies ps)
of the set of polynomial eqts using Groebner basis.
Warning 1: Grobner basis sometimes results in a larger set of eqts,
in which case we return the original set of eqts.
Warning 2: seems to get stuck often. So had to give it "nice" polynomials
>>> a, y, b, q, k = sympy.symbols('a y b q k')
# >>> rs = Miscs.reduce_eqts([a*y-b==0,q*y+k-x==0,a*x-a*k-b*q==0])
__main__:DEBUG:Grobner basis: got 2 ps from 3 ps
# >>> assert set(rs) == set([a*y - b == 0, q*y + k - x == 0])
# >>> rs = Miscs.reduce_eqts([x*y==6,y==2,x==3])
__main__:DEBUG:Grobner basis: got 2 ps from 3 ps
# >>> assert set(rs) == set([x - 3 == 0, y - 2 == 0])
# Attribute error occurs when only 1 var, thus return as is
# >>> rs = Miscs.reduce_eqts([x*x==4,x==2])
__main__:ERROR:'Ideal_1poly_field' object has no attribute 'radical'
# >>> assert set(rs) == set([x == 2, x**2 == 4])
"""
if len(ps) <= 1:
return ps
ps_ = sympy.groebner(ps, *cls.get_vars(ps))
ps_ = [x for x in ps_]
mlog.debug(f"Grobner basis: from {len(ps)} to {len(ps_)} ps")
return ps_ if len(ps_) < len(ps) else ps
@staticmethod
def elim_denom(p: Union[sympy.Expr, sympy.Rel]) -> Union[sympy.Expr, sympy.Rel]:
"""
Eliminate (Integer) denominators in expression operands.
Will not eliminate if denominators is a var (e.g., (3*x)/(y+2)).
>>> x,y,z = sympy.symbols('x y z')
>>> Miscs.elim_denom(sympy.Rational(3, 4)*x**2 + sympy.Rational(7, 5)*y**3)
15*x**2 + 28*y**3
>>> Miscs.elim_denom(x + y)
x + y
>>> Miscs.elim_denom(-sympy.Rational(3,2)*x**2 - sympy.Rational(1,24)*z**2)
-36*x**2 - z**2
>>> Miscs.elim_denom(15*x**2 - 12*z**2)
15*x**2 - 12*z**2
"""
denoms = [sympy.fraction(a)[1] for a in p.args]
if all(denom == 1 for denom in denoms): # no denominator like 1/2
return p
return p * sympy.lcm(denoms)
@classmethod
def get_coefs(cls: Type[Miscs], p: Union[sympy.Expr, sympy.Rel]) -> List[Union[int, float]]:
"""
Return coefficients of an expression
>>> x,y,z = sympy.symbols('x y z')
>>> Miscs.get_coefs(3*x+5*x*y**2)
[3, 5]
"""
p = p.lhs if p.is_Equality else p
return list(p.as_coefficients_dict().values())
@classmethod
def remove_ugly(cls: Type[Miscs], ps: List[Union[sympy.Expr, sympy.Rel]]) -> List[Union[sympy.Expr, sympy.Rel]]:
@functools.cache
def is_nice_coef(c: Union[int, float]) -> bool:
return abs(c) <= settings.UGLY_FACTOR or c % 10 == 0 or c % 5 == 0
@functools.cache
def is_nice_eqt(eqt: Union[sympy.Expr, sympy.Rel]) -> bool:
return (len(eqt.args) <= settings.UGLY_FACTOR
and all(is_nice_coef(c) for c in cls.get_coefs(eqt)))
ps_ = []
for p in ps:
if is_nice_eqt(p):
ps_.append(p)
else:
mlog.debug(f"ignoring large coefs {str(p)[:50]} ..")
return ps_
@classmethod
def refine(cls: Type[Miscs], eqts: List[Union[sympy.Expr, sympy.Rel]]) -> List[Union[sympy.Expr, sympy.Rel]]:
if not eqts:
return eqts
eqts = [cls.elim_denom(s) for s in eqts]
eqts = cls.remove_ugly(eqts)
eqts = cls.reduce_eqts(eqts)
eqts = [cls.elim_denom(s) for s in eqts]
eqts = cls.remove_ugly(eqts)
return eqts
@classmethod
def solve_eqts(cls: Type[Miscs], eqts: List[Union[sympy.Expr, sympy.Rel]],
terms: List[Any], uks: List[sympy.Symbol]) -> List[sympy.Eq]:
assert isinstance(eqts, list) and eqts, eqts
assert isinstance(terms, list) and terms, terms
assert isinstance(uks, list) and uks, uks
assert len(terms) == len(uks), (terms, uks)
assert len(eqts) >= len(uks), (len(eqts), len(uks))
mlog.debug(f"solving {len(uks)} uks using {len(eqts)} eqts")
sol = linsolve(eqts, uks)
vals = list(list(sol)[0])
if all(v == 0 for v in vals):
return []
eqts_ = cls.instantiate_template(terms, uks, vals)
mlog.debug(f"got {len(eqts_)} eqts after instantiating")
eqts_ = cls.refine(eqts_)
mlog.debug(f"got {len(eqts_)} eqts after refinement")
return [sympy.Eq(eqt, 0) for eqt in eqts_]
@classmethod
def instantiate_template(cls, terms, uks, vs):
# def instantiate_template(cls: Type[Miscs], terms: List[Any], uks: List[sympy.Symbol], vs: List[str]):
"""
Instantiate a template with solved coefficient values
# sage:var('uk_0,uk_1,uk_2,uk_3,uk_4,r14,r15,a,b,y')
(uk_0, uk_1, uk_2, uk_3, uk_4, r14, r15, a, b, y)
# sage:sols = [{uk_0: -2*r14 + 7/3*r15, uk_1: - \
1/3*r15, uk_4: r14, uk_2: r15, uk_3: -2*r14}]
# sage:Miscs.instantiate_template(uk_1*a + uk_2*b + uk_3*x + uk_4*y + uk_0 == 0, sols)
[-2*x + y - 2 == 0, -1/3*a + b + 7/3 == 0]
# sage:Miscs.instantiate_template(uk_1*a + uk_2*b + uk_3*x + uk_4*y + uk_0 == 0, [])
[]
"""
assert isinstance(vs, list), vs
assert isinstance(terms, list) and terms, terms
assert isinstance(uks, list) and uks, uks
assert len(terms) == len(uks) == len(vs), (terms, uks, vs)
cs = [(t, u, v) for t, u, v in zip(terms, uks, vs) if v != 0]
terms_, uks_, vs_ = zip(*cs)
eqt = sum(t*v for t, v in zip(terms_, vs_))
uk_vs = cls.get_vars(vs_)
if not uk_vs:
return eqt
sols = [eqt.xreplace({uk: (1 if j == i else 0) for j, uk in enumerate(uk_vs)})
for i, uk in enumerate(uk_vs)]
return sols
@staticmethod
def show_removed(s: str, orig_siz: int, new_siz: int, elapsed_time: float):
assert orig_siz >= new_siz, (orig_siz, new_siz)
n_removed = orig_siz - new_siz
mlog.debug(
f"{s}: removed {n_removed} invs "
f"in {elapsed_time:.2f}s (orig {orig_siz}, new {new_siz})"
)
@staticmethod
def simplify_idxs(ordered_idxs: List[int], imply_f: Callable[[Set[int], int], bool]) -> List[int]:
"""
attempt to remove i in idxs if imply_f returns true
Note: the order of idxs determine what to get checked (and removed)
"""
assert isinstance(ordered_idxs, list), ordered_idxs
assert ordered_idxs == list(range(len(ordered_idxs))), ordered_idxs
results = set(ordered_idxs)
for i in reversed(ordered_idxs):
if i not in results:
continue
others = results - {i}
if others and imply_f(others, i):
results = others
return sorted(results)
@staticmethod
def create_dict(l: List[Tuple[Any, Any]]) -> Dict[Any, Any]:
"""
        given a list of (key, value) pairs [(k1,v1),..,(kn,vn)]
generates a dict where keys are k's and values are [v's]
e.g.,
>>> Miscs.create_dict([('a',1),['b',2],('a',3),('c',4),('b',10)])
{'a': [1, 3], 'b': [2, 10], 'c': [4]}
"""
return functools.reduce(lambda d, kv: d.setdefault(kv[0], []).append(kv[1]) or d, l, {})
@staticmethod
def merge_dict(l: List[Dict[Any, Any]]) -> Dict[Any, Any]:
return functools.reduce(lambda x, y: OrderedDict(list(x.items()) + list(y.items())), l, {})
class MP:
@staticmethod
def get_workload(tasks: List[Any], n_cpus: int) -> List[List[Any]]:
"""
>>> wls = MP.get_workload(range(12),7); [len(wl) for wl in wls]
[1, 1, 2, 2, 2, 2, 2]
>>> wls = MP.get_workload(range(12),5); [len(wl) for wl in wls]
[2, 2, 2, 3, 3]
>>> wls = MP.get_workload(range(20),7); [len(wl) for wl in wls]
[2, 3, 3, 3, 3, 3, 3]
>>> wls = MP.get_workload(range(20),20); [len(wl) for wl in wls]
[1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1]
>>> wls = MP.get_workload(range(146), 20); [len(wl) for wl in wls]
[7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 7, 8, 8, 8, 8, 8, 8]
"""
assert len(tasks) >= 1, tasks
assert n_cpus >= 1, n_cpus
wloads = defaultdict(list)
for i, task in enumerate(tasks):
cpu_id = i % n_cpus
wloads[cpu_id].append(task)
_wloads = [wl for wl in sorted(
wloads.values(), key=lambda wl: len(wl))]
return _wloads
@staticmethod
def run_mp(taskname: str, tasks: List[Any], f: Callable[[List[Any]], Any], DO_MP: bool):
"""
Run wprocess on tasks in parallel
"""
def wprocess(mytasks: List[Any], myQ: Union[None, multiprocessing.Queue]):
try:
rs = f(mytasks)
except BaseException as ex:
mlog.debug(f"Got exception in worker: {ex}")
if myQ is None:
raise
else:
rs = ex
if myQ is None:
return rs
else:
myQ.put(rs)
n_cpus = multiprocessing.cpu_count()
if DO_MP and len(tasks) >= 2 and n_cpus >= 2:
Q: multiprocessing.Queue = multiprocessing.Queue()
wloads = MP.get_workload(tasks, n_cpus=n_cpus)
mlog.debug(
f"{taskname}:running {len(tasks)} jobs "
f"using {len(wloads)} threads: {list(map(len, wloads))}"
)
workers = [
multiprocessing.Process(target=wprocess, args=(wl, Q)) for wl in wloads
]
for w in workers:
w.start()
wrs = []
for _ in workers:
rs = Q.get()
if isinstance(rs, list):
wrs.extend(rs)
else:
mlog.debug(f"Got exception from worker: {rs}")
raise rs
else:
wrs = wprocess(tasks, myQ=None)
return wrs
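# Editor's note: hedged usage sketch for MP.run_mp, not part of the original
# module; with do_mp=False it simply runs the worker function in-process.
def _run_mp_example(do_mp=False):
    def square_all(xs):
        return [x * x for x in xs]
    tasks = list(range(10))
    return sorted(MP.run_mp("square", tasks, square_all, do_mp))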
if __name__ == "__main__":
import doctest
doctest.testmod()
|
py | b4172a92e775ea4f606509ea6a2778165939dcd5 | import hashlib
from datetime import datetime
from icalendar import Calendar
from gludb.simple import DBObject, Field, Index
from SuperGLU.Util.Serialization import SuperGlu_Serializable, tokenizeObject, untokenizeObject, makeSerialized
from SuperGLU.Services.QueryService.Queries import getKCsForAGivenUserAndTask, getAllHintsForSingleUserAndTask, getAllFeedbackForSingleUserAndTask
from SuperGLU.Util.ErrorHandling import logInfo
from SuperGLU.Util.SerializationGLUDB import DBSerializable, GLUDB_BRIDGE_NAME
from SuperGLU.Core.MessagingDB import DBLoggedMessage
from uuid import uuid4
import uuid
"""
This module contains secondary database objects that contain data derived from the logged messages
"""
def initDerivedDataTables():
DBSystem.ensure_table()
DBTask.ensure_table()
DBTopic.ensure_table()
DBSession.ensure_table()
DBStudent.ensure_table()
DBClass.ensure_table()
DBStudentModel.ensure_table()
DBClassModel.ensure_table()
DBStudentAlias.ensure_table()
DBKCTaskAssociations.ensure_table()
DBAssistmentsItem.ensure_table()
DBClasssAlias.ensure_table()
DBLoggedMessage.ensure_table()
DBCalendarData.ensure_table()
@DBObject(table_name="Systems")
class DBSystem(object):
uuid = Field('00000000-0000-0000-0000-000000000000')
ids = Field(list)
name = Field('')
contactEmails = Field(list)
description = Field('')
metadata = Field(dict)
tasks = Field(list)
baseURL = Field('')
authoringURL = Field('')
taskListURL = Field('')
deliveryURL = Field('')
authenticationURL = Field('')
    # Non-persistent fields
taskCache = []
def __repr__(self):
return self.uuid + "|" + str(self.ids) + "|" + self.name + "|" + str(self.contactEmails) + "|" + self.description + "|" + str(self.metadata) + "|" + str(self.tasks) + "|" + self.baseURL + "|" + self.authoringURL + "|" + self.taskListURL + "|" + self.deliveryURL + "|" + self.authenticationURL
def getTasks(self, useCachedValue=False):
if not useCachedValue:
self.taskCache = [DBTask.find_by_index("taskIdIndex", x) for x in self.tasks]
return self.taskCache
def addTasks(self, newTask):
if newTask is None:
return #don't bother adding null values
self.taskCache.append(newTask)
if newTask.id is None:
newTask.save()
self.tasks.append(newTask.id)
class SerializableAssistmentsItem(SuperGlu_Serializable):
#Keys
ITEM_ID_KEY = 'itemId'
PROBLEM_SET_ID_KEY = 'problemSetId'
PROBLEM_SET_NAME_KEY = 'problemSetName'
ASSIGNMENTS_KEY = 'assignments'
ASSIGNMENT_NUMBER_KEY = "assignmentNumber"
def __init__(self, itemId=None, problemSetId=None, problemSetName=None,
assignments=None, assignmentNumber=None, anId=None):
super(SerializableAssistmentsItem, self).__init__(anId)
if assignments is None: assignments = []
self._itemId = itemId
self._problemSetId = problemSetId
self._problemSetName = problemSetName
self._assignmentNumber = assignmentNumber
#list of tuples containing id, name, url
self._assignments = assignments
def getActiveAssignmentURL(self):
if (self._assignmentNumber < len(self._assignments) and
len(self._assignments[self._assignmentNumber]) >= 3):
return self._assignments[self._assignmentNumber][2]
else:
return None
def saveToToken(self):
token = super(SerializableAssistmentsItem, self).saveToToken()
if self._assignmentNumber is not None:
token[self.ASSIGNMENT_NUMBER_KEY] = tokenizeObject(self._assignmentNumber)
if self._itemId is not None:
token[self.ITEM_ID_KEY] = tokenizeObject(self._itemId)
if self._problemSetId is not None:
token[self.PROBLEM_SET_ID_KEY] = tokenizeObject(self._problemSetId)
if self._problemSetName is not None:
token[self.PROBLEM_SET_NAME_KEY] = tokenizeObject(self._problemSetName)
if self._assignments is not None:
token[self.ASSIGNMENTS_KEY] = tokenizeObject(self._assignments)
return token
def initializeFromToken(self, token, context=None):
super(SerializableAssistmentsItem, self).initializeFromToken(token, context)
self._assignmentNumber = untokenizeObject(token.get(self.ASSIGNMENT_NUMBER_KEY, None), context)
self._itemId = untokenizeObject(token.get(self.ITEM_ID_KEY, None), context)
self._problemSetId = untokenizeObject(token.get(self.PROBLEM_SET_ID_KEY, None), context)
self._problemSetName = untokenizeObject(token.get(self.PROBLEM_SET_NAME_KEY, None), context)
self._assignments = untokenizeObject(token.get(self.ASSIGNMENTS_KEY, []), context)
def __repr__(self):
return self._itemId + "|||" + self._problemSetId + "|||" + self._problemSetName + "|||" + str(self._assignments) + "|||" + str(self._assignmentNumber)
@DBObject(table_name="AssistmentsAssignmentItems")
class DBAssistmentsItem(DBSerializable):
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = SerializableAssistmentsItem
_itemId = Field('')
_problemSetId = Field('')
_problemSetName = Field('')
_assignments = Field(list) #list of tuples containing id, name, baseURL
@Index
def itemIdIndex(self):
return self._itemId
def create(self, serializableDBAssismentsAssignment = None):
if serializableDBAssismentsAssignment is not None:
self._itemId = serializableDBAssismentsAssignment._itemId
self._problemSetId = serializableDBAssismentsAssignment._problemSetId
self._problemSetName = serializableDBAssismentsAssignment._problemSetName
self._assignments = serializableDBAssismentsAssignment._assignments
return self
def __repr__(self):
return self._itemId + "|||" + self._problemSetId + "|||" + self._problemSetName + "|||" + str(self._assignments)
def toSerializable(self):
result = SerializableAssistmentsItem()
result._itemId = self._itemId
result._problemSetId = self._problemSetId
result._problemSetName = self._problemSetName
result._assignments = self._assignments
return result
def saveToDB(self):
self.save()
class LearningTask(SuperGlu_Serializable):
# Main Keys
TASK_ID_KEY = "taskId"
SYSTEM_KEY = "system"
ALIAS_IDS_KEY = "aliasIds"
NAME_KEY = "name"
DISPLAY_NAME_KEY = "displayName"
KCS_KEY = "kcs"
BASE_URL_KEY = "baseURL"
ASSISTMENTS_ITEM_KEY = "assistmentsItem"
DESCRIPTION_KEY = "description"
CAN_BE_RECOMMENDED_INDIVIDUALLY_KEY = "canBeRecommendedIndividually"
SUBTASKS_KEY ="subtasks"
def __init__(self, taskId=None, aliasIds=None, name=None, displayName=None, description=None,
system=None, subtasks=None, kcs=None, baseURL=None, assistmentsItem=None,
canRecommendIndividually = True, anId=None):
super(LearningTask, self).__init__(anId)
if aliasIds is None: aliasIds = []
if subtasks is None: subtasks = []
if kcs is None: kcs = []
self._taskId = taskId
self._aliasIds = aliasIds
self._name = name
self._displayName = displayName
self._description = description
self._system = system
self._subtasks = subtasks
self._kcs = kcs
self._baseURL = baseURL
self._assistmentsItem = assistmentsItem
self._canBeRecommendedIndividually = canRecommendIndividually
def saveToToken(self):
token = super(LearningTask, self).saveToToken()
if self._taskId is not None:
token[self.TASK_ID_KEY] = tokenizeObject(self._taskId)
if self._aliasIds is not None:
token[self.ALIAS_IDS_KEY] = tokenizeObject(self._aliasIds)
if self._name is not None:
token[self.NAME_KEY] = tokenizeObject(self._name)
if self._displayName is not None:
token[self.DISPLAY_NAME_KEY] = tokenizeObject(self._displayName)
if self._system is not None:
token[self.SYSTEM_KEY] = tokenizeObject(self._system)
        if self._subtasks:
token[self.SUBTASKS_KEY] = tokenizeObject(self._subtasks)
if self._kcs is not None:
token[self.KCS_KEY] = tokenizeObject(self._kcs)
if self._baseURL is not None:
token[self.BASE_URL_KEY] = tokenizeObject(self._baseURL)
if self._assistmentsItem is not None:
token[self.ASSISTMENTS_ITEM_KEY] = tokenizeObject(self._assistmentsItem)
if self._description is not None:
token[self.DESCRIPTION_KEY] = tokenizeObject(self._description)
if self._canBeRecommendedIndividually is not None:
token[self.CAN_BE_RECOMMENDED_INDIVIDUALLY_KEY] = tokenizeObject(self._canBeRecommendedIndividually)
return token
def initializeFromToken(self, token, context=None):
super(LearningTask, self).initializeFromToken(token, context)
self._taskId = untokenizeObject(token.get(self.TASK_ID_KEY, None), context)
self._aliasIds = untokenizeObject(token.get(self.ALIAS_IDS_KEY, []), context)
self._name = untokenizeObject(token.get(self.NAME_KEY, None))
self._displayName = untokenizeObject(token.get(self.DISPLAY_NAME_KEY, None), context)
self._description = untokenizeObject(token.get(self.DESCRIPTION_KEY, None), context)
self._system = untokenizeObject(token.get(self.SYSTEM_KEY, None), context)
self._subtasks = untokenizeObject(token.get(self.SUBTASKS_KEY, []), context)
self._kcs = untokenizeObject(token.get(self.KCS_KEY, []), context)
self._baseURL = untokenizeObject(token.get(self.BASE_URL_KEY, None), context)
self._assistmentsItem = untokenizeObject(token.get(self.ASSISTMENTS_ITEM_KEY, None), context)
self._canBeRecommendedIndividually = untokenizeObject(token.get(self.CAN_BE_RECOMMENDED_INDIVIDUALLY_KEY, True), context)
def toDB(self):
result = DBTask()
result.system = self._system
result.ids = self._aliasIds
result.subtasks = self._subtasks
result.taskId = self._taskId
result.name = self._name
result.displayName = self._displayName
result.kcs = self._kcs
result.baseURL = self._baseURL
result.assistmentsItemCache = self._assistmentsItem
result.description = self._description
result.canBeRecommendedIndividually = self._canBeRecommendedIndividually
return result
def initializeFromDBTask(self, dbTask):
self._taskId = dbTask.taskId
self._aliasIds = dbTask.ids
self._subtasks = dbTask.subtasks
self._name = dbTask.name
self._displayName = dbTask.displayName
self._kcs = dbTask.kcs
self._baseURL = dbTask.baseURL
self._system = dbTask.system
if dbTask.assistmentsItemCache is not None:
self._assistmentsItem = dbTask.assistmentsItemCache.toSerializable()
else:
self._assistmentsItem = None
self._description = dbTask.description
self._canBeRecommendedIndividually = dbTask.canBeRecommendedIndividually
# TODO: Figure out why we need this as such, rather than __str__?
def __repr__(self):
return "taskId:{0}|ids:{1}|subtasks:{2}|name:{3}|kcs:{4}|baseURL:{5}|assistmentItem:{6}|description:{7}|individualRecommend:{8}|displayName:{9}".format(
self._taskId, self._aliasIds, self._subtasks, self._name, self._kcs, self._baseURL,
self._assistmentsItem, self._description, self._canBeRecommendedIndividually, self._displayName)
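# Illustrative sketch (identifiers and URLs are made-up placeholders): building a
# task and persisting it through the DBTask class defined below.
#
#   task = LearningTask(taskId="task-001", name="intro-task", displayName="Intro Task",
#                       kcs=["kc-1"], baseURL="http://example.org/tasks/intro")
#   dbTask = task.toDB()
#   dbTask.saveToDB()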
@DBObject(table_name="Tasks")
class DBTask(DBSerializable):
ids = Field(list)
system = Field('')
subtasks = Field(list)
taskId = Field('')
name = Field('')
displayName = Field('')
kcs = Field(list)
baseURL = Field('')
assistmentsItemId = Field('')
description = Field('')
canBeRecommendedIndividually = Field(True)
assistmentsItemCache = None
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = LearningTask
def create(self, serializableDBTask = None):
logInfo("found DBTask constructor", 5)
if serializableDBTask is not None:
self.taskId = serializableDBTask._taskId
self.system = serializableDBTask._system
self.ids = serializableDBTask._aliasIds
self.subtasks = serializableDBTask._subtasks
self.name = serializableDBTask._name
self.displayName = serializableDBTask._displayName
self.kcs = serializableDBTask._kcs
self.baseURL = serializableDBTask._baseURL
if serializableDBTask._assistmentsItem is not None:
self.assistmentsItemCache = DBSerializable.convert(serializableDBTask._assistmentsItem)
self.assistmentsItemId = serializableDBTask._assistmentsItem.getId()
else:
self.assistmentsItemCache = None
self.assistmentsItemId = None
self.description = serializableDBTask._description
self.canBeRecommendedIndividually = serializableDBTask._canBeRecommendedIndividually
return self
def getAssistementsItem(self, useCachedValue=False):
if not useCachedValue:
logInfo("assistmentItemId={0}".format(self.assistmentsItemId), 6)
if self.assistmentsItemId is not None:
return DBAssistmentsItem.find_one(self.assistmentsItemId)
else:
return None
else:
return self.assistmentsItemCache
def __repr__(self):
return str(self.ids) + "|" + self.name + "|" + str(self.kcs) + "|" + self.baseURL
@Index
def nameIndex(self):
return self.name
@Index
def taskIdIndex(self):
return self.taskId
def toSerializable(self):
if self.assistmentsItemCache is None:
self.assistmentsItemCache = self.getAssistementsItem(True)
result = LearningTask()
result.initializeFromDBTask(self)
return result
def saveToDB(self):
existingTasksWithSameName = DBTask.find_by_index('nameIndex', self.name)
existingTask = None
logInfo("assistmentsItemcacheValue2 = {0}".format(self.assistmentsItemCache), 6)
for possibleExistingTask in existingTasksWithSameName:
if self.ids == possibleExistingTask.ids:
existingTask = possibleExistingTask
if existingTask is None:
logInfo("task with name {0} does not yet exist".format(self.name), 3)
if self.assistmentsItemCache:
self.assistmentsItemCache.saveToDB()
self.assistmentsItemId = self.assistmentsItemCache.id
logInfo("assistmentsItemId = {0}".format(self.assistmentsItemId), 6)
logInfo("assistmentsItemcacheValue4 = {0}".format(self.assistmentsItemCache), 6)
self.save()
            for kc in self.kcs:  # TODO: figure out what to do with these
alias = DBKCTaskAssociations()
alias.kc = kc
alias.taskId = self.id
alias.save()
else:
logInfo("task with name {0} already exists, overwriting".format(self.name), 3)
existingTask.name = self.name
existingTask.displayName = self.displayName
existingTask.ids = self.ids
existingTask.kcs = self.kcs
existingTask.baseURL = self.baseURL
existingTask.description = self.description
existingTask.canBeRecommendedIndividually = self.canBeRecommendedIndividually
            if self.assistmentsItemCache:
                self.assistmentsItemCache.id = existingTask.assistmentsItemId
existingTask.assistmentsItemCache = self.assistmentsItemCache
if existingTask.assistmentsItemCache:
existingTask.assistmentsItemCache.saveToDB()
existingTask.assistmentsItemId = existingTask.assistmentsItemCache.id
logInfo("assistmentsItemcacheValue3 = {0}".format(existingTask.assistmentsItemCache), 6)
logInfo("assistmentsItemId = {0}".format(existingTask.assistmentsItemId), 6)
existingTask.save()
return self.id
@DBObject(table_name="KC_TaskAssociations")
class DBKCTaskAssociations(object):
kc = Field('')
taskId = Field('')
@Index
def kcIndex(self):
return self.kc
@Index
def taskIdIndex(self):
return self.taskId
class SerializableTopic(SuperGlu_Serializable):
# Main Keys
TOPIC_ID_KEY = "topicId"
TOPIC_DESCRIPTION_KEY = "topicDescription"
KC_LIST_KEY = "kcList"
RESOURCE_LIST_KEY = "resourceList"
topicId = ''
description = ''
kcList = []
resourceList = []
def __init__(self, topicId = None, description=None, kcList = None, resourceList = None, anId=None):
super(SerializableTopic, self).__init__(anId)
        if topicId is None:
            topicId = ''
        if kcList is None:
            kcList = []
        if resourceList is None:
            resourceList = []
self.topicId = topicId
self.kcList = kcList
self.description = description
self.resourceList = resourceList
def saveToToken(self):
token = super(SerializableTopic, self).saveToToken()
if self.topicId is not None:
token[self.TOPIC_ID_KEY] = tokenizeObject(self.topicId)
if self.description is not None:
token[self.TOPIC_DESCRIPTION_KEY] = tokenizeObject(self.description)
if self.kcList is not None:
token[self.KC_LIST_KEY] = tokenizeObject(self.kcList)
if self.resourceList is not None:
token[self.RESOURCE_LIST_KEY] = tokenizeObject(self.resourceList)
return token
def initializeFromToken(self, token, context=None):
super(SerializableTopic, self).initializeFromToken(token, context)
self.description = untokenizeObject(token.get(self.TOPIC_DESCRIPTION_KEY, None))
self.topicId = untokenizeObject(token.get(self.TOPIC_ID_KEY, None), context)
self.kcList = untokenizeObject(token.get(self.KC_LIST_KEY, []), context)
self.resourceList = untokenizeObject(token.get(self.RESOURCE_LIST_KEY, []))
def toDB(self):
result = DBTopic()
result.topicId = self.topicId
result.kcList = self.kcList
result.description = self.description
result.resourceList = self.resourceList
return result
def initializeFromDBTopic(self, dbTopic):
self.topicId = dbTopic.topicId
self.kcList = dbTopic.kcList
self.description = dbTopic.description
self.resourceList = dbTopic.resourceList
@DBObject(table_name="Topics")
class DBTopic(DBSerializable):
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = SerializableTopic
topicId = Field('')
description = Field('')
kcList = Field(list)
resourceList = Field(list)
def create(self, serializableTopic = None):
if serializableTopic is not None:
self.kcList = serializableTopic.kcList
self.resourceList = serializableTopic.resourceList
self.topicId = serializableTopic.topicId
self.description = serializableTopic.description
return self
@Index
def topicIdIndex(self):
return self.topicId
def toSerializable(self):
result = SerializableTopic()
        result.initializeFromDBTopic(self)
return result
def saveToDB(self):
self.save()
def __repr__(self):
return str(self.kcList) + "|" + str(self.resourceList)
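# Illustrative sketch (the knowledge-component and resource ids are placeholders):
# persisting a topic through its DB form.
#
#   topic = SerializableTopic(topicId="topic-1", description="Sample topic",
#                             kcList=["kc-1", "kc-2"], resourceList=["task-001"])
#   dbTopic = topic.toDB()
#   dbTopic.saveToDB()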
class SerializableSession(SuperGlu_Serializable):
SESSION_ID_KEY = "sessionId"
STUDENTS_KEY = "students"
SYSTEM_KEY = "system"
TASK_KEY = "task"
ASSIGNMENT_NUMBER_KEY = "assignmentNumber"
START_TIME_KEY = "startTime"
DURATION_KEY = "duration"
END_CONDITION_KEY = "endCondition"
PERFORMANCE_KEY ="performance"
CLASSROOM_ID_KEY = "classroomId"
HINTS_KEY = "hints"
FEEDBACK_KEY = "feedback"
MESSAGE_IDS_KEY = "messageIds"
SOURCE_DATA_N_KEY = "sourceDataN"
SOURCE_DATA_HASH_KEY = "sourceDataHash"
    def __init__(self, sessionId = None, students=None, system = None, task = None, assignmentNumber=0, startTime = None, duration = None, endCondition = None,
                 performance=None, classroomId=None, hints=None, feedback=None, messageIds = None, sourceDataN = -1, sourceDataHash = -1):
        super(SerializableSession, self).__init__(sessionId)
        # guard against the shared-mutable-default-argument pitfall
        if students is None: students = []
        if performance is None: performance = {}
        if hints is None: hints = []
        if feedback is None: feedback = []
        if messageIds is None: messageIds = []
        self.sessionId = sessionId
        self.students = students
        self.system = system
        self.task = task
        self.assignmentNumber = assignmentNumber
        self.startTime = startTime
        self.duration = duration
        self.endCondition = endCondition
        self.performance = performance
        self.classroomId = classroomId
        self.hints = hints
        self.feedback = feedback
        self.messageIds = messageIds
        self.sourceDataN = sourceDataN
        self.sourceDataHash = sourceDataHash
def saveToToken(self):
token = super(SerializableSession, self).saveToToken()
if self.sessionId is not None:
token[self.SESSION_ID_KEY] = tokenizeObject(self.sessionId)
if self.students is not None:
token[self.STUDENTS_KEY] = tokenizeObject(self.students)
if self.system is not None:
token[self.SYSTEM_KEY] = tokenizeObject(self.system)
if self.task is not None:
token[self.TASK_KEY] = tokenizeObject(self.task)
if self.assignmentNumber is not None:
token[self.ASSIGNMENT_NUMBER_KEY] = tokenizeObject(self.assignmentNumber)
if self.startTime is not None:
token[self.START_TIME_KEY] = tokenizeObject(self.startTime)
if self.duration is not None:
token[self.DURATION_KEY] = tokenizeObject(self.duration)
if self.endCondition is not None:
token[self.END_CONDITION_KEY] = tokenizeObject(self.endCondition)
if self.performance is not None:
token[self.PERFORMANCE_KEY] = tokenizeObject(self.performance)
if self.classroomId is not None:
token[self.CLASSROOM_ID_KEY] = tokenizeObject(self.classroomId)
if self.hints is not None:
token[self.HINTS_KEY] = tokenizeObject(self.hints)
if self.feedback is not None:
token[self.FEEDBACK_KEY] = tokenizeObject(self.feedback)
if self.messageIds is not None:
token[self.MESSAGE_IDS_KEY] = tokenizeObject(self.messageIds)
if self.sourceDataN is not None:
token[self.SOURCE_DATA_N_KEY] = tokenizeObject(self.sourceDataN)
if self.sourceDataHash is not None:
token[self.SOURCE_DATA_HASH_KEY] = tokenizeObject(self.sourceDataHash)
return token
def initializeFromToken(self, token, context=None):
super(SerializableSession, self).initializeFromToken(token, context)
self.sessionId = untokenizeObject(token.get(self.SESSION_ID_KEY, None))
        self.students = untokenizeObject(token.get(self.STUDENTS_KEY, []), context)
self.system = untokenizeObject(token.get(self.SYSTEM_KEY, None), context)
self.task = untokenizeObject(token.get(self.TASK_KEY, None))
self.assignmentNumber = untokenizeObject(token.get(self.ASSIGNMENT_NUMBER_KEY, 0), context)
self.startTime = untokenizeObject(token.get(self.START_TIME_KEY, None), context)
self.duration = untokenizeObject(token.get(self.DURATION_KEY, 0), context)
self.endCondition = untokenizeObject(token.get(self.END_CONDITION_KEY, None), context)
self.performance = untokenizeObject(token.get(self.PERFORMANCE_KEY, None), context)
self.classroomId = untokenizeObject(token.get(self.CLASSROOM_ID_KEY, None), context)
self.hints = untokenizeObject(token.get(self.HINTS_KEY, []), context)
self.feedback = untokenizeObject(token.get(self.FEEDBACK_KEY, []), context)
self.messageIds = untokenizeObject(token.get(self.MESSAGE_IDS_KEY, []), context)
self.sourceDataN = untokenizeObject(token.get(self.SOURCE_DATA_N_KEY, None), context)
self.sourceDataHash = untokenizeObject(token.get(self.SOURCE_DATA_HASH_KEY, None), context)
def toDB(self):
result = DBSession()
result.sessionId = self.sessionId
result.students = self.students
        if self.task is not None:
            result.task = self.task._taskId  # the session record stores only the task's id
result.assignmentNumber = self.assignmentNumber
result.startTime = self.startTime
result.duration = self.duration
result.endCondition = self.endCondition
result.performance = self.performance
result.classId = self.classroomId
result.hints = self.hints
result.feedback = self.feedback
result.messageIds = self.messageIds
result.sourceDataN = self.sourceDataN
result.sourceDataHash = self.sourceDataHash
return result
def initializeFromDBSession(self, dbSession):
self.sessionId = dbSession.sessionId
self.students = dbSession.students
self.assignmentNumber = dbSession.assignmentNumber
dbTaskList = DBTask.find_by_index("taskIdIndex", dbSession.task)
if len(dbTaskList) > 0:
self.task = dbTaskList[0].toSerializable()
self.startTime = dbSession.startTime
self.duration = dbSession.duration
self.endCondition = dbSession.endCondition
self.performance = dbSession.performance
self.classroomId = dbSession.classId
self.hints = dbSession.hints
self.feedback = dbSession.feedback
self.messageIds = dbSession.messageIds
self.sourceDataN = dbSession.sourceDataN
self.sourceDataHash = dbSession.sourceDataHash
@DBObject(table_name="Sessions")
class DBSession(DBSerializable):
sessionId = Field('')
students = Field(list)
system = Field('')
task = Field('')
assignmentNumber= Field('')
startTime = Field('')
duration = Field(-1.0)
endCondition = Field('')
performance = Field(dict)
classId = Field('')
hints = Field(list)
feedback = Field(list)
messageIds = Field(list)
sourceDataN = Field(-1)
sourceDataHash = Field(-1)
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = SerializableSession
#Non-persistent Fields
studentCache = []
taskCache = None
def create(self, serializableSession = None):
if serializableSession is not None:
self.sessionId = serializableSession.sessionId
self.students = serializableSession.students
self.system = serializableSession.system
            self.task = serializableSession.task._taskId if serializableSession.task is not None else ''
self.assignmentNumber = serializableSession.assignmentNumber
self.startTime = serializableSession.startTime
self.duration = serializableSession.duration
self.endCondition = serializableSession.endCondition
self.performance = serializableSession.performance
            self.classId = serializableSession.classroomId
self.hints = serializableSession.hints
self.feedback = serializableSession.feedback
self.messageIds = serializableSession.messageIds
self.sourceDataN = serializableSession.sourceDataN
self.sourceDataHash = serializableSession.sourceDataHash
return self
#keeping this method here as an example of how to query based on UUID
@classmethod
    def getSessionFromUUID(cls, sessionId):
return DBSession.find_one(sessionId)
@Index
def SessionIdIndex(self):
return self.sessionId
def getTask(self, useCachedValue = False):
if self.task is None or self.task == '':
return None
if not useCachedValue:
listOfValues = DBTask.find_by_index("taskIdIndex", self.task)
if len(listOfValues) > 0:
self.taskCache = listOfValues[0]
return self.taskCache
def getStudents(self, useCachedValue = False):
if not useCachedValue:
self.studentCache = [DBStudent.find_one(x) for x in self.students]
return self.studentCache
#takes a DBStudent object as an argument
def addStudent(self, newStudent):
if newStudent is None:
return
if newStudent.id in self.students:
return
if newStudent.id is None:
newStudent.save()
self.studentCache.append(newStudent)
self.students.append(newStudent.id)
def setStartTime(self, sTime):
self.startTime = sTime.strftime('%Y-%m-%dT%H:%M:%S.%fZ')
def getStartTime(self):
if(self.startTime != ''):
return datetime.strptime(self.startTime, '%Y-%m-%dT%H:%M:%S.%fZ')
return None
def getPerformance(self, useCachedValue = False):
if not useCachedValue:
self.performance = dict()
if self.task is None or self.startTime is None:
return self.performance
for currentDBStudent in self.students:
self.performance[currentDBStudent] = dict()
kcList = getKCsForAGivenUserAndTask(currentDBStudent, self.task, self.startTime, False)
for kcMessage in kcList:
self.performance[currentDBStudent][kcMessage.object] = kcMessage.result
if kcMessage.id not in self.messageIds:
self.messageIds.append(kcMessage.id)
return self.performance
def getHints(self, useCachedValue = False):
if not useCachedValue:
self.hints = list()
if self.task is None or self.startTime is None:
return self.hints
for currentDBStudent in self.students:
studentHints = getAllHintsForSingleUserAndTask(currentDBStudent, self.task, self.startTime, False)
for currentHint in studentHints:
self.hints.append(currentHint)
if currentHint.id not in self.messageIds:
                        self.messageIds.append(currentHint.id)
return self.hints
def getFeedback(self, useCachedValue = False):
if not useCachedValue:
self.feedback = list()
if self.task is None or self.startTime is None:
return self.feedback
for currentDBStudent in self.students:
studentFeedback = getAllFeedbackForSingleUserAndTask(currentDBStudent, self.task, self.startTime, False)
for currentFeedback in studentFeedback:
self.feedback.append(currentFeedback)
if currentFeedback.id not in self.messageIds:
                        self.messageIds.append(currentFeedback.id)
return self.feedback
def getSourceDataN(self, useCachedValue = False):
if not useCachedValue:
self.sourceDataN = len(self.messageIds)
return self.sourceDataN
def getSourceDataHash(self, useCachedValue = False):
if not useCachedValue:
uuidsAsString = ''.join(self.messageIds)
uuidsAsBytes = uuidsAsString.encode()
self.sourceDataHash = str(hashlib.sha256(uuidsAsBytes).hexdigest())
return self.sourceDataHash
def toSerializable(self):
result = SerializableSession()
result.initializeFromDBSession(self)
return result
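# Illustrative note (sample uuid values are placeholders): the source-data fields act
# as a fingerprint of the messages a session was built from; sourceDataN is the
# message count and sourceDataHash a SHA-256 over the concatenated message ids.
#
#   session = DBSession()
#   session.messageIds = ["uuid-1", "uuid-2"]
#   session.getSourceDataN()      # -> 2
#   session.getSourceDataHash()   # hex digest, recomputed and cached on the object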
class SerializableStudent(SuperGlu_Serializable):
STUDENT_ID_KEY = "studentId"
SESSIONS_KEY = "sessions"
OAUTH_IDS_KEY = "oAuthIds"
STUDENT_MODELS_KEY = "studentModels"
KC_GOALS_KEY = "kcGoals"
studentId = None
sessions = []
oAuthIds = {}
studentModels = {}
kcGoals = {}
def saveToToken(self):
token = super(SerializableStudent, self).saveToToken()
if self.studentId is not None:
token[self.STUDENT_ID_KEY] = tokenizeObject(self.studentId)
        if self.sessions:
            token[self.SESSIONS_KEY] = tokenizeObject(self.sessions)
        if self.oAuthIds:
            token[self.OAUTH_IDS_KEY] = tokenizeObject(self.oAuthIds)
        if self.studentModels:
            token[self.STUDENT_MODELS_KEY] = tokenizeObject(self.studentModels)
        if self.kcGoals:
            token[self.KC_GOALS_KEY] = tokenizeObject(self.kcGoals)
return token
def initializeFromToken(self, token, context=None):
super(SerializableStudent, self).initializeFromToken(token, context)
self.studentId = untokenizeObject(token.get(self.STUDENT_ID_KEY, None))
self.sessions = untokenizeObject(token.get(self.SESSIONS_KEY, []), context)
self.oAuthIds = untokenizeObject(token.get(self.OAUTH_IDS_KEY,{}), context)
self.studentModels = untokenizeObject(token.get(self.STUDENT_MODELS_KEY, {}), context)
self.kcGoals = untokenizeObject(token.get(self.KC_GOALS_KEY, {}))
def toDB(self):
result = DBStudent()
result.studentId = self.studentId
result.sessionIds = [x.id for x in self.sessions]
result.oAuthIds = self.oAuthIds
result.studentModelIds = [x.id for x in self.studentModels]
result.kcGoals = self.kcGoals
return result
def initializeFromDBTask(self, dbStudent):
self.studentId = dbStudent.studentId
self.sessions = [x.toSerializable() for x in dbStudent.getSessions(False)]
self.oAuthIds = dbStudent.oAuthIds
        self.studentModels = dbStudent.getStudentModels()
self.kcGoals = dbStudent.kcGoals
@DBObject(table_name="Students")
class DBStudent (object):
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = SerializableStudent
studentId = Field('')
sessionIds = Field(list)
oAuthIds = Field(dict)
studentModelIds = Field(dict)
kcGoals = Field(dict)
#non-persistant fields
sessionCache = []
# One per each subclass of student model allowed
studentModelCache = {}
@Index
def StudentIdIndex(self):
return self.studentId
def getSessions(self, useCachedValue = False):
if not useCachedValue:
self.sessionCache = [DBSession.find_one(x) for x in self.sessionIds]
return self.sessionCache
def addSession(self, newSession):
if newSession is None:
return
if newSession.sessionId in self.sessionIds:
return
        if newSession.id is None or newSession.id == '':
newSession.save()
self.sessionCache.append(newSession)
self.sessionIds.append(newSession.sessionId)
self.save()
def getStudentModels(self, useCachedValue = False):
if not useCachedValue:
self.studentModelCache = {x:DBStudentModel.find_one(self.studentModelIds[x]) for x in self.studentModelIds.keys()}
return self.studentModelCache
def addStudentModel(self, newStudentModel):
logInfo("Entering DBStudent.addStudentModel", 5)
if newStudentModel is None:
return
        if newStudentModel.id is None or newStudentModel.id == '':
newStudentModel.save()
self.studentModelCache[newStudentModel.id] = newStudentModel
if self.studentModelIds is None or isinstance(self.studentModelIds, list):
self.studentModelIds = {}
self.studentModelIds[newStudentModel.__class__.__name__] = newStudentModel.id
self.save()
def toSerializable(self):
result = SerializableStudent()
result.initializeFromDBTask(self)
return result
@DBObject(table_name="StudentAliases")
class DBStudentAlias (object):
trueId = Field('')
alias = Field('')
@Index
def AliasIndex(self):
return self.alias
def getStudent(self):
student = DBStudent.find_one(self.trueId)
return student
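# Illustrative sketch (the alias value is made up): resolving an alias back to its
# canonical DBStudent record via the alias index, using the same find_by_index
# pattern as elsewhere in this module.
#
#   matches = DBStudentAlias.find_by_index("AliasIndex", "alias-42")
#   if matches:
#       student = matches[0].getStudent()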
@DBObject(table_name="Classes")
class DBClass (object):
ids = Field(list)
name = Field('')
roles = Field(dict)
students = Field(list)
topics = Field(list)
kcs = Field(list)
#TODO: Add schedule
#Non-persistent Fields
studentCache = []
topicsCache = []
def getStudents(self, useCachedValue = False):
if not useCachedValue:
self.studentCache = [DBStudent.find_one(x) for x in self.students]
return self.studentCache
def addStudent(self, newStudent):
if newStudent is None:
return
if newStudent.id is None:
newStudent.save()
self.studentCache.append(newStudent)
self.students.append(newStudent.id)
def getTopics(self, useCachedValue = False):
if not useCachedValue:
self.topicsCache = [DBTopic.find_one(x) for x in self.topics]
return self.topicsCache
def addTopic(self, newTopic):
if newTopic is None:
return
if newTopic.id is None:
newTopic.save()
self.topicsCache.append(newTopic)
self.topics.append(newTopic.id)
@DBObject(table_name="ClassAliases")
class DBClasssAlias:
trueId = Field('')
alias = Field('')
@Index
def Alias2Index(self):
return self.alias
def getClass(self):
clazz = DBClass.find_one(self.trueId)
return clazz
class SerializableStudentModel(SuperGlu_Serializable):
# Main Keys
STUDENT_ID_KEY = "studentId"
KC_MASTERY_KEY = "kcMastery"
_studentId = None
_kcMastery = {}
def saveToToken(self):
token = super(SerializableStudentModel, self).saveToToken()
if self._studentId is not None:
token[self.STUDENT_ID_KEY] = tokenizeObject(self._studentId)
if self._kcMastery is not None:
token[self.KC_MASTERY_KEY] = tokenizeObject(self._kcMastery)
return token
def initializeFromToken(self, token, context=None):
super(SerializableStudentModel, self).initializeFromToken(token, context)
self._studentId = untokenizeObject(token.get(self.STUDENT_ID_KEY, None))
self._kcMastery = untokenizeObject(token.get(self.KC_MASTERY_KEY, {}))
def toDB(self):
result = DBStudentModel()
result.studentId = self._studentId
result.kcMastery = self._kcMastery
return result
def initializeFromDBTask(self, dbTask):
self._studentId = dbTask.studentId
self._kcMastery = dbTask.kcMastery
@DBObject(table_name="StudentModels")
class DBStudentModel (object):
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = SerializableStudentModel
studentId = Field('') #string
kcMastery = Field(dict) #Dictionary<string, float>
studentCache = None #type:DBStudent
@Index
def studentIdIndex(self):
return self.studentId
def getStudent(self, useCachedValue= False):
        if self.studentId != '':
if not useCachedValue:
self.studentCache = DBStudent.find_one(self.studentId)
return self.studentCache
else:
return None
def toSerializable(self):
result = SerializableStudentModel()
result.initializeFromDBTask(self)
return result
def saveToDB(self):#TODO: test before using widely
self.save()
@DBObject(table_name="ClassModels")
class DBClassModel(object):
studentIds = Field(list)
kcMastery = Field(dict)
def getStudents(self, useCachedValue = False):
if not useCachedValue:
            self.studentCache = [DBStudent.find_one(x) for x in self.studentIds]
return self.studentCache
#Owner Type enum:
CLASS_OWNER_TYPE = "class"
STUDENT_OWNER_TYPE = "student"
#Access Permissions enum:
PUBLIC_PERMISSION = "public"
MEMBERS_PERMISSION = "members"
OWNER_ONLY_PERMISSION = "owner only"
class SerializableCalendarData(SuperGlu_Serializable):
# Main Keys
OWNER_ID_KEY = "ownerId"
OWNER_TYPE_KEY = "ownerType"
PERMISSIONS_KEY = "permissions"
CALENDAR_DATA_KEY = "calendarData"
#string
ownerId = None
#string (values = {class, student})
ownerType = None
#string
accessPermissions = None
#ical string
calendarData = None
def getICalObject(self):
return Calendar.from_ical(self.calendarData)
def setICalObject(self, ical):
self.calendarData = ical.to_ical()
def saveToToken(self):
token = super(SerializableCalendarData, self).saveToToken()
if self.ownerId is not None:
token[self.OWNER_ID_KEY] = tokenizeObject(self.ownerId)
if self.ownerType is not None:
token[self.OWNER_TYPE_KEY] = tokenizeObject(self.ownerType)
if self.accessPermissions is not None:
token[self.PERMISSIONS_KEY] = tokenizeObject(self.accessPermissions)
if self.calendarData is not None:
token[self.CALENDAR_DATA_KEY] = tokenizeObject(self.calendarData)
return token
def initializeFromToken(self, token, context=None):
super(SerializableCalendarData, self).initializeFromToken(token, context)
self.ownerId = untokenizeObject(token.get(self.OWNER_ID_KEY, None))
self.ownerType = untokenizeObject(token.get(self.OWNER_TYPE_KEY, None))
self.calendarData = untokenizeObject(token.get(self.CALENDAR_DATA_KEY, None))
self.accessPermissions = untokenizeObject(token.get(self.PERMISSIONS_KEY, None))
def toDB(self):
result = DBCalendarData()
result.ownerId = self.ownerId
result.ownerType = self.ownerType
result.calendarData = self.calendarData
result.accessPermissions = self.accessPermissions
return result
def initializeFromDBCalendarData(self, dbCalendarData):
self.ownerId = dbCalendarData.ownerId
self.ownerType = dbCalendarData.ownerType
self.calendarData = dbCalendarData.calendarData
self.accessPermissions = dbCalendarData.accessPermissions
@DBObject(table_name="CalendarData")
class DBCalendarData(object):
BRIDGE_NAME = GLUDB_BRIDGE_NAME
SOURCE_CLASS = SerializableCalendarData
ownerId = Field('')
ownerType = Field('')
calendarData = Field('')
accessPermissions = Field('')
#transactional storage (for the future)
#list stores tuples containing (date, calendarData)
#calendarHistory = Field(list)
def setCalendarData(self, ownerId=None, ownerType=None, permissions=None, data=None):
if ownerType is None: ownerType = STUDENT_OWNER_TYPE
if permissions is None: permissions = PUBLIC_PERMISSION
self.ownerId = ownerId
self.ownerType = ownerType
self.accessPermissions = permissions
self.calendarData = data
####Place Index data here####
@Index
def ownerIdIndex(self):
return self.ownerId
def toSerializable(self):
result = SerializableCalendarData()
result.initializeFromDBCalendarData(self)
return result
def saveToDB(self):#TODO: test before using widely
self.save()
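# Illustrative sketch: storing an iCal payload for a student-owned calendar.
# `icalText` is a placeholder for serialized iCalendar data, e.g. the output of
# Calendar.to_ical() as used by setICalObject() above.
#
#   cal = DBCalendarData()
#   cal.setCalendarData(ownerId="student-1", ownerType=STUDENT_OWNER_TYPE,
#                       permissions=PUBLIC_PERMISSION, data=icalText)
#   cal.saveToDB()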
|
py | b4172a96229d2e04c3c3e28e8a1eacfda82d1b92 | # coding=utf-8
# Copyright 2014 International Business Machines Corporation
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
"""
Ironic console utilities.
"""
import os
import subprocess
import time
from oslo_concurrency import processutils
from oslo_config import cfg
from oslo_log import log as logging
from oslo_service import loopingcall
from oslo_utils import netutils
from ironic.common import exception
from ironic.common.i18n import _
from ironic.common.i18n import _LW
from ironic.common import utils
opts = [
cfg.StrOpt('terminal',
default='shellinaboxd',
help=_('Path to serial console terminal program')),
cfg.StrOpt('terminal_cert_dir',
help=_('Directory containing the terminal SSL cert(PEM) for '
'serial console access')),
cfg.StrOpt('terminal_pid_dir',
help=_('Directory for holding terminal pid files. '
'If not specified, the temporary directory '
'will be used.')),
cfg.IntOpt('subprocess_checking_interval',
default=1,
help=_('Time interval (in seconds) for checking the status of '
'console subprocess.')),
cfg.IntOpt('subprocess_timeout',
default=10,
help=_('Time (in seconds) to wait for the console subprocess '
'to start.')),
]
CONF = cfg.CONF
CONF.register_opts(opts, group='console')
LOG = logging.getLogger(__name__)
def _get_console_pid_dir():
"""Return the directory for the pid file."""
return CONF.console.terminal_pid_dir or CONF.tempdir
def _ensure_console_pid_dir_exists():
"""Ensure that the console PID directory exists
Checks that the directory for the console PID file exists
and if not, creates it.
:raises: ConsoleError if the directory doesn't exist and cannot be created
"""
dir = _get_console_pid_dir()
if not os.path.exists(dir):
try:
os.makedirs(dir)
except OSError as exc:
msg = (_("Cannot create directory '%(path)s' for console PID file."
" Reason: %(reason)s.") % {'path': dir, 'reason': exc})
LOG.error(msg)
raise exception.ConsoleError(message=msg)
def _get_console_pid_file(node_uuid):
"""Generate the pid file name to hold the terminal process id."""
pid_dir = _get_console_pid_dir()
name = "%s.pid" % node_uuid
path = os.path.join(pid_dir, name)
return path
def _get_console_pid(node_uuid):
"""Get the terminal process id from pid file."""
pid_path = _get_console_pid_file(node_uuid)
try:
with open(pid_path, 'r') as f:
pid_str = f.readline()
return int(pid_str)
except (IOError, ValueError):
raise exception.NoConsolePid(pid_path=pid_path)
def _stop_console(node_uuid):
"""Close the serial console for a node
Kills the console process and deletes the PID file.
:param node_uuid: the UUID of the node
:raises: NoConsolePid if no console PID was found
:raises: processutils.ProcessExecutionError if unable to stop the process
"""
try:
console_pid = _get_console_pid(node_uuid)
# Allow exitcode 99 (RC_UNAUTHORIZED)
utils.execute('kill', str(console_pid), check_exit_code=[0, 99])
finally:
utils.unlink_without_raise(_get_console_pid_file(node_uuid))
def make_persistent_password_file(path, password):
"""Writes a file containing a password until deleted."""
try:
utils.delete_if_exists(path)
with open(path, 'wb') as file:
os.chmod(path, 0o600)
file.write(password.encode())
return path
except Exception as e:
utils.delete_if_exists(path)
raise exception.PasswordFileFailedToCreate(error=e)
def get_shellinabox_console_url(port):
"""Get a url to access the console via shellinaboxd.
:param port: the terminal port for the node.
"""
console_host = CONF.my_ip
if netutils.is_valid_ipv6(console_host):
console_host = '[%s]' % console_host
scheme = 'https' if CONF.console.terminal_cert_dir else 'http'
return '%(scheme)s://%(host)s:%(port)s' % {'scheme': scheme,
'host': console_host,
'port': port}
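# Example (illustrative only) of what get_shellinabox_console_url() above returns:
# with CONF.my_ip = '192.0.2.10', no terminal_cert_dir and port 8023 it yields
# 'http://192.0.2.10:8023'; an IPv6 host is bracketed, e.g. 'http://[2001:db8::1]:8023'.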
def start_shellinabox_console(node_uuid, port, console_cmd):
"""Open the serial console for a node.
:param node_uuid: the uuid for the node.
:param port: the terminal port for the node.
:param console_cmd: the shell command that gets the console.
:raises: ConsoleError if the directory for the PID file cannot be created.
:raises: ConsoleSubprocessFailed when invoking the subprocess failed.
"""
# make sure that the old console for this node is stopped
# and the files are cleared
try:
_stop_console(node_uuid)
except exception.NoConsolePid:
pass
except processutils.ProcessExecutionError as exc:
LOG.warning(_LW("Failed to kill the old console process "
"before starting a new shellinabox console "
"for node %(node)s. Reason: %(err)s"),
{'node': node_uuid, 'err': exc})
_ensure_console_pid_dir_exists()
pid_file = _get_console_pid_file(node_uuid)
# put together the command and arguments for invoking the console
args = []
args.append(CONF.console.terminal)
if CONF.console.terminal_cert_dir:
args.append("-c")
args.append(CONF.console.terminal_cert_dir)
else:
args.append("-t")
args.append("-p")
args.append(str(port))
args.append("--background=%s" % pid_file)
args.append("-s")
args.append(console_cmd)
# run the command as a subprocess
try:
LOG.debug('Running subprocess: %s', ' '.join(args))
# use pipe here to catch the error in case shellinaboxd
# failed to start.
obj = subprocess.Popen(args,
stdout=subprocess.PIPE,
stderr=subprocess.PIPE)
except (OSError, ValueError) as e:
error = _("%(exec_error)s\n"
"Command: %(command)s") % {'exec_error': str(e),
'command': ' '.join(args)}
LOG.warning(error)
raise exception.ConsoleSubprocessFailed(error=error)
def _wait(node_uuid, popen_obj):
locals['returncode'] = popen_obj.poll()
# check if the console pid is created.
# if it is, then the shellinaboxd is invoked successfully as a daemon.
# otherwise check the error.
if locals['returncode'] is not None:
if locals['returncode'] == 0 and os.path.exists(pid_file):
raise loopingcall.LoopingCallDone()
else:
(stdout, stderr) = popen_obj.communicate()
locals['errstr'] = _(
"Command: %(command)s.\n"
"Exit code: %(return_code)s.\n"
"Stdout: %(stdout)r\n"
"Stderr: %(stderr)r") % {
'command': ' '.join(args),
'return_code': locals['returncode'],
'stdout': stdout,
'stderr': stderr}
LOG.warning(locals['errstr'])
raise loopingcall.LoopingCallDone()
if (time.time() > expiration):
locals['errstr'] = _("Timeout while waiting for console subprocess"
"to start for node %s.") % node_uuid
LOG.warning(locals['errstr'])
raise loopingcall.LoopingCallDone()
locals = {'returncode': None, 'errstr': ''}
expiration = time.time() + CONF.console.subprocess_timeout
timer = loopingcall.FixedIntervalLoopingCall(_wait, node_uuid, obj)
timer.start(interval=CONF.console.subprocess_checking_interval).wait()
if locals['errstr']:
raise exception.ConsoleSubprocessFailed(error=locals['errstr'])
def stop_shellinabox_console(node_uuid):
"""Close the serial console for a node.
:param node_uuid: the UUID of the node
:raises: ConsoleError if unable to stop the console process
"""
try:
_stop_console(node_uuid)
except exception.NoConsolePid:
LOG.warning(_LW("No console pid found for node %s while trying to "
"stop shellinabox console."), node_uuid)
except processutils.ProcessExecutionError as exc:
msg = (_("Could not stop the console for node '%(node)s'. "
"Reason: %(err)s.") % {'node': node_uuid, 'err': exc})
raise exception.ConsoleError(message=msg)
|
py | b4172bf20508eef96c8962e52ab92ea9ddbad183 | import re
import RPi.GPIO as GPIO
import time
import requests
import subprocess
from py_irsend import irsend
WORDS = ["TURN ON TV", "TURN ON THE TV", "TV ON", "TV", "TELEVISION"]
def handle(text, mic, profile):
irsend.send_once('/home/pi/lircd.conf', ['KEY_POWER'])
message = "turning on television"
mic.say(message)
payload = {'status':'tv on'}
r = requests.post("http://178.128.62.29/api/device/tvon", params=payload)
def isValid(text):
## return bool(re.search(r'\b(turn on tv|turn on the tv|tv on)\b', text, re.IGNORECASE))
return any(word in text.upper() for word in WORDS)
|
py | b4172d8721a34fa8e25c6d6ee333452b6b6fed97 | # https://deeplearningcourses.com/c/machine-learning-in-python-random-forest-adaboost
# https://www.udemy.com/machine-learning-in-python-random-forest-adaboost
from __future__ import print_function, division
from builtins import range, input
# Note: you may need to update your version of future
# sudo pip install -U future
import numpy as np
import matplotlib.pyplot as plt
from sklearn.tree import DecisionTreeClassifier, DecisionTreeRegressor
def plot_decision_boundary(X, model):
h = .02 # step size in the mesh
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
# Plot the decision boundary. For that, we will assign a color to each
    # point in the mesh [x_min, x_max]x[y_min, y_max].
Z = model.predict(np.c_[xx.ravel(), yy.ravel()])
# Put the result into a color plot
Z = Z.reshape(xx.shape)
plt.contour(xx, yy, Z, cmap=plt.cm.Paired)
class BaggedTreeRegressor:
def __init__(self, n_estimators, max_depth=None):
self.B = n_estimators
self.max_depth = max_depth
def fit(self, X, Y):
N = len(X)
self.models = []
for b in range(self.B):
idx = np.random.choice(N, size=N, replace=True)
Xb = X[idx]
Yb = Y[idx]
model = DecisionTreeRegressor(max_depth=self.max_depth)
model.fit(Xb, Yb)
self.models.append(model)
def predict(self, X):
predictions = np.zeros(len(X))
for model in self.models:
predictions += model.predict(X)
return predictions / self.B
def score(self, X, Y):
d1 = Y - self.predict(X)
d2 = Y - Y.mean()
return 1 - d1.dot(d1) / d2.dot(d2)
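# Illustrative usage sketch; the data here is synthetic noise around a sine curve,
# purely to exercise the fit/score interface.
#
#   X = np.linspace(0, 2 * np.pi, 200).reshape(-1, 1)
#   Y = np.sin(X).ravel() + 0.1 * np.random.randn(200)
#   model = BaggedTreeRegressor(n_estimators=50, max_depth=4)
#   model.fit(X, Y)
#   print(model.score(X, Y))   # R^2 on the training data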
class BaggedTreeClassifier:
def __init__(self, n_estimators, max_depth=None):
self.B = n_estimators
self.max_depth = max_depth
def fit(self, X, Y):
N = len(X)
self.models = []
for b in range(self.B):
idx = np.random.choice(N, size=N, replace=True)
Xb = X[idx]
Yb = Y[idx]
model = DecisionTreeClassifier(max_depth=self.max_depth)
model.fit(Xb, Yb)
self.models.append(model)
def predict(self, X):
# no need to keep a dictionary since we are doing binary classification
predictions = np.zeros(len(X))
for model in self.models:
predictions += model.predict(X)
return np.round(predictions / self.B)
def score(self, X, Y):
P = self.predict(X)
return np.mean(Y == P)
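# Illustrative usage sketch; assumes a 2-D feature matrix X of shape (N, 2) and
# binary labels Y in {0, 1}, which is what plot_decision_boundary expects.
#
#   clf = BaggedTreeClassifier(n_estimators=50, max_depth=3)
#   clf.fit(X, Y)
#   print(clf.score(X, Y))
#   plot_decision_boundary(X, clf)
#   plt.scatter(X[:, 0], X[:, 1], c=Y)
#   plt.show()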
|
py | b4172d8c9333ff70aba5f8187667fa9a2f66db06 | from collections import defaultdict
from typing import List
from tool.runners.python import SubmissionPy
from copy import copy
class CocoSubmission(SubmissionPy):
def run(self, s):
"""
:param s: input in string format
:return: solution flag
"""
# Your code goes here
# parse
lines: List[str] = s.strip().split("\n")
recipes = []
all_ingredients = set()
for l in lines:
ingredients, allergens = l.split(" (contains ")
ingredients = ingredients.split()
all_ingredients.update(ingredients)
allergens = allergens.rstrip(")").split(", ")
recipes.append((ingredients, allergens))
allergens_in = defaultdict(list)
for (ingredients, allergens) in recipes:
for allergen in allergens:
allergens_in[allergen].append(set(ingredients))
# take the intersection
possible_ingredients_by_allergen = dict()
for allergen in allergens_in:
possible_ingredients_by_allergen[allergen] = set.intersection(*allergens_in[allergen])
possible_ingredients_with_allergen = set.union(*possible_ingredients_by_allergen.values())
ingredients_without_allergen = set()
for ingredient in all_ingredients:
if ingredient not in possible_ingredients_with_allergen:
ingredients_without_allergen.add(ingredient)
remaining_allergens = set(possible_ingredients_by_allergen.keys())
while remaining_allergens:
for allergen in copy(remaining_allergens):
if len(possible_ingredients_by_allergen[allergen]) == 1:
ing = list(possible_ingredients_by_allergen[allergen])[0]
remaining_allergens.remove(allergen)
for a2 in possible_ingredients_by_allergen:
if a2 != allergen:
possible_ingredients_by_allergen[a2].discard(ing)
# now we have the definitive list, build the output
return ",".join(
list(possible_ingredients_by_allergen[allergen])[0] for allergen in sorted(list(possible_ingredients_by_allergen.keys())))
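# Note: run() returns the canonical dangerous-ingredient list, i.e. the ingredient
# matched to each allergen, ordered by allergen name and joined with commas
# (for the sample input below this is "mxmxvkd,sqjhc,fvjkl").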
def test_coco():
assert CocoSubmission().run("""mxmxvkd kfcds sqjhc nhms (contains dairy, fish)
trh fvjkl sbzzf mxmxvkd (contains dairy)
sqjhc fvjkl (contains soy)
sqjhc mxmxvkd sbzzf (contains fish)""") == "mxmxvkd,sqjhc,fvjkl"
|
py | b4172e57a65594d9ccf7d4d6981fe24889bbb196 | n = int (raw_input ())
count = 0
k = pow (n, 2)
for j in range (k / 2) :
print j + 1,
print k - j,
count += 2
if count == n :
count = 0
print
|
py | b4172e719177d74ab37741ecec4f574d80d97aad | # -*- coding: utf-8 -*-
"""
flaskbb.utils.populate
~~~~~~~~~~~~~~~~~~~~~~
A module that makes creating data more easily
:copyright: (c) 2014 by the FlaskBB Team.
:license: BSD, see LICENSE for more details.
"""
from __future__ import unicode_literals
import collections
import logging
import os
from flask import current_app
from sqlalchemy_utils.functions import create_database, database_exists
from alembic.util.exc import CommandError
from flaskbb.extensions import alembic, db
from flaskbb.forum.models import Category, Forum, Post, Topic
from flaskbb.management.models import Setting, SettingsGroup
from flaskbb.user.models import Group, User
logger = logging.getLogger(__name__)
def delete_settings_from_fixture(fixture):
"""Deletes the settings from a fixture from the database.
Returns the deleted groups and settings.
:param fixture: The fixture that should be deleted.
"""
deleted_settings = {}
for settingsgroup in fixture:
group = SettingsGroup.query.filter_by(key=settingsgroup[0]).first()
deleted_settings[group] = []
for settings in settingsgroup[1]["settings"]:
setting = Setting.query.filter_by(key=settings[0]).first()
if setting:
deleted_settings[group].append(setting)
setting.delete()
group.delete()
return deleted_settings
def create_settings_from_fixture(fixture):
"""Inserts the settings from a fixture into the database.
Returns the created groups and settings.
:param fixture: The fixture which should inserted.
"""
created_settings = {}
for settingsgroup in fixture:
group = SettingsGroup(
key=settingsgroup[0],
name=settingsgroup[1]["name"],
description=settingsgroup[1]["description"]
)
group.save()
created_settings[group] = []
for settings in settingsgroup[1]["settings"]:
setting = Setting(
key=settings[0],
value=settings[1]["value"],
value_type=settings[1]["value_type"],
name=settings[1]["name"],
description=settings[1]["description"],
extra=settings[1].get("extra", ""), # Optional field
settingsgroup=group.key
)
if setting:
setting.save()
created_settings[group].append(setting)
return created_settings
def update_settings_from_fixture(fixture, overwrite_group=False,
overwrite_setting=False):
"""Updates the database settings from a fixture.
Returns the updated groups and settings.
:param fixture: The fixture which should be inserted/updated.
:param overwrite_group: Set this to ``True`` if you want to overwrite
the group if it already exists.
Defaults to ``False``.
:param overwrite_setting: Set this to ``True`` if you want to overwrite the
setting if it already exists.
Defaults to ``False``.
"""
updated_settings = collections.defaultdict(list)
for settingsgroup in fixture:
group = SettingsGroup.query.filter_by(key=settingsgroup[0]).first()
if (group is not None and overwrite_group) or group is None:
if group is not None:
group.name = settingsgroup[1]["name"]
group.description = settingsgroup[1]["description"]
else:
group = SettingsGroup(
key=settingsgroup[0],
name=settingsgroup[1]["name"],
description=settingsgroup[1]["description"]
)
group.save()
for settings in settingsgroup[1]["settings"]:
setting = Setting.query.filter_by(key=settings[0]).first()
if setting is not None:
setting_is_different = (
setting.value != settings[1]["value"]
or setting.value_type != settings[1]["value_type"]
or setting.name != settings[1]["name"]
or setting.description != settings[1]["description"]
or setting.extra != settings[1].get("extra", "")
or setting.settingsgroup != group.key
)
if (
setting is not None and
overwrite_setting and
setting_is_different
) or setting is None:
if setting is not None:
setting.value = settings[1]["value"]
setting.value_type = settings[1]["value_type"]
setting.name = settings[1]["name"]
setting.description = settings[1]["description"]
setting.extra = settings[1].get("extra", "")
setting.settingsgroup = group.key
else:
setting = Setting(
key=settings[0],
value=settings[1]["value"],
value_type=settings[1]["value_type"],
name=settings[1]["name"],
description=settings[1]["description"],
extra=settings[1].get("extra", ""),
settingsgroup=group.key
)
setting.save()
updated_settings[group].append(setting)
return updated_settings
def create_default_settings():
"""Creates the default settings."""
from flaskbb.fixtures.settings import fixture
create_settings_from_fixture(fixture)
def create_default_groups():
"""This will create the 5 default groups."""
from flaskbb.fixtures.groups import fixture
result = []
for key, value in fixture.items():
group = Group(name=key)
for k, v in value.items():
setattr(group, k, v)
group.save()
result.append(group)
return result
def create_user(username, password, email, groupname):
"""Creates a user.
Returns the created user.
:param username: The username of the user.
:param password: The password of the user.
:param email: The email address of the user.
:param groupname: The name of the group to which the user
should belong to.
"""
if groupname == "member":
group = Group.get_member_group()
else:
group = Group.query.filter(getattr(Group, groupname) == True).first()
user = User.create(username=username, password=password, email=email,
primary_group_id=group.id, activated=True)
return user
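# Illustrative sketch; the credentials are placeholders, and the call assumes the
# default groups exist and that Group exposes an `admin` boolean column (as in the
# default groups fixture):
#
#   admin = create_user(username="admin", password="secret",
#                       email="admin@example.org", groupname="admin")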
def update_user(username, password, email, groupname):
"""Update an existing user.
Returns the updated user.
:param username: The username of the user.
:param password: The password of the user.
:param email: The email address of the user.
:param groupname: The name of the group to which the user
should belong to.
"""
user = User.query.filter_by(username=username).first()
if user is None:
return None
if groupname == "member":
group = Group.get_member_group()
else:
group = Group.query.filter(getattr(Group, groupname) == True).first()
user.password = password
user.email = email
user.primary_group = group
return user.save()
def create_welcome_forum():
"""This will create the `welcome forum` with a welcome topic.
Returns True if it's created successfully.
"""
if User.query.count() < 1:
return False
user = User.query.filter_by(id=1).first()
category = Category(title="My Category", position=1)
category.save()
forum = Forum(title="Welcome", description="Your first forum",
category_id=category.id)
forum.save()
topic = Topic(title="Welcome!")
post = Post(content="Have fun with your new FlaskBB Forum!")
topic.save(user=user, forum=forum, post=post)
return True
def create_test_data(users=5, categories=2, forums=2, topics=1, posts=1):
"""Creates 5 users, 2 categories and 2 forums in each category.
It also creates a new topic topic in each forum with a post.
Returns the amount of created users, categories, forums, topics and posts
as a dict.
:param users: The number of users.
:param categories: The number of categories.
:param forums: The number of forums which are created in each category.
:param topics: The number of topics which are created in each forum.
:param posts: The number of posts which are created in each topic.
"""
create_default_groups()
create_default_settings()
data_created = {'users': 0, 'categories': 0, 'forums': 0,
'topics': 0, 'posts': 0}
# create 5 users
for u in range(1, users + 1):
username = "test%s" % u
email = "test%[email protected]" % u
user = User(username=username, password="test", email=email)
user.primary_group_id = u
user.activated = True
user.save()
data_created['users'] += 1
user1 = User.query.filter_by(id=1).first()
user2 = User.query.filter_by(id=2).first()
# lets send them a few private messages
for i in range(1, 3):
# TODO
pass
# create 2 categories
for i in range(1, categories + 1):
category_title = "Test Category %s" % i
category = Category(title=category_title,
description="Test Description")
category.save()
data_created['categories'] += 1
# create 2 forums in each category
for j in range(1, forums + 1):
if i == 2:
j += 2
forum_title = "Test Forum %s %s" % (j, i)
forum = Forum(title=forum_title, description="Test Description",
category_id=i)
forum.save()
data_created['forums'] += 1
for t in range(1, topics + 1):
# create a topic
topic = Topic(title="Test Title %s" % j)
post = Post(content="Test Content")
topic.save(post=post, user=user1, forum=forum)
data_created['topics'] += 1
for p in range(1, posts + 1):
# create a second post in the forum
post = Post(content="Test Post")
post.save(user=user2, topic=topic)
data_created['posts'] += 1
return data_created
def insert_bulk_data(topic_count=10, post_count=100):
"""Creates a specified number of topics in the first forum with
each topic containing a specified amount of posts.
Returns the number of created topics and posts.
:param topics: The amount of topics in the forum.
:param posts: The number of posts in each topic.
"""
user1 = User.query.filter_by(id=1).first()
user2 = User.query.filter_by(id=2).first()
forum = Forum.query.filter_by(id=1).first()
last_post = Post.query.order_by(Post.id.desc()).first()
last_post_id = 1 if last_post is None else last_post.id
created_posts = 0
created_topics = 0
posts = []
if not (user1 or user2 or forum):
return False
db.session.begin(subtransactions=True)
for i in range(1, topic_count + 1):
last_post_id += 1
# create a topic
topic = Topic(title="Test Title %s" % i)
post = Post(content="First Post")
topic.save(post=post, user=user1, forum=forum)
created_topics += 1
# create some posts in the topic
for j in range(1, post_count + 1):
last_post_id += 1
post = Post(content="Some other Post", user=user2, topic=topic.id)
topic.last_updated = post.date_created
topic.post_count += 1
# FIXME: Is there a way to ignore IntegrityErrors?
# At the moment, the first_post_id is also the last_post_id.
# This does no harm, except that in the forums view, you see
# the information for the first post instead of the last one.
# I run a little benchmark:
# 5.3643078804 seconds to create 100 topics and 10000 posts
# Using another method (where data integrity is ok) I benchmarked
# these stats:
# 49.7832770348 seconds to create 100 topics and 10000 posts
# Uncomment the line underneath and the other line to reduce
# performance but fixes the above mentioned problem.
# topic.last_post_id = last_post_id
created_posts += 1
posts.append(post)
# uncomment this and delete the one below, also uncomment the
# topic.last_post_id line above. This will greatly reduce the
# performance.
# db.session.bulk_save_objects(posts)
db.session.bulk_save_objects(posts)
# and finally, lets update some stats
forum.recalculate(last_post=True)
user1.recalculate()
user2.recalculate()
return created_topics, created_posts
def create_latest_db(target="default@head"):
"""Creates the database including the schema using SQLAlchemy's
db.create_all method instead of going through all the database revisions.
The revision will be set to 'head' which indicates the latest alembic
revision.
:param target: The target branch. Defaults to 'default@head'.
"""
if not database_exists(db.engine.url):
create_database(db.engine.url)
db.create_all()
alembic.stamp(target=target)
def run_plugin_migrations(plugins=None):
"""Runs the migrations for a list of plugins.
:param plugins: A iterable of plugins to run the migrations for. If set
to ``None``, all external plugin migrations will be run.
"""
if plugins is None:
plugins = current_app.pluggy.get_external_plugins()
for plugin in plugins:
plugin_name = current_app.pluggy.get_name(plugin)
if not os.path.exists(os.path.join(plugin.__path__[0], "migrations")):
logger.debug("No migrations found for plugin %s" % plugin_name)
continue
try:
alembic.upgrade(target="{}@head".format(plugin_name))
except CommandError as exc:
logger.debug("Couldn't run migrations for plugin {} because of "
"following exception: ".format(plugin_name),
exc_info=exc)
|
py | b417305472d9ae1ffcd3990cb4fb359a0ecfa20c | from validateJWT import InvalidAuthorizationToken, validateJWT
#from flask import Flask, jsonify, abort, make_response
from flask import request, Response
from expiringdict import ExpiringDict
import json
import storageBlobService
import appSecrets
import sys
sys.path.insert(0, '..') # needed as common is in the parent folder
import storageFileService
import json
import logging
import common
from loggingBase import clsLoggingBase
MX_NUM_USER=1000
MX_TOKEN_AGE=300 # seconds, 5 minutes
class securityImpl(clsLoggingBase):
"""
This class acts as the controller of everything related to security.
Single instance of this class should be created
"""
def __init__(self):
super().__init__(__name__)
self.jwtValidator = validateJWT()
self.valid_audiences = [appSecrets.ClientId, appSecrets.serviceIdentifierUri]
#self.ClientId = appSecrets.ClientId
#self.ClientSecret = appSecrets.ClientSecret
#self.TenantId = appSecrets.TenantId
self.userIdCache = ExpiringDict(max_len=MX_NUM_USER, max_age_seconds=MX_TOKEN_AGE)
self.storageObject = storageBlobService.StorageBlobServiceWrapper(appSecrets.KV_Storage_AccountName)
self.storageFileObject = storageFileService.storageFileService(appSecrets.KV_Storage_AccountName)
self.storageKeyLoaded = False
return
def get_StorageObject(self):
return self.storageObject
def get_fileStorageObject(self):
return self.storageFileObject
def validateRequest(self,request):
super().getLoggingObj().debug("validateRequest")
bRV = False
response = None
scopeTest = 'user_impersonation'
# first ask the jwt to validate that the request contains correct Bearer token
btempRV, bearerToken, decodedToken = self.jwtValidator.validate_request(request)
if (btempRV and bearerToken and decodedToken):
# further validation
if (decodedToken['aud'] in self.valid_audiences): # audience should include our instance
if (decodedToken['scp'] == scopeTest): # for the user_impersonation, this value should be present
# assume ['oid'] value is present in our cache
if (decodedToken['oid'] not in self.userIdCache):
# indicates that we've not seen this user before.
# Validate that he/she has
# been authorised to access the KeyVault APIs
btempRV, response = self.validateUserCredentials(bearerToken)
if (btempRV):
                            # add into our cache, keyed by the user's object id ('oid')
                            self.userIdCache[decodedToken['oid']] = bearerToken
# If our Storage API Keys are not loaded, now is the time to load them
# Remember, this is executed only once for the first authenticated/authorised user
if (self.storageKeyLoaded == False):
access_token = json.loads(response.text)
# Assume that our storage access was not created and create it
storage_key_list, response = self.getStorageKeySecret(access_token)
# Also creates the storage service internally
if (storage_key_list):
if (len(storage_key_list) > 0 ):
#print(storage_key_list[0])
self.storageObject.set_storageKey(storage_key_list[0])
self.storageFileObject.set_storageKey(storage_key_list[0])
self.storageKeyLoaded = True
bRV = True # set this to true now.
else:
bRV = True
else:
response = Response('Unauthorized', 401, {'Content-Type': 'text/html', 'WWW-Authenticate': 'Invalid Scope'})
else:
response = Response('Unauthorized', 401, {'Content-Type': 'text/html', 'WWW-Authenticate': 'Invalid Audience'})
else:
response = Response('Unauthorized', 401, {'Content-Type': 'text/html', 'WWW-Authenticate': 'Bearer Token, Decoded Token security error'})
return bRV, response
def validateUserCredentials(self,bearerToken) :
super().getLoggingObj().debug("validateUserCredentials")
bRV = False
r = None
try:
r = self.get_token_with_authorization_code(bearerToken)
if (r.status_code >= 200 and r.status_code < 300):
bRV = True
        except KeyVaultErrorException as kex:
            # the specific Key Vault exception must come before the broad
            # Exception handler, otherwise this clause is unreachable
            r = Response('Unauthorized', 401, {'Content-Type': 'text/html', 'WWW-Authenticate': kex})
        except Exception as ex:
            r = Response('Unauthorized', 401, {'Content-Type': 'text/html', 'WWW-Authenticate': ex})
return bRV, r
def get_token_with_authorization_code(self, bearerToken):
super().getLoggingObj().debug("get_token_with_authorization_code")
import requests
resp = None
# construct our Azure AD obo message
grant_type= 'urn:ietf:params:oauth:grant-type:jwt-bearer'
resourceKeyVault ="https://vault.azure.net"
requested_token_use= 'on_behalf_of'
scope='openid'
headers = {'content-type': 'application/x-www-form-urlencoded'}
# Working example
params = {
'grant_type': grant_type,
'client_id': appSecrets.ClientId,
'client_secret' : appSecrets.ClientSecret,
'resource': resourceKeyVault,
'requested_token_use': requested_token_use,
'scope': scope,
'assertion': bearerToken
}
URL = 'https://login.microsoftonline.com/{0}/oauth2/token'.format(appSecrets.TenantId)
resp = requests.post(URL, headers=headers, data=params)
return resp
def getStorageKeySecret(self,token_credentials):
super().getLoggingObj().debug("getStorageKeySecret")
from msrestazure.azure_active_directory import AADTokenCredentials
resourceKeyVault ="https://vault.azure.net"
secret_bundle = []
try:
credentials = AADTokenCredentials(
token = token_credentials,
client_id = appSecrets.ClientId,
tenant = appSecrets.TenantId,
resource = resourceKeyVault
)
from azure.keyvault import KeyVaultClient, KeyVaultAuthentication
from azure.keyvault.models import KeyVaultErrorException
#Works the following
kvAuth = KeyVaultAuthentication(credentials=credentials)
client = KeyVaultClient(kvAuth)
# Following will also work, if the WebAPI is given permission to access Key Vault permissions
#client = KeyVaultClient(KeyVaultAuthentication(auth_callback))
rv = client.get_secret( appSecrets.KV_VAULT_URL,
appSecrets.KV_Storage_AccountKeyName,
appSecrets.KV_Storage_SECRET_VERSION)
secret_bundle.append(rv.value)
# first the HOST Value
secret_bundle.append(client.get_secret(appSecrets.KV_VAULT_URL,
appSecrets.KV_COSMOSDB_HOST,
appSecrets.KV_Storage_SECRET_VERSION).value)
# Next the Key value
secret_bundle.append(client.get_secret(appSecrets.KV_VAULT_URL,
appSecrets.KV_COSMOSDB_KEY,
appSecrets.KV_Storage_SECRET_VERSION).value)
# and finally the database value
secret_bundle.append(client.get_secret(appSecrets.KV_VAULT_URL,
appSecrets.KV_COSMOSDB_DATABASE,
appSecrets.KV_Storage_SECRET_VERSION).value)
except KeyVaultErrorException as ex:
print(ex)
rnce = ex.response
return None, rnce
except Exception as eex:
print(eex)
return None, None
return secret_bundle, None
def auth_callback(server, resource, scope):
'''
        This function is not (and should not be) called in normal circumstances; it is only relevant
        for testing that Azure Key Vault access is also granted directly to the Web API application.
'''
from azure.common.credentials import ServicePrincipalCredentials
credentials = ServicePrincipalCredentials(
client_id = appSecrets.ClientId,
secret = appSecrets.ClientSecret,
tenant = appSecrets.TenantId,
resource = resource
)
token = credentials.token
#if __debug__:
# import webbrowser
# url = 'https://jwt.ms/#access_token=' + token['access_token']
# webbrowser.open(url, new=0, autoraise=True)
return token['token_type'], token['access_token']
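# Hedged usage sketch: the class above is intended to be instantiated once and
# shared by the request handlers. The Flask route wiring below is an assumption
# for illustration only; securityImpl and validateRequest come from this module.
#
#     securityCtrl = securityImpl()
#
#     @app.route('/api/files')
#     def list_files():
#         ok, err_response = securityCtrl.validateRequest(request)
#         if not ok:
#             return err_response
#         # authorised: use securityCtrl.get_StorageObject() or
#         # securityCtrl.get_fileStorageObject() to serve the request
#         ...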
|
py | b41731839a4639cf72abf8d60f7d9cc78c1a554b | """
Copyright (C) since 2013 Calliope contributors listed in AUTHORS.
Licensed under the Apache 2.0 License (see LICENSE file).
generate_tables.py
~~~~~~~~~~~~~~~~~~
Parses, extracts and formats default configuration data
for the documentation.
"""
import csv
import ruamel.yaml as yaml
def get_section(commented_map):
""" Returns list of (setting, default, comment) tuples processed
from a YAML section."""
result = []
for k, v in commented_map.items():
comment = commented_map.ca.items[k][2].value.strip("#").strip()
if "¦" in comment:
comment = comment.split("¦")
comment[0] = comment[0].replace("name:", "").strip()
comment[1] = comment[1].replace("unit:", "").strip()
comment[2] = comment[2].strip()
else:
comment = [comment]
# Special case: empty dict gets turned into CommentedMap,
# turn it back
if isinstance(v, yaml.comments.CommentedMap):
v = {}
result.append((k, v, *comment))
return result
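# Illustrative example of what get_section produces (the YAML line is made up,
# not taken verbatim from defaults.yaml): for an entry such as
#     energy_cap_max: .inf  # name: Maximum capacity ¦ unit: kW ¦ Upper limit on installed capacity
# the returned list would contain the tuple
#     ('energy_cap_max', inf, 'Maximum capacity', 'kW', 'Upper limit on installed capacity')
# while a comment without the '¦' separator yields ('key', value, 'whole comment').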
def write_csv(filename, iterable):
with open(filename, "w", newline="", encoding="utf-8") as f:
writer = csv.writer(f)
writer.writerows(iterable)
def process():
with open("../calliope/config/defaults.yaml", "r") as f:
defaults = yaml.round_trip_load(f)
write_csv(
"./user/includes/default_essentials.csv",
get_section(defaults["techs"]["default_tech"]["essentials"]),
)
write_csv(
"./user/includes/default_constraints.csv",
get_section(defaults["techs"]["default_tech"]["constraints"]),
)
write_csv(
"./user/includes/default_costs.csv",
get_section(defaults["techs"]["default_tech"]["costs"]["default_cost"]),
)
write_csv("./user/includes/model_settings.csv", get_section(defaults["model"]))
write_csv("./user/includes/run_settings.csv", get_section(defaults["run"]))
y = yaml.YAML()
for tech_group in defaults["tech_groups"]:
this_group_defaults = {
"essentials": defaults["tech_groups"][tech_group].get("essentials", {}),
"constraints": defaults["tech_groups"][tech_group].get("constraints", {}),
"costs": defaults["tech_groups"][tech_group].get("costs", {}),
}
with open("./user/includes/basetech_{}.yaml".format(tech_group), "w") as f:
f.write(yaml.dump(this_group_defaults, Dumper=yaml.RoundTripDumper))
required_allowed = {
"required_constraints": y.seq(
defaults["tech_groups"][tech_group].get("required_constraints", [])
),
"allowed_constraints": y.seq(
defaults["tech_groups"][tech_group].get("allowed_constraints", [])
),
"allowed_costs": y.seq(
defaults["tech_groups"][tech_group].get("allowed_costs", [])
),
}
with open(
"./user/includes/required_allowed_{}.yaml".format(tech_group), "w"
) as f:
f.write(yaml.dump(required_allowed, indent=4, Dumper=yaml.RoundTripDumper))
|
py | b41731d2d27134a82bedf450eab2e960396ac1ea | # Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, division, print_function, unicode_literals
from collections import OrderedDict
import numpy as np
from astropy.table import Table
from astropy.io import fits
import astropy.units as u
from ..utils.nddata import NDDataArray, BinnedDataAxis
from ..utils.scripts import make_path
__all__ = [
'Background3D',
'Background2D',
]
class Background3D(object):
"""Background 3D.
Data format specification: :ref:`gadf:bkg_3d`
Parameters
-----------
energy_lo, energy_hi : `~astropy.units.Quantity`
Energy binning
detx_lo, detx_hi : `~astropy.units.Quantity`
FOV coordinate X-axis binning
dety_lo, dety_hi : `~astropy.units.Quantity`
FOV coordinate Y-axis binning
data : `~astropy.units.Quantity`
Background rate (usually: ``s^-1 MeV^-1 sr^-1``)
Examples
--------
Here's an example you can use to learn about this class:
>>> from gammapy.irf import Background3D
>>> filename = '$GAMMAPY_EXTRA/datasets/cta-1dc/caldb/data/cta//1dc/bcf/South_z20_50h/irf_file.fits'
>>> bkg_3d = Background3D.read(filename, hdu='BACKGROUND')
>>> print(bkg_3d)
Background3D
NDDataArray summary info
energy : size = 21, min = 0.016 TeV, max = 158.489 TeV
detx : size = 36, min = -5.833 deg, max = 5.833 deg
dety : size = 36, min = -5.833 deg, max = 5.833 deg
Data : size = 27216, min = 0.000 1 / (MeV s sr), max = 0.421 1 / (MeV s sr)
"""
default_interp_kwargs = dict(bounds_error=False, fill_value=None)
"""Default Interpolation kwargs for `~NDDataArray`. Extrapolate."""
def __init__(self, energy_lo, energy_hi,
detx_lo, detx_hi, dety_lo, dety_hi,
data, meta=None, interp_kwargs=None):
if interp_kwargs is None:
interp_kwargs = self.default_interp_kwargs
axes = [
BinnedDataAxis(
energy_lo, energy_hi,
interpolation_mode='log', name='energy'),
BinnedDataAxis(
detx_lo, detx_hi,
interpolation_mode='linear', name='detx'),
BinnedDataAxis(
dety_lo, dety_hi,
interpolation_mode='linear', name='dety'),
]
self.data = NDDataArray(axes=axes, data=data,
interp_kwargs=interp_kwargs)
self.meta = OrderedDict(meta) if meta else OrderedDict()
def __str__(self):
ss = self.__class__.__name__
ss += '\n{}'.format(self.data)
return ss
@classmethod
def from_table(cls, table):
"""Read from `~astropy.table.Table`."""
# Spec says key should be "BKG", but there are files around
# (e.g. CTA 1DC) that use "BGD". For now we support both
if 'BKG' in table.colnames:
bkg_name = 'BKG'
elif 'BGD' in table.colnames:
bkg_name = 'BGD'
else:
raise ValueError('Invalid column names. Need "BKG" or "BGD".')
# Currently some files (e.g. CTA 1DC) contain unit in the FITS file
        # '1/s/MeV/sr', which is invalid (try: astropy.units.Unit('1/s/MeV/sr')).
# This should be corrected.
# For now, we hard-code the unit here:
data_unit = u.Unit('s-1 MeV-1 sr-1')
return cls(
energy_lo=table['ENERG_LO'].quantity[0],
energy_hi=table['ENERG_HI'].quantity[0],
detx_lo=table['DETX_LO'].quantity[0],
detx_hi=table['DETX_HI'].quantity[0],
dety_lo=table['DETY_LO'].quantity[0],
dety_hi=table['DETY_HI'].quantity[0],
data=table[bkg_name].data[0] * data_unit,
meta=table.meta,
)
@classmethod
def from_hdulist(cls, hdulist, hdu='BACKGROUND'):
"""Create from `~astropy.io.fits.HDUList`."""
return cls.from_table(Table.read(hdulist[hdu]))
@classmethod
def read(cls, filename, hdu='BACKGROUND'):
"""Read from file."""
filename = make_path(filename)
with fits.open(str(filename), memmap=False) as hdulist:
bkg = cls.from_hdulist(hdulist, hdu=hdu)
return bkg
def to_table(self):
"""Convert to `~astropy.table.Table`."""
meta = self.meta.copy()
table = Table(meta=meta)
table['DETX_LO'] = self.data.axis('detx').lo[np.newaxis]
table['DETX_HI'] = self.data.axis('detx').hi[np.newaxis]
table['DETY_LO'] = self.data.axis('dety').lo[np.newaxis]
table['DETY_HI'] = self.data.axis('dety').hi[np.newaxis]
table['ENERG_LO'] = self.data.axis('energy').lo[np.newaxis]
table['ENERG_HI'] = self.data.axis('energy').hi[np.newaxis]
table['BKG'] = self.data.data[np.newaxis]
return table
def to_fits(self, name='BACKGROUND'):
"""Convert to `~astropy.io.fits.BinTable`."""
return fits.BinTableHDU(self.to_table(), name=name)
class Background2D(object):
"""Background 2D.
Data format specification: :ref:`gadf:bkg_2d`
Parameters
-----------
energy_lo, energy_hi : `~astropy.units.Quantity`
Energy binning
offset_lo, offset_hi : `~astropy.units.Quantity`
FOV coordinate offset-axis binning
data : `~astropy.units.Quantity`
Background rate (usually: ``s^-1 MeV^-1 sr^-1``)
"""
default_interp_kwargs = dict(bounds_error=False, fill_value=None)
"""Default Interpolation kwargs for `~NDDataArray`. Extrapolate."""
def __init__(self, energy_lo, energy_hi,
offset_lo, offset_hi,
data, meta=None, interp_kwargs=None):
if interp_kwargs is None:
interp_kwargs = self.default_interp_kwargs
axes = [
BinnedDataAxis(
energy_lo, energy_hi,
interpolation_mode='log', name='energy'),
BinnedDataAxis(
offset_lo, offset_hi,
interpolation_mode='linear', name='offset'),
]
self.data = NDDataArray(axes=axes, data=data,
interp_kwargs=interp_kwargs)
self.meta = OrderedDict(meta) if meta else OrderedDict()
def __str__(self):
ss = self.__class__.__name__
ss += '\n{}'.format(self.data)
return ss
@classmethod
def from_table(cls, table):
"""Read from `~astropy.table.Table`."""
# Spec says key should be "BKG", but there are files around
# (e.g. CTA 1DC) that use "BGD". For now we support both
if 'BKG' in table.colnames:
bkg_name = 'BKG'
elif 'BGD' in table.colnames:
bkg_name = 'BGD'
else:
raise ValueError('Invalid column names. Need "BKG" or "BGD".')
# Currently some files (e.g. CTA 1DC) contain unit in the FITS file
        # '1/s/MeV/sr', which is invalid (try: astropy.units.Unit('1/s/MeV/sr')).
# This should be corrected.
# For now, we hard-code the unit here:
data_unit = u.Unit('s-1 MeV-1 sr-1')
return cls(
energy_lo=table['ENERG_LO'].quantity[0],
energy_hi=table['ENERG_HI'].quantity[0],
offset_lo=table['THETA_LO'].quantity[0],
offset_hi=table['THETA_HI'].quantity[0],
data=table[bkg_name].data[0] * data_unit,
meta=table.meta,
)
@classmethod
def from_hdulist(cls, hdulist, hdu='BACKGROUND'):
"""Create from `~astropy.io.fits.HDUList`."""
return cls.from_table(Table.read(hdulist[hdu]))
@classmethod
def read(cls, filename, hdu='BACKGROUND'):
"""Read from file."""
filename = make_path(filename)
with fits.open(str(filename), memmap=False) as hdulist:
bkg = cls.from_hdulist(hdulist, hdu=hdu)
return bkg
def to_table(self):
"""Convert to `~astropy.table.Table`."""
meta = self.meta.copy()
table = Table(meta=meta)
table['THETA_LO'] = self.data.axis('offset').lo[np.newaxis]
table['THETA_HI'] = self.data.axis('offset').hi[np.newaxis]
table['ENERG_LO'] = self.data.axis('energy').lo[np.newaxis]
table['ENERG_HI'] = self.data.axis('energy').hi[np.newaxis]
table['BKG'] = self.data.data[np.newaxis]
return table
def to_fits(self, name='BACKGROUND'):
"""Convert to `~astropy.io.fits.BinTable`."""
return fits.BinTableHDU(self.to_table(), name=name)
def evaluate(self, fov_offset, fov_phi=None, energy_reco=None, **kwargs):
"""
Evaluate the `Background2D` at a given offset and energy.
Parameters
----------
fov_offset : `~astropy.coordinates.Angle`
Offset in the FOV
fov_phi: `~astropy.coordinates.Angle`
Azimuth angle in the FOV.
Not used for this class since the background model is radially symmetric
energy_reco : `~astropy.units.Quantity`
Reconstructed energy
kwargs : dict
option for interpolation for `~scipy.interpolate.RegularGridInterpolator`
Returns
-------
array : `~astropy.units.Quantity`
Interpolated values, axis order is the same as for the NDData array
"""
if energy_reco is None:
energy_reco = self.data.axis('energy').nodes
array = self.data.evaluate(offset=fov_offset, energy=energy_reco, **kwargs)
return array
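# A short, hedged usage sketch (the file path is an assumption; it mirrors the
# Background3D example above):
#
#     import astropy.units as u
#     from astropy.coordinates import Angle
#     bkg_2d = Background2D.read('irf_file.fits', hdu='BACKGROUND')
#     rate = bkg_2d.evaluate(fov_offset=Angle('0.5 deg'), energy_reco=1 * u.TeV)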
|
py | b41732487a11c684692fcb69ad8cf8b4e761d104 | # -*- coding: utf-8 -*-
#
# Mock documentation build configuration file, created by
# sphinx-quickstart on Mon Nov 17 18:12:00 2008.
#
# This file is execfile()d with the current directory set to its containing dir.
#
# The contents of this file are pickled, so don't put values in the namespace
# that aren't pickleable (module imports are okay, they're removed automatically).
#
# All configuration values have a default value; values that are commented out
# serve to show the default value.
import sys, os
sys.path.insert(0, os.path.abspath('..'))
import mock
# If your extensions are in another directory, add it here. If the directory
# is relative to the documentation root, use os.path.abspath to make it
# absolute, like shown here.
#sys.path.append(os.path.abspath('some/directory'))
# General configuration
# ---------------------
# Add any Sphinx extension module names here, as strings. They can be extensions
# coming with Sphinx (named 'sphinx.ext.*') or your custom ones.
extensions = ['sphinx.ext.doctest']
doctest_global_setup = """
import os
import sys
import mock
from mock import * # yeah, I know :-/
import unittest2
import __main__
if os.getcwd() not in sys.path:
sys.path.append(os.getcwd())
# keep a reference to __main__
sys.modules['__main'] = __main__
class ProxyModule(object):
def __init__(self):
self.__dict__ = globals()
sys.modules['__main__'] = ProxyModule()
"""
doctest_global_cleanup = """
sys.modules['__main__'] = sys.modules['__main']
"""
html_theme = 'nature'
html_theme_options = {}
# Add any paths that contain templates here, relative to this directory.
#templates_path = ['_templates']
# The suffix of source filenames.
source_suffix = '.txt'
# The master toctree document.
master_doc = 'index'
# General substitutions.
project = u'Mock'
copyright = u'2007-2015, Michael Foord & the mock team'
# The default replacements for |version| and |release|, also used in various
# other places throughout the built documents. Supplied by pbr.
#
# The short X.Y version.
version = mock.mock._v.brief_string()
# The full version, including alpha/beta/rc tags.
release = mock.__version__
# There are two options for replacing |today|: either, you set today to some
# non-false value, then it is used: (Set from pbr)
today = ''
# Else, today_fmt is used as the format for a strftime call.
# today_fmt = '%B %d, %Y'
# List of documents that shouldn't be included in the build.
#unused_docs = []
# List of directories, relative to source directories, that shouldn't be searched
# for source files.
exclude_trees = []
# The reST default role (used for this markup: `text`) to use for all documents.
#default_role = None
# If true, '()' will be appended to :func: etc. cross-reference text.
#add_function_parentheses = True
# If true, the current module name will be prepended to all description
# unit titles (such as .. function::).
add_module_names = False
# If true, sectionauthor and moduleauthor directives will be shown in the
# output. They are ignored by default.
#show_authors = False
# The name of the Pygments (syntax highlighting) style to use.
pygments_style = 'friendly'
# Options for HTML output
# -----------------------
# The style sheet to use for HTML and HTML Help pages. A file of that name
# must exist either in Sphinx' static/ path, or in one of the custom paths
# given in html_static_path.
#html_style = 'adctheme.css'
# The name for this set of Sphinx documents. If None, it defaults to
# "<project> v<release> documentation".
#html_title = None
# A shorter title for the navigation bar. Default is the same as html_title.
#html_short_title = None
# The name of an image file (relative to this directory) to place at the top
# of the sidebar.
#html_logo = None
# The name of an image file (within the static path) to use as favicon of the
# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32
# pixels large.
#html_favicon = None
# Add any paths that contain custom static files (such as style sheets) here,
# relative to this directory. They are copied after the builtin static files,
# so a file named "default.css" will overwrite the builtin "default.css".
#html_static_path = ['_static']
# If not '', a 'Last updated on:' timestamp is inserted at every page bottom,
# using the given strftime format.
html_last_updated_fmt = '%b %d, %Y'
# If true, SmartyPants will be used to convert quotes and dashes to
# typographically correct entities.
#html_use_smartypants = True
# Custom sidebar templates, maps document names to template names.
#html_sidebars = {}
# Additional templates that should be rendered to pages, maps page names to
# template names.
#html_additional_pages = {}
# If false, no module index is generated.
html_use_modindex = False
# If false, no index is generated.
#html_use_index = True
# If true, the index is split into individual pages for each letter.
#html_split_index = False
# If true, the reST sources are included in the HTML build as _sources/<name>.
#html_copy_source = True
# If true, an OpenSearch description file will be output, and all pages will
# contain a <link> tag referring to it. The value of this option must be the
# base URL from which the finished HTML is served.
#html_use_opensearch = ''
# If nonempty, this is the file name suffix for HTML files (e.g. ".xhtml").
#html_file_suffix = ''
# Output file base name for HTML help builder.
htmlhelp_basename = 'Mockdoc'
# Options for LaTeX output
# ------------------------
# The paper size ('letter' or 'a4').
#latex_paper_size = 'letter'
# The font size ('10pt', '11pt' or '12pt').
latex_font_size = '12pt'
# Grouping the document tree into LaTeX files. List of tuples
# (source start file, target name, title, author, document class [howto/manual]).
latex_documents = [
('index', 'Mock.tex', u'Mock Documentation',
u'Michael Foord', 'manual'),
]
# The name of an image file (relative to this directory) to place at the top of
# the title page.
#latex_logo = None
# For "manual" documents, if this is true, then toplevel headings are parts,
# not chapters.
#latex_use_parts = False
# Additional stuff for the LaTeX preamble.
#latex_preamble = ''
# Documents to append as an appendix to all manuals.
#latex_appendices = []
# If false, no module index is generated.
latex_use_modindex = False
|
py | b41732648c763ee86e7f1ebe45cf28559d4ea2e9 | # Generated by Django 2.0.9 on 2019-01-08 12:01
from django.db import migrations
class Migration(migrations.Migration):
dependencies = [
('images', '0003_auto_20181228_0148'),
]
operations = [
migrations.AlterModelOptions(
name='image',
options={'ordering': ['-created_at']},
),
]
|
py | b41732b78d1023f91632b7dc16c0593b6d312df2 | # -------------------------------------------------------------------------
#
# Part of the CodeChecker project, under the Apache License v2.0 with
# LLVM Exceptions. See LICENSE for license information.
# SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
#
# -------------------------------------------------------------------------
"""
This module tests the correctness of the ESLintAnalyzerResult, which is
used to transform ESLint output to a plist file.
"""
import os
import plistlib
import shutil
import tempfile
import unittest
from codechecker_report_converter.eslint.analyzer_result import \
ESLintAnalyzerResult
class ESLintAnalyzerResultTestCase(unittest.TestCase):
""" Test the output of the ESLintAnalyzerResult. """
def setUp(self):
""" Setup the test. """
self.analyzer_result = ESLintAnalyzerResult()
self.cc_result_dir = tempfile.mkdtemp()
self.test_files = os.path.join(os.path.dirname(__file__),
'eslint_output_test_files')
def tearDown(self):
""" Clean temporary directory. """
shutil.rmtree(self.cc_result_dir)
def test_no_json_file(self):
""" Test transforming single plist file. """
analyzer_result = os.path.join(self.test_files, 'files',
'index.js')
ret = self.analyzer_result.transform(analyzer_result,
self.cc_result_dir)
self.assertFalse(ret)
def test_transform_dir(self):
""" Test transforming single plist file. """
analyzer_result = os.path.join(self.test_files)
ret = self.analyzer_result.transform(analyzer_result,
self.cc_result_dir)
self.assertFalse(ret)
def test_transform_single_file(self):
""" Test transforming single plist file. """
analyzer_result = os.path.join(self.test_files, 'reports.json')
self.analyzer_result.transform(analyzer_result, self.cc_result_dir)
plist_file = os.path.join(self.cc_result_dir,
'index.js_eslint.plist')
with open(plist_file, mode='rb') as pfile:
res = plistlib.load(pfile)
# Use relative path for this test.
res['files'][0] = os.path.join('files', 'index.js')
self.assertTrue(res['metadata']['generated_by']['version'])
res['metadata']['generated_by']['version'] = "x.y.z"
plist_file = os.path.join(self.test_files,
'reports.expected.plist')
with open(plist_file, mode='rb') as pfile:
exp = plistlib.load(pfile)
self.assertEqual(res, exp)
|
py | b417332dff94da35de5f529fd92a5d0a66b9d56a | from faker import Faker
import os
import random
import sys
if len(sys.argv) < 4:
print("usage: data-generation.py <file_name> <desired_size> <number_of_files>")
quit()
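# Example invocation (hypothetical file name and sizes):
#   python data-generation.py records.csv 100000 4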
file_name = sys.argv[1]
print(f"File name: {file_name}")
desired_size = int(sys.argv[2])
print(f"Desired number of rows: {desired_size}")
number_of_files = int(sys.argv[3])
print(f"Number of files: {number_of_files}")
fake = Faker()
total_icd = 0
total_records = 0
total_emails = 0
total_batwomen = 0
total_catwomen = 0
for file_number in range(number_of_files):
if number_of_files > 1:
file = f"{file_name}.{file_number}"
else:
file = f"{file_name}"
with open(file, 'a') as f:
#f.write('"id","icd","amount","message"\n')
while True:
total_records = total_records + 1
amount = f'"${round(random.uniform(1,1000),2)}"'
if random.uniform(0,1) <= 0.05:
icd = '"' + fake.icd_code() + '"'
total_icd += 1
else:
icd = '""'
message = '"' + fake.sentence()
if random.uniform(0,1) <= 0.01:
message += " SporadicKeyword."
total_batwomen += 1
if random.uniform(0,1) <= 0.10:
message += " FrequentKeyword."
total_catwomen += 1
if random.uniform(0,1) <= 0.05:
message += " " + fake.email()
total_emails = total_emails + 1
message += " " + fake.sentence() + '"'
f.write(f'{total_records},{icd},{amount},{message}\n')
if total_records >= desired_size:
break
print(f"Bytes written: {os.path.getsize(file)}")
print(f"Number of records: {total_records}")
print(f"Total icd: {total_icd}")
print(f"Total batwomen: {total_batwomen}")
print(f"Total catwomen: {total_catwomen}")
print(f"Total emails: {total_emails}")
|
py | b41734d214e98cd24be0c98ee67f7cb5e58b7a61 | # Copyright 2017 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for contrib.seq2seq.python.seq2seq.beam_search_ops."""
# pylint: disable=unused-import,g-bad-import-order
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# pylint: enable=unused-import
import itertools
import numpy as np
from tensorflow.contrib.seq2seq.python.ops import beam_search_ops
from tensorflow.python.framework import ops
from tensorflow.python.platform import test
def _transpose_batch_time(x):
return np.transpose(x, [1, 0, 2]).astype(np.int32)
class GatherTreeTest(test.TestCase):
def testGatherTreeOne(self):
# (max_time = 4, batch_size = 1, beams = 3)
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, 1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
expected_result = _transpose_batch_time([[[2, 2, 2], [6, 5, 6], [7, 8, 9],
[10, 10, 10]]])
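    # How the expected result is derived (informal walk-through): gather_tree
    # back-tracks each final beam through parent_ids. For beam 0: time 2 emits
    # 7 with parent 2, time 1 beam 2 emits 6 with parent 1, time 0 beam 1
    # emits 2, giving [2, 6, 7]; time 3 is past max_sequence_lengths[0] == 3,
    # so it is filled with end_token (10).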
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
with self.session(use_gpu=True):
self.assertAllEqual(expected_result, beams.eval())
def testBadParentValuesOnCPU(self):
# (batch_size = 1, max_time = 4, beams = 3)
# bad parent in beam 1 time 1
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
with ops.device("/cpu:0"):
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
with self.cached_session():
with self.assertRaisesOpError(
r"parent id -1 at \(batch, time, beam\) == \(0, 0, 1\)"):
_ = beams.eval()
def testBadParentValuesOnGPU(self):
# Only want to run this test on CUDA devices, as gather_tree is not
# registered for SYCL devices.
if not test.is_gpu_available(cuda_only=True):
return
# (max_time = 4, batch_size = 1, beams = 3)
# bad parent in beam 1 time 1; appears as a negative index at time 0
end_token = 10
step_ids = _transpose_batch_time(
[[[1, 2, 3], [4, 5, 6], [7, 8, 9], [-1, -1, -1]]])
parent_ids = _transpose_batch_time(
[[[0, 0, 0], [0, -1, 1], [2, 1, 2], [-1, -1, -1]]])
max_sequence_lengths = [3]
expected_result = _transpose_batch_time([[[2, -1, 2], [6, 5, 6], [7, 8, 9],
[10, 10, 10]]])
with ops.device("/device:GPU:0"):
beams = beam_search_ops.gather_tree(
step_ids=step_ids,
parent_ids=parent_ids,
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
with self.session(use_gpu=True):
self.assertAllEqual(expected_result, beams.eval())
def testGatherTreeBatch(self):
batch_size = 10
beam_width = 15
max_time = 8
max_sequence_lengths = [0, 1, 2, 4, 7, 8, 9, 10, 11, 0]
end_token = 5
with self.session(use_gpu=True):
step_ids = np.random.randint(
0, high=end_token + 1, size=(max_time, batch_size, beam_width))
parent_ids = np.random.randint(
0, high=beam_width - 1, size=(max_time, batch_size, beam_width))
beams = beam_search_ops.gather_tree(
step_ids=step_ids.astype(np.int32),
parent_ids=parent_ids.astype(np.int32),
max_sequence_lengths=max_sequence_lengths,
end_token=end_token)
self.assertEqual((max_time, batch_size, beam_width), beams.shape)
beams_value = beams.eval()
for b in range(batch_size):
# Past max_sequence_lengths[b], we emit all end tokens.
b_value = beams_value[max_sequence_lengths[b]:, b, :]
self.assertAllClose(b_value, end_token * np.ones_like(b_value))
for batch, beam in itertools.product(
range(batch_size), range(beam_width)):
v = np.squeeze(beams_value[:, batch, beam])
if end_token in v:
found_bad = np.where(v == -1)[0]
self.assertEqual(0, len(found_bad))
found = np.where(v == end_token)[0]
found = found[0] # First occurrence of end_token.
# If an end_token is found, everything before it should be a
# valid id and everything after it should be -1.
if found > 0:
self.assertAllEqual(
v[:found - 1] >= 0, np.ones_like(v[:found - 1], dtype=bool))
self.assertAllClose(v[found + 1:],
end_token * np.ones_like(v[found + 1:]))
if __name__ == "__main__":
test.main()
|
py | b4173551a52c5b193079671409b4f26552e60200 | from django.urls import reverse
from django.test import TestCase, override_settings
from accounts.models import User
@override_settings(STATICFILES_STORAGE='django.contrib.staticfiles.storage.StaticFilesStorage')
class AccountUsersViewsTestCase(TestCase):
@classmethod
def setUpTestData(cls):
# superuser
cls.su_pwd = "godzillapwd"
cls.superuser = User.objects.create_user("godzilla", "[email protected]", cls.su_pwd,
is_superuser=True)
# user
cls.pwd = "yo"
cls.user = User.objects.create_user("yo", "[email protected]", cls.pwd)
# remote user
cls.remoteuser = User.objects.create_user("remote", "[email protected]", "remote",
is_remote=True)
# auth utils
def login_redirect(self, url_name, *args):
url = reverse("users:{}".format(url_name), args=args)
response = self.client.get(url)
self.assertRedirects(response, "{u}?next={n}".format(u=reverse("login"), n=url))
def permission_denied(self, url_name, *args):
url = reverse("users:{}".format(url_name), args=args)
response = self.client.get(url)
self.assertEqual(response.status_code, 403)
def log_user_in(self, superuser=False):
if superuser:
user, pwd = self.superuser, self.su_pwd
else:
user, pwd = self.user, self.pwd
response = self.client.post(reverse('login'),
{'username': user.username, 'password': pwd},
follow=True)
self.assertEqual(response.status_code, 200)
self.assertEqual(response.context["user"], user)
# permissions redirects
def test_user_list_redirect(self):
self.login_redirect("list")
self.log_user_in()
self.permission_denied("list")
def test_user_add_redirect(self):
self.login_redirect("add")
self.log_user_in()
self.permission_denied("add")
def test_user_update_redirect(self):
self.login_redirect("update", self.user.id)
self.log_user_in()
self.permission_denied("update", self.superuser.id)
def test_user_delete_redirect(self):
self.login_redirect("delete", self.user.id)
self.log_user_in()
self.permission_denied("delete", self.user.id)
# user list
def test_user_list_ok(self):
self.log_user_in(superuser=True)
response = self.client.get(reverse("users:list"))
for text in (self.user.username, self.user.email,
self.remoteuser.username, self.remoteuser.email,
self.superuser.username, self.superuser.email,
"3 Users",
reverse("users:delete", args=(self.user.pk,)),
reverse("users:update", args=(self.user.pk,)),
reverse("users:delete", args=(self.remoteuser.pk,)),
reverse("users:update", args=(self.remoteuser.pk,)),
reverse("users:update", args=(self.superuser.pk,))):
self.assertContains(response, text)
self.assertNotContains(response, reverse("users:delete", args=(self.superuser.pk,)))
# add
def test_user_add_get(self):
self.log_user_in(superuser=True)
response = self.client.get(reverse("users:add"))
self.assertContains(response, "Send an email invitation")
def test_user_add_username_error(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:add"),
{"username": self.user.username,
"email": "[email protected]"},
follow=True)
self.assertFormError(response, "form", "username", "A user with that username already exists.")
def test_user_add_email_error(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:add"),
{"username": "test",
"email": self.user.email},
follow=True)
self.assertFormError(response, "form", "email", "User with this Email already exists.")
def test_user_add_ok(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:add"),
{"username": "test",
"email": "[email protected]"},
follow=True)
for text in ("4 Users", "test", "[email protected]"):
self.assertContains(response, text)
# update
def test_user_update_404(self):
self.log_user_in(superuser=True)
response = self.client.get(reverse("users:update", args=(0,)))
self.assertEqual(response.status_code, 404)
def test_user_update_get(self):
self.log_user_in(superuser=True)
for user, ue_disabled, su_disabled in ((self.user, False, False),
(self.remoteuser, True, False),
(self.superuser, False, True)):
response = self.client.get(reverse("users:update", args=(user.id,)))
self.assertContains(response, "Update user {}".format(user))
form = response.context["form"]
self.assertEqual(form.fields["is_superuser"].disabled, su_disabled)
self.assertEqual(form.fields["username"].disabled, ue_disabled)
self.assertEqual(form.fields["email"].disabled, ue_disabled)
def test_user_update_username_error(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:update", args=(self.user.id,)),
{"username": self.superuser.username,
"email": self.user.email,
"is_superuser": self.user.is_superuser})
self.assertFormError(response, "form", "username", "A user with that username already exists.")
def test_user_update_email_error(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:update", args=(self.user.id,)),
{"username": self.user.username,
"email": self.superuser.email,
"is_superuser": self.user.is_superuser})
self.assertFormError(response, "form", "email", "User with this Email already exists.")
def test_user_update_ok(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:update", args=(self.user.id,)),
{"username": "toto",
"email": "[email protected]",
"is_superuser": self.user.is_superuser},
follow=True)
for text in ("3 Users", "toto", "[email protected]"):
self.assertContains(response, text)
self.assertNotContains(response, self.user.username)
# delete
def test_user_delete_404(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:delete", args=(0,)))
self.assertEqual(response.status_code, 404)
def test_superuser_delete_redirect(self):
self.log_user_in(superuser=True)
response = self.client.post(reverse("users:delete", args=(self.superuser.id,)))
self.assertRedirects(response, reverse("users:list"))
def test_user_delete_ok(self):
self.log_user_in(superuser=True)
user_str = str(self.user)
response = self.client.post(reverse("users:delete", args=(self.user.id,)),
follow=True)
self.assertContains(response, "User {} deleted".format(user_str))
self.assertContains(response, "2 User")
|
py | b41735a35258fb768dfc7aacf2d3c7e128f4f06a | # Copyright 2012 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Manages dynamic properties of an application and/or its modules.
An application must explicitly declare properties and provide a type, doc string
and default value for each. The default property values are overridden by
the new values found in the environment variable with the same name. Those are
further overridden by the values found in the datastore. We also try to do all
of this with performance in mind.
"""
__author__ = 'Pavel Simakov ([email protected])'
import logging
import os
import threading
import time
import entities
import messages
import transforms
import appengine_config
from google.appengine.api import namespace_manager
from google.appengine.ext import db
# The default update interval supported.
DEFAULT_UPDATE_INTERVAL_SEC = 60
# The longest update interval supported.
MAX_UPDATE_INTERVAL_SEC = 60 * 5
# Allowed property types.
TYPE_INT = int
TYPE_STR = str
TYPE_BOOL = bool
ALLOWED_TYPES = frozenset([TYPE_INT, TYPE_STR, TYPE_BOOL])
class ConfigProperty(object):
"""A property with name, type, doc_string and a default value."""
def __init__(
self, name, value_type, doc_string,
default_value=None, multiline=False, validator=None,
after_change=None, label=None, deprecated=False,
show_in_site_settings=True):
"""Create a new global config property.
These properties are persisted as ConfigPropertyEntity in the default
namespace. As such, these properties apply to the installation as
a whole, rather than individual courses.
Args:
name: A name, by convention starting with "gcb_" for Google
Course Builder, and something else for third-party extensions.
value_type: A Python type, one of {bool, str, int}
doc_string: A brief description displayed on the admin page listing
all the config variables
default_value: The value used when no override has been set by the
site admin.
multiline: Whether the value, if value_type is str, can be
expected to extend to multiple lines of text.
validator: A function taking two parameters:
value: The value to validate
errors: A list of strings indicating problems. If the value
is acceptable, 'errors' must not be appended to, and
conversely.
after_change: This is a function which is called only when the
value is changed by the site administrator via the UI.
(It is not called when the underlying ConfigPropertyEntity is
                directly modified). This function takes two parameters:
the ConfigProperty instance, and the previous value.
label: A friendly display name.
deprecated: True/False. When a property is logically removed,
it should be marked as deprecated. This will leave the name
known to the system so that we do not issue spurious warnings
about unknown properties (once per minute, on property refresh)
"""
if value_type not in ALLOWED_TYPES:
raise Exception('Bad value type: %s' % value_type)
self._validator = validator
self._multiline = multiline
self._name = name
self._label = label
self._type = value_type
self._doc_string = doc_string
self._default_value = value_type(default_value)
self._after_change = after_change
self._deprecated = deprecated
self._show_in_site_settings = show_in_site_settings
errors = []
if self._validator and self._default_value:
self._validator(self._default_value, errors)
if errors:
raise Exception('Default value is invalid: %s.' % errors)
Registry.registered[name] = self
if name in Registry.db_items:
item = Registry.db_items[name]
del Registry.db_items[name]
# pylint: disable=protected-access
Registry._config_property_entity_changed(item)
@property
def validator(self):
return self._validator
@property
def after_change(self):
"""Properties may register callbacks to notice changes to value."""
return self._after_change
@property
def multiline(self):
return self._multiline
@property
def name(self):
return self._name
@property
def label(self):
return self._label or self._name
@property
def value_type(self):
return self._type
@property
def doc_string(self):
return self._doc_string
@property
def default_value(self):
return self._default_value
def get_environ_value(self):
"""Tries to get value from the environment variables."""
# Look for a name in lower or upper case.
name = None
if self._name.lower() in os.environ:
name = self._name.lower()
else:
if self._name.upper() in os.environ:
name = self._name.upper()
if name:
try:
return True, transforms.string_to_value(
os.environ[name], self.value_type)
except Exception: # pylint: disable=broad-except
logging.error(
'Property %s failed to cast to type %s; removing.',
self._name, self._type)
del os.environ[name]
return False, None
def get_value(self, db_overrides=None):
"""Gets value from overrides (datastore, environment) or default."""
# Try testing overrides.
overrides = Registry.test_overrides
if overrides and self.name in overrides:
return overrides[self.name]
# Try datastore overrides.
if db_overrides and self.name in db_overrides:
return db_overrides[self.name]
# Try environment variable overrides.
has_value, environ_value = self.get_environ_value()
if has_value:
return environ_value
# Use default value as last resort.
return self._default_value
@property
def value(self):
return self.get_value(db_overrides=Registry.get_overrides())
@property
def deprecated(self):
return self._deprecated
@property
def show_in_site_settings(self):
return self._show_in_site_settings and not self._deprecated
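# A hedged usage sketch (the property name and values are made up for
# illustration; the pattern mirrors UPDATE_INTERVAL_SEC defined at the bottom
# of this module):
#
#     MY_FEATURE_ENABLED = ConfigProperty(
#         'gcb_my_feature_enabled', bool,
#         'Whether the hypothetical feature is switched on.',
#         default_value=False, label='My feature')
#
#     if MY_FEATURE_ENABLED.value:
#         ...  # env var and datastore overrides are applied automatically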
class ValidateLength(object):
def __init__(self, length):
self._length = length
def validator(self, value, errors):
if len(value) != self._length:
errors.append(
'The length of this field must be exactly %d, ' % self._length +
'but the value "%s" is of length %d.' % (value, len(value)))
class ValidateIntegerRange(object):
def __init__(self,
lower_bound_inclusive=None,
upper_bound_inclusive=None,
lower_bound_exclusive=None,
upper_bound_exclusive=None):
if (lower_bound_exclusive is not None and
lower_bound_inclusive is not None):
raise ValueError('Please specify only one lower bound.')
if (upper_bound_exclusive is not None and
upper_bound_inclusive is not None):
raise ValueError('Please specify only one upper bound.')
if (lower_bound_inclusive is None and
lower_bound_exclusive is None and
upper_bound_inclusive is None and
upper_bound_exclusive is None):
raise ValueError('Please specify at least one bound.')
# Convert to integers before checking ranges for sanity.
self._lower_bound_inclusive = None
self._lower_bound_exclusive = None
self._upper_bound_inclusive = None
self._upper_bound_exclusive = None
        if lower_bound_inclusive is not None:
            self._lower_bound_inclusive = int(lower_bound_inclusive)
        if lower_bound_exclusive is not None:
            self._lower_bound_exclusive = int(lower_bound_exclusive)
        if upper_bound_inclusive is not None:
            self._upper_bound_inclusive = int(upper_bound_inclusive)
        if upper_bound_exclusive is not None:
            self._upper_bound_exclusive = int(upper_bound_exclusive)
if (lower_bound_exclusive is not None and
upper_bound_exclusive is not None and
lower_bound_exclusive + 1 >= upper_bound_exclusive):
raise ValueError('Bounds do not permit any valid values.')
if (lower_bound_inclusive is not None and
upper_bound_exclusive is not None and
lower_bound_inclusive >= upper_bound_exclusive):
raise ValueError('Bounds do not permit any valid values.')
if (lower_bound_exclusive is not None and
upper_bound_inclusive is not None and
lower_bound_exclusive >= upper_bound_inclusive):
raise ValueError('Bounds do not permit any valid values.')
if (lower_bound_inclusive is not None and
upper_bound_inclusive is not None and
lower_bound_inclusive > upper_bound_inclusive):
raise ValueError('Bounds do not permit any valid values.')
def validate(self, value, errors):
try:
value = int(value)
except ValueError:
errors.append('"%s" is not an integer' % value)
return
if (self._lower_bound_inclusive is not None and
value < self._lower_bound_inclusive):
errors.append('This value must be greater than or equal to %d' %
self._lower_bound_inclusive)
if (self._lower_bound_exclusive is not None and
value <= self._lower_bound_exclusive):
errors.append('This value must be greater than %d' %
self._lower_bound_exclusive)
if (self._upper_bound_inclusive is not None and
value > self._upper_bound_inclusive):
errors.append('This value must be less than or equal to %d' %
self._upper_bound_inclusive)
if (self._upper_bound_exclusive is not None and
value >= self._upper_bound_exclusive):
errors.append('This value must be less than %d' %
self._upper_bound_exclusive)
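# Illustrative only: the bound 'validate' method is passed to ConfigProperty as
# a validator (see UPDATE_INTERVAL_SEC below). Called directly, it appends
# messages to an error list instead of raising:
#
#     errors = []
#     ValidateIntegerRange(lower_bound_inclusive=0,
#                          upper_bound_inclusive=300).validate('-5', errors)
#     # errors == ['This value must be greater than or equal to 0']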
class Registry(object):
"""Holds all registered properties and their various overrides."""
registered = {}
test_overrides = {}
db_items = {}
db_overrides = {}
names_with_draft = {}
last_update_time = 0
update_index = 0
threadlocal = threading.local()
REENTRY_ATTR_NAME = 'busy'
UNREGISTERED_PROPERTY_LOGGING_LEVEL = logging.WARNING
@classmethod
def get_overrides(cls, force_update=False):
"""Returns current property overrides, maybe cached."""
now = long(time.time())
age = now - cls.last_update_time
max_age = UPDATE_INTERVAL_SEC.get_value(db_overrides=cls.db_overrides)
# do not update if call is reentrant or outer db transaction exists
busy = hasattr(cls.threadlocal, cls.REENTRY_ATTR_NAME) or (
db.is_in_transaction())
if (not busy) and (force_update or age < 0 or age >= max_age):
# Value of '0' disables all datastore overrides.
if UPDATE_INTERVAL_SEC.get_value() == 0:
cls.db_overrides = {}
return cls.db_overrides
# Load overrides from a datastore.
setattr(cls.threadlocal, cls.REENTRY_ATTR_NAME, True)
try:
old_namespace = namespace_manager.get_namespace()
try:
namespace_manager.set_namespace(
appengine_config.DEFAULT_NAMESPACE_NAME)
cls._load_from_db()
finally:
namespace_manager.set_namespace(old_namespace)
except Exception as e: # pylint: disable=broad-except
logging.error(
'Failed to load properties from a database: %s.', str(e))
finally:
delattr(cls.threadlocal, cls.REENTRY_ATTR_NAME)
# Avoid overload and update timestamp even if we failed.
cls.last_update_time = now
cls.update_index += 1
return cls.db_overrides
@classmethod
def _load_from_db(cls):
"""Loads dynamic properties from db."""
items = {}
overrides = {}
drafts = set()
for item in ConfigPropertyEntity.all().fetch(1000):
items[item.key().name()] = item
cls._set_value(item, overrides, drafts)
cls.db_items = items
cls.db_overrides = overrides
cls.names_with_draft = drafts
@classmethod
def _config_property_entity_changed(cls, item):
cls._set_value(item, cls.db_overrides, cls.names_with_draft)
@classmethod
def _set_value(cls, item, overrides, drafts):
name = item.key().name()
target = cls.registered.get(name, None)
if not target:
if appengine_config.MODULE_REGISTRATION_IN_PROGRESS:
log_level = logging.INFO
else:
log_level = cls.UNREGISTERED_PROPERTY_LOGGING_LEVEL
logging.log(log_level, 'Property is not registered (skipped): %s',
name)
return
if item.is_draft:
if name in overrides:
del overrides[name]
drafts.add(name)
else:
if name in drafts:
drafts.remove(name)
# Enforce value type.
try:
value = transforms.string_to_value(
item.value, target.value_type)
except Exception: # pylint: disable=broad-except
logging.error(
'Property %s failed to cast to a type %s; removing.',
target.name, target.value_type)
return
# Enforce value validator.
if target.validator:
errors = []
try:
target.validator(value, errors)
except Exception as e: # pylint: disable=broad-except
                errors.append(
                    'Error validating property %s.\n%s' %
                    (target.name, e))
if errors:
logging.error(
'Property %s has invalid value:\n%s',
target.name, '\n'.join(errors))
return
overrides[name] = value
class ConfigPropertyEntity(entities.BaseEntity):
"""A class that represents a named configuration property."""
value = db.TextProperty(indexed=False)
is_draft = db.BooleanProperty(indexed=False)
def put(self):
# Persist to DB.
super(ConfigPropertyEntity, self).put()
# And tell local registry. Do this by direct call and synchronously
# so that this setting will be internally consistent within the
# remainder of this server's path of execution. (Note that the
# setting is _not_ going to be immediately available at all other
# instances; they will pick it up in due course after
        # UPDATE_INTERVAL_SEC has elapsed.)
# pylint: disable=protected-access
Registry._config_property_entity_changed(self)
def run_all_unit_tests():
"""Runs all unit tests for this modules."""
str_prop = ConfigProperty('gcb-str-prop', str, ('doc for str_prop'), 'foo')
int_prop = ConfigProperty('gcb-int-prop', int, ('doc for int_prop'), 123)
assert str_prop.default_value == 'foo'
assert str_prop.value == 'foo'
assert int_prop.default_value == 123
assert int_prop.value == 123
# Check os.environ override works.
os.environ[str_prop.name] = 'bar'
assert str_prop.value == 'bar'
del os.environ[str_prop.name]
assert str_prop.value == 'foo'
# Check os.environ override with type casting.
os.environ[int_prop.name] = '12345'
assert int_prop.value == 12345
# Check setting of value is disallowed.
try:
str_prop.value = 'foo'
raise Exception()
except AttributeError:
pass
# Check value of bad type is disregarded.
os.environ[int_prop.name] = 'foo bar'
assert int_prop.value == int_prop.default_value
UPDATE_INTERVAL_SEC = ConfigProperty(
'gcb_config_update_interval_sec', int,
messages.SITE_SETTINGS_REFRESH_INTERVAL_TEMPLATE % MAX_UPDATE_INTERVAL_SEC,
default_value=DEFAULT_UPDATE_INTERVAL_SEC, label='Refresh Interval',
validator=ValidateIntegerRange(
lower_bound_inclusive=0,
upper_bound_inclusive=MAX_UPDATE_INTERVAL_SEC).validate)
if __name__ == '__main__':
run_all_unit_tests()
|
py | b41735f54e2f189c151664a140ff1cea0d1aa262 | import os
import glob
import torch
import numpy as np
from abc import ABC
from tqdm import tqdm
from typing import List, Union, Optional
from datetime import datetime
from prettytable import PrettyTable
from collections.abc import Iterable
from torch.utils.tensorboard import SummaryWriter
from torch.utils.data import DataLoader
# NaN detector
# torch.autograd.set_detect_anomaly(True)
class TrainingProcessProgressBar:
def __init__(self,
iterator: Iterable,
description: str = None,
leave: bool = True,
unit: str = 'step'):
super().__init__()
self.iterator = iterator
self._leave = leave
self.description = description
self.unit = unit
self._postfix_dict = {}
self._pbar = self._init_pbar()
def __call__(self):
return self._pbar
def _init_pbar(self):
pbar = tqdm(self.iterator,
leave=self._leave,
postfix=self._postfix_dict,
unit=self.unit)
return pbar
def set_description(self, description: str):
self._pbar.set_description(description)
def set_value(self, k: str, v):
self._postfix_dict[k] = v
self._pbar.set_postfix(self._postfix_dict)
def close(self):
self._pbar.close()
# TODO: document that this dict has lists of values only
# TODO: add global strictness control instead of granulated by function
class TrainingProcessRunningDict:
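    """Running key/value store used by TrainingProcess during fit.
    Each key maps to a *list* of values: set_value appends to that list,
    get_last_value returns the most recent entry, and flush clears every key
    except the given exceptions.
    """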
def __init__(self):
super().__init__()
self._data = {}
def set_value(self,
k: Union[str, dict],
v: Optional[Union[str, int, float, None]] = None):
if isinstance(k, dict):
for k_, v_ in k.items():
self.set_value(k_, v_)
else:
if k not in self._data.keys():
self._data[k] = [v]
else:
self._data[k].append(v)
def update_value(self, k: str, v: str, strict: bool = True):
if k not in self._data.keys():
if strict:
raise KeyError(f"Key key={k} not found. "
f"Available keys={list(self._data.keys())}")
else:
self.remove_key(k, strict=False)
self.set_value(k, v)
def get_value(self, k: str, strict : bool = True):
# an error is raised if strict, otherwise None is returned
try:
v = self._data[k]
except KeyError:
if strict:
raise KeyError(f"Key key={k} not found. "
f"Available keys={list(self._data.keys())}")
else:
v = None
return v
def get_last_value(self, k: str, strict: bool = True):
try:
last_value = self.get_value(k, strict)[-1]
except TypeError:
if strict:
raise TypeError
else:
last_value = None
return last_value
def remove_key(self, k: str, strict: bool = False):
if strict:
del self._data[k] # raises error if key does not exist
else:
self._data.pop(k, None)
def flush(self, exceptions: list = []):
# copy keys to avoid 'changed size' error during iteration
for k in list(self._data.keys()):
if k not in exceptions:
self.remove_key(k)
class TrainingProcessCallback(ABC):
def __init__(self):
super().__init__()
def on_val_epoch_start(self, training_process):
...
def on_val_epoch_end(self, training_process):
...
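# A minimal sketch of a concrete callback (names are hypothetical; only the two
# hook methods above are defined by TrainingProcessCallback):
#
#     class PrintValLossCallback(TrainingProcessCallback):
#         def on_val_epoch_end(self, training_process):
#             loss = training_process.running_dict.get_last_value(
#                 'avg_val_loss', strict=False)
#             print(f'epoch {training_process.current_epoch}: val loss {loss}')
#
#     process = TrainingProcess(train_dataloader, val_dataloader,
#                               callbacks=[PrintValLossCallback()])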
# TODO: revision to adjust to different training routines
# TODO: add deterministic, benchmark and torch.manual_seed
# TODO: add mode to deactivate callbacks either in normal run of overfit
class TrainingProcess:
""" Base class to create a training cycle of a network. """
def __init__(self,
train_dataloader: DataLoader,
val_dataloader: DataLoader = None,
max_epochs: int = 1000,
device: torch.device = torch.device('cpu'),
lr_scheduler: torch.optim.lr_scheduler = None,
lr_scheduler_start_epoch: int = 0,
grad_norm_clipping: float = None,
resume_from_checkpoint: str = None,
overfit_single_batch: bool = False,
disable_callbacks_in_overfit_single_batch: bool = False,
run_name: str = 'model',
run_name_prefix: str = '',
run_ts_fmt: str = '%Y-%m-%d %Hh%Mm%Ss',
logs_dir: str = 'logs',
callbacks: List[TrainingProcessCallback] = []
):
super().__init__()
# dataloaders
self.train_dataloader = train_dataloader
self.val_dataloader = val_dataloader
# checkpoint params
self.resume_from_checkpoint = resume_from_checkpoint
self.run_ts_fmt = run_ts_fmt
self._base_run_name = run_name
self.run_name_prefix = run_name_prefix
self.run_name = self._init_run_name()
self.logs_dir = logs_dir
self.callbacks = callbacks
# training params
self.device = device
self.max_epochs = max_epochs
self.grad_norm_clipping = grad_norm_clipping
self.lr_scheduler = lr_scheduler
self.lr_scheduler_start_epoch = lr_scheduler_start_epoch
# training mode
self.resume_from_checkpoint = resume_from_checkpoint
self.overfit_single_batch = overfit_single_batch
self.disable_callbacks_in_overfit_single_batch = \
disable_callbacks_in_overfit_single_batch
# training process preparation
self.logger = self._init_logger()
self.checkpoints_dir = self._init_checkpoints_dir()
self.current_epoch = None # None if training hasn't started yet
# training running objects (exist only during fit)
# holds data in training and validation
self.running_dict = TrainingProcessRunningDict()
self._train_pbar = None
self._val_pbar = None
def _init_run_name(self):
# if a checkpoint is loaded, run_name is derived from the dir structure
if self.resume_from_checkpoint is not None:
run_name = os.path.basename(
os.path.dirname(
os.path.dirname(self.resume_from_checkpoint)
)
)
# no checkpoint
elif self.run_ts_fmt is not None:
ts_str = datetime.now().strftime(self.run_ts_fmt)
run_name = f'{self.run_name_prefix}{self._base_run_name} {ts_str}'
# no checkpoint and no ts
else:
run_name = self._base_run_name
return run_name
def _init_logger(self) -> SummaryWriter:
return SummaryWriter(os.path.join(self.logs_dir, self.run_name))
def _init_checkpoints_dir(self) -> str:
# checkpoints folder is fixed
return os.path.join(self.logs_dir, self.run_name, 'checkpoints')
def _init_overfit_batch(self):
return next(iter(self.train_dataloader))
def _get_dataset_name_from_dataloader(self, dataloader: DataLoader):
if dataloader is not None:
dataset_name = dataloader.dataset.__class__.__name__
else:
dataset_name = None
return dataset_name
# TODO: decide how to get val_loss if reduced direct or how to add
# more information to the saved checkpoint
# TODO: Expand checkpoint dict to save multiple models
def save_checkpoint(self, prefix='', ext='.tar'):
# make checkpoint dir if it does not exist
if not os.path.isdir(self.checkpoints_dir):
os.makedirs(self.checkpoints_dir)
# compose checkpoint filename
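        # e.g. "model 2021-01-01 10h30m00s epoch=12 val_loss=0.3415.tar"
        # (illustrative, using the default run name, timestamp format and extension)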
ts = datetime.now().strftime(self.run_ts_fmt)
# NOTE: overfit_single_batch_mode has no validation process
if self.has_validation and not self.is_in_overfit_single_batch_mode:
avg_val_loss = self.running_dict.get_last_value('avg_val_loss')
checkpoint_file = (
f'{prefix}{self._base_run_name} {ts} '
f'epoch={self.current_epoch} '
f'val_loss={avg_val_loss:.4f}{ext}'
)
else:
checkpoint_file = (
f'{prefix}{self._base_run_name} {ts} '
f'epoch={self.current_epoch}{ext}'
)
checkpoint_path = os.path.join(self.checkpoints_dir, checkpoint_file)
torch.save({
'model_state_dict': self.model.state_dict(),
'optimizer_state_dict': self.optimizer.state_dict(),
'last_epoch': self.current_epoch
}, checkpoint_path)
# checkpoint path is returned to be used by callbacks in case they need
# to keep track of different checkpoints saved over time
return checkpoint_path
def load_checkpoint(self, model, optimizer):
# corroborate checkpoint file exists
if not os.path.isfile(self.resume_from_checkpoint):
raise FileNotFoundError('Checkpoint file not found: '
f'{self.resume_from_checkpoint}')
checkpoint = torch.load(self.resume_from_checkpoint)
model.load_state_dict(checkpoint['model_state_dict'])
model.to(self.device)
optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
# start from the next epoch since the saved epoch is assumed
# as completed
self.current_epoch = checkpoint['last_epoch'] + 1
return model, optimizer
def _run_callback_hook(self, hook_name: str):
if (self.is_in_overfit_single_batch_mode and
self.disable_callbacks_in_overfit_single_batch):
return
for callback in self.callbacks:
hook_fn = getattr(callback, hook_name, False)
if callable(hook_fn):
hook_fn(self)
@property
def has_validation(self) -> bool:
return self.val_dataloader is not None
@property
def is_in_overfit_single_batch_mode(self) -> bool:
return self.overfit_single_batch
@property
def callback_names(self) -> list:
callback_names = [
callback.__class__.__name__ for callback in self.callbacks
] if len(self.callbacks) > 0 else None
return callback_names
@property
def train_dataset_name(self) -> str:
return self._get_dataset_name_from_dataloader(self.train_dataloader)
@property
def val_dataset_name(self) -> str:
return self._get_dataset_name_from_dataloader(self.val_dataloader)
@property
def criterion_name(self) -> str:
if type(self.criterion).__name__ == 'function':
criterion_name = self.criterion.__name__
else:
criterion_name = self.criterion.__class__.__name__
return criterion_name
@property
def model_name(self) -> str:
return self.model.__class__.__name__
@property
def optimizer_name(self) -> str:
return self.optimizer.__class__.__name__
@property
def lr_scheduler_name(self) -> str:
if self.lr_scheduler is not None:
lr_scheduler_name = self.lr_scheduler.__class__.__name__
else:
lr_scheduler_name = None
return lr_scheduler_name
def _print_model_summary(self, model: torch.nn.Module):
layers = []
layer_types = []
layer_params = []
for idx, (name, module) in enumerate(model.named_modules()):
# skip first entry that corresponds to the module itself
if idx == 0:
continue
layers.append(name)
layer_types.append(module.__class__.__name__)
layer_params.append(
sum(params.numel() for params in module.parameters())
)
trainable_params = []
nontrainable_params = []
for param in model.parameters():
if param.requires_grad:
trainable_params.append(param.numel())
else:
nontrainable_params.append(param.numel())
trainable_params = sum(trainable_params)
nontrainable_params = sum(nontrainable_params)
summary_table = PrettyTable()
summary_table.add_column("#", range(len(layers)))
summary_table.add_column("Layer", layers)
summary_table.add_column("Type", layer_types)
summary_table.add_column("Parameters", layer_params)
print(f"{model.__class__.__name__} summary:")
print(summary_table)
print(f"Total parameters: {trainable_params + nontrainable_params}")
print(f"Trainable parameters: {trainable_params}")
print(f"Non-trainable parameters: {nontrainable_params}")
def _print_training_process_summary(self):
# get callback names
if self.callback_names is not None:
callback_names_str = ", ".join(self.callback_names)
else:
callback_names_str = None
# get time stamp
process_start_date = datetime.now().strftime("%Y-%m-%d %H:%M:%S")
if self.is_in_overfit_single_batch_mode:
print("****** RUNNING IN OVERFIT SINGLE BATCH MODE ******\n"
"In 'overfit single batch mode' only the first batch "
"of the training_dataloader will be used for training. "
"Validation will be skipped."
)
print(
f"Model: {self.model_name}\n"
f"Run name: {self.run_name}\n"
f"Resumed from checkpoint: {self.resume_from_checkpoint is not None}\n"
f"CUDA available: {torch.cuda.is_available()}\n"
f"Device: {self.device}\n"
f"Training dataset: {self.train_dataset_name}\n"
f"Validation dataset: {self.val_dataset_name}\n"
f"Checkpoints folder: {self.checkpoints_dir}\n"
f"Process start date: {process_start_date}\n"
f"Optimizer: {self.optimizer_name}\n"
f"Learning rate scheduler: {self.lr_scheduler_name}\n"
f"Gradient norm clipping: {self.grad_norm_clipping}\n"
f"Criterion: {self.criterion_name}\n"
f"Maximum epochs: {self.max_epochs}\n"
f"Callbacks: {callback_names_str}\n"
)
def _on_train_setup(self):
# prevents inconsistency in current epoch vs last epoch
if (self.current_epoch is not None and
self.current_epoch >= self.max_epochs):
raise ValueError("Expected max_epochs > current_epoch but got "
f"max_epochs={self.max_epochs} and "
f"current_epoch={self.current_epoch}")
# self.current_epoch is None when training hasn't started yet
# if training has been started from checkpoint, self.current_epoch
# will be set accordingly in self.load_checkpoint()
if self.current_epoch is None:
self.current_epoch = 0
self._print_training_process_summary()
self._print_model_summary(self.model)
print("\nTraining progress:")
def on_train_setup(self):
...
def _on_train_epoch_start(self, epoch):
self.current_epoch = epoch
# prepare model for training
self.model.to(self.device)
self.model.train()
        # wrap the training dataloader in a progress bar for this epoch
self._train_pbar = TrainingProcessProgressBar(
self.train_dataloader,
leave=(self.current_epoch + 1 == self.max_epochs))
self._train_pbar.set_description(f"Epoch {self.current_epoch}"
f"/{self.max_epochs - 1}")
# update running dict
self.running_dict.set_value('epoch', self.current_epoch)
def on_train_epoch_start(self):
...
def _on_train_step(self, batch_idx, batch):
...
def on_train_step(self, batch_idx, batch):
x, y = batch
x = x.to(self.device)
y = y.to(self.device)
y_pred = self.model(x)
loss = self.criterion(y_pred, y)
self.optimizer.zero_grad()
loss.backward()
if self.grad_norm_clipping is not None:
torch.nn.utils.clip_grad_norm_(
self.model.parameters(),
max_norm=self.grad_norm_clipping
)
self.optimizer.step()
        # TODO: replace with returned dict {'loss': loss.item()}
self.running_dict.set_value('train_loss', loss.item())
def _on_train_step_end(self):
# set last train loss in progress bar
last_train_loss = self.running_dict.get_last_value('train_loss')
self._train_pbar.set_value('train_loss', f'{last_train_loss:.4f}')
if self.has_validation:
# set last average val loss in progress bar if exists
last_avg_val_loss = self.running_dict \
.get_last_value('avg_val_loss',
strict=False)
if last_avg_val_loss is not None:
self._train_pbar.set_value('avg_val_loss',
f'{last_avg_val_loss:.4f}')
def on_train_step_end(self):
...
def _on_train_epoch_end(self):
# close train progress bar
self._train_pbar.close()
# compute and log train loss
avg_train_loss = np.mean(self.running_dict.get_value('train_loss'))
self.logger.add_scalar('Loss/train', avg_train_loss, self.current_epoch)
# remove train_loss list from running dict
self.running_dict.remove_key('train_loss')
self.running_dict.set_value('avg_train_loss', avg_train_loss)
def on_train_epoch_end(self):
...
def _on_val_epoch_start(self):
# set up progress bar
self._val_pbar = TrainingProcessProgressBar(self.val_dataloader,
leave=(self.current_epoch + 1 == self.max_epochs))
self._val_pbar.set_description(
f'Validation of epoch {self.current_epoch}/{self.max_epochs - 1}'
)
# set last train loss
avg_train_loss = self.running_dict.get_last_value('avg_train_loss')
self._val_pbar.set_value('avg_train_loss',
f'{avg_train_loss:.4f}')
# remove val loss from last epoch
self.running_dict.remove_key('avg_val_loss', strict=False)
# set model for evaluation
self.model.eval()
def on_val_epoch_start(self):
...
def _on_val_step(self, batch_idx, batch):
...
def on_val_step(self, batch_idx, batch):
x, y = batch
x = x.to(self.device)
y = y.to(self.device)
y_pred = self.model(x)
loss = self.criterion(y_pred, y)
# log to running dict
self.running_dict.set_value('val_loss', loss.item())
# TODO: adapt to a returned dictionary or maybe decorator to have
# access before and after the actual functions
def _on_val_step_end(self):
last_val_loss = self.running_dict.get_last_value('val_loss')
self._val_pbar.set_value('val_loss', f'{last_val_loss:.4f}')
def on_val_step_end(self):
...
def _on_val_epoch_end(self):
# close validation progress bar
self._val_pbar.close()
# compute avg val loss and log it
        avg_val_loss = np.mean(self.running_dict.get_value('val_loss'))
self.logger.add_scalar('Loss/validation',
avg_val_loss, self.current_epoch)
self.running_dict.remove_key('val_loss')
self._train_pbar.set_value('avg_val_loss', f'{avg_val_loss:.4f}')
self.running_dict.update_value('avg_val_loss', avg_val_loss,
strict=False)
def on_val_epoch_end(self):
        # get avg_val_loss and update the lr scheduler based on it
avg_val_loss = self.running_dict.get_last_value('avg_val_loss')
if (self.lr_scheduler is not None and
(self.current_epoch + 1) > self.lr_scheduler_start_epoch):
self.lr_scheduler.step(avg_val_loss)
def _on_fit_epoch_end(self):
self.running_dict.flush(exceptions=['avg_val_loss'])
def on_fit_epoch_end(self):
# log histogram
model_name = self.model.__class__.__name__
for name, param in self.model.named_parameters():
self.logger.add_histogram(f'{model_name}.{name}',
param, self.current_epoch)
self.logger.add_histogram(f'{model_name}.{name}.grad',
param.grad, self.current_epoch)
def _run_train_loop(self, model, criterion, optimizer):
        # references to make model, criterion and optimizer accessible by
        # passing a reference to this class in callback hooks
self.model = model
self.criterion = criterion
self.optimizer = optimizer
# internal hooks begin with _, external hooks have the same name
self._on_train_setup()
self.on_train_setup()
self._run_callback_hook('on_train_setup')
for epoch in range(self.current_epoch, self.max_epochs):
self._on_train_epoch_start(epoch)
self.on_train_epoch_start()
self._run_callback_hook('on_train_epoch_start')
for batch_idx, batch in enumerate(self._train_pbar()):
self._on_train_step(batch_idx, batch)
self.on_train_step(batch_idx, batch)
self._on_train_step_end()
self.on_train_step_end()
self._on_train_epoch_end()
self.on_train_epoch_end()
self._run_callback_hook('on_train_epoch_end')
if self.has_validation:
with torch.no_grad():
self._on_val_epoch_start()
self.on_val_epoch_start()
self._run_callback_hook('on_val_epoch_start')
for batch_idx, batch in enumerate(self._val_pbar()):
self._on_val_step(batch_idx, batch)
self.on_val_step(batch_idx, batch)
self._on_val_step_end()
self.on_val_step_end()
self._on_val_epoch_end()
self.on_val_epoch_end()
self._run_callback_hook('on_val_epoch_end')
self._on_fit_epoch_end()
self.on_fit_epoch_end()
self._run_callback_hook('on_fit_epoch_end')
def _on_overfit_train_setup(self):
# TODO: Check if starting always from epoch 0 is fine
# model will never be saved in overfit train setup
self.current_epoch = 0
self._print_training_process_summary()
self._print_model_summary(self.model)
print("\nOverfitting in progress...")
self.model.to(self.device)
self.running_dict.set_value('epoch', self.current_epoch)
def _on_overfit_train_epoch_start(self, epoch):
self.current_epoch = epoch
self.model.train()
def on_overfit_train_epoch_start(self, epoch):
...
def _on_overfit_train_step(self, batch_idx, batch):
...
def on_overfit_train_step(self, batch_idx, batch):
        # a subclass of TrainingProcess must override this method when the
        # overfit-single-batch mode is used
raise NotImplementedError
def _on_overfit_train_epoch_end(self):
        # read the last train loss recorded during the overfit step
last_train_loss = self.running_dict.get_last_value('train_loss')
# avoids accumulating several values on the running dict
self.running_dict.remove_key('train_loss')
# simple print to track the loss in overfit mode
print(f'Epoch {self.current_epoch}/{self.max_epochs - 1}, '
f'train_loss: {last_train_loss:.4f}')
def on_overfit_train_epoch_end(self):
...
def _on_overfit_val_step(self, batch_idx, batch):
self.model.eval()
def on_overfit_val_step(self, batch_idx, batch):
...
def _on_overfit_val_epoch_end(self):
...
def on_overfit_val_epoch_end(self):
...
def _on_overfit_fit_epoch_end(self):
self.running_dict.flush()
def on_overfit_fit_epoch_end(self):
...
# TODO: See if it is possible to reduce it and blend it with
# _run_train_loop
def _run_overfit_single_batch_loop(self, model, criterion, optimizer):
self.model = model
self.criterion = criterion
self.optimizer = optimizer
self._on_overfit_train_setup()
# TODO: evaluate if should be moved to an internal hook
overfit_batch = next(iter(self.train_dataloader))
for epoch in range(self.max_epochs):
self._on_overfit_train_epoch_start(epoch)
self.on_overfit_train_epoch_start(epoch)
# equivalent to args (batch_idx, batch) of regular train loop
self._on_overfit_train_step(0, overfit_batch)
self.on_overfit_train_step(0, overfit_batch)
self._on_overfit_train_epoch_end()
self.on_overfit_train_epoch_end()
# TODO: not really validation except for using no grad
with torch.no_grad():
self._on_overfit_val_step(0, overfit_batch)
self.on_overfit_val_step(0, overfit_batch)
self._on_overfit_val_epoch_end()
self.on_overfit_val_epoch_end()
self._run_callback_hook('on_overfit_val_epoch_end')
self._on_overfit_fit_epoch_end()
self.on_overfit_fit_epoch_end()
self._run_callback_hook('on_overfit_fit_epoch_end')
def fit(self, model, criterion, optimizer):
if self.resume_from_checkpoint is not None:
model, optimizer = self.load_checkpoint(model, optimizer)
self._run_train_loop(model, criterion, optimizer)
elif self.overfit_single_batch:
self._run_overfit_single_batch_loop(model, criterion, optimizer)
else:
self._run_train_loop(model, criterion, optimizer)
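# Hypothetical usage sketch (names and argument values are illustrative):
#     process = TrainingProcess(train_dataloader, val_dataloader, max_epochs=100,
#                               device=torch.device('cuda'))
#     process.fit(model, criterion, optimizer)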
|
py | b4173609c54b3229de309f59d9b242fab78ba9bd | from django.db import connections
from django.core.management.base import BaseCommand
def ingestTrack(seqalignment, loadmultistrands, **ignored):
with connections['core'].cursor() as cursor:
queryStr = 'insert into browserwebsite.api_loadmultistrands (seqalignment, loadmultistrands) values (%s, %s);'
queryArgs = [seqalignment, loadmultistrands]
        cursor.execute(queryStr, queryArgs)
return seqalignment, loadmultistrands
class Command(BaseCommand):
def add_arguments(self, parser):
parser.add_argument('--seqalignment', type=int)
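        # note: argparse's type=bool turns any non-empty string (including
        # "False") into True, so --loadmultistrands False still parses as True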
parser.add_argument('--loadmultistrands', type=bool)
def handle(self, *args, **options):
print(ingestTrack(**options))
|
py | b41736ecf5623fc11a8ca9063fb0141bee222afc | """
Given an integer array nums and an integer k, return true if there are two distinct indices i and j in the array such that nums[i] == nums[j] and abs(i - j) <= k.
Example 1:
Input: nums = [1,2,3,1], k = 3
Output: true
Example 2:
Input: nums = [1,0,1,1], k = 1
Output: true
Example 3:
Input: nums = [1,2,3,1,2,3], k = 2
Output: false
Constraints:
1 <= nums.length <= 10^5
-10^9 <= nums[i] <= 10^9
0 <= k <= 10^5
"""
class Solution:
def containsNearbyDuplicate(self, nums: List[int], k: int) -> bool:
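        # map each value to the indices where it was seen; comparing the current
        # index with the most recent previous index is enough to check the window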
board = {}
for i in range(len(nums)):
if nums[i] in board:
if i - board[nums[i]][-1] <= k:
return True
board[nums[i]].append(i)
else:
board[nums[i]] = [i]
return False
|
py | b41737181319263385df4123284fe06631caaa1f | from engine.fs.serialization import Serializable
class Player(Serializable):
def __init__(self, money: int, name: str, avaliabe_items: list):
super(Player,self).__init__(money=money)
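        # money is forwarded to the Serializable base class (presumably so it is
        # included when the object is serialized)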
        self.money: int = money
self.name = name
self.avaliabe_items = avaliabe_items
self.this_item = None
|
py | b41737ae6ce14de12e802684f4f907c932d5ace7 | __version__ = '1.0'
default_app_config = 'django_dynamic_fields.apps.ProjectConfig' |
py | b41737f1e043f0bb55681eb697e2816a9f851dce | """
Helper functions for working with the BundleModel.
Some functions placed in this central location to prevent circular imports.
"""
import http.client
import re
from bottle import abort, local, request
from codalab.bundles import PrivateBundle
from codalab.lib import bundle_util
from codalab.model.tables import (
GROUP_OBJECT_PERMISSION_ALL,
GROUP_OBJECT_PERMISSION_NONE,
GROUP_OBJECT_PERMISSION_READ,
)
from codalab.objects.permission import check_bundles_have_read_permission, unique_group
def get_resource_ids(document, type_):
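    # Pull resource ids out of a JSON-API style request document, accepting
    # either a single resource object or a list, and checking each link's type.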
links = document['data']
if not isinstance(links, list):
links = [links]
if any(link['type'] != type_ for link in links):
raise abort(http.client.BAD_REQUEST, 'type must be %r' % type_)
return [link['id'] for link in links]
def resolve_owner_in_keywords(keywords):
# Resolve references to owner ids
def resolve(keyword):
# Example: owner=codalab => owner_id=0
m = re.match('owner=(.+)', keyword)
if not m:
return keyword
return 'owner_id=%s' % getattr(local.model.get_user(username=m.group(1)), 'user_id', 'x')
return list(map(resolve, keywords))
#############################################################
# BUNDLES
#############################################################
# Placed in this module to prevent cyclic imports between rest.bundles and rest.worksheets
def get_bundle_infos(
uuids,
get_children=False,
get_single_host_worksheet=False,
get_host_worksheets=False,
get_permissions=False,
ignore_not_found=True,
model=None,
):
"""
Return a map from bundle uuid to info.
:param Collection[str] uuids: uuids of bundles to fetch
:param bool get_children: include children
:param bool get_single_host_worksheet: include one host_worksheet per bundle uuid
:param bool get_host_worksheets: include all host worksheets
:param bool get_permissions: include group permissions
:param bool ignore_not_found: abort with 404 NOT FOUND when False and bundle doesn't exist
:param BundleModel model: model used to make database queries
:rtype: dict[str, dict]
"""
if model is None:
model = local.model
if len(uuids) == 0:
return {}
bundles = model.batch_get_bundles(uuid=uuids)
bundle_infos = {
bundle.uuid: bundle_util.bundle_to_bundle_info(model, bundle) for bundle in bundles
}
# Implement permissions policies
perms = _get_user_bundle_permissions(model, uuids)
readable = {u for u, perm in perms.items() if perm >= GROUP_OBJECT_PERMISSION_READ}
anonymous = {
u
for u, perm in perms.items()
if u in bundle_infos
and (perm < GROUP_OBJECT_PERMISSION_READ or bundle_infos[u]['is_anonymous'])
}
for uuid in uuids:
bundle = bundle_infos.get(uuid)
# Bundle doesn't exist; abort or skip
if bundle is None:
if ignore_not_found:
continue
else:
abort(http.client.NOT_FOUND, "Bundle %s not found" % uuid)
# Replace bundles that the user does not have read access to
elif uuid not in readable:
bundle_infos[uuid] = bundle_util.bundle_to_bundle_info(
model, PrivateBundle.construct(uuid)
)
# Mask owners of anonymous bundles that user does not have all access to
elif uuid in anonymous:
bundle['owner_id'] = None
# Set permission
bundle['permission'] = perms[uuid]
if get_children:
parent2children = model.get_children_uuids(readable)
# Gather all children bundle uuids and fetch permissions
child_uuids = [uuid for l in parent2children.values() for uuid in l]
child_perms = _get_user_bundle_permissions(model, child_uuids)
# Lookup bundle names
child_names = model.get_bundle_names(child_uuids)
# Set children infos
for parent_uuid, children in parent2children.items():
bundle_infos[parent_uuid]['children'] = [
{'uuid': child_uuid, 'metadata': {'name': child_names[child_uuid]}}
for child_uuid in children
if child_perms[child_uuid] >= GROUP_OBJECT_PERMISSION_READ
]
if get_single_host_worksheet:
# Query for 5 worksheet uuids per bundle to check the read permissions for, since we
# just need a single host worksheet per bundle uuid. This is much faster than fetching all
# worksheet uuid's per bundle.
host_worksheets = model.get_host_worksheet_uuids(readable, 5)
worksheet_uuids = [uuid for l in host_worksheets.values() for uuid in l]
worksheet_names = _get_readable_worksheet_names(model, worksheet_uuids)
for bundle_uuid, host_uuids in host_worksheets.items():
if bundle_uuid not in bundle_infos:
continue
for host_uuid in host_uuids:
if host_uuid in worksheet_names:
bundle_infos[bundle_uuid]['host_worksheet'] = {
'uuid': host_uuid,
'name': worksheet_names[host_uuid],
}
# Just set a single host worksheet per bundle uuid
break
if get_host_worksheets:
host_worksheets = model.get_all_host_worksheet_uuids(readable)
# Gather all worksheet uuids
worksheet_uuids = [uuid for l in host_worksheets.values() for uuid in l]
worksheet_names = _get_readable_worksheet_names(model, worksheet_uuids)
# Fill the info
for bundle_uuid, host_uuids in host_worksheets.items():
if bundle_uuid not in bundle_infos:
continue
bundle_infos[bundle_uuid]['host_worksheets'] = [
{'uuid': host_uuid, 'name': worksheet_names[host_uuid]}
for host_uuid in host_uuids
if host_uuid in worksheet_names
]
if get_permissions:
# Fill the permissions info
bundle2group_perms = model.batch_get_group_bundle_permissions(
request.user.user_id, readable
)
for uuid, group_perms in bundle2group_perms.items():
            # Only show group permissions to the user if they have
# at least read permission on this bundle.
if uuid in anonymous:
bundle_infos[uuid]['group_permissions'] = []
else:
bundle_infos[uuid]['group_permissions'] = group_perms
return bundle_infos
def _get_user_bundle_permissions(model, uuids):
return model.get_user_bundle_permissions(
request.user.user_id, uuids, model.get_bundle_owner_ids(uuids)
)
def _get_readable_worksheet_names(model, worksheet_uuids):
# Returns a dictionary of readable worksheet uuid's as keys and corresponding names as values
readable_worksheet_uuids = _filter_readable_worksheet_uuids(model, worksheet_uuids)
return dict(
(worksheet.uuid, worksheet.name)
for worksheet in model.batch_get_worksheets(
fetch_items=False, uuid=readable_worksheet_uuids
)
)
def _filter_readable_worksheet_uuids(model, worksheet_uuids):
# Returns a set of worksheet uuid's the user has read permission for
worksheet_permissions = model.get_user_worksheet_permissions(
request.user.user_id, worksheet_uuids, model.get_worksheet_owner_ids(worksheet_uuids)
)
return set(
uuid
for uuid, permission in worksheet_permissions.items()
if permission >= GROUP_OBJECT_PERMISSION_READ
)
def check_target_has_read_permission(target):
check_bundles_have_read_permission(local.model, request.user, [target[0]])
def get_target_info(target, depth):
"""
Returns information about an individual target inside the bundle
Raises NotFoundError if target bundle or path don't exist
"""
check_target_has_read_permission(target)
return local.download_manager.get_target_info(target[0], target[1], depth)
#############################################################
# GROUPS
#############################################################
def ensure_unused_group_name(name):
"""
Ensure group names are unique. Note: for simplicity, we are
ensuring uniqueness across the system, even on group names that
the user may not have access to.
"""
groups = local.model.batch_get_groups(name=name)
if len(groups) != 0:
abort(http.client.CONFLICT, 'Group with name %s already exists' % name)
def get_group_info(group_spec, need_admin, access_all_groups=False):
"""
Resolve |group_spec| and return the associated group_info.
"""
user_id = request.user.user_id
is_root_user = user_id == local.model.root_user_id
# If we're root, then we can access any group, otherwise get is_admin column with group_info
if is_root_user or access_all_groups:
# note: the returned object will NOT contain the 'is_admin' column
group_info = unique_group(local.model, group_spec, user_id=None)
else:
# note: the returned object will contain the 'is_admin' column
group_info = unique_group(local.model, group_spec, user_id=user_id)
# If not root and need admin access, but don't have it, raise error.
if not is_root_user and need_admin and group_info.get('is_admin') == False:
abort(http.client.FORBIDDEN, 'You are not the admin of group %s.' % group_spec)
# No one can admin the public group (not even root), because it's a special group.
if need_admin and group_info['uuid'] == local.model.public_group_uuid:
abort(http.client.FORBIDDEN, 'Cannot modify the public group %s.' % group_spec)
return group_info
|
py | b4173823da4f1f44818aaa8399da4552e98f3cbb | #!/usr/bin/python3
# Copyright (c) 2020 Stanford University
#
# Permission to use, copy, modify, and/or distribute this software for any
# purpose with or without fee is hereby granted, provided that the above
# copyright notice and this permission notice appear in all copies.
#
# THE SOFTWARE IS PROVIDED "AS IS" AND THE AUTHOR DISCLAIMS ALL WARRANTIES
# WITH REGARD TO THIS SOFTWARE INCLUDING ALL IMPLIED WARRANTIES OF
# MERCHANTABILITY AND FITNESS. IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR
# ANY SPECIAL, DIRECT, INDIRECT, OR CONSEQUENTIAL DAMAGES OR ANY DAMAGES
# WHATSOEVER RESULTING FROM LOSS OF USE, DATA OR PROFITS, WHETHER IN AN
# ACTION OF CONTRACT, NEGLIGENCE OR OTHER TORTIOUS ACTION, ARISING OUT OF
# OR IN CONNECTION WITH THE USE OR PERFORMANCE OF THIS SOFTWARE.
# This cperf benchmark computes shuffle performance as a function of the
# message size (all messages are equal) and the cluster size.
from cperf import *
parser = get_parser(description='Shuffle throughput as a function of '
'the message size and cluster size.',
usage='%(prog)s [options]', defaults={})
parser.usage = parser.format_help()
options = parser.parse_args()
init(options)
message_sizes = list(range(1000, 10000, 1000))
message_sizes += list(range(10000, 50000, 5000))
if not options.plot_only:
for cluster_size in range(2, options.num_nodes + 1):
nodes = range(1, cluster_size + 1)
command = "%s/%s %s --ifname %s --num-nodes %d --master-addr %s" \
" --log-file %s" % \
(options.shuffle_dir, options.shuffle_bin,
options.caladan_cfg, options.ifname, cluster_size,
options.master_addr, options.log_file)
try:
log("Starting nodes with command:\n%s" % command)
start_nodes(nodes, command)
run_bench_prefix = "run_bench --protocol udp --udp-port 5002 "
for msg_size in message_sizes:
do_cmd("gen_workload --avg-msg-size " + str(msg_size), nodes)
do_cmd(run_bench_prefix + "--policy LRPT --times 20", nodes)
log("Stopping nodes")
stop_nodes()
except Exception as e:
log("Caught exception:\n\t" + str(e))
log("Cleaning up orphaned '%s' processes" % options.shuffle_bin)
do_ssh(["pkill", options.shuffle_bin], nodes)
# Parse the log files to extract useful data
out = open(options.log_dir + "/cp_message_size.data", "w")
out.write("nodes avg_msg_bytes throughput_gbps\n")
for line in open(options.log_dir + "/rc01.log", "r"):
match = re.match('.*collected ([0-9.]+).*'
'policy ([a-zA-Z]+).*'
'max active ([0-9]+).*'
'cluster size ([0-9.]+).*'
'avg. msg size ([0-9.]+).*'
'msg skewness ([0-9.]+).*'
'part. skewness ([0-9.]+).*'
'throughput ([0-9.]+)', line)
if match:
samples = int(match.group(1))
policy = match.group(2)
max_active = match.group(3)
cluster_size = int(match.group(4))
avg_msg_size = int(match.group(5))
msg_skew = float(match.group(6))
part_skew = float(match.group(7))
throughput = float(match.group(8))
        out.write("%d %d %.1f\n" % (cluster_size, avg_msg_size, throughput))
out.close()
|
py | b41739099700ca5662f80a453205e36c04cbc5d0 | """
Graphical model (GM)-based optimization algorithm using Theano
"""
from past.utils import old_div
import logging
import time
import numpy as np
from scipy.special import erf
from . import pyll
from .pyll import scope
from .pyll.stochastic import implicit_stochastic
from .base import miscs_to_idxs_vals
from .base import miscs_update_idxs_vals
# from .base import Trials
from . import rand
__authors__ = "James Bergstra"
__license__ = "3-clause BSD License"
__contact__ = "github.com/jaberg/hyperopt"
logger = logging.getLogger(__name__)
EPS = 1e-12
# -- default linear forgetting. don't try to change by writing this variable
# because it's captured in function default args when this file is read
DEFAULT_LF = 25
adaptive_parzen_samplers = {}
# a decorator to register functions to the dict `adaptive_parzen_samplers`
def adaptive_parzen_sampler(name):
def wrapper(f):
assert name not in adaptive_parzen_samplers
adaptive_parzen_samplers[name] = f
return f
return wrapper
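# e.g. the "uniform" sampler below is declared with @adaptive_parzen_sampler("uniform"),
# which makes adaptive_parzen_samplers["uniform"] resolve to ap_uniform_sampler.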
#
# These are some custom distributions
# that are used to represent posterior distributions.
#
# -- Categorical
@scope.define
def categorical_lpdf(sample, p):
if sample.size:
return np.log(np.asarray(p)[sample])
return np.asarray([])
@scope.define
def randint_via_categorical_lpdf(sample, p):
if sample.size:
return np.log(np.asarray(p)[sample])
return np.asarray([])
# -- Bounded Gaussian Mixture Model (BGMM)
@implicit_stochastic
@scope.define
def GMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):
"""Sample from truncated 1-D Gaussian Mixture Model"""
weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))
assert len(weights) == len(mus) == len(sigmas)
n_samples = int(np.prod(size))
# n_components = len(weights)
if low is None and high is None:
# -- draw from a standard GMM
active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
samples = rng.normal(loc=mus[active], scale=sigmas[active])
else:
# -- draw from truncated components, handling one-sided truncation
low = float(low) if low is not None else -float("Inf")
high = float(high) if high is not None else float("Inf")
if low >= high:
raise ValueError("low >= high", (low, high))
samples = []
while len(samples) < n_samples:
active = np.argmax(rng.multinomial(1, weights))
draw = rng.normal(loc=mus[active], scale=sigmas[active])
if low <= draw < high:
samples.append(draw)
samples = np.reshape(np.asarray(samples), size)
if q is None:
return samples
return np.round(old_div(samples, q)) * q
@scope.define
def normal_cdf(x, mu, sigma):
top = x - mu
bottom = np.maximum(np.sqrt(2) * sigma, EPS)
z = old_div(top, bottom)
return 0.5 * (1 + erf(z))
@scope.define
def GMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
def print_verbose(s, x):
return print(f"GMM1_lpdf:{s}", x)
verbose = 0
samples, weights, mus, sigmas = list(
map(np.asarray, (samples, weights, mus, sigmas))
)
if samples.size == 0:
return np.asarray([])
if weights.ndim != 1:
raise TypeError("need vector of weights", weights.shape)
if mus.ndim != 1:
raise TypeError("need vector of mus", mus.shape)
if sigmas.ndim != 1:
raise TypeError("need vector of sigmas", sigmas.shape)
assert len(weights) == len(mus) == len(sigmas)
_samples = samples
samples = _samples.flatten()
if verbose:
print_verbose("samples", set(samples))
print_verbose("weights", weights)
print_verbose("mus", mus)
print_verbose("sigmas", sigmas)
print_verbose("low", low)
print_verbose("high", high)
print_verbose("q", q)
if low is None and high is None:
p_accept = 1
else:
p_accept = np.sum(
weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))
)
if q is None:
dist = samples[:, None] - mus
mahal = (old_div(dist, np.maximum(sigmas, EPS))) ** 2
# mahal shape is (n_samples, n_components)
Z = np.sqrt(2 * np.pi * sigmas ** 2)
coef = weights / Z / p_accept
rval = logsum_rows(-0.5 * mahal + np.log(coef))
else:
prob = np.zeros(samples.shape, dtype="float64")
for w, mu, sigma in zip(weights, mus, sigmas):
if high is None:
ubound = samples + old_div(q, 2.0)
else:
ubound = np.minimum(samples + old_div(q, 2.0), high)
if low is None:
lbound = samples - old_div(q, 2.0)
else:
lbound = np.maximum(samples - old_div(q, 2.0), low)
# -- two-stage addition is slightly more numerically accurate
inc_amt = w * normal_cdf(ubound, mu, sigma)
inc_amt -= w * normal_cdf(lbound, mu, sigma)
prob += inc_amt
rval = np.log(prob) - np.log(p_accept)
if verbose:
print_verbose("rval:", dict(list(zip(samples, rval))))
rval.shape = _samples.shape
return rval
# -- Mixture of Log-Normals
@scope.define
def lognormal_cdf(x, mu, sigma):
# wikipedia claims cdf is
# .5 + .5 erf( log(x) - mu / sqrt(2 sigma^2))
#
# the maximum is used to move negative values and 0 up to a point
# where they do not cause nan or inf, but also don't contribute much
# to the cdf.
if len(x) == 0:
return np.asarray([])
if x.min() < 0:
raise ValueError("negative arg to lognormal_cdf", x)
olderr = np.seterr(divide="ignore")
try:
top = np.log(np.maximum(x, EPS)) - mu
bottom = np.maximum(np.sqrt(2) * sigma, EPS)
z = old_div(top, bottom)
return 0.5 + 0.5 * erf(z)
finally:
np.seterr(**olderr)
@scope.define
def lognormal_lpdf(x, mu, sigma):
# formula copied from wikipedia
# http://en.wikipedia.org/wiki/Log-normal_distribution
assert np.all(sigma >= 0)
sigma = np.maximum(sigma, EPS)
Z = sigma * x * np.sqrt(2 * np.pi)
E = 0.5 * (old_div((np.log(x) - mu), sigma)) ** 2
rval = -E - np.log(Z)
return rval
@scope.define
def qlognormal_lpdf(x, mu, sigma, q):
# casting rounds up to nearest step multiple.
# so lpdf is log of integral from x-step to x+1 of P(x)
# XXX: subtracting two numbers potentially very close together.
return np.log(lognormal_cdf(x, mu, sigma) - lognormal_cdf(x - q, mu, sigma))
@implicit_stochastic
@scope.define
def LGMM1(weights, mus, sigmas, low=None, high=None, q=None, rng=None, size=()):
weights, mus, sigmas = list(map(np.asarray, (weights, mus, sigmas)))
n_samples = np.prod(size)
# n_components = len(weights)
if low is None and high is None:
active = np.argmax(rng.multinomial(1, weights, (n_samples,)), axis=1)
assert len(active) == n_samples
samples = np.exp(rng.normal(loc=mus[active], scale=sigmas[active]))
else:
# -- draw from truncated components
# TODO: one-sided-truncation
low = float(low)
high = float(high)
if low >= high:
raise ValueError("low >= high", (low, high))
samples = []
while len(samples) < n_samples:
active = np.argmax(rng.multinomial(1, weights))
draw = rng.normal(loc=mus[active], scale=sigmas[active])
if low <= draw < high:
samples.append(np.exp(draw))
samples = np.asarray(samples)
samples = np.reshape(np.asarray(samples), size)
if q is not None:
samples = np.round(old_div(samples, q)) * q
return samples
def logsum_rows(x):
m = x.max(axis=1)
return np.log(np.exp(x - m[:, None]).sum(axis=1)) + m
@scope.define
def LGMM1_lpdf(samples, weights, mus, sigmas, low=None, high=None, q=None):
samples, weights, mus, sigmas = list(
map(np.asarray, (samples, weights, mus, sigmas))
)
assert weights.ndim == 1
assert mus.ndim == 1
assert sigmas.ndim == 1
_samples = samples
if samples.ndim != 1:
samples = samples.flatten()
if low is None and high is None:
p_accept = 1
else:
p_accept = np.sum(
weights * (normal_cdf(high, mus, sigmas) - normal_cdf(low, mus, sigmas))
)
if q is None:
# compute the lpdf of each sample under each component
lpdfs = lognormal_lpdf(samples[:, None], mus, sigmas)
rval = logsum_rows(lpdfs + np.log(weights))
else:
# compute the lpdf of each sample under each component
prob = np.zeros(samples.shape, dtype="float64")
for w, mu, sigma in zip(weights, mus, sigmas):
if high is None:
ubound = samples + old_div(q, 2.0)
else:
ubound = np.minimum(samples + old_div(q, 2.0), np.exp(high))
if low is None:
lbound = samples - old_div(q, 2.0)
else:
lbound = np.maximum(samples - old_div(q, 2.0), np.exp(low))
lbound = np.maximum(0, lbound)
# -- two-stage addition is slightly more numerically accurate
inc_amt = w * lognormal_cdf(ubound, mu, sigma)
inc_amt -= w * lognormal_cdf(lbound, mu, sigma)
prob += inc_amt
rval = np.log(prob) - np.log(p_accept)
rval.shape = _samples.shape
return rval
#
# This is the weird heuristic ParzenWindow estimator used for continuous
# distributions in various ways.
#
@scope.define_info(o_len=3)
def adaptive_parzen_normal_orig(mus, prior_weight, prior_mu, prior_sigma):
"""
A heuristic estimator for the mu and sigma values of a GMM
TODO: try to find this heuristic in the literature, and cite it - Yoshua
mentioned the term 'elastic' I think?
    mus - vector of observed values; one GMM component is centered on each
"""
mus_orig = np.array(mus)
mus = np.array(mus)
assert str(mus.dtype) != "object"
if mus.ndim != 1:
raise TypeError("mus must be vector", mus)
if len(mus) == 0:
mus = np.asarray([prior_mu])
sigma = np.asarray([prior_sigma])
elif len(mus) == 1:
mus = np.asarray([prior_mu] + [mus[0]])
sigma = np.asarray([prior_sigma, prior_sigma * 0.5])
elif len(mus) >= 2:
order = np.argsort(mus)
mus = mus[order]
sigma = np.zeros_like(mus)
sigma[1:-1] = np.maximum(mus[1:-1] - mus[0:-2], mus[2:] - mus[1:-1])
if len(mus) > 2:
lsigma = mus[2] - mus[0]
usigma = mus[-1] - mus[-3]
else:
lsigma = mus[1] - mus[0]
usigma = mus[-1] - mus[-2]
sigma[0] = lsigma
sigma[-1] = usigma
# XXX: is sorting them necessary anymore?
# un-sort the mus and sigma
mus[order] = mus.copy()
sigma[order] = sigma.copy()
if not np.all(mus_orig == mus):
print("orig", mus_orig)
print("mus", mus)
assert np.all(mus_orig == mus)
# put the prior back in
mus = np.asarray([prior_mu] + list(mus))
sigma = np.asarray([prior_sigma] + list(sigma))
maxsigma = prior_sigma
# -- magic formula:
minsigma = old_div(prior_sigma, np.sqrt(1 + len(mus)))
sigma = np.clip(sigma, minsigma, maxsigma)
weights = np.ones(len(mus), dtype=mus.dtype)
weights[0] = prior_weight
weights = old_div(weights, weights.sum())
return weights, mus, sigma
@scope.define
def linear_forgetting_weights(N, LF):
assert N >= 0
assert LF > 0
if N == 0:
return np.asarray([])
if N < LF:
return np.ones(N)
ramp = np.linspace(old_div(1.0, N), 1.0, num=N - LF)
flat = np.ones(LF)
weights = np.concatenate([ramp, flat], axis=0)
assert weights.shape == (N,), (weights.shape, N)
return weights
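# e.g. linear_forgetting_weights(5, LF=3) -> [0.2, 1.0, 1.0, 1.0, 1.0]: the LF most
# recent observations keep full weight while older ones are linearly down-weighted.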
# XXX: make TPE do a post-inference pass over the pyll graph and insert
# non-default LF argument
@scope.define_info(o_len=3)
def adaptive_parzen_normal(mus, prior_weight, prior_mu, prior_sigma, LF=DEFAULT_LF):
"""
    mus - vector of observed values; one GMM component is centered on each
"""
mus = np.array(mus)
assert str(mus.dtype) != "object"
if mus.ndim != 1:
raise TypeError("mus must be vector", mus)
if len(mus) == 0:
srtd_mus = np.asarray([prior_mu])
sigma = np.asarray([prior_sigma])
prior_pos = 0
elif len(mus) == 1:
if prior_mu < mus[0]:
prior_pos = 0
srtd_mus = np.asarray([prior_mu, mus[0]])
sigma = np.asarray([prior_sigma, prior_sigma * 0.5])
else:
prior_pos = 1
srtd_mus = np.asarray([mus[0], prior_mu])
sigma = np.asarray([prior_sigma * 0.5, prior_sigma])
elif len(mus) >= 2:
# create new_mus, which is sorted, and in which
# the prior has been inserted
order = np.argsort(mus)
prior_pos = np.searchsorted(mus[order], prior_mu)
srtd_mus = np.zeros(len(mus) + 1)
srtd_mus[:prior_pos] = mus[order[:prior_pos]]
srtd_mus[prior_pos] = prior_mu
srtd_mus[prior_pos + 1 :] = mus[order[prior_pos:]]
sigma = np.zeros_like(srtd_mus)
sigma[1:-1] = np.maximum(
srtd_mus[1:-1] - srtd_mus[0:-2], srtd_mus[2:] - srtd_mus[1:-1]
)
lsigma = srtd_mus[1] - srtd_mus[0]
usigma = srtd_mus[-1] - srtd_mus[-2]
sigma[0] = lsigma
sigma[-1] = usigma
if LF and LF < len(mus):
unsrtd_weights = linear_forgetting_weights(len(mus), LF)
srtd_weights = np.zeros_like(srtd_mus)
assert len(unsrtd_weights) + 1 == len(srtd_mus)
srtd_weights[:prior_pos] = unsrtd_weights[order[:prior_pos]]
srtd_weights[prior_pos] = prior_weight
srtd_weights[prior_pos + 1 :] = unsrtd_weights[order[prior_pos:]]
else:
srtd_weights = np.ones(len(srtd_mus))
srtd_weights[prior_pos] = prior_weight
# -- magic formula:
maxsigma = old_div(prior_sigma, 1.0)
minsigma = old_div(prior_sigma, min(100.0, (1.0 + len(srtd_mus))))
sigma = np.clip(sigma, minsigma, maxsigma)
sigma[prior_pos] = prior_sigma
assert prior_sigma > 0
assert maxsigma > 0
assert minsigma > 0
assert np.all(sigma > 0), (sigma.min(), minsigma, maxsigma)
srtd_weights /= srtd_weights.sum()
return srtd_weights, srtd_mus, sigma
#
# Adaptive Parzen Samplers
# These produce conditional estimators for various prior distributions
#
# NOTE: These are actually used in a fairly complicated way.
# They are actually returning pyll.Apply AST (Abstract Syntax Tree) objects.
# This AST is then manipulated and the corresponding _lpdf function is called
# (e.g GMM1_lpdf)
#
# Please see the build_posterior function for details
# -- Uniform
@adaptive_parzen_sampler("uniform")
def ap_uniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
obs, prior_weight, prior_mu, prior_sigma
)
return scope.GMM1(
weights, mus, sigmas, low=low, high=high, q=None, size=size, rng=rng
)
@adaptive_parzen_sampler("quniform")
def ap_quniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
obs, prior_weight, prior_mu, prior_sigma
)
return scope.GMM1(weights, mus, sigmas, low=low, high=high, q=q, size=size, rng=rng)
@adaptive_parzen_sampler("loguniform")
def ap_loguniform_sampler(obs, prior_weight, low, high, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(obs), prior_weight, prior_mu, prior_sigma
)
rval = scope.LGMM1(weights, mus, sigmas, low=low, high=high, size=size, rng=rng)
return rval
@adaptive_parzen_sampler("qloguniform")
def ap_qloguniform_sampler(obs, prior_weight, low, high, q, size=(), rng=None):
prior_mu = 0.5 * (high + low)
prior_sigma = 1.0 * (high - low)
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(
# -- map observations that were quantized to be below exp(low)
# (particularly 0) back up to exp(low) where they will
# interact in a reasonable way with the AdaptiveParzen
# thing.
scope.maximum(
obs,
scope.maximum( # -- protect against exp(low) underflow
EPS, scope.exp(low)
),
)
),
prior_weight,
prior_mu,
prior_sigma,
)
return scope.LGMM1(weights, mus, sigmas, low, high, q=q, size=size, rng=rng)
# -- Normal
@adaptive_parzen_sampler("normal")
def ap_normal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)
return scope.GMM1(weights, mus, sigmas, size=size, rng=rng)
@adaptive_parzen_sampler("qnormal")
def ap_qnormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(obs, prior_weight, mu, sigma)
return scope.GMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
@adaptive_parzen_sampler("lognormal")
def ap_loglognormal_sampler(obs, prior_weight, mu, sigma, size=(), rng=None):
weights, mus, sigmas = scope.adaptive_parzen_normal(
scope.log(obs), prior_weight, mu, sigma
)
rval = scope.LGMM1(weights, mus, sigmas, size=size, rng=rng)
return rval
@adaptive_parzen_sampler("qlognormal")
def ap_qlognormal_sampler(obs, prior_weight, mu, sigma, q, size=(), rng=None):
log_obs = scope.log(scope.maximum(obs, EPS))
weights, mus, sigmas = scope.adaptive_parzen_normal(
log_obs, prior_weight, mu, sigma
)
rval = scope.LGMM1(weights, mus, sigmas, q=q, size=size, rng=rng)
return rval
# -- Categorical
@adaptive_parzen_sampler("randint")
def ap_randint_sampler(
obs, prior_weight, low, high=None, size=(), rng=None, LF=DEFAULT_LF
):
# randint can be seen as a categorical with high - low categories
weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
# if high is None, then low represents high and there is no offset
domain_size = low if high is None else high - low
offset = pyll.Literal(0) if high is None else low
counts = scope.bincount(obs, offset=offset, minlength=domain_size, weights=weights)
# -- add in some prior pseudocounts
pseudocounts = counts + prior_weight
random_variable = scope.randint_via_categorical(
old_div(pseudocounts, scope.sum(pseudocounts)), size=size, rng=rng
)
return random_variable
@scope.define
def tpe_cat_pseudocounts(counts, prior_weight, p, size):
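    # blend observed category counts with the prior probabilities p (scaled by
    # prior_weight and the number of categories) and renormalize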
if np.prod(size) == 0:
return []
if p.ndim == 2:
assert np.all(p == p[0])
p = p[0]
pseudocounts = counts + p.size * (prior_weight * p)
return old_div(pseudocounts, np.sum(pseudocounts))
@adaptive_parzen_sampler("categorical")
def ap_categorical_sampler(obs, prior_weight, p, size=(), rng=None, LF=DEFAULT_LF):
weights = scope.linear_forgetting_weights(scope.len(obs), LF=LF)
# in order to support pchoice here, we need to find the size of p,
# but p can have p.ndim == 2, so we pass p to bincount and unpack it
# (if required) there
counts = scope.bincount(obs, p=p, weights=weights)
pseudocounts = scope.tpe_cat_pseudocounts(counts, prior_weight, p, size)
return scope.categorical(pseudocounts, size=size, rng=rng)
#
# Posterior clone performs symbolic inference on the pyll graph of priors.
#
@scope.define_info(o_len=2)
def ap_split_trials(o_idxs, o_vals, l_idxs, l_vals, gamma, gamma_cap=DEFAULT_LF):
"""Split the elements of `o_vals` (observations values) into two groups: those for
trials whose losses (`l_vals`) were above gamma, and those below gamma. Note that
only unique elements are returned, so the total number of returned elements might
be lower than `len(o_vals)`
"""
o_idxs, o_vals, l_idxs, l_vals = list(
map(np.asarray, [o_idxs, o_vals, l_idxs, l_vals])
)
# XXX if this is working, refactor this sort for efficiency
# Splitting is done this way to cope with duplicate loss values.
n_below = min(int(np.ceil(gamma * np.sqrt(len(l_vals)))), gamma_cap)
l_order = np.argsort(l_vals)
keep_idxs = set(l_idxs[l_order[:n_below]])
below = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
keep_idxs = set(l_idxs[l_order[n_below:]])
above = [v for i, v in zip(o_idxs, o_vals) if i in keep_idxs]
return np.asarray(below), np.asarray(above)
@scope.define
def broadcast_best(samples, below_llik, above_llik):
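    # pick the single candidate with the highest log(l(x)) - log(g(x)) score
    # (the EI surrogate) and repeat it for every requested sample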
if len(samples):
score = below_llik - above_llik
if len(samples) != len(score):
raise ValueError()
best = np.argmax(score)
return [samples[best]] * len(samples)
else:
return []
def build_posterior(
specs,
prior_idxs,
prior_vals,
obs_idxs,
obs_vals,
obs_loss_idxs,
obs_loss_vals,
oloss_gamma,
prior_weight,
):
"""
This method clones a posterior inference graph by iterating forward in
topological order, and replacing prior random-variables (prior_idxs, prior_vals)
with new posterior distributions (post_specs, post_idxs, post_vals) that make use
of observations (obs_idxs, obs_vals).
"""
assert all(
isinstance(arg, pyll.Apply)
for arg in [obs_loss_idxs, obs_loss_vals, oloss_gamma]
)
assert set(prior_idxs.keys()) == set(prior_vals.keys())
expr = pyll.as_apply([specs, prior_idxs, prior_vals])
nodes = pyll.dfs(expr)
# build the joint posterior distribution as the values in this memo
memo = {}
# map prior RVs to observations
obs_memo = {}
for nid in prior_vals:
# construct the leading args for each call to adaptive_parzen_sampler
# which will permit the "adaptive parzen samplers" to adapt to the
# correct samples.
obs_below, obs_above = scope.ap_split_trials(
obs_idxs[nid], obs_vals[nid], obs_loss_idxs, obs_loss_vals, oloss_gamma
)
obs_memo[prior_vals[nid]] = [obs_below, obs_above]
for node in nodes:
if node not in memo:
new_inputs = [memo[arg] for arg in node.inputs()]
if node in obs_memo:
# -- this case corresponds to an observed Random Var
# node.name is a distribution like "normal", "randint", etc.
obs_below, obs_above = obs_memo[node]
aa = [memo[a] for a in node.pos_args]
fn = adaptive_parzen_samplers[node.name]
b_args = [obs_below, prior_weight] + aa
named_args = {kw: memo[arg] for (kw, arg) in node.named_args}
b_post = fn(*b_args, **named_args)
a_args = [obs_above, prior_weight] + aa
a_post = fn(*a_args, **named_args)
# fn is a function e.g ap_uniform_sampler, ap_normal_sampler, etc
# b_post and a_post are pyll.Apply objects that are
# AST (Abstract Syntax Trees). They create the distribution,
# (e.g. using adaptive_parzen_normal), and then
# call a function to sample randomly from that distribution
# (e.g. using scope.GMM1) which return those samples.
#
# However we are only interested in using the samples from b_post.
# This code looks at the AST and grabs the function name that we used
# for sampling (e.g. scope.GMM1) and modifies it, e.g. to
# "scope.GMM1_lpdf". It then calls this function, passing in the
                # samples as the first parameter.
#
# The result is that we are effectively calling, for example:
# below_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_below, ...))
# above_llik = GMM1_lpdf( b_post, *adaptive_parzen_normal(obs_above, ...))
assert a_post.name == b_post.name
fn_lpdf = getattr(scope, a_post.name + "_lpdf")
a_kwargs = {
n: a for n, a in a_post.named_args if n not in ("rng", "size")
}
b_kwargs = {
n: a for n, a in b_post.named_args if n not in ("rng", "size")
}
# calculate the log likelihood of b_post under both distributions
below_llik = fn_lpdf(*([b_post] + b_post.pos_args), **b_kwargs)
above_llik = fn_lpdf(*([b_post] + a_post.pos_args), **a_kwargs)
# compute new_node based on below & above log likelihood
new_node = scope.broadcast_best(b_post, below_llik, above_llik)
elif hasattr(node, "obj"):
# -- keep same literals in the graph
new_node = node
else:
# -- this case is for all the other stuff in the graph
new_node = node.clone_from_inputs(new_inputs)
memo[node] = new_node
post_idxs = {nid: memo[idxs] for nid, idxs in prior_idxs.items()}
post_vals = {nid: memo[vals] for nid, vals in prior_vals.items()}
return post_idxs, post_vals
# TODO: is this used?
# @scope.define
# def idxs_prod(full_idxs, idxs_by_label, llik_by_label):
# """Add all of the log-likelihoods together by id.
#
# Example arguments:
# full_idxs = [0, 1, ... N-1]
# idxs_by_label = {'node_a': [1, 3], 'node_b': [3]}
# llik_by_label = {'node_a': [0.1, -3.3], node_b: [1.0]}
#
# This would return N elements: [0, 0.1, 0, -2.3, 0, 0, ... ]
# """
# assert len(set(full_idxs)) == len(full_idxs)
# full_idxs = list(full_idxs)
# rval = np.zeros(len(full_idxs))
# pos_of_tid = dict(list(zip(full_idxs, list(range(len(full_idxs))))))
# assert set(idxs_by_label.keys()) == set(llik_by_label.keys())
# for nid in idxs_by_label:
# idxs = idxs_by_label[nid]
# llik = llik_by_label[nid]
# assert np.all(np.asarray(idxs) > 1)
# assert len(set(idxs)) == len(idxs)
# assert len(idxs) == len(llik)
# for ii, ll in zip(idxs, llik):
# rval[pos_of_tid[ii]] += ll
# return rval
_default_prior_weight = 1.0
# -- suggest best of this many draws on every iteration
_default_n_EI_candidates = 24
# -- gamma * sqrt(n_trials) is the number of trials to treat as "good"
_default_gamma = 0.25
_default_n_startup_jobs = 20
_default_linear_forgetting = DEFAULT_LF
def build_posterior_wrapper(domain, prior_weight, gamma):
"""
Calls build_posterior
Args:
domain (hyperopt.base.Domain): contains info about the obj function and the hp
space passed to fmin
prior_weight (float): smoothing factor for counts, to avoid having 0 prob
# TODO: consider renaming or improving documentation for suggest
gamma (float): the threshold to split between l(x) and g(x), see eq. 2 in
https://papers.nips.cc/paper/4443-algorithms-for-hyper-parameter-optimization.pdf
    Returns:
        the `observed` and `observed_loss` placeholder dicts together with the
        posterior (idxs, vals) pyll graph built by build_posterior
"""
# -- these dummy values will be replaced in build_posterior() and never used
observed = {"idxs": pyll.Literal(), "vals": pyll.Literal()}
observed_loss = {"idxs": pyll.Literal(), "vals": pyll.Literal()}
posterior = build_posterior(
# -- vectorized clone of bandit template
domain.vh.v_expr,
# -- this dict and next represent prior dists
domain.vh.idxs_by_label(),
domain.vh.vals_by_label(),
observed["idxs"],
observed["vals"],
observed_loss["idxs"],
observed_loss["vals"],
pyll.Literal(gamma),
pyll.Literal(float(prior_weight)),
)
return observed, observed_loss, posterior
def suggest(
new_ids,
domain,
trials,
seed,
prior_weight=_default_prior_weight,
n_startup_jobs=_default_n_startup_jobs,
n_EI_candidates=_default_n_EI_candidates,
gamma=_default_gamma,
verbose=True,
):
"""
    Given previous trials and the domain, suggest the best expected
    hyperparameter point according to the TPE-EI algorithm.
    Args:
        prior_weight: smoothing weight given to the prior distribution
        n_startup_jobs: number of initial trials suggested randomly before TPE is used
        n_EI_candidates: number of candidate draws scored by EI on each suggestion
        gamma: controls how many of the best trials are treated as "good" when
            splitting observations into l(x) and g(x)
        verbose: whether to log timing and trial statistics
    Returns:
        the new trial docs (a list with a single entry for the best candidate)
"""
t0 = time.time()
# use build_posterior_wrapper to create the pyll nodes
observed, observed_loss, posterior = build_posterior_wrapper(
domain, prior_weight, gamma
)
tt = time.time() - t0
if verbose:
logger.info("build_posterior_wrapper took %f seconds" % tt)
# Loop over previous trials to collect best_docs and best_docs_loss
best_docs = dict()
best_docs_loss = dict()
for doc in trials.trials:
# get either these docs own tid or the one that it's from
tid = doc["misc"].get("from_tid", doc["tid"])
# associate infinite loss to new/running/failed jobs
loss = doc["result"].get("loss")
loss = float("inf") if loss is None else float(loss)
        # keep the lowest loss seen for this tid: set it on the first encounter,
        # then update whenever a lower (or equal) loss is found
best_docs_loss.setdefault(tid, loss)
if loss <= best_docs_loss[tid]:
best_docs_loss[tid] = loss
best_docs[tid] = doc
# -- sort docs by order of suggestion
# so that linear_forgetting removes the oldest ones
tid_docs = sorted(best_docs.items())
losses = [best_docs_loss[tid] for tid, doc in tid_docs]
tids, docs = list(zip(*tid_docs)) if tid_docs else ([], [])
if verbose:
if docs:
s = "%i/%i trials with best loss %f" % (
len(docs),
len(trials),
np.nanmin(losses),
)
else:
s = "0 trials"
logger.info("TPE using %s" % s)
if len(docs) < n_startup_jobs:
# N.B. THIS SEEDS THE RNG BASED ON THE new_id
return rand.suggest(new_ids, domain, trials, seed)
# Sample and compute log-probability.
first_new_id = new_ids[0]
if tids:
# -- the +2 coordinates with an assertion above
# to ensure that fake ids are used during sampling
# TODO: not sure what assertion this refers to...
fake_id_0 = max(max(tids), first_new_id) + 2
else:
# -- weird - we're running the TPE algo from scratch
assert n_startup_jobs <= 0
fake_id_0 = first_new_id + 2
fake_ids = list(range(fake_id_0, fake_id_0 + n_EI_candidates))
# -- this dictionary will map pyll nodes to the values
# they should take during the evaluation of the pyll program
memo = {domain.s_new_ids: fake_ids, domain.s_rng: np.random.RandomState(seed)}
memo[observed_loss["idxs"]] = tids
memo[observed_loss["vals"]] = losses
observed_idxs_dict, observed_vals_dict = miscs_to_idxs_vals(
[doc["misc"] for doc in docs], keys=list(domain.params.keys())
)
memo[observed["idxs"]] = observed_idxs_dict
memo[observed["vals"]] = observed_vals_dict
# evaluate `n_EI_candidates` pyll nodes in `posterior` using `memo`
# TODO: it seems to return idxs, vals, all the same. Is this correct?
idxs, vals = pyll.rec_eval(posterior, memo=memo, print_node_on_error=False)
# hack to add offset again for randint params
for label, param in domain.params.items():
if param.name == "randint" and len(param.pos_args) == 2:
offset = param.pos_args[0].obj
vals[label] = [val + offset for val in vals[label]]
# -- retrieve the best of the samples and form the return tuple
# specs are deprecated since build_posterior makes all the same
rval_specs = [None]
rval_results = [domain.new_result()]
rval_miscs = [{"tid": first_new_id, "cmd": domain.cmd, "workdir": domain.workdir}]
miscs_update_idxs_vals(
rval_miscs,
idxs,
vals,
idxs_map={fake_ids[0]: first_new_id},
assert_all_vals_used=False,
)
# return the doc for the best new trial
return trials.new_trial_docs([first_new_id], rval_specs, rval_results, rval_miscs)
|
py | b417393e9e4b1bb9b8e449272d89c005eb79e16f | import pandas as pd
df = pd.read_csv('sample.csv')
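# note: to_csv also writes the DataFrame index as a leading column unless index=False is passed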
df.to_csv('new-csv-file.csv') |
py | b41739689d1dd1b1f67405ebd3517d3fa576542a | # This file is generated by objective.metadata
#
# Last update: Mon Nov 22 22:40:45 2021
#
# flake8: noqa
import objc, sys
if sys.maxsize > 2 ** 32:
def sel32or64(a, b):
return b
else:
def sel32or64(a, b):
return a
if objc.arch == "arm64":
def selAorI(a, b):
return a
else:
def selAorI(a, b):
return b
misc = {}
constants = """$AXMFiHearingDevicePairedUUIDsDidChangeNotification$AXMFiHearingDeviceStreamingEarDidChangeNotification$"""
enums = """$AXChartContentDirectionBottomToTop@3$AXChartContentDirectionLeftToRight@0$AXChartContentDirectionRadialClockwise@4$AXChartContentDirectionRadialCounterClockwise@5$AXChartContentDirectionRightToLeft@1$AXChartContentDirectionTopToBottom@2$AXCustomContentImportanceDefault@0$AXCustomContentImportanceHigh@1$AXHearingDeviceEarBoth@6$AXHearingDeviceEarLeft@2$AXHearingDeviceEarNone@0$AXHearingDeviceEarRight@4$AXScaleTypeLinear@0$AXScaleTypeLn@2$AXScaleTypeLog10@1$"""
misc.update({})
functions = {
"AXMFiHearingDevicePairedUUIDs": (b"@",),
"AXSupportsBidirectionalAXMFiHearingDeviceStreaming": (b"Z",),
"AXNameFromColor": (b"@^{CGColor=}",),
"AXMFiHearingDeviceStreamingEar": (b"Q",),
}
r = objc.registerMetaDataForSelector
objc._updatingMetadata(True)
try:
r(
b"AXDataSeriesDescriptor",
b"initWithAttributedName:isContinuous:dataPoints:",
{"arguments": {3: {"type": b"Z"}}},
)
r(
b"AXDataSeriesDescriptor",
b"initWithName:isContinuous:dataPoints:",
{"arguments": {3: {"type": b"Z"}}},
)
r(b"AXDataSeriesDescriptor", b"isContinuous", {"retval": {"type": b"Z"}})
r(
b"AXDataSeriesDescriptor",
b"setIsContinuous:",
{"arguments": {2: {"type": b"Z"}}},
)
r(
b"AXNumericDataAxisDescriptor",
b"initWithAttributedTitle:lowerBound:upperBound:gridlinePositions:valueDescriptionProvider:",
{
"arguments": {
6: {
"callable": {
"retval": {"type": b"@"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"d"}},
}
}
}
},
)
r(
b"AXNumericDataAxisDescriptor",
b"initWithTitle:lowerBound:upperBound:gridlinePositions:valueDescriptionProvider:",
{
"arguments": {
6: {
"callable": {
"retval": {"type": b"@"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"d"}},
}
}
}
},
)
r(
b"AXNumericDataAxisDescriptor",
b"setValueDescriptionProvider:",
{
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}},
}
}
}
},
)
r(
b"NSObject",
b"accessibilityBrailleMapRenderRegion",
{"required": True, "retval": {"type": b"{CGRect={CGPoint=dd}{CGSize=dd}}"}},
)
r(
b"NSObject",
b"accessibilityBrailleMapRenderer",
{
"required": True,
"retval": {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
},
"type": b"@?",
},
},
)
r(
b"NSObject",
b"accessibilityChartDescriptor",
{"required": True, "retval": {"type": b"@"}},
)
r(
b"NSObject",
b"accessibilityCustomContent",
{"required": True, "retval": {"type": b"@"}},
)
r(b"NSObject", b"attributedTitle", {"required": True, "retval": {"type": b"@"}})
r(
b"NSObject",
b"setAccessibilityBrailleMapRenderRegion:",
{
"required": True,
"retval": {"type": b"v"},
"arguments": {2: {"type": b"{CGRect={CGPoint=dd}{CGSize=dd}}"}},
},
)
r(
b"NSObject",
b"setAccessibilityBrailleMapRenderer:",
{
"required": True,
"retval": {"type": b"v"},
"arguments": {
2: {
"callable": {
"retval": {"type": b"v"},
"arguments": {0: {"type": b"^v"}, 1: {"type": b"@"}},
},
"type": b"@?",
}
},
},
)
r(
b"NSObject",
b"setAccessibilityChartDescriptor:",
{"required": True, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"setAccessibilityCustomContent:",
{"required": True, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"setAttributedTitle:",
{"required": True, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(
b"NSObject",
b"setTitle:",
{"required": True, "retval": {"type": b"v"}, "arguments": {2: {"type": b"@"}}},
)
r(b"NSObject", b"title", {"required": True, "retval": {"type": b"@"}})
finally:
objc._updatingMetadata(False)
expressions = {}
# END OF FILE
|
py | b4173979afbe14b9dd2e6f340a3111e13a484625 | # Don't look in here.
# You don't want to write code like this.
class Cart:
def __init__(self, cart_symbol, cartid):
if not self.valid_cart(cart_symbol):
raise Exception("Invalid cart: " + cart_symbol)
self.cart_symbol = cart_symbol
self.next_turn = 'LEFT'
self.cartid = cartid
def valid_cart(self, c):
return c in ['>', '<', '^', 'v']
# Turn in cycles of LEFT > STRAIGHT > RIGHT
def turn_cart(self):
if self.next_turn == 'LEFT':
self.next_turn = 'STRAIGHT'
self.cart_symbol = self.turn_symbol_left(self.cart_symbol)
elif self.next_turn == 'RIGHT':
self.next_turn = 'LEFT'
self.cart_symbol = self.turn_symbol_right(self.cart_symbol)
elif self.next_turn == 'STRAIGHT':
self.next_turn = 'RIGHT'
else:
raise Exception("12")
def turn_symbol_left(self, c):
if c == '^':
return '<'
elif c == '>':
return '^'
elif c == '<':
return 'v'
elif c == 'v':
return '>'
else:
raise Exception("10")
def turn_symbol_right(self, c):
if c == '^':
return '>'
elif c == '>':
return 'v'
elif c == '<':
return '^'
elif c == 'v':
return '<'
else:
raise Exception("11")
class RoadSection:
def __init__(self, road_symbol):
if not self.valid_road(road_symbol):
raise Exception("Invalid road: " + r)
self.road_symbol = road_symbol
self.road_type = self.get_road_type()
self.carts = []
def get_road_type(self):
if self.road_symbol == '|':
return 'UD'
elif self.road_symbol == '-':
return 'LR'
elif self.road_symbol == '+':
return 'INTERSECTION'
elif self.road_symbol == '/':
return 'RTURN'
elif self.road_symbol == '\\':
return 'LTURN'
else:
raise Exception("13")
def valid_road(self, r):
return r in ['-', '|', '+', '/', '\\']
def add_cart(self, cart):
self.carts.append(cart)
def solve():
lines = []
with open('data.txt', "r") as data:
for line in data:
line = line[:-1] # Remove new line
lines.append(line)
road = parse_lines_to_road(lines)
row_len = len(road[0])
for row in road:
if row_len != len(row):
raise Exception("Mismatch row length")
carts = cart_locations(road)
ticks = 0
while True:
if len(carts) <= 1:
            # Carts are stored as (row, col); the answer is printed as x,y i.e. (col, row)
            print(carts[0][1], carts[0][0])
break
carts = move_carts(road, carts)
ticks = ticks + 1
# Return the new cart locations = [(row, col)]
def move_carts(road, cart_locations):
    # Carts must be processed in reading order (top-to-bottom, left-to-right).
    cart_locations = sorted(cart_locations, key=lambda tup: (tup[0], tup[1]))
have_moved = set()
new_carts = []
for loc in cart_locations:
row, col = loc
section = road[row][col]
if len(section.carts) == 0:
continue
# Assume we have a single cart
remaining_carts = section.carts[:]
for cart in section.carts:
if cart.cartid in have_moved:
continue
# We are definitely moving the cart.
remaining_carts.remove(cart)
rs = section.road_symbol
cs = cart.cart_symbol
# Handle simple cases first (intersection most complicated)
# Do as I say not as I do.
# Can you say "technical debt"?
new_row = None
new_col = None
if rs == '|':
if cs == '^':
new_row, new_col = row-1, col
elif cs == 'v':
new_row, new_col = row+1, col
else:
raise Exception("1")
elif rs == '-':
if cs == '<':
new_row, new_col = row, col-1
elif cs == '>':
new_row, new_col = row, col+1
else:
raise Exception("2")
elif rs == '/':
if cs == '^':
cart.cart_symbol = '>'
new_row, new_col = row, col+1
elif cs == '<':
cart.cart_symbol = 'v'
new_row, new_col = row+1, col
elif cs == '>':
cart.cart_symbol = '^'
new_row, new_col = row-1, col
elif cs == 'v':
cart.cart_symbol = '<'
new_row, new_col = row, col-1
else:
raise Exception("3")
elif rs == '\\':
if cs == '^':
cart.cart_symbol = '<'
new_row, new_col = row, col-1
elif cs == '>':
cart.cart_symbol = 'v'
new_row, new_col = row+1, col
elif cs == '<':
cart.cart_symbol = '^'
new_row, new_col = row-1, col
elif cs == 'v':
cart.cart_symbol = '>'
new_row, new_col = row, col+1
else:
raise Exception("4")
elif rs == '+':
cart.turn_cart()
cs = cart.cart_symbol
if cs == '^':
new_row, new_col = row-1, col
elif cs == '>':
new_row, new_col = row, col+1
elif cs == '<':
new_row, new_col = row, col-1
elif cs == 'v':
new_row, new_col = row+1, col
else:
raise Exception("5")
else:
raise Exception("6")
road[new_row][new_col].add_cart(cart)
have_moved.add(cart.cartid)
# Remove the carts the minute a crash happens
if len(road[new_row][new_col].carts) > 1:
road[new_row][new_col].carts = []
else:
new_carts.append((new_row,new_col))
# Update the carts on this cell to what remains
road[row][col].carts = remaining_carts
# Remove any crashing carts
carts = []
for loc in new_carts:
row, col = loc
section = road[row][col]
if len(section.carts) > 1:
section.carts = []
elif len(section.carts) == 1:
carts.append((row, col))
return carts
def print_road(r):
for row in r:
row_string = ""
for section in row:
if section == None:
row_string = row_string + " "
else:
if len(section.carts) > 0:
row_string = row_string + section.carts[0].cart_symbol
else:
row_string = row_string + section.road_symbol
print(row_string)
def cart_locations(road):
locations = []
num_cols = len(road[0])
num_rows = len(road)
for row in range(0, num_rows):
for col in range(0, num_cols):
section = road[row][col]
if section != None and len(section.carts) > 0:
locations.append((row, col))
return locations
# We assume a square grid road here
def has_crash(r):
num_cols = len(r[0])
num_rows = len(r)
for row in range(0, num_rows):
for col in range(0, num_cols):
if r[row][col] != None and len(r[row][col].carts) > 1:
return (col, row)
return None
def parse_lines_to_road(lines):
row_length = 0
for line in lines:
if len(line) > row_length:
row_length = len(line)
cartid = 0
road = []
for line in lines:
row = []
for char in line:
cell_type = classify_symbol(char)
if cell_type == 'EMPTY':
row.append(None)
elif cell_type == 'ROAD':
row.append(RoadSection(char))
elif cell_type == 'CART':
cart = Cart(char, cartid)
cartid = cartid + 1
roadChar = infer_road_from_cart(char)
r = RoadSection(roadChar)
r.add_cart(cart)
row.append(r)
else:
raise Exception("8")
while len(row) < row_length:
row.append(None)
road.append(row)
return road
def infer_road_from_cart(cart):
if cart == '<' or cart == '>':
return '-'
elif cart == '^' or cart == 'v':
return '|'
else:
raise Exception("9")
def classify_symbol(s):
if s == ' ':
return 'EMPTY'
elif s in ['>', '<', '^', 'v']:
return 'CART'
elif s in ['|', '-', '+', '/', '\\']:
return 'ROAD'
else:
raise Exception("What is this?" + s)
if __name__ == '__main__':
solve()
|
py | b41739bd578ce175d99797968411762b630ff329 | # Databricks notebook source
dbutils.widgets.text("infilefolder", "", "In - Folder Path")
infilefolder = dbutils.widgets.get("infilefolder")
dbutils.widgets.text("loadid", "", "Load Id")
loadid = dbutils.widgets.get("loadid")
# COMMAND ----------
import datetime
# For testing
# infilefolder = 'datalake/data/lnd/2019_03_11_01_38_00/'
load_id = loadid
loaded_on = datetime.datetime.now()
base_path = 'dbfs:/mnt/datalake/data/lnd/'
parkingbay_filepath = base_path + infilefolder + "/MelbParkingBayData.json"
sensors_filepath = base_path + infilefolder + "/MelbParkingSensorData.json"
# COMMAND ----------
import ddo_transform.standardize as s
# Read data
parkingbay_sdf = spark.read.json(parkingbay_filepath, multiLine=True)
sensordata_sdf = spark.read.json(sensors_filepath, multiLine=True)
# Standardize
t_parkingbay_sdf = s.standardize_parking_bay(parkingbay_sdf, load_id, loaded_on)
t_sensordata_sdf = s.standardize_sensordata(sensordata_sdf, load_id, loaded_on)
# Insert new rows
t_parkingbay_sdf.write.mode("append").insertInto("interim.parking_bay")
t_sensordata_sdf.write.mode("append").insertInto("interim.sensor")
# COMMAND ----------
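# Hedged note (illustration only): this notebook is usually triggered by an
# orchestrator or a driver notebook that supplies both widget values, e.g.:
#
#   dbutils.notebook.run(
#       "02_standardize",   # hypothetical notebook path
#       600,
#       {"infilefolder": "2019_03_11_01_38_00", "loadid": "42"},
#   )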
|
py | b4173a4c4c37ab5d920fc18bfc7231e2518e84d7 | # generated from catkin/cmake/template/order_packages.context.py.in
source_root_dir = "/home/nvidia/racecar-ws/src"
whitelisted_packages = "".split(';') if "" != "" else []
blacklisted_packages = "".split(';') if "" != "" else []
underlay_workspaces = "/home/nvidia/racecar-ws/devel;/opt/ros/kinetic".split(';') if "/home/nvidia/racecar-ws/devel;/opt/ros/kinetic" != "" else []
|
py | b4173a6ca46ae240563ee604f6eb15ccdbe45775 | # video_path.py using for generate rendering path for create output video.
# Copyright (c) 2021 VISTEC - Vidyasirimedhi Institute of Science and Technology
# Distribute under MIT License
# Authors:
# - Suttisak Wizadwongsa <suttisak.w_s19[-at-]vistec.ac.th>
# - Pakkapon Phongthawee <pakkapon.p_s19[-at-]vistec.ac.th>
# - Jiraphon Yenphraphai <jiraphony_pro[-at-]vistec.ac.th>
# - Supasorn Suwajanakorn <supasorn.s[-at-]vistec.ac.th>
import numpy as np
from scipy.spatial.transform import Rotation, Slerp
def webGLspiralPath(ref_rotation, ref_translation, dmin, dmax, total_frame=120, spin_radius=10, total_spin=1):
spin_speed = 2 * np.pi / total_frame * total_spin
render_poses = {}
# matrix conversation helper
def dcm_to_4x4(r, t):
camera_matrix = np.zeros((4, 4), dtype=np.float32)
camera_matrix[:3, :3] = r
if len(t.shape) > 1:
camera_matrix[:3, 3:4] = t
else:
camera_matrix[:3, 3] = t
camera_matrix[3, 3] = 1.0
return camera_matrix
for i in range(total_frame):
anim_time = spin_speed * i
leftright = np.sin(anim_time) * spin_radius / 500.0
updown = np.cos(anim_time) * spin_radius / 500.0
r = ref_rotation
t = ref_translation
cam = dcm_to_4x4(r, t)
dist = (dmin + dmax) / 2.0
translation_matrix = dcm_to_4x4(np.eye(3), np.array([0, 0, -dist]))
translation_matrix2 = dcm_to_4x4(np.eye(3), np.array([0, 0, dist]))
euler_3x3 = Rotation.from_euler("yxz", [leftright, updown, 0]).as_dcm()
euler_4x4 = dcm_to_4x4(euler_3x3, np.array([0.0, 0.0, 0.0]))
output = translation_matrix2 @ euler_4x4 @ translation_matrix @ cam
output = output.astype(np.float32)
r = output[:3, :3]
t = output[:3, 3:4]
render_poses[i] = {"r": r, "t": t}
return render_poses
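# --- Hedged usage sketch (illustrative values, not tied to a particular scene) ---
# poses = webGLspiralPath(
#     ref_rotation=np.eye(3, dtype=np.float32),
#     ref_translation=np.zeros(3, dtype=np.float32),
#     dmin=1.0,
#     dmax=5.0,
# )
# poses[0]["r"].shape, poses[0]["t"].shape   # -> (3, 3) rotation, (3, 1) translation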
def deepviewInnerCircle(sfm, inter_frame=30):
"""
Deepview Inner Circle render
render across cam 1,2 (training view) and 5,11,10,7 (eval view)
"""
indices = sfm.index_split[0] + sfm.index_split[1]
indices = sorted(indices) # assume, space dataset always sortable
images = list(sfm.imgs.values())
selected_cam = [images[indices[i]] for i in [7, 1, 2, 5, 11, 10, 7]]
render_poses = {}
for i in range(len(selected_cam) - 1):
# use Slerp to interpolate between 2 rotation
rot = Rotation.from_dcm([selected_cam[i]["r"], selected_cam[i + 1]["r"]])
slerp = Slerp([0, 1], rot)
times = np.linspace(0.0, 1.0, num=inter_frame + 1)[:-1]
interp_rots = slerp(times).as_dcm().astype(np.float32)
for j in range(inter_frame):
step = j / inter_frame
t = selected_cam[i]["t"] * (1 - step) + step * selected_cam[i + 1]["t"]
render_poses[i * inter_frame + j] = {"r": interp_rots[j], "t": t}
return render_poses
|
py | b4173a98573cb62cb8909fa3b26274f8783de3fe | # a collection of useful classes
import os
import datetime
import operator
from decimal import Decimal
from stat import *
#__________________________________________________________________________
# a class to find data in the newest directory
class DataManager:
#_______________________________________________________________________
def __init__(self):
self.fVerbosity = 0
self.fHomeDir = os.getcwd() + "/"
self.fFile = [0,0,0,0] # init to size 4; dummy values
self.fModDir = "UNKNOWN"
#_______________________________________________________________________
def FindDir(self,path):
os.chdir(path)
dirs = sorted(os.listdir(os.getcwd()),key=os.path.getmtime)
for entry in dirs:
if ( os.path.isdir(entry) ): last_dir_mod = entry # gets last directory modified
os.chdir("..")
return last_dir_mod
#_______________________________________________________________________
def FindData(self,path):
# find directories
# year = self.FindDir(path)
# month = self.FindDir(path+year)
# day = self.FindDir(path+year+"/"+month)
# top_data_dir = path + "/" + year + "/" + month + "/" + day + "/"
now = datetime.datetime.now()
year = str(now.year)
month = str(now.month).zfill(2)
day = str(now.day).zfill(2)
# top_data_dir = path + year + "/" + month + "_" + year[2] + year[3] + "/" + month + "_" + day + "_" + year[2] +year[3] + "/"
top_data_dir = path + "/"
last_run = self.FindDir(top_data_dir)
run_dir = top_data_dir + last_run
# print top_data_dir
# print run_dir
# get all files in the run directory
os.chdir(run_dir)
files = sorted(os.listdir(os.getcwd()),key=os.path.getmtime)
files.reverse()
# go back to initial data directory
os.chdir(top_data_dir)
# find last four runs
j = 0
NMAX = 4
myPulse = [0,0,0,0]
for entry in files:
length = len(entry)
suffix = entry[length-3] + entry[length-2] + entry[length-1]
if( (suffix == "bin")&(j < NMAX) ):
myPulse[j] = entry
j=j+1
j=0
myPulse.reverse()
myList = [0,0,0,0]
for entry in myPulse:
myList[j] = last_run + "/" + str( entry )
j = j + 1
# for entry in myList: print entry
return myList
#__________________________________________________________________________
# a class to set an NMR run configuration
class ConfigManager:
#__________________________________________________________________
def __init__(self,home_path,config_name):
self.home = home_path
self.config = config_name
self.fpga = "pulse-data"
self.func_gen = "sg382"
self.adc = "struck_adc"
self.util = "utilities"
self.com = "comments"
self.src_dir = "/input/configs/files/"
self.tgt_dir = "/input/"
#__________________________________________________________________
def Print(self):
print( "[ConfigManager]: home = %s" %(self.home) )
print( "[ConfigManager]: config = %s" %(self.config) )
print( "[ConfigManager]: fpga file name = %s" %(self.fpga) )
print( "[ConfigManager]: func gen file name = %s" %(self.func_gen) )
print( "[ConfigManager]: adc file name = %s" %(self.adc) )
print( "[ConfigManager]: util file name = %s" %(self.util) )
print( "[ConfigManager]: comments file name = %s" %(self.com) )
print( "[ConfigManager]: src directory = %s" %(self.src_dir) )
print( "[ConfigManager]: tgt directory = %s" %(self.tgt_dir) )
#__________________________________________________________________
def CreateSymLinks(self):
# create symbolic links to the current configuration
cd_input = "cd " + self.home + "/input/"
cd_home = "cd " + self.home
symlink = "ln -s "
# define the source files
fpga_src = self.home + self.src_dir + self.fpga + "_" + self.config + ".dat"
fg_src = self.home + self.src_dir + self.func_gen + "_" + self.config + ".dat"
adc_src = self.home + self.src_dir + self.adc + "_" + self.config + ".dat"
util_src = self.home + self.src_dir + self.util + "_" + self.config + ".dat"
com_src = self.home + self.src_dir + self.com + "_" + self.config + ".txt"
# define targets
fpga_tgt = self.home + self.tgt_dir + self.fpga + ".dat"
fg_tgt = self.home + self.tgt_dir + self.func_gen + ".dat"
adc_tgt = self.home + self.tgt_dir + self.adc + ".dat"
util_tgt = self.home + self.tgt_dir + self.util + ".dat"
com_tgt = self.home + self.tgt_dir + self.com + ".txt"
# remove commands
rm_fpga = "rm " + fpga_tgt
rm_fg = "rm " + fg_tgt
rm_adc = "rm " + adc_tgt
rm_util = "rm " + util_tgt
rm_com = "rm " + com_tgt
# check for existing files
fpga_is_alive = os.path.islink(fpga_tgt)
fg_is_alive = os.path.islink(fg_tgt)
adc_is_alive = os.path.islink(adc_tgt)
util_is_alive = os.path.islink(util_tgt)
com_is_alive = os.path.islink(com_tgt)
# delete current symbolic links if necessary
if fpga_is_alive: os.remove( fpga_tgt )
if fg_is_alive: os.remove( fg_tgt )
if adc_is_alive: os.remove( adc_tgt )
if util_is_alive: os.remove( util_tgt )
if com_is_alive: os.remove( com_tgt )
# define the commands
fpga_cmd = symlink + fpga_src + " " + fpga_tgt # symbolic link for FPGA
fg_cmd = symlink + fg_src + " " + fg_tgt # symbolic link for function generator
adc_cmd = symlink + adc_src + " " + adc_tgt # symbolic link for ADC
util_cmd = symlink + util_src + " " + util_tgt # symbolic link for utilities
com_cmd = symlink + com_src + " " + com_tgt # symbolic link for comments
# print fpga_cmd
# print fg_cmd
# print adc_cmd
# print util_cmd
# print com_cmd
# symbolically link files
os.system(cd_input)
os.system(fpga_cmd)
os.system(fg_cmd)
os.system(adc_cmd)
os.system(util_cmd)
os.system(com_cmd)
# check to see if commands succeeded
fpga_is_alive = os.path.islink(fpga_tgt)
fg_is_alive = os.path.islink(fg_tgt)
adc_is_alive = os.path.islink(adc_tgt)
util_is_alive = os.path.islink(util_tgt)
com_is_alive = os.path.islink(com_tgt)
        if fpga_is_alive: print("[ConfigManager]: symbolic link from %s to %s created." % (fpga_src, fpga_tgt))
        if fg_is_alive:   print("[ConfigManager]: symbolic link from %s to %s created." % (fg_src, fg_tgt))
        if adc_is_alive:  print("[ConfigManager]: symbolic link from %s to %s created." % (adc_src, adc_tgt))
        if util_is_alive: print("[ConfigManager]: symbolic link from %s to %s created." % (util_src, util_tgt))
        if com_is_alive:  print("[ConfigManager]: symbolic link from %s to %s created." % (com_src, com_tgt))
# cd back to main dir
os.system(cd_home)
#__________________________________________________________________
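#__________________________________________________________________________
# Hedged usage sketch (illustration only; the config name is hypothetical)
#
#   cm = ConfigManager(home_path=os.getcwd(), config_name="default")
#   cm.Print()
#   cm.CreateSymLinks()   # links input/configs/files/*_default.dat -> input/*.dat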
|
py | b4173aa5c5adf010993bdae8e19855e2bcfbb5db | # Copyright 2015 OpenStack Foundation
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
#
"""Add vlan transparent property to network
Revision ID: bebba223288
Revises: 43763a9618fd
Create Date: 2015-02-04 18:07:29.670554
"""
# revision identifiers, used by Alembic.
revision = 'bebba223288'
down_revision = '43763a9618fd'
from alembic import op
import sqlalchemy as sa
def upgrade():
op.add_column('networks', sa.Column('vlan_transparent', sa.Boolean(),
nullable=True))
def downgrade():
op.drop_column('networks', 'vlan_transparent')
|
py | b4173d1e716c57e141d3b53701e9ed4bb591a0c6 | from __future__ import with_statement, absolute_import
import time
from contextlib import closing
import psycopg2
from . import print_row_progress, status_logger
from .postgres_writer import PostgresWriter
class PostgresDbWriter(PostgresWriter):
"""Class used to stream DDL and/or data
from a MySQL server to a PostgreSQL.
:Parameters:
- `db_options`: :py:obj:`dict` containing connection specific variables
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
class FileObjFaker(object):
"""A file-like class to support streaming
table data directly to :py:meth:`pscopg2.copy_from`.
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `data`:
- `processor`:
- `verbose`: whether or not to log progress to :py:obj:`stdout`
"""
def __init__(self, table, data, processor, verbose=False):
self.data = iter(data)
self.table = table
self.processor = processor
self.verbose = verbose
if verbose:
self.idx = 1
self.start_time = time.time()
self.prev_val_len = 0
self.prev_idx = 0
def readline(self, *args, **kwargs):
try:
                row = list(next(self.data))
except StopIteration:
if self.verbose:
print('')
return ''
else:
self.processor(self.table, row)
try:
return '%s\n' % ('\t'.join(row))
except UnicodeDecodeError:
return '%s\n' % ('\t'.join(r.decode('utf8') for r in row))
finally:
if self.verbose:
if (self.idx % 20000) == 0:
now = time.time()
elapsed = now - self.start_time
val = '%.2f rows/sec [%s] ' % ((self.idx - self.prev_idx) / elapsed, self.idx)
print_row_progress('%s%s' % (("\b" * self.prev_val_len), val)),
self.prev_val_len = len(val) + 3
self.start_time = now
self.prev_idx = self.idx + 0
self.idx += 1
def read(self, *args, **kwargs):
return self.readline(*args, **kwargs)
def __init__(self, db_options, verbose=False, *args, **kwargs):
super(PostgresDbWriter, self).__init__(*args, **kwargs)
self.verbose = verbose
self.db_options = {
'host': str(db_options['hostname']),
'port': db_options.get('port', 5432),
'database': str(db_options['database']),
'password': str(db_options.get('password', None)) or '',
'user': str(db_options['username']),
}
if 'sslmode' in db_options:
self.db_options['sslmode'] = str(db_options['sslmode'])
if 'sslrootcert' in db_options:
self.db_options['sslrootcert'] = str(db_options['sslrootcert'])
if ':' in str(db_options['database']):
self.db_options['database'], self.schema = self.db_options['database'].split(':')
else:
self.schema = None
self.open()
def open(self):
self.conn = psycopg2.connect(**self.db_options)
with closing(self.conn.cursor()) as cur:
if self.schema:
cur.execute('SET search_path TO %s' % self.schema)
cur.execute('SET client_encoding = \'UTF8\'')
if self.conn.server_version >= 80200:
cur.execute('SET standard_conforming_strings = off')
cur.execute('SET check_function_bodies = false')
cur.execute('SET client_min_messages = warning')
def query(self, sql, args=(), one=False):
with closing(self.conn.cursor()) as cur:
cur.execute(sql, args)
return cur.fetchone() if one else cur
def execute(self, sql, args=(), many=False):
with closing(self.conn.cursor()) as cur:
print(sql + '\n')
if many:
cur.executemany(sql, args)
else:
cur.execute(sql, args)
self.conn.commit()
def copy_from(self, file_obj, table_name, columns):
with closing(self.conn.cursor()) as cur:
cur.copy_from(file_obj,
table=table_name,
columns=columns
)
self.conn.commit()
def close(self):
"""Closes connection to the PostgreSQL server"""
self.conn.close()
def exists(self, relname):
        rc = self.query('SELECT COUNT(*) FROM pg_class WHERE relname = %s', (relname, ), one=True)
return rc and int(rc[0]) == 1
@status_logger
def truncate(self, table):
"""Send DDL to truncate the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
truncate_sql, serial_key_sql = super(PostgresDbWriter, self).truncate(table)
self.execute(truncate_sql)
if serial_key_sql:
self.execute(serial_key_sql)
@status_logger
def write_table(self, table):
"""Send DDL to create the specified `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
table_sql, serial_key_sql = super(PostgresDbWriter, self).write_table(table)
for sql in serial_key_sql + table_sql:
self.execute(sql)
@status_logger
def write_indexes(self, table):
"""Send DDL to create the specified `table` indexes
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_indexes(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_triggers(self, table):
"""Send DDL to create the specified `table` triggers
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
index_sql = super(PostgresDbWriter, self).write_triggers(table)
for sql in index_sql:
self.execute(sql)
@status_logger
def write_constraints(self, table):
"""Send DDL to create the specified `table` constraints
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
Returns None
"""
constraint_sql = super(PostgresDbWriter, self).write_constraints(table)
for sql in constraint_sql:
self.execute(sql)
@status_logger
def write_contents(self, table, reader):
"""Write the contents of `table`
:Parameters:
- `table`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader.Table` object that represents the table to read/write.
- `reader`: an instance of a :py:class:`mysql2pgsql.lib.mysql_reader.MysqlReader` object that allows reading from the data source.
Returns None
"""
f = self.FileObjFaker(table, reader.read(table), self.process_row, self.verbose)
self.copy_from(f, '"%s"' % table.name, ['"%s"' % c['name'] for c in table.columns])
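# --- Hedged usage sketch (illustration only; assumes a reachable PostgreSQL server
# --- and a MysqlReader instance from this package, here called `reader`) ---
#
#   writer = PostgresDbWriter(
#       {"hostname": "localhost", "port": 5432, "database": "target_db",
#        "username": "postgres", "password": "secret"},
#       verbose=True,
#   )
#   for table in reader.tables:
#       writer.write_table(table)
#       writer.write_indexes(table)
#       writer.write_contents(table, reader)
#   writer.close()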
|
py | b4173e43cdf5dd85c77bed2e7098838ac49a5cc9 | # Copyright 2020 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Contains definitions of Residual Networks.
Residual networks (ResNets) were proposed in:
[1] Kaiming He, Xiangyu Zhang, Shaoqing Ren, Jian Sun
Deep Residual Learning for Image Recognition. arXiv:1512.03385
"""
# Import libraries
import tensorflow as tf
from official.modeling import tf_utils
from official.vision.beta.modeling.layers import nn_blocks
layers = tf.keras.layers
# Specifications for different ResNet variants.
# Each entry specifies block configurations of the particular ResNet variant.
# Each element in the block configuration is in the following format:
# (block_fn, num_filters, block_repeats)
RESNET_SPECS = {
18: [
('residual', 64, 2),
('residual', 128, 2),
('residual', 256, 2),
('residual', 512, 2),
],
34: [
('residual', 64, 3),
('residual', 128, 4),
('residual', 256, 6),
('residual', 512, 3),
],
50: [
('bottleneck', 64, 3),
('bottleneck', 128, 4),
('bottleneck', 256, 6),
('bottleneck', 512, 3),
],
101: [
('bottleneck', 64, 3),
('bottleneck', 128, 4),
('bottleneck', 256, 23),
('bottleneck', 512, 3),
],
152: [
('bottleneck', 64, 3),
('bottleneck', 128, 8),
('bottleneck', 256, 36),
('bottleneck', 512, 3),
],
200: [
('bottleneck', 64, 3),
('bottleneck', 128, 24),
('bottleneck', 256, 36),
('bottleneck', 512, 3),
],
}
@tf.keras.utils.register_keras_serializable(package='Vision')
class ResNet(tf.keras.Model):
"""Class to build ResNet family model."""
def __init__(self,
model_id,
input_specs=layers.InputSpec(shape=[None, None, None, 3]),
activation='relu',
use_sync_bn=False,
norm_momentum=0.99,
norm_epsilon=0.001,
kernel_initializer='VarianceScaling',
kernel_regularizer=None,
bias_regularizer=None,
**kwargs):
"""ResNet initialization function.
Args:
model_id: `int` depth of ResNet backbone model.
input_specs: `tf.keras.layers.InputSpec` specs of the input tensor.
activation: `str` name of the activation function.
use_sync_bn: if True, use synchronized batch normalization.
      norm_momentum: `float` normalization momentum for the moving average.
norm_epsilon: `float` small float added to variance to avoid dividing by
zero.
kernel_initializer: kernel_initializer for convolutional layers.
kernel_regularizer: tf.keras.regularizers.Regularizer object for Conv2D.
Default to None.
bias_regularizer: tf.keras.regularizers.Regularizer object for Conv2d.
Default to None.
**kwargs: keyword arguments to be passed.
"""
self._model_id = model_id
self._input_specs = input_specs
self._use_sync_bn = use_sync_bn
self._activation = activation
self._norm_momentum = norm_momentum
self._norm_epsilon = norm_epsilon
if use_sync_bn:
self._norm = layers.experimental.SyncBatchNormalization
else:
self._norm = layers.BatchNormalization
self._kernel_initializer = kernel_initializer
self._kernel_regularizer = kernel_regularizer
self._bias_regularizer = bias_regularizer
if tf.keras.backend.image_data_format() == 'channels_last':
bn_axis = -1
else:
bn_axis = 1
# Build ResNet.
inputs = tf.keras.Input(shape=input_specs.shape[1:])
x = layers.Conv2D(
filters=64, kernel_size=7, strides=2, use_bias=False, padding='same',
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer)(
inputs)
x = self._norm(
axis=bn_axis, momentum=norm_momentum, epsilon=norm_epsilon)(
x)
x = tf_utils.get_activation(activation)(x)
x = layers.MaxPool2D(pool_size=3, strides=2, padding='same')(x)
# TODO(xianzhi): keep a list of blocks to make blocks accessible.
endpoints = {}
for i, spec in enumerate(RESNET_SPECS[model_id]):
if spec[0] == 'residual':
block_fn = nn_blocks.ResidualBlock
elif spec[0] == 'bottleneck':
block_fn = nn_blocks.BottleneckBlock
else:
raise ValueError('Block fn `{}` is not supported.'.format(spec[0]))
x = self._block_group(
inputs=x,
filters=spec[1],
strides=(1 if i == 0 else 2),
block_fn=block_fn,
block_repeats=spec[2],
name='block_group_l{}'.format(i + 2))
endpoints[str(i + 2)] = x
self._output_specs = {l: endpoints[l].get_shape() for l in endpoints}
super(ResNet, self).__init__(inputs=inputs, outputs=endpoints, **kwargs)
def _block_group(self,
inputs,
filters,
strides,
block_fn,
block_repeats=1,
name='block_group'):
"""Creates one group of blocks for the ResNet model.
Args:
inputs: `Tensor` of size `[batch, channels, height, width]`.
filters: `int` number of filters for the first convolution of the layer.
strides: `int` stride to use for the first convolution of the layer. If
greater than 1, this layer will downsample the input.
block_fn: Either `nn_blocks.ResidualBlock` or `nn_blocks.BottleneckBlock`.
block_repeats: `int` number of blocks contained in the layer.
name: `str`name for the block.
Returns:
The output `Tensor` of the block layer.
"""
x = block_fn(
filters=filters,
strides=strides,
use_projection=True,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
inputs)
for _ in range(1, block_repeats):
x = block_fn(
filters=filters,
strides=1,
use_projection=False,
kernel_initializer=self._kernel_initializer,
kernel_regularizer=self._kernel_regularizer,
bias_regularizer=self._bias_regularizer,
activation=self._activation,
use_sync_bn=self._use_sync_bn,
norm_momentum=self._norm_momentum,
norm_epsilon=self._norm_epsilon)(
x)
return tf.identity(x, name=name)
def get_config(self):
config_dict = {
'model_id': self._model_id,
'activation': self._activation,
'use_sync_bn': self._use_sync_bn,
'norm_momentum': self._norm_momentum,
'norm_epsilon': self._norm_epsilon,
'kernel_initializer': self._kernel_initializer,
'kernel_regularizer': self._kernel_regularizer,
'bias_regularizer': self._bias_regularizer,
}
return config_dict
@classmethod
def from_config(cls, config, custom_objects=None):
return cls(**config)
@property
def output_specs(self):
"""A dict of {level: TensorShape} pairs for the model output."""
return self._output_specs
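# --- Hedged usage sketch (illustration only) ---
if __name__ == '__main__':
  # Build a ResNet-50 backbone and inspect its multi-scale endpoint shapes.
  backbone = ResNet(model_id=50)
  for level, shape in backbone.output_specs.items():
    print(level, shape)  # levels '2'..'5', strides 4..32 relative to the input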
|
py | b4173e7572d392e4ba89be7f819691071da2c5d8 | import wx
from Pages import TodayPage, HistoryPage, StatisticsPage
class MainWindow(wx.Frame):
def __init__(self, *args, **kwargs):
super(MainWindow, self).__init__(*args, **kwargs)
self.setupUi()
self.Show()
def setupUi(self):
# Set pages
self.nb = wx.Notebook(self)
todayPage = TodayPage(self.nb)
historyPage = HistoryPage(self.nb)
statisticsPage = StatisticsPage(self.nb)
        self.nb.AddPage(todayPage, "今日對獎")        # "Today's draw check"
        self.nb.AddPage(historyPage, "歷史資料")       # "Historical data"
        self.nb.AddPage(statisticsPage, "統計分析")    # "Statistical analysis"
        # Appearance settings
self.SetSize((1000, 600))
self.SetTitle("Lotto1224")
self.Centre()
todayPage.fetchData()
todayPage.showResult() |
py | b4173f0db076406907cde512a8cbf0fbd04f2abe | import httplib
from core.actionModule import actionModule
from core.keystore import KeyStore as kb
from core.utils import Utils
class scan_httpoptions(actionModule):
def __init__(self, config, display, lock):
super(scan_httpoptions, self).__init__(config, display, lock)
self.title = "Get HTTP Options"
self.shortName = "httpOptions"
self.description = "issue [OPTIONS / HTTP/1.0] to each web server"
self.requirements = []
self.triggers = ["newService_http", "newService_https", "newPort_tcp_80", "newPort_tcp_443"]
self.types = ["http"]
self.safeLevel = 5
def getTargets(self):
# we are interested in all hosts
self.targets = kb.get('service/http', 'service/https')
def processTarget(self, t, port):
if not self.seentarget(t + str(port)):
self.addseentarget(t + str(port))
self.display.verbose(self.shortName + " - Connecting to " + t)
try:
conn = httplib.HTTPConnection(t, port, timeout=10)
conn.request('OPTIONS', '/')
response = conn.getresponse()
text = ""
allowed = response.getheader('allow')
outfile = self.config["proofsDir"] + self.shortName + "_" + t + "_" + str(
port) + "_" + Utils.getRandStr(10)
if (allowed):
badoptions = ['PUT', 'DELETE', 'TRACE', 'TRACK']
for badopt in badoptions:
                        if (badopt in allowed):
self.fire("httpOption" + badopt)
self.addVuln(t, "httpOption" + badopt,
{"port": str(port), "output": outfile.replace("/", "%2F")})
self.display.error("VULN [httpOption%s] Found on [%s:%i]" % (badopt, host, int(port)))
text = "Allowed HTTP Options for %s : %s\n\nFull Headers:\n%s" % (
t, allowed, self.print_dict(response.getheaders()))
else:
text = "Allowed HTTP Options for %s : OPTIONS VERB NOT ALLOWED\n\nFull Headers:\n%s" % (
t, self.print_dict(response.getheaders()))
Utils.writeFile(text, outfile)
except httplib.BadStatusLine:
pass
# except socket.error as e:
except:
pass
def process(self):
# load any targets we are interested in
self.getTargets()
# loop over each target
for t in self.targets:
# verify we have not tested this host before
ports = kb.get('service/http/' + t + '/tcp', 'service/https/' + t + '/tcp')
for port in ports:
self.processTarget(t, port)
for hostname in self.getHostnames(t):
self.processTarget(hostname, port)
return
|
py | b4173f1aa3c87c769dda5b7d4911a0fc86d7ef20 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.index, name='index'),
url(r'^delete-book/(?P<book_id>[0-9]+)$', views.delete, name='delete'),
url(r'^create-book$', views.create, name='create'),
]
|
py | b4174108e752d12c5719809d8d7efbc0790daadf | """
This module is an example of a barebones QWidget plugin for napari
It implements the ``napari_experimental_provide_dock_widget`` hook specification.
see: https://napari.org/docs/dev/plugins/hook_specifications.html
Replace code below according to your needs.
"""
from napari_plugin_engine import napari_hook_implementation
from qtpy.QtWidgets import QWidget, QHBoxLayout, QPushButton
class MyWidget(QWidget):
# your QWidget.__init__ can optionally request the napari viewer instance
# in one of two ways:
# 1. use a parameter called `napari_viewer`, as done here
# 2. use a type annotation of 'napari.viewer.Viewer' for any parameter
def __init__(self, napari_viewer):
super().__init__()
self.viewer = napari_viewer
btn = QPushButton("Click me!")
btn.clicked.connect(self._on_click)
self.setLayout(QHBoxLayout())
self.layout().addWidget(btn)
def _on_click(self):
print("napari has", len(self.viewer.layers), "layers")
@napari_hook_implementation
def napari_experimental_provide_dock_widget():
return MyWidget
|
py | b417421a2917ce077450dafac8dd0b46ab8d3012 | """
Benchmarking tasks
"""
from __future__ import print_function
import os
import sys
from invoke import task
from .build_tags import get_default_build_tags
from .utils import bin_name
from .utils import get_git_branch_name
from .utils import REPO_PATH
# constants
BENCHMARKS_BIN_PATH = os.path.join(".", "bin", "benchmarks")
@task
def build_aggregator(ctx, rebuild=False):
"""
Build the Aggregator benchmarks.
"""
build_tags = get_default_build_tags() # pass all the build flags
ldflags = ""
gcflags = ""
if os.environ.get("DELVE"):
gcflags = "-N -l"
if sys.platform == 'win32':
# On windows, need to build with the extra argument -ldflags="-linkmode internal"
# if you want to be able to use the delve debugger.
ldflags += " -linkmode internal"
cmd = "go build -mod={go_mod} {build_type} -tags \"{build_tags}\" -o {bin_name} "
cmd += "{ldflags} {gcflags} {REPO_PATH}/test/benchmarks/aggregator"
args = {
"go_mod": "vendor",
"build_type": "-a" if rebuild else "",
"build_tags": " ".join(build_tags),
"bin_name": os.path.join(BENCHMARKS_BIN_PATH, bin_name("aggregator")),
"ldflags": ldflags,
"gcflags": gcflags,
"REPO_PATH": REPO_PATH
}
ctx.run(cmd.format(**args))
@task
def build_dogstatsd(ctx):
"""
Build Dogstatsd benchmarks.
"""
build_tags = get_default_build_tags() # pass all the build flags
cmd = "go build -mod={go_mod} -tags \"{build_tags}\" -o {bin_name} {REPO_PATH}/test/benchmarks/dogstatsd"
args = {
"go_mod": "vendor",
"build_tags": " ".join(build_tags),
"bin_name": os.path.join(BENCHMARKS_BIN_PATH, bin_name("dogstatsd")),
"REPO_PATH": REPO_PATH,
}
ctx.run(cmd.format(**args))
@task(pre=[build_dogstatsd])
def dogstastd(ctx):
"""
Run Dogstatsd Benchmarks.
"""
bin_path = os.path.join(BENCHMARKS_BIN_PATH, bin_name("dogstatsd"))
branch_name = os.environ.get("DD_REPO_BRANCH_NAME") or get_git_branch_name()
options = "-branch {}".format(branch_name)
key = os.environ.get("DD_AGENT_API_KEY")
if key:
options += " -api-key {}".format(key)
ctx.run("{} -pps=5000 -dur 45 -ser 5 -brk -inc 1000 {}".format(bin_path, options))
@task(pre=[build_aggregator])
def aggregator(ctx):
"""
Run the Aggregator Benchmarks.
"""
bin_path = os.path.join(BENCHMARKS_BIN_PATH, bin_name("aggregator"))
branch_name = os.environ.get("DD_REPO_BRANCH_NAME") or get_git_branch_name()
options = "-branch {}".format(branch_name)
key = os.environ.get("DD_AGENT_API_KEY")
if key:
options += " -api-key {}".format(key)
ctx.run("{} -points 2,10,100,500,1000 -series 10,100,1000 -log-level info -json {}".format(bin_path, options))
ctx.run("{} -points 2,10,100,500,1000 -series 10,100,1000 -log-level info -json -memory -duration 10 {}".format(bin_path, options))
|
py | b41742601a17b50c6dd2dde1c366c46624f889b1 | from typing import Dict, Optional
import numpy as np
def negamax_alpha_beta_pruned(
board: np.ndarray,
player: int,
    alpha: float,
    beta: float,
size: int = 3
) -> Dict[str, int]:
"""
Simple implementation of the negamax (minimax) algorithm for the tic-tac-toe game. Includes an improvement
of alpha-beta pruning.
See tests for example usage.
:param board: current state of the board
:param player: the player to make a move (can be 1 or -1)
:param alpha: the minimum score that the maximizing player is assured of
:param beta: the maximum score that the minimizing player is assured of
:param size: size of the board
:return: dict with results for score and move; the score is given from the perspective of the player who is about
to play (so score == 1 when player == -1 means that player "-1" won)
"""
winner = get_winner(board)
if winner:
return {'score': winner * player, 'move': None}
elif check_if_board_is_full(board, size):
return {'score': 0, 'move': None}
best_score = -np.inf
for move in range(size**2):
row = move // size
col = move % size
if board[row, col] == 0:
copied_board = board.copy()
copied_board[row, col] = player
result = negamax_alpha_beta_pruned(copied_board, -player, -beta, -alpha)
score = -result['score']
if score > best_score:
best_score = score
best_move = (row, col)
alpha = max(alpha, score)
if alpha >= beta:
break
return {'score': best_score, 'move': best_move}
def negamax(board: np.ndarray, player: int, size: int = 3) -> Dict[str, int]:
"""
Simple implementation of the negamax (minimax) algorithm for the tic-tac-toe game.
See tests for example usage.
:param board: current state of the board
:param player: the player to make a move (can be 1 or -1)
:param size: size of the board
:return: dict with results for score and move; the score is given from the perspective of the player who is about
to play (so score == 1 when player == -1 means that player "-1" won)
"""
winner = get_winner(board)
if winner:
return {'score': winner * player, 'move': None}
elif check_if_board_is_full(board, size):
return {'score': 0, 'move': None}
best_score = -np.inf
for move in range(size**2):
row = move // size
col = move % size
if board[row, col] == 0:
copied_board = board.copy()
copied_board[row, col] = player
result = negamax(copied_board, -player)
score = -result['score']
if score > best_score:
best_score = score
best_move = (row, col)
return {'score': best_score, 'move': best_move}
def check_if_board_is_full(board: np.ndarray, size: int) -> bool:
return np.count_nonzero(board) == size**2
def get_winner(board: np.ndarray) -> Optional[int]:
for player in [-1, 1]:
for i in range(3):
if board[i, :].tolist().count(player) == 3 or board[:, i].tolist().count(player) == 3:
return player
if board[0, 0] == board[1, 1] == board[2, 2] == player or board[0, 2] == board[1, 1] == board[2, 0] == player:
return player
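# --- Hedged usage example (a minimal sketch of the tests mentioned in the docstrings) ---
if __name__ == "__main__":
    # Player 1 ("X") to move; the winning move completes the top row at (0, 2).
    board = np.array([
        [1, 1, 0],
        [-1, -1, 0],
        [0, 0, 0],
    ])
    print(negamax(board, player=1))
    # -> {'score': 1, 'move': (0, 2)}
    print(negamax_alpha_beta_pruned(board, player=1, alpha=-np.inf, beta=np.inf))
    # -> {'score': 1, 'move': (0, 2)}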
|