the-stack_0_15174
"""Tests for asyncio/sslproto.py."""
import logging
import socket
from test import support
import unittest
import weakref
from unittest import mock
try:
import ssl
except ImportError:
ssl = None
import asyncio
from asyncio import log
from asyncio import protocols
from asyncio import sslproto
from test import support
from test.test_asyncio import utils as test_utils
from test.test_asyncio import functional as func_tests
def tearDownModule():
asyncio.set_event_loop_policy(None)
@unittest.skipIf(ssl is None, 'No ssl module')
class SslProtoHandshakeTests(test_utils.TestCase):
def setUp(self):
super().setUp()
self.loop = asyncio.new_event_loop()
self.set_event_loop(self.loop)
def ssl_protocol(self, *, waiter=None, proto=None):
sslcontext = test_utils.dummy_ssl_context()
if proto is None: # app protocol
proto = asyncio.Protocol()
ssl_proto = sslproto.SSLProtocol(self.loop, proto, sslcontext, waiter,
ssl_handshake_timeout=0.1)
self.assertIs(ssl_proto._app_transport.get_protocol(), proto)
self.addCleanup(ssl_proto._app_transport.close)
return ssl_proto
def connection_made(self, ssl_proto, *, do_handshake=None):
transport = mock.Mock()
sslpipe = mock.Mock()
sslpipe.shutdown.return_value = b''
if do_handshake:
sslpipe.do_handshake.side_effect = do_handshake
else:
def mock_handshake(callback):
return []
sslpipe.do_handshake.side_effect = mock_handshake
with mock.patch('asyncio.sslproto._SSLPipe', return_value=sslpipe):
ssl_proto.connection_made(transport)
return transport
def test_handshake_timeout_zero(self):
sslcontext = test_utils.dummy_ssl_context()
app_proto = mock.Mock()
waiter = mock.Mock()
with self.assertRaisesRegex(ValueError, 'a positive number'):
sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter,
ssl_handshake_timeout=0)
def test_handshake_timeout_negative(self):
sslcontext = test_utils.dummy_ssl_context()
app_proto = mock.Mock()
waiter = mock.Mock()
with self.assertRaisesRegex(ValueError, 'a positive number'):
sslproto.SSLProtocol(self.loop, app_proto, sslcontext, waiter,
ssl_handshake_timeout=-10)
def test_eof_received_waiter(self):
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
self.connection_made(ssl_proto)
ssl_proto.eof_received()
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionResetError)
def test_fatal_error_no_name_error(self):
# From issue #363.
# _fatal_error() generates a NameError if sslproto.py
# does not import base_events.
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
# Temporarily turn off error logging so as not to spoil test output.
log_level = log.logger.getEffectiveLevel()
log.logger.setLevel(logging.FATAL)
try:
ssl_proto._fatal_error(None)
finally:
# Restore error logging.
log.logger.setLevel(log_level)
def test_connection_lost(self):
# From issue #472.
        # `yield from waiter` would hang if connection_lost() was called.
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
self.connection_made(ssl_proto)
ssl_proto.connection_lost(ConnectionAbortedError)
test_utils.run_briefly(self.loop)
self.assertIsInstance(waiter.exception(), ConnectionAbortedError)
def test_close_during_handshake(self):
# bpo-29743 Closing transport during handshake process leaks socket
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
transport = self.connection_made(ssl_proto)
test_utils.run_briefly(self.loop)
ssl_proto._app_transport.close()
self.assertTrue(transport.abort.called)
def test_get_extra_info_on_closed_connection(self):
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
self.assertIsNone(ssl_proto._get_extra_info('socket'))
default = object()
self.assertIs(ssl_proto._get_extra_info('socket', default), default)
self.connection_made(ssl_proto)
self.assertIsNotNone(ssl_proto._get_extra_info('socket'))
ssl_proto.connection_lost(None)
self.assertIsNone(ssl_proto._get_extra_info('socket'))
def test_set_new_app_protocol(self):
waiter = self.loop.create_future()
ssl_proto = self.ssl_protocol(waiter=waiter)
new_app_proto = asyncio.Protocol()
ssl_proto._app_transport.set_protocol(new_app_proto)
self.assertIs(ssl_proto._app_transport.get_protocol(), new_app_proto)
self.assertIs(ssl_proto._app_protocol, new_app_proto)
def test_data_received_after_closing(self):
ssl_proto = self.ssl_protocol()
self.connection_made(ssl_proto)
transp = ssl_proto._app_transport
transp.close()
# should not raise
self.assertIsNone(ssl_proto.data_received(b'data'))
def test_write_after_closing(self):
ssl_proto = self.ssl_protocol()
self.connection_made(ssl_proto)
transp = ssl_proto._app_transport
transp.close()
# should not raise
self.assertIsNone(transp.write(b'data'))
##############################################################################
# Start TLS Tests
##############################################################################
class BaseStartTLS(func_tests.FunctionalTestCaseMixin):
PAYLOAD_SIZE = 1024 * 100
TIMEOUT = support.LONG_TIMEOUT
def new_loop(self):
raise NotImplementedError
def test_buf_feed_data(self):
class Proto(asyncio.BufferedProtocol):
def __init__(self, bufsize, usemv):
self.buf = bytearray(bufsize)
self.mv = memoryview(self.buf)
self.data = b''
self.usemv = usemv
def get_buffer(self, sizehint):
if self.usemv:
return self.mv
else:
return self.buf
def buffer_updated(self, nsize):
if self.usemv:
self.data += self.mv[:nsize]
else:
self.data += self.buf[:nsize]
for usemv in [False, True]:
proto = Proto(1, usemv)
protocols._feed_data_to_buffered_proto(proto, b'12345')
self.assertEqual(proto.data, b'12345')
proto = Proto(2, usemv)
protocols._feed_data_to_buffered_proto(proto, b'12345')
self.assertEqual(proto.data, b'12345')
proto = Proto(2, usemv)
protocols._feed_data_to_buffered_proto(proto, b'1234')
self.assertEqual(proto.data, b'1234')
proto = Proto(4, usemv)
protocols._feed_data_to_buffered_proto(proto, b'1234')
self.assertEqual(proto.data, b'1234')
proto = Proto(100, usemv)
protocols._feed_data_to_buffered_proto(proto, b'12345')
self.assertEqual(proto.data, b'12345')
proto = Proto(0, usemv)
with self.assertRaisesRegex(RuntimeError, 'empty buffer'):
protocols._feed_data_to_buffered_proto(proto, b'12345')
def test_start_tls_client_reg_proto_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.start_tls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr)
tr.write(HELLO_MSG)
new_tr = await self.loop.start_tls(tr, proto, client_context)
self.assertEqual(await on_data, b'O')
new_tr.write(HELLO_MSG)
await on_eof
new_tr.close()
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
# No garbage is left if SSL is closed uncleanly
client_context = weakref.ref(client_context)
self.assertIsNone(client_context())
def test_create_connection_memory_leak(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
def serve(sock):
sock.settimeout(self.TIMEOUT)
sock.start_tls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
# XXX: We assume user stores the transport in protocol
proto.tr = tr
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr,
ssl=client_context)
self.assertEqual(await on_data, b'O')
tr.write(HELLO_MSG)
await on_eof
tr.close()
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
# No garbage is left for SSL client from loop.create_connection, even
# if user stores the SSLTransport in corresponding protocol instance
client_context = weakref.ref(client_context)
self.assertIsNone(client_context())
def test_start_tls_client_buf_proto_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
client_con_made_calls = 0
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.start_tls(server_context, server_side=True)
sock.sendall(b'O')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.sendall(b'2')
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.shutdown(socket.SHUT_RDWR)
sock.close()
class ClientProtoFirst(asyncio.BufferedProtocol):
def __init__(self, on_data):
self.on_data = on_data
self.buf = bytearray(1)
def connection_made(self, tr):
nonlocal client_con_made_calls
client_con_made_calls += 1
def get_buffer(self, sizehint):
return self.buf
def buffer_updated(self, nsize):
assert nsize == 1
self.on_data.set_result(bytes(self.buf[:nsize]))
class ClientProtoSecond(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(self, tr):
nonlocal client_con_made_calls
client_con_made_calls += 1
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data1 = self.loop.create_future()
on_data2 = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProtoFirst(on_data1), *addr)
tr.write(HELLO_MSG)
new_tr = await self.loop.start_tls(tr, proto, client_context)
self.assertEqual(await on_data1, b'O')
new_tr.write(HELLO_MSG)
new_tr.set_protocol(ClientProtoSecond(on_data2, on_eof))
self.assertEqual(await on_data2, b'2')
new_tr.write(HELLO_MSG)
await on_eof
new_tr.close()
# connection_made() should be called only once -- when
# we establish connection for the first time. Start TLS
# doesn't call connection_made() on application protocols.
self.assertEqual(client_con_made_calls, 1)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=self.TIMEOUT))
def test_start_tls_slow_client_cancel(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
client_context = test_utils.simple_client_sslcontext()
server_waits_on_handshake = self.loop.create_future()
def serve(sock):
sock.settimeout(self.TIMEOUT)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
try:
self.loop.call_soon_threadsafe(
server_waits_on_handshake.set_result, None)
data = sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
pass
finally:
sock.close()
class ClientProto(asyncio.Protocol):
def __init__(self, on_data, on_eof):
self.on_data = on_data
self.on_eof = on_eof
self.con_made_cnt = 0
def connection_made(proto, tr):
proto.con_made_cnt += 1
# Ensure connection_made gets called only once.
self.assertEqual(proto.con_made_cnt, 1)
def data_received(self, data):
self.on_data.set_result(data)
def eof_received(self):
self.on_eof.set_result(True)
async def client(addr):
await asyncio.sleep(0.5)
on_data = self.loop.create_future()
on_eof = self.loop.create_future()
tr, proto = await self.loop.create_connection(
lambda: ClientProto(on_data, on_eof), *addr)
tr.write(HELLO_MSG)
await server_waits_on_handshake
with self.assertRaises(asyncio.TimeoutError):
await asyncio.wait_for(
self.loop.start_tls(tr, proto, client_context),
0.5)
with self.tcp_server(serve, timeout=self.TIMEOUT) as srv:
self.loop.run_until_complete(
asyncio.wait_for(client(srv.addr),
timeout=support.SHORT_TIMEOUT))
def test_start_tls_server_1(self):
HELLO_MSG = b'1' * self.PAYLOAD_SIZE
ANSWER = b'answer'
server_context = test_utils.simple_server_sslcontext()
client_context = test_utils.simple_client_sslcontext()
answer = None
def client(sock, addr):
nonlocal answer
sock.settimeout(self.TIMEOUT)
sock.connect(addr)
data = sock.recv_all(len(HELLO_MSG))
self.assertEqual(len(data), len(HELLO_MSG))
sock.start_tls(client_context)
sock.sendall(HELLO_MSG)
answer = sock.recv_all(len(ANSWER))
sock.close()
class ServerProto(asyncio.Protocol):
def __init__(self, on_con, on_con_lost, on_got_hello):
self.on_con = on_con
self.on_con_lost = on_con_lost
self.on_got_hello = on_got_hello
self.data = b''
self.transport = None
def connection_made(self, tr):
self.transport = tr
self.on_con.set_result(tr)
def replace_transport(self, tr):
self.transport = tr
def data_received(self, data):
self.data += data
if len(self.data) >= len(HELLO_MSG):
self.on_got_hello.set_result(None)
def connection_lost(self, exc):
self.transport = None
if exc is None:
self.on_con_lost.set_result(None)
else:
self.on_con_lost.set_exception(exc)
async def main(proto, on_con, on_con_lost, on_got_hello):
tr = await on_con
tr.write(HELLO_MSG)
self.assertEqual(proto.data, b'')
new_tr = await self.loop.start_tls(
tr, proto, server_context,
server_side=True,
ssl_handshake_timeout=self.TIMEOUT)
proto.replace_transport(new_tr)
await on_got_hello
new_tr.write(ANSWER)
await on_con_lost
self.assertEqual(proto.data, HELLO_MSG)
new_tr.close()
async def run_main():
on_con = self.loop.create_future()
on_con_lost = self.loop.create_future()
on_got_hello = self.loop.create_future()
proto = ServerProto(on_con, on_con_lost, on_got_hello)
server = await self.loop.create_server(
lambda: proto, '127.0.0.1', 0)
addr = server.sockets[0].getsockname()
with self.tcp_client(lambda sock: client(sock, addr),
timeout=self.TIMEOUT):
await asyncio.wait_for(
main(proto, on_con, on_con_lost, on_got_hello),
timeout=self.TIMEOUT)
server.close()
await server.wait_closed()
self.assertEqual(answer, ANSWER)
self.loop.run_until_complete(run_main())
def test_start_tls_wrong_args(self):
async def main():
with self.assertRaisesRegex(TypeError, 'SSLContext, got'):
await self.loop.start_tls(None, None, None)
sslctx = test_utils.simple_server_sslcontext()
with self.assertRaisesRegex(TypeError, 'is not supported'):
await self.loop.start_tls(None, None, sslctx)
self.loop.run_until_complete(main())
def test_handshake_timeout(self):
# bpo-29970: Check that a connection is aborted if handshake is not
# completed in timeout period, instead of remaining open indefinitely
client_sslctx = test_utils.simple_client_sslcontext()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
server_side_aborted = False
def server(sock):
nonlocal server_side_aborted
try:
sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
server_side_aborted = True
finally:
sock.close()
async def client(addr):
await asyncio.wait_for(
self.loop.create_connection(
asyncio.Protocol,
*addr,
ssl=client_sslctx,
server_hostname='',
ssl_handshake_timeout=support.SHORT_TIMEOUT),
0.5)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(asyncio.TimeoutError):
self.loop.run_until_complete(client(srv.addr))
self.assertTrue(server_side_aborted)
# Python issue #23197: cancelling a handshake must not raise an
# exception or log an error, even if the handshake failed
self.assertEqual(messages, [])
# The 10s handshake timeout should be cancelled to free related
# objects without really waiting for 10s
client_sslctx = weakref.ref(client_sslctx)
self.assertIsNone(client_sslctx())
def test_create_connection_ssl_slow_handshake(self):
client_sslctx = test_utils.simple_client_sslcontext()
messages = []
self.loop.set_exception_handler(lambda loop, ctx: messages.append(ctx))
def server(sock):
try:
sock.recv_all(1024 * 1024)
except ConnectionAbortedError:
pass
finally:
sock.close()
async def client(addr):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop,
ssl_handshake_timeout=1.0)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaisesRegex(
ConnectionAbortedError,
r'SSL handshake.*is taking longer'):
self.loop.run_until_complete(client(srv.addr))
self.assertEqual(messages, [])
def test_create_connection_ssl_failed_certificate(self):
self.loop.set_exception_handler(lambda loop, ctx: None)
sslctx = test_utils.simple_server_sslcontext()
client_sslctx = test_utils.simple_client_sslcontext(
disable_verify=False)
def server(sock):
try:
sock.start_tls(
sslctx,
server_side=True)
except ssl.SSLError:
pass
except OSError:
pass
finally:
sock.close()
async def client(addr):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop,
ssl_handshake_timeout=support.LOOPBACK_TIMEOUT)
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
with self.assertRaises(ssl.SSLCertVerificationError):
self.loop.run_until_complete(client(srv.addr))
def test_start_tls_client_corrupted_ssl(self):
self.loop.set_exception_handler(lambda loop, ctx: None)
sslctx = test_utils.simple_server_sslcontext()
client_sslctx = test_utils.simple_client_sslcontext()
def server(sock):
orig_sock = sock.dup()
try:
sock.start_tls(
sslctx,
server_side=True)
sock.sendall(b'A\n')
sock.recv_all(1)
orig_sock.send(b'please corrupt the SSL connection')
except ssl.SSLError:
pass
finally:
orig_sock.close()
sock.close()
async def client(addr):
with self.assertWarns(DeprecationWarning):
reader, writer = await asyncio.open_connection(
*addr,
ssl=client_sslctx,
server_hostname='',
loop=self.loop)
self.assertEqual(await reader.readline(), b'A\n')
writer.write(b'B')
with self.assertRaises(ssl.SSLError):
await reader.readline()
writer.close()
return 'OK'
with self.tcp_server(server,
max_clients=1,
backlog=1) as srv:
res = self.loop.run_until_complete(client(srv.addr))
self.assertEqual(res, 'OK')
@unittest.skipIf(ssl is None, 'No ssl module')
class SelectorStartTLSTests(BaseStartTLS, unittest.TestCase):
def new_loop(self):
return asyncio.SelectorEventLoop()
@unittest.skipIf(ssl is None, 'No ssl module')
@unittest.skipUnless(hasattr(asyncio, 'ProactorEventLoop'), 'Windows only')
class ProactorStartTLSTests(BaseStartTLS, unittest.TestCase):
def new_loop(self):
return asyncio.ProactorEventLoop()
if __name__ == '__main__':
unittest.main()
the-stack_0_15175
# -*- coding: utf-8 -*-
# Copyright (c) Vispy Development Team. All Rights Reserved.
# Distributed under the (new) BSD License. See LICENSE.txt for more info.
try:
from ipywidgets.widgets import DOMWidget, register
from traitlets import Unicode, Int, Bool
except Exception as exp:
# Init dummy objects needed to import this module without errors.
# These are all overwritten with imports from IPython (on success)
DOMWidget = object
def _noop(x):
return x
register = _noop
class _MockTraitlet(object):
def __init__(self, *args, **kwargs):
pass
def tag(self, *args, **kwargs):
pass
Unicode = Int = Float = Bool = _MockTraitlet
available, testable, why_not, which = False, False, str(exp), None
else:
available, testable, why_not, which = True, False, None, None
from ....app.backends._ipynb_util import create_glir_message
from ....app import Timer
# ---------------------------------------------------------- IPython Widget ---
def _stop_timers(canvas):
"""Stop all timers in a canvas."""
for attr in dir(canvas):
try:
attr_obj = getattr(canvas, attr)
except NotImplementedError:
# This try/except is needed because canvas.position raises
# an error (it is not implemented in this backend).
attr_obj = None
if isinstance(attr_obj, Timer):
attr_obj.stop()
@register
class VispyWidget(DOMWidget):
_view_name = Unicode("VispyView").tag(sync=True)
_view_module = Unicode('vispy').tag(sync=True)
_view_module_version = Unicode('~0.2.0').tag(sync=True)
_model_name = Unicode('VispyModel').tag(sync=True)
_model_module = Unicode('vispy').tag(sync=True)
_model_module_version = Unicode('~0.2.0').tag(sync=True)
#height/width of the widget is managed by IPython.
#it's a string and can be anything valid in CSS.
#here we only manage the size of the viewport.
width = Int().tag(sync=True)
height = Int().tag(sync=True)
resizable = Bool(value=True).tag(sync=True)
def __init__(self, **kwargs):
if DOMWidget is object:
raise ImportError("'ipywidgets' must be installed to use the notebook backend.")
super(VispyWidget, self).__init__(**kwargs)
self.on_msg(self.events_received)
self.canvas = None
self.canvas_backend = None
self.gen_event = None
def set_canvas(self, canvas):
self.width, self.height = canvas._backend._default_size
self.canvas = canvas
self.canvas_backend = self.canvas._backend
self.canvas_backend.set_widget(self)
self.gen_event = self.canvas_backend._gen_event
#setup the backend widget then.
def events_received(self, widget, content, buffers):
if content['msg_type'] == 'init':
self.canvas_backend._reinit_widget()
elif content['msg_type'] == 'events':
events = content['contents']
for ev in events:
self.gen_event(ev)
elif content['msg_type'] == 'status':
if content['contents'] == 'removed':
# Stop all timers associated to the widget.
_stop_timers(self.canvas_backend._vispy_canvas)
def send_glir_commands(self, commands):
# older versions of ipython (<3.0) use base64
# array_serialization = 'base64'
array_serialization = 'binary'
msg = create_glir_message(commands, array_serialization)
msg['array_serialization'] = array_serialization
if array_serialization == 'base64':
self.send(msg)
elif array_serialization == 'binary':
# Remove the buffers from the JSON message: they will be sent
# independently via binary WebSocket.
self.send(msg, buffers=msg.pop('buffers', None))
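# --- Illustrative usage sketch (added comments; not part of the original file) ---
# In a notebook the widget is normally created by vispy's notebook backend rather
# than by hand; the backend name and canvas class below are assumptions for
# illustration, while set_canvas() and send_glir_commands() come from this module.
#
#     from vispy import app, scene
#     app.use_app('ipynb_webgl')            # assumed notebook backend name
#     canvas = scene.SceneCanvas(keys='interactive')
#     widget = VispyWidget()
#     widget.set_canvas(canvas)             # wires backend, size and event generator
#     widget                                # displaying the widget starts rendering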
the-stack_0_15176
"""
owtf.http.transaction
~~~~~~~~~~~~~~~~~~~~~
HTTP_Transaction is a container of useful HTTP Transaction information to
simplify code both in the framework and the plugins.
"""
import cgi
import logging
import io
import gzip
import zlib
import json
try:
from http.client import responses as response_messages
except ImportError:
from httplib import responses as response_messages
from cookies import Cookie, InvalidCookieError
from owtf.lib.general import derive_http_method
class HTTP_Transaction(object):
def __init__(self, timer):
self.timer = timer
self.new = False
def scope_str(self):
"""Get the scope in a string format
:return: Scope
:rtype: `str`
"""
return str(self.is_in_scope)[0]
def in_scope(self):
"""Check if the transaction is in scope
:return: True if in scope, else False
:rtype: `bool`
"""
return self.is_in_scope()
def start(self, url, data, method, is_in_scope):
"""Get attributes for a new transaction
:param url: transaction url
:type url: `str`
:param data: transaction data
:type data:
:param method:
:type method:
:param is_in_scope:
:type is_in_scope:
:return:
:rtype:
"""
self.is_in_scope = is_in_scope
self.start_request()
self.url = url
self.init_data(data)
self.method = derive_http_method(method, data)
self.found = None
self.raw_request = ''
self.response_headers = []
self.response_size = ''
self.status = ''
self.id = ''
self.html_link_id = ''
self.new = True # Flag new transaction.
def init_data(self, data):
"""Sets the data for the transaction
:param data: Data to set
:type data: `str`
:return: None
:rtype: None
"""
self.data = data
if self.data is None:
# This simplifies other code later, no need to cast to str if None, etc.
self.data = ''
def start_request(self):
"""Start timer for the request
:return: None
:rtype: None
"""
self.timer.start_timer('Request')
self.time = self.time_human = ''
def end_request(self):
"""End timer for the request
:return: None
:rtype: None
"""
self.time = self.timer.get_elapsed_time_as_str('Request')
self.time_human = self.time
self.local_timestamp = self.timer.get_current_date_time()
def set_transaction(self, found, request, response):
"""Response can be "Response" for 200 OK or "Error" for everything else, we don't care here.
:param found:
:type found:
:param request:
:type request:
:param response:
:type response:
:return:
:rtype:
"""
if self.url != response.url:
if response.code not in [302, 301]: # No way, error in hook.
# Mark as a redirect, dirty but more accurate than 200 :P
self.status = "%s Found" % str(302)
self.status += " --Redirect--> %s " % str(response.code)
self.status += response.msg
# Redirect differs in schema (i.e. https instead of http).
if self.url.split(':')[0] != response.url.split(':')[0]:
pass
self.url = response.url
else:
self.status = "%s %s" % (str(response.code), response.msg)
self.raw_request = request
self.found = found
self.response_headers = response.headers
self.response_contents = response.read()
self.check_if_compressed(response, self.response_contents)
self.end_request()
def set_transaction_from_db(self, id, url, method, status, time, time_human, local_timestamp, request_data,
raw_request, response_headers, response_size, response_body):
"""Set the transaction from the DB
:param id:
:type id:
:param url:
:type url:
:param method:
:type method:
:param status:
:type status:
:param time:
:type time:
:param time_human:
:type time_human:
:param local_timestamp:
:type local_timestamp:
:param request_data:
:type request_data:
:param raw_request:
:type raw_request:
:param response_headers:
:type response_headers:
:param response_size:
:type response_size:
:param response_body:
:type response_body:
:return:
:rtype:
"""
self.id = id
self.new = False # Flag NOT new transaction.
self.url = url
self.method = method
self.status = status
self.found = (self.status == "200 OK")
self.time = time
self.time_human = time_human
self.local_timestamp = local_timestamp
self.data = request_data
self.raw_request = raw_request
self.response_headers = response_headers
self.response_size = response_size
self.response_contents = response_body
def get_session_tokens(self):
"""Get a JSON blob of all captured cookies
:return:
:rtype:
"""
cookies = []
try: # parsing may sometimes fail
for cookie in self.cookies_list:
cookies.append(Cookie.from_string(cookie).to_dict())
except InvalidCookieError:
logging.debug("Cannot not parse the cookies")
return cookies
def set_error(self, error_message):
"""Set the error message for a transaction
:param error_message: Message to set
:type error_message: `str`
:return: None
:rtype: None
"""
# Only called for unknown errors, 404 and other HTTP stuff handled on self.SetResponse.
self.response_contents = error_message
self.end_request()
def get_id(self):
"""Get transaction ID
:return: transaction id
:rtype: `int`
"""
return self.id
def set_id(self, id, html_link_to_id):
"""Sets the transaction id and format an HTML link
:param id: transaction id
:type id: `int`
:param html_link_to_id: HTML link for the id
:type html_link_to_id: `str`
:return: None
:rtype: None
"""
self.id = id
self.html_link_id = html_link_to_id
# Only for new transactions, not when retrieved from DB, etc.
if self.new:
log = logging.getLogger('general')
log.info("New OWTF HTTP Transaction: %s",
" - ".join([self.id, self.time_human, self.status, self.method, self.url]))
def get_html_link(self, link_name=''):
"""Get the HTML link to the transaction ID
:param link_name: Name of the link
:type link_name: `str`
:return: Formatted HTML link
:rtype: `str`
"""
if '' == link_name:
link_name = "Transaction %s" % self.id
return self.html_link_id.replace('@@@PLACE_HOLDER@@@', link_name)
def get_html_link_time(self, link_name=''):
"""Get the HTML link to the transaction ID
:param link_name: Name of the link
:type link_name: `str`
:return: Formatted HTML link
:rtype: `str`
"""
return "%s (%s)" % (self.get_html_link(link_name), self.time_human)
def get_raw_escaped(self):
"""Get escaped request and response
:return: None
:rtype: None
"""
return "<pre>%s</pre>" % cgi.escape(self.get_raw())
def get_raw(self):
"""Get raw transaction request and response
:return: Raw string with response and request
:rtype: `str`
"""
return "%s\n\n%s" % (self.get_raw_request(), self.get_raw_response())
def get_raw_request(self):
"""Return raw request
:return: Raw request
:rtype: `str`
"""
return self.raw_request
def get_status(self):
"""Get status for transaction response
:return: Status
:rtype: `str`
"""
return self.status
def get_response_headers(self):
"""Get response headers for the transaction
:return:
:rtype:
"""
return self.response_headers
def get_raw_response(self, with_status=True):
"""Get the complete raw response
:param with_status: Want status?
:type with_status: `bool`
        :return: Raw response
:rtype: `str`
"""
try:
return "%s\r\n%s\n\n%s" % (self.get_status(), str(self.response_headers), self.response_contents)
except UnicodeDecodeError:
return "%s\r\n%s\n\n[Binary Content]" % (self.get_status(), str(self.response_headers))
def get_raw_response_headers(self, with_status=True):
"""Get raw response headers for the transaction
:param with_status: Want status?
:type with_status: `bool`
:return: Raw response headers as a string
:rtype: `str`
"""
return "%s\r\n%s" % (self.get_status(), str(self.response_headers))
def get_raw_response_body(self):
"""Return raw response content
:return: Raw response body
:rtype: `str`
"""
return self.response_contents
def import_proxy_req_resp(self, request, response):
"""Import proxy request and response
:param request:
:type request:
:param response:
:type response:
:return:
:rtype:
"""
self.is_in_scope = request.in_scope
self.url = request.url
self.init_data(request.body)
self.method = request.method
try:
self.status = "%s %s" % (str(response.code), response_messages[int(response.code)])
except KeyError:
self.status = "%s Unknown Error" % str(response.code)
self.raw_request = request.raw_request
self.response_headers = response.header_string
self.response_contents = response.body
self.response_size = len(self.response_contents)
self.time = str(response.request_time)
self.time_human = self.timer.get_time_human(self.time)
self.local_timestamp = request.local_timestamp
self.found = (self.status == "200 OK")
self.cookies_list = response.cookies
self.new = True
self.id = ''
self.html_link_id = ''
def get_decode_response(self):
return self.decoded_content
def check_if_compressed(self, response, content):
if response.info().get('Content-Encoding') == 'gzip': # check for gzip compression
            compressed_file = io.BytesIO()  # gzip bodies are bytes, so buffer them in BytesIO
compressed_file.write(content)
compressed_file.seek(0)
f = gzip.GzipFile(fileobj=compressed_file, mode='rb')
self.decoded_content = f.read()
elif response.info().get('Content-Encoding') == 'deflate': # check for deflate compression
self.decoded_content = zlib.decompress(content)
else:
            self.decoded_content = content  # else no compression
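if __name__ == '__main__':
    # Minimal self-check (added code, not part of the original module): the gzip and
    # deflate branches of HTTP_Transaction.check_if_compressed() reduce to the two
    # decompression calls below. The sample payload is an arbitrary assumption.
    raw = b'{"status": "ok"}'
    gzip_body = gzip.compress(raw)
    assert gzip.GzipFile(fileobj=io.BytesIO(gzip_body), mode='rb').read() == raw
    assert zlib.decompress(zlib.compress(raw)) == raw  # 'deflate'-encoded bodies
    print("compression round-trips verified")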
the-stack_0_15177
from PyQt4 import QtGui
from models.experiment import Experiment
__author__ = 'daniel'
class ExperimentComboBox(QtGui.QComboBox):
def __init__(self, session = None, parent = None):
super(ExperimentComboBox, self).__init__(parent)
self.session = session
self.refresh_experiments()
def refresh_experiments(self):
self.clear()
self.experiments = self.session.query(Experiment).all()
for e in self.experiments:
self.addItem(e.name)
def currentItem(self):
try:
val = self.experiments[self.currentIndex()]
except Exception as e:
print(e)
return None
return val
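# Illustrative usage sketch (added comments; not part of the original file). The
# widget expects a SQLAlchemy-style session whose query(Experiment).all() returns
# the experiments to list; the engine URL below is an assumption.
#
#     from sqlalchemy import create_engine
#     from sqlalchemy.orm import sessionmaker
#     session = sessionmaker(bind=create_engine('sqlite:///experiments.db'))()
#     combo = ExperimentComboBox(session=session)
#     combo.currentItem()   # -> the selected Experiment instance, or None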
the-stack_0_15178
# -*- coding: utf-8 -*-
# Copyright 2016 Matt Martz
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import sys
try:
from urllib import urlencode
except ImportError:
from urllib.parse import urlencode
from flask import Flask
try:
from cStringIO import StringIO
except ImportError:
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
from werkzeug.wrappers import BaseRequest
__version__ = '0.0.4'
def make_environ(event):
environ = {}
for hdr_name, hdr_value in event['headers'].items():
hdr_name = hdr_name.replace('-', '_').upper()
if hdr_name in ['CONTENT_TYPE', 'CONTENT_LENGTH']:
environ[hdr_name] = hdr_value
continue
http_hdr_name = 'HTTP_%s' % hdr_name
environ[http_hdr_name] = hdr_value
qs = event['queryStringParameters']
environ['REQUEST_METHOD'] = event['httpMethod']
environ['PATH_INFO'] = event['path']
environ['QUERY_STRING'] = urlencode(qs) if qs else ''
environ['REMOTE_ADDR'] = event['requestContext']['identity']['sourceIp']
environ['HOST'] = '%(HTTP_HOST)s:%(HTTP_X_FORWARDED_PORT)s' % environ
environ['SCRIPT_NAME'] = ''
environ['SERVER_PORT'] = environ['HTTP_X_FORWARDED_PORT']
environ['SERVER_PROTOCOL'] = 'HTTP/1.1'
environ['CONTENT_LENGTH'] = str(
len(event['body']) if event['body'] else ''
)
environ['wsgi.url_scheme'] = environ['HTTP_X_FORWARDED_PROTO']
environ['wsgi.input'] = StringIO(event['body'] or '')
environ['wsgi.version'] = (1, 0)
environ['wsgi.errors'] = sys.stderr
environ['wsgi.multithread'] = False
environ['wsgi.run_once'] = True
environ['wsgi.multiprocess'] = False
BaseRequest(environ)
return environ
class LambdaResponse(object):
def __init__(self):
self.status = None
self.response_headers = None
def start_response(self, status, response_headers, exc_info=None):
self.status = int(status[:3])
self.response_headers = dict(response_headers)
class FlaskLambda(Flask):
def __call__(self, event, context):
if 'httpMethod' not in event:
# In this "context" `event` is `environ` and
# `context` is `start_response`, meaning the request didn't
# occur via API Gateway and Lambda
return super(FlaskLambda, self).__call__(event, context)
response = LambdaResponse()
body = next(self.wsgi_app(
make_environ(event),
response.start_response
))
return {
'statusCode': response.status,
'headers': response.response_headers,
'body': body
}
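if __name__ == '__main__':
    # Local smoke test (added code, not part of the original module): invoke the app
    # the way API Gateway's Lambda proxy integration would, with a hand-built event.
    # The header and identity values are illustrative assumptions; make_environ()
    # expects Host, X-Forwarded-Port and X-Forwarded-Proto headers to be present.
    app = FlaskLambda(__name__)

    @app.route('/ping')
    def ping():
        return 'pong'

    fake_event = {
        'httpMethod': 'GET',
        'path': '/ping',
        'body': None,
        'queryStringParameters': None,
        'headers': {
            'Host': 'example.execute-api.us-east-1.amazonaws.com',
            'X-Forwarded-Port': '443',
            'X-Forwarded-Proto': 'https',
        },
        'requestContext': {'identity': {'sourceIp': '127.0.0.1'}},
    }
    print(app(fake_event, None))  # expected: statusCode 200 with body b'pong'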
the-stack_0_15180
"""Contains the OnScreenDebug class."""
__all__ = ['OnScreenDebug']
from panda3d.core import *
from direct.gui import OnscreenText
from direct.directtools import DirectUtil
class OnScreenDebug:
enabled = ConfigVariableBool("on-screen-debug-enabled", False)
def __init__(self):
self.onScreenText = None
self.frame = 0
self.text = ""
self.data = {}
def load(self):
if self.onScreenText:
return
fontPath = ConfigVariableString("on-screen-debug-font", "cmtt12").value
fontScale = ConfigVariableDouble("on-screen-debug-font-scale", 0.05).value
color = {
"black": Vec4(0, 0, 0, 1),
"white": Vec4(1, 1, 1, 1),
}
fgColor = color[ConfigVariableString("on-screen-debug-fg-color", "white").value]
bgColor = color[ConfigVariableString("on-screen-debug-bg-color", "black").value]
fgColor.setW(ConfigVariableDouble("on-screen-debug-fg-alpha", 0.85).value)
bgColor.setW(ConfigVariableDouble("on-screen-debug-bg-alpha", 0.85).value)
font = loader.loadFont(fontPath)
if not font.isValid():
print("failed to load OnScreenDebug font %s" % fontPath)
font = TextNode.getDefaultFont()
self.onScreenText = OnscreenText.OnscreenText(
pos = (-1.0, 0.9), fg=fgColor, bg=bgColor,
scale = (fontScale, fontScale, 0.0), align = TextNode.ALeft,
mayChange = 1, font = font)
# Make sure readout is never lit or drawn in wireframe
DirectUtil.useDirectRenderStyle(self.onScreenText)
def render(self):
if not self.enabled:
return
if not self.onScreenText:
self.load()
self.onScreenText.clearText()
entries = list(self.data.items())
entries.sort()
for k, v in entries:
if v[0] == self.frame:
# It was updated this frame (key equals value):
#isNew = " is"
isNew = "="
else:
# This data is not for the current
# frame (key roughly equals value):
#isNew = "was"
isNew = "~"
value = v[1]
if type(value) == float:
value = "% 10.4f"%(value,)
# else: other types will be converted to str by the "%s"
self.onScreenText.appendText("%20s %s %-44s\n"%(k, isNew, value))
self.onScreenText.appendText(self.text)
self.frame += 1
def clear(self):
self.text = ""
if self.onScreenText:
self.onScreenText.clearText()
def add(self, key, value):
self.data[key] = (self.frame, value)
return 1 # to allow assert onScreenDebug.add("foo", bar)
def has(self, key):
return key in self.data
def remove(self, key):
del self.data[key]
def removeAllWithPrefix(self, prefix):
toRemove = []
for key in list(self.data.keys()):
if len(key) >= len(prefix):
if key[:len(prefix)] == prefix:
toRemove.append(key)
for key in toRemove:
self.remove(key)
def append(self, text):
self.text += text
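# Illustrative usage sketch (added comments; not part of the original file). Inside
# a running Panda3D application the readout is typically refreshed once per frame;
# the task name and ShowBase setup below are assumptions, and render() only draws
# when the 'on-screen-debug-enabled' config variable is true.
#
#     from direct.showbase.ShowBase import ShowBase
#     base = ShowBase()
#     osd = OnScreenDebug()
#     def debugTask(task):
#         osd.add("frame", globalClock.getFrameCount())
#         osd.add("dt", globalClock.getDt())
#         osd.render()
#         return task.cont
#     base.taskMgr.add(debugTask, "onScreenDebugTask")
#     base.run()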
the-stack_0_15182
from unityagents import UnityEnvironment
import numpy as np
env = UnityEnvironment(file_name='./Tennis_Linux/Tennis.x86_64')
# get the default brain
brain_name = env.brain_names[0]
brain = env.brains[brain_name]
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
# number of agents
num_agents = len(env_info.agents)
print('Number of agents:', num_agents)
# size of each action
action_size = brain.vector_action_space_size
print('Size of each action:', action_size)
# examine the state space
states = env_info.vector_observations
state_size = states.shape[1]
print('There are {} agents. Each observes a state with length: {}'.format(states.shape[0], state_size))
print('The state for the first agent looks like:', states[0])
import time
import random
import torch
from collections import deque
import matplotlib.pyplot as plt
from ddpg.ddpg_agent import Agent, NoisyAgent
#agent = Agent(num_agents=num_agents, state_size=state_size, action_size=action_size, random_seed=2)
agent = NoisyAgent(num_agents=num_agents, state_size=state_size, action_size=action_size, random_seed=2)
def ddpg(n_episodes=2000):
scores_deque = deque(maxlen=100)
scores = []
mean_scores = []
start = time.time()
for i_episode in range(1, n_episodes+1):
agent.reset()
# reset the environment
env_info = env.reset(train_mode=True)[brain_name]
states = env_info.vector_observations
add_noise=True
score = np.zeros(num_agents)
t = 0
while True:
#print('\r{}: {}'.format(t, score), end="")
t += 1
#action = agent.act(state)
#next_state, reward, done, _ = env.step(action)
#env.render()
#actions = np.random.randn(num_agents, action_size) # select an action (for each agent)
#actions = np.clip(actions, -1, 1) # all actions between -1 and 1
#actions = agent.act(states, add_noise=add_noise) # select an action (for each agent)
actions = agent.act(states) # select an action (for each agent)
#print('\r[{}]{}'.format(t, actions[0]), end="")
env_info = env.step(actions)[brain_name] # send all actions to tne environment
next_states = env_info.vector_observations # get next state (for each agent)
rewards = env_info.rewards # get reward (for each agent)
dones = env_info.local_done # see if episode finished
for i in range(num_agents):
agent.step(states[i], actions[i], rewards[i], next_states[i], dones[i])
num_update = 5
for _ in range(num_update):
agent.update()
states = next_states
score += np.array(rewards)
#print('\r{}: {} {} {}'.format(t, score, actions[0], actions[1]), end="")
if np.any(dones):
break
max_score = np.max(score)
scores_deque.append(max_score)
scores.append(max_score)
mean_scores.append(np.mean(scores_deque))
current = time.time()
elapsed = current - start
elapsed_str = time.strftime("%H:%M:%S", time.gmtime(elapsed))
print('\rEpisode {}\tAverage Score: {:.2f}\t{}'.format(i_episode, np.mean(scores_deque), elapsed_str), end="")
if i_episode%50 == 0:
print('\rEpisode {}\tAverage Score: {:.2f}\t{}'.format(i_episode, np.mean(scores_deque), elapsed_str))
if np.mean(scores_deque) > 1.0:
print('\nEnvironment solved in {:d} episodes!\tAverage Score: {:.2f}'.format(i_episode-100, np.mean(scores_deque)))
break
torch.save(agent.actor_local.state_dict(), 'checkpoint_actor.pth')
torch.save(agent.critic_local.state_dict(), 'checkpoint_critic.pth')
return scores
scores = ddpg()
fig = plt.figure()
ax = fig.add_subplot(111)
plt.plot(np.arange(1, len(scores)+1), scores, color='blue')
# `mean_scores` is local to ddpg(), so recompute a 100-episode moving average here
# instead of referencing the undefined `max_score`/`mean_scores` names.
mean_scores = [np.mean(scores[max(0, i - 99):i + 1]) for i in range(len(scores))]
plt.plot(np.arange(1, len(mean_scores) + 1), mean_scores, color='orange')
plt.ylabel('Score')
plt.xlabel('Episode #')
plt.show()
env.close()
the-stack_0_15185
import numpy as np
import time
from collections import OrderedDict, deque, Counter
from digideep.environment import MakeEnvironment
from .data_helpers import flatten_dict, update_dict_of_lists, complete_dict_of_list, convert_time_to_batch_major, extract_keywise
# from mujoco_py import MujocoException
# from dm_control.rl.control import PhysicsError
from digideep.utility.logging import logger
from digideep.utility.profiling import KeepTime
from digideep.utility.monitoring import monitor
from digideep.environment.tsne_evaluation import tsne_evaluation
from digideep.environment.storage import Storage
class Explorer:
"""A class which runs environments in parallel and returns the result trajectories in a unified structure.
    It supports multiple agents in an environment.
Note:
The entrypoint of this class is the :func:`update` function, in which the :func:`step` function will be
called for ``n_steps`` times. In the :func:`step` function, the :func:`prestep` function is called first to get the
actions from the agents. Then the ``env.step`` function is called to execute those actions in the environments.
After the loop is done in the :func:`update`, we do another :func:`prestep` to save the ``observations``/``actions``
of the last step. This indicates the final action that the agent would take without actually executing that. This
information will be useful in some algorithms.
Args:
session (:obj:`~digideep.pipeline.session.Session`): The running session object.
agents (dict): A dictionary of the agents and their corresponding agent objects.
mode (str): The mode of the Explorer, which is any of the three: ``train`` | ``test`` | ``eval``
env (:obj:`env`): The parameters of the environment.
do_reset (bool): A flag indicating whether to reset the environment at the update start.
final_action (bool): A flag indicating whether in the final call of :func:`prestep` the action should also be generated or not.
num_workers (int): Number of workers to work in parallel.
deterministic (bool): Whether to choose the optimial action or to mix some noise with the action (i.e. for exploration).
n_steps (int): Number of steps to take in the :func:`update`.
render (bool): A flag used to indicate whether environment should be rendered at each step.
render_delay (float): The amount of seconds to wait after calling ``env.render``. Used when environment is too fast for
visualization, typically in ``eval`` mode.
seed (int): The environment seed.
Attributes:
steps (int): Number of times the :func:`step` function is called.
n_episode (int): Number of episodes (a full round of simulation) generated so far.
timesteps (int): Number of total timesteps of experience generated so far.
was_reset (bool): A flag indicating whether the Explorer has been just reset or not.
observations: A tracker of environment observations used to produce the actions for the next step.
masks: A tracker of environment ``done`` flag indicating the start of a new episode.
hidden_states: A tracker of hidden_states of the agents for producing the next step action in recurrent policies.
Caution:
Use ``do_reset`` with caution; only when you know what the consequences are.
        Generally there are few opportunities when this flag needs to be true.
Tip:
This class is partially serializable. It only saves the state of environment wrappers and not the environment per se.
See Also:
:ref:`ref-data-structure`
"""
def __init__(self, session, agents=None, **params):
self.agents = agents
self.params = params
self.session = session
self.timestep = 0
self.timestep_num = 0
self.eval_episode_reward = deque(maxlen=100)
self.obs_cluster_num = 16
self.action_cluster_num = 2
        # Storage for the FSM (finite-state machine) over clustered observations/actions
self.storage = Storage(obs_cluster_num=self.obs_cluster_num, action_cluster_num=self.action_cluster_num, num_steps=self.timestep_num * 1000)
# Create models
extra_env_kwargs = self.params.get("extra_env_kwargs", {})
menv = MakeEnvironment(session, mode=self.params["mode"], seed=self.params["seed"], **self.params["env"])
self.envs = menv.create_envs(num_workers=self.params["num_workers"], extra_env_kwargs=extra_env_kwargs)
# self.params["env"]["env_type"]
self.state = {}
self.state["steps"] = 0
self.state["n_episode"] = 0
self.state["timesteps"] = 0
self.state["was_reset"] = False
self.local = {}
self.local["steps"] = 0
self.local["n_episode"] = 0
self.monitor_n_episode()
self.monitor_timesteps()
# We only reset once. Later environments will be reset automatically.
self.reset()
        # Will the results be reported when using ``do_reset``?
def monitor_n_episode(self):
if self.params["mode"] == "train":
monitor.set_meta_key("episode", self.state["n_episode"])
def monitor_timesteps(self):
if self.params["mode"] == "train":
monitor.set_meta_key("frame", self.state["timesteps"])
def state_dict(self):
# TODO" Should we make a deepcopy?
return {"state":self.state, "envs":self.envs.state_dict()}
def load_state_dict(self, state_dict):
self.state.update(state_dict["state"])
self.envs.load_state_dict(state_dict["envs"])
self.monitor_n_episode()
self.monitor_timesteps()
# if self.params["mode"] in ["test", "eval"]:
# # We reset the explorer in case of test/eval to clear the history of observations/masks/hidden_state.
# # Because this part does not make sense to be transferred.
# self.reset()
def make_counter(self, storage):
inputs = storage.input_batch
obs_clusters = storage.obs_cluster_batch
act_clusters = storage.action_cluster_batch
actions = storage.action_batch
hidden_action = storage.hidden_action_batch
step = 0
obs_cluster_labels = obs_clusters.argmax(dim=1).numpy()
act_cluster_labels = act_clusters.argmax(dim=1).numpy()
transition_pair = [(act_cluster_labels[i], obs_cluster_labels[i + 1], act_cluster_labels[i + 1]) for i in range(0, len(act_cluster_labels) - 1)]
counter = Counter(transition_pair)
return counter
def make_FSM(self, counter, obs_cluster_num, action_cluster_num, cut_value=20):
old_transition_list = list(counter.keys())
transition_list = []
for key in old_transition_list:
if counter[key] >= cut_value:
transition_list.append(key)
transition_table = [[[] for x in range(action_cluster_num)] for x in range(action_cluster_num)]
for transition in transition_list:
before_act, current_obs, current_act = transition
transition_table[current_act][before_act].append(current_obs)
from beautifultable import BeautifulTable
table = BeautifulTable()
table.column_headers = ["_"] + [str(x) for x in range(action_cluster_num)]
for i in range(action_cluster_num):
table.append_row([i] + [transition_table[i][j] for j in range(action_cluster_num)])
return table
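    # Worked example (added comment, not original code): with action_cluster_num=2
    # and cut_value=20, a counter such as
    #     Counter({(0, 3, 0): 120, (0, 5, 1): 47, (1, 2, 0): 33, (1, 7, 1): 8})
    # keeps the first three transitions (the last falls below the cut). Rows of the
    # resulting table are the current action cluster, columns the previous one:
    #     row 0: [3] from action 0, [2] from action 1
    #     row 1: [5] from action 0, []  from action 1
    # i.e. observation cluster 3 keeps the agent in action cluster 0, cluster 5
    # switches it from 0 to 1, and cluster 2 switches it from 1 back to 0.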
def report_rewards(self, infos):
"""This function will extract episode information from infos and will send them to
:class:`~digideep.utility.monitoring.Monitor` class.
"""
# This episode keyword only exists if we use a Monitor wrapper.
# This keyword will only appear at the "reset" times.
# TODO: If this is a true multi-agent system, then the rewards
# must be separated as well!
if '/episode/r' in infos.keys():
rewards = infos['/episode/r']
self.eval_episode_reward.append(rewards)
self.timestep = self.timestep +1
#if self.timestep == self.timestep_num:
#print(np.mean(self.eval_episode_reward))
#import pdb
#pdb.set_trace()
#transition_counter = self.make_counter(self.storage)
#table = self.make_FSM(transition_counter, self.obs_cluster_num, self.action_cluster_num)
#print(table)
#tsne_evaluation(self.storage)
for rew in rewards:
if (rew is not None) and (not np.isnan(rew)):
self.local["n_episode"] += 1
self.state["n_episode"] += 1
self.monitor_n_episode()
monitor("/reward/"+self.params["mode"]+"/episodic", rew, window=self.params["win_size"])
self.session.writer.add_scalar('reward/'+self.params["mode"], rew, self.state["n_episode"])
def close(self):
"""It closes all environments.
"""
self.envs.close()
def reset(self):
"""Will reset the Explorer and all of its states. Will set ``was_reset`` to ``True`` to prevent immediate resets.
"""
self.state["observations"] = self.envs.reset()
self.state["masks"] = np.array([[0]]*self.params["num_workers"], dtype=np.float32)
# The initial hidden_state is not saved in the memory. The only use for it is
# getting passed to the action_generator.
# So if there is a size mismatch between this and the next hidden_states, no
# conflicts/errors would happen.
self.state["hidden_state"] = {}
for agent_name in self.agents:
self.state["hidden_state"][agent_name] = self.agents[agent_name].reset_hidden_state(self.params["num_workers"])
self.state["was_reset"] = True
def prestep(self, final_step=False):
"""
Function to produce actions for all of the agents. This function does not execute the actions in the environment.
Args:
final_step (bool): A flag indicating whether this is the last call of this function.
Returns:
dict: The pre-transition dictionary containing observations, masks, and agents informations. The format is like:
``{"observations":..., "masks":..., "agents":...}``
"""
with KeepTime("to_numpy"):
# TODO: Is it necessary for conversion of obs?
# NOTE: The np conversion will not work if observation is a dictionary.
# observations = np.array(self.state["observations"], dtype=np.float32)
observations = self.state["observations"]
masks = self.state["masks"]
hidden_state = self.state["hidden_state"]
with KeepTime("gen_action"):
publish_agents = True
agents = {}
# TODO: We are assuming a one-level action space.
if (not final_step) or (self.params["final_action"]):
if self.state["steps"] < self.params["warm_start"]:
# Take RANDOM actions if warm-starting
for agent_name in self.agents:
agents[agent_name] = self.agents[agent_name].random_action_generator(self.envs, self.params["num_workers"])
else:
# Take REAL actions if not warm-starting
for agent_name in self.agents:
action_generator = self.agents[agent_name].action_generator
agents[agent_name], storage_item = action_generator(observations, hidden_state[agent_name], masks, deterministic=self.params["deterministic"])
#self.storage.store(storage_item[0], storage_item[1], storage_item[2], storage_item[3], storage_item[4])
else:
publish_agents = False
# We are saving the "new" hidden_state now.
# for agent_name in self.agents:
# if (not final_step) or (self.params["final_action"]):
# action_generator = self.agents[agent_name].action_generator
# agents[agent_name] = action_generator(observations, hidden_state[agent_name], masks, deterministic=self.params["deterministic"])
# else:
# publish_agents = False
with KeepTime("form_dictionary"):
if publish_agents:
pre_transition = dict(observations=observations,
masks=masks,
agents=agents)
else:
pre_transition = dict(observations=observations,
masks=masks)
return pre_transition
def step(self):
"""Function that runs the ``prestep`` and the actual ``env.step`` functions.
It will also manipulate the transition data to be in appropriate format.
Returns:
dict: The full transition information, including the pre-transition (actions, last observations, etc) and the
results of executing actions on the environments, i.e. rewards and infos. The format is like:
``{"observations":..., "masks":..., "rewards":..., "infos":..., "agents":...}``
See Also:
:ref:`ref-data-structure`
"""
# We are saving old versions of observations, hidden_state, and masks.
with KeepTime("prestep"):
pre_transition = self.prestep()
# TODO: For true multi-agent systems, rewards must be a dictionary as well,
# i.e. one reward for each agent. However, if the agents are pursuing
# a single goal, the reward can still be a single scalar!
# Updating observations and masks: These two are one step old in the trajectory.
# hidden_state is the newest.
with KeepTime("envstep"):
# Prepare actions
actions = extract_keywise(pre_transition["agents"], "actions")
# Step
self.state["observations"], rewards, dones, infos = self.envs.step(actions)
# Post-step
self.state["hidden_state"] = extract_keywise(pre_transition["agents"], "hidden_state")
self.state["masks"] = np.array([0.0 if done_ else 1.0 for done_ in dones], dtype=np.float32).reshape((-1,1))
# NOTE: Uncomment if you find useful information in the continuous rewards ...
# monitor("/reward/"+self.params["mode"]+"/continuous", np.mean(rewards))
with KeepTime("render"):
if self.params["render"]:
self.envs.render()
if self.params["render_delay"] > 0:
time.sleep(self.params["render_delay"])
# except MujocoException as e:
# logger.error("We got a MuJoCo exception!")
# raise
# ## Retry??
# # return self.run()
with KeepTime("poststep"):
# TODO: Sometimes the type of observations is "dict" which shouldn't be. Investigate the reason.
if isinstance(self.state["observations"], OrderedDict) or isinstance(self.state["observations"], dict):
for key in self.state["observations"]:
if np.isnan(self.state["observations"][key]).any():
logger.warn('NaN caught in observations during rollout generation.', 'step =', self.state["steps"])
raise ValueError
else:
if np.isnan(self.state["observations"]).any():
logger.warn('NaN caught in observations during rollout generation.', 'step =', self.state["steps"])
raise ValueError
## Retry??
# return self.run()
self.state["steps"] += 1
self.state["timesteps"] += self.params["num_workers"]
self.monitor_timesteps()
# TODO: Adapt with the new dict_of_lists data structure.
with KeepTime("report_reward"):
self.report_rewards(infos)
transition = dict(**pre_transition,
rewards=rewards,
infos=infos)
return transition
def update(self):
"""Runs :func:`step` for ``n_steps`` times.
Returns:
dict: A dictionary of unix-stype file system keys including all information generated by the simulation.
See Also:
:ref:`ref-data-structure`
"""
# trajectory is a dictionary of lists
trajectory = {}
if not self.state["was_reset"] and self.params["do_reset"]:
self.reset()
self.state["was_reset"] = False
# Run T (n-step) steps.
self.local["steps"] = 0
self.local["n_episode"] = 0
while (self.params["n_steps"] and self.local["steps"] < self.params["n_steps"]) or \
(self.params["n_episodes"] and self.local["n_episode"] < self.params["n_episodes"]):
with KeepTime("step"):
# print("one exploration step ...")
transition = self.step()
with KeepTime("append"):
# Data is flattened in the explorer per se.
transition = flatten_dict(transition)
# Update the trajectory with the current list of data.
# Put nones if the key is absent.
update_dict_of_lists(trajectory, transition, index=self.local["steps"])
self.local["steps"] += 1
with KeepTime("poststep"):
# Take one prestep so we have the next observation/hidden_state/masks/action/value/ ...
transition = self.prestep(final_step=True)
transition = flatten_dict(transition)
update_dict_of_lists(trajectory, transition, index=self.local["steps"])
# Complete the trajectory if one key was in a transition, but did not occur in later
# transitions. "length=n_steps+1" is because of counting final out-of-loop prestep.
# complete_dict_of_list(trajectory, length=self.params["n_steps"]+1)
complete_dict_of_list(trajectory, length=self.local["steps"]+1)
result = convert_time_to_batch_major(trajectory)
# We discard the rest of monitored episodes for the test mode to prevent them from affecting next test.
monitor.discard_key("/reward/test/episodic")
return result
### Data Structure:
# Pre-step:
# observations
# masks:
#
# Agent (policies):
# actions
# hidden_state
# artifacts:
# action_log_p
# value
#
# Step:
# rewards
# infos
######################
##### Statistics #####
######################
# Stats: Wall-time
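# Sketch of what Explorer.update() returns (added comment; the exact key names are
# inferred from the Pre-step/Agent/Step outline above and from flatten_dict, so
# treat them as an approximation). Values are batch-major arrays shaped roughly
# (num_workers, n_steps + 1, ...), the extra step coming from the final out-of-loop
# prestep; keys absent from that last prestep (rewards, infos) are padded with None:
#     {
#       "/observations": ...,
#       "/masks": ...,
#       "/agents/<agent_name>/actions": ...,
#       "/agents/<agent_name>/hidden_state": ...,
#       "/rewards": ...,
#       "/infos/...": ...,
#     }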
the-stack_0_15186
# Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Custom RNN decoder."""
import tensorflow as tf
def rnn_decoder(decoder_inputs,
initial_state,
cell,
loop_function=None,
scope=None):
"""RNN decoder for the LSTM-SSD model.
This decoder returns a list of all states, rather than only the final state.
Args:
decoder_inputs: A list of 4D Tensors with shape [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
loop_function: If not None, this function will be applied to the i-th output
in order to generate the i+1-st input, and decoder_inputs will be ignored,
except for the first element ("GO" symbol). This can be used for decoding,
but also for training to emulate http://arxiv.org/abs/1506.03099.
Signature -- loop_function(prev, i) = next
* prev is a 2D Tensor of shape [batch_size x output_size],
* i is an integer, the step number (when advanced control is needed),
* next is a 2D Tensor of shape [batch_size x input_size].
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 4D Tensors with
shape [batch_size x output_size] containing generated outputs.
states: A list of the same length as decoder_inputs of the state of each
cell at each time-step. It is a 2D Tensor of shape
[batch_size x cell.state_size].
"""
with tf.variable_scope(scope or 'rnn_decoder'):
state_tuple = initial_state
outputs = []
states = []
prev = None
for local_step, decoder_input in enumerate(decoder_inputs):
if loop_function is not None and prev is not None:
with tf.variable_scope('loop_function', reuse=True):
decoder_input = loop_function(prev, local_step)
output, state_tuple = cell(decoder_input, state_tuple)
outputs.append(output)
states.append(state_tuple)
if loop_function is not None:
prev = output
return outputs, states
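# A minimal illustrative example, not part of the original API: a
# `loop_function` matching the signature documented above maps the previous
# output to the next decoder input. Here the previous output is fed back
# unchanged (a real implementation might apply a projection first).
def _feed_prev_loop_function(prev, unused_step):
  """Feeds the previous cell output back in as the next decoder input."""
  return prev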
def multi_input_rnn_decoder(decoder_inputs,
initial_state,
cell,
sequence_step,
selection_strategy='RANDOM',
is_training=None,
is_quantized=False,
preprocess_fn_list=None,
pre_bottleneck=False,
flatten_state=False,
scope=None):
"""RNN decoder for the Interleaved LSTM-SSD model.
This decoder takes multiple sequences of inputs and selects the input to feed
to the rnn at each timestep using its selection_strategy, which can be random,
learned, or deterministic.
This decoder returns a list of all states, rather than only the final state.
Args:
decoder_inputs: A list of lists of 2D Tensors [batch_size x input_size].
initial_state: 2D Tensor with shape [batch_size x cell.state_size].
cell: rnn_cell.RNNCell defining the cell function and size.
sequence_step: Tensor [batch_size] of the step number of the first elements
in the sequence.
selection_strategy: Method for picking the decoder_input to use at each
timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number
of times to use the second input before using the first.
is_training: boolean, whether the network is training. When using learned
selection, attempts exploration if training.
is_quantized: flag to enable/disable quantization mode.
preprocess_fn_list: List of functions accepting two tensor arguments: one
timestep of decoder_inputs and the lstm state. If not None,
decoder_inputs[i] will be updated with preprocess_fn[i] at the start of
each timestep.
pre_bottleneck: if True, use separate bottleneck weights for each sequence.
Useful when input sequences have differing numbers of channels. Final
bottlenecks will have the same dimension.
flatten_state: Whether the LSTM state is flattened.
scope: VariableScope for the created subgraph; defaults to "rnn_decoder".
Returns:
A tuple of the form (outputs, state), where:
outputs: A list of the same length as decoder_inputs of 2D Tensors with
shape [batch_size x output_size] containing generated outputs.
states: A list of the same length as decoder_inputs of the state of each
cell at each time-step. It is a 2D Tensor of shape
[batch_size x cell.state_size].
Raises:
ValueError: If selection_strategy is not recognized or unexpected unroll
length.
"""
if flatten_state and len(decoder_inputs[0]) > 1:
raise ValueError('In export mode, unroll length should not be more than 1')
with tf.variable_scope(scope or 'rnn_decoder'):
state_tuple = initial_state
outputs = []
states = []
batch_size = decoder_inputs[0][0].shape[0].value
num_sequences = len(decoder_inputs)
sequence_length = len(decoder_inputs[0])
for local_step in range(sequence_length):
for sequence_index in range(num_sequences):
if preprocess_fn_list is not None:
decoder_inputs[sequence_index][local_step] = (
preprocess_fn_list[sequence_index](
decoder_inputs[sequence_index][local_step], state_tuple[0]))
if pre_bottleneck:
decoder_inputs[sequence_index][local_step] = cell.pre_bottleneck(
inputs=decoder_inputs[sequence_index][local_step],
state=state_tuple[1],
input_index=sequence_index)
action = generate_action(selection_strategy, local_step, sequence_step,
[batch_size, 1, 1, 1])
inputs, _ = select_inputs(decoder_inputs, action, local_step)
# Mark base network endpoints under raw_inputs/
with tf.name_scope(None):
inputs = tf.identity(inputs, 'raw_inputs/base_endpoint')
output, state_tuple_out = cell(inputs, state_tuple)
state_tuple = select_state(state_tuple, state_tuple_out, action)
outputs.append(output)
states.append(state_tuple)
return outputs, states
def generate_action(selection_strategy, local_step, sequence_step,
action_shape):
"""Generate current (binary) action based on selection strategy.
Args:
selection_strategy: Method for picking the decoder_input to use at each
timestep. Must be 'RANDOM', 'SKIPX' for integer X, where X is the number
of times to use the second input before using the first.
local_step: Tensor [batch_size] of the step number within the current
unrolled batch.
sequence_step: Tensor [batch_size] of the step number of the first elements
in the sequence.
action_shape: The shape of action tensor to be generated.
Returns:
A tensor of shape action_shape, each element is an individual action.
Raises:
ValueError: if selection_strategy is not supported or if 'SKIP' is not
followed by numerics.
"""
if selection_strategy.startswith('RANDOM'):
action = tf.random.uniform(action_shape, maxval=2, dtype=tf.int32)
action = tf.minimum(action, 1)
# First step always runs large network.
if local_step == 0 and sequence_step is not None:
action *= tf.minimum(
tf.reshape(tf.cast(sequence_step, tf.int32), action_shape), 1)
elif selection_strategy.startswith('SKIP'):
inter_count = int(selection_strategy[4:])
if local_step % (inter_count + 1) == 0:
action = tf.zeros(action_shape)
else:
action = tf.ones(action_shape)
else:
raise ValueError('Selection strategy %s not recognized' %
selection_strategy)
return tf.cast(action, tf.int32)
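# Purely illustrative helper (plain Python, no tensors): the 0/1 pattern that a
# 'SKIP<X>' strategy produces over an unroll, where action 0 runs the large
# model and action 1 runs the small model. For example inter_count=2 yields
# [0, 1, 1, 0, 1, 1, ...].
def _skip_action_pattern(inter_count, num_steps):
  return [0 if step % (inter_count + 1) == 0 else 1 for step in range(num_steps)]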
def select_inputs(decoder_inputs, action, local_step, get_alt_inputs=False):
"""Selects sequence from decoder_inputs based on 1D actions.
Given multiple input batches, creates a single output batch by
selecting from the action[i]-ith input for the i-th batch element.
Args:
decoder_inputs: A 2-D list of tensor inputs.
action: A tensor of shape [batch_size]. Each element corresponds to an index
of decoder_inputs to choose.
    local_step: The current timestep.
get_alt_inputs: Whether the non-chosen inputs should also be returned.
Returns:
The constructed output. Also outputs the elements that were not chosen
if get_alt_inputs is True, otherwise None.
Raises:
ValueError: if the decoder inputs contains other than two sequences.
"""
num_seqs = len(decoder_inputs)
if not num_seqs == 2:
raise ValueError('Currently only supports two sets of inputs.')
stacked_inputs = tf.stack(
[decoder_inputs[seq_index][local_step] for seq_index in range(num_seqs)],
axis=-1)
action_index = tf.one_hot(action, num_seqs)
inputs = tf.reduce_sum(stacked_inputs * action_index, axis=-1)
inputs_alt = None
# Only works for 2 models.
if get_alt_inputs:
# Reverse of action_index.
action_index_alt = tf.one_hot(action, num_seqs, on_value=0.0, off_value=1.0)
inputs_alt = tf.reduce_sum(stacked_inputs * action_index_alt, axis=-1)
return inputs, inputs_alt
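# Purely illustrative sketch (plain Python lists instead of tensors): for each
# batch element i, select_inputs effectively picks
# decoder_inputs[action[i]][local_step][i].
def _select_inputs_toy_example():
  seq_a = [['a0', 'a1']]  # sequence 0: one timestep, batch of two
  seq_b = [['b0', 'b1']]  # sequence 1: one timestep, batch of two
  action = [0, 1]         # element 0 takes sequence 0, element 1 takes sequence 1
  return [[seq_a, seq_b][act][0][i] for i, act in enumerate(action)]  # ['a0', 'b1']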
def select_state(previous_state, new_state, action):
"""Select state given action.
Currently only supports binary action. If action is 0, it means the state is
generated from the large model, and thus we will update the state. Otherwise,
if the action is 1, it means the state is generated from the small model, and
in interleaved model, we skip this state update.
Args:
previous_state: A state tuple representing state from previous step.
new_state: A state tuple representing newly computed state.
action: A tensor the same shape as state.
Returns:
A state tuple selected based on the given action.
"""
action = tf.cast(action, tf.float32)
state_c = previous_state[0] * action + new_state[0] * (1 - action)
state_h = previous_state[1] * action + new_state[1] * (1 - action)
return (state_c, state_h)
|
the-stack_0_15187 | # Random Forest Regression
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
# Import the dataset: position level is the single feature, salary is the target
dataset = pd.read_csv('Position_Salaries.csv')
X = dataset.iloc[:, 1:2].values
y = dataset.iloc[:, 2].values
# Fit a Random Forest regressor with 300 trees on the whole dataset
from sklearn.ensemble import RandomForestRegressor
regressor = RandomForestRegressor(n_estimators=300, random_state=0)
regressor.fit(X, y)
# Predicting the salary for position level 6.5
y_pred = regressor.predict([[6.5]])
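# Illustrative check only: with a single feature, feature_importances_ is
# trivially [1.]; printing the prediction is just a usage example.
print('Predicted salary for level 6.5:', y_pred)
print('Feature importances:', regressor.feature_importances_)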
# Visualising
X_grid = np.arange(min(X), max(X), 0.01)
X_grid = X_grid.reshape(len(X_grid), 1)
plt.scatter(X, y, color = 'red')
plt.plot(X_grid, regressor.predict(X_grid), color = 'blue')
plt.title('Truth or Bluff (Random Forest Regression)')
plt.xlabel('Position Level')
plt.ylabel('Salary')
plt.show() |
the-stack_0_15189 | import re
import logging
import uuid
import inspect
import typing
try:
orjson_enabled = True
import orjson as json
except ImportError:
orjson_enabled = False
import json
from enum import Enum
from typing import Dict, Type, Callable, Optional, List, Union, Literal
from nacl.signing import VerifyKey
from nacl.exceptions import BadSignatureError
from fastapi import FastAPI, Request
from fastapi.exceptions import HTTPException
from fastapi.responses import JSONResponse, ORJSONResponse
from pydantic import ValidationError, validate_arguments, constr, conint
from roid.components import (
Component,
ComponentType,
ButtonStyle,
EMOJI_REGEX,
SelectOption,
SelectValue,
InvokeContext,
)
from roid.exceptions import CommandAlreadyExists, ComponentAlreadyExists
from roid.objects import PartialEmoji
from roid.command import CommandType, Command, CommandGroup
from roid.interactions import InteractionType, Interaction
from roid.error_handlers import KNOWN_ERRORS
from roid.response import (
ResponsePayload,
ResponseType,
ResponseData,
Response,
)
from roid.http import HttpHandler
from roid.state import StorageBackend, MultiManagedState, SqliteBackend
from roid.deferred import CommandsBlueprint, DeferredGroupCommand
_log = logging.getLogger("roid-main")
class SlashCommands(FastAPI):
"""
A slash commands application.
This wraps the standard FastAPI class so this can in theory be used to create
a basic general web application around the bot as well. However, the `/` route
is reserved and docs are disabled.
"""
def __init__(
self,
application_id: int,
application_public_key: str,
token: str,
register_commands: bool = False,
state_backend: Optional[StorageBackend] = None,
**extra,
):
"""
A slash commands application.
This wraps the standard FastAPI class so this can in theory be used to create
a basic general web application around the bot as well. However, the `/` route
is reserved and docs are disabled.
Args:
application_id:
The application id obtained from discord.
See (https://discord.com/developers/application) to get this.
application_public_key:
The public key for request verification.
See (https://discord.com/developers/application) to get this.
token:
The bot token, this can be found in the portal at
https://discord.com/developers/applications/656598065532239892/bot.
register_commands:
An optional bool determining if the system automatically registers the
new commands.
                Defaults to False.
WARNING: If this is True it will bulk overwrite the existing
application global commands and guild commands.
state_backend:
The given storage backend to use for internal state management
and `SlashCommands.state` calls.
If no backend is given the Sqlite backend is used.
"""
response_class = ORJSONResponse if orjson_enabled else JSONResponse
super().__init__(
**extra,
docs_url=None,
redoc_url=None,
default_response_class=response_class,
)
if state_backend is None:
            state_backend = SqliteBackend("__internal_managed_state")
self.__state_backend = state_backend
self.__state: Optional[MultiManagedState] = None
self.register_commands = register_commands
self._verify_key = VerifyKey(bytes.fromhex(application_public_key))
self._application_id = application_id
self._token = token
self._global_error_handlers = KNOWN_ERRORS
self._commands: Dict[str, Union[Command, CommandGroup]] = {}
self._components: Dict[str, Component] = {}
self._http: Optional[HttpHandler] = None
# register the internal route and FastAPI internals.
self.post("/", name="Interaction Events")(self.__root)
self.on_event("startup")(self._startup)
self.on_event("shutdown")(self._shutdown)
def register_commands_on_start(self):
self.register_commands = True
@property
def state(self) -> MultiManagedState:
return self.__state
@state.setter
def state(self, _):
if hasattr(self, "_ignored_child"):
raise RuntimeError("state cannot be set at runtime.")
self._ignored_child = True
@property
def application_id(self):
return self._application_id
async def _startup(self):
"""A startup lifetime task invoked by the ASGI server."""
self._http = HttpHandler(self.application_id, self._token)
self.__state = MultiManagedState(backend=self.__state_backend)
await self.__state.startup()
if not self.register_commands:
return
# We can set the globals in bulk.
await self.reload_global_commands()
for command in self._commands.values():
if command.guild_ids is None:
continue
_log.info(
f"Registering command {command.name} for guilds: {command.guild_ids}"
)
await command.register(self)
async def _shutdown(self):
"""A shutdown lifetime task invoked by the ASGI server."""
try:
await self._http.shutdown()
finally:
await self.__state.shutdown()
async def reload_global_commands(self):
"""
Registers all global commands in bulk with Discord.
Note: This will ignore any commands with a `guild_id` or `guild_ids` specified.
"""
_log.debug("registering global commands with discord")
await self._http.register_commands(
[c for c in self._commands.values() if c.guild_ids is None]
)
def register_error(
self,
error: Type[Exception],
callback: Callable[[Exception], ResponsePayload],
):
"""
Registers a given error type to a handler.
This means that if an error is raised by the system that matches the given
exception type the callback will be invoked and it's response sent back.
The traceback is not logged if this is set.
Args:
error:
The error type itself, this must inherit from `Exception`.
callback:
The callback to handle the error and return a response.
"""
if not issubclass(error, Exception):
raise TypeError("error type does not inherit from `Exception`")
self._global_error_handlers[error] = callback
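    # Illustrative usage sketch only (the exception and handler below are
    # hypothetical):
    #
    #   class NotFound(Exception):
    #       ...
    #
    #   def on_not_found(err: NotFound) -> ResponsePayload:
    #       ...  # build and return a ResponsePayload describing the failure
    #
    #   app.register_error(NotFound, on_not_found)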
async def __root(self, request: Request):
try:
signature = request.headers["X-Signature-Ed25519"]
timestamp = request.headers["X-Signature-Timestamp"]
body = await request.body()
self._verify_key.verify(
b"%s%s" % (timestamp.encode(), body), bytes.fromhex(signature)
)
except (BadSignatureError, KeyError):
raise HTTPException(status_code=401)
data = json.loads(body)
logging.debug(f"got payload: {data}")
try:
interaction = Interaction(**data)
except ValidationError as e:
_log.warning(f"rejecting response due to {e!r}")
raise HTTPException(status_code=422, detail=e.errors())
if interaction.type == InteractionType.PING:
return {"type": ResponseType.PONG}
elif interaction.type in (
InteractionType.APPLICATION_COMMAND,
InteractionType.APPLICATION_COMMAND_AUTOCOMPLETE,
):
cmd = self._commands.get(interaction.data.name)
if cmd is None:
raise HTTPException(status_code=400, detail="No command found")
DEFAULT_RESPONSE_TYPE = ResponseType.CHANNEL_MESSAGE_WITH_SOURCE
return await self._invoke_with_handlers(
cmd, interaction, DEFAULT_RESPONSE_TYPE, pass_parent=True
)
elif interaction.type == InteractionType.MESSAGE_COMPONENT:
if interaction.data.custom_id is None:
raise HTTPException(status_code=400)
custom_id, *_ = interaction.data.custom_id.split(":", maxsplit=1)
component = self._components.get(custom_id)
if component is None:
raise HTTPException(status_code=400, detail="No component found")
DEFAULT_RESPONSE_TYPE = ResponseType.UPDATE_MESSAGE
return await self._invoke_with_handlers(
component, interaction, DEFAULT_RESPONSE_TYPE
)
raise HTTPException(status_code=400)
async def _invoke_with_handlers(
self,
callback,
interaction: Interaction,
default_response_type: ResponseType,
pass_parent: bool = False,
) -> ResponsePayload:
try:
resp = await callback(self, interaction)
except Exception as e:
handler = self._global_error_handlers.get(type(e))
if handler is None:
raise e from None
resp = handler(e)
args = [default_response_type, resp]
if pass_parent:
args.append(interaction)
resp = await self.process_response(*args)
_log.debug("returning response: %s", resp)
return resp
@validate_arguments(config={"arbitrary_types_allowed": True})
async def process_response(
self,
default_response_type: ResponseType,
response: Union[
None,
ResponsePayload,
Response,
ResponseData,
],
parent_interaction: Optional[Interaction] = None,
) -> ResponsePayload:
"""
Converts any of the possible response types into a ResponsePayload.
This is mostly useful for deferred components and allowing some level
of dynamic handling for users.
Args:
default_response_type:
The default ResponseType to use if the Response object / data
has not been set one.
response:
A given instance of the possible response types to process and
convert.
parent_interaction:
The interaction a given component belongs to.
Returns:
A ResponsePayload instance that has had all deferred components
resolved.
"""
if response is None:
return await Response().into_response_payload(
app=self,
default_type=default_response_type,
parent_interaction=parent_interaction,
)
if isinstance(response, ResponsePayload):
return response
if isinstance(response, Response):
return await response.into_response_payload(
app=self,
default_type=default_response_type,
parent_interaction=parent_interaction,
)
elif isinstance(response, ResponseData):
return ResponsePayload(type=default_response_type, data=response)
raise TypeError(
f"expected either: {ResponsePayload!r}, "
f"{ResponseData!r} or {Response!r} return type."
)
def add_blueprint(self, bp: CommandsBlueprint):
"""
Registers all commands and components linked
to the blueprint with the application.
This resolves all deferred components in the process.
"""
for command in bp._commands: # noqa
command(app=self)
for component in bp._components: # noqa
component(app=self)
@validate_arguments(config={"arbitrary_types_allowed": True})
def group(
self,
name: str,
description: str,
*,
guild_id: int = None,
guild_ids: List[int] = None,
default_permissions: bool = True,
defer_register: bool = False,
group_name: constr(
strip_whitespace=True, regex="[a-zA-Z0-9]+", min_length=1, max_length=30
) = "command",
group_description: constr(
strip_whitespace=True, regex="[a-zA-Z0-9 ]+", min_length=1, max_length=95
) = "Select a sub command to run.",
existing_commands: Dict[str, DeferredGroupCommand] = None, # noqa
):
"""
Registers a command group with the given app.
        The description is required; if it is missing a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the group command.
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
                If set to `True` the command will not be automatically registered / updated.
group_name:
The name of the parameter to label the sub commands group select as.
group_description:
The description of the select option for the sub commands.
"""
cmd = CommandGroup(
app=self,
name=name,
description=description,
application_id=self.application_id,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=not defer_register,
group_name=group_name,
group_description=group_description,
existing_commands=existing_commands,
)
if name in self._commands:
raise CommandAlreadyExists(
f"command with name {name!r} has already been defined and registered"
)
self._commands[name] = cmd
return cmd
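    # Illustrative usage sketch only; sub-commands are then attached through the
    # returned `CommandGroup`, whose API is defined elsewhere in roid:
    #
    #   notes = app.group("notes", "Manage your notes.", guild_id=1234)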
@validate_arguments
def command(
self,
name: str,
description: str = None,
*,
type: CommandType = CommandType.CHAT_INPUT, # noqa
guild_id: int = None,
guild_ids: List[int] = None,
default_permissions: bool = True,
defer_register: bool = False,
):
"""
Registers a command with the given app.
If the command type is either `CommandType.MESSAGE` or `CommandType.USER`
there cannot be any description however, if the command type
is `CommandType.CHAT_INPUT` then description is required.
If either of those conditions are broken a `ValueError` is raised.
Args:
name:
The name of the command. This must be unique / follow the general
slash command rules as described in the "Application Command Structure"
section of the interactions documentation.
description:
The description of the command. This can only be applied to
`CommandType.CHAT_INPUT` commands.
type:
The type of command. This determines if it's a chat input command,
user context menu command or message context menu command.
defaults to `CommandType.CHAT_INPUT`
guild_id:
The optional guild id if this is a guild specific command.
guild_ids:
An optional list of id's to register this command with multiple guilds.
default_permissions:
Whether the command is enabled by default when the app is added to a guild.
defer_register:
Whether or not to automatically register the command / update the command
if needed.
                If set to `True` the command will not be automatically registered / updated.
"""
if type in (CommandType.MESSAGE, CommandType.USER) and description is not None:
raise ValueError(f"only CHAT_INPUT types can have a set description.")
elif type is CommandType.CHAT_INPUT and description is None:
raise ValueError(
f"missing required field 'description' for CHAT_INPUT commands."
)
def wrapper(func):
cmd = Command(
app=self,
callback=func,
name=name,
description=description,
application_id=self.application_id,
cmd_type=type,
guild_id=guild_id,
guild_ids=guild_ids,
default_permissions=default_permissions,
defer_register=not defer_register,
)
if name in self._commands:
raise CommandAlreadyExists(
f"command with name {name!r} has already been defined and registered"
)
self._commands[name] = cmd
return cmd
return wrapper
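    # Illustrative usage sketch only -- the callback's parameters are resolved
    # by `Command` (defined elsewhere in roid), so the signature below is an
    # assumption:
    #
    #   @app.command("ping", "Check that the bot is responsive.")
    #   async def ping(interaction: Interaction):
    #       ...  # return a Response / ResponseData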
@validate_arguments
def button(
self,
label: str,
style: ButtonStyle,
*,
custom_id: Optional[
constr(
strip_whitespace=True, regex="[a-zA-Z0-9]+", min_length=1, max_length=32
)
] = None,
disabled: bool = False,
emoji: constr(strip_whitespace=True, regex=EMOJI_REGEX) = None,
url: Optional[str] = None,
oneshot: bool = False,
):
"""
Attaches a button component to the given command.
Args:
style:
The set button style. This can be any set style however url styles
require the url kwarg and generally would be better off using
the hyperlink helper decorator.
custom_id:
The custom button identifier. If you plan on having long running
                persistent buttons that don't require context from their parent command;
e.g. reaction roles. You probably want to set this.
disabled:
If the button should start disabled or not.
label:
The button label / text shown on the button.
emoji:
The set emoji for the button. This should be a custom emoji
not a unicode emoji (use the `label` field for that.)
url:
The hyperlink url, if this is set the function body is not invoked
on click along with the `emoji` and `style` field being ignored.
oneshot:
If set to True this will remove the context from the store as soon
as it's invoked for the first time. This allows you to essentially
create one shot buttons which are invalidated after the first use.
"""
if emoji is not None:
emoji = re.findall(EMOJI_REGEX, emoji)[0]
animated, name, id_ = emoji
emoji = PartialEmoji(id=id_, name=name, animated=bool(animated))
if custom_id is None:
custom_id = str(uuid.uuid4())
if url is not None:
custom_id = None
def wrapper(func):
component = Component(
app=self,
callback=func,
type_=ComponentType.BUTTON,
style=style,
custom_id=custom_id,
disabled=disabled,
label=label,
emoji=emoji,
url=url,
oneshot=oneshot,
)
if url is None:
if custom_id in self._components:
raise ComponentAlreadyExists(
f"component with custom_id {custom_id!r} has "
f"already been defined and registered"
)
self._components[custom_id] = component
return component
return wrapper
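    # Illustrative usage sketch only (`ButtonStyle.PRIMARY` is assumed to be a
    # valid member of the imported ButtonStyle enum):
    #
    #   @app.button(label="Confirm", style=ButtonStyle.PRIMARY, oneshot=True)
    #   async def confirm(interaction: Interaction):
    #       ...  # return a Response updating the original message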
@validate_arguments
def select(
self,
*,
custom_id: Optional[
constr(
strip_whitespace=True, regex="[a-zA-Z0-9]+", min_length=1, max_length=32
)
] = None,
disabled: bool = False,
placeholder: str = "Select an option.",
min_values: conint(ge=0, le=25) = 1,
max_values: conint(ge=0, le=25) = 1,
oneshot: bool = False,
):
"""
A select menu component.
        This will occupy an entire action row, so any components sharing the row
        will be rejected (on a first come, first served basis).
Args:
custom_id:
                The custom component identifier. If you plan on having long running
                persistent select menus that don't require context from their parent command;
e.g. reaction roles. You probably want to set this.
disabled:
If the button should start disabled or not.
placeholder:
The placeholder text the user sees while the menu is not focused.
min_values:
The minimum number of values the user must select.
max_values:
The maximum number of values the user can select.
oneshot:
If set to True this will remove the context from the store as soon
as it's invoked for the first time. This allows you to essentially
                create one shot select menus which are invalidated after the first use.
"""
if custom_id is None:
custom_id = str(uuid.uuid4())
if max_values < min_values:
raise ValueError(
f"the minimum amount of select values cannot be "
f"larger than the max amount of select values."
)
def wrapper(func):
spec = inspect.getfullargspec(func)
for param, hint in spec.annotations.items():
if hint in (Interaction, InvokeContext):
continue
origin = typing.get_origin(hint)
# Needed if it's a multi-valued select.
if origin is not list and max_values != 1 and min_values != 1:
raise TypeError(
f"multi-value selects must be typed as a List[T] rather than T."
)
if origin is list:
                    inner, *_ = typing.get_args(hint)
if inner is str:
options = []
break
if hint is str:
options = []
break
options = _get_select_options(
typing.get_args(hint)[0] if origin is list else hint
)
if len(options) == 0:
raise ValueError(f"Select options must contain at least one value.")
break
else:
raise TypeError(
"function missing select value parameter and type hints."
)
component = Component(
app=self,
callback=func,
type_=ComponentType.SELECT_MENU,
custom_id=custom_id,
disabled=disabled,
placeholder=placeholder,
min_values=min_values,
max_values=max_values,
oneshot=oneshot,
options=options,
options_parameter=param,
)
if custom_id in self._components:
raise ComponentAlreadyExists(
f"component with custom_id {custom_id!r} has already been defined and registered"
)
self._components[custom_id] = component
return component
return wrapper
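    # Illustrative usage sketch only: the decorated callback is annotated with
    # an Enum of `str` values (or a Literal / SelectValue enum), from which the
    # select options are derived by `_get_select_options` below:
    #
    #   class Colour(Enum):
    #       RED = "Red"
    #       BLUE = "Blue"
    #
    #   @app.select(placeholder="Pick a colour.")
    #   async def pick_colour(choice: Colour):
    #       ...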
def _get_select_options(val: typing.Any) -> List[SelectOption]:
option_choices = []
if typing.get_origin(val) is Literal:
for value in typing.get_args(val):
if not isinstance(value, str):
raise TypeError(
f"select options have incompatible types. "
f"Literals must be all type `str`. "
f"Expected type str found {type(value)!r}"
)
option = SelectOption(
label=value,
value=value,
)
if option in option_choices:
raise ValueError(f"select options cannot have duplicate labels.")
option_choices.append(option)
return option_choices
if not issubclass(val, Enum):
raise TypeError(
"invalid type given expected a subclass of Enum or Literal.\n"
"Note: you can hint as type `str` to mark the select as general component. "
"This means you can add options at runtime via component.with_options()."
)
set_type = None
for v in val:
if not isinstance(v.value, (str, SelectValue)):
raise TypeError(
f"select options have incompatible types. "
f"enum must contain all `str` types or `SelectValue` types. "
f"Found {type(v.value)!r}"
)
if (set_type is not None) and (type(v.value) is not set_type):
raise TypeError(
f"enum values must all be the same type. "
f"Expected type: {set_type!r} got {type(v.value)!r}"
)
else:
set_type = type(v.value)
if isinstance(v.value, SelectValue):
value = v.value
option = SelectOption(
label=value.label,
value=value.value,
emoji=value.emoji,
description=value.description,
default=value.default,
)
else:
option = SelectOption(
label=v.value,
value=v.value,
)
if option in option_choices:
raise ValueError(f"select options cannot have duplicate labels.")
option_choices.append(option)
return option_choices
|
the-stack_0_15194 | import errno
import inspect
import os
import sys
from contextlib import contextmanager
from itertools import repeat
from functools import update_wrapper
from .types import convert_type, IntRange, BOOL
from .utils import (
PacifyFlushWrapper,
make_str,
make_default_short_help,
echo,
get_os_args,
)
from .exceptions import (
ClickException,
UsageError,
BadParameter,
Abort,
MissingParameter,
Exit,
)
from .termui import prompt, confirm, style
from .formatting import HelpFormatter, join_options
from .parser import OptionParser, split_opt
from .globals import push_context, pop_context
from ._compat import PY2, isidentifier, iteritems, string_types
from ._unicodefun import _check_for_unicode_literals, _verify_python3_env
_missing = object()
SUBCOMMAND_METAVAR = "COMMAND [ARGS]..."
SUBCOMMANDS_METAVAR = "COMMAND1 [ARGS]... [COMMAND2 [ARGS]...]..."
DEPRECATED_HELP_NOTICE = " (DEPRECATED)"
DEPRECATED_INVOKE_NOTICE = (
"DeprecationWarning: " + "The command %(name)s is deprecated."
)
def _maybe_show_deprecated_notice(cmd):
if cmd.deprecated:
echo(style(DEPRECATED_INVOKE_NOTICE % {"name": cmd.name}, fg="red"), err=True)
def fast_exit(code):
"""Exit without garbage collection, this speeds up exit by about 10ms for
things like bash completion.
"""
sys.stdout.flush()
sys.stderr.flush()
os._exit(code)
def _bashcomplete(cmd, prog_name, complete_var=None):
"""Internal handler for the bash completion support."""
if complete_var is None:
complete_var = "_%s_COMPLETE" % (prog_name.replace("-", "_")).upper()
complete_instr = os.environ.get(complete_var)
if not complete_instr:
return
from ._bashcomplete import bashcomplete
if bashcomplete(cmd, prog_name, complete_var, complete_instr):
fast_exit(1)
def _check_multicommand(base_command, cmd_name, cmd, register=False):
if not base_command.chain or not isinstance(cmd, MultiCommand):
return
if register:
hint = "It is not possible to add multi commands as children to " "another multi command that is in chain mode"
else:
hint = "Found a multi command as subcommand to a multi command " "that is in chain mode. This is not supported"
raise RuntimeError(
'%s. Command "%s" is set to chain and "%s" was '
"added as subcommand but it in itself is a "
'multi command. ("%s" is a %s within a chained '
'%s named "%s").'
% (
hint,
base_command.name,
cmd_name,
cmd_name,
cmd.__class__.__name__,
base_command.__class__.__name__,
base_command.name,
)
)
def batch(iterable, batch_size):
return list(zip(*repeat(iter(iterable), batch_size)))
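# A minimal illustrative example (not part of the public API): trailing items
# that do not fill a complete batch are dropped.
def _batch_example():
    assert batch([1, 2, 3, 4, 5], 2) == [(1, 2), (3, 4)]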
def invoke_param_callback(callback, ctx, param, value):
code = getattr(callback, "__code__", None)
args = getattr(code, "co_argcount", 3)
if args < 3:
# This will become a warning in Click 3.0:
from warnings import warn
warn(
Warning(
'Invoked legacy parameter callback "%s". The new '
"signature for such callbacks starting with "
"click 2.0 is (ctx, param, value)." % callback
),
stacklevel=3,
)
return callback(ctx, value)
return callback(ctx, param, value)
@contextmanager
def augment_usage_errors(ctx, param=None):
"""Context manager that attaches extra information to exceptions that
fly.
"""
try:
yield
except BadParameter as e:
if e.ctx is None:
e.ctx = ctx
if param is not None and e.param is None:
e.param = param
raise
except UsageError as e:
if e.ctx is None:
e.ctx = ctx
raise
def iter_params_for_processing(invocation_order, declaration_order):
"""Given a sequence of parameters in the order as should be considered
for processing and an iterable of parameters that exist, this returns
a list in the correct order as they should be processed.
"""
def sort_key(item):
try:
idx = invocation_order.index(item)
except ValueError:
idx = float("inf")
return (not item.is_eager, idx)
return sorted(declaration_order, key=sort_key)
class Context(object):
"""The context is a special internal object that holds state relevant
for the script execution at every single level. It's normally invisible
to commands unless they opt-in to getting access to it.
The context is useful as it can pass internal objects around and can
control special execution features such as reading data from
environment variables.
A context can be used as context manager in which case it will call
:meth:`close` on teardown.
.. versionadded:: 2.0
Added the `resilient_parsing`, `help_option_names`,
`token_normalize_func` parameters.
.. versionadded:: 3.0
Added the `allow_extra_args` and `allow_interspersed_args`
parameters.
.. versionadded:: 4.0
Added the `color`, `ignore_unknown_options`, and
`max_content_width` parameters.
:param command: the command class for this context.
:param parent: the parent context.
:param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it is usually
the name of the script, for commands below it it's
                      the name of the command.
:param obj: an arbitrary object of user data.
:param auto_envvar_prefix: the prefix to use for automatic environment
variables. If this is `None` then reading
from environment variables is disabled. This
does not affect manually set environment
variables which are always read.
:param default_map: a dictionary (like object) with default values
for parameters.
:param terminal_width: the width of the terminal. The default is
inherit from parent context. If no context
defines the terminal width then auto
detection will be applied.
:param max_content_width: the maximum width for content rendered by
Click (this currently only affects help
pages). This defaults to 80 characters if
not overridden. In other words: even if the
terminal is larger than that, Click will not
format things wider than 80 characters by
default. In addition to that, formatters might
add some safety mapping on the right.
:param resilient_parsing: if this flag is enabled then Click will
parse without any interactivity or callback
invocation. Default values will also be
ignored. This is useful for implementing
things such as completion support.
:param allow_extra_args: if this is set to `True` then extra arguments
at the end will not raise an error and will be
kept on the context. The default is to inherit
from the command.
:param allow_interspersed_args: if this is set to `False` then options
and arguments cannot be mixed. The
default is to inherit from the command.
:param ignore_unknown_options: instructs click to ignore options it does
not know and keeps them for later
processing.
:param help_option_names: optionally a list of strings that define how
the default help parameter is named. The
default is ``['--help']``.
:param token_normalize_func: an optional function that is used to
normalize tokens (options, choices,
etc.). This for instance can be used to
implement case insensitive behavior.
:param color: controls if the terminal supports ANSI colors or not. The
default is autodetection. This is only needed if ANSI
codes are used in texts that Click prints which is by
default not the case. This for instance would affect
help output.
"""
def __init__(
self,
command,
parent=None,
info_name=None,
obj=None,
auto_envvar_prefix=None,
default_map=None,
terminal_width=None,
max_content_width=None,
resilient_parsing=False,
allow_extra_args=None,
allow_interspersed_args=None,
ignore_unknown_options=None,
help_option_names=None,
token_normalize_func=None,
color=None,
):
#: the parent context or `None` if none exists.
self.parent = parent
#: the :class:`Command` for this context.
self.command = command
#: the descriptive information name
self.info_name = info_name
#: the parsed parameters except if the value is hidden in which
#: case it's not remembered.
self.params = {}
#: the leftover arguments.
self.args = []
#: protected arguments. These are arguments that are prepended
#: to `args` when certain parsing scenarios are encountered but
#: must be never propagated to another arguments. This is used
#: to implement nested parsing.
self.protected_args = []
if obj is None and parent is not None:
obj = parent.obj
#: the user object stored.
self.obj = obj
self._meta = getattr(parent, "meta", {})
#: A dictionary (-like object) with defaults for parameters.
if (
default_map is None
and parent is not None
and parent.default_map is not None
):
default_map = parent.default_map.get(info_name)
self.default_map = default_map
#: This flag indicates if a subcommand is going to be executed. A
#: group callback can use this information to figure out if it's
#: being executed directly or because the execution flow passes
#: onwards to a subcommand. By default it's None, but it can be
#: the name of the subcommand to execute.
#:
#: If chaining is enabled this will be set to ``'*'`` in case
#: any commands are executed. It is however not possible to
#: figure out which ones. If you require this knowledge you
#: should use a :func:`resultcallback`.
self.invoked_subcommand = None
if terminal_width is None and parent is not None:
terminal_width = parent.terminal_width
#: The width of the terminal (None is autodetection).
self.terminal_width = terminal_width
if max_content_width is None and parent is not None:
max_content_width = parent.max_content_width
#: The maximum width of formatted content (None implies a sensible
#: default which is 80 for most things).
self.max_content_width = max_content_width
if allow_extra_args is None:
allow_extra_args = command.allow_extra_args
#: Indicates if the context allows extra args or if it should
#: fail on parsing.
#:
#: .. versionadded:: 3.0
self.allow_extra_args = allow_extra_args
if allow_interspersed_args is None:
allow_interspersed_args = command.allow_interspersed_args
#: Indicates if the context allows mixing of arguments and
#: options or not.
#:
#: .. versionadded:: 3.0
self.allow_interspersed_args = allow_interspersed_args
if ignore_unknown_options is None:
ignore_unknown_options = command.ignore_unknown_options
#: Instructs click to ignore options that a command does not
#: understand and will store it on the context for later
#: processing. This is primarily useful for situations where you
#: want to call into external programs. Generally this pattern is
        #: strongly discouraged because it's not possible to losslessly
#: forward all arguments.
#:
#: .. versionadded:: 4.0
self.ignore_unknown_options = ignore_unknown_options
if help_option_names is None:
if parent is not None:
help_option_names = parent.help_option_names
else:
help_option_names = ["--help"]
#: The names for the help options.
self.help_option_names = help_option_names
if token_normalize_func is None and parent is not None:
token_normalize_func = parent.token_normalize_func
#: An optional normalization function for tokens. This is
#: options, choices, commands etc.
self.token_normalize_func = token_normalize_func
#: Indicates if resilient parsing is enabled. In that case Click
#: will do its best to not cause any failures and default values
#: will be ignored. Useful for completion.
self.resilient_parsing = resilient_parsing
# If there is no envvar prefix yet, but the parent has one and
# the command on this level has a name, we can expand the envvar
# prefix automatically.
if auto_envvar_prefix is None:
if (
parent is not None
and parent.auto_envvar_prefix is not None
and self.info_name is not None
):
auto_envvar_prefix = "%s_%s" % (
parent.auto_envvar_prefix,
self.info_name.upper(),
)
else:
auto_envvar_prefix = auto_envvar_prefix.upper()
self.auto_envvar_prefix = auto_envvar_prefix
if color is None and parent is not None:
color = parent.color
#: Controls if styling output is wanted or not.
self.color = color
self._close_callbacks = []
self._depth = 0
def __enter__(self):
self._depth += 1
push_context(self)
return self
def __exit__(self, exc_type, exc_value, tb):
self._depth -= 1
if self._depth == 0:
self.close()
pop_context()
@contextmanager
def scope(self, cleanup=True):
"""This helper method can be used with the context object to promote
it to the current thread local (see :func:`get_current_context`).
The default behavior of this is to invoke the cleanup functions which
can be disabled by setting `cleanup` to `False`. The cleanup
functions are typically used for things such as closing file handles.
If the cleanup is intended the context object can also be directly
used as a context manager.
Example usage::
with ctx.scope():
assert get_current_context() is ctx
This is equivalent::
with ctx:
assert get_current_context() is ctx
.. versionadded:: 5.0
:param cleanup: controls if the cleanup functions should be run or
not. The default is to run these functions. In
some situations the context only wants to be
temporarily pushed in which case this can be disabled.
Nested pushes automatically defer the cleanup.
"""
if not cleanup:
self._depth += 1
try:
with self as rv:
yield rv
finally:
if not cleanup:
self._depth -= 1
@property
def meta(self):
"""This is a dictionary which is shared with all the contexts
that are nested. It exists so that click utilities can store some
state here if they need to. It is however the responsibility of
that code to manage this dictionary well.
The keys are supposed to be unique dotted strings. For instance
module paths are a good choice for it. What is stored in there is
irrelevant for the operation of click. However what is important is
that code that places data here adheres to the general semantics of
the system.
Example usage::
LANG_KEY = __name__ + '.lang'
def set_language(value):
ctx = get_current_context()
ctx.meta[LANG_KEY] = value
def get_language():
return get_current_context().meta.get(LANG_KEY, 'en_US')
.. versionadded:: 5.0
"""
return self._meta
def make_formatter(self):
"""Creates the formatter for the help and usage output."""
return HelpFormatter(
width=self.terminal_width, max_width=self.max_content_width
)
def call_on_close(self, f):
"""This decorator remembers a function as callback that should be
executed when the context tears down. This is most useful to bind
resource handling to the script execution. For instance, file objects
opened by the :class:`File` type will register their close callbacks
here.
:param f: the function to execute on teardown.
"""
self._close_callbacks.append(f)
return f
def close(self):
"""Invokes all close callbacks."""
for cb in self._close_callbacks:
cb()
self._close_callbacks = []
@property
def command_path(self):
"""The computed command path. This is used for the ``usage``
information on the help page. It's automatically created by
combining the info names of the chain of contexts to the root.
"""
rv = ""
if self.info_name is not None:
rv = self.info_name
if self.parent is not None:
rv = self.parent.command_path + " " + rv
return rv.lstrip()
def find_root(self):
"""Finds the outermost context."""
node = self
while node.parent is not None:
node = node.parent
return node
def find_object(self, object_type):
"""Finds the closest object of a given type."""
node = self
while node is not None:
if isinstance(node.obj, object_type):
return node.obj
node = node.parent
def ensure_object(self, object_type):
"""Like :meth:`find_object` but sets the innermost object to a
new instance of `object_type` if it does not exist.
"""
rv = self.find_object(object_type)
if rv is None:
self.obj = rv = object_type()
return rv
def lookup_default(self, name):
"""Looks up the default for a parameter name. This by default
looks into the :attr:`default_map` if available.
"""
if self.default_map is not None:
rv = self.default_map.get(name)
if callable(rv):
rv = rv()
return rv
def fail(self, message):
"""Aborts the execution of the program with a specific error
message.
:param message: the error message to fail with.
"""
raise UsageError(message, self)
def abort(self):
"""Aborts the script."""
raise Abort()
def exit(self, code=0):
"""Exits the application with a given exit code."""
raise Exit(code)
def get_usage(self):
"""Helper method to get formatted usage string for the current
context and command.
"""
return self.command.get_usage(self)
def get_help(self):
"""Helper method to get formatted help page for the current
context and command.
"""
return self.command.get_help(self)
def invoke(*args, **kwargs):
"""Invokes a command callback in exactly the way it expects. There
are two ways to invoke this method:
1. the first argument can be a callback and all other arguments and
keyword arguments are forwarded directly to the function.
2. the first argument is a click command object. In that case all
arguments are forwarded as well but proper click parameters
(options and click arguments) must be keyword arguments and Click
will fill in defaults.
Note that before Click 3.2 keyword arguments were not properly filled
in against the intention of this code and no context was created. For
more information about this change and why it was done in a bugfix
release see :ref:`upgrade-to-3.2`.
"""
self, callback = args[:2]
ctx = self
# It's also possible to invoke another command which might or
# might not have a callback. In that case we also fill
# in defaults and make a new context for this command.
if isinstance(callback, Command):
other_cmd = callback
callback = other_cmd.callback
ctx = Context(other_cmd, info_name=other_cmd.name, parent=self)
if callback is None:
raise TypeError(
"The given command does not have a " "callback that can be invoked."
)
for param in other_cmd.params:
if param.name not in kwargs and param.expose_value:
kwargs[param.name] = param.get_default(ctx)
args = args[2:]
with augment_usage_errors(self):
with ctx:
return callback(*args, **kwargs)
def forward(*args, **kwargs):
"""Similar to :meth:`invoke` but fills in default keyword
arguments from the current context if the other command expects
it. This cannot invoke callbacks directly, only other commands.
"""
self, cmd = args[:2]
# It's also possible to invoke another command which might or
# might not have a callback.
if not isinstance(cmd, Command):
raise TypeError("Callback is not a command.")
for param in self.params:
if param not in kwargs:
kwargs[param] = self.params[param]
return self.invoke(cmd, **kwargs)
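    # Illustrative usage sketch (the decorators come from click.decorators):
    #
    #   @click.command()
    #   @click.option('--count', default=1)
    #   def test(count):
    #       ...
    #
    #   @click.command()
    #   @click.pass_context
    #   def dist(ctx):
    #       ctx.forward(test)            # re-use the current context's parameter values
    #       ctx.invoke(test, count=42)   # explicitly pass / fill parameters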
class BaseCommand(object):
"""The base command implements the minimal API contract of commands.
Most code will never use this as it does not implement a lot of useful
functionality but it can act as the direct subclass of alternative
parsing methods that do not depend on the Click parser.
For instance, this can be used to bridge Click and other systems like
argparse or docopt.
Because base commands do not implement a lot of the API that other
parts of Click take for granted, they are not supported for all
operations. For instance, they cannot be used with the decorators
usually and they have no built-in callback system.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
"""
#: the default for the :attr:`Context.allow_extra_args` flag.
allow_extra_args = False
#: the default for the :attr:`Context.allow_interspersed_args` flag.
allow_interspersed_args = True
#: the default for the :attr:`Context.ignore_unknown_options` flag.
ignore_unknown_options = False
def __init__(self, name, context_settings=None):
#: the name the command thinks it has. Upon registering a command
#: on a :class:`Group` the group will default the command name
#: with this information. You should instead use the
#: :class:`Context`\'s :attr:`~Context.info_name` attribute.
self.name = name
if context_settings is None:
context_settings = {}
#: an optional dictionary with defaults passed to the context.
self.context_settings = context_settings
def get_usage(self, ctx):
raise NotImplementedError("Base commands cannot get usage")
def get_help(self, ctx):
raise NotImplementedError("Base commands cannot get help")
def make_context(self, info_name, args, parent=None, **extra):
"""This function when given an info name and arguments will kick
off the parsing and create a new :class:`Context`. It does not
invoke the actual command callback though.
        :param info_name: the info name for this invocation. Generally this
is the most descriptive name for the script or
command. For the toplevel script it's usually
the name of the script, for commands below it it's
                          the name of the command.
:param args: the arguments to parse as list of strings.
:param parent: the parent context if available.
:param extra: extra keyword arguments forwarded to the context
constructor.
"""
for key, value in iteritems(self.context_settings):
if key not in extra:
extra[key] = value
ctx = Context(self, info_name=info_name, parent=parent, **extra)
with ctx.scope(cleanup=False):
self.parse_args(ctx, args)
return ctx
def parse_args(self, ctx, args):
"""Given a context and a list of arguments this creates the parser
and parses the arguments, then modifies the context as necessary.
This is automatically invoked by :meth:`make_context`.
"""
raise NotImplementedError(
"Base commands do not know how to parse " "arguments."
)
def invoke(self, ctx):
"""Given a context, this invokes the command. The default
implementation is raising a not implemented error.
"""
raise NotImplementedError("Base commands are not invokable by default")
def main(
self,
args=None,
prog_name=None,
complete_var=None,
standalone_mode=True,
**extra
):
"""This is the way to invoke a script with all the bells and
whistles as a command line application. This will always terminate
the application after a call. If this is not wanted, ``SystemExit``
needs to be caught.
This method is also available by directly calling the instance of
a :class:`Command`.
.. versionadded:: 3.0
Added the `standalone_mode` flag to control the standalone mode.
:param args: the arguments that should be used for parsing. If not
provided, ``sys.argv[1:]`` is used.
:param prog_name: the program name that should be used. By default
the program name is constructed by taking the file
name from ``sys.argv[0]``.
:param complete_var: the environment variable that controls the
bash completion support. The default is
``"_<prog_name>_COMPLETE"`` with prog_name in
uppercase.
:param standalone_mode: the default behavior is to invoke the script
in standalone mode. Click will then
handle exceptions and convert them into
error messages and the function will never
return but shut down the interpreter. If
this is set to `False` they will be
propagated to the caller and the return
value of this function is the return value
of :meth:`invoke`.
:param extra: extra keyword arguments are forwarded to the context
constructor. See :class:`Context` for more information.
"""
# If we are in Python 3, we will verify that the environment is
# sane at this point or reject further execution to avoid a
# broken script.
if not PY2:
_verify_python3_env()
else:
_check_for_unicode_literals()
if args is None:
args = get_os_args()
else:
args = list(args)
if prog_name is None:
prog_name = make_str(os.path.basename(sys.argv and sys.argv[0] or __file__))
# Hook for the Bash completion. This only activates if the Bash
# completion is actually enabled, otherwise this is quite a fast
# noop.
_bashcomplete(self, prog_name, complete_var)
try:
try:
with self.make_context(prog_name, args, **extra) as ctx:
rv = self.invoke(ctx)
if not standalone_mode:
return rv
# it's not safe to `ctx.exit(rv)` here!
# note that `rv` may actually contain data like "1" which
# has obvious effects
# more subtle case: `rv=[None, None]` can come out of
# chained commands which all returned `None` -- so it's not
# even always obvious that `rv` indicates success/failure
# by its truthiness/falsiness
ctx.exit()
except (EOFError, KeyboardInterrupt):
echo(file=sys.stderr)
raise Abort()
except ClickException as e:
if not standalone_mode:
raise
e.show()
sys.exit(e.exit_code)
except IOError as e:
if e.errno == errno.EPIPE:
sys.stdout = PacifyFlushWrapper(sys.stdout)
sys.stderr = PacifyFlushWrapper(sys.stderr)
sys.exit(1)
else:
raise
except Exit as e:
if standalone_mode:
sys.exit(e.exit_code)
else:
# in non-standalone mode, return the exit code
# note that this is only reached if `self.invoke` above raises
# an Exit explicitly -- thus bypassing the check there which
# would return its result
# the results of non-standalone execution may therefore be
# somewhat ambiguous: if there are codepaths which lead to
# `ctx.exit(1)` and to `return 1`, the caller won't be able to
# tell the difference between the two
return e.exit_code
except Abort:
if not standalone_mode:
raise
echo("Aborted!", file=sys.stderr)
sys.exit(1)
def __call__(self, *args, **kwargs):
"""Alias for :meth:`main`."""
return self.main(*args, **kwargs)
class Command(BaseCommand):
"""Commands are the basic building block of command line interfaces in
Click. A basic command handles command line parsing and might dispatch
more parsing to commands nested below it.
.. versionchanged:: 2.0
Added the `context_settings` parameter.
:param name: the name of the command to use unless a group overrides it.
:param context_settings: an optional dictionary with defaults that are
passed to the context object.
:param callback: the callback to invoke. This is optional.
:param params: the parameters to register with this command. This can
be either :class:`Option` or :class:`Argument` objects.
:param help: the help string to use for this command.
:param epilog: like the help string but it's printed at the end of the
help page after everything else.
:param short_help: the short help to use for this command. This is
shown on the command listing of the parent command.
:param add_help_option: by default each command registers a ``--help``
option. This can be disabled by this parameter.
:param hidden: hide this command from help outputs.
:param deprecated: issues a message indicating that
the command is deprecated.
"""
def __init__(
self,
name,
context_settings=None,
callback=None,
params=None,
help=None,
epilog=None,
short_help=None,
options_metavar="[OPTIONS]",
add_help_option=True,
hidden=False,
deprecated=False,
):
BaseCommand.__init__(self, name, context_settings)
#: the callback to execute when the command fires. This might be
#: `None` in which case nothing happens.
self.callback = callback
#: the list of parameters for this command in the order they
#: should show up in the help page and execute. Eager parameters
#: will automatically be handled before non eager ones.
self.params = params or []
# if a form feed (page break) is found in the help text, truncate help
# text to the content preceding the first form feed
if help and "\f" in help:
help = help.split("\f", 1)[0]
self.help = help
self.epilog = epilog
self.options_metavar = options_metavar
self.short_help = short_help
self.add_help_option = add_help_option
self.hidden = hidden
self.deprecated = deprecated
def get_usage(self, ctx):
formatter = ctx.make_formatter()
self.format_usage(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_params(self, ctx):
rv = self.params
help_option = self.get_help_option(ctx)
if help_option is not None:
rv = rv + [help_option]
return rv
def format_usage(self, ctx, formatter):
"""Writes the usage line into the formatter."""
pieces = self.collect_usage_pieces(ctx)
formatter.write_usage(ctx.command_path, " ".join(pieces))
    def collect_usage_pieces(self, ctx):
        """Returns all the pieces that go into the usage line as a list
        of strings.
"""
rv = [self.options_metavar]
for param in self.get_params(ctx):
rv.extend(param.get_usage_pieces(ctx))
return rv
def get_help_option_names(self, ctx):
"""Returns the names for the help option."""
all_names = set(ctx.help_option_names)
for param in self.params:
all_names.difference_update(param.opts)
all_names.difference_update(param.secondary_opts)
return all_names
def get_help_option(self, ctx):
"""Returns the help option object."""
help_options = self.get_help_option_names(ctx)
if not help_options or not self.add_help_option:
return
def show_help(ctx, param, value):
if value and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
return Option(
help_options,
is_flag=True,
is_eager=True,
expose_value=False,
callback=show_help,
help="Show this message and exit.",
)
def make_parser(self, ctx):
"""Creates the underlying option parser for this command."""
parser = OptionParser(ctx)
for param in self.get_params(ctx):
param.add_to_parser(parser, ctx)
return parser
def get_help(self, ctx):
"""Formats the help into a string and returns it. This creates a
formatter and will call into the following formatting methods:
"""
formatter = ctx.make_formatter()
self.format_help(ctx, formatter)
return formatter.getvalue().rstrip("\n")
def get_short_help_str(self, limit=45):
"""Gets short help for the command or makes it by shortening the long help string."""
return (
self.short_help
or self.help
and make_default_short_help(self.help, limit)
or ""
)
def format_help(self, ctx, formatter):
"""Writes the help into the formatter if it exists.
This calls into the following methods:
- :meth:`format_usage`
- :meth:`format_help_text`
- :meth:`format_options`
- :meth:`format_epilog`
"""
self.format_usage(ctx, formatter)
self.format_help_text(ctx, formatter)
self.format_options(ctx, formatter)
self.format_epilog(ctx, formatter)
def format_help_text(self, ctx, formatter):
"""Writes the help text to the formatter if it exists."""
if self.help:
formatter.write_paragraph()
with formatter.indentation():
help_text = self.help
if self.deprecated:
help_text += DEPRECATED_HELP_NOTICE
formatter.write_text(help_text)
elif self.deprecated:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(DEPRECATED_HELP_NOTICE)
def format_options(self, ctx, formatter):
"""Writes all the options into the formatter if they exist."""
opts = []
for param in self.get_params(ctx):
rv = param.get_help_record(ctx)
if rv is not None:
opts.append(rv)
if opts:
with formatter.section("Options"):
formatter.write_dl(opts)
def format_epilog(self, ctx, formatter):
"""Writes the epilog into the formatter if it exists."""
if self.epilog:
formatter.write_paragraph()
with formatter.indentation():
formatter.write_text(self.epilog)
def parse_args(self, ctx, args):
parser = self.make_parser(ctx)
opts, args, param_order = parser.parse_args(args=args)
for param in iter_params_for_processing(param_order, self.get_params(ctx)):
value, args = param.handle_parse_result(ctx, opts, args)
if args and not ctx.allow_extra_args and not ctx.resilient_parsing:
ctx.fail(
"Got unexpected extra argument%s (%s)"
% (len(args) != 1 and "s" or "", " ".join(map(make_str, args)))
)
ctx.args = args
return args
def invoke(self, ctx):
"""Given a context, this invokes the attached callback (if it exists)
in the right way.
"""
_maybe_show_deprecated_notice(self)
if self.callback is not None:
return ctx.invoke(self.callback, **ctx.params)
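# Illustrative sketch (editor's addition, kept as a comment): a command can also be
# built directly from this class rather than through the decorator helpers. The names
# ``greet`` and ``cmd`` are hypothetical.
#
#     import click
#
#     def greet(name):
#         click.echo("Hello %s!" % name)
#
#     cmd = click.Command("greet", callback=greet,
#                         params=[click.Option(["--name"], default="world")])
#     cmd.main(["--name", "Ada"], standalone_mode=False)   # prints "Hello Ada!"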
class MultiCommand(Command):
"""A multi command is the basic implementation of a command that
dispatches to subcommands. The most common version is the
:class:`Group`.
:param invoke_without_command: this controls how the multi command itself
is invoked. By default it's only invoked
if a subcommand is provided.
:param no_args_is_help: this controls what happens if no arguments are
                            provided. It defaults to the opposite of
                            `invoke_without_command`. If enabled, ``--help``
                            is added as the argument when no arguments are
                            passed.
:param subcommand_metavar: the string that is used in the documentation
to indicate the subcommand place.
:param chain: if this is set to `True` chaining of multiple subcommands
is enabled. This restricts the form of commands in that
they cannot have optional arguments but it allows
multiple commands to be chained together.
:param result_callback: the result callback to attach to this multi
command.
"""
allow_extra_args = True
allow_interspersed_args = False
def __init__(
self,
name=None,
invoke_without_command=False,
no_args_is_help=None,
subcommand_metavar=None,
chain=False,
result_callback=None,
**attrs
):
Command.__init__(self, name, **attrs)
if no_args_is_help is None:
no_args_is_help = not invoke_without_command
self.no_args_is_help = no_args_is_help
self.invoke_without_command = invoke_without_command
if subcommand_metavar is None:
if chain:
subcommand_metavar = SUBCOMMANDS_METAVAR
else:
subcommand_metavar = SUBCOMMAND_METAVAR
self.subcommand_metavar = subcommand_metavar
self.chain = chain
#: The result callback that is stored. This can be set or
#: overridden with the :func:`resultcallback` decorator.
self.result_callback = result_callback
if self.chain:
for param in self.params:
if isinstance(param, Argument) and not param.required:
raise RuntimeError(
"Multi commands in chain mode cannot "
"have optional arguments."
)
def collect_usage_pieces(self, ctx):
rv = Command.collect_usage_pieces(self, ctx)
rv.append(self.subcommand_metavar)
return rv
def format_options(self, ctx, formatter):
Command.format_options(self, ctx, formatter)
self.format_commands(ctx, formatter)
def resultcallback(self, replace=False):
"""Adds a result callback to the chain command. By default if a
result callback is already registered this will chain them but
this can be disabled with the `replace` parameter. The result
callback is invoked with the return value of the subcommand
(or the list of return values from all subcommands if chaining
is enabled) as well as the parameters as they would be passed
to the main callback.
Example::
@click.group()
@click.option('-i', '--input', default=23)
def cli(input):
return 42
@cli.resultcallback()
def process_result(result, input):
return result + input
.. versionadded:: 3.0
:param replace: if set to `True` an already existing result
callback will be removed.
"""
def decorator(f):
old_callback = self.result_callback
if old_callback is None or replace:
self.result_callback = f
return f
def function(__value, *args, **kwargs):
return f(old_callback(__value, *args, **kwargs), *args, **kwargs)
self.result_callback = rv = update_wrapper(function, f)
return rv
return decorator
    def format_commands(self, ctx, formatter):
        """Extra format method for multi commands that adds all the commands
        after the options.
"""
commands = []
for subcommand in self.list_commands(ctx):
cmd = self.get_command(ctx, subcommand)
            # list_commands() returned a name that get_command() cannot resolve; skip it.
if cmd is None:
continue
if cmd.hidden:
continue
commands.append((subcommand, cmd))
# allow for 3 times the default spacing
if len(commands):
limit = formatter.width - 6 - max(len(cmd[0]) for cmd in commands)
rows = []
for subcommand, cmd in commands:
help = cmd.get_short_help_str(limit)
rows.append((subcommand, help))
if rows:
with formatter.section("Commands"):
formatter.write_dl(rows)
def parse_args(self, ctx, args):
if not args and self.no_args_is_help and not ctx.resilient_parsing:
echo(ctx.get_help(), color=ctx.color)
ctx.exit()
rest = Command.parse_args(self, ctx, args)
if self.chain:
ctx.protected_args = rest
ctx.args = []
elif rest:
ctx.protected_args, ctx.args = rest[:1], rest[1:]
return ctx.args
def invoke(self, ctx):
def _process_result(value):
if self.result_callback is not None:
value = ctx.invoke(self.result_callback, value, **ctx.params)
return value
if not ctx.protected_args:
# If we are invoked without command the chain flag controls
# how this happens. If we are not in chain mode, the return
# value here is the return value of the command.
# If however we are in chain mode, the return value is the
# return value of the result processor invoked with an empty
# list (which means that no subcommand actually was executed).
if self.invoke_without_command:
if not self.chain:
return Command.invoke(self, ctx)
with ctx:
Command.invoke(self, ctx)
return _process_result([])
ctx.fail("Missing command.")
# Fetch args back out
args = ctx.protected_args + ctx.args
ctx.args = []
ctx.protected_args = []
# If we're not in chain mode, we only allow the invocation of a
# single command but we also inform the current context about the
# name of the command to invoke.
if not self.chain:
# Make sure the context is entered so we do not clean up
# resources until the result processor has worked.
with ctx:
cmd_name, cmd, args = self.resolve_command(ctx, args)
ctx.invoked_subcommand = cmd_name
Command.invoke(self, ctx)
sub_ctx = cmd.make_context(cmd_name, args, parent=ctx)
with sub_ctx:
return _process_result(sub_ctx.command.invoke(sub_ctx))
# In chain mode we create the contexts step by step, but after the
# base command has been invoked. Because at that point we do not
# know the subcommands yet, the invoked subcommand attribute is
# set to ``*`` to inform the command that subcommands are executed
# but nothing else.
with ctx:
ctx.invoked_subcommand = args and "*" or None
Command.invoke(self, ctx)
# Otherwise we make every single context and invoke them in a
# chain. In that case the return value to the result processor
# is the list of all invoked subcommand's results.
contexts = []
while args:
cmd_name, cmd, args = self.resolve_command(ctx, args)
sub_ctx = cmd.make_context(
cmd_name,
args,
parent=ctx,
allow_extra_args=True,
allow_interspersed_args=False,
)
contexts.append(sub_ctx)
args, sub_ctx.args = sub_ctx.args, []
rv = []
for sub_ctx in contexts:
with sub_ctx:
rv.append(sub_ctx.command.invoke(sub_ctx))
return _process_result(rv)
def resolve_command(self, ctx, args):
cmd_name = make_str(args[0])
original_cmd_name = cmd_name
# Get the command
cmd = self.get_command(ctx, cmd_name)
# If we can't find the command but there is a normalization
# function available, we try with that one.
if cmd is None and ctx.token_normalize_func is not None:
cmd_name = ctx.token_normalize_func(cmd_name)
cmd = self.get_command(ctx, cmd_name)
# If we don't find the command we want to show an error message
# to the user that it was not provided. However, there is
# something else we should do: if the first argument looks like
# an option we want to kick off parsing again for arguments to
# resolve things like --help which now should go to the main
# place.
if cmd is None and not ctx.resilient_parsing:
if split_opt(cmd_name)[0]:
self.parse_args(ctx, ctx.args)
ctx.fail('No such command "%s".' % original_cmd_name)
return cmd_name, cmd, args[1:]
def get_command(self, ctx, cmd_name):
"""Given a context and a command name, this returns a
:class:`Command` object if it exists or returns `None`.
"""
raise NotImplementedError()
def list_commands(self, ctx):
"""Returns a list of subcommand names in the order they should
appear.
"""
return []
class Group(MultiCommand):
"""A group allows a command to have subcommands attached. This is the
most common way to implement nesting in Click.
:param commands: a dictionary of commands.
"""
def __init__(self, name=None, commands=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: the registered subcommands by their exported names.
self.commands = commands or {}
def add_command(self, cmd, name=None):
"""Registers another :class:`Command` with this group. If the name
is not provided, the name of the command is used.
"""
name = name or cmd.name
if name is None:
raise TypeError("Command has no name.")
_check_multicommand(self, name, cmd, register=True)
self.commands[name] = cmd
def command(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a command to
the group. This takes the same arguments as :func:`command` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = command(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def group(self, *args, **kwargs):
"""A shortcut decorator for declaring and attaching a group to
the group. This takes the same arguments as :func:`group` but
immediately registers the created command with this instance by
calling into :meth:`add_command`.
"""
def decorator(f):
cmd = group(*args, **kwargs)(f)
self.add_command(cmd)
return cmd
return decorator
def get_command(self, ctx, cmd_name):
return self.commands.get(cmd_name)
def list_commands(self, ctx):
return sorted(self.commands)
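# Illustrative sketch (editor's addition, kept as a comment): typical Group usage via
# the shortcut decorators above. The names ``cli`` and ``sync`` are hypothetical.
#
#     @click.group()
#     def cli():
#         pass
#
#     @cli.command()
#     def sync():
#         click.echo("syncing")
#
#     # cli(["sync"]) resolves "sync" through get_command() and dispatches to it.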
class CommandCollection(MultiCommand):
"""A command collection is a multi command that merges multiple multi
commands together into one. This is a straightforward implementation
that accepts a list of different multi commands as sources and
provides all the commands for each of them.
"""
def __init__(self, name=None, sources=None, **attrs):
MultiCommand.__init__(self, name, **attrs)
#: The list of registered multi commands.
self.sources = sources or []
def add_source(self, multi_cmd):
"""Adds a new multi command to the chain dispatcher."""
self.sources.append(multi_cmd)
def get_command(self, ctx, cmd_name):
for source in self.sources:
rv = source.get_command(ctx, cmd_name)
if rv is not None:
if self.chain:
_check_multicommand(self, cmd_name, rv)
return rv
def list_commands(self, ctx):
rv = set()
for source in self.sources:
rv.update(source.list_commands(ctx))
return sorted(rv)
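# Illustrative sketch (editor's addition, kept as a comment): merging two hypothetical
# groups so their subcommands are served from one entry point.
#
#     cli = CommandCollection(sources=[group_one, group_two])
#     # list_commands() yields the union of both groups; get_command() checks the
#     # sources in the order they were added.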
class Parameter(object):
r"""A parameter to a command comes in two versions: they are either
:class:`Option`\s or :class:`Argument`\s. Other subclasses are currently
not supported by design as some of the internals for parsing are
intentionally not finalized.
Some settings are supported by both options and arguments.
.. versionchanged:: 2.0
Changed signature for parameter callback to also be passed the
parameter. In Click 2.0, the old callback format will still work,
        but it will raise a warning to give you a chance to migrate the
        code more easily.
:param param_decls: the parameter declarations for this option or
argument. This is a list of flags or argument
names.
:param type: the type that should be used. Either a :class:`ParamType`
                 or a Python type. The latter is converted into the former
automatically if supported.
:param required: controls if this is optional or not.
:param default: the default value if omitted. This can also be a callable,
in which case it's invoked when the default is needed
without any arguments.
:param callback: a callback that should be executed after the parameter
was matched. This is called as ``fn(ctx, param,
value)`` and needs to return the value. Before Click
2.0, the signature was ``(ctx, value)``.
:param nargs: the number of arguments to match. If not ``1`` the return
value is a tuple instead of single value. The default for
nargs is ``1`` (except if the type is a tuple, then it's
the arity of the tuple).
:param metavar: how the value is represented in the help page.
:param expose_value: if this is `True` then the value is passed onwards
to the command callback and stored on the context,
otherwise it's skipped.
:param is_eager: eager values are processed before non eager ones. This
                     should not be set for arguments or it will invert the
order of processing.
:param envvar: a string or list of strings that are environment variables
that should be checked.
"""
param_type_name = "parameter"
def __init__(
self,
param_decls=None,
type=None,
required=False,
default=None,
callback=None,
nargs=None,
metavar=None,
expose_value=True,
is_eager=False,
envvar=None,
autocompletion=None,
):
self.name, self.opts, self.secondary_opts = self._parse_decls(
param_decls or (), expose_value
)
self.type = convert_type(type, default)
# Default nargs to what the type tells us if we have that
# information available.
if nargs is None:
if self.type.is_composite:
nargs = self.type.arity
else:
nargs = 1
self.required = required
self.callback = callback
self.nargs = nargs
self.multiple = False
self.expose_value = expose_value
self.default = default
self.is_eager = is_eager
self.metavar = metavar
self.envvar = envvar
self.autocompletion = autocompletion
@property
def human_readable_name(self):
"""Returns the human readable name of this parameter. This is the
same as the name for options, but the metavar for arguments.
"""
return self.name
def make_metavar(self):
if self.metavar is not None:
return self.metavar
metavar = self.type.get_metavar(self)
if metavar is None:
metavar = self.type.name.upper()
if self.nargs != 1:
metavar += "..."
return metavar
def get_default(self, ctx):
"""Given a context variable this calculates the default value."""
# Otherwise go with the regular default.
if callable(self.default):
rv = self.default()
else:
rv = self.default
return self.type_cast_value(ctx, rv)
def add_to_parser(self, parser, ctx):
pass
def consume_value(self, ctx, opts):
value = opts.get(self.name)
if value is None:
value = self.value_from_envvar(ctx)
if value is None:
value = ctx.lookup_default(self.name)
return value
def type_cast_value(self, ctx, value):
"""Given a value this runs it properly through the type system.
This automatically handles things like `nargs` and `multiple` as
well as composite types.
"""
if self.type.is_composite:
if self.nargs <= 1:
raise TypeError(
"Attempted to invoke composite type "
"but nargs has been set to %s. This is "
"not supported; nargs needs to be set to "
"a fixed value > 1." % self.nargs
)
if self.multiple:
return tuple(self.type(x or (), self, ctx) for x in value or ())
return self.type(value or (), self, ctx)
def _convert(value, level):
if level == 0:
return self.type(value, self, ctx)
return tuple(_convert(x, level - 1) for x in value or ())
return _convert(value, (self.nargs != 1) + bool(self.multiple))
def process_value(self, ctx, value):
"""Given a value and context this runs the logic to convert the
value as necessary.
"""
# If the value we were given is None we do nothing. This way
# code that calls this can easily figure out if something was
# not provided. Otherwise it would be converted into an empty
# tuple for multiple invocations which is inconvenient.
if value is not None:
return self.type_cast_value(ctx, value)
def value_is_missing(self, value):
if value is None:
return True
if (self.nargs != 1 or self.multiple) and value == ():
return True
return False
def full_process_value(self, ctx, value):
value = self.process_value(ctx, value)
if value is None and not ctx.resilient_parsing:
value = self.get_default(ctx)
if self.required and self.value_is_missing(value):
raise MissingParameter(ctx=ctx, param=self)
return value
def resolve_envvar_value(self, ctx):
if self.envvar is None:
return
if isinstance(self.envvar, (tuple, list)):
for envvar in self.envvar:
rv = os.environ.get(envvar)
if rv is not None:
return rv
else:
return os.environ.get(self.envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is not None and self.nargs != 1:
rv = self.type.split_envvar_value(rv)
return rv
def handle_parse_result(self, ctx, opts, args):
with augment_usage_errors(ctx, param=self):
value = self.consume_value(ctx, opts)
try:
value = self.full_process_value(ctx, value)
except Exception:
if not ctx.resilient_parsing:
raise
value = None
if self.callback is not None:
try:
value = invoke_param_callback(self.callback, ctx, self, value)
except Exception:
if not ctx.resilient_parsing:
raise
if self.expose_value:
ctx.params[self.name] = value
return value, args
def get_help_record(self, ctx):
pass
def get_usage_pieces(self, ctx):
return []
def get_error_hint(self, ctx):
"""Get a stringified version of the param for use in error messages to
indicate which param caused the error.
"""
hint_list = self.opts or [self.human_readable_name]
return " / ".join('"%s"' % x for x in hint_list)
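# Worked example (editor's addition, hypothetical values): in ``type_cast_value`` above,
# an option with ``nargs=2`` and ``multiple=True`` converts two levels deep, so with an
# integer type a raw value of (("1", "2"), ("3", "4")) becomes ((1, 2), (3, 4)).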
class Option(Parameter):
"""Options are usually optional values on the command line and
have some extra features that arguments don't have.
All other parameters are passed onwards to the parameter constructor.
:param show_default: controls if the default value should be shown on the
help page. Normally, defaults are not shown. If this
value is a string, it shows the string instead of the
value. This is particularly useful for dynamic options.
:param show_envvar: controls if an environment variable should be shown on
the help page. Normally, environment variables
are not shown.
:param prompt: if set to `True` or a non empty string then the user will be
prompted for input. If set to `True` the prompt will be the
option name capitalized.
:param confirmation_prompt: if set then the value will need to be confirmed
if it was prompted for.
:param hide_input: if this is `True` then the input on the prompt will be
hidden from the user. This is useful for password
input.
:param is_flag: forces this option to act as a flag. The default is
auto detection.
:param flag_value: which value should be used for this flag if it's
enabled. This is set to a boolean automatically if
the option string contains a slash to mark two options.
:param multiple: if this is set to `True` then the argument is accepted
multiple times and recorded. This is similar to ``nargs``
                     in how it works but supports an arbitrary number of
arguments.
:param count: this flag makes an option increment an integer.
:param allow_from_autoenv: if this is enabled then the value of this
parameter will be pulled from an environment
variable in case a prefix is defined on the
context.
:param help: the help string.
:param hidden: hide this option from help outputs.
"""
param_type_name = "option"
def __init__(
self,
param_decls=None,
show_default=False,
prompt=False,
confirmation_prompt=False,
hide_input=False,
is_flag=None,
flag_value=None,
multiple=False,
count=False,
allow_from_autoenv=True,
type=None,
help=None,
hidden=False,
show_choices=True,
show_envvar=False,
**attrs
):
default_is_missing = attrs.get("default", _missing) is _missing
Parameter.__init__(self, param_decls, type=type, **attrs)
if prompt is True:
prompt_text = self.name.replace("_", " ").capitalize()
elif prompt is False:
prompt_text = None
else:
prompt_text = prompt
self.prompt = prompt_text
self.confirmation_prompt = confirmation_prompt
self.hide_input = hide_input
self.hidden = hidden
# Flags
if is_flag is None:
if flag_value is not None:
is_flag = True
else:
is_flag = bool(self.secondary_opts)
if is_flag and default_is_missing:
self.default = False
if flag_value is None:
flag_value = not self.default
self.is_flag = is_flag
self.flag_value = flag_value
if self.is_flag and isinstance(self.flag_value, bool) and type is None:
self.type = BOOL
self.is_bool_flag = True
else:
self.is_bool_flag = False
# Counting
self.count = count
if count:
if type is None:
self.type = IntRange(min=0)
if default_is_missing:
self.default = 0
self.multiple = multiple
self.allow_from_autoenv = allow_from_autoenv
self.help = help
self.show_default = show_default
self.show_choices = show_choices
self.show_envvar = show_envvar
# Sanity check for stuff we don't support
if __debug__:
if self.nargs < 0:
raise TypeError("Options cannot have nargs < 0")
if self.prompt and self.is_flag and not self.is_bool_flag:
raise TypeError("Cannot prompt for flags that are not bools.")
if not self.is_bool_flag and self.secondary_opts:
raise TypeError("Got secondary option for non boolean flag.")
if self.is_bool_flag and self.hide_input and self.prompt is not None:
raise TypeError(
"Hidden input does not work with boolean " "flag prompts."
)
if self.count:
if self.multiple:
raise TypeError(
"Options cannot be multiple and count " "at the same time."
)
elif self.is_flag:
raise TypeError(
"Options cannot be count and flags at " "the same time."
)
def _parse_decls(self, decls, expose_value):
opts = []
secondary_opts = []
name = None
possible_names = []
for decl in decls:
if isidentifier(decl):
if name is not None:
raise TypeError("Name defined twice")
name = decl
else:
split_char = decl[:1] == "/" and ";" or "/"
if split_char in decl:
first, second = decl.split(split_char, 1)
first = first.rstrip()
if first:
possible_names.append(split_opt(first))
opts.append(first)
second = second.lstrip()
if second:
secondary_opts.append(second.lstrip())
else:
possible_names.append(split_opt(decl))
opts.append(decl)
if name is None and possible_names:
possible_names.sort(key=lambda x: -len(x[0])) # group long options first
name = possible_names[0][1].replace("-", "_").lower()
if not isidentifier(name):
name = None
if name is None:
if not expose_value:
return None, opts, secondary_opts
raise TypeError("Could not determine name for option")
if not opts and not secondary_opts:
raise TypeError(
"No options defined but a name was passed (%s). "
"Did you mean to declare an argument instead "
"of an option?" % name
)
return name, opts, secondary_opts
def add_to_parser(self, parser, ctx):
kwargs = {"dest": self.name, "nargs": self.nargs, "obj": self}
if self.multiple:
action = "append"
elif self.count:
action = "count"
else:
action = "store"
if self.is_flag:
kwargs.pop("nargs", None)
if self.is_bool_flag and self.secondary_opts:
parser.add_option(
self.opts, action=action + "_const", const=True, **kwargs
)
parser.add_option(
self.secondary_opts, action=action + "_const", const=False, **kwargs
)
else:
parser.add_option(
self.opts, action=action + "_const", const=self.flag_value, **kwargs
)
else:
kwargs["action"] = action
parser.add_option(self.opts, **kwargs)
def get_help_record(self, ctx):
if self.hidden:
return
any_prefix_is_slash = []
def _write_opts(opts):
rv, any_slashes = join_options(opts)
if any_slashes:
any_prefix_is_slash[:] = [True]
if not self.is_flag and not self.count:
rv += " " + self.make_metavar()
return rv
rv = [_write_opts(self.opts)]
if self.secondary_opts:
rv.append(_write_opts(self.secondary_opts))
help = self.help or ""
extra = []
if self.show_envvar:
envvar = self.envvar
if envvar is None:
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = "%s_%s" % (ctx.auto_envvar_prefix, self.name.upper())
if envvar is not None:
extra.append(
"env var: %s"
% (
", ".join("%s" % d for d in envvar)
if isinstance(envvar, (list, tuple))
else envvar,
)
)
if self.default is not None and self.show_default:
if isinstance(self.show_default, string_types):
default_string = "({})".format(self.show_default)
elif isinstance(self.default, (list, tuple)):
default_string = ", ".join("%s" % d for d in self.default)
elif inspect.isfunction(self.default):
default_string = "(dynamic)"
else:
default_string = self.default
extra.append("default: {}".format(default_string))
if self.required:
extra.append("required")
if extra:
help = "%s[%s]" % (help and help + " " or "", "; ".join(extra))
return ((any_prefix_is_slash and "; " or " / ").join(rv), help)
def get_default(self, ctx):
        # If we're a non-boolean flag, our default is more complex because
        # we need to look at all flags in the same group to figure out
        # if we're the default one, in which case we return the flag
# value as default.
if self.is_flag and not self.is_bool_flag:
for param in ctx.command.params:
if param.name == self.name and param.default:
return param.flag_value
return None
return Parameter.get_default(self, ctx)
def prompt_for_value(self, ctx):
"""This is an alternative flow that can be activated in the full
value processing if a value does not exist. It will prompt the
user until a valid value exists and then returns the processed
value as result.
"""
# Calculate the default before prompting anything to be stable.
default = self.get_default(ctx)
# If this is a prompt for a flag we need to handle this
# differently.
if self.is_bool_flag:
return confirm(self.prompt, default)
return prompt(
self.prompt,
default=default,
type=self.type,
hide_input=self.hide_input,
show_choices=self.show_choices,
confirmation_prompt=self.confirmation_prompt,
value_proc=lambda x: self.process_value(ctx, x),
)
def resolve_envvar_value(self, ctx):
rv = Parameter.resolve_envvar_value(self, ctx)
if rv is not None:
return rv
if self.allow_from_autoenv and ctx.auto_envvar_prefix is not None:
envvar = "%s_%s" % (ctx.auto_envvar_prefix, self.name.upper())
return os.environ.get(envvar)
def value_from_envvar(self, ctx):
rv = self.resolve_envvar_value(ctx)
if rv is None:
return None
value_depth = (self.nargs != 1) + bool(self.multiple)
if value_depth > 0 and rv is not None:
rv = self.type.split_envvar_value(rv)
if self.multiple and self.nargs != 1:
rv = batch(rv, self.nargs)
return rv
def full_process_value(self, ctx, value):
if value is None and self.prompt is not None and not ctx.resilient_parsing:
return self.prompt_for_value(ctx)
return Parameter.full_process_value(self, ctx, value)
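# Illustrative sketch (editor's addition, kept as a comment): a boolean flag declared
# with a slash produces the secondary opts handled by ``_parse_decls`` above. The name
# ``cli`` is hypothetical.
#
#     @click.command()
#     @click.option("--shout/--no-shout", default=False)
#     def cli(shout):
#         click.echo("LOUD" if shout else "quiet")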
class Argument(Parameter):
"""Arguments are positional parameters to a command. They generally
provide fewer features than options but can have infinite ``nargs``
and are required by default.
All parameters are passed onwards to the parameter constructor.
"""
param_type_name = "argument"
def __init__(self, param_decls, required=None, **attrs):
if required is None:
if attrs.get("default") is not None:
required = False
else:
required = attrs.get("nargs", 1) > 0
Parameter.__init__(self, param_decls, required=required, **attrs)
if self.default is not None and self.nargs < 0:
raise TypeError(
"nargs=-1 in combination with a default value " "is not supported."
)
@property
def human_readable_name(self):
if self.metavar is not None:
return self.metavar
return self.name.upper()
def make_metavar(self):
if self.metavar is not None:
return self.metavar
var = self.type.get_metavar(self)
if not var:
var = self.name.upper()
if not self.required:
var = "[%s]" % var
if self.nargs != 1:
var += "..."
return var
def _parse_decls(self, decls, expose_value):
if not decls:
if not expose_value:
return None, [], []
raise TypeError("Could not determine name for argument")
if len(decls) == 1:
name = arg = decls[0]
name = name.replace("-", "_").lower()
else:
raise TypeError(
"Arguments take exactly one "
"parameter declaration, got %d" % len(decls)
)
return name, [arg], []
def get_usage_pieces(self, ctx):
return [self.make_metavar()]
def get_error_hint(self, ctx):
return '"%s"' % self.make_metavar()
def add_to_parser(self, parser, ctx):
parser.add_argument(dest=self.name, nargs=self.nargs, obj=self)
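# Illustrative sketch (editor's addition, kept as a comment): a variadic argument; per
# ``__init__`` above, ``nargs=-1`` cannot be combined with a default. The name ``cli``
# is hypothetical.
#
#     @click.command()
#     @click.argument("files", nargs=-1)
#     def cli(files):
#         click.echo(" ".join(files))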
# Circular dependency between decorators and core
from .decorators import command, group
|
the-stack_0_15195 | from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from app.main.config import configurations
# Initialize SQLAlchemy database
db = SQLAlchemy()
def create_app(config):
# Check if configuration is valid
if config not in configurations:
raise ValueError(f'{config} is not a valid configuration.')
# Create Flask application and initialize SQLAlchemy with the application instance
app = Flask(__name__)
app.config.from_object(configurations[config])
db.init_app(app)
return app
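# Illustrative usage sketch (editor's addition, kept as a comment). It assumes a
# 'development' key exists in ``configurations``.
#
#     app = create_app('development')
#     with app.app_context():
#         db.create_all()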
|
the-stack_0_15198 | from tkinter import*
raiz=Tk()
import psycopg2
from bd import conexion
import cv2
from datetime import datetime
import time
cap = cv2.VideoCapture(0)
detector = cv2.QRCodeDetector()
control='u'
#declare a frame inside the window with the given dimensions
miFrame=Frame(raiz,width=1200, height=600)
#pack it
miFrame.pack()
snombre=StringVar()
sapellido=StringVar()
scedula=StringVar()
sfecha=StringVar()
#declare a text entry box
NombreBox=Entry(miFrame, textvariable=snombre)
NombreBox.grid(row=0, column=1, padx=10, pady=10)
#declare a text entry box
ApellidoBox=Entry(miFrame,textvariable=sapellido)
ApellidoBox.grid(row=1, column=1, padx=10, pady=10)
#declare a text entry box
CedulaBox=Entry(miFrame, textvariable=scedula)
CedulaBox.grid(row=2, column=1, padx=10, pady=10)
#declare a text entry box
FechaBox=Entry(miFrame, textvariable=sfecha)
FechaBox.grid(row=3, column=1, padx=10, pady=10)
#declare a label
NombreLabel= Label(miFrame, text="Nombre:")
NombreLabel.grid(row=0, column=0, sticky="e", padx=10, pady=10)
#declare a label
ApellidoLabel= Label(miFrame, text="Apellido:")
ApellidoLabel.grid(row=1, column=0, sticky="e", padx=10, pady=10)
#declare a label
CedulaLabel= Label(miFrame, text="Cedula:")
CedulaLabel.grid(row=2, column=0, sticky="e", padx=10, pady=10)
#declare a label
FechaLabel= Label(miFrame, text="Fecha:")
FechaLabel.grid(row=3, column=0, sticky="e", padx=10, pady=10)
def codigoBoton():
    global control  # without this, the "control=data" assignment below raises UnboundLocalError
    while True:
_, img = cap.read()
data, bbox, _ = detector.detectAndDecode(img)
if(bbox is not None):
for i in range(len(bbox)):
cv2.line(img, tuple(bbox[i][0]), tuple(bbox[(i+1) % len(bbox)][0]), color=(255,
0, 255), thickness=2)
cv2.putText(img, data, (int(bbox[0][0][0]), int(bbox[0][0][1]) - 10), cv2.FONT_HERSHEY_SIMPLEX,
0.5, (0, 255, 0), 2)
if data:
if(control!=data):
try:
with conexion.cursor() as cursor:
consulta = "SELECT nombre, apellido FROM datos WHERE cedula = %s;"
cursor.execute(consulta, (data,))
                            # fetchall retrieves all the rows
datos = cursor.fetchall()
#print(datos)
                            # iterate and print
#for dato in datos:
#print(dato)
except psycopg2.Error as e:
print("Ocurrió un error al consultar con where: ", e)
if datos:
tiempo=str(datetime.now())
for dato in datos:
snombre.set(dato[0])
sapellido.set(dato[1])
scedula.set(data)
sfecha.set(tiempo)
print( data, dato[0], tiempo)
else:
print("No registrado")
snombre.set("No registrado")
control=data
#cv2.imshow("Scanner QR", img)
#if(cv2.waitKey(1) == ord("q")):
break
botonEnvio=Button(raiz, text="Enviar", command=codigoBoton)
botonEnvio.pack()
conexion.close()
#cap.release()
#cv2.destroyAllWindows()
raiz.mainloop()
|
the-stack_0_15200 | import ctypes
import struct
# 3p
import bson
from bson.codec_options import CodecOptions
from bson.son import SON
# project
from ...compat import to_unicode
from ...ext import net as netx
from ...internal.logger import get_logger
log = get_logger(__name__)
# MongoDB wire protocol commands
# http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
OP_CODES = {
1: 'reply',
1000: 'msg', # DEV: 1000 was deprecated at some point, use 2013 instead
2001: 'update',
2002: 'insert',
2003: 'reserved',
2004: 'query',
2005: 'get_more',
2006: 'delete',
2007: 'kill_cursors',
2010: 'command',
2011: 'command_reply',
2013: 'msg',
}
# The maximum message length we'll try to parse
MAX_MSG_PARSE_LEN = 1024 * 1024
header_struct = struct.Struct('<iiii')
class Command(object):
""" Command stores information about a pymongo network command, """
__slots__ = ['name', 'coll', 'db', 'tags', 'metrics', 'query']
def __init__(self, name, db, coll):
self.name = name
self.coll = coll
self.db = db
self.tags = {}
self.metrics = {}
self.query = None
def __repr__(self):
return (
'Command('
'name=%s,'
'db=%s,'
'coll=%s)'
) % (self.name, self.db, self.coll)
def parse_msg(msg_bytes):
    """ Return a command from a binary mongo db message or None if we shouldn't
trace it. The protocol is documented here:
http://docs.mongodb.com/manual/reference/mongodb-wire-protocol
"""
# NOTE[matt] this is used for queries in pymongo <= 3.0.0 and for inserts
# in up to date versions.
msg_len = len(msg_bytes)
if msg_len <= 0:
return None
header = header_struct.unpack_from(msg_bytes, 0)
(length, req_id, response_to, op_code) = header
op = OP_CODES.get(op_code)
if not op:
log.debug('unknown op code: %s', op_code)
return None
db = None
coll = None
offset = header_struct.size
cmd = None
if op == 'query':
# NOTE[matt] inserts, updates and queries can all use this opcode
offset += 4 # skip flags
ns = _cstring(msg_bytes[offset:])
offset += len(ns) + 1 # include null terminator
# note: here coll could be '$cmd' because it can be overridden in the
# query itself (like {'insert':'songs'})
db, coll = _split_namespace(ns)
offset += 8 # skip numberToSkip & numberToReturn
if msg_len <= MAX_MSG_PARSE_LEN:
# FIXME[matt] don't try to parse large messages for performance
# reasons. ideally we'd just peek at the first bytes to get
# the critical info (op type, collection, query, # of docs)
# rather than parse the whole thing. i suspect only massive
# inserts will be affected.
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command('command', db, 'untraced_message_too_large')
# If the command didn't contain namespace info, set it here.
if not cmd.coll:
cmd.coll = coll
elif op == 'msg':
# Skip header and flag bits
offset += 4
# Parse the msg kind
kind = ord(msg_bytes[offset:offset+1])
offset += 1
# Kinds: https://docs.mongodb.com/manual/reference/mongodb-wire-protocol/#sections
# - 0: BSON Object
# - 1: Document Sequence
if kind == 0:
if msg_len <= MAX_MSG_PARSE_LEN:
codec = CodecOptions(SON)
spec = next(bson.decode_iter(msg_bytes[offset:], codec_options=codec))
cmd = parse_spec(spec, db)
else:
# let's still note that a command happened.
cmd = Command('command', db, 'untraced_message_too_large')
else:
# let's still note that a command happened.
cmd = Command('command', db, 'unsupported_msg_kind')
if cmd:
cmd.metrics[netx.BYTES_OUT] = msg_len
return cmd
def parse_query(query):
""" Return a command parsed from the given mongo db query. """
db, coll = None, None
ns = getattr(query, 'ns', None)
if ns:
# version < 3.1 stores the full namespace
db, coll = _split_namespace(ns)
else:
        # version >= 3.1 stores the db and coll separately
coll = getattr(query, 'coll', None)
db = getattr(query, 'db', None)
# pymongo < 3.1 _Query does not have a name field, so default to 'query'
cmd = Command(getattr(query, 'name', 'query'), db, coll)
cmd.query = query.spec
return cmd
def parse_spec(spec, db=None):
""" Return a Command that has parsed the relevant detail for the given
pymongo SON spec.
"""
# the first element is the command and collection
items = list(spec.items())
if not items:
return None
name, coll = items[0]
cmd = Command(name, db or spec.get('$db'), coll)
if 'ordered' in spec: # in insert and update
cmd.tags['mongodb.ordered'] = spec['ordered']
if cmd.name == 'insert':
if 'documents' in spec:
cmd.metrics['mongodb.documents'] = len(spec['documents'])
elif cmd.name == 'update':
updates = spec.get('updates')
if updates:
# FIXME[matt] is there ever more than one here?
cmd.query = updates[0].get('q')
elif cmd.name == 'delete':
dels = spec.get('deletes')
if dels:
# FIXME[matt] is there ever more than one here?
cmd.query = dels[0].get('q')
return cmd
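# Illustrative sketch (editor's addition, kept as a comment): a hypothetical insert spec
# as pymongo would encode it, and what parse_spec extracts from it.
#
#     spec = SON([("insert", "songs"), ("ordered", True),
#                 ("documents", [{"name": "a"}, {"name": "b"}])])
#     cmd = parse_spec(spec, db="music")
#     # cmd.name == "insert", cmd.coll == "songs", cmd.tags["mongodb.ordered"] is True,
#     # cmd.metrics["mongodb.documents"] == 2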
def _cstring(raw):
    """ Return the first null-terminated cstring from the buffer. """
return ctypes.create_string_buffer(raw).value
def _split_namespace(ns):
    """ Return a tuple of (db, collection) from the 'db.coll' string. """
if ns:
# NOTE[matt] ns is unicode or bytes depending on the client version
# so force cast to unicode
split = to_unicode(ns).split('.', 1)
if len(split) == 1:
raise Exception("namespace doesn't contain period: %s" % ns)
return split
return (None, None)
|
the-stack_0_15203 | #!/usr/bin/env python
# sudo apt install python3-tk
from camera import *
c = Camera('192.168.0.100', 52381)
def save_preset_labels():
with open('preset_labels.txt', 'w') as f:
for entry in entry_boxes:
f.write(entry.get())
f.write('\n')
f.close()
# GUI
from tkinter import Tk, StringVar, Button, Label, Scale, Entry, W
root = Tk()
#display_message = StringVar()
root.title('VISCA IP Camera Controller')
root['background'] = 'white'
#Label(root, text='VISCA IP Camera Controller').grid(row=0, column=0, columnspan=100)
store_column = 0
label_column = 1
recall_column = 2
pan_tilt_column = 5
pan_tilt_row = 1
zoom_column = 3
zoom_row = 1
focus_column = 3
focus_row = 8
on_off_column = 3
on_off_row = 13
button_width = 8
store_color = 'red'
recall_color = 'light grey'
pan_tilt_color = 'white'
zoom_color = 'light blue'
focus_color = 'cyan'
on_off_color = 'violet'
# Preset store buttons
Label(root, text='Store', bg=store_color).grid(row=1, column=store_column)
Button(root, text=0, width=3, bg=store_color, command=lambda: c.memory_set(0)).grid(row=2, column=store_column)
Button(root, text=1, width=3, bg=store_color, command=lambda: c.memory_set(1)).grid(row=3, column=store_column)
Button(root, text=2, width=3, bg=store_color, command=lambda: c.memory_set(2)).grid(row=4, column=store_column)
Button(root, text=3, width=3, bg=store_color, command=lambda: c.memory_set(3)).grid(row=5, column=store_column)
Button(root, text=4, width=3, bg=store_color, command=lambda: c.memory_set(4)).grid(row=6, column=store_column)
Button(root, text=5, width=3, bg=store_color, command=lambda: c.memory_set(5)).grid(row=7, column=store_column)
Button(root, text=6, width=3, bg=store_color, command=lambda: c.memory_set(6)).grid(row=8, column=store_column)
Button(root, text=7, width=3, bg=store_color, command=lambda: c.memory_set(7)).grid(row=9, column=store_column)
Button(root, text=8, width=3, bg=store_color, command=lambda: c.memory_set(8)).grid(row=10, column=store_column)
Button(root, text=9, width=3, bg=store_color, command=lambda: c.memory_set(9)).grid(row=11, column=store_column)
Button(root, text='A', width=3, bg=store_color, command=lambda: c.memory_set(10)).grid(row=12, column=store_column)
Button(root, text='B', width=3, bg=store_color, command=lambda: c.memory_set(11)).grid(row=13, column=store_column)
Button(root, text='C', width=3, bg=store_color, command=lambda: c.memory_set(12)).grid(row=14, column=store_column)
Button(root, text='D', width=3, bg=store_color, command=lambda: c.memory_set(13)).grid(row=15, column=store_column)
Button(root, text='E', width=3, bg=store_color, command=lambda: c.memory_set(14)).grid(row=16, column=store_column)
Button(root, text='F', width=3, bg=store_color, command=lambda: c.memory_set(15)).grid(row=17, column=store_column)
# Recall buttons and entries (as labels)
Label(root, text='Recall', bg=recall_color).grid(row=1, column=recall_column)
Button(root, text=0, width=5, bg=recall_color, command=lambda: c.memory_recall(0)).grid(row=2, column=recall_column)
Button(root, text=1, width=5, bg=recall_color, command=lambda: c.memory_recall(1)).grid(row=3, column=recall_column)
Button(root, text=2, width=5, bg=recall_color, command=lambda: c.memory_recall(2)).grid(row=4, column=recall_column)
Button(root, text=3, width=5, bg=recall_color, command=lambda: c.memory_recall(3)).grid(row=5, column=recall_column)
Button(root, text=4, width=5, bg=recall_color, command=lambda: c.memory_recall(4)).grid(row=6, column=recall_column)
Button(root, text=5, width=5, bg=recall_color, command=lambda: c.memory_recall(5)).grid(row=7, column=recall_column)
Button(root, text=6, width=5, bg=recall_color, command=lambda: c.memory_recall(6)).grid(row=8, column=recall_column)
Button(root, text=7, width=5, bg=recall_color, command=lambda: c.memory_recall(7)).grid(row=9, column=recall_column)
Button(root, text=8, width=5, bg=recall_color, command=lambda: c.memory_recall(8)).grid(row=10, column=recall_column)
Button(root, text=9, width=5, bg=recall_color, command=lambda: c.memory_recall(9)).grid(row=11, column=recall_column)
Button(root, text='A', width=5, bg=recall_color, command=lambda: c.memory_recall(10)).grid(row=12, column=recall_column)
Button(root, text='B', width=5, bg=recall_color, command=lambda: c.memory_recall(11)).grid(row=13, column=recall_column)
Button(root, text='C', width=5, bg=recall_color, command=lambda: c.memory_recall(12)).grid(row=14, column=recall_column)
Button(root, text='D', width=5, bg=recall_color, command=lambda: c.memory_recall(13)).grid(row=15, column=recall_column)
Button(root, text='E', width=5, bg=recall_color, command=lambda: c.memory_recall(14)).grid(row=16, column=recall_column)
Button(root, text='F', width=5, bg=recall_color, command=lambda: c.memory_recall(15)).grid(row=17, column=recall_column)
try:
with open('preset_labels.txt', 'r') as f:
labels = f.read().splitlines()
f.close()
except:
pass
entry_boxes = []
for e in range(16):
box = Entry(root, justify='right')
try:
box.insert(-1, labels[e])
except:
pass
box.grid(row=e+2, column=label_column)
entry_boxes.append(box)
Button(root, text='Save preset labels', bg=store_color, command=lambda: save_preset_labels()).grid(row=1, column=label_column)
# Pan speed and Tilt speed sliders
Label(root, text='Pan Speed', bg=pan_tilt_color).grid(row=pan_tilt_row, column=pan_tilt_column)
pan_speed_slider = Scale(root, from_=24, to=0, bg=pan_tilt_color)
pan_speed_slider.set(7)
pan_speed_slider.grid(row=pan_tilt_row+1, column=pan_tilt_column, rowspan=4)
Label(root, text='Tilt Speed', bg=pan_tilt_color).grid(row=pan_tilt_row, column=pan_tilt_column+1)
tilt_speed_slider = Scale(root, from_=24, to=0, bg=pan_tilt_color)
tilt_speed_slider.set(7)
tilt_speed_slider.grid(row=pan_tilt_row+1, column=pan_tilt_column+1, rowspan=4)
#Button(root, text='test', command=lambda: print(pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=0,column=0)
# Pan and tilt buttons
Button(root, text='↑', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('up', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row, column=pan_tilt_column+3)
Button(root, text='←', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('left', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+1, column=pan_tilt_column+2)
Button(root, text='→', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('right', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+1, column=pan_tilt_column+4)
Button(root, text='↓', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('down', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+2, column=pan_tilt_column+3)
Button(root, text='↖', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('upleft', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row, column=pan_tilt_column+2)
Button(root, text='↗', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('upright', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row, column=pan_tilt_column+4)
Button(root, text='↙', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('downleft', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+2, column=pan_tilt_column+2)
Button(root, text='↘', width=3, bg=pan_tilt_color, command=lambda: c.pantilt('downright', pan_speed_slider.get(), tilt_speed_slider.get())).grid(row=pan_tilt_row+2, column=pan_tilt_column+4)
Button(root, text='■', width=3, bg=pan_tilt_color, command=lambda: c.pantilt_stop()).grid(row=pan_tilt_row+1, column=pan_tilt_column+3)
#Button(root, text='Home', command=lambda: send_message(pan_home)).grid(row=pan_tilt_row+2, column=pan_tilt_column+1)
# Zoom buttons
Label(root, text='Zoom', bg=zoom_color, width=button_width).grid(row=zoom_row, column=zoom_column)
Button(root, text='In', bg=zoom_color, width=button_width, command=lambda: c.zoom_in()).grid(row=zoom_row+1, column=zoom_column)
Button(root, text='Stop', bg=zoom_color, width=button_width, command=lambda: c.zoom_stop()).grid(row=zoom_row+2, column=zoom_column)
Button(root, text='Out', bg=zoom_color, width=button_width, command=lambda: c.zoom_out()).grid(row=zoom_row+3, column=zoom_column)
# On off connect buttons
Label(root, text='Camera', bg=on_off_color, width=button_width).grid(row=on_off_row, column=on_off_column)
Button(root, text='On', bg=on_off_color, width=button_width, command=lambda: c.on()).grid(row=on_off_row+1, column=on_off_column)
Button(root, text='Connect', bg=on_off_color, width=button_width, command=lambda: c.connect()).grid(row=on_off_row+2, column=on_off_column)
Button(root, text='Off', bg=on_off_color, width=button_width, command=lambda: c.off()).grid(row=on_off_row+3, column=on_off_column)
Button(root, text='Info Off', bg=on_off_color, width=button_width, command=lambda: c.info_display_off()).grid(row=on_off_row+4, column=on_off_column)
# IP Label
#Label(root, text=camera_ip+':'+str(camera_port)).grid(row=6, column=0, columnspan=3)
# Connection Label
#Label(root, textvariable=display_message).grid(row=6, column=4, columnspan=3)
root.mainloop()
#'''
|
the-stack_0_15204 | #!/usr/bin/env python
#
# Electrum - Lightweight Merge Client
# Copyright (C) 2015 Thomas Voegtlin
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from functools import partial
from threading import Thread
import re
from decimal import Decimal
from kivy.clock import Clock
from electrum.i18n import _
from electrum.plugin import hook
from .trustedcoin import TrustedCoinPlugin, server, KIVY_DISCLAIMER, TrustedCoinException, ErrorConnectingServer
class Plugin(TrustedCoinPlugin):
disclaimer_msg = KIVY_DISCLAIMER
def __init__(self, parent, config, name):
super().__init__(parent, config, name)
@hook
def load_wallet(self, wallet, window):
if not isinstance(wallet, self.wallet_class):
return
self.start_request_thread(wallet)
def go_online_dialog(self, wizard):
# we skip this step on android
wizard.run('accept_terms_of_use')
def prompt_user_for_otp(self, wallet, tx, on_success, on_failure):
from ...gui.kivy.uix.dialogs.label_dialog import LabelDialog
msg = _('Please enter your Google Authenticator code')
d = LabelDialog(msg, '', lambda otp: self.on_otp(wallet, tx, otp, on_success, on_failure))
d.open()
def on_otp(self, wallet, tx, otp, on_success, on_failure):
try:
wallet.on_otp(tx, otp)
except TrustedCoinException as e:
if e.status_code == 400: # invalid OTP
Clock.schedule_once(lambda dt: on_failure(_('Invalid one-time password.')))
else:
Clock.schedule_once(lambda dt, bound_e=e: on_failure(_('Error') + ':\n' + str(bound_e)))
except Exception as e:
Clock.schedule_once(lambda dt, bound_e=e: on_failure(_('Error') + ':\n' + str(bound_e)))
else:
on_success(tx)
def accept_terms_of_use(self, wizard):
def handle_error(msg, e):
wizard.show_error(msg + ':\n' + str(e))
wizard.terminate()
try:
tos = server.get_terms_of_service()
except ErrorConnectingServer as e:
Clock.schedule_once(lambda dt, bound_e=e: handle_error(_('Error connecting to server'), bound_e))
except Exception as e:
Clock.schedule_once(lambda dt, bound_e=e: handle_error(_('Error'), bound_e))
else:
f = lambda x: self.read_email(wizard)
wizard.tos_dialog(tos=tos, run_next=f)
def read_email(self, wizard):
f = lambda x: self.create_remote_key(x, wizard)
wizard.email_dialog(run_next=f)
def request_otp_dialog(self, wizard, short_id, otp_secret, xpub3):
f = lambda otp, reset: self.check_otp(wizard, short_id, otp_secret, xpub3, otp, reset)
wizard.otp_dialog(otp_secret=otp_secret, run_next=f)
@hook
def abort_send(self, window):
wallet = window.wallet
if not isinstance(wallet, self.wallet_class):
return
if wallet.can_sign_without_server():
return
if wallet.billing_info is None:
self.start_request_thread(wallet)
Clock.schedule_once(
lambda dt: window.show_error(_('Requesting account info from TrustedCoin server...') + '\n' +
_('Please try again.')))
return True
return False
|
the-stack_0_15206 | import unittest
import types
import os
import sys
import tempfile
import shutil
import subprocess
from openmdao.api import Problem
from openmdao.test_suite.components.sellar import SellarNoDerivatives
from openmdao.devtools import iprof_mem
@unittest.skip("interactive test, not to be run with test suite")
class TestProfileMemory(unittest.TestCase):
def test_sellar(self):
prob = Problem(SellarNoDerivatives()).setup()
with iprof_mem.memtrace(min_mem=0.1):
prob.run_model()
# expect output similar to the following:
# 0.11 (435 calls) </Users/banaylor/dev/blue/openmdao/utils/name_maps.py:124>.name2abs_name
# 0.11 (14 calls) ExplicitComponent._solve_nonlinear:(IndepVarComp)
# 0.11 (7 calls) NonlinearRunOnce.solve
# 0.11 (150 calls) Vector.__contains__:(DefaultVector)
# 0.12 (7 calls) Group._solve_nonlinear
# 0.13 (1 calls) Driver._update_voi_meta
# 0.14 (2 calls) DefaultTransfer._setup_transfers
# 0.16 (1 calls) NonlinearBlockGS._iter_initialize
# 0.16 (1 calls) NonlinearSolver._iter_initialize:(NonlinearBlockGS)
# 0.19 (24 calls) ExplicitComponent._apply_nonlinear:(ExecComp)
# 0.20 (1 calls) System._setup_vectors:(SellarNoDerivatives)
# 0.25 (105 calls) _IODict.__getitem__
# 0.26 (80 calls) Vector.__init__:(DefaultVector)
# 0.26 (21 calls) ExplicitComponent._solve_nonlinear:(ExecComp)
# 0.34 (45 calls) ExecComp.compute
# 0.39 (8 calls) NonlinearSolver._run_apply:(NonlinearBlockGS)
# 0.39 (8 calls) Group._apply_nonlinear:(SellarNoDerivatives)
# 0.57 (7 calls) NonlinearBlockGS._single_iteration
# 0.59 (1 calls) System._final_setup:(SellarNoDerivatives)
# 0.75 (1 calls) Problem.final_setup
# 1.07 (1 calls) NonlinearSolver.solve:(NonlinearBlockGS)
# 1.07 (1 calls) Solver._run_iterator:(NonlinearBlockGS)
# 1.07 (1 calls) System.run_solve_nonlinear:(SellarNoDerivatives)
# 1.07 (1 calls) Group._solve_nonlinear:(SellarNoDerivatives)
# 1.83 (1 calls) Problem.run_model
class TestCmdlineMemory(unittest.TestCase):
def setUp(self):
try:
import psutil
except ImportError:
raise unittest.SkipTest("psutil is not installed")
self.tstfile = os.path.join(os.path.dirname(os.path.abspath(__file__)), 'mem_model.py')
self.startdir = os.getcwd()
self.tempdir = tempfile.mkdtemp(prefix='TestDOEDriver-')
os.chdir(self.tempdir)
def tearDown(self):
os.chdir(self.startdir)
try:
shutil.rmtree(self.tempdir)
except OSError:
pass
def _run_command(self, cmd):
try:
output = subprocess.check_output(cmd).decode('utf-8', 'ignore')
except subprocess.CalledProcessError as err:
            msg = "Running command '{}' failed. " \
                  "Output was: \n{}".format(cmd, err.output.decode('utf-8'))
self.fail(msg)
def test_mem(self):
self._run_command(['openmdao', 'mem', self.tstfile])
self._run_command(['openmdao', 'mempost', 'mem_trace.raw'])
def test_mem_tree(self):
self._run_command(['openmdao', 'mem', '-t', self.tstfile])
self._run_command(['openmdao', 'mempost', '-t', 'mem_trace.raw'])
if __name__ == "__main__":
unittest.main()
|
the-stack_0_15208 | import yaml
from unittest import TestCase
from .utils import TEST_DATA_PATH
from foliant.meta.classes import Chapter
from foliant.meta.classes import Meta
from foliant.meta.classes import MetaChapterDoesNotExistError
from foliant.meta.classes import MetaDublicateIDError
from foliant.meta.classes import MetaSectionDoesNotExistError
from foliant.meta.classes import Section
from foliant.meta.generate import load_meta
class TestLoadMetaFromFile(TestCase):
maxDiff = None
def test_load_sample_file1(self):
meta = Meta()
with open(TEST_DATA_PATH / 'meta1.yml', encoding='utf8') as f:
source = yaml.load(f, yaml.Loader)
meta.load_meta_from_file(TEST_DATA_PATH / 'meta1.yml')
self.assertEqual(meta.dump(), source)
def test_load_sample_file2(self):
meta = Meta()
with open(TEST_DATA_PATH / 'meta2.yml', encoding='utf8') as f:
source = yaml.load(f, yaml.Loader)
meta.load_meta_from_file(TEST_DATA_PATH / 'meta2.yml')
self.assertEqual(meta.dump(), source)
def test_load_sample_file3(self):
meta = Meta()
with open(TEST_DATA_PATH / 'meta3.yml', encoding='utf8') as f:
source = yaml.load(f, yaml.Loader)
meta.load_meta_from_file(TEST_DATA_PATH / 'meta3.yml')
self.assertEqual(meta.dump(), source)
class TestProcessIds(TestCase):
def test_load_sample_file(self):
section1 = Section(level=0,
start=0,
end=100,
data={'id': 'id1'},
title='title1')
section2 = Section(level=1,
start=10,
end=100,
data={'id': 'id2'},
title='title2')
chapter1 = Chapter(filename='filename',
name='chapter_name',
main_section=None)
section1.add_child(section2)
chapter1.main_section = section1
section3 = Section(level=0,
start=0,
end=100,
data={'id': 'id3'},
title='title3')
section4 = Section(level=1,
start=10,
end=100,
data={'id': 'id4'},
title='title4')
chapter2 = Chapter(filename='filename2',
name='chapter_name2',
main_section=None)
section3.add_child(section4)
chapter2.main_section = section3
meta = Meta()
meta.add_chapter(chapter1)
meta.add_chapter(chapter2)
expected_ids = ['id1', 'id2', 'id3', 'id4']
meta.process_ids()
for section, expected_id in zip(meta.iter_sections(), expected_ids):
self.assertEqual(section.id, expected_id)
def test_dublicate_ids(self):
section1 = Section(level=0,
start=0,
end=100,
data={'id': 'id1'},
title='title1')
section2 = Section(level=1,
start=10,
end=100,
data={'id': 'id1'},
title='title2')
chapter1 = Chapter(filename='filename',
name='chapter_name',
main_section=None)
section1.add_child(section2)
chapter1.main_section = section1
meta = Meta()
meta.add_chapter(chapter1)
with self.assertRaises(MetaDublicateIDError):
meta.process_ids()
def test_generate_ids(self):
section1 = Section(level=0,
start=0,
end=100,
data={'id': 'id1'},
title='title1')
section2 = Section(level=1,
start=10,
end=100,
data={},
title='My Section Title (78)')
chapter1 = Chapter(filename='filename',
name='chapter_name',
main_section=None)
section1.add_child(section2)
chapter1.main_section = section1
section3 = Section(level=0,
start=0,
end=100,
data={'id': 'original'},
title='title3')
section4 = Section(level=1,
start=10,
end=100,
data={},
title='original')
chapter2 = Chapter(filename='filename2',
name='chapter_name2',
main_section=None)
section3.add_child(section4)
chapter2.main_section = section3
meta = Meta()
meta.add_chapter(chapter1)
meta.add_chapter(chapter2)
expected_ids = ['id1', 'my-section-title-78', 'original', 'original-2']
meta.process_ids()
for section, expected_id in zip(meta.iter_sections(), expected_ids):
self.assertEqual(section.id, expected_id)
class TestGetChapter(TestCase):
def setUp(self):
md_root = 'test/test_data/load_meta'
chapters = [
'chapter_only_yfm.md',
'chapter_with_meta.md',
'chapter_with_one_meta_tag.md',
'chapter_without_meta.md'
]
self.meta = load_meta(chapters, md_root)
def test_wrong_chapter(self):
with self.assertRaises(MetaChapterDoesNotExistError):
self.meta.get_chapter('wrong/chapter/path')
def test_relative_path(self):
filename = 'test/test_data/load_meta/chapter_with_meta.md'
chapter = self.meta.get_chapter(filename)
self.assertTrue(chapter.filename.endswith('chapter_with_meta.md'))
def test_absolute_path(self):
filename = TEST_DATA_PATH / 'load_meta/chapter_with_meta.md'
chapter = self.meta.get_chapter(filename)
self.assertTrue(chapter.filename.endswith('chapter_with_meta.md'))
class TestGetByID(TestCase):
def test_id_exists(self):
meta = Meta()
meta.load_meta_from_file(TEST_DATA_PATH / 'meta3.yml')
id_ = 'subsection'
section = meta.get_by_id(id_)
self.assertEqual(section.id, id_)
def test_id_doesnt_exist(self):
meta = Meta()
meta.load_meta_from_file(TEST_DATA_PATH / 'meta3.yml')
id_ = 'nonexistant_id'
with self.assertRaises(MetaSectionDoesNotExistError):
section = meta.get_by_id(id_)
|
the-stack_0_15211 | from distutils.core import setup, Extension
module_device = Extension('device',
sources = ['device.cpp'],
library_dirs=["C:\Program Files (x86)\Windows Kits\10\Lib"]
)
setup (name = 'WindowsDevices',
version = '1.0',
description = 'Get device list with DirectShow',
ext_modules = [module_device])
|
the-stack_0_15212 | import numpy as np
from scipy.stats import norm
import unittest
import ray
import ray.rllib.algorithms.dqn as dqn
import ray.rllib.algorithms.pg as pg
import ray.rllib.algorithms.ppo as ppo
import ray.rllib.algorithms.sac as sac
from ray.rllib.utils.framework import try_import_tf
from ray.rllib.utils.test_utils import check, framework_iterator
from ray.rllib.utils.numpy import one_hot, fc, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT
tf1, tf, tfv = try_import_tf()
def do_test_log_likelihood(
run,
config,
prev_a=None,
continuous=False,
layer_key=("fc", (0, 4), ("_hidden_layers.0.", "_logits.")),
logp_func=None,
):
config = config.copy()
# Run locally.
config["num_workers"] = 0
# Env setup.
if continuous:
env = "Pendulum-v1"
obs_batch = preprocessed_obs_batch = np.array([[0.0, 0.1, -0.1]])
else:
env = "FrozenLake-v1"
config["env_config"] = {"is_slippery": False, "map_name": "4x4"}
obs_batch = np.array([0])
# PG does not preprocess anymore by default.
preprocessed_obs_batch = (
one_hot(obs_batch, depth=16) if run is not pg.PG else obs_batch
)
prev_r = None if prev_a is None else np.array(0.0)
# Test against all frameworks.
for fw in framework_iterator(config):
algo = run(config=config, env=env)
policy = algo.get_policy()
vars = policy.get_weights()
# Sample n actions, then roughly check their logp against their
# counts.
num_actions = 1000 if not continuous else 50
actions = []
for _ in range(num_actions):
# Single action from single obs.
actions.append(
algo.compute_single_action(
obs_batch[0],
prev_action=prev_a,
prev_reward=prev_r,
explore=True,
# Do not unsquash actions
# (remain in normalized [-1.0; 1.0] space).
unsquash_action=False,
)
)
# Test all taken actions for their log-likelihoods vs expected values.
if continuous:
for idx in range(num_actions):
a = actions[idx]
if fw != "torch":
if isinstance(vars, list):
expected_mean_logstd = fc(
fc(obs_batch, vars[layer_key[1][0]]), vars[layer_key[1][1]]
)
else:
expected_mean_logstd = fc(
fc(
obs_batch,
vars["default_policy/{}_1/kernel".format(layer_key[0])],
),
vars["default_policy/{}_out/kernel".format(layer_key[0])],
)
else:
expected_mean_logstd = fc(
fc(
obs_batch,
vars["{}_model.0.weight".format(layer_key[2][0])],
framework=fw,
),
vars["{}_model.0.weight".format(layer_key[2][1])],
framework=fw,
)
mean, log_std = np.split(expected_mean_logstd, 2, axis=-1)
if logp_func is None:
expected_logp = np.log(norm.pdf(a, mean, np.exp(log_std)))
else:
expected_logp = logp_func(mean, log_std, a)
logp = policy.compute_log_likelihoods(
np.array([a]),
preprocessed_obs_batch,
prev_action_batch=np.array([prev_a]) if prev_a else None,
prev_reward_batch=np.array([prev_r]) if prev_r else None,
actions_normalized=True,
)
check(logp, expected_logp[0], rtol=0.2)
# Test all available actions for their logp values.
else:
for a in [0, 1, 2, 3]:
count = actions.count(a)
expected_prob = count / num_actions
logp = policy.compute_log_likelihoods(
np.array([a]),
preprocessed_obs_batch,
prev_action_batch=np.array([prev_a]) if prev_a else None,
prev_reward_batch=np.array([prev_r]) if prev_r else None,
)
check(np.exp(logp), expected_prob, atol=0.2)
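# Worked example of the discrete sanity check above (illustrative numbers, not
# part of the original test): if action 2 is sampled 250 times out of 1000
# explorations, the empirical probability is 0.25, so the policy's
# compute_log_likelihoods output should satisfy exp(logp) ~= 0.25 within
# atol=0.2, i.e. logp close to ln(0.25) ~= -1.386.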
class TestComputeLogLikelihood(unittest.TestCase):
@classmethod
def setUpClass(cls) -> None:
ray.init()
@classmethod
def tearDownClass(cls) -> None:
ray.shutdown()
def test_dqn(self):
"""Tests, whether DQN correctly computes logp in soft-q mode."""
config = dqn.DEFAULT_CONFIG.copy()
# Soft-Q for DQN.
config["exploration_config"] = {"type": "SoftQ", "temperature": 0.5}
config["seed"] = 42
do_test_log_likelihood(dqn.DQN, config)
def test_pg_cont(self):
"""Tests PG's (cont. actions) compute_log_likelihoods method."""
config = pg.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = "linear"
prev_a = np.array([0.0])
do_test_log_likelihood(
pg.PG,
config,
prev_a,
continuous=True,
layer_key=("fc", (0, 2), ("_hidden_layers.0.", "_logits.")),
)
def test_pg_discr(self):
"""Tests PG's (cont. actions) compute_log_likelihoods method."""
config = pg.DEFAULT_CONFIG.copy()
config["seed"] = 42
prev_a = np.array(0)
do_test_log_likelihood(pg.PG, config, prev_a)
def test_ppo_cont(self):
"""Tests PPO's (cont. actions) compute_log_likelihoods method."""
config = ppo.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["model"]["fcnet_hiddens"] = [10]
config["model"]["fcnet_activation"] = "linear"
prev_a = np.array([0.0])
do_test_log_likelihood(ppo.PPO, config, prev_a, continuous=True)
def test_ppo_discr(self):
"""Tests PPO's (discr. actions) compute_log_likelihoods method."""
config = ppo.DEFAULT_CONFIG.copy()
config["seed"] = 42
prev_a = np.array(0)
do_test_log_likelihood(ppo.PPO, config, prev_a)
def test_sac_cont(self):
"""Tests SAC's (cont. actions) compute_log_likelihoods method."""
config = sac.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["policy_model_config"]["fcnet_hiddens"] = [10]
config["policy_model_config"]["fcnet_activation"] = "linear"
prev_a = np.array([0.0])
        # SAC cont uses a squashed normal distribution. Implement its logp
# logic here in numpy for comparing results.
def logp_func(means, log_stds, values, low=-1.0, high=1.0):
stds = np.exp(np.clip(log_stds, MIN_LOG_NN_OUTPUT, MAX_LOG_NN_OUTPUT))
unsquashed_values = np.arctanh((values - low) / (high - low) * 2.0 - 1.0)
log_prob_unsquashed = np.sum(
np.log(norm.pdf(unsquashed_values, means, stds)), -1
)
return log_prob_unsquashed - np.sum(
np.log(1 - np.tanh(unsquashed_values) ** 2), axis=-1
)
do_test_log_likelihood(
sac.SAC,
config,
prev_a,
continuous=True,
layer_key=(
"fc",
(0, 2),
("action_model._hidden_layers.0.", "action_model._logits."),
),
logp_func=logp_func,
)
def test_sac_discr(self):
"""Tests SAC's (discrete actions) compute_log_likelihoods method."""
config = sac.DEFAULT_CONFIG.copy()
config["seed"] = 42
config["policy_model_config"]["fcnet_hiddens"] = [10]
config["policy_model_config"]["fcnet_activation"] = "linear"
prev_a = np.array(0)
do_test_log_likelihood(sac.SAC, config, prev_a)
if __name__ == "__main__":
import pytest
import sys
sys.exit(pytest.main(["-v", __file__]))
|
the-stack_0_15215 | from pathlib import Path
import pytest
import torch.autograd
import colossalai
from colossalai.context.parallel_mode import ParallelMode
from colossalai.core import global_context as gpc
from colossalai.engine import Engine
from colossalai.logging import get_global_dist_logger
from colossalai.nn.layer._parallel_utilities import _gather
CONFIG_PATH = Path(__file__).parent.parent.joinpath('configs/vit_2p5d.py')
def eval(engine):
engine.eval()
accumulated_loss = 0
correct_sum = 0
total_sum = 0
for i in range(engine.schedule.num_steps):
output, label, loss = engine.step()
accumulated_loss += loss.detach().cpu().numpy()
output = _gather(
output[0],
ParallelMode.PARALLEL_2P5D_ROW,
1
)
output = _gather(
output,
ParallelMode.PARALLEL_2P5D_COL,
0,
)
output = _gather(
output,
ParallelMode.PARALLEL_2P5D_DEP,
0,
)
output = torch.argmax(output, dim=-1)
correct = torch.sum(label[0] == output)
correct_sum += correct
total_sum += label[0].size(0)
avg_loss = accumulated_loss / engine.schedule.num_steps
return correct_sum, total_sum, avg_loss
def train(engine):
engine.train()
accumulated_loss = 0
for i in range(engine.schedule.num_steps):
output, label, loss = engine.step()
accumulated_loss += loss.detach().cpu().numpy()
avg_loss = accumulated_loss / engine.schedule.num_steps
return avg_loss
@pytest.mark.dist
@pytest.mark.skip("This test should be invoked by test.sh in the same folder as it runs on multiple gpus")
def test_2p5d_parallel_vision_transformer():
# init dist
model, train_dataloader, test_dataloader, criterion, optimizer, schedule, lr_scheduler = colossalai.initialize(
CONFIG_PATH)
logger = get_global_dist_logger()
engine = Engine(model=model,
train_dataloader=train_dataloader,
test_dataloader=test_dataloader,
criterion=criterion,
optimizer=optimizer,
lr_scheduler=lr_scheduler,
schedule=schedule)
logger.info('start training')
for epoch in range(gpc.config.num_epochs):
train_loss = train(engine)
logger.info(f'epoch {epoch} - train loss: {train_loss}')
if epoch % 2 == 0:
correct_sum, total_sum, eval_loss = eval(engine)
logger.info(
f'epoch {epoch} - eval loss: {eval_loss}, total: {total_sum}, '
f'correct: {correct_sum}, acc: {correct_sum / total_sum}')
if __name__ == '__main__':
test_2p5d_parallel_vision_transformer() |
the-stack_0_15216 | """
The processors exist in Pythia to make data processing pipelines in various
datasets as similar as possible while allowing code reuse.
The processors also help maintain proper abstractions to keep only what matters
inside the dataset's code. This allows us to keep the dataset ``get_item``
logic really clean, with no need to maintain opinions about data types.
Processors can work on both images and text due to their generic structure.
To create a new processor, follow these steps:
1. Inherit the ``BaseProcessor`` class.
2. Implement the ``__call__`` function, which takes in a dict and returns a dict
   with the same keys preprocessed, as well as any extra keys that need to be
   returned.
3. Register the processor using ``@registry.register_processor('name')`` with the
   registry, where 'name' will be used to refer to your processor later.
In a processor's config you can use the ``preprocessor`` option to specify
which kind of preprocessor you want in your dataset.
Let's break down processor's config inside a dataset (VQA2.0) a bit to understand
different moving parts.
Config::
dataset_attributes:
vqa2:
processors:
text_processor:
type: vocab
params:
max_length: 14
vocab:
type: intersected
embedding_name: glove.6B.300d
vocab_file: vocabs/vocabulary_100k.txt
answer_processor:
type: vqa_answer
params:
num_answers: 10
vocab_file: vocabs/answers_vqa.txt
preprocessor:
type: simple_word
params: {}
``BaseDataset`` will init the processors and they will be available inside your
dataset under the same attribute name as the key name, e.g. `text_processor` will
be available as `self.text_processor` inside your dataset. As with every module
in Pythia, processors also accept a ``ConfigNode`` with `type` and `params`
attributes. `params` defines the custom parameters for each of the processors.
By default, the processor initialization process will also init the `preprocessor`
attribute, which can be a processor config in itself. `preprocessor` can then be accessed
inside the processor's functions.
Example::
from pythia.common.registry import registry
from pythia.datasets.processors import BaseProcessor
class MyProcessor(BaseProcessor):
def __init__(self, config, *args, **kwargs):
return
def __call__(self, item, *args, **kwargs):
text = item['text']
text = [t.strip() for t in text.split(" ")]
return {"text": text}
"""
import logging
import warnings
from collections import defaultdict
import numpy as np
import torch
from sam.spatial_utils import build_graph_using_normalized_boxes
from tools.registry import registry
from ..phoc import build_phoc
from .textvqa_vocab import VocabDict
logger = logging.getLogger(__name__) # pylint: disable=invalid-name
from easydict import EasyDict as edict
def _pad_tokens(tokens, PAD_TOKEN, max_length):
padded_tokens = [PAD_TOKEN] * max_length
token_length = min(len(tokens), max_length)
padded_tokens[:token_length] = tokens[:token_length]
token_length = torch.tensor(token_length, dtype=torch.long)
return padded_tokens, token_length
class WordToVectorDict:
def __init__(self, model):
self.model = model
def __getitem__(self, word):
# Check if mean for word split needs to be done here
return np.mean([self.model.get_word_vector(w) for w in word.split(" ")], axis=0)
class BaseProcessor:
"""Every processor in Pythia needs to inherit this class for compatability
with Pythia. End user mainly needs to implement ``__call__`` function.
Args:
config (ConfigNode): Config for this processor, containing `type` and
`params` attributes if available.
"""
def __init__(self, config, *args, **kwargs):
return
def __call__(self, item, *args, **kwargs):
"""Main function of the processor. Takes in a dict and returns back
a dict
Args:
item (Dict): Some item that needs to be processed.
Returns:
Dict: Processed dict.
"""
return item
class Processor:
"""Wrapper class used by Pythia to initialized processor based on their
``type`` as passed in configuration. It retrieves the processor class
registered in registry corresponding to the ``type`` key and initializes
with ``params`` passed in configuration. All functions and attributes of
the processor initialized are directly available via this class.
Args:
        config (ConfigNode): ConfigNode containing ``type`` of the processor to
            be initialized and ``params`` of that processor.
"""
def __init__(self, config, *args, **kwargs):
self.writer = registry.get("writer")
if not hasattr(config, "type"):
raise AttributeError(
"Config must have 'type' attribute to specify type of processor"
)
processor_class = registry.get_processor_class(config.type)
params = {}
if not hasattr(config, "params"):
self.writer.write(
"Config doesn't have 'params' attribute to "
"specify parameters of the processor "
"of type {}. Setting to default \{\}".format(config.type)
)
else:
params = config.params
self.processor = processor_class(params, *args, **kwargs)
self._dir_representation = dir(self)
def __call__(self, item, *args, **kwargs):
return self.processor(item, *args, **kwargs)
def __getattr__(self, name):
if name in self._dir_representation:
return getattr(self, name)
elif hasattr(self.processor, name):
return getattr(self.processor, name)
else:
raise AttributeError(name)
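# Minimal usage sketch for the Processor wrapper (the config values below are
# hypothetical, not taken from this module): the wrapper looks up the class
# registered under config.type and forwards calls and attributes to it, e.g.
#   config = edict({"type": "simple_word", "params": {}})
#   word_processor = Processor(config)
#   out = word_processor({"text": "what color is the sign"})  # -> {"text": [...]}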
class FastTextProcessor:
"""FastText processor, similar to GloVe processor but returns FastText vectors.
Args:
config (ConfigNode): Configuration values for the processor.
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
self._load_fasttext_model("/srv/share/ykant3/pythia/vector_cache/wiki.en.bin")
self.PAD_INDEX = 0
self.PAD_TOKEN = "<pad>"
def _load_fasttext_model(self, model_file):
from fasttext import load_model
self.model = load_model(model_file)
# String to Vector
self.stov = WordToVectorDict(self.model)
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
output = torch.full(
(self.max_length, self.model.get_dimension()),
fill_value=self.PAD_INDEX,
dtype=torch.float,
)
for idx, token in enumerate(tokens):
output[idx] = torch.from_numpy(self.stov[token])
return output
def __call__(self, item):
# indices are padded
indices = self._map_strings_to_indices(item["tokens"])
# pad tokens
tokens, length = _pad_tokens(item["tokens"], self.PAD_TOKEN, self.max_length)
return {
"padded_token_indices": indices,
"padded_tokens": tokens,
"length": length,
}
class VQAAnswerProcessor(BaseProcessor):
"""Processor for generating answer scores for answers passed using VQA
accuracy formula. Using VocabDict class to represent answer vocabulary,
so parameters must specify "vocab_file". "num_answers" in parameter config
    specifies the max number of answers possible. Takes in a dict containing
"answers" or "answers_tokens". "answers" are preprocessed to generate
"answers_tokens" if passed.
Args:
config (ConfigNode): Configuration for the processor
Attributes:
answer_vocab (VocabDict): Class representing answer vocabulary
"""
DEFAULT_NUM_ANSWERS = 10
def __init__(self, config, *args, **kwargs):
self.writer = registry.get("writer")
if not hasattr(config, "vocab_file"):
raise AttributeError(
"'vocab_file' argument required, but not "
"present in AnswerProcessor's config"
)
self.answer_vocab = VocabDict(config.vocab_file, *args, **kwargs)
self.preprocessor = None
if hasattr(config, "preprocessor"):
self.preprocessor = Processor(config.preprocessor)
if self.preprocessor is None:
raise ValueError(
"No processor named {} is defined.".format(config.preprocessor)
)
if hasattr(config, "num_answers"):
self.num_answers = config.num_answers
else:
self.num_answers = self.DEFAULT_NUM_ANSWERS
warnings.warn(
"'num_answers' not defined in the config. "
"Setting to default of {}".format(self.DEFAULT_NUM_ANSWERS)
)
def __call__(self, item):
"""Takes in dict with answers or answers_tokens, and returns back
a dict with answers (processed), "answers_indices" which point to
indices of the answers if present and "answers_scores" which represent
VQA style scores for the answers.
Args:
item (Dict): Dict containing answers or answers_tokens
Returns:
Dict: Processed answers, indices and scores.
"""
tokens = None
if not isinstance(item, dict):
raise TypeError("'item' passed to processor must be a dict")
if "answer_tokens" in item:
tokens = item["answer_tokens"]
elif "answers" in item:
if self.preprocessor is None:
raise AssertionError(
"'preprocessor' must be defined if you "
"don't pass 'answer_tokens'"
)
tokens = [
self.preprocessor({"text": answer})["text"]
for answer in item["answers"]
]
else:
raise AssertionError(
"'answers' or 'answer_tokens' must be passed"
" to answer processor in a dict"
)
tokens = self._increase_to_ten(tokens)
answers_indices = torch.zeros(self.DEFAULT_NUM_ANSWERS, dtype=torch.long)
answers_indices.fill_(self.answer_vocab.get_unk_index())
for idx, token in enumerate(tokens):
answers_indices[idx] = self.answer_vocab.word2idx(token)
answers_scores = self.compute_answers_scores(answers_indices)
return {
"answers": tokens,
"answers_indices": answers_indices,
"answers_scores": answers_scores,
}
def get_vocab_size(self):
"""Get vocab size of the answer vocabulary. Can also include
soft copy dynamic answer space size.
Returns:
int: size of the answer vocabulary
"""
return self.answer_vocab.num_vocab
def get_true_vocab_size(self):
"""True vocab size can be different from normal vocab size in some cases
such as soft copy where dynamic answer space is added.
Returns:
int: True vocab size.
"""
return self.answer_vocab.num_vocab
def word2idx(self, word):
"""Convert a word to its index according to vocabulary
Args:
word (str): Word to be converted to index.
Returns:
int: Index of the word.
"""
return self.answer_vocab.word2idx(word)
def idx2word(self, idx):
"""Index to word according to the vocabulary.
Args:
idx (int): Index to be converted to the word.
Returns:
str: Word corresponding to the index.
"""
return self.answer_vocab.idx2word(idx)
def compute_answers_scores(self, answers_indices):
"""Generate VQA based answer scores for answers_indices.
Args:
answers_indices (torch.LongTensor): tensor containing indices of the answers
Returns:
torch.FloatTensor: tensor containing scores.
"""
scores = torch.zeros(self.get_vocab_size(), dtype=torch.float)
gt_answers = list(enumerate(answers_indices))
unique_answers = set(answers_indices.tolist())
for answer in unique_answers:
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [item for item in other_answers if item[1] == answer]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
avg_acc = sum(accs) / len(accs)
if answer != self.answer_vocab.UNK_INDEX:
scores[answer] = avg_acc
return scores
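    # Worked example of the soft-score formula above (assumed annotator counts,
    # for illustration only): with 10 annotators, an answer given 4 times sees
    # 3 or 4 matches among the other nine in every leave-one-out fold, so each
    # acc = min(1, matches / 3) is 1.0 and the averaged score is 1.0; an answer
    # given only 2 times scores (2 * min(1, 1/3) + 8 * min(1, 2/3)) / 10 = 0.6.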
def _increase_to_ten(self, tokens):
while len(tokens) < self.DEFAULT_NUM_ANSWERS:
tokens += tokens[: self.DEFAULT_NUM_ANSWERS - len(tokens)]
return tokens
class PhocProcessor:
"""
Compute PHOC features from text tokens
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
self.config = config
self.PAD_INDEX = 0
self.PAD_TOKEN = "<pad>"
def _map_strings_to_indices(self, tokens):
length = min(len(tokens), self.max_length)
tokens = tokens[:length]
phoc_dim = 604
output = torch.full(
(self.max_length, phoc_dim),
fill_value=self.PAD_INDEX,
dtype=torch.float,
)
for idx, token in enumerate(tokens):
output[idx] = torch.from_numpy(build_phoc(token))
return output
def __call__(self, item):
indices = self._map_strings_to_indices(item["tokens"])
tokens, length = _pad_tokens(item["tokens"], self.PAD_TOKEN, self.max_length)
return {
"padded_phoc_features": indices,
"padded_tokens": tokens,
"length": length,
}
class CopyProcessor(BaseProcessor):
"""
Copy boxes from numpy array
"""
def __init__(self, config, *args, **kwargs):
self.max_length = config.max_length
def __call__(self, item):
blob = item["blob"]
final_blob = np.zeros((self.max_length,) + blob.shape[1:], blob.dtype)
final_blob[: len(blob)] = blob[: len(final_blob)]
return {"blob": torch.from_numpy(final_blob)}
def SpatialProcessor(pad_obj_ocr_bboxes):
adj_matrix = build_graph_using_normalized_boxes(
pad_obj_ocr_bboxes, distance_threshold=registry.distance_threshold
)
return adj_matrix
class BertTokenizerProcessor:
"""
Tokenize a text string with BERT tokenizer, using Tokenizer passed to the dataset.
"""
def __init__(self, config, tokenizer):
self.max_length = config.max_length
self.bert_tokenizer = tokenizer
# self.bert_tokenizer = BertTokenizer.from_pretrained('bert-base-uncased')
assert self.bert_tokenizer.encode(self.bert_tokenizer.pad_token) == [0]
def get_vocab_size(self):
return self.bert_tokenizer.vocab_size
def __call__(self, item):
# [PAD] in self.bert_tokenizer is zero (as checked in assert above)
token_inds = torch.zeros(self.max_length, dtype=torch.long)
indices = self.bert_tokenizer.encode(item["question"], add_special_tokens=True)
indices = indices[: self.max_length]
token_inds[: len(indices)] = torch.tensor(indices)
token_num = torch.tensor(len(indices), dtype=torch.long)
tokens_mask = torch.zeros(self.max_length, dtype=torch.long)
tokens_mask[: len(indices)] = 1
results = {
"token_inds": token_inds,
"token_num": token_num,
"tokens_mask": tokens_mask,
}
return results
class M4CAnswerProcessor:
"""
Process a TextVQA answer for iterative decoding in SAM4C.
# (YK): Modified to activate logits of the same word in ocr/vocabulary in targets.
"""
def __init__(self, config, *args, **kwargs):
if config.vocab_type == "5k":
self.answer_vocab = VocabDict(
registry["Vocabs"]["vocab5k"], *args, **kwargs
)
elif config.vocab_type == "5k_stvqa":
self.answer_vocab = VocabDict(
registry["Vocabs"]["vocab5k_stvqa"], *args, **kwargs
)
else:
raise ValueError
self.PAD_IDX = self.answer_vocab.word2idx("<pad>")
self.BOS_IDX = self.answer_vocab.word2idx("<s>")
self.EOS_IDX = self.answer_vocab.word2idx("</s>")
self.UNK_IDX = self.answer_vocab.UNK_INDEX
registry.PAD_IDX = self.answer_vocab.word2idx("<pad>")
registry.BOS_IDX = self.answer_vocab.word2idx("<s>")
registry.EOS_IDX = self.answer_vocab.word2idx("</s>")
registry.UNK_IDX = self.answer_vocab.UNK_INDEX
registry.answer_vocab = self.answer_vocab
# make sure PAD_IDX, BOS_IDX and PAD_IDX are valid (not <unk>)
assert self.PAD_IDX != self.answer_vocab.UNK_INDEX
assert self.BOS_IDX != self.answer_vocab.UNK_INDEX
assert self.EOS_IDX != self.answer_vocab.UNK_INDEX
assert self.PAD_IDX == 0
self.num_answers = config.num_answers
self.max_ocr_tokens = config.max_ocr_tokens
self.max_copy_steps = config.max_copy_steps
assert self.max_copy_steps >= 1
def match_answer_to_vocab_ocr_seq(
self, answer, vocab2idx_dict, ocr2inds_dict, max_match_num=20
):
"""
Match an answer to a list of sequences of indices
each index corresponds to either a fixed vocabulary or an OCR token
(in the index address space, the OCR tokens are after the fixed vocab)
"""
num_vocab = len(vocab2idx_dict)
answer_words = answer.split()
answer_word_matches = []
for word in answer_words:
# match answer word to fixed vocabulary
matched_inds = []
if word in vocab2idx_dict:
matched_inds.append(vocab2idx_dict.get(word))
# match answer word to OCR
# we put OCR after the fixed vocabulary in the answer index space
# so add num_vocab offset to the OCR index
matched_inds.extend([num_vocab + idx for idx in ocr2inds_dict[word]])
if len(matched_inds) == 0:
return []
answer_word_matches.append(matched_inds)
# expand per-word matched indices into the list of matched sequences
if len(answer_word_matches) == 0:
return []
idx_seq_list = [()]
for matched_inds in answer_word_matches:
idx_seq_list = [
seq + (idx,) for seq in idx_seq_list for idx in matched_inds
]
if len(idx_seq_list) > max_match_num:
idx_seq_list = idx_seq_list[:max_match_num]
return idx_seq_list
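    # Illustrative example (made-up indices): with num_vocab = 5000,
    # vocab2idx_dict = {"red": 17, "apple": 92} and OCR tokens
    # ["apple", "stop"] (so ocr2inds_dict["apple"] == [0]), the answer
    # "red apple" yields per-word matches [[17], [92, 5000]] and expands to
    # the candidate decoding sequences [(17, 92), (17, 5000)].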
def get_vocab_size(self):
answer_vocab_nums = self.answer_vocab.num_vocab
answer_vocab_nums += self.max_ocr_tokens
return answer_vocab_nums
def __call__(self, item):
answers = item["answers"]
item["context_tokens"] = item["context_tokens"][: self.max_ocr_tokens]
assert len(answers) == self.num_answers
assert len(self.answer_vocab) == len(self.answer_vocab.word2idx_dict)
# Step 1: calculate the soft score of ground-truth answers
gt_answers = list(enumerate(answers))
unique_answers = sorted(set(answers))
unique_answer_scores = [0] * len(unique_answers)
for idx, unique_answer in enumerate(unique_answers):
accs = []
for gt_answer in gt_answers:
other_answers = [item for item in gt_answers if item != gt_answer]
matching_answers = [
item for item in other_answers if item[1] == unique_answer
]
acc = min(1, float(len(matching_answers)) / 3)
accs.append(acc)
unique_answer_scores[idx] = sum(accs) / len(accs)
unique_answer2score = {
a: s for a, s in zip(unique_answers, unique_answer_scores)
}
# Step 2: fill the first step soft scores for tokens
scores = torch.zeros(
self.max_copy_steps, self.get_vocab_size(), dtype=torch.float
)
# match answers to fixed vocabularies and OCR tokens.
ocr2inds_dict = defaultdict(list)
for idx, token in enumerate(item["context_tokens"]):
ocr2inds_dict[token].append(idx)
answer_dec_inds = [
self.match_answer_to_vocab_ocr_seq(
a, self.answer_vocab.word2idx_dict, ocr2inds_dict
)
for a in answers
]
# Collect all the valid decoding sequences for each answer.
# This part (idx_seq_list) was pre-computed in imdb (instead of online)
# to save time
all_idx_seq_list = []
for answer, idx_seq_list in zip(answers, answer_dec_inds):
all_idx_seq_list.extend(idx_seq_list)
# fill in the soft score for the first decoding step
score = unique_answer2score[answer]
for idx_seq in idx_seq_list:
score_idx = idx_seq[0]
# the scores for the decoding Step 0 will be the maximum
# among all answers starting with that vocab
# for example:
# if "red apple" has score 0.7 and "red flag" has score 0.8
# the score for "red" at Step 0 will be max(0.7, 0.8) = 0.8
                scores[0, score_idx] = max(scores[0, score_idx], score)
# train_prev_inds is the previous prediction indices in auto-regressive
# decoding
train_prev_inds = torch.zeros(self.max_copy_steps, dtype=torch.long)
# train_loss_mask records the decoding steps where losses are applied
train_loss_mask = torch.zeros(self.max_copy_steps, dtype=torch.float)
train_acc_mask = torch.zeros(self.max_copy_steps, dtype=torch.float)
if len(all_idx_seq_list) > 0:
# sample a random decoding answer sequence for teacher-forcing
idx_seq = all_idx_seq_list[np.random.choice(len(all_idx_seq_list))]
dec_step_num = min(1 + len(idx_seq), self.max_copy_steps)
train_loss_mask[:dec_step_num] = 1.0
train_acc_mask[: dec_step_num - 1] = 1.0
train_prev_inds[0] = self.BOS_IDX
for t in range(1, dec_step_num):
train_prev_inds[t] = idx_seq[t - 1]
score_idx = idx_seq[t] if t < len(idx_seq) else self.EOS_IDX
# if item["question_id"] == 35909:
# import pdb
# pdb.set_trace()
# this means step 1:N have only one non-zero index
# this means there will be no case with EOS_IDX_SCORE and OTHER score non-zero together!
# gather indices from both ocr/vocabulary for the same word!
all_indices = self.get_all_indices(
ocr2inds_dict, item["context_tokens"], score_idx
)
assert self.UNK_IDX not in all_indices
for idx in all_indices:
scores[t, idx] = 1.0
# scores[t, score_idx] = 1.
else:
idx_seq = ()
answer_info = {
"answers": answers,
"targets": scores,
# 'sampled_idx_seq': [train_prev_inds.new(idx_seq)],
"train_prev_inds": train_prev_inds,
"train_loss_mask": train_loss_mask,
"train_acc_mask": train_acc_mask,
}
return answer_info
def get_all_indices(self, ocr2indices, ocr_tokens, score_idx):
return_indices = [score_idx]
if score_idx >= len(self.answer_vocab):
word = ocr_tokens[score_idx - len(self.answer_vocab)]
assert word != "<pad>"
vocab_idx = self.answer_vocab.word2idx(word)
if vocab_idx != self.UNK_IDX:
return_indices.append(vocab_idx)
else:
word = self.answer_vocab.idx2word(score_idx)
ocr_indices = [x + len(self.answer_vocab) for x in ocr2indices[word]]
return_indices.extend(ocr_indices)
return return_indices
class Processors:
"""
Contains static-processors used for processing question/ocr-tokens, image/ocr features,
decoding answer.
"""
def __init__(self, bert_tokenizer, vocab_type="4k", only_registry=False):
logger.info("Loading Processors")
logger.info(f"Vocab Type: {vocab_type}")
# decode-answers
answer_config = edict()
answer_config.max_copy_steps = 12
answer_config.num_answers = 10
answer_config.max_ocr_tokens = 50
answer_config.vocab_type = vocab_type
self.answer_processor = M4CAnswerProcessor(answer_config)
self.only_registry = only_registry
# Attach bert-tokenizer
registry["bert_tokenizer"] = bert_tokenizer
if only_registry:
logger.info("Only registry processor initialized")
return
# question
question_config = edict()
question_config.max_length = 20
self.bert_processor = BertTokenizerProcessor(question_config, bert_tokenizer)
# ocr-tokens
ocr_config = edict()
ocr_config.max_length = 50
self.fasttext_processor = FastTextProcessor(ocr_config)
self.phoc_processor = PhocProcessor(ocr_config)
@staticmethod
def word_cleaner(word):
word = word.lower()
word = word.replace(",", "").replace("?", "").replace("'s", " 's")
return word.strip()
@staticmethod
def word_cleaner_lower(word):
word = word.lower()
return word.strip()
|
the-stack_0_15217 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
# pylint: disable=protected-access
"""Utilities related to layer/model functionality.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import six
from tensorflow.python.keras import backend as K
from tensorflow.python.keras.utils.conv_utils import convert_kernel
from tensorflow.python.util import nest
from tensorflow.python.util import object_identity
from tensorflow.python.util.tf_export import keras_export
@keras_export('keras.utils.get_source_inputs')
def get_source_inputs(tensor, layer=None, node_index=None):
"""Returns the list of input tensors necessary to compute `tensor`.
Output will always be a list of tensors
(potentially with 1 element).
Arguments:
tensor: The tensor to start from.
layer: Origin layer of the tensor. Will be
determined via tensor._keras_history if not provided.
node_index: Origin node index of the tensor.
Returns:
List of input tensors.
"""
if not hasattr(tensor, '_keras_history'):
return tensor
if layer is None or node_index:
layer, node_index, _ = tensor._keras_history
if not layer._inbound_nodes:
return [tensor]
else:
node = layer._inbound_nodes[node_index]
if not node.inbound_layers:
# Reached an Input layer, stop recursion.
return nest.flatten(node.input_tensors)
else:
source_tensors = []
for layer, node_index, _, tensor in node.iterate_inbound():
previous_sources = get_source_inputs(tensor, layer, node_index)
# Avoid input redundancy.
for x in previous_sources:
if all(x is not t for t in source_tensors):
source_tensors.append(x)
return source_tensors
def validate_string_arg(input_data,
allowable_strings,
layer_name,
arg_name,
allow_none=False,
allow_callables=False):
"""Validates the correctness of a string-based arg."""
if allow_none and input_data is None:
return
elif allow_callables and callable(input_data):
return
elif isinstance(input_data,
six.string_types) and input_data in allowable_strings:
return
else:
allowed_args = '`None`, ' if allow_none else ''
allowed_args += 'a `Callable`, ' if allow_callables else ''
allowed_args += 'or one of the following values: %s' % allowable_strings
raise ValueError(("%s's %s arg received an invalid value %s. " +
'Allowed values are %s.') %
(layer_name, arg_name, input_data, allowed_args))
def count_params(weights):
"""Count the total number of scalars composing the weights.
Arguments:
weights: An iterable containing the weights on which to compute params
Returns:
The total number of scalars composing the weights
"""
unique_weights = object_identity.ObjectIdentitySet(weights)
weight_shapes = [w.shape.as_list() for w in unique_weights]
standardized_weight_shapes = [
[0 if w_i is None else w_i for w_i in w] for w in weight_shapes
]
return int(sum(np.prod(p) for p in standardized_weight_shapes))
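# Example of what count_params returns (illustrative only, not part of the
# public API of this module): a built Dense layer mapping 3 inputs to 4 units
# holds a (3, 4) kernel and a (4,) bias, so
#   count_params(layer.weights)  # == 3 * 4 + 4 == 16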
def print_summary(model, line_length=None, positions=None, print_fn=None):
"""Prints a summary of a model.
Arguments:
model: Keras model instance.
line_length: Total length of printed lines
(e.g. set this to adapt the display to different
terminal window sizes).
positions: Relative or absolute positions of log elements in each line.
If not provided, defaults to `[.33, .55, .67, 1.]`.
print_fn: Print function to use.
It will be called on each line of the summary.
You can set it to a custom function
in order to capture the string summary.
It defaults to `print` (prints to stdout).
"""
if print_fn is None:
print_fn = print
if model.__class__.__name__ == 'Sequential':
sequential_like = True
elif not model._is_graph_network:
# We treat subclassed models as a simple sequence of layers, for logging
# purposes.
sequential_like = True
else:
sequential_like = True
nodes_by_depth = model._nodes_by_depth.values()
nodes = []
for v in nodes_by_depth:
if (len(v) > 1) or (len(v) == 1 and
len(nest.flatten(v[0].inbound_layers)) > 1):
# if the model has multiple nodes
# or if the nodes have multiple inbound_layers
# the model is no longer sequential
sequential_like = False
break
nodes += v
if sequential_like:
# search for shared layers
for layer in model.layers:
flag = False
for node in layer._inbound_nodes:
if node in nodes:
if flag:
sequential_like = False
break
else:
flag = True
if not sequential_like:
break
if sequential_like:
line_length = line_length or 65
positions = positions or [.45, .85, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #']
else:
line_length = line_length or 98
positions = positions or [.33, .55, .67, 1.]
if positions[-1] <= 1:
positions = [int(line_length * p) for p in positions]
# header names for the different log elements
to_display = ['Layer (type)', 'Output Shape', 'Param #', 'Connected to']
relevant_nodes = []
for v in model._nodes_by_depth.values():
relevant_nodes += v
def print_row(fields, positions):
line = ''
for i in range(len(fields)):
if i > 0:
line = line[:-1] + ' '
line += str(fields[i])
line = line[:positions[i]]
line += ' ' * (positions[i] - len(line))
print_fn(line)
print_fn('Model: "{}"'.format(model.name))
print_fn('_' * line_length)
print_row(to_display, positions)
print_fn('=' * line_length)
def print_layer_summary(layer):
"""Prints a summary for a single layer.
Arguments:
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
except RuntimeError: # output_shape unknown in Eager mode.
output_shape = '?'
name = layer.name
cls_name = layer.__class__.__name__
fields = [name + ' (' + cls_name + ')', output_shape, layer.count_params()]
print_row(fields, positions)
def print_layer_summary_with_connections(layer):
"""Prints a summary for a single layer (including topological connections).
Arguments:
layer: target layer.
"""
try:
output_shape = layer.output_shape
except AttributeError:
output_shape = 'multiple'
connections = []
for node in layer._inbound_nodes:
if relevant_nodes and node not in relevant_nodes:
# node is not part of the current network
continue
for inbound_layer, node_index, tensor_index, _ in node.iterate_inbound():
connections.append('{}[{}][{}]'.format(inbound_layer.name, node_index,
tensor_index))
name = layer.name
cls_name = layer.__class__.__name__
if not connections:
first_connection = ''
else:
first_connection = connections[0]
fields = [
name + ' (' + cls_name + ')', output_shape,
layer.count_params(), first_connection
]
print_row(fields, positions)
if len(connections) > 1:
for i in range(1, len(connections)):
fields = ['', '', '', connections[i]]
print_row(fields, positions)
layers = model.layers
for i in range(len(layers)):
if sequential_like:
print_layer_summary(layers[i])
else:
print_layer_summary_with_connections(layers[i])
if i == len(layers) - 1:
print_fn('=' * line_length)
else:
print_fn('_' * line_length)
model._check_trainable_weights_consistency()
if hasattr(model, '_collected_trainable_weights'):
trainable_count = count_params(model._collected_trainable_weights)
else:
trainable_count = count_params(model.trainable_weights)
non_trainable_count = count_params(model.non_trainable_weights)
print_fn('Total params: {:,}'.format(trainable_count + non_trainable_count))
print_fn('Trainable params: {:,}'.format(trainable_count))
print_fn('Non-trainable params: {:,}'.format(non_trainable_count))
print_fn('_' * line_length)
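# Usage note (illustrative sketch): a custom `print_fn` can capture the summary
# text instead of printing it, e.g.
#   lines = []
#   print_summary(model, print_fn=lines.append)
#   summary_text = '\n'.join(lines)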
def gather_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected trainable weights/variables.
"""
if not trainable:
return []
weights = []
for layer in sub_layers:
weights += layer.trainable_weights
trainable_extra_variables = [
v for v in extra_variables if v.trainable]
return weights + trainable_extra_variables
def gather_non_trainable_weights(trainable, sub_layers, extra_variables):
"""Lists the non-trainable weights for an object with sub-layers.
Args:
trainable: Whether the object collecting the variables is trainable.
sub_layers: A flat list of Layer objects owned by this object, to collect
variables from.
extra_variables: Any extra variables to include. Their `.trainable` property
is used to categorize them.
Returns:
A list of collected non-trainable weights/variables.
"""
trainable_extra_variables = []
non_trainable_extra_variables = []
for v in extra_variables:
if v.trainable:
trainable_extra_variables.append(v)
else:
non_trainable_extra_variables.append(v)
weights = []
for layer in sub_layers:
weights += layer.non_trainable_weights
if not trainable:
trainable_weights = []
for layer in sub_layers:
trainable_weights += layer.trainable_weights
return (trainable_weights + trainable_extra_variables
+ weights + non_trainable_extra_variables)
return weights + non_trainable_extra_variables
@keras_export('keras.utils.convert_all_kernels_in_model')
def convert_all_kernels_in_model(model):
"""Converts all convolution kernels in a model from Theano to TensorFlow.
Also works from TensorFlow to Theano.
Arguments:
model: target model for the conversion.
"""
# Note: SeparableConvolution not included
# since only supported by TF.
conv_classes = {
'Conv1D',
'Conv2D',
'Conv3D',
'Conv2DTranspose',
}
to_assign = []
for layer in model.layers:
if layer.__class__.__name__ in conv_classes:
original_kernel = K.get_value(layer.kernel)
converted_kernel = convert_kernel(original_kernel)
to_assign.append((layer.kernel, converted_kernel))
K.batch_set_value(to_assign)
def convert_dense_weights_data_format(dense,
previous_feature_map_shape,
target_data_format='channels_first'):
"""Utility useful when changing a convnet's `data_format`.
When porting the weights of a convnet from one data format to the other,
if the convnet includes a `Flatten` layer
(applied to the last convolutional feature map)
followed by a `Dense` layer, the weights of that `Dense` layer
should be updated to reflect the new dimension ordering.
Arguments:
dense: The target `Dense` layer.
previous_feature_map_shape: A shape tuple of 3 integers,
e.g. `(512, 7, 7)`. The shape of the convolutional
feature map right before the `Flatten` layer that
came before the target `Dense` layer.
target_data_format: One of "channels_last", "channels_first".
Set it "channels_last"
if converting a "channels_first" model to "channels_last",
or reciprocally.
"""
assert target_data_format in {'channels_last', 'channels_first'}
kernel, bias = dense.get_weights()
for i in range(kernel.shape[1]):
if target_data_format == 'channels_first':
c, h, w = previous_feature_map_shape
original_fm_shape = (h, w, c)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (2, 0, 1)) # last -> first
else:
h, w, c = previous_feature_map_shape
original_fm_shape = (c, h, w)
ki = kernel[:, i].reshape(original_fm_shape)
ki = np.transpose(ki, (1, 2, 0)) # first -> last
kernel[:, i] = np.reshape(ki, (np.prod(previous_feature_map_shape),))
dense.set_weights([kernel, bias])
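# Sketch of the conversion above (hypothetical shapes): flattening a
# channels_last feature map of shape (7, 7, 512) orders each Dense kernel
# column as h * w * c; porting that Dense layer to a channels_first model
# therefore reshapes each column to (7, 7, 512), transposes it to (512, 7, 7),
# and flattens it back, which is what the loop over kernel columns does.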
def is_builtin_layer(layer):
if not getattr(layer, '_keras_api_names', None):
return False
# Subclasses of `Layer` that are not exported inherit the export name
# of the base layer class.
return (layer._keras_api_names != ('keras.layers.Layer',) and
layer._keras_api_names_v1 != ('keras.layers.Layer',))
|
the-stack_0_15219 | import vcf
import argparse
from pyfaidx import Fasta
from Bio import SeqIO
from Bio.SeqRecord import SeqRecord
from Bio.Seq import MutableSeq
from Bio.Seq import Seq
parser = argparse.ArgumentParser(description='Extract ref sequence and variants for a cluster')
parser.add_argument('-f', help='the reference genome fasta', required=True)
parser.add_argument('-v', help='the input VCF file.', required=True)
parser.add_argument('-t', help='the TSV with cluster information.', required=True)
parser.add_argument('-c', help='the cluster to extract.', required=True)
parser.add_argument('-ov', help='the output VCF file', required=True)
parser.add_argument('-of', help='the output FASTA file', required=True)
args = parser.parse_args()
## extract cluster information from tsv
tsv_in = open(args.t, 'r')
chr_name = ''
start_pos = 0
end_pos = 0
svids = []
for line in tsv_in:
line = line.rstrip().split('\t')
if(line[0] == args.c):
chr_name = line[1]
start_pos = int(line[2])
end_pos = int(line[3])
svids = line[4].split(',')
# retrieve reference sequence of the region
# Open reference fasta
ref = Fasta(args.f)
reg_seq = ref[chr_name][start_pos:end_pos]
reg_seq = reg_seq.seq
# read vcf
vcfi = open(args.v, 'r')
vcf_reader = vcf.Reader(vcfi)
vcf_out = []
for record in vcf_reader:
# skip if not in variants of interest
if(record.ID not in svids):
continue
# make a VCF record
var_pos = record.POS - start_pos
rec = [chr_name, str(var_pos), record.ID, str(record.REF), str(record.ALT[0]),
'.', '.', '.']
rec = '\t'.join(rec)
vcf_out.append(rec)
vcfi.close()
# write VCF
# VCF header
vcf_h = '##fileformat=VCFv4.2\n'
vcf_h += '##contig=<ID={},length={}>\n'.format(chr_name, len(reg_seq))
vcf_h += '#CHROM\tPOS\tID\tREF\tALT\tQUAL\tFILTER\tINFO\n'
with open(args.ov, 'w') as outf:
outf.write(vcf_h + '\n'.join(vcf_out))
# write FASTA with the reference sequence
fa_rec = SeqRecord(MutableSeq(reg_seq.upper()), id=chr_name,
description='cl_' + args.c)
# write fasta
SeqIO.write(fa_rec, args.of, "fasta")
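# Example invocation (hypothetical file names, shown for illustration only):
#   python extract_cluster_region.py -f ref.fa -v svs.vcf -t clusters.tsv \
#       -c 12 -ov cl_12.vcf -of cl_12.fa
# writes the cluster-12 reference slice to cl_12.fa and the re-anchored
# variant records (positions shifted by the cluster start) to cl_12.vcf.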
|
the-stack_0_15220 | #!/usr/bin/env conda run -n py27Env python2.7
# -*- coding: utf-8 -*-
"""
Hyperalign on one half of a hyperscanning task and look
for improvements in leave-one-out ISC in the other half.
"""
import numpy as np
import pickle
from mvpa2.suite import *
import time
import glob
import sys
sys.path.append('/dartfs-hpc/rc/lab/W/WheatleyT/f00589z/hyperscanning/support_scripts/')
from phaseScramble_2 import *
from scipy import stats
def main():
print('\nlets hyperalign\n')
# define hyperscanning task descriptions
taskDescrips = ['storytelling_independent',
'storytelling_joint',
'listening',
'reading']
# parameters
debug = False
task = 4 # see task descriptions above
radius = 3 # number of voxels in hyperalignment searchlight radius
sparse_radius = 3 # number of voxels between neighboring searchlight spheres
nproc = 10 # number of parallel processes to feed into hyperalignment function
# set dataset labels
dsLabels = ['train','test']
# set base folder
baseFolder = '/dartfs-hpc/rc/lab/W/WheatleyT/f00589z/hyperscanning/preprocessing/hyperalignment/'
if debug:
datasetFile = baseFolder + 'datasets/debug_' + taskDescrips[task]
else:
datasetFile = baseFolder + 'datasets/' + taskDescrips[task]
# load training and testing data
ds_all = h5load(datasetFile)
# get training and testing sample indices (half and half)
order = 0 # 0 = train on first half, test on second, 1 = the opposite
halfSampleNum = np.round(ds_all[0].nsamples / 2)
sampleInds = [[]] * 2
if order == 0:
sampleInds[0] = np.arange(halfSampleNum) # training sample indices
sampleInds[1] = np.arange(halfSampleNum,ds_all[0].nsamples,1) # testing sample indices
else:
sampleInds[0] = np.arange(halfSampleNum, ds_all[0].nsamples, 1) # training sample indices
sampleInds[1] = np.arange(halfSampleNum) # testing sample indices
# get number of subjects in full dataset
numSubs = len(ds_all)
# split up into training and testing datasets
ds = [[]] * 2 # initialize
for DS in range(len(ds)): # for each data set (0=training, 1=testing)
ds[DS] = [[]] * numSubs # initialize
for SUB in range(numSubs): # for each subject
ds[DS][SUB] = ds_all[SUB][sampleInds[DS],:]
ds[DS][SUB].samples = stats.zscore(ds[DS][SUB].samples, axis=0)
# verify that subject ID lists are identical between training and testing sets
# for each dataset...
EPIdata = [[]] * 2
corrData = [[]] * 2
medCorr = [[]] * 2
for DS in range(2):
# get number of subjects
numSubs = len(ds[DS])
# get EPI dimensions (samples x voxels)
dims = np.array(ds[DS][0].__array__().shape)
# initialize raw EPI data array
EPIdata[DS] = np.empty([dims[0], dims[1], len(ds[DS])])
# initialize pre-hyperalignment ISC coefficient array (subs x voxels)
corrData[DS] = np.empty([numSubs, dims[1]])
# for each subject...
for SUB in range(numSubs):
# get EPI data
EPIdata[DS][:,:,SUB] = ds[DS][SUB].__array__()
# for each subject...
for SUB in range(numSubs):
# get mean of data from all participants EXCEPT the current participant
otherSubs = np.arange(0, numSubs)
otherSubs = np.delete(otherSubs, SUB)
groupMean = np.mean(EPIdata[DS][:,:,otherSubs], axis=2)
# get correlation between current participant and groupMean
corrData[DS][SUB, :] = fastColumnCorr(EPIdata[DS][:,:,SUB], groupMean)
# get median ISC across participants
medCorr[DS] = np.median(corrData[DS], axis=0)
print('mean (across voxels) median (across subs) corr in ' + dsLabels[DS] + ' set BEFORE hyperalignment: ' + str(np.round(np.mean(medCorr[DS]),3)))
# we call SearchlightHyperalignment mostly with default values:
# each sphere has a radius of 3 voxels, sphere centers are also 3 voxels apart,
# all voxels in a given sphere are used for alignment
slhyper = SearchlightHyperalignment(radius=radius,
sparse_radius=sparse_radius,
nproc=nproc)
# call the hyperalignment object with the full dataset we have,
# resulting mappers will be stored in slhypmaps
slhyperStart = time.time()
slhypmaps = slhyper(ds[0])
print('\nHyperalignment took ' + str(time.time() - slhyperStart) + ' secs')
# compute post-hyperalignment metrics
ds_hyper = [[]] * 2
EPIdata_hyper = [[]] * 2
corrData_hyper = [[]] * 2
medCorr_hyper = [[]] * 2
for DS in range(2):
# Applying hyperalignment parameters is similar to applying any mapper in
# PyMVPA. We apply the hyperalignment parameters by running the dataset
# through the forward() function of the mapper.
ds_hyper[DS] = [h.forward(sd) for h, sd in zip(slhypmaps, ds[DS])]
# get EPI dimensions (samples x voxels)
dims = np.array(ds_hyper[DS][0].__array__().shape)
# initialize raw EPI data array
EPIdata_hyper[DS] = np.empty([dims[0], dims[1], len(ds_hyper[DS])])
# initialize pre-hyperalignment ISC coefficient array (subs x voxels)
corrData_hyper[DS] = np.empty([numSubs, dims[1]])
# for each subject...
for SUB in range(numSubs):
# get EPI data
EPIdata_hyper[DS][:, :, SUB] = ds_hyper[DS][SUB].__array__()
# for each subject...
for SUB in range(numSubs):
# get mean of data from all participants EXCEPT the current participant
otherSubs = np.arange(0, numSubs)
otherSubs = np.delete(otherSubs, SUB)
groupMean = np.mean(EPIdata_hyper[DS][:, :, otherSubs], axis=2)
# get correlation between current participant and groupMean
corrData_hyper[DS][SUB, :] = fastColumnCorr(EPIdata_hyper[DS][:, :, SUB], groupMean)
# get median ISC across participants
medCorr_hyper[DS] = np.median(corrData_hyper[DS], axis=0)
        print('mean (across voxels) median (across subs) corr in ' + dsLabels[
            DS] + ' set AFTER hyperalignment: ' + str(np.round(np.mean(medCorr_hyper[DS]), 3)))
# save name
if task == 3:
saveFile = baseFolder + 'results/listening_5050'
else:
saveFile = baseFolder + 'results/reading_5050'
if debug:
saveFile = saveFile + '_debug'
print('saving files to: ')
print(saveFile + '_med_corr_pre_hyp')
print(saveFile + '_med_corr_post_hyp')
# save median correlation array
np.save(saveFile + '_med_corr_pre_hyp', medCorr)
np.save(saveFile + '_med_corr_post_hyp',medCorr_hyper)
# print('saving output...')
# with open(saveFile + '.pkl', 'wb') as f:
# pickle.dump([medCorr, medCorr_hyper], f, protocol=2)
# print('output saved here: ' + saveFile + '.pkl')
# save mapping
h5save(saveFile + '_hyperMappings.hd5', slhypmaps)
print('yay')
if __name__ == '__main__':
main()
|
the-stack_0_15222 | # Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
r"""Experimental Keras MNIST Example.
To test on CPU:
python mnist.py --use_tpu=False [--fake_data=true]
To test on TPU:
python mnist.py --use_tpu=True [--tpu=$TPU_NAME]
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Standard Imports
from absl import app
from absl import flags
import numpy as np
import tensorflow as tf
from tensorflow.contrib import cluster_resolver as contrib_cluster_resolver
from tensorflow.contrib import distribute as contrib_distribute
# TODO(sourabhbajaj): Remove the need for this flag.
flags.DEFINE_bool('use_tpu', True,
'Ignored: preserved for backward compatibility.')
flags.DEFINE_string('tpu', '', 'Name of the TPU to use.')
flags.DEFINE_string(
'model_dir', None,
('The directory where the model and training/evaluation summaries '
'are stored. If unset, no summaries will be stored.'))
flags.DEFINE_bool('fake_data', False, 'Use fake data to test functionality.')
# Batch size should satify two properties to be able to run in cloud:
# num_eval_samples % batch_size == 0
# batch_size % 8 == 0
BATCH_SIZE = 200
NUM_CLASSES = 10
EPOCHS = 15
# input image dimensions
IMG_ROWS, IMG_COLS = 28, 28
FLAGS = flags.FLAGS
def mnist_model(input_shape):
"""Creates a MNIST model."""
model = tf.keras.models.Sequential()
model.add(
tf.keras.layers.Conv2D(
32, kernel_size=(3, 3), activation='relu', input_shape=input_shape))
model.add(tf.keras.layers.Conv2D(64, (3, 3), activation='relu'))
model.add(tf.keras.layers.MaxPooling2D(pool_size=(2, 2)))
model.add(tf.keras.layers.Dropout(0.25))
model.add(tf.keras.layers.Flatten())
model.add(tf.keras.layers.Dense(128, activation='relu'))
model.add(tf.keras.layers.Dropout(0.5))
model.add(tf.keras.layers.Dense(NUM_CLASSES, activation='softmax'))
return model
def run():
"""Run the model training and return evaluation output."""
resolver = contrib_cluster_resolver.TPUClusterResolver(tpu=FLAGS.tpu)
contrib_distribute.initialize_tpu_system(resolver)
strategy = contrib_distribute.TPUStrategy(resolver, steps_per_run=100)
if FLAGS.fake_data:
print('Using fake data')
x_train = np.random.random((BATCH_SIZE, IMG_ROWS, IMG_COLS))
y_train = np.zeros([BATCH_SIZE, 1], dtype=np.int32)
x_test, y_test = x_train, y_train
else:
# the data, split between train and test sets
print('Using real data')
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
x_train = x_train.reshape(x_train.shape[0], IMG_ROWS, IMG_COLS, 1)
x_test = x_test.reshape(x_test.shape[0], IMG_ROWS, IMG_COLS, 1)
input_shape = (IMG_ROWS, IMG_COLS, 1)
x_train = x_train.astype('float32')
x_test = x_test.astype('float32')
x_train /= 255
x_test /= 255
print('x_train shape:', x_train.shape)
print(x_train.shape[0], 'train samples')
print(x_test.shape[0], 'test samples')
# convert class vectors to binary class matrices
y_train = tf.keras.utils.to_categorical(y_train, NUM_CLASSES)
y_test = tf.keras.utils.to_categorical(y_test, NUM_CLASSES)
with strategy.scope():
model = mnist_model(input_shape)
model.compile(
loss=tf.keras.losses.categorical_crossentropy,
optimizer=tf.train.GradientDescentOptimizer(learning_rate=0.05),
metrics=['accuracy'])
callbacks = []
if FLAGS.model_dir:
callbacks = [tf.keras.callbacks.TensorBoard(log_dir=FLAGS.model_dir)]
model.fit(
x_train,
y_train,
batch_size=BATCH_SIZE,
callbacks=callbacks,
epochs=EPOCHS,
verbose=1,
validation_data=(x_test, y_test))
return model.evaluate(x_test, y_test, batch_size=BATCH_SIZE, verbose=1)
def main(unused_dev):
score = run()
print('Loss for final step: %s;' % score[0])
print('Accuracy: %s;' % score[1])
if __name__ == '__main__':
tf.logging.set_verbosity(tf.logging.INFO)
app.run(main)
|
the-stack_0_15225 |
'''
Extract info from a call to the clean task from CASA logs.
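
Example (a sketch; the log file names are hypothetical)::

    results = CleanResults("my_clean_run.log")
    results.run_all()
    print(results.info_dict())

    # Or summarise several logs into a CSV table:
    collect_clean_results(["run1.log", "run2.log"], filename="clean_results.csv")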
'''
import re
from itertools import izip
from datetime import datetime
from astropy import units as u
import numpy as np
from astropy.table import Table
# Define some strings for re
all_time_date = r"^[0-9]{4}-[0-9]{2}-[0-9]{2}\s[0-9]{2}:[0-9]{2}:[0-9]{2}\s"
casa_datetime_format = r'%Y-%m-%d %H:%M:%S'
info = r"INFO\s"
warn = r"WARN\s"
err = r"ERROR\s"
numbers = r"[-+]?\d*\.\d+|\d+"
def collect_clean_results(log_files, filename=None, format='ascii.csv',
show_in_browser=False):
'''
Loop through the list of given log files, extract results from the clean
calls, and save as a csv file.
Parameters
----------
log_files : list or np.ndarray
List or array of the log file names.
filename : str, optional
Name of file to save with clean results. If None is given, no file is
saved.
format : str of filetype
Filetype to save the table as. See the list of writers available for
`~astropy.table` here:
`http://docs.astropy.org/en/stable/io/unified.html#built-in-readers-writers`_
show_in_browser : bool, optional
Displays the table in a web browser.
'''
results_dict = {"Name": [],
"Reached Threshold": [],
"Max Residual": [],
"Iterations": [],
"Time Elapsed": []}
for i, log in enumerate(log_files):
results = CleanResults(log)
try:
results.run_all()
# Extract units
bright_unit = results.max_residuals.unit
time_unit = results.time_elapsed.unit
results_dict["Name"].append(log.rstrip(".log"))
results_dict["Reached Threshold"].append(results.finished)
results_dict["Max Residual"].append(results.max_residuals.value)
results_dict["Iterations"].append(results.niters)
results_dict["Time Elapsed"].append(results.time_elapsed.value)
except Warning as e:
print("Failed for log: " + log)
print(e)
results_dict["Name"].append(log.rstrip(".log"))
results_dict["Reached Threshold"].append(False)
results_dict["Max Residual"].append(np.NaN)
results_dict["Iterations"].append(0)
results_dict["Time Elapsed"].append(np.NaN)
# Add units back on
results_dict["Max Residual"] *= bright_unit
results_dict["Time Elapsed"] *= time_unit
# Now gather into a table.
t = Table(results_dict.values(), names=results_dict.keys())
if filename is not None:
t.write(filename, format=format)
if show_in_browser:
t.show_in_browser()
class CleanResults(object):
"""
Read the results of running clean from a log file.
Parameters
----------
filename : str
Name of the log file to search.
"""
def __init__(self, filename):
self.filename = filename
self._lines = load_log(filename)
        self._line_ranges = None
        self._max_residuals = None
        self._time_elapsed = None
        self._finished_calls = None
        self._niters = None
        self._error = None
@property
def lines(self):
return self._lines
def search_log(self, expression, view=None, return_linenum=True):
'''
Search through the log for a given expression.
Return the matched lines.
'''
re_express = re.compile(expression)
if view is None:
view = slice(None)
view = fill_in_slice(view, len(self.lines))
linenum_gen = xrange(view.start, view.stop, view.step)
matched_lines = []
matched_line_nums = []
for i, line in izip(linenum_gen, self.lines[view]):
search = re_express.search(line)
if search:
matched_lines.append(line)
if return_linenum:
matched_line_nums.append(i)
if not matched_lines:
Warning("No matches found.")
if len(matched_lines) == 1:
if return_linenum:
return matched_lines[0], matched_line_nums[0]
return matched_lines[0]
if return_linenum:
return zip(matched_lines, matched_line_nums)
return matched_lines
@property
def finished(self):
'''
Did CLEAN reach the given threshold?
'''
return self._finished_calls
def get_finished(self):
finish_re = all_time_date + info + \
"*MFMSCleanImageSkyModel::solve\s*Reached*"
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
finish_match = \
self.search_log(finish_re, view=slice(start, stop))
self._finished_calls = False if not finish_match else True
else:
finished_calls = []
for clean_range in self.line_ranges:
start, stop = clean_range
finish_match = self.search_log(finish_re,
view=slice(start, stop))
if not finish_match:
finished_calls.append(False)
else:
finished_calls.append(True)
self._finished_calls = finished_calls
@property
def line_ranges(self):
return self._line_ranges
def get_line_ranges(self):
'''
Find the beginning and end of CLEAN.
'''
start_re = all_time_date + info + \
"*clean::::.\s####.*Begin Task: clean.*"
stop_re = all_time_date + info + "*clean::::\s####.*End Task: clean.*"
start_search = self.search_log(start_re)
if start_search:
start_lines = start_search[1]
self._error = False
else:
raise Warning("Could not find CASA clean call in log.")
self._error = True
stop_search = self.search_log(stop_re)
if stop_search:
stop_lines = stop_search[1]
self._error = False
else:
Warning("Could not find end to clean call. "
"An error likely occurred in CASA. "
"Setting the end to the final log line.")
stop_lines = len(self.lines) - 1
self._error = True
# If they aren't equal, there was an error (no end line)
# Must be the last clean call, since casa always crashes
# in my experience.
try:
if len(start_lines) != len(stop_lines):
Warning("One of the CLEAN class failed.")
self._error = True
start_lines.pop(-1)
self._line_ranges = zip(start_lines, stop_lines)
except TypeError:
self._line_ranges = [start_lines, stop_lines]
self._error = False
@property
def error(self):
return self._error
@property
def time_elapsed(self):
return self._time_elapsed
def get_time_elapsed(self, output_unit=u.min):
'''
Find the time needed for CLEAN to run.
'''
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
start_time = datetime.strptime(casa_time(self.lines[start]),
casa_datetime_format)
stop_time = datetime.strptime(casa_time(self.lines[stop]),
casa_datetime_format)
self._time_elapsed = \
time_difference(start_time, stop_time, output_unit=output_unit)
else:
self._time_elapsed = []
for clean_range in self.line_ranges:
start, stop = clean_range
start_time = datetime.strptime(casa_time(self.lines[start]),
casa_datetime_format)
stop_time = datetime.strptime(casa_time(self.lines[stop]),
casa_datetime_format)
diff_time = \
time_difference(start_time, stop_time,
output_unit=output_unit)
self._time_elapsed.append(diff_time)
@property
def max_residuals(self):
return self._max_residuals
def get_max_residuals(self):
res_re = all_time_date + info + \
"*MFMSCleanImageSkyModel::solve\s*Final maximum*"
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
res_match = \
self.search_log(res_re, view=slice(start, stop))
if not res_match:
Warning("Could not find final residual value.")
self._max_residuals = np.NaN * u.Jy / u.beam
else:
self._max_residuals = \
float(re.findall(numbers, res_match[
0])[-1]) * u.Jy / u.beam
else:
self._max_residuals = []
for clean_range in self.line_ranges:
start, stop = clean_range
res_match = \
self.search_log(res_re, view=slice(start, stop))
if not res_match:
Warning("Could not find final residual value.")
self._max_residuals.append(np.NaN * u.Jy / u.beam)
else:
                    residual = \
                        float(re.findall(numbers, res_match[0])[-1]) * u.Jy / u.beam
self._max_residuals.append(residual)
@property
def niters(self):
return self._niters
def get_niters(self):
iter_re = all_time_date + info + \
"*MFMSCleanImageSkyModel::solve\s*Clean used*"
if not self.line_ranges:
self.get_line_ranges()
if isinstance(self.line_ranges[0], int):
start, stop = self.line_ranges
iter_match = \
self.search_log(iter_re, view=slice(start, stop),
return_linenum=False)
if not iter_match:
Warning("Could not find number of iterations used.")
self._niters = np.NaN
else:
# Take the last one, since it is printed out for each
# major cycle.
if isinstance(iter_match, list):
last_match = iter_match[-1]
else:
last_match = iter_match
self._niters = \
int(re.findall(numbers, last_match)[-1])
else:
self._niters = []
for clean_range in self.line_ranges:
start, stop = clean_range
iter_match = \
self.search_log(iter_re, view=slice(start, stop),
return_linenum=False)
if not iter_match:
Warning("Could not find number of iterations used.")
self._niters.append(np.NaN)
else:
if isinstance(iter_match, list):
last_match = iter_match[-1]
else:
last_match = iter_match
iters = \
int(re.findall(numbers, last_match)[-1])
                    self._niters.append(iters)
def run_all(self, time_unit=u.min):
self.get_line_ranges()
self.get_finished()
self.get_max_residuals()
self.get_time_elapsed(output_unit=time_unit)
self.get_niters()
def info_dict(self):
if isinstance(self.line_ranges[0], int):
return {"Finished": self.finished,
"Max Residual": self.max_residuals,
"Time Elapsed": self.time_elapsed,
"Iterations": self.niters}
else:
results_dicts = []
for i in xrange(len(self.line_ranges[0])):
results_dicts.append(
{"Finished": self.finished[i],
"Max Residual": self.max_residuals[i],
"Time Elapsed": self.time_elapsed[i],
"Iterations": self.niters[i]})
return results_dicts
def __repr__(self):
if isinstance(self.line_ranges[0], int):
return "Finished: " + str(self.finished) + "\nMax Residual: " + \
str(self.max_residuals) + "\nIterations: " + \
str(self.niters) + "\nTime Elapsed: " + \
str(self.time_elapsed.round(3))
else:
for i in xrange(len(self.line_ranges[0])):
return "Clean " + str(i + 1) + " Finished: " + \
str(self.finished[i]) + "\n Max Residual: " + \
str(self.max_residuals[i]) + "\n Iterations: " + \
str(self.niters[i]) + "\n Time Elapsed: " + \
str(self.time_elapsed[i].round(3))
def load_log(logfile):
'''
Load the lines of a log file in.
'''
with open(logfile) as f:
lines = f.readlines()
return lines
def fill_in_slice(view, list_len):
'''
To keep track of lines in the log, fill in
undefined slice parameters with defaults.
'''
if not view.start:
start = 0
else:
start = view.start
if not view.stop:
stop = list_len
else:
stop = view.stop
if not view.step:
step = 1
else:
step = view.step
return slice(start, stop, step)
def casa_time(line):
return line.split("\t")[0]
def time_difference(time1, time2, output_unit=u.min):
diff = time2 - time1
seconds_diff = diff.total_seconds() * u.s
return seconds_diff.to(output_unit)
|
the-stack_0_15229 | from datetime import datetime
from pathlib import Path
import fire
import torch
import torch.nn as nn
import torch.optim as optim
import utils
import ignite
import ignite.distributed as idist
from ignite.contrib.engines import common
from ignite.contrib.handlers import PiecewiseLinear
from ignite.engine import Engine, Events, create_supervised_evaluator
from ignite.handlers import Checkpoint, DiskSaver
from ignite.metrics import Accuracy, Loss
from ignite.utils import manual_seed, setup_logger
def training(local_rank, config):
rank = idist.get_rank()
manual_seed(config["seed"] + rank)
device = idist.device()
logger = setup_logger(name="CIFAR10-Training", distributed_rank=local_rank)
log_basic_info(logger, config)
output_path = config["output_path"]
if rank == 0:
if config["stop_iteration"] is None:
now = datetime.now().strftime("%Y%m%d-%H%M%S")
else:
now = f"stop-on-{config['stop_iteration']}"
folder_name = f"{config['model']}_backend-{idist.backend()}-{idist.get_world_size()}_{now}"
output_path = Path(output_path) / folder_name
if not output_path.exists():
output_path.mkdir(parents=True)
config["output_path"] = output_path.as_posix()
logger.info(f"Output path: {config['output_path']}")
if "cuda" in device.type:
config["cuda device name"] = torch.cuda.get_device_name(local_rank)
if config["with_clearml"]:
try:
from clearml import Task
except ImportError:
# Backwards-compatibility for legacy Trains SDK
from trains import Task
task = Task.init("CIFAR10-Training", task_name=output_path.stem)
task.connect_configuration(config)
# Log hyper parameters
hyper_params = [
"model",
"batch_size",
"momentum",
"weight_decay",
"num_epochs",
"learning_rate",
"num_warmup_epochs",
]
task.connect({k: config[k] for k in hyper_params})
# Setup dataflow, model, optimizer, criterion
train_loader, test_loader = get_dataflow(config)
config["num_iters_per_epoch"] = len(train_loader)
model, optimizer, criterion, lr_scheduler = initialize(config)
# Create trainer for current task
trainer = create_trainer(model, optimizer, criterion, lr_scheduler, train_loader.sampler, config, logger)
# Let's now setup evaluator engine to perform model's validation and compute metrics
metrics = {
"accuracy": Accuracy(),
"loss": Loss(criterion),
}
    # We define two evaluators, as they won't play exactly the same role:
    # - `evaluator` will save the best model based on validation score
evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
train_evaluator = create_supervised_evaluator(model, metrics=metrics, device=device, non_blocking=True)
def run_validation(engine):
epoch = trainer.state.epoch
state = train_evaluator.run(train_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Train", state.metrics)
state = evaluator.run(test_loader)
log_metrics(logger, epoch, state.times["COMPLETED"], "Test", state.metrics)
trainer.add_event_handler(Events.EPOCH_COMPLETED(every=config["validate_every"]) | Events.COMPLETED, run_validation)
if rank == 0:
# Setup TensorBoard logging on trainer and evaluators. Logged values are:
# - Training metrics, e.g. running average loss values
# - Learning rate
# - Evaluation train/test metrics
evaluators = {"training": train_evaluator, "test": evaluator}
tb_logger = common.setup_tb_logging(output_path, trainer, optimizer, evaluators=evaluators)
# Store 3 best models by validation accuracy:
common.gen_save_best_models_by_val_score(
save_handler=get_save_handler(config),
evaluator=evaluator,
models={"model": model},
metric_name="accuracy",
n_saved=3,
trainer=trainer,
tag="test",
)
# In order to check training resuming we can stop training on a given iteration
if config["stop_iteration"] is not None:
@trainer.on(Events.ITERATION_STARTED(once=config["stop_iteration"]))
def _():
logger.info(f"Stop training on {trainer.state.iteration} iteration")
trainer.terminate()
try:
trainer.run(train_loader, max_epochs=config["num_epochs"])
except Exception as e:
import traceback
print(traceback.format_exc())
if rank == 0:
tb_logger.close()
def run(
seed=543,
data_path="/tmp/cifar10",
output_path="/tmp/output-cifar10/",
model="resnet18",
batch_size=512,
momentum=0.9,
weight_decay=1e-4,
num_workers=12,
num_epochs=24,
learning_rate=0.4,
num_warmup_epochs=4,
validate_every=3,
checkpoint_every=200,
backend=None,
resume_from=None,
log_every_iters=15,
nproc_per_node=None,
stop_iteration=None,
with_clearml=False,
**spawn_kwargs,
):
"""Main entry to train an model on CIFAR10 dataset.
Args:
seed (int): random state seed to set. Default, 543.
data_path (str): input dataset path. Default, "/tmp/cifar10".
output_path (str): output path. Default, "/tmp/output-cifar10".
model (str): model name (from torchvision) to setup model to train. Default, "resnet18".
batch_size (int): total batch size. Default, 512.
momentum (float): optimizer's momentum. Default, 0.9.
weight_decay (float): weight decay. Default, 1e-4.
num_workers (int): number of workers in the data loader. Default, 12.
num_epochs (int): number of epochs to train the model. Default, 24.
learning_rate (float): peak of piecewise linear learning rate scheduler. Default, 0.4.
num_warmup_epochs (int): number of warm-up epochs before learning rate decay. Default, 4.
validate_every (int): run model's validation every ``validate_every`` epochs. Default, 3.
checkpoint_every (int): store training checkpoint every ``checkpoint_every`` iterations. Default, 200.
backend (str, optional): backend to use for distributed configuration. Possible values: None, "nccl", "xla-tpu",
"gloo" etc. Default, None.
nproc_per_node (int, optional): optional argument to setup number of processes per node. It is useful,
when main python process is spawning training as child processes.
resume_from (str, optional): path to checkpoint to use to resume the training from. Default, None.
log_every_iters (int): argument to log batch loss every ``log_every_iters`` iterations.
It can be 0 to disable it. Default, 15.
stop_iteration (int, optional): iteration to stop the training. Can be used to check resume from checkpoint.
with_clearml (bool): if True, experiment ClearML logger is setup. Default, False.
**spawn_kwargs: Other kwargs to spawn run in child processes: master_addr, master_port, node_rank, nnodes
"""
# catch all local parameters
config = locals()
config.update(config["spawn_kwargs"])
del config["spawn_kwargs"]
spawn_kwargs["nproc_per_node"] = nproc_per_node
with idist.Parallel(backend=backend, **spawn_kwargs) as parallel:
parallel.run(training, config)
def get_dataflow(config):
# - Get train/test datasets
if idist.get_rank() > 0:
        # Ensure that only rank 0 downloads the dataset
idist.barrier()
train_dataset, test_dataset = utils.get_train_test_datasets(config["data_path"])
if idist.get_rank() == 0:
        # Ensure that only rank 0 downloads the dataset
idist.barrier()
# Setup data loader also adapted to distributed config: nccl, gloo, xla-tpu
train_loader = idist.auto_dataloader(
train_dataset, batch_size=config["batch_size"], num_workers=config["num_workers"], shuffle=True, drop_last=True,
)
test_loader = idist.auto_dataloader(
test_dataset, batch_size=2 * config["batch_size"], num_workers=config["num_workers"], shuffle=False,
)
return train_loader, test_loader
def initialize(config):
model = utils.get_model(config["model"])
# Adapt model for distributed settings if configured
model = idist.auto_model(model)
optimizer = optim.SGD(
model.parameters(),
lr=config["learning_rate"],
momentum=config["momentum"],
weight_decay=config["weight_decay"],
nesterov=True,
)
optimizer = idist.auto_optim(optimizer)
criterion = nn.CrossEntropyLoss().to(idist.device())
le = config["num_iters_per_epoch"]
milestones_values = [
(0, 0.0),
(le * config["num_warmup_epochs"], config["learning_rate"]),
(le * config["num_epochs"], 0.0),
]
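    # Shape of the resulting schedule (a sketch): the learning rate rises linearly
    # from 0 to `learning_rate` over the first `num_warmup_epochs` epochs, then
    # decays linearly back to 0 by the end of epoch `num_epochs`.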
lr_scheduler = PiecewiseLinear(optimizer, param_name="lr", milestones_values=milestones_values)
return model, optimizer, criterion, lr_scheduler
def log_metrics(logger, epoch, elapsed, tag, metrics):
metrics_output = "\n".join([f"\t{k}: {v}" for k, v in metrics.items()])
logger.info(f"\nEpoch {epoch} - Evaluation time (seconds): {int(elapsed)} - {tag} metrics:\n {metrics_output}")
def log_basic_info(logger, config):
logger.info(f"Train {config['model']} on CIFAR10")
logger.info(f"- PyTorch version: {torch.__version__}")
logger.info(f"- Ignite version: {ignite.__version__}")
logger.info("\n")
logger.info("Configuration:")
for key, value in config.items():
logger.info(f"\t{key}: {value}")
logger.info("\n")
if idist.get_world_size() > 1:
logger.info("\nDistributed setting:")
logger.info(f"\tbackend: {idist.backend()}")
logger.info(f"\tworld size: {idist.get_world_size()}")
logger.info("\n")
def create_trainer(model, optimizer, criterion, lr_scheduler, train_sampler, config, logger):
device = idist.device()
# Setup Ignite trainer:
# - let's define training step
# - add other common handlers:
# - TerminateOnNan,
# - handler to setup learning rate scheduling,
# - ModelCheckpoint
# - RunningAverage` on `train_step` output
# - Two progress bars on epochs and optionally on iterations
def train_step(engine, batch):
x, y = batch[0], batch[1]
if x.device != device:
x = x.to(device, non_blocking=True)
y = y.to(device, non_blocking=True)
model.train()
# Supervised part
y_pred = model(x)
loss = criterion(y_pred, y)
optimizer.zero_grad()
loss.backward()
optimizer.step()
        # This can be helpful for XLA, avoiding the performance slowdown of fetching loss.item() on every iteration
if config["log_every_iters"] > 0 and (engine.state.iteration - 1) % config["log_every_iters"] == 0:
batch_loss = loss.item()
engine.state.saved_batch_loss = batch_loss
else:
batch_loss = engine.state.saved_batch_loss
return {
"batch loss": batch_loss,
}
trainer = Engine(train_step)
trainer.state.saved_batch_loss = -1.0
trainer.state_dict_user_keys.append("saved_batch_loss")
trainer.logger = logger
to_save = {"trainer": trainer, "model": model, "optimizer": optimizer, "lr_scheduler": lr_scheduler}
metric_names = [
"batch loss",
]
common.setup_common_training_handlers(
trainer=trainer,
train_sampler=train_sampler,
to_save=to_save,
save_every_iters=config["checkpoint_every"],
save_handler=get_save_handler(config),
lr_scheduler=lr_scheduler,
output_names=metric_names if config["log_every_iters"] > 0 else None,
with_pbars=False,
clear_cuda_cache=False,
)
resume_from = config["resume_from"]
if resume_from is not None:
checkpoint_fp = Path(resume_from)
assert checkpoint_fp.exists(), f"Checkpoint '{checkpoint_fp.as_posix()}' is not found"
logger.info(f"Resume from a checkpoint: {checkpoint_fp.as_posix()}")
checkpoint = torch.load(checkpoint_fp.as_posix(), map_location="cpu")
Checkpoint.load_objects(to_load=to_save, checkpoint=checkpoint)
return trainer
def get_save_handler(config):
if config["with_clearml"]:
from ignite.contrib.handlers.clearml_logger import ClearMLSaver
return ClearMLSaver(dirname=config["output_path"])
return DiskSaver(config["output_path"], require_empty=False)
if __name__ == "__main__":
fire.Fire({"run": run})
|
the-stack_0_15231 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# ************************************
# @Time : 2019/7/3 22:34
# @Author : Xiang Ling
# @Lab : nesa.zju.edu.cn
# @File : cfg_train.py
# ************************************
import numpy as np
import os
import torch
from datetime import datetime
from sklearn.metrics import auc, roc_curve
from cfg_config import cfg_args
from data import CFGDataset
from model.DenseGraphMatching import MultiLevelGraphMatchNetwork
from utils import create_dir_if_not_exists, write_log_file
from utils import generate_epoch_pair
class CFGTrainer(object):
def __init__(self, node_init_dims, data_dir, device, log_file, best_model_file, args):
super(CFGTrainer, self).__init__()
# training parameters
self.max_epoch = args.epochs
self.batch_size = args.batch_size
self.lr = args.lr
self.device = device
self.log_file = log_file
self.best_model_path = best_model_file
self.model = MultiLevelGraphMatchNetwork(node_init_dims=node_init_dims, arguments=args, device=device).to(device)
write_log_file(self.log_file, str(self.model))
self.optimizer = torch.optim.Adam(self.model.parameters(), lr=self.lr)
cfg = CFGDataset(data_dir=data_dir, batch_size=self.batch_size)
self.graph_train = cfg.graph_train
self.classes_train = cfg.classes_train
self.epoch_data_valid = cfg.valid_epoch
self.epoch_data_test = cfg.test_epoch
init_val_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid) # evaluate the auc of init model for validation dataset
write_log_file(self.log_file, "Initial Validation AUC = {0} @ {1}".format(init_val_auc, datetime.now()))
def fit(self):
best_val_auc = None
for i in range(1, self.max_epoch + 1):
# train
loss_avg = self.train_one_epoch(model=self.model, optimizer=self.optimizer, graphs=self.graph_train, classes=self.classes_train, batch_size=self.batch_size,
device=self.device, load_data=None)
write_log_file(self.log_file, "EPOCH {0}/{1}:\tMSE loss = {2} @ {3}".format(i, self.max_epoch, loss_avg, datetime.now()))
# validation
valid_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
write_log_file(self.log_file, "Validation AUC = {0} @ {1}".format(valid_auc, datetime.now()))
# save the best validation
if best_val_auc is None or best_val_auc < valid_auc:
write_log_file(self.log_file, 'Validation AUC increased ({} ---> {}), and saving the model ... '.format(best_val_auc, valid_auc))
best_val_auc = valid_auc
torch.save(self.model.state_dict(), self.best_model_path)
write_log_file(self.log_file, 'Best Validation auc = {} '.format(best_val_auc))
return best_val_auc
def testing(self):
# load the last checkpoint with the best model
self.model.load_state_dict(torch.load(self.best_model_path))
self.model.eval()
# double check the save checkpoint model for validation
double_val_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_valid)
# evaluating on the testing dataset
final_test_auc = self.eval_auc_epoch(model=self.model, eval_epoch_data=self.epoch_data_test)
write_log_file(self.log_file, "\nDouble check for the saved best checkpoint model for validation {} ".format(double_val_auc))
write_log_file(self.log_file, "Finally, testing auc = {} @ {}".format(final_test_auc, datetime.now()))
return final_test_auc
@staticmethod
def train_one_epoch(model, optimizer, graphs, classes, batch_size, device, load_data=None):
model.train()
if load_data is None:
epoch_data = generate_epoch_pair(graphs, classes, batch_size)
else:
epoch_data = load_data
perm = np.random.permutation(len(epoch_data)) # Random shuffle
cum_loss = 0.0
num = 0
for index in perm:
cur_data = epoch_data[index]
x1, x2, adj1, adj2, y = cur_data
batch_output = model(batch_x_p=x1, batch_x_h=x2, batch_adj_p=adj1, batch_adj_h=adj2)
y = torch.FloatTensor(y).to(device)
mse_loss = torch.nn.functional.mse_loss(batch_output, y)
optimizer.zero_grad()
mse_loss.backward()
optimizer.step()
            cum_loss += mse_loss.item()
            if num % max(1, len(perm) // 10) == 0:
print('\tTraining: {}/{}: index = {} loss = {}'.format(num, len(epoch_data), index, mse_loss))
num = num + 1
return cum_loss / len(perm)
@staticmethod
def eval_auc_epoch(model, eval_epoch_data):
model.eval()
with torch.no_grad():
tot_diff = []
tot_truth = []
for cur_data in eval_epoch_data:
x1, x2, adj1, adj2, y = cur_data
batch_output = model(batch_x_p=x1, batch_x_h=x2, batch_adj_p=adj1, batch_adj_h=adj2)
tot_diff += list(batch_output.data.cpu().numpy())
tot_truth += list(y > 0)
diff = np.array(tot_diff) * -1
truth = np.array(tot_truth)
fpr, tpr, _ = roc_curve(truth, (1 - diff) / 2)
model_auc = auc(fpr, tpr)
return model_auc
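    # Example use of this trainer outside the __main__ block below (a sketch;
    # the paths and dimensions here are hypothetical):
    #   trainer = CFGTrainer(node_init_dims=6, data_dir="data/acfg_dir",
    #                        device=torch.device("cpu"), log_file="log.txt",
    #                        best_model_file="best.BestModel", args=cfg_args)
    #   best_val = trainer.fit()
    #   test_auc = trainer.testing()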
if __name__ == '__main__':
    # Set CUDA_VISIBLE_DEVICES before any CUDA initialisation so the chosen GPU is respected.
    os.environ["CUDA_VISIBLE_DEVICES"] = str(cfg_args.gpu_index)
    d = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
main_data_dir = cfg_args.data_dir
graph_name = cfg_args.dataset
graph_min = cfg_args.graph_size_min
graph_max = cfg_args.graph_size_max
graph_init_dim = cfg_args.graph_init_dim
# <-><-><-> only for log, delete below if open source
title = '{}_Min{}_Max{}'.format(graph_name, graph_min, graph_max)
main_log_dir = cfg_args.log_path + '{}_Min{}_Max{}_InitDims{}_Task_{}/'.format(graph_name, graph_min, graph_max, graph_init_dim, cfg_args.task)
create_log_str = create_dir_if_not_exists(main_log_dir)
best_model_dir = main_log_dir + 'BestModels_{}_{}_Repeat_{}/'.format(cfg_args.match_agg, cfg_args.global_agg, cfg_args.repeat_run)
create_BestModel_dir = create_dir_if_not_exists(best_model_dir)
LOG_FILE = main_log_dir + 'repeat_{}_'.format(cfg_args.repeat_run) + title + '.txt'
BestModel_FILE = best_model_dir + title + '.BestModel'
CSV_FILE = main_log_dir + title + '.csv'
# <-><-><-> only for log, delete above if open source
sub_data_dir = '{}_{}ACFG_min{}_max{}'.format(graph_name, graph_init_dim, graph_min, graph_max)
cfg_data_dir = os.path.join(main_data_dir, sub_data_dir) if 'ffmpeg' in sub_data_dir else os.path.join(main_data_dir, sub_data_dir, 'acfgSSL_6')
assert os.path.exists(cfg_data_dir), "the path of {} is not exist!".format(cfg_data_dir)
if cfg_args.only_test is True:
model_save_path = cfg_args.model_path
LOG_FILE = main_log_dir + 'OnlyTest_repeat_{}_'.format(cfg_args.repeat_run) + title + '.txt'
write_log_file(LOG_FILE, create_log_str)
write_log_file(LOG_FILE, create_BestModel_dir)
write_log_file(LOG_FILE, str(cfg_args))
cfg_trainer = CFGTrainer(node_init_dims=graph_init_dim, data_dir=cfg_data_dir, device=d, log_file=LOG_FILE, best_model_file=model_save_path, args=cfg_args)
ret_final_test_auc = cfg_trainer.testing()
else:
write_log_file(LOG_FILE, create_log_str)
write_log_file(LOG_FILE, create_BestModel_dir)
write_log_file(LOG_FILE, str(cfg_args))
cfg_trainer = CFGTrainer(node_init_dims=graph_init_dim, data_dir=cfg_data_dir, device=d, log_file=LOG_FILE, best_model_file=BestModel_FILE, args=cfg_args)
ret_best_val_auc = cfg_trainer.fit()
ret_final_test_auc = cfg_trainer.testing()
|
the-stack_0_15232 | #
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""DirectRunner, executing on the local machine.
The DirectRunner is a runner implementation that executes the entire
graph of transformations belonging to a pipeline on the local machine.
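
Example (a sketch; the exact pipeline-options API may differ between Beam versions)::

  import apache_beam as beam

  p = beam.Pipeline(runner='DirectRunner')
  _ = (p
       | beam.Create(['a', 'b', 'a'])
       | beam.combiners.Count.PerElement())
  result = p.run()
  result.wait_until_finish()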
"""
from __future__ import absolute_import
import collections
import logging
from apache_beam.metrics.execution import MetricsEnvironment
from apache_beam.runners.direct.bundle_factory import BundleFactory
from apache_beam.runners.runner import PipelineResult
from apache_beam.runners.runner import PipelineRunner
from apache_beam.runners.runner import PipelineState
from apache_beam.runners.runner import PValueCache
from apache_beam.utils.pipeline_options import DirectOptions
from apache_beam.utils.value_provider import RuntimeValueProvider
class DirectRunner(PipelineRunner):
"""Executes a single pipeline on the local machine."""
def __init__(self):
self._cache = None
def apply_CombinePerKey(self, transform, pcoll):
    # TODO: Move imports to top. Pipeline <-> Runner dependency causes problems
# with resolving imports when they are at top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.helper_transforms import LiftedCombinePerKey
try:
return pcoll | LiftedCombinePerKey(
transform.fn, transform.args, transform.kwargs)
except NotImplementedError:
return transform.expand(pcoll)
def run(self, pipeline):
"""Execute the entire pipeline and returns an DirectPipelineResult."""
    # TODO: Move imports to top. Pipeline <-> Runner dependency causes problems
# with resolving imports when they are at top.
# pylint: disable=wrong-import-position
from apache_beam.runners.direct.consumer_tracking_pipeline_visitor import \
ConsumerTrackingPipelineVisitor
from apache_beam.runners.direct.evaluation_context import EvaluationContext
from apache_beam.runners.direct.executor import Executor
from apache_beam.runners.direct.transform_evaluator import \
TransformEvaluatorRegistry
MetricsEnvironment.set_metrics_supported(True)
logging.info('Running pipeline with DirectRunner.')
self.visitor = ConsumerTrackingPipelineVisitor()
pipeline.visit(self.visitor)
evaluation_context = EvaluationContext(
pipeline.options,
BundleFactory(stacked=pipeline.options.view_as(DirectOptions)
.direct_runner_use_stacked_bundle),
self.visitor.root_transforms,
self.visitor.value_to_consumers,
self.visitor.step_names,
self.visitor.views)
evaluation_context.use_pvalue_cache(self._cache)
executor = Executor(self.visitor.value_to_consumers,
TransformEvaluatorRegistry(evaluation_context),
evaluation_context)
# Start the executor. This is a non-blocking call, it will start the
# execution in background threads and return.
if pipeline.options:
RuntimeValueProvider.set_runtime_options(pipeline.options._options_id, {})
executor.start(self.visitor.root_transforms)
result = DirectPipelineResult(executor, evaluation_context)
if self._cache:
# We are running in eager mode, block until the pipeline execution
# completes in order to have full results in the cache.
result.wait_until_finish()
self._cache.finalize()
# Unset runtime options after the pipeline finishes.
# TODO: Move this to a post finish hook and clean for all cases.
if pipeline.options:
RuntimeValueProvider.unset_runtime_options(pipeline.options._options_id)
return result
@property
def cache(self):
if not self._cache:
self._cache = BufferingInMemoryCache()
return self._cache.pvalue_cache
class BufferingInMemoryCache(object):
"""PValueCache wrapper for buffering bundles until a PValue is fully computed.
BufferingInMemoryCache keeps an in memory cache of
(applied_ptransform, tag) tuples. It accepts appending to existing cache
entries until it is finalized. finalize() will make all the existing cached
  entries visible to the underlying PValueCache in their entirety, clear the in
memory cache and stop accepting new cache entries.
"""
def __init__(self):
self._cache = collections.defaultdict(list)
self._pvalue_cache = PValueCache()
self._finalized = False
@property
def pvalue_cache(self):
return self._pvalue_cache
def append(self, applied_ptransform, tag, elements):
assert not self._finalized
assert elements is not None
self._cache[(applied_ptransform, tag)].extend(elements)
def finalize(self):
"""Make buffered cache elements visible to the underlying PValueCache."""
assert not self._finalized
for key, value in self._cache.iteritems():
applied_ptransform, tag = key
self._pvalue_cache.cache_output(applied_ptransform, tag, value)
self._cache = None
class DirectPipelineResult(PipelineResult):
"""A DirectPipelineResult provides access to info about a pipeline."""
def __init__(self, executor, evaluation_context):
super(DirectPipelineResult, self).__init__(PipelineState.RUNNING)
self._executor = executor
self._evaluation_context = evaluation_context
def _is_in_terminal_state(self):
return self._state is not PipelineState.RUNNING
def wait_until_finish(self, duration=None):
if not self._is_in_terminal_state():
if duration:
raise NotImplementedError(
'DirectRunner does not support duration argument.')
try:
self._executor.await_completion()
self._state = PipelineState.DONE
except: # pylint: disable=broad-except
self._state = PipelineState.FAILED
raise
return self._state
def aggregated_values(self, aggregator_or_name):
return self._evaluation_context.get_aggregator_values(aggregator_or_name)
def metrics(self):
return self._evaluation_context.metrics()
class EagerRunner(DirectRunner):
is_eager = True
|
the-stack_0_15233 | # @date 17.05.2021.0
# @clock 22.50
# @author onur55-tr
from turtle import *
def my_goto(x,y):
penup()
goto(x,y)
pendown()
def gozler():
fillcolor('#ffffff')
begin_fill()
tracer(False)
a = 2.5
for i in range(120):
if 0 <= i < 30 or 60 <= i < 90:
a -= 0.05
lt(3)
fd(a)
else:
a += 0.05
lt(3)
fd(a)
tracer(True)
end_fill()
def sakal():
#left
my_goto(-27,135)
    seth(165)  # angle
    fd(50)  # distance
my_goto(-27,125)
seth(180)
fd(62)
my_goto(-27,115)
seth(195)
fd(50)
#right
my_goto(35,135)
seth(15)
fd(60)
my_goto(35,125)
seth(0)
fd(72)
my_goto(35,115)
seth(-12)
fd(60)
def mouth():
my_goto(5,148)
seth(270)
fd(100)
seth(0)
circle(110,50)
seth(230)
circle(-110,100)
def scarf():
fillcolor('#e70010')
begin_fill()
seth(0)
fd(190)
circle(-5,90)
fd(10)
circle(-5,90)
fd(207)
circle(-5,90)
fd(10)
circle(-5,90)
end_fill()
def nose():
my_goto(-10, 158)
seth(315)
fillcolor('#e70010')
begin_fill()
circle(20)
end_fill()
def siyah_gozler():
seth(0)
my_goto(-20,195)
fillcolor('#000000')
begin_fill()
circle(13)
end_fill()
pensize(6)
my_goto(20,205)
seth(75)
circle(-10,150)
pensize(3)
my_goto(-17,200)
seth(0)
fillcolor('#ffffff')
begin_fill()
circle(5)
end_fill()
my_goto(0,0)
def face():
fd(183)
lt(45)
fillcolor('#ffffff')
begin_fill()
circle(120, 100)
seth(180)
# print(pos())
fd(121)
pendown()
seth(210)
circle(120, 100)
end_fill()
my_goto(63.56,218.24)
seth(90)
gozler()
seth(180)
penup()
fd(60)
pendown()
seth(90)
gozler()
penup()
seth(180)
fd(64)
def head():
penup()
circle(150,40)
pendown()
fillcolor('#00a0de')
begin_fill()
circle(140,280)
end_fill()
def Doraemon():
head()
scarf()
face()
nose()
mouth()
sakal()
    # body
my_goto(0, 0)
seth(0)
penup()
circle(150, 50)
pendown()
seth(30)
fd(40)
seth(70)
circle(-30, 270)
fillcolor('#00a0de')
begin_fill()
seth(230)
fd(80)
seth(90)
circle(1000, 1)
seth(-89)
circle(-1000, 10)
# print(pos())
seth(180)
fd(70)
seth(90)
circle(30, 180)
seth(180)
fd(70)
# print(pos())
seth(100)
circle(-1000, 9)
seth(-86)
circle(1000, 2)
seth(230)
fd(40)
# print(pos())
circle(-30, 230)
seth(45)
fd(81)
seth(0)
fd(203)
circle(5, 90)
fd(10)
circle(5, 90)
fd(7)
seth(40)
circle(150, 10)
seth(30)
fd(40)
end_fill()
seth(70)
fillcolor('#ffffff')
begin_fill()
circle(-30)
end_fill()
my_goto(103.74, -182.59)
seth(0)
fillcolor('#ffffff')
begin_fill()
fd(15)
circle(-15, 180)
fd(90)
circle(-15, 180)
fd(10)
end_fill()
my_goto(-96.26, -182.59)
seth(180)
fillcolor('#ffffff')
begin_fill()
fd(15)
circle(15, 180)
fd(90)
circle(15, 180)
fd(10)
end_fill()
my_goto(-133.97, -91.81)
seth(50)
fillcolor('#ffffff')
begin_fill()
circle(30)
end_fill()
my_goto(-103.42, 15.09)
seth(0)
fd(38)
seth(230)
begin_fill()
circle(90, 260)
end_fill()
my_goto(5, -40)
seth(0)
fd(70)
seth(-90)
circle(-70, 180)
seth(0)
fd(70)
my_goto(-103.42, 15.09)
fd(90)
seth(70)
fillcolor('#ffd200')
begin_fill()
circle(-20)
end_fill()
seth(170)
fillcolor('#ffd200')
begin_fill()
circle(-2, 180)
seth(10)
circle(-100, 22)
circle(-2, 180)
seth(180-10)
circle(100, 22)
end_fill()
goto(-13.42, 15.09)
seth(250)
circle(20, 110)
seth(90)
fd(15)
dot(10)
my_goto(0, -150)
siyah_gozler()
if __name__ == '__main__':
screensize(1000,600, "#f0f0f0")
pensize(4)
speed(8)
Doraemon()
my_goto(100,-300)
write('by onur55-tr', font=("Bradley Hand ITC", 30, "bold"))
mainloop() |
the-stack_0_15235 | from .helpers import get_pylint_output, write_output
from ..automation_tools import read_json
import os, sys
# https://docs.pylint.org/features.html#general-options
def find(items, filename, coreonly):
enabled = ','.join(items)
print('Generating %s in all of pygsti%s. This should take less than a minute' %
(enabled, " (core only)" if coreonly else ""))
config = read_json('config/pylint_config.json')
commands = [config['pylint-version'],
'--disable=all',
'--enable=%s' % enabled,
'--rcfile=%s' % config['config-file'],
'--reports=n'] + (['pygsti'] if coreonly else config['packages'])
output = get_pylint_output(commands, filename) # implicitly puts to screen/saves to file
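# Example (a sketch; the message names follow the pylint feature list linked above):
#   find(['unused-import', 'undefined-variable'], 'pylint_unused.txt', coreonly=True)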
def look_for(args, coreonly=True):
if len(args) == 0:
print('Please supply a filename and list of things to check for. (see https://docs.pylint.org/features.html#general-options)')
sys.exit(1)
# If only one argument is supplied, assume it is both the filename and the itemname
elif len(sys.argv) == 2:
sys.argv.append(sys.argv[1])
find(sys.argv[2:], sys.argv[1], coreonly)
|
the-stack_0_15237 | import collections
import copy
import glob
import logging
import os
import pickle
import sys
import tarfile
import time
from io import BytesIO
from dxtbx.model.experiment_list import (
Experiment,
ExperimentList,
ExperimentListFactory,
)
from libtbx.phil import parse
from libtbx.utils import Abort, Sorry
import dials.util
from dials.array_family import flex
from dials.util import log
logger = logging.getLogger("dials.command_line.stills_process")
help_message = """
DIALS script for processing still images. Import, index, refine, and integrate are all done for each image
separately.
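
Example (a sketch; the file pattern and parameter values are illustrative):

  dials.stills_process image_*.cbf mp.nproc=8 output.output_dir=processed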
"""
control_phil_str = """
input {
file_list = None
.type = path
.help = Path to a text file with a list of images
glob = None
.type = str
.help = For large, multi-file datasets, specify the paths using wildcards (e.g. *.cbf)
.multiple = True
image_tag = None
.type = str
.multiple = True
.help = Only process images with these tag(s). For single-image files (like CBFs or SMVs), the image \
tag for each file is the file name. For multi-image files like HDF5, the image tag is \
filename_imagenumber (including leading zeros). Use show_image_tags=True to see the list \
of image tags that will be used for a dataset.
show_image_tags = False
.type = bool
.help = Show the set of image tags that would be used during processing. To process subsets of image \
files, use these tags with the image_tag parameter.
max_images = None
.type = int
.help = Limit total number of processed images to max_images
ignore_gain_mismatch = False
.type = bool
.expert_level = 3
.help = Detector gain should be set on the detector models loaded from the images or in the \
processing parameters, not both. Override the check that this is true with this flag. \
}
dispatch {
pre_import = False
.type = bool
.expert_level = 2
.help = If True, before processing import all the data. Needed only if processing \
multiple multi-image files at once (not a recommended use case)
process_percent = None
.type = int(value_min=1, value_max=100)
.help = Percent of events to process
refine = False
.expert_level = 2
.type = bool
.help = If True, after indexing, refine the experimental models
squash_errors = True
.expert_level = 2
.type = bool
.help = If True, if an image fails to process, continue to the next image. \
otherwise, halt processing and show the error.
find_spots = True
.expert_level = 2
.type = bool
.help = Whether to do spotfinding. Needed for indexing/integration
index = True
.expert_level = 2
.type = bool
.help = Attempt to index images. find_spots also needs to be True for this to work
integrate = True
.expert_level = 2
.type = bool
.help = Integrate indexed images. Ignored if index=False or find_spots=False
coset = False
.expert_level = 2
.type = bool
.help = Within the integrate dispatcher, integrate a sublattice coset intended to represent \
negative control spots with no Bragg diffraction.
hit_finder{
enable = True
.type = bool
.help = Whether to do hitfinding. hit_finder=False: process all images
minimum_number_of_reflections = 16
.type = int
.help = If the number of strong reflections on an image is less than this, and \
the hitfinder is enabled, discard this image.
maximum_number_of_reflections = None
.type = int
.help = If specified, ignores images with more than this many number of reflections
}
}
output {
output_dir = .
.type = str
.help = Directory output files will be placed
composite_output = True
.type = bool
.help = If True, save one set of experiment/reflection files per process, where each is a \
concatenated list of all the successful events examined by that process. \
If False, output a separate experiment/reflection file per image (generates a \
lot of files).
logging_dir = None
.type = str
.help = Directory output log files will be placed
experiments_filename = None
.type = str
.help = The filename for output experiments. For example, %s_imported.expt
strong_filename = None
.type = str
.help = The filename for strong reflections from spot finder output. For example: \
%s_strong.refl
indexed_filename = %s_indexed.refl
.type = str
.help = The filename for indexed reflections.
refined_experiments_filename = %s_refined.expt
.type = str
.help = The filename for saving refined experimental models
integrated_filename = %s_integrated.refl
.type = str
.help = The filename for final integrated reflections.
integrated_experiments_filename = %s_integrated.expt
.type = str
.help = The filename for saving final experimental models.
coset_filename = %s_coset%d.refl
.type = str
.help = The filename for final coset reflections.
coset_experiments_filename = %s_coset%d.expt
.type = str
.help = The filename for saving final coset experimental models.
profile_filename = None
.type = str
.help = The filename for output reflection profile parameters
integration_pickle = int-%d-%s.pickle
.type = str
.help = Filename for cctbx.xfel-style integration pickle files
}
mp {
method = *multiprocessing sge lsf pbs mpi
.type = choice
.help = "The multiprocessing method to use"
nproc = 1
.type = int(value_min=1)
.help = "The number of processes to use."
composite_stride = None
.type = int
.help = For MPI, if using composite mode, specify how many ranks to \
aggregate data from. For example, if you have 100 processes, \
composite mode will output N*100 files, where N is the number \
of file types (expt, refl, etc). If you specify stride = 25, \
then each group of 25 process will send their results to 4 \
processes and only N*4 files will be created. Ideally, match \
stride to the number of processors per node.
debug
.expert_level = 2
{
cProfile = False
.type = bool
.help = Enable code profiling. Profiling file will be available in \
the debug folder. Use (for example) runsnake to visualize \
processing performance
output_debug_logs = True
.type = bool
.help = Whether to write debugging information for every image \
processed
}
}
"""
dials_phil_str = """
input {
reference_geometry = None
.type = str
.help = Provide an models.expt file with exactly one detector model. Data processing will use \
that geometry instead of the geometry found in the image headers.
sync_reference_geom = True
.type = bool
.help = ensures the reference hierarchy agrees with the image format
}
output {
shoeboxes = True
.type = bool
.help = Save the raw pixel values inside the reflection shoeboxes during spotfinding.
}
include scope dials.util.options.geometry_phil_scope
include scope dials.algorithms.spot_finding.factory.phil_scope
include scope dials.algorithms.indexing.indexer.phil_scope
indexing {
include scope dials.algorithms.indexing.lattice_search.basis_vector_search_phil_scope
}
include scope dials.algorithms.refinement.refiner.phil_scope
include scope dials.algorithms.integration.integrator.phil_scope
include scope dials.algorithms.profile_model.factory.phil_scope
include scope dials.algorithms.spot_prediction.reflection_predictor.phil_scope
include scope dials.algorithms.integration.stills_significance_filter.phil_scope
indexing {
stills {
method_list = None
.type = strings
.help = List of indexing methods. If indexing fails with first method, indexing will be \
attempted with the next, and so forth
}
}
integration {
include scope dials.algorithms.integration.kapton_correction.absorption_phil_scope
coset {
transformation = 6
.type = int(value_min=0, value_max=6)
.multiple = False
      .help = The index number(s) of the modulus=2 sublattice transformation(s) used to produce distinct coset results. \
0=Double a, 1=Double b, 2=Double c, 3=C-face centering, 4=B-face centering, 5=A-face centering, 6=Body centering \
See Sauter and Zwart, Acta D (2009) 65:553
}
integration_only_overrides {
trusted_range = None
.type = floats(size=2)
.help = "Override the panel trusted range (underload and saturation) during integration."
.short_caption = "Panel trusted range"
}
}
profile {
gaussian_rs {
parameters {
sigma_b_cutoff = 0.1
.type = float
.help = Maximum sigma_b before the image is rejected
}
}
}
"""
program_defaults_phil_str = """
indexing {
method = fft1d
}
refinement {
parameterisation {
auto_reduction {
min_nref_per_parameter = 1
action = fix
}
beam.fix = all
detector.fix = all
}
reflections {
weighting_strategy.override = stills
outlier.algorithm = null
}
}
integration {
integrator = stills
profile.fitting = False
background {
algorithm = simple
simple {
outlier.algorithm = plane
model.algorithm = linear2d
}
}
}
profile.gaussian_rs.min_spots.overall = 0
"""
phil_scope = parse(control_phil_str + dials_phil_str, process_includes=True).fetch(
parse(program_defaults_phil_str)
)
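# Example PHIL overrides (a sketch) that could be placed in a file passed on the
# command line; the parameter paths are defined in the scopes above:
#
#   dispatch.hit_finder.minimum_number_of_reflections = 25
#   output.composite_output = True
#   mp.method = mpi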
def do_import(filename, load_models=True):
logger.info("Loading %s", os.path.basename(filename))
experiments = ExperimentListFactory.from_filenames([filename], load_models=False)
if len(experiments) == 0:
try:
experiments = ExperimentListFactory.from_json_file(filename)
except ValueError:
raise Abort(f"Could not load {filename}")
if len(experiments) == 0:
raise Abort(f"Could not load {filename}")
from dxtbx.imageset import ImageSetFactory
all_experiments = ExperimentList()
for experiment in experiments:
# Convert from ImageSequence to ImageSet, if needed
imageset = ImageSetFactory.imageset_from_anyset(experiment.imageset)
for i in range(len(imageset)):
# Preserve original models if they were available (in the case of an image file
# they will not be, but in the case of a previously processed experiment list,
# then they may be available
expt = Experiment(
imageset=imageset[i : i + 1],
detector=experiment.detector,
beam=experiment.beam,
scan=experiment.scan,
goniometer=experiment.goniometer,
crystal=experiment.crystal,
)
if load_models:
expt.load_models()
all_experiments.append(expt)
return all_experiments
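# Example (a sketch; the file name is hypothetical): expand a multi-image file
# into one still Experiment per frame, without loading detector/beam models yet.
#   experiments = do_import("run_000123_master.h5", load_models=False)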
def sync_geometry(src, dest):
dest.set_local_frame(
src.get_local_fast_axis(), src.get_local_slow_axis(), src.get_local_origin()
)
if not src.is_panel():
for src_child, dest_child in zip(src, dest):
sync_geometry(src_child, dest_child)
class Script:
"""A class for running the script."""
def __init__(self):
"""Initialise the script."""
from dials.util.options import OptionParser
# The script usage
usage = "usage: dials.stills_process [options] [param.phil] filenames"
self.tag = None
self.reference_detector = None
# Create the parser
self.parser = OptionParser(usage=usage, phil=phil_scope, epilog=help_message)
def load_reference_geometry(self):
if self.params.input.reference_geometry is None:
return
try:
ref_experiments = ExperimentListFactory.from_json_file(
self.params.input.reference_geometry, check_format=False
)
except Exception:
try:
import dxtbx
img = dxtbx.load(self.params.input.reference_geometry)
except Exception:
raise Sorry(
"Couldn't load geometry file %s"
% self.params.input.reference_geometry
)
else:
self.reference_detector = img.get_detector()
else:
assert len(ref_experiments.detectors()) == 1
self.reference_detector = ref_experiments.detectors()[0]
def run(self, args=None):
"""Execute the script."""
from libtbx import easy_mp
try:
from mpi4py import MPI
except ImportError:
rank = 0
size = 1
else:
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
if rank == 0:
# Parse the command line
params, options, all_paths = self.parser.parse_args(
args, show_diff_phil=False, return_unhandled=True, quick_parse=True
)
if params.input.glob:
all_paths.extend(params.input.glob)
globbed = []
for p in all_paths:
globbed.extend(glob.glob(p))
all_paths = globbed
if not all_paths and params.input.file_list is not None:
all_paths.extend(
[path.strip() for path in open(params.input.file_list).readlines()]
)
if size > 1:
if rank == 0:
transmitted_info = params, options, all_paths
else:
transmitted_info = None
params, options, all_paths = comm.bcast(transmitted_info, root=0)
# Check we have some filenames
if not all_paths:
self.parser.print_help()
return
if params.mp.debug.cProfile:
import cProfile
self.pr = cProfile.Profile()
self.pr.enable()
print(f"Have {len(all_paths)} files")
# Mask validation
for mask_path in params.spotfinder.lookup.mask, params.integration.lookup.mask:
if mask_path is not None and not os.path.isfile(mask_path):
raise Sorry(f"Mask {mask_path} not found")
# Save the options
self.options = options
self.params = params
st = time.time()
if params.mp.method == "mpi":
# Configure the logging
if params.output.logging_dir is None:
logfile = None
else:
log_path = os.path.join(
params.output.logging_dir, "log_rank%04d.out" % rank
)
error_path = os.path.join(
params.output.logging_dir, "error_rank%04d.out" % rank
)
print(f"Redirecting stdout to {log_path}")
print(f"Redirecting stderr to {error_path}")
sys.stdout = open(log_path, "a")
sys.stderr = open(error_path, "a")
print("Should be redirected now")
logfile = os.path.join(
params.output.logging_dir, "info_rank%04d.out" % rank
)
log.config(verbosity=options.verbose, logfile=logfile)
else:
# Configure logging
log.config(verbosity=options.verbose, logfile="dials.process.log")
bad_phils = [f for f in all_paths if os.path.splitext(f)[1] == ".phil"]
if len(bad_phils) > 0:
self.parser.print_help()
logger.error(
"Error: the following phil files were not understood: %s",
", ".join(bad_phils),
)
return
# Log the diff phil
diff_phil = self.parser.diff_phil.as_str()
if diff_phil != "":
logger.info("The following parameters have been modified:\n")
logger.info(diff_phil)
for abs_params in self.params.integration.absorption_correction:
if abs_params.apply:
if not (
self.params.integration.debug.output
and not self.params.integration.debug.separate_files
):
raise Sorry(
"Shoeboxes must be saved to integration intermediates to apply an absorption correction. "
+ "Set integration.debug.output=True, integration.debug.separate_files=False and "
+ "integration.debug.delete_shoeboxes=True to temporarily store shoeboxes."
)
self.load_reference_geometry()
from dials.command_line.dials_import import ManualGeometryUpdater
update_geometry = ManualGeometryUpdater(params)
# Import stuff
logger.info("Loading files...")
pre_import = params.dispatch.pre_import or len(all_paths) == 1
if pre_import:
# Handle still imagesets by breaking them apart into multiple experiments
# Further handle single file still imagesets (like HDF5) by tagging each
# frame using its index
experiments = ExperimentList()
for path in sorted(all_paths):
experiments.extend(do_import(path, load_models=False))
indices = []
basenames = []
basename_counts = {}
split_experiments = []
for i, imageset in enumerate(experiments.imagesets()):
assert len(imageset) == 1
paths = imageset.paths()
indices.append(i)
basename = os.path.splitext(os.path.basename(paths[0]))[0]
basenames.append(basename)
if basename in basename_counts:
basename_counts[basename] += 1
else:
basename_counts[basename] = 1
split_experiments.append(experiments[i : i + 1])
tags = []
split_experiments2 = []
for i, basename in zip(indices, basenames):
if basename_counts[basename] > 1:
tag = "%s_%05d" % (basename, i)
else:
tag = basename
if (
not self.params.input.image_tag
or tag in self.params.input.image_tag
):
tags.append(tag)
split_experiments2.append(split_experiments[i])
split_experiments = split_experiments2
# Wrapper function
def do_work(i, item_list, processor=None, finalize=True):
if not processor:
processor = Processor(
copy.deepcopy(params), composite_tag="%04d" % i, rank=i
)
for item in item_list:
tag = item[0]
experiments = split_experiments[item[1]]
try:
assert len(experiments) == 1
experiment = experiments[0]
experiment.load_models()
imageset = experiment.imageset
update_geometry(imageset)
experiment.beam = imageset.get_beam()
experiment.detector = imageset.get_detector()
except RuntimeError as e:
logger.warning("Error updating geometry on item %s, %s", tag, e)
continue
if self.reference_detector is not None:
experiment = experiments[0]
if self.params.input.sync_reference_geom:
imageset = experiment.imageset
sync_geometry(
self.reference_detector.hierarchy(),
imageset.get_detector().hierarchy(),
)
experiment.detector = imageset.get_detector()
else:
experiment.detector = copy.deepcopy(self.reference_detector)
processor.process_experiments(tag, experiments)
imageset.clear_cache()
if finalize:
processor.finalize()
return processor
iterable = list(zip(tags, range(len(split_experiments))))
else:
basenames = collections.defaultdict(int)
sorted_paths = sorted(all_paths)
for filename in sorted_paths:
basename = os.path.splitext(os.path.basename(filename))[0]
basenames[basename] += 1
tags = []
all_paths2 = []
for i, (basename, count) in enumerate(basenames.items()):
if count > 1:
tag = "%s_%05d" % (basename, i)
else:
tag = basename
if (
not self.params.input.image_tag
or tag in self.params.input.image_tag
):
tags.append(tag)
all_paths2.append(sorted_paths[i])
all_paths = all_paths2
# Wrapper function
def do_work(i, item_list, processor=None, finalize=True):
if not processor:
processor = Processor(
copy.deepcopy(params), composite_tag="%04d" % i, rank=i
)
for item in item_list:
tag, filename = item
experiments = do_import(filename, load_models=True)
imagesets = experiments.imagesets()
if len(imagesets) == 0 or len(imagesets[0]) == 0:
logger.info("Zero length imageset in file: %s", filename)
return
if len(imagesets) > 1:
raise Abort(f"Found more than one imageset in file: {filename}")
if len(imagesets[0]) > 1:
raise Abort(
"Found a multi-image file. Run again with pre_import=True"
)
try:
update_geometry(imagesets[0])
experiment = experiments[0]
experiment.beam = imagesets[0].get_beam()
experiment.detector = imagesets[0].get_detector()
except RuntimeError as e:
logger.warning("Error updating geometry on item %s, %s", tag, e)
continue
if self.reference_detector is not None:
if self.params.input.sync_reference_geom:
imageset = experiments[0].imageset
sync_geometry(
self.reference_detector.hierarchy(),
imageset.get_detector().hierarchy(),
)
experiments[0].detector = imageset.get_detector()
else:
experiments[0].detector = copy.deepcopy(
self.reference_detector
)
processor.process_experiments(tag, experiments)
if finalize:
processor.finalize()
return processor
iterable = list(zip(tags, all_paths))
if params.input.max_images:
iterable = iterable[: params.input.max_images]
if params.input.show_image_tags:
print("Showing image tags for this dataset and exiting")
for tag, item in iterable:
print(tag)
return
# prepare fractions of process_percent, if given
process_fractions = None
if params.dispatch.process_percent:
import fractions
percent = params.dispatch.process_percent / 100
process_fractions = fractions.Fraction(percent).limit_denominator(100)
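# e.g. process_percent=35 -> Fraction(0.35).limit_denominator(100) == Fraction(7, 20):
# accept events where nevent % 20 < 7, i.e. 7 out of every 20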
def process_this_event(nevent):
# nevent modulo the denominator gives us which fraction we're in
n_mod_denom = nevent % process_fractions.denominator
# compare the 0-indexed modulo against the 1-indexed numerator (intentionally not <=)
n_accept = n_mod_denom < process_fractions.numerator
return n_accept
# Process the data
if params.mp.method == "mpi":
if size <= 2: # client/server only makes sense for n>2
subset = [
item for i, item in enumerate(iterable) if (i + rank) % size == 0
]
do_work(rank, subset)
else:
processor = Processor(
copy.deepcopy(params), composite_tag="%04d" % rank, rank=rank
)
if rank == 0:
# server process
for item_num, item in enumerate(iterable):
if process_fractions and not process_this_event(item_num):
continue
print("Getting next available process")
rankreq = comm.recv(source=MPI.ANY_SOURCE)
print(f"Process {rankreq} is ready, sending {item[0]}\n")
comm.send(item, dest=rankreq)
# send a stop command to each process
print("MPI DONE, sending stops\n")
for rankreq in range(size - 1):
rankreq = comm.recv(source=MPI.ANY_SOURCE)
print("Sending stop to %d\n" % rankreq)
comm.send("endrun", dest=rankreq)
print("All stops sent.")
else:
# client process
while True:
# inform the server this process is ready for an event
print("Rank %d getting next task" % rank)
comm.send(rank, dest=0)
print("Rank %d waiting for response" % rank)
item = comm.recv(source=0)
if item == "endrun":
print("Rank %d received endrun" % rank)
break
print("Rank %d beginning processing" % rank)
try:
processor = do_work(rank, [item], processor, finalize=False)
except Exception as e:
print(
"Rank %d unhandled exception processing event" % rank,
str(e),
)
print("Rank %d event processed" % rank)
processor.finalize()
else:
from dxtbx.command_line.image_average import splitit
if params.mp.nproc == 1:
do_work(0, iterable)
else:
result = list(
easy_mp.multi_core_run(
myfunction=do_work,
argstuples=list(enumerate(splitit(iterable, params.mp.nproc))),
nproc=params.mp.nproc,
)
)
error_list = [r[2] for r in result]
if error_list.count(None) != len(error_list):
print(
"Some processes failed execution. Not all images may have processed. Error messages:"
)
for error in error_list:
if error is None:
continue
print(error)
# Total Time
logger.info("")
logger.info("Total Time Taken = %f seconds", time.time() - st)
if params.mp.debug.cProfile:
self.pr.disable()
self.pr.dump_stats(
os.path.join(
self.params.output.output_dir, "debug", "cpu_%d.prof" % comm.rank
)
)
class Processor:
def __init__(self, params, composite_tag=None, rank=0):
self.params = params
self.composite_tag = composite_tag
# The convention is to put %s in the phil parameter to add a tag to
# each output datafile. Save the initial templates here.
self.experiments_filename_template = params.output.experiments_filename
self.strong_filename_template = params.output.strong_filename
self.indexed_filename_template = params.output.indexed_filename
self.refined_experiments_filename_template = (
params.output.refined_experiments_filename
)
self.integrated_filename_template = params.output.integrated_filename
self.integrated_experiments_filename_template = (
params.output.integrated_experiments_filename
)
if params.dispatch.coset:
self.coset_filename_template = params.output.coset_filename
self.coset_experiments_filename_template = (
params.output.coset_experiments_filename
)
debug_dir = os.path.join(params.output.output_dir, "debug")
if not os.path.exists(debug_dir):
try:
os.makedirs(debug_dir)
except OSError:
pass # due to multiprocessing, makedirs can sometimes fail
assert os.path.exists(debug_dir)
self.debug_file_path = os.path.join(debug_dir, "debug_%d.txt" % rank)
write_newline = os.path.exists(self.debug_file_path)
if write_newline: # needed if there was a crash
self.debug_write("")
if params.output.composite_output:
assert composite_tag is not None
self.all_imported_experiments = ExperimentList()
self.all_strong_reflections = flex.reflection_table()
self.all_indexed_experiments = ExperimentList()
self.all_indexed_reflections = flex.reflection_table()
self.all_integrated_experiments = ExperimentList()
self.all_integrated_reflections = flex.reflection_table()
self.all_int_pickle_filenames = []
self.all_int_pickles = []
self.all_coset_experiments = ExperimentList()
self.all_coset_reflections = flex.reflection_table()
self.setup_filenames(composite_tag)
def setup_filenames(self, tag):
# before processing, set output paths according to the templates
if (
self.experiments_filename_template is not None
and "%s" in self.experiments_filename_template
):
self.params.output.experiments_filename = os.path.join(
self.params.output.output_dir,
self.experiments_filename_template % ("idx-" + tag),
)
if (
self.strong_filename_template is not None
and "%s" in self.strong_filename_template
):
self.params.output.strong_filename = os.path.join(
self.params.output.output_dir,
self.strong_filename_template % ("idx-" + tag),
)
if (
self.indexed_filename_template is not None
and "%s" in self.indexed_filename_template
):
self.params.output.indexed_filename = os.path.join(
self.params.output.output_dir,
self.indexed_filename_template % ("idx-" + tag),
)
if (
self.refined_experiments_filename_template is not None
and "%s" in self.refined_experiments_filename_template
):
self.params.output.refined_experiments_filename = os.path.join(
self.params.output.output_dir,
self.refined_experiments_filename_template % ("idx-" + tag),
)
if (
self.integrated_filename_template is not None
and "%s" in self.integrated_filename_template
):
self.params.output.integrated_filename = os.path.join(
self.params.output.output_dir,
self.integrated_filename_template % ("idx-" + tag),
)
if (
self.integrated_experiments_filename_template is not None
and "%s" in self.integrated_experiments_filename_template
):
self.params.output.integrated_experiments_filename = os.path.join(
self.params.output.output_dir,
self.integrated_experiments_filename_template % ("idx-" + tag),
)
if (
self.params.dispatch.coset
and self.coset_filename_template is not None
and "%s" in self.coset_filename_template
):
self.params.output.coset_filename = os.path.join(
self.params.output.output_dir,
self.coset_filename_template
% ("idx-" + tag, self.params.integration.coset.transformation),
)
if (
self.params.dispatch.coset
and self.coset_experiments_filename_template is not None
and "%s" in self.coset_experiments_filename_template
):
self.params.output.coset_experiments_filename = os.path.join(
self.params.output.output_dir,
self.coset_experiments_filename_template
% ("idx-" + tag, self.params.integration.coset.transformation),
)
def debug_start(self, tag):
if not self.params.mp.debug.output_debug_logs:
return
import socket
self.debug_str = f"{socket.gethostname()},{tag}"
self.debug_str += ",%s,%s,%s\n"
self.debug_write("start")
def debug_write(self, string, state=None):
if not self.params.mp.debug.output_debug_logs:
return
from xfel.cxi.cspad_ana import cspad_tbx # XXX move to common timestamp format
ts = cspad_tbx.evt_timestamp() # Now
debug_file_handle = open(self.debug_file_path, "a")
if string == "":
debug_file_handle.write("\n")
else:
if state is None:
state = " "
debug_file_handle.write(self.debug_str % (ts, state, string))
debug_file_handle.close()
def process_experiments(self, tag, experiments):
if not self.params.output.composite_output:
self.setup_filenames(tag)
self.tag = tag
self.debug_start(tag)
if self.params.output.experiments_filename:
if self.params.output.composite_output:
self.all_imported_experiments.extend(experiments)
else:
experiments.as_json(self.params.output.experiments_filename)
# Do the processing
try:
self.pre_process(experiments)
except Exception as e:
print("Error in pre-process", tag, str(e))
self.debug_write("preprocess_exception", "fail")
if not self.params.dispatch.squash_errors:
raise
return
try:
if self.params.dispatch.find_spots:
self.debug_write("spotfind_start")
observed = self.find_spots(experiments)
else:
print("Spot Finding turned off. Exiting")
self.debug_write("data_loaded", "done")
return
except Exception as e:
print("Error spotfinding", tag, str(e))
self.debug_write("spotfinding_exception", "fail")
if not self.params.dispatch.squash_errors:
raise
return
try:
if self.params.dispatch.index:
if (
self.params.dispatch.hit_finder.enable
and len(observed)
< self.params.dispatch.hit_finder.minimum_number_of_reflections
):
print("Not enough spots to index", tag)
self.debug_write(f"not_enough_spots_{len(observed)}", "stop")
return
if (
self.params.dispatch.hit_finder.maximum_number_of_reflections
is not None
):
if (
self.params.dispatch.hit_finder.enable
and len(observed)
> self.params.dispatch.hit_finder.maximum_number_of_reflections
):
print("Too many spots to index - Possibly junk", tag)
self.debug_write(f"too_many_spots_{len(observed)}", "stop")
return
self.debug_write("index_start")
experiments, indexed = self.index(experiments, observed)
else:
print("Indexing turned off. Exiting")
self.debug_write(f"spotfinding_ok_{len(observed)}", "done")
return
except Exception as e:
print("Couldn't index", tag, str(e))
if not self.params.dispatch.squash_errors:
raise
self.debug_write(f"indexing_failed_{len(observed)}", "stop")
return
self.debug_write("refine_start")
try:
experiments, indexed = self.refine(experiments, indexed)
except Exception as e:
print("Error refining", tag, str(e))
self.debug_write(f"refine_failed_{len(indexed)}", "fail")
if not self.params.dispatch.squash_errors:
raise
return
try:
if self.params.dispatch.integrate:
self.debug_write("integrate_start")
integrated = self.integrate(experiments, indexed)
else:
print("Integration turned off. Exiting")
self.debug_write(f"index_ok_{len(indexed)}", "done")
return
except Exception as e:
print("Error integrating", tag, str(e))
self.debug_write(f"integrate_failed_{len(indexed)}", "fail")
if not self.params.dispatch.squash_errors:
raise
return
self.debug_write(f"integrate_ok_{len(integrated)}", "done")
def pre_process(self, experiments):
"""Add any pre-processing steps here"""
if not self.params.input.ignore_gain_mismatch:
g1 = self.params.spotfinder.threshold.dispersion.gain
g2 = self.params.integration.summation.detector_gain
gain = g1 if g1 is not None else g2
if gain is not None and gain != 1.0:
for detector in experiments.detectors():
for panel in detector:
if panel.get_gain() != 1.0 and panel.get_gain() != gain:
raise RuntimeError(
"""
The detector is reporting a gain of %f but you have also supplied a gain of %f. Since the detector gain is not 1.0, your supplied gain will be multiplicatively applied in addition to the detector's gain, which is unlikely to be correct. Please re-run, removing spotfinder.dispersion.gain and integration.summation.detector_gain from your parameters. You can override this exception by setting input.ignore_gain_mismatch=True."""
% (panel.get_gain(), gain)
)
def find_spots(self, experiments):
st = time.time()
logger.info("*" * 80)
logger.info("Finding Strong Spots")
logger.info("*" * 80)
# Find the strong spots
observed = flex.reflection_table.from_observations(
experiments, self.params, is_stills=True
)
# Reset z coordinates for dials.image_viewer; see Issues #226 for details
xyzobs = observed["xyzobs.px.value"]
for i in range(len(xyzobs)):
xyzobs[i] = (xyzobs[i][0], xyzobs[i][1], 0)
bbox = observed["bbox"]
for i in range(len(bbox)):
bbox[i] = (bbox[i][0], bbox[i][1], bbox[i][2], bbox[i][3], 0, 1)
if self.params.output.composite_output:
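# For composite output, re-number the reflection ids and re-key the experiment
# identifiers so they stay unique when appended to the accumulated strong-spot table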
n = len(self.all_strong_reflections.experiment_identifiers())
for i, experiment in enumerate(experiments):
refls = observed.select(observed["id"] == i)
refls["id"] = flex.int(len(refls), n)
del refls.experiment_identifiers()[i]
refls.experiment_identifiers()[n] = experiment.identifier
self.all_strong_reflections.extend(refls)
n += 1
else:
# Save the reflections to file
logger.info("\n" + "-" * 80)
if self.params.output.strong_filename:
self.save_reflections(observed, self.params.output.strong_filename)
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return observed
def index(self, experiments, reflections):
from dials.algorithms.indexing.indexer import Indexer
st = time.time()
logger.info("*" * 80)
logger.info("Indexing Strong Spots")
logger.info("*" * 80)
params = copy.deepcopy(self.params)
# don't do scan-varying refinement during indexing
params.refinement.parameterisation.scan_varying = False
if hasattr(self, "known_crystal_models"):
known_crystal_models = self.known_crystal_models
else:
known_crystal_models = None
if params.indexing.stills.method_list is None:
idxr = Indexer.from_parameters(
reflections,
experiments,
known_crystal_models=known_crystal_models,
params=params,
)
idxr.index()
else:
indexing_error = None
for method in params.indexing.stills.method_list:
params.indexing.method = method
try:
idxr = Indexer.from_parameters(
reflections, experiments, params=params
)
idxr.index()
except Exception as e:
logger.info("Couldn't index using method %s", method)
if indexing_error is None:
if e is None:
e = Exception(f"Couldn't index using method {method}")
indexing_error = e
else:
indexing_error = None
break
if indexing_error is not None:
raise indexing_error
indexed = idxr.refined_reflections
experiments = idxr.refined_experiments
if known_crystal_models is not None:
filtered = flex.reflection_table()
for idx in set(indexed["miller_index"]):
sel = indexed["miller_index"] == idx
if sel.count(True) == 1:
filtered.extend(indexed.select(sel))
logger.info(
"Filtered duplicate reflections, %d out of %d remaining",
len(filtered),
len(indexed),
)
print(
"Filtered duplicate reflections, %d out of %d remaining"
% (len(filtered), len(indexed))
)
indexed = filtered
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return experiments, indexed
def refine(self, experiments, centroids):
if self.params.dispatch.refine:
from dials.algorithms.refinement import RefinerFactory
st = time.time()
logger.info("*" * 80)
logger.info("Refining Model")
logger.info("*" * 80)
refiner = RefinerFactory.from_parameters_data_experiments(
self.params, centroids, experiments
)
refiner.run()
experiments = refiner.get_experiments()
predicted = refiner.predict_for_indexed()
centroids["xyzcal.mm"] = predicted["xyzcal.mm"]
centroids["entering"] = predicted["entering"]
centroids = centroids.select(refiner.selection_used_for_refinement())
# Re-estimate mosaic estimates
from dials.algorithms.indexing.nave_parameters import NaveParameters
nv = NaveParameters(
params=self.params,
experiments=experiments,
reflections=centroids,
refinery=refiner,
graph_verbose=False,
)
nv()
acceptance_flags_nv = nv.nv_acceptance_flags
centroids = centroids.select(acceptance_flags_nv)
if self.params.output.composite_output:
if (
self.params.output.refined_experiments_filename
or self.params.output.indexed_filename
):
assert (
self.params.output.refined_experiments_filename is not None
and self.params.output.indexed_filename is not None
)
n = len(self.all_indexed_experiments)
self.all_indexed_experiments.extend(experiments)
for i, experiment in enumerate(experiments):
refls = centroids.select(centroids["id"] == i)
refls["id"] = flex.int(len(refls), n)
del refls.experiment_identifiers()[i]
refls.experiment_identifiers()[n] = experiment.identifier
self.all_indexed_reflections.extend(refls)
n += 1
else:
# Dump experiments to disk
if self.params.output.refined_experiments_filename:
experiments.as_json(self.params.output.refined_experiments_filename)
if self.params.output.indexed_filename:
self.save_reflections(centroids, self.params.output.indexed_filename)
if self.params.dispatch.refine:
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return experiments, centroids
def integrate(self, experiments, indexed):
st = time.time()
logger.info("*" * 80)
logger.info("Integrating Reflections")
logger.info("*" * 80)
indexed, _ = self.process_reference(indexed)
if self.params.integration.integration_only_overrides.trusted_range:
for detector in experiments.detectors():
for panel in detector:
panel.set_trusted_range(
self.params.integration.integration_only_overrides.trusted_range
)
if self.params.dispatch.coset:
from xfel.util.sublattice_helper import integrate_coset
integrate_coset(self, experiments, indexed)
# Get the integrator from the input parameters
logger.info("Configuring integrator from input parameters")
from dials.algorithms.integration.integrator import create_integrator
from dials.algorithms.profile_model.factory import ProfileModelFactory
# Compute the profile model
# Predict the reflections
# Match the predictions with the reference
# Create the integrator
experiments = ProfileModelFactory.create(self.params, experiments, indexed)
new_experiments = ExperimentList()
new_reflections = flex.reflection_table()
for expt_id, expt in enumerate(experiments):
if (
self.params.profile.gaussian_rs.parameters.sigma_b_cutoff is None
or expt.profile.sigma_b()
< self.params.profile.gaussian_rs.parameters.sigma_b_cutoff
):
refls = indexed.select(indexed["id"] == expt_id)
refls["id"] = flex.int(len(refls), len(new_experiments))
# refls.reset_ids()
del refls.experiment_identifiers()[expt_id]
refls.experiment_identifiers()[len(new_experiments)] = expt.identifier
new_reflections.extend(refls)
new_experiments.append(expt)
else:
logger.info(
"Rejected expt %d with sigma_b %f"
% (expt_id, expt.profile.sigma_b())
)
experiments = new_experiments
indexed = new_reflections
if len(experiments) == 0:
raise RuntimeError("No experiments after filtering by sigma_b")
logger.info("")
logger.info("=" * 80)
logger.info("")
logger.info("Predicting reflections")
logger.info("")
predicted = flex.reflection_table.from_predictions_multi(
experiments,
dmin=self.params.prediction.d_min,
dmax=self.params.prediction.d_max,
margin=self.params.prediction.margin,
force_static=self.params.prediction.force_static,
)
predicted.match_with_reference(indexed)
logger.info("")
integrator = create_integrator(self.params, experiments, predicted)
# Integrate the reflections
integrated = integrator.integrate()
# correct integrated intensities for absorption correction, if necessary
for abs_params in self.params.integration.absorption_correction:
if abs_params.apply:
if abs_params.algorithm == "fuller_kapton":
from dials.algorithms.integration.kapton_correction import (
multi_kapton_correction,
)
elif abs_params.algorithm == "kapton_2019":
from dials.algorithms.integration.kapton_2019_correction import (
multi_kapton_correction,
)
experiments, integrated = multi_kapton_correction(
experiments, integrated, abs_params.fuller_kapton, logger=logger
)()
if self.params.significance_filter.enable:
from dials.algorithms.integration.stills_significance_filter import (
SignificanceFilter,
)
sig_filter = SignificanceFilter(self.params)
filtered_refls = sig_filter(experiments, integrated)
accepted_expts = ExperimentList()
accepted_refls = flex.reflection_table()
logger.info(
"Removed %d reflections out of %d when applying significance filter",
len(integrated) - len(filtered_refls),
len(integrated),
)
for expt_id, expt in enumerate(experiments):
refls = filtered_refls.select(filtered_refls["id"] == expt_id)
if len(refls) > 0:
accepted_expts.append(expt)
refls["id"] = flex.int(len(refls), len(accepted_expts) - 1)
accepted_refls.extend(refls)
else:
logger.info(
"Removed experiment %d which has no reflections left after applying significance filter",
expt_id,
)
if len(accepted_refls) == 0:
raise Sorry("No reflections left after applying significance filter")
experiments = accepted_expts
integrated = accepted_refls
# Delete the shoeboxes used for intermediate calculations, if requested
if self.params.integration.debug.delete_shoeboxes and "shoebox" in integrated:
del integrated["shoebox"]
if self.params.output.composite_output:
if (
self.params.output.integrated_experiments_filename
or self.params.output.integrated_filename
):
assert (
self.params.output.integrated_experiments_filename is not None
and self.params.output.integrated_filename is not None
)
n = len(self.all_integrated_experiments)
self.all_integrated_experiments.extend(experiments)
for i, experiment in enumerate(experiments):
refls = integrated.select(integrated["id"] == i)
refls["id"] = flex.int(len(refls), n)
del refls.experiment_identifiers()[i]
refls.experiment_identifiers()[n] = experiment.identifier
self.all_integrated_reflections.extend(refls)
n += 1
else:
# Dump experiments to disk
if self.params.output.integrated_experiments_filename:
experiments.as_json(self.params.output.integrated_experiments_filename)
if self.params.output.integrated_filename:
# Save the reflections
self.save_reflections(
integrated, self.params.output.integrated_filename
)
self.write_integration_pickles(integrated, experiments)
from dials.algorithms.indexing.stills_indexer import (
calc_2D_rmsd_and_displacements,
)
rmsd_indexed, _ = calc_2D_rmsd_and_displacements(indexed)
log_str = f"RMSD indexed (px): {rmsd_indexed:f}\n"
for i in range(6):
bright_integrated = integrated.select(
(
integrated["intensity.sum.value"]
/ flex.sqrt(integrated["intensity.sum.variance"])
)
>= i
)
if len(bright_integrated) > 0:
rmsd_integrated, _ = calc_2D_rmsd_and_displacements(bright_integrated)
else:
rmsd_integrated = 0
log_str += (
"N reflections integrated at I/sigI >= %d: % 4d, RMSD (px): %f\n"
% (i, len(bright_integrated), rmsd_integrated)
)
for crystal_model in experiments.crystals():
if hasattr(crystal_model, "get_domain_size_ang"):
log_str += ". Final ML model: domain size angstroms: {:f}, half mosaicity degrees: {:f}".format(
crystal_model.get_domain_size_ang(),
crystal_model.get_half_mosaicity_deg(),
)
logger.info(log_str)
logger.info("")
logger.info("Time Taken = %f seconds", time.time() - st)
return integrated
def write_integration_pickles(self, integrated, experiments, callback=None):
"""
Write a serialized python dictionary with integrated intensities and other information
suitable for use by cxi.merge or prime.postrefine.
@param integrated Reflection table with integrated intensities
@param experiments Experiment list. One integration pickle for each experiment will be created.
@param callback Deriving classes can use callback to make further modifications to the dictionary
before it is serialized. Callback should be a function with this signature:
def functionname(params, outfile, frame), where params is the phil scope, outfile is the path
to the pickle that will be saved, and frame is the python dictionary to be serialized.
"""
if not hasattr(self.params.output, "integration_pickle"):
return
if self.params.output.integration_pickle is not None:
from xfel.command_line.frame_extractor import ConstructFrame
# Split everything into separate experiments for pickling
for e_number, experiment in enumerate(experiments):
e_selection = integrated["id"] == e_number
reflections = integrated.select(e_selection)
frame = ConstructFrame(reflections, experiment).make_frame()
frame["pixel_size"] = experiment.detector[0].get_pixel_size()[0]
if not hasattr(self, "tag") or self.tag is None:
try:
# if the data was a file on disc, get the path
event_timestamp = os.path.splitext(
experiments[0].imageset.paths()[0]
)[0]
except NotImplementedError:
# if the data is in memory only, check if the reader set a timestamp on the format object
event_timestamp = (
experiment.imageset.reader().get_format(0).timestamp
)
event_timestamp = os.path.basename(event_timestamp)
if event_timestamp.find("shot-") == 0:
event_timestamp = os.path.splitext(event_timestamp)[
0
] # micromanage the file name
else:
event_timestamp = self.tag
if hasattr(self.params.output, "output_dir"):
outfile = os.path.join(
self.params.output.output_dir,
self.params.output.integration_pickle
% (e_number, event_timestamp),
)
else:
outfile = os.path.join(
os.path.dirname(self.params.output.integration_pickle),
self.params.output.integration_pickle
% (e_number, event_timestamp),
)
if callback is not None:
callback(self.params, outfile, frame)
if self.params.output.composite_output:
self.all_int_pickle_filenames.append(os.path.basename(outfile))
self.all_int_pickles.append(frame)
else:
with open(outfile, "wb") as fh:
pickle.dump(frame, fh, protocol=pickle.HIGHEST_PROTOCOL)
def process_reference(self, reference):
"""Load the reference spots."""
if reference is None:
return None, None
st = time.time()
assert "miller_index" in reference
assert "id" in reference
logger.info("Processing reference reflections")
logger.info(" read %d strong spots", len(reference))
mask = reference.get_flags(reference.flags.indexed)
rubbish = reference.select(~mask)
if mask.count(False) > 0:
reference.del_selected(~mask)
logger.info(" removing %d unindexed reflections", mask.count(True))
if len(reference) == 0:
raise Sorry(
"""
Invalid input for reference reflections.
Expected > %d indexed spots, got %d
"""
% (0, len(reference))
)
mask = reference["miller_index"] == (0, 0, 0)
if mask.count(True) > 0:
rubbish.extend(reference.select(mask))
reference.del_selected(mask)
logger.info(" removing %d reflections with hkl (0,0,0)", mask.count(True))
mask = reference["id"] < 0
if mask.count(True) > 0:
raise Sorry(
"""
Invalid input for reference reflections.
%d reference spots have an invalid experiment id
"""
% mask.count(True)
)
logger.info(" using %d indexed reflections", len(reference))
logger.info(" found %d junk reflections", len(rubbish))
logger.info(" time taken: %g", time.time() - st)
return reference, rubbish
def save_reflections(self, reflections, filename):
"""Save the reflections to file."""
st = time.time()
logger.info("Saving %d reflections to %s", len(reflections), filename)
reflections.as_file(filename)
logger.info(" time taken: %g", time.time() - st)
def finalize(self):
"""Perform any final operations"""
if self.params.output.composite_output:
if self.params.mp.composite_stride is not None:
assert self.params.mp.method == "mpi"
stride = self.params.mp.composite_stride
from mpi4py import MPI
comm = MPI.COMM_WORLD
rank = comm.Get_rank() # each process in MPI has a unique id, 0-indexed
size = comm.Get_size() # size: number of processes running in this job
comm.barrier()
if rank % stride == 0:
subranks = [rank + i for i in range(1, stride) if rank + i < size]
for i in range(len(subranks)):
logger.info("Rank %d waiting for sender", rank)
(
sender,
imported_experiments,
strong_reflections,
indexed_experiments,
indexed_reflections,
integrated_experiments,
integrated_reflections,
coset_experiments,
coset_reflections,
int_pickles,
int_pickle_filenames,
) = comm.recv(source=MPI.ANY_SOURCE)
logger.info("Rank %d received data from rank %d", rank, sender)
def extend_with_bookkeeping(
src_expts, src_refls, dest_expts, dest_refls
):
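# Offset the incoming reflection ids and experiment-identifier keys by the
# current size of the destination table so the merged ids remain unique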
n = len(dest_refls.experiment_identifiers())
src_refls["id"] += n
idents = src_refls.experiment_identifiers()
keys = idents.keys()
values = idents.values()
for key in keys:
del idents[key]
for i, key in enumerate(keys):
idents[key + n] = values[i]
dest_expts.extend(src_expts)
dest_refls.extend(src_refls)
if len(imported_experiments) > 0:
extend_with_bookkeeping(
imported_experiments,
strong_reflections,
self.all_imported_experiments,
self.all_strong_reflections,
)
if len(indexed_experiments) > 0:
extend_with_bookkeeping(
indexed_experiments,
indexed_reflections,
self.all_indexed_experiments,
self.all_indexed_reflections,
)
if len(integrated_experiments) > 0:
extend_with_bookkeeping(
integrated_experiments,
integrated_reflections,
self.all_integrated_experiments,
self.all_integrated_reflections,
)
if len(coset_experiments) > 0:
extend_with_bookkeeping(
coset_experiments,
coset_reflections,
self.all_coset_experiments,
self.all_coset_reflections,
)
self.all_int_pickles.extend(int_pickles)
self.all_int_pickle_filenames.extend(int_pickle_filenames)
else:
destrank = (rank // stride) * stride
logger.info(
"Rank %d sending results to rank %d",
rank,
(rank // stride) * stride,
)
comm.send(
(
rank,
self.all_imported_experiments,
self.all_strong_reflections,
self.all_indexed_experiments,
self.all_indexed_reflections,
self.all_integrated_experiments,
self.all_integrated_reflections,
self.all_coset_experiments,
self.all_coset_reflections,
self.all_int_pickles,
self.all_int_pickle_filenames,
),
dest=destrank,
)
self.all_imported_experiments = (
self.all_strong_reflections
) = (
self.all_indexed_experiments
) = (
self.all_indexed_reflections
) = (
self.all_integrated_experiments
) = (
self.all_integrated_reflections
) = (
self.all_coset_experiments
) = (
self.all_coset_reflections
) = self.all_int_pickles = self.all_int_pickle_filenames = []
# Dump composite files to disk
if (
len(self.all_imported_experiments) > 0
and self.params.output.experiments_filename
):
self.all_imported_experiments.as_json(
self.params.output.experiments_filename
)
if (
len(self.all_strong_reflections) > 0
and self.params.output.strong_filename
):
self.save_reflections(
self.all_strong_reflections, self.params.output.strong_filename
)
if (
len(self.all_indexed_experiments) > 0
and self.params.output.refined_experiments_filename
):
self.all_indexed_experiments.as_json(
self.params.output.refined_experiments_filename
)
if (
len(self.all_indexed_reflections) > 0
and self.params.output.indexed_filename
):
self.save_reflections(
self.all_indexed_reflections, self.params.output.indexed_filename
)
if (
len(self.all_integrated_experiments) > 0
and self.params.output.integrated_experiments_filename
):
self.all_integrated_experiments.as_json(
self.params.output.integrated_experiments_filename
)
if (
len(self.all_integrated_reflections) > 0
and self.params.output.integrated_filename
):
self.save_reflections(
self.all_integrated_reflections,
self.params.output.integrated_filename,
)
if self.params.dispatch.coset:
if (
len(self.all_coset_experiments) > 0
and self.params.output.coset_experiments_filename
):
self.all_coset_experiments.as_json(
self.params.output.coset_experiments_filename
)
if (
len(self.all_coset_reflections) > 0
and self.params.output.coset_filename
):
self.save_reflections(
self.all_coset_reflections, self.params.output.coset_filename
)
# Create a tar archive of the integration dictionary pickles
if len(self.all_int_pickles) > 0 and self.params.output.integration_pickle:
tar_template_integration_pickle = (
self.params.output.integration_pickle.replace("%d", "%s")
)
outfile = (
os.path.join(
self.params.output.output_dir,
tar_template_integration_pickle % ("x", self.composite_tag),
)
+ ".tar"
)
tar = tarfile.TarFile(outfile, "w")
for i, (fname, d) in enumerate(
zip(self.all_int_pickle_filenames, self.all_int_pickles)
):
string = BytesIO(pickle.dumps(d, protocol=2))
info = tarfile.TarInfo(name=fname)
info.size = string.getbuffer().nbytes
info.mtime = time.time()
tar.addfile(tarinfo=info, fileobj=string)
tar.close()
@dials.util.show_mail_handle_errors()
def run(args=None):
script = Script()
script.run(args)
if __name__ == "__main__":
run()
|
the-stack_0_15238 | from django.contrib.auth import views as auth_views
from django.urls import path
from prometheus_client import Gauge
import vote.views
from management import views
from management.models import ElectionManager
from vote.models import Election, Session
app_name = 'management'
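# Callback gauges: Prometheus scrapes read the current object counts straight from the database at collection time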
election_gauge = Gauge('wahlfang_election_count', 'Wahlfang Number of Elections')
election_gauge.set_function(lambda: Election.objects.all().count())
election_manager_gauge = Gauge('wahlfang_election_manager_count', 'Wahlfang Number of Election Managers')
election_manager_gauge.set_function(lambda: ElectionManager.objects.all().count())
session_gauge = Gauge('wahlfang_session_count', 'Wahlfang Number of Sessions')
session_gauge.set_function(lambda: Session.objects.all().count())
urlpatterns = [
path('', views.index, name='index'),
path('help', views.help_page, name='help'),
# Session
path('meeting/<int:pk>', views.session_detail, name='session'),
path('meeting/<int:pk>/settings', views.session_settings, name='session_settings'),
path('meeting/<int:pk>/delete_session', views.delete_session, name='delete_session'),
path('meeting/<int:pk>/add_voters', views.add_voters, name='add_voters'),
path('meeting/<int:pk>/add_tokens', views.add_tokens, name='add_tokens'),
path('meeting/<int:pk>/add_election', views.add_election, name='add_election'),
path('meeting/<int:pk>/print_token', views.print_token, name='print_token'),
path('meeting/<int:pk>/import_csv', views.import_csv, name='import_csv'),
path('meeting/<int:pk>/spectator', views.spectator, name='spectator'),
# Election
path('election/<int:pk>/add_application', views.election_upload_application, name='add_application'),
path('election/<int:pk>/edit/<int:application_id>', views.election_upload_application, name='edit_application'),
path('election/<int:pk>/edit/<int:application_id>/delete_application', views.election_delete_application,
name='delete_application'),
path('election/<int:pk>', views.election_detail, name='election'),
path('election/<int:pk>/delete_voter', views.delete_voter, name='delete_voter'),
path('election/<int:pk>/delete_election', views.delete_election, name='delete_election'),
path('election/<int:pk>/export_csv', views.export_csv, name='export_csv'),
# account management stuff
path('login', views.LoginView.as_view(), name='login'),
path('logout', auth_views.LogoutView.as_view(
next_page='management:login',
), name='logout')
]
|
the-stack_0_15240 | import json
import web
import six
import re
import os
import urlparse
from werkzeug.exceptions import BadRequest, MethodNotAllowed
from urllib import unquote
from utils import props
from init_subclass_meta import InitSubclassMeta
from graphql import Source, execute, parse, validate
from graphql.error import format_error as format_graphql_error
from graphql.error import GraphQLError
from graphql.execution import ExecutionResult
from graphql.type.schema import GraphQLSchema
from graphql.utils.get_operation_ast import get_operation_ast
BASE_DIR = os.path.dirname(os.path.realpath(__file__))
DIR_PATH = os.path.join(BASE_DIR, 'templates')
def get_accepted_content_types():
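# Parse the HTTP Accept header into content types sorted by their q-value weights, highest preference first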
def qualify(x):
parts = x.split(';', 1)
if len(parts) == 2:
match = re.match(r'(^|;)q=(0(\.\d{,3})?|1(\.0{,3})?)(;|$)',
parts[1])
if match:
return parts[0], float(match.group(2))
return parts[0], 1
raw_content_types = web.ctx.env.get('HTTP_ACCEPT', '*/*').split(',')
qualified_content_types = map(qualify, raw_content_types)
return list(x[0] for x in sorted(qualified_content_types,
key=lambda x: x[1], reverse=True))
class HttpError(Exception):
def __init__(self, response, message=None, *args, **kwargs):
self.response = response
self.message = message = message or response.description
super(HttpError, self).__init__(message, *args, **kwargs)
class GraphQLView:
__metaclass__ = InitSubclassMeta
schema = None
executor = None
root_value = None
context = None
pretty = False
graphiql = False
middleware = None
batch = False
graphiql_version = '0.11.11'
graphiql_temp_title = "GraphQL"
def __init__(self, *args, **kwargs):
if hasattr(self, 'GraphQLMeta'):
for key, value in props(self.GraphQLMeta).iteritems():
setattr(self, key, value)
assert not all((self.graphiql, self.batch)), 'Use either graphiql or batch processing'
assert isinstance(self.schema, GraphQLSchema), 'A Schema is required to be provided to GraphQLView.'
def get_root_value(self):
return self.root_value
def get_context(self):
if self.context is not None:
return self.context
return web.ctx
def get_middleware(self):
return self.middleware
def get_executor(self):
return self.executor
def render_graphiql(self, **kwargs):
for key, value in kwargs.iteritems():
kwargs[key] = json.dumps(kwargs.get(key, None))
render = web.template.render(DIR_PATH)
return render.graph(self.graphiql_version, **kwargs)
def dispatch(self):
try:
if web.ctx.method.lower() not in ('get', 'post'):
raise HttpError(MethodNotAllowed(['GET', 'POST'], 'GraphQL only supports GET and POST requests.'))
data = self.parse_body()
show_graphiql = self.graphiql and self.can_display_graphiql(data)
if self.batch: # False
responses = [self.get_response(entry) for entry in data]
result = '[{}]'.format(','.join([response[0] for response in responses]))
status_code = max(responses, key=lambda response: response[1])[1]
else:
result, status_code = self.get_response(data, show_graphiql)
if show_graphiql:
query, variables, operation_name, id = self.get_graphql_params(data)
return self.render_graphiql(
query=query,
variables=json.dumps(variables),
operation_name=operation_name,
result=result,
graphiql_temp_title=self.graphiql_temp_title
)
else:
web.header('Content-Type', 'application/json')
return result
except HttpError as e:
web.header('Content-Type', 'application/json')
return self.json_encode({'errors': [self.format_error(e)]})
def get_response(self, data, show_graphiql=False):
query, variables, operation_name, id = self.get_graphql_params(data)
execution_result = self.execute_graphql_request(
data,
query,
variables,
operation_name,
show_graphiql
)
status_code = 200
if execution_result:
response = {}
if execution_result.errors:
response['errors'] = [self.format_error(e) for e in execution_result.errors]
if execution_result.invalid:
status_code = 400
else:
status_code = 200
response['data'] = execution_result.data
if self.batch:
response = {
'id': id,
'payload': response,
'status': status_code,
}
result = self.json_encode(response, show_graphiql)
else:
result = None
return result, status_code
def execute(self, *args, **kwargs):
return execute(self.schema, *args, **kwargs)
def execute_graphql_request(self, data, query, variables, operation_name, show_graphiql=False):
if not query:
if show_graphiql:
return None
raise HttpError(BadRequest('Must provide query string.'))
try:
source = Source(query, name='GraphQL request')
ast = parse(source)
validation_errors = validate(self.schema, ast)
if validation_errors:
return ExecutionResult(
errors=validation_errors,
invalid=True,
)
except Exception as e:
return ExecutionResult(errors=[e], invalid=True)
if web.ctx.method.lower() == 'get':
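# GET requests may only run query operations; mutations and subscriptions must be sent via POST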
operation_ast = get_operation_ast(ast, operation_name)
if operation_ast and operation_ast.operation != 'query':
if show_graphiql:
return None
raise HttpError(MethodNotAllowed(
['POST'], 'Can only perform a {} operation from a POST request.'.format(operation_ast.operation)
))
try:
return self.execute(
ast,
root_value=self.get_root_value(),
variable_values=variables or {},
operation_name=operation_name,
context_value=self.get_context(),
middleware=self.get_middleware(),
executor=self.get_executor()
)
except Exception as e:
return ExecutionResult(errors=[e], invalid=True)
def parse_body(self):
content_type = web.ctx.env.get('CONTENT_TYPE')
if content_type == 'application/graphql':
return dict(urlparse.parse_qsl(web.data()))
elif content_type == 'application/json':
try:
request_json = json.loads(web.data().decode('utf8'))
if self.batch:
assert isinstance(request_json, list)
else:
assert isinstance(request_json, dict)
return request_json
except:
raise HttpError(BadRequest('POST body sent invalid JSON.'))
elif content_type == 'application/x-www-form-urlencoded':
return dict(urlparse.parse_qsl(web.data()))
elif content_type == 'multipart/form-data':
return web.data()
return {}
def json_encode(self, d, show_graphiql=False):
pretty = self.pretty or show_graphiql or web.input().get('pretty')
if not pretty:
return json.dumps(d, separators=(',', ':'))
return json.dumps(d, sort_keys=True,
indent=2, separators=(',', ': '))
def get_graphql_params(self, data):
variables = query = id = operation_name = None
query = self.check_data_underfiend('query', data)
variables = self.check_data_underfiend('variables', data)
id = self.check_data_underfiend('id', data)
operation_name = self.check_data_underfiend('operationName', data)
if variables and isinstance(variables, six.text_type):
try:
variables = json.loads(variables)
except:
raise HttpError(BadRequest('Variables are invalid JSON.'))
return query, variables, operation_name, id
def GET(self):
return self.dispatch()
def POST(self):
return self.dispatch()
@staticmethod
def check_data_underfiend(param, data):
parameter = web.input().get(param, None) or data.get(param, None)
return parameter if parameter != "undefined" else None
@classmethod
def can_display_graphiql(cls, data):
raw = 'raw' in web.input() or 'raw' in web.data()
return not raw and cls.request_wants_html()
@classmethod
def request_wants_html(cls):
accepted = get_accepted_content_types()
html_index = accepted.count('text/html')
json_index = accepted.count('application/json')
return html_index > json_index
@staticmethod
def format_error(error):
if isinstance(error, GraphQLError):
return format_graphql_error(error)
return {'message': six.text_type(error)}
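# Hypothetical usage sketch (not part of the original module): a concrete view
# would normally subclass GraphQLView, provide its schema via the GraphQLMeta
# inner class that __init__ copies attributes from, and be mounted as a regular
# web.py handler. The names `my_schema` and `MyGraphQL` are illustrative assumptions.
#
#   class MyGraphQL(GraphQLView):
#       class GraphQLMeta:
#           schema = my_schema  # a graphql.GraphQLSchema instance
#           graphiql = True
#
#   urls = ('/graphql', 'MyGraphQL')
#   app = web.application(urls, globals())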
|
the-stack_0_15241 | #!/usr/bin/env python
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Loads data into BigQuery from an object in Google Cloud Storage.
For more information, see the README.md under /bigquery.
Example invocation:
$ python load_data_from_gcs.py example_dataset example_table \
gs://example-bucket/example-data.csv
The dataset and table should already exist.
"""
import argparse
import time
import uuid
from gcloud import bigquery
def load_data_from_gcs(dataset_name, table_name, source):
bigquery_client = bigquery.Client()
dataset = bigquery_client.dataset(dataset_name)
table = dataset.table(table_name)
job_name = str(uuid.uuid4())
job = bigquery_client.load_table_from_storage(
job_name, table, source)
job.begin()
wait_for_job(job)
print('Loaded {} rows into {}:{}.'.format(
job.output_rows, dataset_name, table_name))
def wait_for_job(job):
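# Poll the load job until BigQuery marks it DONE, raising if it finished with an error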
while True:
job.reload()
if job.state == 'DONE':
if job.error_result:
raise RuntimeError(job.error_result)
return
time.sleep(1)
if __name__ == '__main__':
parser = argparse.ArgumentParser(
description=__doc__,
formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('dataset_name')
parser.add_argument('table_name')
parser.add_argument(
'source', help='The Google Cloud Storage object to load. Must be in '
'the format gs://bucket_name/object_name')
args = parser.parse_args()
load_data_from_gcs(
args.dataset_name,
args.table_name,
args.source)
|
the-stack_0_15244 | # coding=utf-8
# --------------------------------------------------------------------------
# Copyright (c) Microsoft Corporation. All rights reserved.
# Licensed under the MIT License. See License.txt in the project root for license information.
# Code generated by Microsoft (R) AutoRest Code Generator.
# Changes may cause incorrect behavior and will be lost if the code is regenerated.
# --------------------------------------------------------------------------
# coding: utf-8
from setuptools import setup, find_packages
NAME = "nonstringenumsclient"
VERSION = "0.1.0"
# To install the library, run the following
#
# python setup.py install
#
# prerequisite: setuptools
# http://pypi.python.org/pypi/setuptools
REQUIRES = ["msrest>=0.6.18", "azure-core<2.0.0,>=1.8.0"]
setup(
name=NAME,
version=VERSION,
description="NonStringEnumsClient",
author_email="",
url="",
keywords=["Swagger", "NonStringEnumsClient"],
install_requires=REQUIRES,
packages=find_packages(),
include_package_data=True,
long_description="""\
Testing non-string enums.
"""
)
|
the-stack_0_15245 |
import os, glob
from conans import ConanFile, tools, AutoToolsBuildEnvironment
from conans.errors import ConanException
from conans.model.version import Version
class IlmBaseConan(ConanFile):
name = "ilmbase"
description = "IlmBase is a component of OpenEXR. OpenEXR is a high dynamic-range (HDR) image file format developed by Industrial Light & Magic for use in computer imaging applications."
version = "2.3.0"
license = "BSD"
url = "https://github.com/Mikayex/conan-ilmbase.git"
settings = "os", "compiler", "build_type", "arch"
options = {"shared": [True, False], "namespace_versioning": [True, False], "fPIC": [True, False]}
default_options = "shared=False", "namespace_versioning=True", "fPIC=True"
generators = "cmake"
exports = "FindIlmBase.cmake"
def config_options(self):
if self.settings.os == "Windows":
self.options.remove("fPIC")
def configure(self):
if "fPIC" in self.options.fields and self.options.shared:
self.options.fPIC = True
if self.settings.compiler == 'gcc' and Version(str(self.settings.compiler.version)) < "5":
raise ConanException("gcc >= 5 is required (support for C++14)")
if self.settings.compiler == 'apple-clang' and self.settings.compiler.libcxx == 'libstdc++':
raise ConanException("Compile with stdlib=libc++ using settings.compiler.libcxx")
def source(self):
url = "https://github.com/openexr/openexr/releases/download/v{version}/ilmbase-{version}.tar.gz"
tools.get(url.format(version=self.version))
tools.replace_in_file(os.path.join('ilmbase-{}'.format(self.version), 'CMakeLists.txt'), 'PROJECT ( ilmbase )',
"""PROJECT ( ilmbase )
include(${CMAKE_BINARY_DIR}/conanbuildinfo.cmake)
conan_basic_setup()
""")
def build(self):
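# Translate the conan options into the corresponding autotools --enable/--disable flags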
yes_no = {True: "enable", False: "disable"}
args = ["--{}-shared".format(yes_no.get(bool(self.options.shared))),
"--{}-static".format(yes_no.get(not bool(self.options.shared))),
"--{}-namespaceversioning".format(yes_no.get(bool(self.options.namespace_versioning))),
]
autotools = AutoToolsBuildEnvironment(self)
autotools.configure(configure_dir='ilmbase-{}'.format(self.version), args=args)
autotools.make()
tools.replace_prefix_in_pc_file("IlmBase.pc", "${package_root_path_ilmbase}")
def package(self):
autotools = AutoToolsBuildEnvironment(self)
autotools.install()
self.copy("FindIlmBase.cmake", src=".", dst=".")
self.copy("license*", dst="licenses", src="ilmbase-%s" % self.version, ignore_case=True, keep_path=False)
for f in glob.glob(os.path.join(self.package_folder, 'lib', '*.la')):
os.remove(f)
def package_info(self):
self.cpp_info.includedirs = [os.path.join('include', 'OpenEXR'), ]
self.cpp_info.libs = ['Half', 'Iex', 'IexMath', 'IlmThread', 'Imath']
if self.options.shared and self.settings.os == "Windows":
self.cpp_info.defines.append("OPENEXR_DLL")
if not self.settings.os == "Windows":
self.cpp_info.cppflags = ["-pthread"]
|
the-stack_0_15246 | # -*- coding: utf-8 -*-
"""
Copyright (C) 2017 Sebastian Golasch (plugin.video.netflix)
Copyright (C) 2018 Caphm (original implementation module)
Common base for crypto handlers
SPDX-License-Identifier: MIT
See LICENSES/MIT.md for more information.
"""
import json
import base64
import time
import resources.lib.common as common
from resources.lib.services.msl.msl_utils import MSL_DATA_FILENAME
from resources.lib.utils.esn import get_esn
from resources.lib.utils.logging import LOG
class MSLBaseCrypto:
"""
Common base class for MSL crypto operations.
Handles MasterToken and sequence number
"""
def __init__(self):
self._msl_data = None
self.mastertoken = None
self.serial_number = None
self.sequence_number = None
self.renewal_window = None
self.expiration = None
self.bound_esn = None # Specify the ESN bound to mastertoken
def load_msl_data(self, msl_data=None):
self._msl_data = msl_data if msl_data else {}
if msl_data:
self.set_mastertoken(msl_data['tokens']['mastertoken'])
self.bound_esn = msl_data.get('bound_esn', get_esn())
def compare_mastertoken(self, mastertoken):
"""Check if the new MasterToken is different from current due to renew"""
if not self._mastertoken_is_newer_that(mastertoken):
LOG.debug('MSL mastertoken is changed due to renew')
self.set_mastertoken(mastertoken)
self._save_msl_data()
def _mastertoken_is_newer_that(self, mastertoken):
"""Check if current MasterToken is newer than mastertoken specified"""
# Based on cadmium player sourcecode and ref. to [isNewerThan] in:
# https://github.com/Netflix/msl/blob/master/core/src/main/java/com/netflix/msl/tokens/MasterToken.java
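# Sequence numbers wrap around at 2^53, so the comparison allows a 127-wide window across the wrap point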
new_tokendata = json.loads(
base64.standard_b64decode(mastertoken['tokendata'].encode('utf-8')).decode('utf-8'))
if new_tokendata['sequencenumber'] == self.sequence_number:
return new_tokendata['expiration'] > self.expiration
if new_tokendata['sequencenumber'] > self.sequence_number:
cut_off = new_tokendata['sequencenumber'] - pow(2, 53) + 127
return self.sequence_number >= cut_off
cut_off = self.sequence_number - pow(2, 53) + 127
return new_tokendata['sequencenumber'] < cut_off
def parse_key_response(self, headerdata, esn, save_to_disk):
"""Parse a key response and update crypto keys"""
self.set_mastertoken(headerdata['keyresponsedata']['mastertoken'])
self._init_keys(headerdata['keyresponsedata'])
self.bound_esn = esn
if save_to_disk:
self._save_msl_data()
def set_mastertoken(self, mastertoken):
"""Set the MasterToken and check it for validity"""
tokendata = json.loads(
base64.standard_b64decode(mastertoken['tokendata'].encode('utf-8')).decode('utf-8'))
self.mastertoken = mastertoken
self.serial_number = tokendata['serialnumber']
self.sequence_number = tokendata.get('sequencenumber', 0)
self.renewal_window = tokendata['renewalwindow']
self.expiration = tokendata['expiration']
def _save_msl_data(self):
"""Save crypto keys and MasterToken to disk"""
self._msl_data['tokens'] = {'mastertoken': self.mastertoken}
self._msl_data.update(self._export_keys())
self._msl_data['bound_esn'] = self.bound_esn
common.save_file_def(MSL_DATA_FILENAME, json.dumps(self._msl_data).encode('utf-8'))
LOG.debug('Successfully saved MSL data to disk')
def _init_keys(self, key_response_data):
"""Initialize crypto keys from key_response_data"""
raise NotImplementedError
def _export_keys(self):
"""Export crypto keys to a dict"""
raise NotImplementedError
def get_user_id_token(self, profile_guid):
"""Get a valid the user id token associated to a profile guid"""
if 'user_id_tokens' in self._msl_data:
user_id_token = self._msl_data['user_id_tokens'].get(profile_guid)
if user_id_token and not self.is_user_id_token_expired(user_id_token):
return user_id_token
return None
def save_user_id_token(self, profile_guid, user_token_id):
"""Save or update a user id token associated to a profile guid"""
if 'user_id_tokens' not in self._msl_data:
save_msl_data = True
self._msl_data['user_id_tokens'] = {
profile_guid: user_token_id
}
else:
save_msl_data = not self._msl_data['user_id_tokens'].get(profile_guid) == user_token_id
self._msl_data['user_id_tokens'][profile_guid] = user_token_id
if save_msl_data:
self._save_msl_data()
def clear_user_id_tokens(self):
"""Clear all user id tokens"""
self._msl_data.pop('user_id_tokens', None)
self._save_msl_data()
def is_user_id_token_expired(self, user_id_token):
"""Check if user id token is expired"""
token_data = json.loads(base64.standard_b64decode(user_id_token['tokendata']))
# Subtract 5min as a safety measure
return (token_data['expiration'] - 300) < time.time()
def is_current_mastertoken_expired(self):
"""Check if the current MasterToken is expired"""
return self.expiration <= time.time()
def get_current_mastertoken_validity(self):
"""Gets a dict values to know if current MasterToken is renewable and/or expired"""
time_now = time.time()
renewable = self.renewal_window < time_now
expired = self.expiration <= time_now
return {'is_renewable': renewable, 'is_expired': expired}
|
the-stack_0_15247 | #!/usr/bin/env python
import os
import sys
from DIRAC import S_OK, S_ERROR, gLogger, exit
from DIRAC.Core.Base import Script
Script.setUsageMessage('''Register SE files from a list of files to DFC. The files in this list must be locally readable
{0} [option|cfgfile] DFCRoot LocalRoot Filelist SE
Example: {0} /juno/lustre/junofs/PmtCharacterization/scan_data/soft/root_macros /junofs/PmtCharacterization/scan_data/soft/root_macros filelist.txt IHEP-STORM'''.format(Script.scriptName))
Script.registerSwitch( 'e', 'existCheck', 'Check if file exists')
Script.registerSwitch( 'q:', 'querySkip=', 'Skip files in the meta query')
Script.registerSwitch( 'b:', 'bufferSize=', 'Register buffer size, default to 100')
Script.parseCommandLine(ignoreErrors = False)
from DIRAC.Core.Utilities.Adler import fileAdler
from DIRAC.Core.Utilities.File import makeGuid
from DIRAC.DataManagementSystem.Client.DataManager import DataManager
from DIRAC.Resources.Catalog.FileCatalogClient import FileCatalogClient
fcc = FileCatalogClient('DataManagement/FileCatalog')
args = Script.getPositionalArgs()
if len(args) != 4:
Script.showHelp()
exit(1)
dfcRoot = args[0]
if (not args[1]) or args[1].endswith(os.sep):
localRoot = args[1]
else:
localRoot = args[1] + os.sep
fileList = args[2]
toSE = args[3]
lfnQuery = []
existCheck = False
bufferSize = 100
switches = Script.getUnprocessedSwitches()
for switch in switches:
if switch[0] == 'q' or switch[0] == 'querySkip':
result = fcc.findFilesByMetadata({'juno_transfer': switch[1]}, '/')
if result['OK']:
lfnQuery += result['Value']
if switch[0] == 'e' or switch[0] == 'existCheck':
existCheck = True
if switch[0] == 'b' or switch[0] == 'bufferSize':
bufferSize = int(switch[1])
lfnQuery = set(lfnQuery)
counter = 0
dm = DataManager()
fileTupleBuffer = []
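# Accumulate (lfn, pfn, size, SE, guid, checksum) tuples and register them in batches of bufferSize to limit catalog round trips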
with open(fileList) as file_obj:
for fullFn in file_obj:
counter += 1
print(fullFn)
fullFn = fullFn.strip('\n')
if not fullFn.startswith(localRoot):
gLogger.error('%s does not start with %s' % (fullFn, localDir))
continue
lastPart = fullFn[len(localRoot):]
#lastPart = os.path.basename(fullFn)
lfn = os.path.join(dfcRoot, lastPart)
print(lfn)
if lfn in lfnQuery:
if counter%1000 == 0:
gLogger.notice('Skip file in query counter: %s' % counter)
continue
if existCheck:
result = fcc.isFile(lfn)
if result['OK'] and lfn in result['Value']['Successful'] and result['Value']['Successful'][lfn]:
if counter%1000 == 0:
gLogger.notice('Skip file existed counter: %s' % counter)
continue
size = os.path.getsize(fullFn)
adler32 = fileAdler(fullFn)
guid = makeGuid()
fileTuple = ( lfn, fullFn, size, toSE, guid, adler32 )
#gLogger.notice('the parameter to registered %s %s %s %s %s %s' % (lfn,fullFn,size,toSE,guid,adler32))
fileTupleBuffer.append(fileTuple)
gLogger.debug('Register to lfn: %s' % lfn)
gLogger.debug('fileTuple: %s' % (fileTuple,))
if len(fileTupleBuffer) >= bufferSize:
result = dm.registerFile( fileTupleBuffer )
if not result['OK']:
gLogger.error('Can not register %s' % fullFn)
exit(1)
del fileTupleBuffer[:]
gLogger.notice('%s files registered' % counter)
if fileTupleBuffer:
result = dm.registerFile( fileTupleBuffer )
if not result['OK']:
gLogger.error('Can not register %s' % fullFn)
exit(1)
del fileTupleBuffer[:]
gLogger.notice('Total %s files registered' % counter)
|
the-stack_0_15248 | # Copyright (c) 2021, NVIDIA CORPORATION.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import threading
import time
import uuid
import pytest
from nvflare.apis.controller_spec import TaskCompletionStatus
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from .controller_test import TestController, create_client, create_task, get_ready, launch_task
class TestBasic(TestController):
@pytest.mark.parametrize("task_name,client_name", [["__test_task", "__test_client"]])
def test_process_submission_invalid_task(self, task_name, client_name):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
with pytest.raises(RuntimeError, match=f"Unknown task: {task_name} from client {client_name}."):
controller.process_submission(
client=client, task_name=task_name, task_id=str(uuid.uuid4()), fl_ctx=FLContext(), result=Shareable()
)
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
@pytest.mark.parametrize("num_client_requests", [1, 2, 3, 4])
def test_process_task_request_client_request_multiple_times(self, method, num_client_requests):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
input_data = Shareable()
input_data["hello"] = "world"
task = create_task("__test_task", data=input_data)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
for i in range(num_client_requests):
task_name_out, _, data = controller.process_task_request(client, fl_ctx)
assert task_name_out == "__test_task"
assert data == input_data
assert task.last_client_task_map["__test_client"].task_send_count == num_client_requests
controller.cancel_task(task)
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_process_submission(self, method):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task("__test_task")
kwargs = {"targets": [client]}
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": kwargs,
},
)
get_ready(launch_thread)
task_name_out, client_task_id, data = controller.process_task_request(client, fl_ctx)
# in here we make up client results:
result = Shareable()
result["result"] = "result"
controller.process_submission(
client=client, task_name="__test_task", task_id=client_task_id, fl_ctx=fl_ctx, result=result
)
assert task.last_client_task_map["__test_client"].result == result
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
@pytest.mark.parametrize("timeout", [1, 2])
def test_task_timeout(self, method, timeout):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task(name="__test_task", data=Shareable(), timeout=timeout)
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
assert controller.get_num_standing_tasks() == 1
time.sleep(timeout + 1)
assert controller.get_num_standing_tasks() == 0
assert task.completion_status == TaskCompletionStatus.TIMEOUT
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_cancel_task(self, method):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task(name="__test_task")
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
assert controller.get_num_standing_tasks() == 1
controller.cancel_task(task=task)
controller._check_tasks()
assert controller.get_num_standing_tasks() == 0
assert task.completion_status == TaskCompletionStatus.CANCELLED
launch_thread.join()
self.stop_controller(controller, fl_ctx)
@pytest.mark.parametrize("method", TestController.ALL_APIS)
def test_cancel_all_tasks(self, method):
controller, fl_ctx = self.start_controller()
client = create_client(name="__test_client")
task = create_task("__test_task")
launch_thread = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread)
task1 = create_task("__test_task1")
launch_thread1 = threading.Thread(
target=launch_task,
kwargs={
"controller": controller,
"task": task1,
"method": method,
"fl_ctx": fl_ctx,
"kwargs": {"targets": [client]},
},
)
get_ready(launch_thread1)
assert controller.get_num_standing_tasks() == 2
controller.cancel_all_tasks()
controller._check_tasks()
assert controller.get_num_standing_tasks() == 0
assert task.completion_status == TaskCompletionStatus.CANCELLED
assert task1.completion_status == TaskCompletionStatus.CANCELLED
        launch_thread.join()
        launch_thread1.join()
self.stop_controller(controller, fl_ctx)
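# --- Illustrative sketch (not part of the original tests) ---
# The threading.Thread(target=launch_task, ...) boilerplate repeated in the
# tests above could be wrapped in a helper such as the one below. It reuses the
# launch_task/get_ready helpers already imported from controller_test; the name
# start_task_thread is an assumption for illustration.
def start_task_thread(controller, task, method, fl_ctx, targets):
    """Spawn a thread that schedules `task` for `targets` and wait until it is ready."""
    thread = threading.Thread(
        target=launch_task,
        kwargs={
            "controller": controller,
            "task": task,
            "method": method,
            "fl_ctx": fl_ctx,
            "kwargs": {"targets": targets},
        },
    )
    get_ready(thread)
    return thread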
|
the-stack_0_15249 | from brownie import (
network,
accounts,
config,
interface,
Contract,
)
from brownie.network.state import Chain
from brownie import web3
from web3 import Web3
def get_account(index=None, id=None):
if index is not None:
return accounts[index]
if id:
return accounts.load(id)
return accounts.add(config["wallets"]["from_key"])
def get_web3():
return Web3(web3.provider)
def check_solution(setup_contract):
if setup_contract.isSolved():
print("Challenge solved!")
else:
print("Challenge not solved...")
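# --- Illustrative usage sketch (not part of the original helpers) ---
# A minimal example of how these helpers might be called from a brownie script.
# The commented-out Setup contract is a hypothetical challenge contract.
def _example_usage():
    account = get_account()          # account loaded from index, id, or config key
    w3 = get_web3()                  # plain web3 handle for low-level calls
    print("Using account:", account)
    print("Connected to chain id:", w3.eth.chain_id)
    # setup = Setup.at("0x...")      # hypothetical deployed setup contract
    # check_solution(setup)          # report whether the challenge is solved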
|
the-stack_0_15251 | # Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import paddle
import paddle.nn as nn
from paddle.vision.ops import DeformConv2D
from ...modules.init import kaiming_normal_, constant_, constant_init
from .builder import GENERATORS
@paddle.no_grad()
def default_init_weights(module_list, scale=1, bias_fill=0, **kwargs):
"""Initialize network weights.
Args:
module_list (list[nn.Module] | nn.Module): Modules to be initialized.
scale (float): Scale initialized weights, especially for residual
blocks. Default: 1.
bias_fill (float): The value to fill bias. Default: 0
kwargs (dict): Other arguments for initialization function.
"""
if not isinstance(module_list, list):
module_list = [module_list]
for m in module_list:
if isinstance(m, nn.Conv2D):
kaiming_normal_(m.weight, **kwargs)
scale_weight = scale * m.weight
m.weight.set_value(scale_weight)
if m.bias is not None:
constant_(m.bias, bias_fill)
elif isinstance(m, nn.Linear):
kaiming_normal_(m.weight, **kwargs)
scale_weight = scale * m.weight
m.weight.set_value(scale_weight)
if m.bias is not None:
constant_(m.bias, bias_fill)
class ResidualBlockNoBN(nn.Layer):
"""Residual block without BN.
It has a style of:
---Conv-ReLU-Conv-+-
|________________|
Args:
nf (int): Channel number of intermediate features.
Default: 64.
"""
def __init__(self, nf=64):
super(ResidualBlockNoBN, self).__init__()
self.nf = nf
self.conv1 = nn.Conv2D(self.nf, self.nf, 3, 1, 1)
self.conv2 = nn.Conv2D(self.nf, self.nf, 3, 1, 1)
self.relu = nn.ReLU()
default_init_weights([self.conv1, self.conv2], 0.1)
def forward(self, x):
identity = x
out = self.conv2(self.relu(self.conv1(x)))
return identity + out
def MakeMultiBlocks(func, num_layers, nf=64):
"""Make layers by stacking the same blocks.
Args:
func (nn.Layer): nn.Layer class for basic block.
        num_layers (int): number of blocks.
        nf (int): channel number passed to each stacked block. Default: 64.
Returns:
nn.Sequential: Stacked blocks in nn.Sequential.
"""
Blocks = nn.Sequential()
for i in range(num_layers):
Blocks.add_sublayer('block%d' % i, func(nf))
return Blocks
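# --- Illustrative usage sketch (not part of the original module) ---
# MakeMultiBlocks simply stacks `num_layers` copies of the block class into an
# nn.Sequential; the tensor shape below is a placeholder for illustration.
def _example_stacked_blocks():
    blocks = MakeMultiBlocks(ResidualBlockNoBN, 5, nf=64)
    feat = paddle.rand([1, 64, 32, 32])
    return blocks(feat)  # residual blocks preserve the [1, 64, 32, 32] shape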
class PredeblurResNetPyramid(nn.Layer):
"""Pre-dublur module.
Args:
in_nf (int): Channel number of input image. Default: 3.
nf (int): Channel number of intermediate features. Default: 64.
HR_in (bool): Whether the input has high resolution. Default: False.
"""
def __init__(self, in_nf=3, nf=64, HR_in=False):
super(PredeblurResNetPyramid, self).__init__()
self.in_nf = in_nf
self.nf = nf
self.HR_in = True if HR_in else False
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
if self.HR_in:
self.conv_first_1 = nn.Conv2D(in_channels=self.in_nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.conv_first_2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.conv_first_3 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
else:
self.conv_first = nn.Conv2D(in_channels=self.in_nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.RB_L1_1 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_2 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_3 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_4 = ResidualBlockNoBN(nf=self.nf)
self.RB_L1_5 = ResidualBlockNoBN(nf=self.nf)
self.RB_L2_1 = ResidualBlockNoBN(nf=self.nf)
self.RB_L2_2 = ResidualBlockNoBN(nf=self.nf)
self.RB_L3_1 = ResidualBlockNoBN(nf=self.nf)
self.deblur_L2_conv = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.deblur_L3_conv = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.upsample = nn.Upsample(scale_factor=2,
mode="bilinear",
align_corners=False,
align_mode=0)
def forward(self, x):
if self.HR_in:
L1_fea = self.Leaky_relu(self.conv_first_1(x))
L1_fea = self.Leaky_relu(self.conv_first_2(L1_fea))
L1_fea = self.Leaky_relu(self.conv_first_3(L1_fea))
else:
L1_fea = self.Leaky_relu(self.conv_first(x))
L2_fea = self.deblur_L2_conv(L1_fea)
L2_fea = self.Leaky_relu(L2_fea)
L3_fea = self.deblur_L3_conv(L2_fea)
L3_fea = self.Leaky_relu(L3_fea)
L3_fea = self.RB_L3_1(L3_fea)
L3_fea = self.upsample(L3_fea)
L2_fea = self.RB_L2_1(L2_fea) + L3_fea
L2_fea = self.RB_L2_2(L2_fea)
L2_fea = self.upsample(L2_fea)
L1_fea = self.RB_L1_1(L1_fea)
L1_fea = self.RB_L1_2(L1_fea) + L2_fea
out = self.RB_L1_3(L1_fea)
out = self.RB_L1_4(out)
out = self.RB_L1_5(out)
return out
class TSAFusion(nn.Layer):
"""Temporal Spatial Attention (TSA) fusion module.
Temporal: Calculate the correlation between center frame and
neighboring frames;
Spatial: It has 3 pyramid levels, the attention is similar to SFT.
(SFT: Recovering realistic texture in image super-resolution by deep
spatial feature transform.)
Args:
nf (int): Channel number of middle features. Default: 64.
nframes (int): Number of frames. Default: 5.
center (int): The index of center frame. Default: 2.
"""
def __init__(self, nf=64, nframes=5, center=2):
super(TSAFusion, self).__init__()
self.nf = nf
self.nframes = nframes
self.center = center
self.sigmoid = nn.Sigmoid()
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
self.tAtt_2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.tAtt_1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.fea_fusion = nn.Conv2D(in_channels=self.nf * self.nframes,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_1 = nn.Conv2D(in_channels=self.nf * self.nframes,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.max_pool = nn.MaxPool2D(3, stride=2, padding=1)
self.avg_pool = nn.AvgPool2D(3, stride=2, padding=1, exclusive=False)
self.sAtt_2 = nn.Conv2D(in_channels=2 * self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_3 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.sAtt_4 = nn.Conv2D(
in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0,
)
self.sAtt_5 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.sAtt_add_1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_add_2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_L1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1,
padding=0)
self.sAtt_L2 = nn.Conv2D(
in_channels=2 * self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1,
)
self.sAtt_L3 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.upsample = nn.Upsample(scale_factor=2,
mode="bilinear",
align_corners=False,
align_mode=0)
def forward(self, aligned_fea):
"""
Args:
aligned_feat (Tensor): Aligned features with shape (b, n, c, h, w).
Returns:
Tensor: Features after TSA with the shape (b, c, h, w).
"""
B, N, C, H, W = aligned_fea.shape
x_center = aligned_fea[:, self.center, :, :, :]
emb_rf = self.tAtt_2(x_center)
emb = aligned_fea.reshape([-1, C, H, W])
emb = self.tAtt_1(emb)
emb = emb.reshape([-1, N, self.nf, H, W])
cor_l = []
for i in range(N):
            emb_nbr = emb[:, i, :, :, :]  #[B,C,H,W]
cor_tmp = paddle.sum(emb_nbr * emb_rf, axis=1)
cor_tmp = paddle.unsqueeze(cor_tmp, axis=1)
cor_l.append(cor_tmp)
cor_prob = paddle.concat(cor_l, axis=1) #[B,N,H,W]
cor_prob = self.sigmoid(cor_prob)
cor_prob = paddle.unsqueeze(cor_prob, axis=2) #[B,N,1,H,W]
cor_prob = paddle.expand(cor_prob, [B, N, self.nf, H, W]) #[B,N,C,H,W]
cor_prob = cor_prob.reshape([B, -1, H, W])
aligned_fea = aligned_fea.reshape([B, -1, H, W])
aligned_fea = aligned_fea * cor_prob
fea = self.fea_fusion(aligned_fea)
fea = self.Leaky_relu(fea)
#spatial fusion
att = self.sAtt_1(aligned_fea)
att = self.Leaky_relu(att)
att_max = self.max_pool(att)
att_avg = self.avg_pool(att)
att_pool = paddle.concat([att_max, att_avg], axis=1)
att = self.sAtt_2(att_pool)
att = self.Leaky_relu(att)
#pyramid
att_L = self.sAtt_L1(att)
att_L = self.Leaky_relu(att_L)
att_max = self.max_pool(att_L)
att_avg = self.avg_pool(att_L)
att_pool = paddle.concat([att_max, att_avg], axis=1)
att_L = self.sAtt_L2(att_pool)
att_L = self.Leaky_relu(att_L)
att_L = self.sAtt_L3(att_L)
att_L = self.Leaky_relu(att_L)
att_L = self.upsample(att_L)
att = self.sAtt_3(att)
att = self.Leaky_relu(att)
att = att + att_L
att = self.sAtt_4(att)
att = self.Leaky_relu(att)
att = self.upsample(att)
att = self.sAtt_5(att)
att_add = self.sAtt_add_1(att)
att_add = self.Leaky_relu(att_add)
att_add = self.sAtt_add_2(att_add)
att = self.sigmoid(att)
fea = fea * att * 2 + att_add
return fea
class DCNPack(nn.Layer):
"""Modulated deformable conv for deformable alignment.
Ref:
Delving Deep into Deformable Alignment in Video Super-Resolution.
"""
def __init__(self,
num_filters=64,
kernel_size=3,
stride=1,
padding=1,
dilation=1,
deformable_groups=8,
extra_offset_mask=True):
super(DCNPack, self).__init__()
self.extra_offset_mask = extra_offset_mask
self.deformable_groups = deformable_groups
self.num_filters = num_filters
        if isinstance(kernel_size, int):
            self.kernel_size = [kernel_size, kernel_size]
        else:
            self.kernel_size = list(kernel_size)
self.conv_offset_mask = nn.Conv2D(in_channels=self.num_filters,
out_channels=self.deformable_groups *
3 * self.kernel_size[0] *
self.kernel_size[1],
kernel_size=self.kernel_size,
stride=stride,
padding=padding)
self.total_channels = self.deformable_groups * 3 * self.kernel_size[
0] * self.kernel_size[1]
self.split_channels = self.total_channels // 3
self.dcn = DeformConv2D(in_channels=self.num_filters,
out_channels=self.num_filters,
kernel_size=self.kernel_size,
stride=stride,
padding=padding,
dilation=dilation,
deformable_groups=self.deformable_groups)
self.sigmoid = nn.Sigmoid()
# init conv offset
constant_init(self.conv_offset_mask, 0., 0.)
def forward(self, fea_and_offset):
out = None
x = None
if self.extra_offset_mask:
out = self.conv_offset_mask(fea_and_offset[1])
x = fea_and_offset[0]
o1 = out[:, 0:self.split_channels, :, :]
o2 = out[:, self.split_channels:2 * self.split_channels, :, :]
mask = out[:, 2 * self.split_channels:, :, :]
offset = paddle.concat([o1, o2], axis=1)
mask = self.sigmoid(mask)
y = self.dcn(x, offset, mask)
return y
class PCDAlign(nn.Layer):
"""Alignment module using Pyramid, Cascading and Deformable convolution
(PCD). It is used in EDVR.
Ref:
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
Args:
nf (int): Channel number of middle features. Default: 64.
groups (int): Deformable groups. Defaults: 8.
"""
def __init__(self, nf=64, groups=8):
super(PCDAlign, self).__init__()
self.nf = nf
self.groups = groups
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
self.upsample = nn.Upsample(scale_factor=2,
mode="bilinear",
align_corners=False,
align_mode=0)
# Pyramid has three levels:
# L3: level 3, 1/4 spatial size
# L2: level 2, 1/2 spatial size
# L1: level 1, original spatial size
# L3
self.PCD_Align_L3_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L3_offset_conv2 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L3_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
#L2
self.PCD_Align_L2_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L2_offset_conv2 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L2_offset_conv3 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L2_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
self.PCD_Align_L2_fea_conv = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
#L1
self.PCD_Align_L1_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L1_offset_conv2 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L1_offset_conv3 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_L1_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
self.PCD_Align_L1_fea_conv = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
#cascade
self.PCD_Align_cas_offset_conv1 = nn.Conv2D(in_channels=nf * 2,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_cas_offset_conv2 = nn.Conv2D(in_channels=nf,
out_channels=nf,
kernel_size=3,
stride=1,
padding=1)
self.PCD_Align_cascade_dcn = DCNPack(num_filters=nf,
kernel_size=3,
stride=1,
padding=1,
deformable_groups=groups)
def forward(self, nbr_fea_l, ref_fea_l):
"""Align neighboring frame features to the reference frame features.
Args:
nbr_fea_l (list[Tensor]): Neighboring feature list. It
contains three pyramid levels (L1, L2, L3),
each with shape (b, c, h, w).
ref_fea_l (list[Tensor]): Reference feature list. It
contains three pyramid levels (L1, L2, L3),
each with shape (b, c, h, w).
Returns:
Tensor: Aligned features.
"""
#L3
L3_offset = paddle.concat([nbr_fea_l[2], ref_fea_l[2]], axis=1)
L3_offset = self.PCD_Align_L3_offset_conv1(L3_offset)
L3_offset = self.Leaky_relu(L3_offset)
L3_offset = self.PCD_Align_L3_offset_conv2(L3_offset)
L3_offset = self.Leaky_relu(L3_offset)
L3_fea = self.PCD_Align_L3_dcn([nbr_fea_l[2], L3_offset])
L3_fea = self.Leaky_relu(L3_fea)
#L2
L2_offset = paddle.concat([nbr_fea_l[1], ref_fea_l[1]], axis=1)
L2_offset = self.PCD_Align_L2_offset_conv1(L2_offset)
L2_offset = self.Leaky_relu(L2_offset)
L3_offset = self.upsample(L3_offset)
L2_offset = paddle.concat([L2_offset, L3_offset * 2], axis=1)
L2_offset = self.PCD_Align_L2_offset_conv2(L2_offset)
L2_offset = self.Leaky_relu(L2_offset)
L2_offset = self.PCD_Align_L2_offset_conv3(L2_offset)
L2_offset = self.Leaky_relu(L2_offset)
L2_fea = self.PCD_Align_L2_dcn([nbr_fea_l[1], L2_offset])
L3_fea = self.upsample(L3_fea)
L2_fea = paddle.concat([L2_fea, L3_fea], axis=1)
L2_fea = self.PCD_Align_L2_fea_conv(L2_fea)
L2_fea = self.Leaky_relu(L2_fea)
#L1
L1_offset = paddle.concat([nbr_fea_l[0], ref_fea_l[0]], axis=1)
L1_offset = self.PCD_Align_L1_offset_conv1(L1_offset)
L1_offset = self.Leaky_relu(L1_offset)
L2_offset = self.upsample(L2_offset)
L1_offset = paddle.concat([L1_offset, L2_offset * 2], axis=1)
L1_offset = self.PCD_Align_L1_offset_conv2(L1_offset)
L1_offset = self.Leaky_relu(L1_offset)
L1_offset = self.PCD_Align_L1_offset_conv3(L1_offset)
L1_offset = self.Leaky_relu(L1_offset)
L1_fea = self.PCD_Align_L1_dcn([nbr_fea_l[0], L1_offset])
L2_fea = self.upsample(L2_fea)
L1_fea = paddle.concat([L1_fea, L2_fea], axis=1)
L1_fea = self.PCD_Align_L1_fea_conv(L1_fea)
#cascade
offset = paddle.concat([L1_fea, ref_fea_l[0]], axis=1)
offset = self.PCD_Align_cas_offset_conv1(offset)
offset = self.Leaky_relu(offset)
offset = self.PCD_Align_cas_offset_conv2(offset)
offset = self.Leaky_relu(offset)
L1_fea = self.PCD_Align_cascade_dcn([L1_fea, offset])
L1_fea = self.Leaky_relu(L1_fea)
return L1_fea
@GENERATORS.register()
class EDVRNet(nn.Layer):
"""EDVR network structure for video super-resolution.
Now only support X4 upsampling factor.
Paper:
EDVR: Video Restoration with Enhanced Deformable Convolutional Networks
Args:
in_nf (int): Channel number of input image. Default: 3.
out_nf (int): Channel number of output image. Default: 3.
scale_factor (int): Scale factor from input image to output image. Default: 4.
nf (int): Channel number of intermediate features. Default: 64.
nframes (int): Number of input frames. Default: 5.
groups (int): Deformable groups. Defaults: 8.
front_RBs (int): Number of blocks for feature extraction. Default: 5.
back_RBs (int): Number of blocks for reconstruction. Default: 10.
center (int): The index of center frame. Frame counting from 0. Default: None.
        predeblur (bool): Whether the network has a pre-deblur module. Default: False.
        HR_in (bool): Whether the input has high resolution. Default: False.
        w_TSA (bool): Whether the network uses a TSA fusion module. Default: True.
"""
def __init__(self,
in_nf=3,
out_nf=3,
scale_factor=4,
nf=64,
nframes=5,
groups=8,
front_RBs=5,
back_RBs=10,
center=None,
predeblur=False,
HR_in=False,
w_TSA=True):
super(EDVRNet, self).__init__()
self.in_nf = in_nf
self.out_nf = out_nf
self.scale_factor = scale_factor
self.nf = nf
self.nframes = nframes
self.groups = groups
self.front_RBs = front_RBs
self.back_RBs = back_RBs
self.center = nframes // 2 if center is None else center
self.predeblur = True if predeblur else False
self.HR_in = True if HR_in else False
self.w_TSA = True if w_TSA else False
self.Leaky_relu = nn.LeakyReLU(negative_slope=0.1)
if self.predeblur:
self.pre_deblur = PredeblurResNetPyramid(in_nf=self.in_nf,
nf=self.nf,
HR_in=self.HR_in)
self.cov_1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1)
else:
self.conv_first = nn.Conv2D(in_channels=self.in_nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
#feature extraction module
self.feature_extractor = MakeMultiBlocks(ResidualBlockNoBN,
self.front_RBs, self.nf)
self.fea_L2_conv1 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1)
self.fea_L2_conv2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
self.fea_L3_conv1 = nn.Conv2D(
in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=2,
padding=1,
)
self.fea_L3_conv2 = nn.Conv2D(in_channels=self.nf,
out_channels=self.nf,
kernel_size=3,
stride=1,
padding=1)
#PCD alignment module
self.PCDModule = PCDAlign(nf=self.nf, groups=self.groups)
#TSA Fusion module
if self.w_TSA:
self.TSAModule = TSAFusion(nf=self.nf,
nframes=self.nframes,
center=self.center)
else:
self.TSAModule = nn.Conv2D(in_channels=self.nframes * self.nf,
out_channels=self.nf,
kernel_size=1,
stride=1)
#reconstruction module
self.reconstructor = MakeMultiBlocks(ResidualBlockNoBN, self.back_RBs,
self.nf)
self.upconv1 = nn.Conv2D(in_channels=self.nf,
out_channels=4 * self.nf,
kernel_size=3,
stride=1,
padding=1)
self.pixel_shuffle = nn.PixelShuffle(2)
self.upconv2 = nn.Conv2D(in_channels=self.nf,
out_channels=4 * 64,
kernel_size=3,
stride=1,
padding=1)
self.HRconv = nn.Conv2D(in_channels=64,
out_channels=64,
kernel_size=3,
stride=1,
padding=1)
self.conv_last = nn.Conv2D(in_channels=64,
out_channels=self.out_nf,
kernel_size=3,
stride=1,
padding=1)
if self.scale_factor == 4:
self.upsample = nn.Upsample(scale_factor=self.scale_factor,
mode="bilinear",
align_corners=False,
align_mode=0)
def forward(self, x):
"""
Args:
x (Tensor): Input features with shape (b, n, c, h, w).
Returns:
Tensor: Features after EDVR with the shape (b, c, scale_factor*h, scale_factor*w).
"""
B, N, C, H, W = x.shape
x_center = x[:, self.center, :, :, :]
        L1_fea = x.reshape([-1, C, H, W])  #[B*N,C,H,W]
if self.predeblur:
L1_fea = self.pre_deblur(L1_fea)
L1_fea = self.cov_1(L1_fea)
if self.HR_in:
H, W = H // 4, W // 4
else:
L1_fea = self.conv_first(L1_fea)
L1_fea = self.Leaky_relu(L1_fea)
# feature extraction and create Pyramid
L1_fea = self.feature_extractor(L1_fea)
# L2
L2_fea = self.fea_L2_conv1(L1_fea)
L2_fea = self.Leaky_relu(L2_fea)
L2_fea = self.fea_L2_conv2(L2_fea)
L2_fea = self.Leaky_relu(L2_fea)
# L3
L3_fea = self.fea_L3_conv1(L2_fea)
L3_fea = self.Leaky_relu(L3_fea)
L3_fea = self.fea_L3_conv2(L3_fea)
L3_fea = self.Leaky_relu(L3_fea)
L1_fea = L1_fea.reshape([-1, N, self.nf, H, W])
L2_fea = L2_fea.reshape([-1, N, self.nf, H // 2, W // 2])
L3_fea = L3_fea.reshape([-1, N, self.nf, H // 4, W // 4])
# pcd align
ref_fea_l = [
L1_fea[:, self.center, :, :, :], L2_fea[:, self.center, :, :, :],
L3_fea[:, self.center, :, :, :]
]
aligned_fea = []
for i in range(N):
nbr_fea_l = [
L1_fea[:, i, :, :, :], L2_fea[:, i, :, :, :], L3_fea[:,
i, :, :, :]
]
aligned_fea.append(self.PCDModule(nbr_fea_l, ref_fea_l))
# TSA Fusion
aligned_fea = paddle.stack(aligned_fea, axis=1) # [B, N, C, H, W]
fea = None
if not self.w_TSA:
aligned_fea = aligned_fea.reshape([B, -1, H, W])
        fea = self.TSAModule(aligned_fea)  # [B, C, H, W]
#Reconstruct
out = self.reconstructor(fea)
out = self.upconv1(out)
out = self.pixel_shuffle(out)
out = self.Leaky_relu(out)
out = self.upconv2(out)
out = self.pixel_shuffle(out)
out = self.Leaky_relu(out)
out = self.HRconv(out)
out = self.Leaky_relu(out)
out = self.conv_last(out)
if self.HR_in:
base = x_center
else:
base = self.upsample(x_center)
out += base
return out
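# --- Illustrative usage sketch (not part of the original module) ---
# A minimal smoke test for EDVRNet under the default 4x setting: five input
# frames of shape (3, 64, 64) should yield one (3, 256, 256) output frame.
# The tensor sizes are placeholders chosen for illustration.
def _example_edvr_forward():
    model = EDVRNet(in_nf=3, out_nf=3, scale_factor=4, nf=64, nframes=5)
    frames = paddle.rand([1, 5, 3, 64, 64])  # (batch, n_frames, channels, h, w)
    out = model(frames)
    return out.shape  # expected: [1, 3, 256, 256]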
|
the-stack_0_15253 | import pytest
import json
import tempfile
import pyethereum.trie as trie
import logging
logging.basicConfig(level=logging.DEBUG, format='%(message)s')
logger = logging.getLogger()
def check_testdata(data_keys, expected_keys):
assert set(data_keys) == set(expected_keys), \
"test data changed, please adjust tests"
def load_tests():
try:
fixture = json.load(open('fixtures/trietestnextprev.json', 'r'))
except IOError:
raise IOError("Could not read trietests.json from fixtures",
"Make sure you did 'git submodule init'")
return fixture
def run_test(name):
logger.debug('testing %s', name)
t = trie.Trie(tempfile.mktemp())
data = load_tests()[name]
for k in data['in']:
logger.debug('updating with (%s, %s)', k, k)
t.update(k, k)
for point, prev, nxt in data['tests']:
assert nxt == (t.next(point) or '')
assert prev == (t.prev(point) or '')
def test_basic():
run_test('basic')
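# --- Illustrative sketch (not part of the original tests) ---
# A hedged example of exercising every case in the fixture file rather than
# only 'basic'; it assumes all entries share the same 'in'/'tests' layout.
def test_all_fixture_cases():
    for name in sorted(load_tests().keys()):
        run_test(name)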
|
the-stack_0_15255 | from keras import backend as K
def matthews_correlation(y_true, y_pred):
'''Calculates the Matthews correlation coefficient measure for quality
of binary classification problems.
'''
y_pred_pos = K.round(K.clip(y_pred, 0, 1))
y_pred_neg = 1 - y_pred_pos
y_pos = K.round(K.clip(y_true, 0, 1))
y_neg = 1 - y_pos
tp = K.sum(y_pos * y_pred_pos)
tn = K.sum(y_neg * y_pred_neg)
fp = K.sum(y_neg * y_pred_pos)
fn = K.sum(y_pos * y_pred_neg)
numerator = (tp * tn - fp * fn)
denominator = K.sqrt((tp + fp) * (tp + fn) * (tn + fp) * (tn + fn))
return numerator / (denominator + K.epsilon())
def precision(y_true, y_pred):
'''Calculates the precision, a metric for multi-label classification of
how many selected items are relevant.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def recall(y_true, y_pred):
'''Calculates the recall, a metric for multi-label classification of
how many relevant items are selected.
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
possible_positives = K.sum(K.round(K.clip(y_true, 0, 1)))
recall = true_positives / (possible_positives + K.epsilon())
return recall
def fbeta_score(y_true, y_pred, beta=1):
'''Calculates the F score, the weighted harmonic mean of precision and recall.
This is useful for multi-label classification, where input samples can be
classified as sets of labels. By only using accuracy (precision) a model
would achieve a perfect score by simply assigning every class to every
input. In order to avoid this, a metric should penalize incorrect class
    assignments as well (recall). The F-beta score (ranging from 0.0 to 1.0)
    computes this as a weighted mean of the proportion of correct class
    assignments vs. the proportion of incorrect class assignments.
    With beta = 1, this is equivalent to an F-measure. With beta < 1, assigning
correct classes becomes more important, and with beta > 1 the metric is
instead weighted towards penalizing incorrect class assignments.
'''
if beta < 0:
raise ValueError('The lowest choosable beta is zero (only precision).')
# If there are no true positives, fix the F score at 0 like sklearn.
if K.sum(K.round(K.clip(y_true, 0, 1))) == 0:
return 0
p = precision(y_true, y_pred)
r = recall(y_true, y_pred)
bb = beta ** 2
fbeta_score = (1 + bb) * (p * r) / (bb * p + r + K.epsilon())
return fbeta_score
def fmeasure_acc(y_true, y_pred):
'''Calculates the f-measure, the harmonic mean of precision and recall.
'''
return fbeta_score(y_true, y_pred, beta=1)
|
the-stack_0_15256 | from __future__ import division, print_function, absolute_import
import functools
import numpy as np
import math
import sys
import types
import warnings
# trapz is a public function for scipy.integrate,
# even though it's actually a numpy function.
from numpy import trapz
from scipy.special import roots_legendre
from scipy.special import gammaln
from scipy._lib.six import xrange
__all__ = ['fixed_quad', 'quadrature', 'romberg', 'trapz', 'simps', 'romb',
'cumtrapz', 'newton_cotes']
# Make See Also linking for our local copy work properly
def _copy_func(f):
"""Based on http://stackoverflow.com/a/6528148/190597 (Glenn Maynard)"""
g = types.FunctionType(f.__code__, f.__globals__, name=f.__name__,
argdefs=f.__defaults__, closure=f.__closure__)
g = functools.update_wrapper(g, f)
g.__kwdefaults__ = f.__kwdefaults__
return g
trapz = _copy_func(trapz)
if sys.flags.optimize <= 1:
trapz.__doc__ = trapz.__doc__.replace('sum, cumsum', 'numpy.cumsum')
class AccuracyWarning(Warning):
pass
def _cached_roots_legendre(n):
"""
Cache roots_legendre results to speed up calls of the fixed_quad
function.
"""
if n in _cached_roots_legendre.cache:
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache[n] = roots_legendre(n)
return _cached_roots_legendre.cache[n]
_cached_roots_legendre.cache = dict()
def fixed_quad(func, a, b, args=(), n=5):
"""
Compute a definite integral using fixed-order Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature of
order `n`.
Parameters
----------
func : callable
A Python function or method to integrate (must accept vector inputs).
If integrating a vector-valued function, the returned array must have
shape ``(..., len(x))``.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function, if any.
n : int, optional
Order of quadrature integration. Default is 5.
Returns
-------
val : float
Gaussian quadrature approximation to the integral
none : None
Statically returned value of None
See Also
--------
quad : adaptive quadrature using QUADPACK
dblquad : double integrals
tplquad : triple integrals
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
romb : integrators for sampled data
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrator
odeint : ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.fixed_quad(f, 0.0, 1.0, n=4)
(0.1110884353741496, None)
>>> integrate.fixed_quad(f, 0.0, 1.0, n=5)
(0.11111111111111102, None)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=4)
(0.9999999771971152, None)
>>> integrate.fixed_quad(np.cos, 0.0, np.pi/2, n=5)
(1.000000000039565, None)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
x, w = _cached_roots_legendre(n)
x = np.real(x)
if np.isinf(a) or np.isinf(b):
raise ValueError("Gaussian quadrature is only available for "
"finite limits.")
y = (b-a)*(x+1)/2.0 + a
return (b-a)/2.0 * np.sum(w*func(y, *args), axis=-1), None
def vectorize1(func, args=(), vec_func=False):
"""Vectorize the call to a function.
This is an internal utility function used by `romberg` and
`quadrature` to create a vectorized version of a function.
If `vec_func` is True, the function `func` is assumed to take vector
arguments.
Parameters
----------
func : callable
User defined function.
args : tuple, optional
Extra arguments for the function.
vec_func : bool, optional
True if the function func takes vector arguments.
Returns
-------
vfunc : callable
A function that will take a vector argument and return the
result.
"""
if vec_func:
def vfunc(x):
return func(x, *args)
else:
def vfunc(x):
if np.isscalar(x):
return func(x, *args)
x = np.asarray(x)
# call with first point to get output type
y0 = func(x[0], *args)
n = len(x)
dtype = getattr(y0, 'dtype', type(y0))
output = np.empty((n,), dtype=dtype)
output[0] = y0
for i in xrange(1, n):
output[i] = func(x[i], *args)
return output
return vfunc
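# --- Illustrative sketch (not part of the original module) ---
# vectorize1 is what lets quadrature/romberg accept scalar-only integrands; a
# minimal example of the wrapping behaviour with a scalar function:
def _example_vectorize1():
    vfunc = vectorize1(math.sin, vec_func=False)
    return vfunc(np.array([0.0, np.pi / 2]))  # approximately array([0., 1.])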
def quadrature(func, a, b, args=(), tol=1.49e-8, rtol=1.49e-8, maxiter=50,
vec_func=True, miniter=1):
"""
Compute a definite integral using fixed-tolerance Gaussian quadrature.
Integrate `func` from `a` to `b` using Gaussian quadrature
with absolute tolerance `tol`.
Parameters
----------
func : function
A Python function or method to integrate.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
args : tuple, optional
Extra arguments to pass to function.
tol, rtol : float, optional
Iteration stops when error between last two iterates is less than
`tol` OR the relative change is less than `rtol`.
maxiter : int, optional
Maximum order of Gaussian quadrature.
vec_func : bool, optional
True or False if func handles arrays as arguments (is
a "vector" function). Default is True.
miniter : int, optional
Minimum order of Gaussian quadrature.
Returns
-------
val : float
Gaussian quadrature approximation (within tolerance) to integral.
err : float
Difference between last two estimates of the integral.
See also
--------
romberg: adaptive Romberg quadrature
fixed_quad: fixed-order Gaussian quadrature
quad: adaptive quadrature using QUADPACK
dblquad: double integrals
tplquad: triple integrals
romb: integrator for sampled data
simps: integrator for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrator
odeint: ODE integrator
Examples
--------
>>> from scipy import integrate
>>> f = lambda x: x**8
>>> integrate.quadrature(f, 0.0, 1.0)
(0.11111111111111106, 4.163336342344337e-17)
>>> print(1/9.0) # analytical result
0.1111111111111111
>>> integrate.quadrature(np.cos, 0.0, np.pi/2)
(0.9999999999999536, 3.9611425250996035e-11)
>>> np.sin(np.pi/2)-np.sin(0) # analytical result
1.0
"""
if not isinstance(args, tuple):
args = (args,)
vfunc = vectorize1(func, args, vec_func=vec_func)
val = np.inf
err = np.inf
maxiter = max(miniter+1, maxiter)
for n in xrange(miniter, maxiter+1):
newval = fixed_quad(vfunc, a, b, (), n)[0]
err = abs(newval-val)
val = newval
if err < tol or err < rtol*abs(val):
break
else:
warnings.warn(
"maxiter (%d) exceeded. Latest difference = %e" % (maxiter, err),
AccuracyWarning)
return val, err
def tupleset(t, i, value):
l = list(t)
l[i] = value
return tuple(l)
def cumtrapz(y, x=None, dx=1.0, axis=-1, initial=None):
"""
Cumulatively integrate y(x) using the composite trapezoidal rule.
Parameters
----------
y : array_like
Values to integrate.
x : array_like, optional
The coordinate to integrate along. If None (default), use spacing `dx`
between consecutive elements in `y`.
dx : float, optional
Spacing between elements of `y`. Only used if `x` is None.
axis : int, optional
Specifies the axis to cumulate. Default is -1 (last axis).
initial : scalar, optional
If given, insert this value at the beginning of the returned result.
Typically this value should be 0. Default is None, which means no
value at ``x[0]`` is returned and `res` has one element less than `y`
along the axis of integration.
Returns
-------
res : ndarray
The result of cumulative integration of `y` along `axis`.
If `initial` is None, the shape is such that the axis of integration
has one less value than `y`. If `initial` is given, the shape is equal
to that of `y`.
See Also
--------
numpy.cumsum, numpy.cumprod
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
ode: ODE integrators
odeint: ODE integrators
Examples
--------
>>> from scipy import integrate
>>> import matplotlib.pyplot as plt
>>> x = np.linspace(-2, 2, num=20)
>>> y = x
>>> y_int = integrate.cumtrapz(y, x, initial=0)
>>> plt.plot(x, y_int, 'ro', x, y[0] + 0.5 * x**2, 'b-')
>>> plt.show()
"""
y = np.asarray(y)
if x is None:
d = dx
else:
x = np.asarray(x)
if x.ndim == 1:
d = np.diff(x)
# reshape to correct shape
shape = [1] * y.ndim
shape[axis] = -1
d = d.reshape(shape)
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
else:
d = np.diff(x, axis=axis)
if d.shape[axis] != y.shape[axis] - 1:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
nd = len(y.shape)
slice1 = tupleset((slice(None),)*nd, axis, slice(1, None))
slice2 = tupleset((slice(None),)*nd, axis, slice(None, -1))
res = np.cumsum(d * (y[slice1] + y[slice2]) / 2.0, axis=axis)
if initial is not None:
if not np.isscalar(initial):
raise ValueError("`initial` parameter should be a scalar.")
shape = list(res.shape)
shape[axis] = 1
res = np.concatenate([np.full(shape, initial, dtype=res.dtype), res],
axis=axis)
return res
def _basic_simps(y, start, stop, x, dx, axis):
nd = len(y.shape)
if start is None:
start = 0
step = 2
slice_all = (slice(None),)*nd
slice0 = tupleset(slice_all, axis, slice(start, stop, step))
slice1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
slice2 = tupleset(slice_all, axis, slice(start+2, stop+2, step))
if x is None: # Even spaced Simpson's rule.
result = np.sum(dx/3.0 * (y[slice0]+4*y[slice1]+y[slice2]),
axis=axis)
else:
# Account for possibly different spacings.
# Simpson's rule changes a bit.
h = np.diff(x, axis=axis)
sl0 = tupleset(slice_all, axis, slice(start, stop, step))
sl1 = tupleset(slice_all, axis, slice(start+1, stop+1, step))
h0 = h[sl0]
h1 = h[sl1]
hsum = h0 + h1
hprod = h0 * h1
h0divh1 = h0 / h1
tmp = hsum/6.0 * (y[slice0]*(2-1.0/h0divh1) +
y[slice1]*hsum*hsum/hprod +
y[slice2]*(2-h0divh1))
result = np.sum(tmp, axis=axis)
return result
def simps(y, x=None, dx=1, axis=-1, even='avg'):
"""
Integrate y(x) using samples along the given axis and the composite
Simpson's rule. If x is None, spacing of dx is assumed.
If there are an even number of samples, N, then there are an odd
number of intervals (N-1), but Simpson's rule requires an even number
of intervals. The parameter 'even' controls how this is handled.
Parameters
----------
y : array_like
Array to be integrated.
x : array_like, optional
If given, the points at which `y` is sampled.
dx : int, optional
Spacing of integration points along axis of `y`. Only used when
`x` is None. Default is 1.
axis : int, optional
Axis along which to integrate. Default is the last axis.
even : str {'avg', 'first', 'last'}, optional
'avg' : Average two results:1) use the first N-2 intervals with
a trapezoidal rule on the last interval and 2) use the last
N-2 intervals with a trapezoidal rule on the first interval.
'first' : Use Simpson's rule for the first N-2 intervals with
a trapezoidal rule on the last interval.
'last' : Use Simpson's rule for the last N-2 intervals with a
trapezoidal rule on the first interval.
See Also
--------
quad: adaptive quadrature using QUADPACK
romberg: adaptive Romberg quadrature
quadrature: adaptive Gaussian quadrature
fixed_quad: fixed-order Gaussian quadrature
dblquad: double integrals
tplquad: triple integrals
romb: integrators for sampled data
cumtrapz: cumulative integration for sampled data
ode: ODE integrators
odeint: ODE integrators
Notes
-----
For an odd number of samples that are equally spaced the result is
exact if the function is a polynomial of order 3 or less. If
the samples are not equally spaced, then the result is exact only
if the function is a polynomial of order 2 or less.
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(0, 10)
>>> y = np.arange(0, 10)
>>> integrate.simps(y, x)
40.5
>>> y = np.power(x, 3)
>>> integrate.simps(y, x)
1642.5
>>> integrate.quad(lambda x: x**3, 0, 9)[0]
1640.25
>>> integrate.simps(y, x, even='first')
1644.5
"""
y = np.asarray(y)
nd = len(y.shape)
N = y.shape[axis]
last_dx = dx
first_dx = dx
returnshape = 0
if x is not None:
x = np.asarray(x)
if len(x.shape) == 1:
shapex = [1] * nd
shapex[axis] = x.shape[0]
saveshape = x.shape
returnshape = 1
x = x.reshape(tuple(shapex))
elif len(x.shape) != len(y.shape):
raise ValueError("If given, shape of x must be 1-d or the "
"same as y.")
if x.shape[axis] != N:
raise ValueError("If given, length of x along axis must be the "
"same as y.")
if N % 2 == 0:
val = 0.0
result = 0.0
slice1 = (slice(None),)*nd
slice2 = (slice(None),)*nd
if even not in ['avg', 'last', 'first']:
raise ValueError("Parameter 'even' must be "
"'avg', 'last', or 'first'.")
# Compute using Simpson's rule on first intervals
if even in ['avg', 'first']:
slice1 = tupleset(slice1, axis, -1)
slice2 = tupleset(slice2, axis, -2)
if x is not None:
last_dx = x[slice1] - x[slice2]
val += 0.5*last_dx*(y[slice1]+y[slice2])
result = _basic_simps(y, 0, N-3, x, dx, axis)
# Compute using Simpson's rule on last set of intervals
if even in ['avg', 'last']:
slice1 = tupleset(slice1, axis, 0)
slice2 = tupleset(slice2, axis, 1)
if x is not None:
first_dx = x[tuple(slice2)] - x[tuple(slice1)]
val += 0.5*first_dx*(y[slice2]+y[slice1])
result += _basic_simps(y, 1, N-2, x, dx, axis)
if even == 'avg':
val /= 2.0
result /= 2.0
result = result + val
else:
result = _basic_simps(y, 0, N-2, x, dx, axis)
if returnshape:
x = x.reshape(saveshape)
return result
def romb(y, dx=1.0, axis=-1, show=False):
"""
Romberg integration using samples of a function.
Parameters
----------
y : array_like
A vector of ``2**k + 1`` equally-spaced samples of a function.
dx : float, optional
The sample spacing. Default is 1.
axis : int, optional
The axis along which to integrate. Default is -1 (last axis).
show : bool, optional
When `y` is a single 1-D array, then if this argument is True
print the table showing Richardson extrapolation from the
samples. Default is False.
Returns
-------
romb : ndarray
The integrated result for `axis`.
See also
--------
quad : adaptive quadrature using QUADPACK
romberg : adaptive Romberg quadrature
quadrature : adaptive Gaussian quadrature
fixed_quad : fixed-order Gaussian quadrature
dblquad : double integrals
tplquad : triple integrals
simps : integrators for sampled data
cumtrapz : cumulative integration for sampled data
ode : ODE integrators
odeint : ODE integrators
Examples
--------
>>> from scipy import integrate
>>> x = np.arange(10, 14.25, 0.25)
>>> y = np.arange(3, 12)
>>> integrate.romb(y)
56.0
>>> y = np.sin(np.power(x, 2.5))
>>> integrate.romb(y)
-0.742561336672229
>>> integrate.romb(y, show=True)
Richardson Extrapolation Table for Romberg Integration
====================================================================
-0.81576
4.63862 6.45674
-1.10581 -3.02062 -3.65245
-2.57379 -3.06311 -3.06595 -3.05664
-1.34093 -0.92997 -0.78776 -0.75160 -0.74256
====================================================================
-0.742561336672229
"""
y = np.asarray(y)
nd = len(y.shape)
Nsamps = y.shape[axis]
Ninterv = Nsamps-1
n = 1
k = 0
while n < Ninterv:
n <<= 1
k += 1
if n != Ninterv:
raise ValueError("Number of samples must be one plus a "
"non-negative power of 2.")
R = {}
slice_all = (slice(None),) * nd
slice0 = tupleset(slice_all, axis, 0)
slicem1 = tupleset(slice_all, axis, -1)
h = Ninterv * np.asarray(dx, dtype=float)
R[(0, 0)] = (y[slice0] + y[slicem1])/2.0*h
slice_R = slice_all
start = stop = step = Ninterv
for i in xrange(1, k+1):
start >>= 1
slice_R = tupleset(slice_R, axis, slice(start, stop, step))
step >>= 1
R[(i, 0)] = 0.5*(R[(i-1, 0)] + h*y[slice_R].sum(axis=axis))
for j in xrange(1, i+1):
prev = R[(i, j-1)]
R[(i, j)] = prev + (prev-R[(i-1, j-1)]) / ((1 << (2*j))-1)
h /= 2.0
if show:
if not np.isscalar(R[(0, 0)]):
print("*** Printing table only supported for integrals" +
" of a single data set.")
else:
try:
precis = show[0]
except (TypeError, IndexError):
precis = 5
try:
width = show[1]
except (TypeError, IndexError):
width = 8
formstr = "%%%d.%df" % (width, precis)
title = "Richardson Extrapolation Table for Romberg Integration"
print("", title.center(68), "=" * 68, sep="\n", end="\n")
for i in xrange(k+1):
for j in xrange(i+1):
print(formstr % R[(i, j)], end=" ")
print()
print("=" * 68)
print()
return R[(k, k)]
# Romberg quadratures for numeric integration.
#
# Written by Scott M. Ransom <[email protected]>
# last revision: 14 Nov 98
#
# Cosmetic changes by Konrad Hinsen <[email protected]>
# last revision: 1999-7-21
#
# Adapted to scipy by Travis Oliphant <[email protected]>
# last revision: Dec 2001
def _difftrap(function, interval, numtraps):
"""
Perform part of the trapezoidal rule to integrate a function.
Assume that we had called difftrap with all lower powers-of-2
starting with 1. Calling difftrap only returns the summation
of the new ordinates. It does _not_ multiply by the width
of the trapezoids. This must be performed by the caller.
'function' is the function to evaluate (must accept vector arguments).
'interval' is a sequence with lower and upper limits
of integration.
'numtraps' is the number of trapezoids to use (must be a
power-of-2).
"""
if numtraps <= 0:
raise ValueError("numtraps must be > 0 in difftrap().")
elif numtraps == 1:
return 0.5*(function(interval[0])+function(interval[1]))
else:
numtosum = numtraps/2
h = float(interval[1]-interval[0])/numtosum
lox = interval[0] + 0.5 * h
points = lox + h * np.arange(numtosum)
s = np.sum(function(points), axis=0)
return s
def _romberg_diff(b, c, k):
"""
Compute the differences for the Romberg quadrature corrections.
See Forman Acton's "Real Computing Made Real," p 143.
"""
tmp = 4.0**k
return (tmp * c - b)/(tmp - 1.0)
def _printresmat(function, interval, resmat):
# Print the Romberg result matrix.
i = j = 0
print('Romberg integration of', repr(function), end=' ')
print('from', interval)
print('')
print('%6s %9s %9s' % ('Steps', 'StepSize', 'Results'))
for i in xrange(len(resmat)):
print('%6d %9f' % (2**i, (interval[1]-interval[0])/(2.**i)), end=' ')
for j in xrange(i+1):
print('%9f' % (resmat[i][j]), end=' ')
print('')
print('')
print('The final result is', resmat[i][j], end=' ')
print('after', 2**(len(resmat)-1)+1, 'function evaluations.')
def romberg(function, a, b, args=(), tol=1.48e-8, rtol=1.48e-8, show=False,
divmax=10, vec_func=False):
"""
Romberg integration of a callable function or method.
Returns the integral of `function` (a function of one variable)
over the interval (`a`, `b`).
If `show` is 1, the triangular array of the intermediate results
will be printed. If `vec_func` is True (default is False), then
`function` is assumed to support vector arguments.
Parameters
----------
function : callable
Function to be integrated.
a : float
Lower limit of integration.
b : float
Upper limit of integration.
Returns
-------
results : float
Result of the integration.
Other Parameters
----------------
args : tuple, optional
Extra arguments to pass to function. Each element of `args` will
be passed as a single argument to `func`. Default is to pass no
extra arguments.
tol, rtol : float, optional
The desired absolute and relative tolerances. Defaults are 1.48e-8.
show : bool, optional
Whether to print the results. Default is False.
divmax : int, optional
Maximum order of extrapolation. Default is 10.
vec_func : bool, optional
Whether `func` handles arrays as arguments (i.e whether it is a
"vector" function). Default is False.
See Also
--------
fixed_quad : Fixed-order Gaussian quadrature.
quad : Adaptive quadrature using QUADPACK.
dblquad : Double integrals.
tplquad : Triple integrals.
romb : Integrators for sampled data.
simps : Integrators for sampled data.
cumtrapz : Cumulative integration for sampled data.
ode : ODE integrator.
odeint : ODE integrator.
References
----------
.. [1] 'Romberg's method' https://en.wikipedia.org/wiki/Romberg%27s_method
Examples
--------
Integrate a gaussian from 0 to 1 and compare to the error function.
>>> from scipy import integrate
>>> from scipy.special import erf
>>> gaussian = lambda x: 1/np.sqrt(np.pi) * np.exp(-x**2)
>>> result = integrate.romberg(gaussian, 0, 1, show=True)
Romberg integration of <function vfunc at ...> from [0, 1]
::
Steps StepSize Results
1 1.000000 0.385872
2 0.500000 0.412631 0.421551
4 0.250000 0.419184 0.421368 0.421356
8 0.125000 0.420810 0.421352 0.421350 0.421350
16 0.062500 0.421215 0.421350 0.421350 0.421350 0.421350
32 0.031250 0.421317 0.421350 0.421350 0.421350 0.421350 0.421350
The final result is 0.421350396475 after 33 function evaluations.
>>> print("%g %g" % (2*result, erf(1)))
0.842701 0.842701
"""
if np.isinf(a) or np.isinf(b):
raise ValueError("Romberg integration only available "
"for finite limits.")
vfunc = vectorize1(function, args, vec_func=vec_func)
n = 1
interval = [a, b]
intrange = b - a
ordsum = _difftrap(vfunc, interval, n)
result = intrange * ordsum
resmat = [[result]]
err = np.inf
last_row = resmat[0]
for i in xrange(1, divmax+1):
n *= 2
ordsum += _difftrap(vfunc, interval, n)
row = [intrange * ordsum / n]
for k in xrange(i):
row.append(_romberg_diff(last_row[k], row[k], k+1))
result = row[i]
lastresult = last_row[i-1]
if show:
resmat.append(row)
err = abs(result - lastresult)
if err < tol or err < rtol * abs(result):
break
last_row = row
else:
warnings.warn(
"divmax (%d) exceeded. Latest difference = %e" % (divmax, err),
AccuracyWarning)
if show:
_printresmat(vfunc, interval, resmat)
return result
# Coefficients for Newton-Cotes quadrature
#
# These are the points being used
# to construct the local interpolating polynomial
# a are the weights for Newton-Cotes integration
# B is the error coefficient.
# error in these coefficients grows as N gets larger.
# or as samples are closer and closer together
# You can use maxima to find these rational coefficients
# for equally spaced data using the commands
# a(i,N) := integrate(product(r-j,j,0,i-1) * product(r-j,j,i+1,N),r,0,N) / ((N-i)! * i!) * (-1)^(N-i);
# Be(N) := N^(N+2)/(N+2)! * (N/(N+3) - sum((i/N)^(N+2)*a(i,N),i,0,N));
# Bo(N) := N^(N+1)/(N+1)! * (N/(N+2) - sum((i/N)^(N+1)*a(i,N),i,0,N));
# B(N) := (if (mod(N,2)=0) then Be(N) else Bo(N));
#
# pre-computed for equally-spaced weights
#
# num_a, den_a, int_a, num_B, den_B = _builtincoeffs[N]
#
# a = num_a*array(int_a)/den_a
# B = num_B*1.0 / den_B
#
# integrate(f(x),x,x_0,x_N) = dx*sum(a*f(x_i)) + B*(dx)^(2k+3) f^(2k+2)(x*)
# where k = N // 2
#
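# Worked example (added for clarity, not part of the original comments):
# for N=2 the entry below is (1, 3, [1, 4, 1], -1, 90), giving
#   a = 1*array([1, 4, 1])/3 = [1/3, 4/3, 1/3]   (Simpson's rule weights)
#   B = -1/90                                    (its error coefficient)
#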
_builtincoeffs = {
1: (1,2,[1,1],-1,12),
2: (1,3,[1,4,1],-1,90),
3: (3,8,[1,3,3,1],-3,80),
4: (2,45,[7,32,12,32,7],-8,945),
5: (5,288,[19,75,50,50,75,19],-275,12096),
6: (1,140,[41,216,27,272,27,216,41],-9,1400),
7: (7,17280,[751,3577,1323,2989,2989,1323,3577,751],-8183,518400),
8: (4,14175,[989,5888,-928,10496,-4540,10496,-928,5888,989],
-2368,467775),
9: (9,89600,[2857,15741,1080,19344,5778,5778,19344,1080,
15741,2857], -4671, 394240),
10: (5,299376,[16067,106300,-48525,272400,-260550,427368,
-260550,272400,-48525,106300,16067],
-673175, 163459296),
11: (11,87091200,[2171465,13486539,-3237113, 25226685,-9595542,
15493566,15493566,-9595542,25226685,-3237113,
13486539,2171465], -2224234463, 237758976000),
12: (1, 5255250, [1364651,9903168,-7587864,35725120,-51491295,
87516288,-87797136,87516288,-51491295,35725120,
-7587864,9903168,1364651], -3012, 875875),
13: (13, 402361344000,[8181904909, 56280729661, -31268252574,
156074417954,-151659573325,206683437987,
-43111992612,-43111992612,206683437987,
-151659573325,156074417954,-31268252574,
56280729661,8181904909], -2639651053,
344881152000),
14: (7, 2501928000, [90241897,710986864,-770720657,3501442784,
-6625093363,12630121616,-16802270373,19534438464,
-16802270373,12630121616,-6625093363,3501442784,
-770720657,710986864,90241897], -3740727473,
1275983280000)
}
def newton_cotes(rn, equal=0):
r"""
Return weights and error coefficient for Newton-Cotes integration.
Suppose we have (N+1) samples of f at the positions
x_0, x_1, ..., x_N. Then an N-point Newton-Cotes formula for the
integral between x_0 and x_N is:
:math:`\int_{x_0}^{x_N} f(x)dx = \Delta x \sum_{i=0}^{N} a_i f(x_i)
+ B_N (\Delta x)^{N+2} f^{N+1} (\xi)`
where :math:`\xi \in [x_0,x_N]`
and :math:`\Delta x = \frac{x_N-x_0}{N}` is the average samples spacing.
If the samples are equally-spaced and N is even, then the error
term is :math:`B_N (\Delta x)^{N+3} f^{N+2}(\xi)`.
Parameters
----------
rn : int
The integer order for equally-spaced data or the relative positions of
the samples with the first sample at 0 and the last at N, where N+1 is
the length of `rn`. N is the order of the Newton-Cotes integration.
equal : int, optional
Set to 1 to enforce equally spaced data.
Returns
-------
an : ndarray
1-D array of weights to apply to the function at the provided sample
positions.
B : float
Error coefficient.
Examples
--------
Compute the integral of sin(x) in [0, :math:`\pi`]:
>>> from scipy.integrate import newton_cotes
>>> def f(x):
... return np.sin(x)
>>> a = 0
>>> b = np.pi
>>> exact = 2
>>> for N in [2, 4, 6, 8, 10]:
... x = np.linspace(a, b, N + 1)
... an, B = newton_cotes(N, 1)
... dx = (b - a) / N
... quad = dx * np.sum(an * f(x))
... error = abs(quad - exact)
... print('{:2d} {:10.9f} {:.5e}'.format(N, quad, error))
...
2 2.094395102 9.43951e-02
4 1.998570732 1.42927e-03
6 2.000017814 1.78136e-05
8 1.999999835 1.64725e-07
10 2.000000001 1.14677e-09
Notes
-----
Normally, the Newton-Cotes rules are used on smaller integration
regions and a composite rule is used to return the total integral.
"""
try:
N = len(rn)-1
if equal:
rn = np.arange(N+1)
elif np.all(np.diff(rn) == 1):
equal = 1
except Exception:
N = rn
rn = np.arange(N+1)
equal = 1
if equal and N in _builtincoeffs:
na, da, vi, nb, db = _builtincoeffs[N]
an = na * np.array(vi, dtype=float) / da
return an, float(nb)/db
if (rn[0] != 0) or (rn[-1] != N):
raise ValueError("The sample positions must start at 0"
" and end at N")
yi = rn / float(N)
ti = 2 * yi - 1
nvec = np.arange(N+1)
C = ti ** nvec[:, np.newaxis]
Cinv = np.linalg.inv(C)
# improve precision of result
for i in range(2):
Cinv = 2*Cinv - Cinv.dot(C).dot(Cinv)
vec = 2.0 / (nvec[::2]+1)
ai = Cinv[:, ::2].dot(vec) * (N / 2.)
if (N % 2 == 0) and equal:
BN = N/(N+3.)
power = N+2
else:
BN = N/(N+2.)
power = N+1
BN = BN - np.dot(yi**power, ai)
p1 = power+1
fac = power*math.log(N) - gammaln(p1)
fac = math.exp(fac)
return ai, BN*fac
|
the-stack_0_15258 | '''
NERYS
a universal product monitor
Current Module: Other Sites
Usage:
NERYS will monitor specified sites for keywords and sends a Discord alert
when a page has a specified keyword. This can be used to monitor any site
on a product release date to automatically detect when a product has been
uploaded. Useful when monitoring hundreds of sites for shops in different
timezones.
Complete:
- find all products on Shopify site by keyword
- send discord notifications
- monitor for new products
- optimization for Shopify to return product checkout links by size
- find all products on other sites by keyword
- attempt to get product page links for universal sites
Left To Do:
- monitor for Shopify restocks
- monitor for restocks on other sites
-- find sold out by keyword
-- find sizes by keyword
-- find countdown timer by keyword
- detect cloudflare
- get product picture for other sites
- optimization for footsites
Credits:
Niveen Jegatheeswaran - Main Dev - https://github.com/snivyn/
kyb3r - Discord Embed - https://github.com/kyb3r/
'''
import requests
from bs4 import BeautifulSoup as soup
from log import log as log
import time
from datetime import datetime
import random
import sqlite3
from discord_hooks import Webhook
from threading import Thread
# Exceptions raised by read_from_txt() below. They were referenced but never
# defined or imported in this module, so minimal definitions are added here to
# keep the file runnable.
class FileNotFound(Exception):
    pass
class NoDataLoaded(Exception):
    pass
class Product():
def __init__(self, title, link, stock, keyword):
'''
(str, str, bool, str) -> None
Creates an instance of the Product class.
'''
# Setup product attributes
self.title = title
self.stock = stock
self.link = link
self.keyword = keyword
def read_from_txt(path):
'''
    (str) -> list of str
    Loads up all sites from the text file at <path>.
    Returns the sites as a list.
'''
# Initialize variables
raw_lines = []
lines = []
# Load data from the txt file
try:
f = open(path, "r")
raw_lines = f.readlines()
f.close()
# Raise an error if the file couldn't be found
except:
log('e', "Couldn't locate <" + path + ">.")
        raise FileNotFoundError("Couldn't locate <" + path + ">.")
if(len(raw_lines) == 0):
        raise ValueError("No data loaded from <" + path + ">.")
# Parse the data
for line in raw_lines:
lines.append(line.strip("\n"))
# Return the data
return lines
def add_to_db(product):
'''
(Product) -> bool
Given a product <product>, the product is added to a database <products.db>
and whether or not a Discord alert should be sent out is returned. Discord
alerts are sent out based on whether or not a new product matching
keywords is found.
'''
# Initialize variables
title = product.title
stock = str(product.stock)
link = product.link
keyword = product.keyword
alert = False
# Create database
conn = sqlite3.connect('products.db')
c = conn.cursor()
c.execute("""CREATE TABLE IF NOT EXISTS products(title TEXT, link TEXT UNIQUE, stock TEXT, keywords TEXT)""")
# Add product to database if it's unique
try:
c.execute("""INSERT INTO products (title, link, stock, keywords) VALUES (?, ?, ?, ?)""", (title, link, stock, keyword))
log('s', "Found new product with keyword " + keyword + ". Link = " + link)
alert = True
except:
# Product already exists
pass
#log('i', "Product at URL <" + link + "> already exists in the database.")
# Close connection to the database
conn.commit()
c.close()
conn.close()
# Return whether or not it's a new product
return alert
def send_embed(product):
'''
(Product) -> None
Sends a discord alert based on info provided.
'''
url = 'https://discord.com/api/webhooks/728820147346997278/ocPnHwKHaeCLeq1N1UJ7nAmO1qvat3sxr2G5xv72TubAGZWmhajDzknK9CfR6ZpvxA2i'
embed = Webhook(url, color=123123)
embed.set_author(name='NERYS', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg')
embed.set_desc("Found product based on keyword " + product.keyword)
embed.add_field(name="Link", value=product.link)
embed.set_footer(text='NERYS by @snivynGOD', icon='https://static.zerochan.net/Daenerys.Targaryen.full.2190849.jpg', ts=True)
embed.post()
def monitor(link, keywords):
'''
(str, list of str) -> None
Given a URL <link> and keywords <keywords>, the URL is scanned and alerts
are sent via Discord when a new product containing a keyword is detected.
'''
log('i', "Checking site <" + link + ">...")
# Parse the site from the link
pos_https = link.find("https://")
pos_http = link.find("http://")
if(pos_https == 0):
site = link[8:]
end = site.find("/")
if(end != -1):
site = site[:end]
site = "https://" + site
else:
site = link[7:]
end = site.find("/")
if(end != -1):
site = site[:end]
site = "http://" + site
# Get all the links on the "New Arrivals" page
try:
r = requests.get(link, timeout=5, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed. Retrying...")
time.sleep(5)
try:
r = requests.get(link, timeout=8, verify=False)
except:
log('e', "Connection to URL <" + link + "> failed.")
return
page = soup(r.text, "html.parser")
raw_links = page.findAll("a")
hrefs = []
for raw_link in raw_links:
try:
hrefs.append(raw_link["href"])
except:
pass
# Check for links matching keywords
for href in hrefs:
found = False
for keyword in keywords:
if(keyword.upper() in href.upper()):
found = True
if("http" in href):
product_page = href
else:
product_page = site + href
product = Product("N/A", product_page, True, keyword)
alert = add_to_db(product)
if(alert):
send_embed(product)
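

# Illustrative sketch (not used by monitor() above): the manual scheme/host
# slicing inside monitor() derives the site root from a URL; the same result
# can be obtained with the standard library's urllib.parse. The helper name
# and the example domain are assumptions made for this sketch only.
def site_root(link):
    '''
    (str) -> str
    Returns scheme://host for <link>, e.g. "https://shop.example.com".
    '''
    from urllib.parse import urlsplit
    parts = urlsplit(link)
    return parts.scheme + "://" + parts.netloc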
if(__name__ == "__main__"):
# Ignore insecure messages
requests.packages.urllib3.disable_warnings()
    # Keywords (separated by -)
keywords = [
"jordan",
"dunk",
"pharrell",
"free-throw-line",
"kendrick",
"tinker",
"game-royal",
"yeezy",
"human-race",
"big-bang",
"dont-trip",
"kung-fu-kenny",
"playstation",
"valentine",
"ovo-air-jordan",
"ovo-jordan",
"air-jordan-1",
"wotherspoon"
]
# Load sites from file
sites = read_from_txt("other-sites.txt")
# Start monitoring sites
while(True):
threads = []
for site in sites:
t = Thread(target=monitor, args=(site, keywords))
threads.append(t)
t.start()
time.sleep(2) # 2 second delay before going to the next site
|
the-stack_0_15262 | # positioner_window.py, window to control a positioning instrument
# Reinier Heeres, <[email protected]>, 2008
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation; either version 2 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program; if not, write to the Free Software
# Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA
import gtk
import gobject
import logging
import qtclient as qt
from gettext import gettext as _L
from lib.gui.qtwindow import QTWindow
from lib.gui.qttable import QTTable
from lib.gui.dropdowns import InstrumentDropdown
from lib.gui.misc import pack_hbox, pack_vbox
from lib.misc import sign
class PositionControls(gtk.Frame):
__gsignals__ = {
'direction-clicked': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'direction-released': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'max-speed-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'min-speed-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'accel-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'decel-changed': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
'stop-request': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE, [])
}
def __init__(self, ins):
gtk.Frame.__init__(self)
self._config = qt.config
self.set_label(_L('Controls'))
self._table = gtk.Table(4, 9)
self._button_up = gtk.Button('/\\')
self._button_up.connect('pressed',
lambda x: self._direction_clicked(True, 0, 1, 0))
self._button_up.connect('released',
lambda x: self._direction_clicked(False, 0, 1, 0))
self._table.attach(self._button_up, 1, 2, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_down = gtk.Button('\\/')
self._button_down.connect('pressed',
lambda x: self._direction_clicked(True, 0, -1, 0))
self._button_down.connect('released',
lambda x: self._direction_clicked(False, 0, -1, 0))
self._table.attach(self._button_down, 1, 2, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._button_left = gtk.Button('<')
self._button_left.connect('pressed',
lambda x: self._direction_clicked(True, -1, 0, 0))
self._button_left.connect('released',
lambda x: self._direction_clicked(False, -1, 0, 0))
self._table.attach(self._button_left, 0, 1, 1, 2,
gtk.EXPAND | gtk.FILL, 0)
self._button_right = gtk.Button('>')
self._button_right.connect('pressed',
lambda x: self._direction_clicked(True, 1, 0, 0))
self._button_right.connect('released',
lambda x: self._direction_clicked(False, 1, 0, 0))
self._table.attach(self._button_right, 2, 3, 1, 2,
gtk.EXPAND | gtk.FILL, 0)
self._button_upleft = gtk.Button('\\')
self._button_upleft.connect('pressed',
lambda x: self._direction_clicked(True, -1, 1, 0))
self._button_upleft.connect('released',
lambda x: self._direction_clicked(False, -1, 1, 0))
self._table.attach(self._button_upleft, 0, 1, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_upright = gtk.Button('/')
self._button_upright.connect('pressed',
lambda x: self._direction_clicked(True, 1, 1, 0))
self._button_upright.connect('released',
lambda x: self._direction_clicked(False, 1, 1, 0))
self._table.attach(self._button_upright, 2, 3, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
self._button_downleft = gtk.Button('/')
self._button_downleft.connect('pressed',
lambda x: self._direction_clicked(True, -1, -1, 0))
self._button_downleft.connect('released',
lambda x: self._direction_clicked(False, -1, -1, 0))
self._table.attach(self._button_downleft, 0, 1, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._button_downright = gtk.Button('\\')
self._button_downright.connect('pressed',
lambda x: self._direction_clicked(True, 1, -1, 0))
self._button_downright.connect('released',
lambda x: self._direction_clicked(False, 1, -1, 0))
self._table.attach(self._button_downright, 2, 3, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._button_z_up = gtk.Button('/\\')
self._button_z_up.connect('pressed',
lambda x: self._direction_clicked(True, 0, 0, 1))
self._button_z_up.connect('released',
lambda x: self._direction_clicked(False, 0, 0, 1))
self._table.attach(self._button_z_up, 4, 5, 0, 1,
gtk.EXPAND | gtk.FILL, 0)
        self._button_z_down = gtk.Button('\\/')
self._button_z_down.connect('pressed',
lambda x: self._direction_clicked(True, 0, 0, -1))
self._button_z_down.connect('released',
lambda x: self._direction_clicked(False, 0, 0, -1))
self._table.attach(self._button_z_down, 4, 5, 2, 3,
gtk.EXPAND | gtk.FILL, 0)
self._max_speed = gtk.VScale()
self._max_speed.set_size_request(100, 90)
self._max_speed.set_range(1, 500)
self._max_speed.set_inverted(True)
self._max_speed.connect('value-changed', self._max_speed_changed_cb)
self._max_speed.set_digits(1)
self._table.attach(gtk.Label(_L('Max speed')), 5, 6, 0, 1, 0, 0)
self._table.attach(self._max_speed, 5, 6, 1, 3, 0, 0)
self._min_speed = gtk.VScale()
self._min_speed.set_size_request(100, 90)
self._min_speed.set_range(1, 500)
self._min_speed.set_inverted(True)
self._min_speed.connect('value-changed', self._min_speed_changed_cb)
self._min_speed.set_digits(1)
self._table.attach(gtk.Label(_L('Min speed')), 6, 7, 0, 1, 0, 0)
self._table.attach(self._min_speed, 6, 7, 1, 3, 0, 0)
self._accel = gtk.VScale()
self._accel.set_size_request(100, 90)
self._accel.set_range(1.1, 4.0)
self._accel.set_inverted(True)
self._accel.connect('value-changed', self._accel_changed_cb)
self._accel.set_digits(2)
self._table.attach(gtk.Label(_L('Acceleration')), 7, 8, 0, 1, 0, 0)
self._table.attach(self._accel, 7, 8, 1, 3, 0, 0)
self._decel = gtk.VScale()
self._decel.set_size_request(100, 90)
self._decel.set_range(1.1, 4.0)
self._decel.set_inverted(True)
self._decel.connect('value-changed', self._decel_changed_cb)
self._decel.set_digits(2)
self._table.attach(gtk.Label(_L('Deceleration')), 8, 9, 0, 1, 0, 0)
self._table.attach(self._decel, 8, 9, 1, 3, 0, 0)
self._stop_but = gtk.Button('Stop')
self._stop_but.connect('clicked', self._stop_clicked_cb)
self._table.attach(self._stop_but, 0, 3, 3, 4, gtk.FILL, 0)
self.connect('key-press-event', self._key_pressed_cb)
self.connect('key-release-event', self._key_released_cb)
self.add(self._table)
self._inhibit_save = False
self.set_instrument(ins)
def _load_settings(self):
if self._instrument is None:
return
insname = self._instrument.get_name()
cfg = self._config
self._inhibit_save = True
self._max_speed.set_value(cfg.get('positioner_%s_max_speed' % insname, 250))
self._min_speed.set_value(cfg.get('positioner_%s_min_speed' % insname, 50))
self._accel.set_value(cfg.get('positioner_%s_accel' % insname, 1.5))
self._decel.set_value(cfg.get('positioner_%s_decel' % insname, 2.0))
self._inhibit_save = False
def _save_settings(self):
if self._instrument is None or self._inhibit_save:
return
insname = self._instrument.get_name()
cfg = self._config
cfg.set('positioner_%s_max_speed' % insname, self._max_speed.get_value())
cfg.set('positioner_%s_min_speed' % insname, self._min_speed.get_value())
cfg.set('positioner_%s_accel' % insname, self._accel.get_value())
cfg.set('positioner_%s_decel' % insname, self._decel.get_value())
def set_instrument(self, ins):
self._instrument = ins
if self._instrument is not None:
self._channels = ins.get_channels()
else:
self._channels = 0
bval = False
if self._channels > 0:
bval = True
self._button_left.set_sensitive(bval)
self._button_right.set_sensitive(bval)
self._button_upleft.set_sensitive(bval)
self._button_upright.set_sensitive(bval)
self._button_downleft.set_sensitive(bval)
self._button_downright.set_sensitive(bval)
self._stop_but.set_sensitive(bval)
bval = False
if self._channels > 1:
bval = True
self._button_up.set_sensitive(bval)
self._button_down.set_sensitive(bval)
bval = False
if self._channels > 2:
bval = True
self._button_z_up.set_sensitive(bval)
self._button_z_down.set_sensitive(bval)
self._load_settings()
def _direction_clicked(self, clicked, x, y, z):
coord = []
if self._channels > 0:
coord.append(x)
if self._channels > 1:
coord.append(y)
if self._channels > 2:
coord.append(z)
if clicked:
self.emit('direction-clicked', coord)
else:
self.emit('direction-released', coord)
def _key_pressed_cb(self, sender, key):
pass
def _key_released_cb(self, sender, key):
pass
def _max_speed_changed_cb(self, sender):
self._save_settings()
self.emit('max-speed-changed', sender.get_value())
def _min_speed_changed_cb(self, sender):
self._save_settings()
self.emit('min-speed-changed', sender.get_value())
def get_max_speed(self):
return self._max_speed.get_value()
def get_min_speed(self):
return self._min_speed.get_value()
def get_accel(self):
return self._accel.get_value()
def get_decel(self):
return self._decel.get_value()
def _accel_changed_cb(self, sender):
self._save_settings()
self.emit('accel-changed', sender.get_value())
def _decel_changed_cb(self, sender):
self._save_settings()
self.emit('decel-changed', sender.get_value())
def _stop_clicked_cb(self, sender):
self.emit('stop-request')
class PositionBookmarks(gtk.Frame):
__gsignals__ = {
'go-request': (gobject.SIGNAL_RUN_FIRST,
gobject.TYPE_NONE,
([gobject.TYPE_PYOBJECT])),
}
def __init__(self, ins):
gtk.Frame.__init__(self)
self.set_label(_L('Bookmarks'))
self._add_button = gtk.Button(_L('Add'))
self._add_button.connect('clicked', self._add_clicked_cb)
self._goxy_button = gtk.Button(_L('Goto XY'))
self._goxy_button.connect('clicked', self._go_clicked_cb, 2)
self._goxyz_button = gtk.Button(_L('Goto XYZ'))
self._goxyz_button.connect('clicked', self._go_clicked_cb, 3)
self._remove_button = gtk.Button(_L('Remove'))
self._remove_button.connect('clicked', self._remove_clicked_cb)
self._bookmark_data = {}
self._tree_model = gtk.ListStore(str, str)
self._tree_view = QTTable([
('Label', {}),
('Position', {})
], self._tree_model)
self._config = qt.config
self._load_bookmarks()
self._label_entry = gtk.Entry()
self.set_instrument(ins)
vbox = pack_vbox([
pack_hbox([
gtk.Label(_L('Label')),
self._label_entry], True, False),
pack_hbox([
self._add_button,
self._goxy_button,
self._goxyz_button,
self._remove_button], True, True),
self._tree_view
], False, False)
vbox.set_border_width(4)
self.add(vbox)
def set_instrument(self, ins):
self._ins = ins
bval = False
if ins is not None:
bval = True
self._add_button.set_sensitive(bval)
bval = False
if ins is not None and ins.get_channels() > 1:
bval = True
self._goxy_button.set_sensitive(bval)
bval = False
if ins is not None and ins.get_channels() > 2:
bval = True
self._goxyz_button.set_sensitive(bval)
def _add_clicked_cb(self, widget):
pos = self._ins.get_position()
posstr = self._ins.format_parameter_value('position', pos)
label = self._label_entry.get_text()
index = "%s%s" % (label, posstr)
if index in self._bookmark_data:
return
self._tree_model.append((label, posstr))
self._bookmark_data[index] = pos
self._save_bookmarks()
def _remove_clicked_cb(self, widget):
(model, rows) = self._tree_view.get_selection().get_selected_rows()
for row in rows:
it = model.get_iter(row)
rowdata = model.get(it, 0, 1)
index = "%s%s" % (rowdata[0], rowdata[1])
if index in self._bookmark_data:
del self._bookmark_data[index]
model.remove(it)
self._save_bookmarks()
def _go_clicked_cb(self, widget, nchannels):
(model, rows) = self._tree_view.get_selection().get_selected_rows()
        if len(rows) != 1:
            logging.warning('Select 1 row only!')
            return
row = rows[0]
it = model.get_iter(row)
label = model.get_value(it, 0)
posstr = model.get_value(it, 1)
index = "%s%s" % (label, posstr)
pos = self._bookmark_data[index]
pos = pos[:nchannels]
self.emit('go-request', pos)
def _load_bookmarks(self):
for row in self._config.get('positioner_bookmarks', []):
it = self._tree_model.append(row[:2])
index = "%s%s" % (row[0], row[1])
self._bookmark_data[index] = row[2]
def _save_bookmarks(self):
data = []
for row in self._tree_model:
index = "%s%s" % (row[0], row[1])
data.append((row[0], row[1], self._bookmark_data[index]))
self._config.set('positioner_bookmarks', data)
class PositionerWindow(QTWindow):
def __init__(self):
QTWindow.__init__(self, 'positioner', 'Positioner')
self.connect("delete-event", self._delete_event_cb)
self._moving = False
self._controls = PositionControls(None)
self._controls.connect('direction-clicked', self._direction_clicked_cb)
self._controls.connect('direction-released', self._direction_released_cb)
self._controls.connect('max-speed-changed', self._max_speed_changed_cb)
self._controls.connect('min-speed-changed', self._min_speed_changed_cb)
self._controls.connect('accel-changed', self._accel_changed_cb)
self._controls.connect('decel-changed', self._decel_changed_cb)
self._controls.connect('stop-request', self._stop_request_cb)
self._max_speed = self._controls.get_max_speed()
self._min_speed = self._controls.get_min_speed()
self._accel_factor = self._controls.get_accel()
self._decel_factor = self._controls.get_decel()
self._bookmarks = PositionBookmarks(None)
self._bookmarks.connect('go-request', self._go_request)
self._ins_combo = InstrumentDropdown(types=['positioner'])
self._ins_combo.connect('changed', self._instrument_changed_cb)
self._instrument = None
poslabel = gtk.Label()
poslabel.set_markup('<big>%s</big>' % _L('Position'))
self._position_label = gtk.Label()
self._update_position()
vbox = pack_vbox([
self._ins_combo,
pack_hbox([
poslabel,
self._position_label], True, True),
self._controls,
self._bookmarks], False, False)
# Speed control variables
self._direction_down = (0, 0, 0)
self._step_done = False
self._speed = [0, 0, 0]
self._timer_hid = None
self._counter = 0
self.add(vbox)
vbox.show_all()
def _delete_event_cb(self, widget, event, data=None):
self.hide()
return True
def _instrument_changed_cb(self, widget):
ins = self._ins_combo.get_instrument()
self._instrument = ins
self._controls.set_instrument(ins)
self._bookmarks.set_instrument(ins)
self._update_position()
def _go_request(self, sender, position):
self._instrument.move_abs(position)
def _direction_clicked_cb(self, sender, direction):
self._direction_down = direction
self._step_done = False
if self._timer_hid is None:
self._timer_hid = gobject.timeout_add(100, self._position_timer)
def _direction_released_cb(self, sender, direction):
if not self._step_done and self._speed == [0, 0, 0]:
if self._timer_hid is not None:
gobject.source_remove(self._timer_hid)
self._timer_hid = None
self._do_single_step()
self._direction_down = (0, 0, 0)
def _do_single_step(self):
for i in range(len(self._direction_down)):
if self._direction_down[i] != 0:
self._instrument.step(i, sign(self._direction_down[i]))
def _update_speed(self):
for i in range(len(self._direction_down)):
if self._direction_down[i] != 0:
if self._speed[i] == 0:
self._speed[i] = self._direction_down[i] * self._min_speed
else:
self._speed[i] = self._speed[i] * self._accel_factor
if abs(self._speed[i]) >= self._max_speed:
self._speed[i] = sign(self._speed[i]) * self._max_speed
else:
self._speed[i] = self._speed[i] / self._decel_factor
if abs(self._speed[i]) < self._min_speed:
self._speed[i] = 0
if self._speed != [0, 0, 0]:
self._step_done = True
self._instrument.set_speed(self._speed)
if not self._moving:
self._instrument.start()
self._moving = True
return True
else:
self._instrument.stop()
self._moving = False
return False
def _update_position(self):
if self._instrument is not None and self._instrument.has_parameter('position'):
pos = self._instrument.get_position()
posstr = self._instrument.format_parameter_value('position', pos)
else:
posstr = 'None'
self._position_label.set_markup('<big>%s</big>' % posstr)
def _position_timer(self):
self._counter += 1
ret = self._update_speed()
if not ret:
self._timer_hid = None
if (self._counter % 5) == 0 or not ret:
self._update_position()
return ret
def _max_speed_changed_cb(self, sender, val):
self._max_speed = val
def _min_speed_changed_cb(self, sender, val):
self._min_speed = val
def _accel_changed_cb(self, sender, val):
self._accel_factor = val
def _decel_changed_cb(self, sender, val):
self._decel_factor = val
def _stop_request_cb(self, sender):
self._instrument.stop()
Window = PositionerWindow
|
the-stack_0_15263 | """
Module containing class which computes fits of data using linear models through
analytical calculations. It has functions to output the signal estimate (with
errors), parameter covariance, and more. It can accept the noise level either
as standard deviations of channels (if uncorrelated) or as a covariance matrix
in the form of a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`.
**File**: $PYLINEX/pylinex/fitter/Fitter.py
**Author**: Keith Tauscher
**Date**: 25 May 2021
"""
from __future__ import division
import numpy as np
import numpy.linalg as la
import matplotlib.pyplot as pl
from distpy import GaussianDistribution, ChiSquaredDistribution
from ..util import Savable, create_hdf5_dataset, psi_squared
from .TrainingSetIterator import TrainingSetIterator
from .BaseFitter import BaseFitter
try:
# this runs with no issues in python 2 but raises error in python 3
basestring
except:
# this try/except allows for python 2/3 compatible string type checking
basestring = str
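

# The function below is an illustrative, self-contained sketch (it is not part
# of the pylinex API) of the Gaussian linear-model algebra that the Fitter
# class implements, written with plain numpy so that the formulas quoted in
# the docstrings, S = (G^T C^-1 G + Lambda^-1)^-1 and
# gamma = S [G^T C^-1 (y - mu) + Lambda^-1 nu], can be checked numerically.
# All argument names are assumptions made for this sketch only.
def _gaussian_linear_fit_sketch(basis, data, error, translation=None,
    prior_mean=None, prior_covariance=None):
    """
    Computes the posterior parameter mean and covariance of a linear model
    with Gaussian noise (diagonal covariance) and an optional Gaussian prior.

    basis: numpy.ndarray of shape (nparams, nchannels) whose rows are the
        basis vectors (so it plays the role of G^T in the docstrings)
    data: 1D numpy.ndarray of length nchannels, i.e. y
    error: 1D numpy.ndarray of noise standard deviations, i.e. sqrt(diag(C))
    translation: optional 1D numpy.ndarray, i.e. mu (defaults to zero)
    prior_mean, prior_covariance: optional Gaussian prior, i.e. nu and Lambda

    returns: tuple of (parameter_mean, parameter_covariance), i.e. (gamma, S)
    """
    if type(translation) is type(None):
        translation = np.zeros_like(data)
    weighted_basis = basis / error[np.newaxis,:]       # G^T C^(-1/2)
    weighted_data = (data - translation) / error       # C^(-1/2) (y - mu)
    inverse_covariance =\
        np.dot(weighted_basis, weighted_basis.T)       # G^T C^-1 G
    mean_term = np.dot(weighted_basis, weighted_data)  # G^T C^-1 (y - mu)
    if type(prior_mean) is not type(None):
        prior_inverse_covariance = la.inv(prior_covariance)
        inverse_covariance =\
            inverse_covariance + prior_inverse_covariance    # add Lambda^-1
        mean_term =\
            mean_term + np.dot(prior_inverse_covariance, prior_mean)
    parameter_covariance = la.inv(inverse_covariance)  # this is S
    parameter_mean = np.dot(parameter_covariance, mean_term)  # this is gamma
    return parameter_mean, parameter_covariance
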
class Fitter(BaseFitter, Savable):
"""
Class which computes fits of data using linear models through analytical
calculations. It has functions to output the signal estimate (with errors),
parameter covariance, and more. It can accept the noise level either as
standard deviations of channels (if uncorrelated) or as a covariance matrix
in the form of a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`.
"""
def __init__(self, basis_sum, data, error=None, **priors):
"""
Initializes a new `Fitter` object using the given inputs. The
likelihood used by the fit is of the form \\(\\mathcal{L}\
(\\boldsymbol{x}) \\propto \\exp{\\left\\{-\\frac{1}{2}\
[\\boldsymbol{y}-(\\boldsymbol{G}\\boldsymbol{x} +\
\\boldsymbol{\\mu})]^T\\boldsymbol{C}^{-1}[\\boldsymbol{y}-\
(\\boldsymbol{G}\\boldsymbol{x}+\\boldsymbol{\\mu})]\\right\\}}\\) and
the prior used is \\(\\pi(\\boldsymbol{x}) \\propto\
\\exp{\\left\\{-\\frac{1}{2}(\\boldsymbol{x}-\\boldsymbol{\\nu})^T\
\\boldsymbol{\\Lambda}^{-1}(\\boldsymbol{x}-\\boldsymbol{\\nu})\
\\right\\}}\\). The posterior distribution explored is
\\(p(\\boldsymbol{x})=\
\\mathcal{L}(\\boldsymbol{x})\\times\\pi(\\boldsymbol{x})\\).
Parameters
----------
basis_sum : `pylinex.basis.BasisSum.BasisSum` or\
`pylinex.basis.Basis.Basis`
the basis used to model the data, represented in equations by
\\(\\boldsymbol{G}\\) alongside the translation component
\\(\\boldsymbol{\\mu}\\). Two types of inputs are accepted:
- If `basis_sum` is a `pylinex.basis.BasisSum.BasisSum`, then it is
assumed to have constituent bases for each modeled component
alongside `pylinex.expander.Expander.Expander` objects determining
how those components enter into the data
- If `basis_sum` is a `pylinex.basis.Basis.Basis`, then it is
assumed that this single basis represents the only component that
needs to be modeled. The
`pylinex.fitter.BaseFitter.BaseFitter.basis_sum` property will be
set to a `pylinex.basis.BasisSum.BasisSum` object with this
`pylinex.basis.Basis.Basis` as its only component, labeled with the
string name `"sole"`
data : numpy.ndarray
the data to fit, represented in equations by \\(\\boldsymbol{y}\\)
- if `data` is 1D, then its length should be the same as the
(expanded) vectors in `basis_sum`, i.e. the number of rows of
\\(\\boldsymbol{G}\\), `nchannels`
- if `data` is 2D, then it should have shape `(ncurves, nchannels)`
and it will be interpreted as a list of data vectors to fit
independently
error : numpy.ndarray or\
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`
the noise level of the data that determines the covariance matrix,
represented in equations by \\(\\boldsymbol{C}\\):
- if `error` is a 1D `numpy.ndarray`, it should have the same
length as the (expanded) vectors in `basis_sum`, i.e. the number of
rows of \\(\\boldsymbol{G}\\), `nchannels` and should only contain
positive numbers. In this case, \\(\\boldsymbol{C}\\) is a diagonal
matrix whose elements are the squares of the values in `error`
- if `error` is a
`distpy.util.SparseSquareBlockDiagonalMatrix.SparseSquareBlockDiagonalMatrix`,
then it is assumed to represent a block diagonal
\\(\\boldsymbol{C}\\) directly
priors : dict
keyword arguments where the keys are exactly the names of the
`basis_sum` with `'_prior'` appended to them and the values are
`distpy.distribution.GaussianDistribution.GaussianDistribution`
objects. Priors are optional and can be included or excluded for
any given component. If `basis_sum` was given as a
`pylinex.basis.Basis.Basis`, then `priors` should either be empty
or a dictionary of the form
`{'sole_prior': gaussian_distribution}`. The means and inverse
covariances of all priors are combined into a full parameter prior
mean and full parameter prior inverse covariance, represented in
equations by \\(\\boldsymbol{\\nu}\\) and
\\(\\boldsymbol{\\Lambda}^{-1}\\), respectively. Having no prior is
equivalent to having an infinitely wide prior, i.e. a prior with an
inverse covariance matrix of \\(\\boldsymbol{0}\\)
"""
self.basis_sum = basis_sum
self.priors = priors
self.data = data
self.error = error
@property
def prior_significance(self):
"""
The prior significance, represented mathematically as
        \\(\\boldsymbol{\\nu}^T\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_prior_significance'):
self._prior_significance = np.dot(self.prior_mean,\
np.dot(self.prior_inverse_covariance, self.prior_mean))
return self._prior_significance
@property
def log_prior_covariance_determinant(self):
"""
The logarithm (base e) of the determinant of the prior
parameter covariance matrix, \\(|\\boldsymbol{\\Lambda}|\\). Note that
if a given prior is not given, it is simply not used here (to avoid
getting 0 or \\(\\infty\\) as the determinant).
"""
if not hasattr(self, '_log_prior_covariance_determinant'):
self._log_prior_covariance_determinant = 0
for key in self.priors:
this_prior_covariance = self.priors[key].covariance.A
self._log_prior_covariance_determinant +=\
la.slogdet(this_prior_covariance)[1]
return self._log_prior_covariance_determinant
@property
def data_significance(self):
"""
The data significance, represented mathematically as
\\((\\boldsymbol{y}-\\boldsymbol{\\mu})^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y} - \\boldsymbol{\\mu})\\). It is either a single number
        (if `Fitter.multiple_data_curves` is False) or a 1D `numpy.ndarray` (if
        `Fitter.multiple_data_curves` is True).
"""
if not hasattr(self, '_data_significance'):
if self.multiple_data_curves:
self._data_significance =\
np.sum(self.weighted_translated_data ** 2, axis=1)
else:
self._data_significance =\
np.dot(self.weighted_translated_data,\
self.weighted_translated_data)
return self._data_significance
@property
def num_parameters(self):
"""
The number of parameters of the fit. This is the same as the
`num_basis_vectors` property of `Fitter.basis_sum`.
"""
return self.basis_sum.num_basis_vectors
@property
def posterior_covariance_times_prior_inverse_covariance(self):
"""
The posterior covariance multiplied on the right by the prior inverse
covariance, represented mathematically as
\\(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1}\\). This is a matrix
measure of the effect of the data on the distribution of parameters
(i.e. it approaches the zero matrix if the data constrains parameters
much more powerfully than the prior and approaches the identity matrix
if the prior constrains parameters much more powerfully than the data).
"""
if not hasattr(self,\
'_posterior_covariance_times_prior_inverse_covariance'):
self._posterior_covariance_times_prior_inverse_covariance =\
np.dot(self.parameter_covariance,\
self.prior_inverse_covariance)
return self._posterior_covariance_times_prior_inverse_covariance
@property
def model_complexity_mean_to_peak_logL(self):
"""
A measure of the model complexity that is computed by taking the
difference between the mean and peak values of the log likelihood. If
this `Fitter` has no priors, then this property will always simply
return the number of parameters, \\(p\\). It is represented
mathematically as
\\(p-\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\\).
"""
if not hasattr(self, '_model_complexity_mean_to_peak_logL'):
self._model_complexity_mean_to_peak_logL = self.num_parameters
if self.has_priors:
self._model_complexity_mean_to_peak_logL -= np.trace(\
self.posterior_covariance_times_prior_inverse_covariance)
return self._model_complexity_mean_to_peak_logL
@property
def model_complexity_logL_variance(self):
"""
A measure of the model complexity which is computed by finding the
variance of the log likelihood function. It is represented
        mathematically as \\(p+2\\ \\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\
\\boldsymbol{G}\\boldsymbol{S}\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
\\boldsymbol{\\delta} + \\text{tr}(\\boldsymbol{S}\
\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\
-2\\ \\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})\\).
"""
if not hasattr(self, '_model_complexity_logL_variance'):
self._model_complexity_logL_variance = self.num_parameters
bias_term = np.dot(self.weighted_basis, self.weighted_bias.T).T
if self.multiple_data_curves:
covariance_times_bias_term =\
np.dot(bias_term, self.parameter_covariance)
bias_term =\
np.sum(bias_term * covariance_times_bias_term, axis=1)
del covariance_times_bias_term
else:
bias_term = np.dot(bias_term,\
np.dot(self.parameter_covariance, bias_term))
self._model_complexity_logL_variance += (2 * bias_term)
if self.has_priors:
self._model_complexity_logL_variance += np.trace(np.dot(\
self.posterior_covariance_times_prior_inverse_covariance,\
self.posterior_covariance_times_prior_inverse_covariance))
self._model_complexity_logL_variance -= (2 * np.trace(\
self.posterior_covariance_times_prior_inverse_covariance))
return self._model_complexity_logL_variance
@property
def basis_dot_products(self):
"""
The dot products between the `pylinex.basis.Basis.Basis` objects
underlying the `Fitter.basis_sum` this object stores. See the
`pylinex.basis.Basis.Basis.dot` method for details on this calculation.
"""
if not hasattr(self, '_basis_dot_products'):
if self.non_diagonal_noise_covariance:
raise NotImplementedError("Basis dot products are not yet " +\
"implemented for non diagonal noise covariance matrices.")
else:
self._basis_dot_products =\
self.basis_sum.basis_dot_products(error=self.error)
return self._basis_dot_products
@property
def basis_dot_product_sum(self):
"""
The sum of all off diagonal elements of the upper triangle of
`Fitter.basis_dot_products`.
"""
if not hasattr(self, '_basis_dot_product_sum'):
self._basis_dot_product_sum = np.sum(self.basis_dot_products)
self._basis_dot_product_sum = self._basis_dot_product_sum -\
np.trace(self.basis_dot_products)
self._basis_dot_product_sum = self._basis_dot_product_sum / 2.
return self._basis_dot_product_sum
@property
def parameter_inverse_covariance(self):
"""
The inverse of the posterior distribution's covariance matrix. This is
represented mathematically as \\(\\boldsymbol{S}^{-1}=\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1}\\).
"""
if not hasattr(self, '_parameter_inverse_covariance'):
self._parameter_inverse_covariance = self.basis_overlap_matrix
if self.has_priors:
self._parameter_inverse_covariance =\
self._parameter_inverse_covariance +\
self.prior_inverse_covariance
return self._parameter_inverse_covariance
@property
def likelihood_parameter_covariance(self):
"""
The parameter covariance implied only by the likelihood, represented
mathematically as
        \\((\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\\).
"""
if not hasattr(self, '_likelihood_parameter_covariance'):
if self.has_priors:
self._likelihood_parameter_covariance =\
la.inv(self.basis_overlap_matrix)
else:
self._likelihood_parameter_covariance =\
self.parameter_covariance
return self._likelihood_parameter_covariance
@property
def likelihood_parameter_mean(self):
"""
Property storing the parameter mean implied by the likelihood (i.e.
disregarding priors). It is represented mathematically as
\\((\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu})\\).
"""
if not hasattr(self, '_likelihood_parameter_mean'):
if self.has_priors:
self._likelihood_parameter_mean =\
np.dot(self.likelihood_parameter_covariance,\
np.dot(self.weighted_basis,\
self.weighted_translated_data.T)).T
else:
self._likelihood_parameter_mean = self.parameter_mean
return self._likelihood_parameter_mean
@property
def likelihood_channel_mean(self):
"""
Property storing the channel mean associated with the likelihood
parameter mean (i.e. the result if there are no priors). It is
represented mathematically as \\(\\boldsymbol{G}\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
(\\boldsymbol{y}-\\boldsymbol{\\mu}) + \\boldsymbol{\\mu}\\).
"""
if not hasattr(self, '_likelihood_channel_mean'):
if self.has_priors:
self._likelihood_channel_mean = self.basis_sum.translation +\
np.dot(self.basis_sum.basis.T,\
self.likelihood_parameter_mean.T).T
else:
self._likelihood_channel_mean = self.channel_mean
return self._likelihood_channel_mean
@property
def likelihood_channel_bias(self):
"""
Property storing the channel-space bias associated with the likelihood
parameter mean (i.e. the result if there are no priors). It is
represented mathematically as \\(\\boldsymbol{\\delta}_{\\text{NP}}=\
\\left[\\boldsymbol{I}-\\boldsymbol{G}\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G})^{-1}\
\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\right]\
(\\boldsymbol{y}-\\boldsymbol{\\mu})\\).
"""
if not hasattr(self, '_likelihood_channel_bias'):
if self.has_priors:
self._likelihood_channel_bias =\
self.data - self.likelihood_channel_mean
else:
self._likelihood_channel_bias = self.channel_bias
return self._likelihood_channel_bias
@property
def likelihood_weighted_bias(self):
"""
The likelihood channel bias weighted by the error, represented
mathematically as
\\(\\boldsymbol{C}^{-1/2}\\boldsymbol{\\delta}_{\\text{NP}}\\).
"""
if not hasattr(self, '_likelihood_weighted_bias'):
if self.has_priors:
self._likelihood_weighted_bias =\
self.weight(self.likelihood_channel_bias, -1)
else:
self._likelihood_weighted_bias = self.weighted_bias
return self._likelihood_weighted_bias
@property
def likelihood_bias_statistic(self):
"""
        The bias statistic of the likelihood-only (no prior) fit, represented
        mathematically as
\\(\\boldsymbol{\\delta}_{\\text{NP}}^T \\boldsymbol{C}^{-1}\
\\boldsymbol{\\delta}_{\\text{NP}}\\). It is equal to -2 times the peak
value of the loglikelihood.
"""
if not hasattr(self, '_likelihood_bias_statistic'):
if self.has_priors:
if self.multiple_data_curves:
self._likelihood_bias_statistic =\
np.sum(self.likelihood_weighted_bias ** 2, axis=1)
else:
self._likelihood_bias_statistic = np.dot(\
self.likelihood_weighted_bias,\
self.likelihood_weighted_bias)
else:
self._likelihood_bias_statistic = self.bias_statistic
return self._likelihood_bias_statistic
@property
def degrees_of_freedom(self):
"""
The difference between the number of channels and the number of
parameters.
"""
if not hasattr(self, '_degrees_of_freedom'):
self._degrees_of_freedom = self.num_channels - self.num_parameters
return self._degrees_of_freedom
@property
def normalized_likelihood_bias_statistic(self):
"""
The normalized version of the likelihood bias statistic. This is a
statistic that should be close to 1 which measures how well the total
data is fit and is represented mathematically as
\\(\\frac{1}{\\text{dof}}\\boldsymbol{\\delta}_{\\text{NP}}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}_{\\text{NP}}\\), where
        \\(\\text{dof}\\) is the number of degrees of freedom.
"""
if not hasattr(self, '_normalized_likelihood_bias_statistic'):
self._normalized_likelihood_bias_statistic =\
self.likelihood_bias_statistic / self.degrees_of_freedom
return self._normalized_likelihood_bias_statistic
@property
def chi_squared(self):
"""
The (non-reduced) chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as
\\(\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
return self.bias_statistic
@property
def reduced_chi_squared(self):
"""
The reduced chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as \\(\\frac{1}{\\text{dof}}\
\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
return self.normalized_bias_statistic
@property
def reduced_chi_squared_expected_mean(self):
"""
The expected mean of `Fitter.reduced_chi_squared`, represented
mathematically as \\(\\frac{1}{\\text{dof}}[\\text{dof} +\
\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})]\\).
"""
if not hasattr(self, '_reduced_chi_squared_expected_mean'):
if self.has_priors:
mean = np.sum(np.diag(\
self.posterior_covariance_times_prior_inverse_covariance))
else:
mean = 0
self._reduced_chi_squared_expected_mean =\
(mean + self.degrees_of_freedom) / self.degrees_of_freedom
return self._reduced_chi_squared_expected_mean
@property
def reduced_chi_squared_expected_variance(self):
"""
The expected variance of `Fitter.reduced_chi_squared`, represented
mathematically as \\(\\frac{2}{\\text{dof}^2}[\\text{dof} +\
\\text{tr}(\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1}\
\\boldsymbol{S}\\boldsymbol{\\Lambda}^{-1})]\\).
"""
if not hasattr(self, '_reduced_chi_squared_expected_variance'):
if self.has_priors:
variance =\
self.posterior_covariance_times_prior_inverse_covariance
variance = np.sum(variance * variance.T)
else:
variance = 0
self._reduced_chi_squared_expected_variance =\
(2 * (variance + self.degrees_of_freedom)) /\
(self.degrees_of_freedom ** 2)
return self._reduced_chi_squared_expected_variance
@property
def reduced_chi_squared_expected_distribution(self):
"""
A `distpy.distribution.GaussianDistribution.GaussianDistribution` with
mean given by `Fitter.reduced_chi_squared_expected_mean` and variance
given by `Fitter.reduced_chi_squared_expected_variance`.
"""
if not hasattr(self, '_reduced_chi_squared_expected_distribution'):
if self.has_priors:
self._reduced_chi_squared_expected_distribution =\
GaussianDistribution(\
self.reduced_chi_squared_expected_mean,\
self.reduced_chi_squared_expected_variance)
else:
self._reduced_chi_squared_expected_distribution =\
ChiSquaredDistribution(self.degrees_of_freedom,\
reduced=True)
return self._reduced_chi_squared_expected_distribution
@property
def psi_squared(self):
"""
Property storing the reduced psi-squared values of the fit(s) in this
Fitter.
"""
if not hasattr(self, '_psi_squared'):
if self.multiple_data_curves:
self._psi_squared =\
np.array([psi_squared(bias, error=None)\
for bias in self.weighted_bias])
else:
self._psi_squared = psi_squared(self.weighted_bias, error=None)
return self._psi_squared
@property
def maximum_loglikelihood(self):
"""
The maximum value of the Gaussian loglikelihood (when the normalizing
constant outside the exponential is left off).
"""
if not hasattr(self, '_maximum_loglikelihood'):
self._maximum_loglikelihood =\
(-(self.likelihood_bias_statistic / 2.))
return self._maximum_loglikelihood
@property
def parameter_covariance(self):
"""
The covariance matrix of the posterior parameter distribution,
represented mathematically as \\(\\boldsymbol{S}=(\\boldsymbol{G}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
\\boldsymbol{\\Lambda}^{-1})^{-1}\\).
"""
if not hasattr(self, '_parameter_covariance'):
self._parameter_covariance =\
la.inv(self.parameter_inverse_covariance)
return self._parameter_covariance
@property
def log_parameter_covariance_determinant(self):
"""
The logarithm (base e) of the determinant of the posterior parameter
covariance matrix, represented mathematically as
        \\(\\ln{|\\boldsymbol{S}|}\\).
"""
if not hasattr(self, '_log_parameter_covariance_determinant'):
self._log_parameter_covariance_determinant =\
la.slogdet(self.parameter_covariance)[1]
return self._log_parameter_covariance_determinant
@property
def log_parameter_covariance_determinant_ratio(self):
"""
The logarithm (base e) of the ratio of the determinant of the posterior
parameter covariance matrix to the determinant of the prior parameter
covariance matrix. This can be thought of as the log of the ratio of
the hypervolume of the 1 sigma posterior ellipse to the hypervolume of
the 1 sigma prior ellipse. It is represented mathematically as
        \\(\\ln{\\left(\\frac{|\\boldsymbol{S}|}{\
        |\\boldsymbol{\\Lambda}|}\\right)}\\).
"""
if not hasattr(self, '_log_parameter_covariance_determinant_ratio'):
self._log_parameter_covariance_determinant_ratio =\
self.log_parameter_covariance_determinant -\
self.log_prior_covariance_determinant
return self._log_parameter_covariance_determinant_ratio
@property
def channel_error(self):
"""
The error on the estimate of the full data in channel space,
represented mathematically as
\\(\\boldsymbol{G}\\boldsymbol{S}\\boldsymbol{G}^T\\).
"""
if not hasattr(self, '_channel_error'):
SAT = np.dot(self.parameter_covariance, self.basis_sum.basis)
self._channel_error =\
np.sqrt(np.einsum('ab,ab->b', self.basis_sum.basis, SAT))
return self._channel_error
@property
def channel_RMS(self):
"""
The RMS error on the estimate of the full data in channel space.
"""
if not hasattr(self, '_channel_RMS'):
self._channel_RMS =\
np.sqrt(np.mean(np.power(self.channel_error, 2)))
return self._channel_RMS
@property
def parameter_mean(self):
"""
The posterior mean parameter vector(s). It is represented
mathematically as
\\(\\boldsymbol{\\gamma} =\
(\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\\boldsymbol{G} +\
        \\boldsymbol{\\Lambda}^{-1})^{-1}[\\boldsymbol{G}^T\\boldsymbol{C}^{-1}\
        (\\boldsymbol{y}-\\boldsymbol{\\mu}) +\
        \\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}]\\) and is stored in a
        `numpy.ndarray` whose shape is either `(nparams,)` or
`(ncurves, nparams)`.
"""
if not hasattr(self, '_parameter_mean'):
self._parameter_mean =\
np.dot(self.weighted_basis, self.weighted_translated_data.T).T
if self.has_priors:
if self.multiple_data_curves:
self._parameter_mean = self._parameter_mean +\
self.prior_inverse_covariance_times_mean[np.newaxis,:]
else:
self._parameter_mean = self._parameter_mean +\
self.prior_inverse_covariance_times_mean
self._parameter_mean =\
np.dot(self.parameter_covariance, self._parameter_mean.T).T
return self._parameter_mean
@property
def parameter_distribution(self):
"""
Property storing a
`distpy.distribution.GaussianDistribution.GaussianDistribution`
representing a distribution with the mean and covariance stored in
`Fitter.parameter_mean` and `Fitter.parameter_covariance`,
respectively.
"""
if not hasattr(self, '_parameter_distribution'):
if self.multiple_data_curves:
raise ValueError("parameter_distribution only makes sense " +\
"if the Fitter has only one data curve.")
else:
self._parameter_distribution = GaussianDistribution(\
self.parameter_mean, self.parameter_covariance)
return self._parameter_distribution
@property
def posterior_significance(self):
"""
The posterior significance, represented mathematically as
\\(\\boldsymbol{z}^T \\boldsymbol{S}^{-1} \\boldsymbol{z}\\),
where \\(z\\) is `Fitter.parameter_mean`.
"""
if not hasattr(self, '_posterior_significance'):
if self.multiple_data_curves:
inverse_covariance_times_mean = np.dot(self.parameter_mean,\
self.parameter_inverse_covariance)
self._posterior_significance = np.sum(\
self.parameter_mean * inverse_covariance_times_mean,\
axis=1)
del inverse_covariance_times_mean
else:
self._posterior_significance =\
np.dot(self.parameter_mean,\
np.dot(self.parameter_inverse_covariance,\
self.parameter_mean))
return self._posterior_significance
@property
def channel_mean(self):
"""
The posterior estimate of the modeled data in channel space.
"""
if not hasattr(self, '_channel_mean'):
self._channel_mean = self.basis_sum.translation +\
np.dot(self.basis_sum.basis.T, self.parameter_mean.T).T
return self._channel_mean
@property
def channel_bias(self):
"""
        The bias of the estimate of the data (i.e. the data minus the
        posterior estimate of the data), represented mathematically as
\\(\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_channel_bias'):
self._channel_bias = self.data - self.channel_mean
return self._channel_bias
@property
def channel_bias_RMS(self):
"""
The RMS of `Fitter.channel_bias`.
"""
if not hasattr(self, '_channel_bias_RMS'):
if self.multiple_data_curves:
self._channel_bias_RMS = np.sqrt(\
np.sum(self.channel_bias ** 2, axis=1) / self.num_channels)
else:
self._channel_bias_RMS =\
np.sqrt(np.dot(self.channel_bias, self.channel_bias) /\
self.num_channels)
return self._channel_bias_RMS
@property
def weighted_bias(self):
"""
The posterior channel bias weighted down by the errors, represented
        mathematically as \\(\\boldsymbol{C}^{-1/2}\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_weighted_bias'):
self._weighted_bias = self.weight(self.channel_bias, -1)
return self._weighted_bias
@property
def bias_statistic(self):
"""
A statistic known as the "bias statistic", represented mathematically
as
\\(\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
It is a measure of the bias of the full model being fit. It should have
a \\(\\chi^2(N)\\) distribution where \\(N\\) is the number of degrees
of freedom.
"""
if not hasattr(self, '_bias_statistic'):
if self.multiple_data_curves:
self._bias_statistic = np.sum(self.weighted_bias ** 2, axis=1)
else:
self._bias_statistic =\
np.dot(self.weighted_bias, self.weighted_bias)
return self._bias_statistic
@property
def loglikelihood_at_posterior_maximum(self):
"""
The value of the Gaussian loglikelihood (without the normalizing factor
outside the exponential) at the maximum of the posterior distribution.
"""
if not hasattr(self, '_loglikelihood_at_posterior_maximum'):
self._loglikelihood_at_posterior_maximum =\
(-(self.bias_statistic / 2.))
return self._loglikelihood_at_posterior_maximum
@property
def normalized_bias_statistic(self):
"""
The reduced chi-squared value(s) of the fit(s) in this `Fitter`,
represented mathematically as \\(\\frac{1}{\\text{dof}}\
\\boldsymbol{\\delta}^T\\boldsymbol{C}^{-1}\\boldsymbol{\\delta}\\).
"""
if not hasattr(self, '_normalized_bias_statistic'):
self._normalized_bias_statistic =\
self.bias_statistic / self.degrees_of_freedom
return self._normalized_bias_statistic
@property
def likelihood_significance_difference(self):
"""
The likelihood covariance part of the significance difference, equal to
        \\((\\boldsymbol{G}\\boldsymbol{\\gamma})^T\\boldsymbol{C}^{-1}\
        \\boldsymbol{G}\\boldsymbol{\\gamma}-\
        (\\boldsymbol{y}-\\boldsymbol{\\mu})^T\\boldsymbol{C}^{-1}\
        (\\boldsymbol{y}-\\boldsymbol{\\mu})\\) where
\\(\\boldsymbol{\\gamma}\\) is `Fitter.parameter_mean`.
"""
if not hasattr(self, '_likelihood_significance_difference'):
mean_sum = self.weight(self.channel_mean + self.data -\
(2 * self.basis_sum.translation), -1)
            mean_difference = self.weight(self.channel_mean - self.data, -1)
if self.multiple_data_curves:
self._likelihood_significance_difference =\
np.sum(mean_sum * mean_difference, axis=1)
else:
self._likelihood_significance_difference =\
np.dot(mean_sum, mean_difference)
return self._likelihood_significance_difference
@property
def prior_significance_difference(self):
"""
Property storing the prior covariance part of the significance
        difference. This is equal to \\(\\boldsymbol{\\gamma}^T\
\\boldsymbol{\\Lambda}^{-1} \\boldsymbol{\\gamma} -\
\\boldsymbol{\\nu}^T \\boldsymbol{\\Lambda}^{-1} \\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_prior_significance_difference'):
if self.multiple_data_curves:
self._prior_significance_difference =\
np.zeros(self.data.shape[:-1])
else:
self._prior_significance_difference = 0
for name in self.names:
key = '{!s}_prior'.format(name)
if key in self.priors:
prior = self.priors[key]
prior_mean = prior.internal_mean.A[0]
prior_inverse_covariance = prior.inverse_covariance.A
posterior_mean = self.subbasis_parameter_mean(name=name)
mean_sum = posterior_mean + prior_mean
mean_difference = posterior_mean - prior_mean
if self.multiple_data_curves:
this_term =\
np.dot(mean_difference, prior_inverse_covariance)
this_term = np.sum(this_term * mean_sum, axis=1)
else:
this_term = np.dot(mean_sum,\
np.dot(prior_inverse_covariance, mean_difference))
self._prior_significance_difference =\
self._prior_significance_difference + this_term
return self._prior_significance_difference
@property
def significance_difference(self):
"""
The difference between the posterior significance and the sum of the
data significance and prior significance. It is a term in the log
evidence and is given by
\\(\\boldsymbol{\\gamma}^T\\boldsymbol{S}^{-1}\\boldsymbol{\\gamma} -\
        (\\boldsymbol{y}-\\boldsymbol{\\mu})^T\\boldsymbol{C}^{-1}\
        (\\boldsymbol{y}-\\boldsymbol{\\mu}) -\
\\boldsymbol{\\nu}^T\\boldsymbol{\\Lambda}^{-1}\\boldsymbol{\\nu}\\).
"""
if not hasattr(self, '_significance_difference'):
self._significance_difference =\
self.likelihood_significance_difference +\
self.prior_significance_difference
return self._significance_difference
@property
def log_evidence(self):
"""
The natural logarithm of the evidence (a.k.a. marginal likelihood) of
this fit. The evidence is the integral over parameter space of the
product of the likelihood and the prior and is often very large.
"""
if not hasattr(self, '_log_evidence'):
log_evidence = (self.log_parameter_covariance_determinant_ratio +\
self.significance_difference) / 2.
if self.has_all_priors:
# only constants added below, ignore if numerical problems
log_evidence = log_evidence -\
((self.num_channels * np.log(2 * np.pi)) / 2.)
if self.non_diagonal_noise_covariance:
log_evidence = log_evidence +\
(self.error.sign_and_log_abs_determinant()[1]) / 2
else:
log_evidence = log_evidence + np.sum(np.log(self.error))
self._log_evidence = log_evidence
return self._log_evidence
@property
def log_evidence_per_data_channel(self):
"""
`Fitter.log_evidence` divided by the number of channels.
"""
if not hasattr(self, '_log_evidence_per_data_channel'):
self._log_evidence_per_data_channel =\
self.log_evidence / self.num_channels
return self._log_evidence_per_data_channel
@property
def evidence(self):
"""
The evidence (a.k.a. marginal likelihood) of this fit. Beware: the
evidence is often extremely large in magnitude, with log evidences
sometimes approaching +-10^7. In these cases, the evidence will end up
NaN.
"""
if not hasattr(self, '_evidence'):
self._evidence = np.exp(self.log_evidence)
return self._evidence
@property
def evidence_per_data_channel(self):
"""
The factor by which each data channel multiplies the Bayesian evidence
on average (more precisely, the geometric mean of these numbers).
"""
if not hasattr(self, '_evidence_per_data_channel'):
self._evidence_per_data_channel =\
np.exp(self.log_evidence_per_data_channel)
return self._evidence_per_data_channel
@property
def bayesian_information_criterion(self):
"""
The Bayesian Information Criterion (BIC) which is essentially the same
as the bias statistic except it includes information about the
complexity of the model. It is \\(\\boldsymbol{\\delta}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta} + p\\ln{N}\\), where \\(p\\)
is the number of parameters and \\(N\\) is the number of data channels.
"""
if not hasattr(self, '_bayesian_information_criterion'):
self._bayesian_information_criterion =\
self.likelihood_bias_statistic +\
(self.num_parameters * np.log(self.num_channels))
return self._bayesian_information_criterion
@property
def BIC(self):
"""
Alias for `Fitter.bayesian_information_criterion`.
"""
return self.bayesian_information_criterion
@property
def akaike_information_criterion(self):
"""
An information criterion given by \\(\\boldsymbol{\\delta}^T\
\\boldsymbol{C}^{-1}\\boldsymbol{\\delta} + 2p\\), where \\(p\\) is the
number of parameters.
"""
if not hasattr(self, '_akaike_information_criterion'):
self._akaike_information_criterion =\
self.likelihood_bias_statistic + (2 * self.num_parameters)
return self._akaike_information_criterion
@property
def AIC(self):
"""
Alias for `Fitter.akaike_information_criterion`.
"""
return self.akaike_information_criterion
    ######################## TODO documentation below this line hasn't been updated!
@property
def deviance_information_criterion(self):
"""
An information criterion given by -4 ln(L_max) + <2 ln(L)> where L is
the likelihood, <> denotes averaging over the posterior, and L_max is
the maximum likelihood.
"""
if not hasattr(self, '_deviance_information_criterion'):
self._deviance_information_criterion =\
self.likelihood_bias_statistic +\
(2 * self.model_complexity_mean_to_peak_logL)
return self._deviance_information_criterion
@property
def DIC(self):
"""
Alias for deviance_information_criterion property.
"""
return self.deviance_information_criterion
@property
def deviance_information_criterion_logL_variance(self):
"""
Version of the Deviance Information Criterion (DIC) which estimates the
model complexity through computation of the variance of the log
likelihood (with respect to the posterior).
"""
if not hasattr(self, '_deviance_information_criterion_logL_variance'):
self._deviance_information_criterion_logL_variance =\
self.likelihood_bias_statistic +\
self.model_complexity_logL_variance
return self._deviance_information_criterion_logL_variance
@property
def DIC2(self):
"""
Alias for the deviance_information_criterion_logL_variance property.
"""
return self.deviance_information_criterion_logL_variance
@property
def posterior_prior_mean_difference(self):
"""
Property storing the difference between the posterior parameter mean
and the prior parameter mean.
"""
if not hasattr(self, '_posterior_prior_mean_difference'):
if self.multiple_data_curves:
self._posterior_prior_mean_difference =\
self.parameter_mean - self.prior_mean[np.newaxis,:]
else:
self._posterior_prior_mean_difference =\
self.parameter_mean - self.prior_mean
return self._posterior_prior_mean_difference
@property
def bayesian_predictive_information_criterion(self):
"""
The Bayesian Predictive Information Criterion (BPIC), a statistic which
gives relatives goodness of fit values.
"""
if not hasattr(self, '_bayesian_predictive_information_criterion'):
self._bayesian_predictive_information_criterion =\
self.num_parameters + self.bias_statistic
if self.has_priors: # TODO
self._bayesian_predictive_information_criterion -= np.trace(\
self.posterior_covariance_times_prior_inverse_covariance)
term_v1 = np.dot(\
self.posterior_covariance_times_prior_inverse_covariance,\
self.posterior_prior_mean_difference.T).T
term_v2 = np.dot(self.prior_inverse_covariance,\
self.posterior_prior_mean_difference.T).T +\
(2 * np.dot(self.weighted_basis, self.weighted_bias.T).T)
if self.multiple_data_curves:
self._bayesian_predictive_information_criterion +=\
(np.sum(term_v1 * term_v2, axis=1) / self.num_channels)
else:
self._bayesian_predictive_information_criterion +=\
(np.dot(term_v1, term_v2) / self.num_channels)
if self.non_diagonal_noise_covariance:
doubly_weighted_basis =\
self.weight(self.weight(self.basis_sum.basis, -1), -1)
self._bayesian_predictive_information_criterion +=\
(2 * np.einsum('ij,ik,jk,k', self.parameter_covariance,\
doubly_weighted_basis, doubly_weighted_basis,\
self.channel_bias ** 2))
else:
weighted_error = self.channel_error / self.error
if self.multiple_data_curves:
weighted_error = weighted_error[np.newaxis,:]
to_sum = ((weighted_error * self.weighted_bias) ** 2)
self._bayesian_predictive_information_criterion +=\
(2 * np.sum(to_sum, axis=-1))
del to_sum
return self._bayesian_predictive_information_criterion
@property
def BPIC(self):
"""
Alias for `Fitter.bayesian_predictive_information_criterion`.
"""
return self.bayesian_predictive_information_criterion
def subbasis_log_separation_evidence(self, name=None):
"""
Calculates the subbasis_log_separation evidence per degree of freedom.
This is the same as the evidence with the log covariance determinant
ratio replaced by the log covariance determinant ratio for the given
subbasis (normalized by the degrees of freedom).
name: string identifying subbasis under concern
per_channel: if True, normalizes the log_separation_evidence by
            dividing by the number of data channels.
returns: single float number
"""
if not hasattr(self, '_subbasis_log_separation_evidences'):
self._subbasis_log_separation_evidences = {}
if name not in self._subbasis_log_separation_evidences:
self._subbasis_log_separation_evidences[name] =\
(self.log_evidence -\
(self.log_parameter_covariance_determinant_ratio / 2.) +\
(self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name) / 2.)) / self.degrees_of_freedom
return self._subbasis_log_separation_evidences[name]
def subbasis_separation_evidence_per_degree_of_freedom(self, name=None):
"""
Finds the subbasis separation evidence per degree of freedom.
name: string identifying subbasis under concern
returns: single non-negative float number
"""
if not hasattr(self,\
'_subbasis_separation_evidences_per_degree_of_freedom'):
self._subbasis_separation_evidences_per_degree_of_freedom = {}
if name not in\
self._subbasis_separation_evidences_per_degree_of_freedom:
self._subbasis_separation_evidences_per_degree_of_freedom[name] =\
np.exp(self.subbasis_log_separation_evidence(name=name))
return self._subbasis_separation_evidences_per_degree_of_freedom[name]
@property
def log_separation_evidence(self):
"""
Property storing the logarithm (base e) of the separation evidence, a
version of the evidence where the log of the ratio of the determinants
of the posterior to prior covariance matrices is replaced by the sum
over all subbases of such logs of ratios.
"""
if not hasattr(self, '_log_separation_evidence'):
self._log_separation_evidence = self.log_evidence -\
(self.log_parameter_covariance_determinant_ratio / 2.) +\
(self.subbasis_log_parameter_covariance_determinant_ratios_sum\
/ 2.)
return self._log_separation_evidence
@property
def log_separation_evidence_per_data_channel(self):
"""
Property storing the log_separation_evidence divided by the number of
data channels. For more information, see the log_separation_evidence
property.
"""
if not hasattr(self, '_log_separation_evidence_per_data_channel'):
self._log_separation_evidence_per_data_channel =\
self.log_separation_evidence / self.num_channels
return self._log_separation_evidence_per_data_channel
@property
def separation_evidence(self):
"""
Property storing the separation evidence, a version of the evidence
where the log of the ratio of the determinants of the posterior to
prior covariance matrices is replaced by the sum over all subbases of
such logs of ratios.
"""
if not hasattr(self, '_separation_evidence'):
self._separation_evidence = np.exp(self.log_separation_evidence)
return self._separation_evidence
@property
def separation_evidence_per_data_channel(self):
"""
Property storing the average (geometric mean) factor by which each data
channel affects the separation evidence.
"""
if not hasattr(self, '_separation_evidence_per_data_channel'):
self._separation_evidence_per_data_channel =\
np.exp(self.log_separation_evidence_per_data_channel)
return self._separation_evidence_per_data_channel
@property
def subbasis_log_parameter_covariance_determinant_ratios_sum(self):
"""
Property storing the sum of the logarithms (base e) of the ratios of
the posterior parameter covariance matrices to the prior parameter
covariance matrices.
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinant_ratios_sum'):
self._subbasis_log_parameter_covariance_determinant_ratios_sum =\
sum([self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name) for name in self.names])
return self._subbasis_log_parameter_covariance_determinant_ratios_sum
def subbasis_prior_significance(self, name=None):
"""
Finds and returns the quantity: mu^T Lambda^{-1} mu, where mu is the
prior subbasis parameter mean and Lambda is the prior subbasis
parameter covariance.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self, '_subbasis_prior_significances'):
self._subbasis_prior_significances = {}
if name not in self._subbasis_prior_significances:
prior = self.priors[name + '_prior']
mean = prior.internal_mean.A[0]
inverse_covariance = prior.inverse_covariance.A
self._subbasis_prior_significances[name] =\
np.dot(mean, np.dot(inverse_covariance, mean))
return self._subbasis_prior_significances[name]
def subbasis_parameter_inverse_covariance(self, name=None):
"""
Finds the inverse of the marginalized covariance matrix corresponding
to the given subbasis.
name: string identifying subbasis under concern
"""
if not hasattr(self, '_subbasis_parameter_inverse_covariances'):
self._subbasis_parameter_inverse_covariances = {}
if name not in self._subbasis_parameter_inverse_covariances:
self._subbasis_parameter_inverse_covariances[name] =\
la.inv(self.subbasis_parameter_covariance(name=name))
return self._subbasis_parameter_inverse_covariances[name]
def subbases_overlap_matrix(self, row_name=None, column_name=None):
"""
Creates a view into the overlap matrix between the given subbases.
row_name: the (string) name of the subbasis whose parameter number will
be represented by the row of the returned matrix.
column_name: the (string) name of the subbasis whose parameter number
will be represented by the column of the returned matrix
returns: n x m matrix where n is the number of basis vectors in the row
subbasis and m is the number of basis vectors in the column
subbasis in the form of a 2D numpy.ndarray
"""
row_slice = self.basis_sum.slices_by_name[row_name]
column_slice = self.basis_sum.slices_by_name[column_name]
return self.basis_overlap_matrix[:,column_slice][row_slice]
def subbasis_parameter_covariance(self, name=None):
"""
Finds and returns the portion of the parameter covariance matrix
associated with the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 2D numpy.ndarray of shape (k, k) where k is the number of basis
vectors in the subbasis
"""
if not hasattr(self, '_subbasis_parameter_covariances'):
self._subbasis_parameter_covariances = {}
if name not in self._subbasis_parameter_covariances:
subbasis_slice = self.basis_sum.slices_by_name[name]
self._subbasis_parameter_covariances[name] =\
self.parameter_covariance[:,subbasis_slice][subbasis_slice]
return self._subbasis_parameter_covariances[name]
def subbasis_log_parameter_covariance_determinant(self, name=None):
"""
Finds the logarithm (base e) of the determinant of the posterior
parameter covariance matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinants'):
self._subbasis_log_parameter_covariance_determinants = {}
if name not in self._subbasis_log_parameter_covariance_determinants:
self._subbasis_log_parameter_covariance_determinants[name] =\
la.slogdet(self.subbasis_parameter_covariance(name=name))[1]
return self._subbasis_log_parameter_covariance_determinants[name]
def subbasis_log_prior_covariance_determinant(self, name=None):
"""
Finds the logarithm (base e) of the determinant of the prior parameter
covariance matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if type(name) is type(None):
return self.log_prior_covariance_determinant
if not hasattr(self, '_subbasis_log_prior_covariance_determinants'):
self._subbasis_log_prior_covariance_determinants = {}
if name not in self._subbasis_log_prior_covariance_determinants:
self._subbasis_log_prior_covariance_determinants[name] =\
la.slogdet(self.priors[name + '_prior'].covariance.A)[1]
return self._subbasis_log_prior_covariance_determinants[name]
def subbasis_log_parameter_covariance_determinant_ratio(self, name=None):
"""
Finds logarithm (base e) of the ratio of the determinant of the
posterior covariance matrix to the determinant of the prior covariance
matrix for the given subbasis.
name: string identifying subbasis under concern
returns: single float number
"""
if not hasattr(self,\
'_subbasis_log_parameter_covariance_determinant_ratios'):
self._subbasis_log_parameter_covariance_determinant_ratios = {}
if name not in\
self._subbasis_log_parameter_covariance_determinant_ratios:
self._subbasis_log_parameter_covariance_determinant_ratios[name] =\
self.subbasis_log_parameter_covariance_determinant(name=name)-\
self.subbasis_log_prior_covariance_determinant(name=name)
return self._subbasis_log_parameter_covariance_determinant_ratios[name]
def subbasis_parameter_covariance_determinant_ratio(self, name=None):
"""
Finds the ratio of the determinant of the posterior covariance matrix
to the determinant of the prior covariance matrix for the given
subbasis.
name: string identifying subbasis under concern
returns: single non-negative float number
"""
if not hasattr(self,\
'_subbasis_parameter_covariance_determinant_ratios'):
self._subbasis_parameter_covariance_determinant_ratios = {}
if type(name) is type(None):
self._subbasis_parameter_covariance_determinant_ratios[name] =\
np.exp(\
self.subbasis_log_parameter_covariance_determinant_ratios_sum)
elif name not in\
self._subbasis_parameter_covariance_determinant_ratios:
self._subbasis_parameter_covariance_determinant_ratios[name] =\
np.exp(\
self.subbasis_log_parameter_covariance_determinant_ratio(\
name=name))
return self._subbasis_parameter_covariance_determinant_ratios[name]
def subbasis_channel_error(self, name=None):
"""
Finds the error (in data channel space) of the fit by a given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray of the same length as the basis vectors of
the subbasis (which may or may not be different than the
length of the expanded basis vectors).
"""
if type(name) is type(None):
return self.channel_error
if not hasattr(self, '_subbasis_channel_errors'):
self._subbasis_channel_errors = {}
if name not in self._subbasis_channel_errors:
basis = self.basis_sum[name].basis
covariance_times_basis =\
np.dot(self.subbasis_parameter_covariance(name=name), basis)
self._subbasis_channel_errors[name] =\
np.sqrt(np.sum(covariance_times_basis * basis, axis=0))
return self._subbasis_channel_errors[name]
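# Illustrative note (not in the original source): the channel error computed above
# is sqrt(diag(B^T S B)) for subbasis matrix B and subbasis parameter covariance S.
# Assuming `covariance` is self.subbasis_parameter_covariance(name=name) and
# `basis` is the same array as in the method, an equivalent one-liner would be
#     np.sqrt(np.einsum('ij,ik,jk->k', covariance, basis, basis))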
def subbasis_parameter_mean(self, name=None):
"""
Finds the posterior parameter mean for a subbasis. This is just a view
into the full posterior parameter mean.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray containing the parameters for the given
subbasis
"""
if not hasattr(self, '_subbasis_parameter_means'):
self._subbasis_parameter_means = {}
if name not in self._subbasis_parameter_means:
self._subbasis_parameter_means[name] =\
self.parameter_mean[...,self.basis_sum.slices_by_name[name]]
return self._subbasis_parameter_means[name]
def subbasis_channel_mean(self, name=None):
"""
The estimate of the contribution to the data from the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: 1D numpy.ndarray containing the channel-space estimate from
the given subbasis
"""
if not hasattr(self, '_subbasis_channel_means'):
self._subbasis_channel_means = {}
if name not in self._subbasis_channel_means:
self._subbasis_channel_means[name] =\
np.dot(self.subbasis_parameter_mean(name=name),\
self.basis_sum[name].basis) + self.basis_sum[name].translation
return self._subbasis_channel_means[name]
def subbasis_channel_RMS(self, name=None):
"""
Calculates and returns the RMS channel error on the estimate of the
contribution to the data from the given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
returns: single float number RMS
"""
if not hasattr(self, '_subbasis_channel_RMSs'):
self._subbasis_channel_RMSs = {}
if name not in self._subbasis_channel_RMSs:
self._subbasis_channel_RMSs[name] = np.sqrt(\
np.mean(np.power(self.subbasis_channel_error(name=name), 2)))
return self._subbasis_channel_RMSs[name]
def subbasis_separation_statistic(self, name=None):
"""
Finds the separation statistic associated with the given subbasis. The
separation statistic is essentially an RMS'd error expansion factor.
name: name of the subbasis for which to find the separation statistic
"""
if not hasattr(self, '_subbasis_separation_statistics'):
self._subbasis_separation_statistics = {}
if name not in self._subbasis_separation_statistics:
weighted_basis =\
self.weight(self.basis_sum[name].expanded_basis, -1)
stat = np.dot(weighted_basis, weighted_basis.T)
stat = np.sum(stat * self.subbasis_parameter_covariance(name=name))
stat = np.sqrt(stat / self.degrees_of_freedom)
self._subbasis_separation_statistics[name] = stat
return self._subbasis_separation_statistics[name]
def subbasis_channel_bias(self, name=None, true_curve=None):
"""
Calculates and returns the bias on the estimate from the given subbasis
using the given curve as a reference.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis channel space
returns: 1D numpy.ndarray in channel space containing the difference
between the estimate of the data's contribution from the given
subbasis and the given true curve
"""
if type(name) is type(None):
if type(true_curve) is type(None):
return self.channel_bias
else:
raise ValueError("true_curve should only be given to " +\
"subbasis_channel_bias if the name of a " +\
"subbasis is specified.")
else:
if type(true_curve) is type(None):
raise ValueError("true_curve must be given to " +\
"subbasis_channel_bias if the name of a " +\
"subbasis is specified.")
if self.multiple_data_curves and (true_curve.ndim == 1):
return true_curve[np.newaxis,:] -\
self.subbasis_channel_mean(name=name)
else:
return true_curve - self.subbasis_channel_mean(name=name)
def subbasis_weighted_bias(self, name=None, true_curve=None):
"""
The bias of the contribution of a given subbasis to the data. This
function requires knowledge of the "truth".
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
returns: 1D numpy.ndarray of weighted bias values
"""
subbasis_channel_bias =\
self.subbasis_channel_bias(name=name, true_curve=true_curve)
subbasis_channel_error = self.subbasis_channel_error(name=name)
if self.multiple_data_curves:
return subbasis_channel_bias / subbasis_channel_error[np.newaxis,:]
else:
return subbasis_channel_bias / subbasis_channel_error
def subbasis_bias_statistic(self, name=None, true_curve=None,\
norm_by_dof=False):
"""
The bias statistic of the fit to the contribution of the given
subbasis. The bias statistic is delta^T C^-1 delta where delta is the
difference between the true curve(s) and the channel mean(s) normalized
by the degrees of freedom.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
norm_by_dof: if True, the summed squared error-weighted subbasis bias is
normalized by the subbasis degrees of freedom.
if False (default), the summed squared error-weighted subbasis bias is
normalized by the number of channels in the subbasis.
returns: single float number (or 1D numpy.ndarray if this Fitter contains
multiple data curves) giving the normalized bias statistic
"""
weighted_bias = self.subbasis_weighted_bias(name=name,\
true_curve=true_curve)
normalization_factor = weighted_bias.shape[-1]
if norm_by_dof:
normalization_factor -= self.basis_sum[name].num_basis_vectors
if self.multiple_data_curves:
unnormalized = np.sum(weighted_bias ** 2, axis=1)
else:
unnormalized = np.dot(weighted_bias, weighted_bias)
return unnormalized / normalization_factor
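# Hedged sketch (not part of the original class): for a single data curve with
# norm_by_dof=False, the statistic above is equivalent to
#     delta = true_signal - fitter.subbasis_channel_mean(name='signal')
#     stat = np.sum((delta / fitter.subbasis_channel_error(name='signal')) ** 2) / delta.size
# where the subbasis name 'signal' and the array `true_signal` are hypothetical.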
def bias_score(self, training_sets, max_block_size=2**20,\
num_curves_to_score=None, bases_to_score=None):
"""
Evaluates the candidate basis_sum given the available training sets.
training_sets: dictionary of training_sets indexed by basis name
max_block_size: number of floats in the largest possible training set
block
num_curves_to_score: total number of training set curves to consider
bases_to_score: the names of the subbases to include in the scoring
(all bases are always used, the names not in
bases_to_score simply do not have their
subbasis_bias_statistic calculated/included)
returns: scalar value of Delta
"""
if len(self.basis_sum.names) != len(training_sets):
raise ValueError("There must be the same number of basis sets " +\
"as training sets.")
if (type(bases_to_score) is type(None)) or (not bases_to_score):
bases_to_score = self.basis_sum.names
score = 0.
expanders = [basis.expander for basis in self.basis_sum]
iterator = TrainingSetIterator(training_sets, expanders=expanders,\
max_block_size=max_block_size, mode='add',\
curves_to_return=num_curves_to_score, return_constituents=True)
for (block, constituents) in iterator:
num_channels = block.shape[1]
fitter = Fitter(self.basis_sum, block, self.error, **self.priors)
for basis_to_score in bases_to_score:
true_curve =\
constituents[self.basis_sum.names.index(basis_to_score)]
result = fitter.subbasis_bias_statistic(\
name=basis_to_score, true_curve=true_curve)
score += np.sum(result)
if type(num_curves_to_score) is type(None):
num_curves_to_score =\
np.prod([ts.shape[0] for ts in training_sets])
score = score / (num_curves_to_score * num_channels)
return score
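# Illustrative usage sketch (the subbasis names and training set arrays below are
# hypothetical, not from the original source):
#     score = fitter.bias_score({'signal': signal_training_set,
#         'systematic': systematic_training_set}, num_curves_to_score=1000)
# The dictionary keys must match the names of the subbases in this Fitter's
# basis_sum and each value is a 2D numpy.ndarray of training curves.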
def fill_hdf5_group(self, root_group, data_link=None, error_link=None,\
basis_links=None, expander_links=None, prior_mean_links=None,\
prior_covariance_links=None, save_channel_estimates=False):
"""
Fills the given hdf5 file group with data about the inputs and results
of this Fitter.
root_group: the hdf5 file group to fill (only required argument)
data_link: link to existing data dataset, if it exists (see
create_hdf5_dataset docs for info about accepted formats)
error_link: link to existing error dataset, if it exists (see
create_hdf5_dataset docs for info about accepted formats)
basis_links: list of links to basis functions saved elsewhere (see
create_hdf5_dataset docs for info about accepted formats)
expander_links: list of links to existing saved Expander (see
create_hdf5_dataset docs for info about accepted
formats)
prior_mean_links: dict of links to existing saved prior means (see
create_hdf5_dataset docs for info about accepted
formats)
prior_covariance_links: dict of links to existing saved prior
covariances (see create_hdf5_dataset docs for
info about accepted formats)
save_channel_estimates: if True, channel space estimates are also saved
alongside the parameter space results
"""
self.save_data(root_group, data_link=data_link)
self.save_error(root_group, error_link=error_link)
group = root_group.create_group('sizes')
for name in self.names:
group.attrs[name] = self.sizes[name]
group = root_group.create_group('posterior')
create_hdf5_dataset(group, 'parameter_mean', data=self.parameter_mean)
create_hdf5_dataset(group, 'parameter_covariance',\
data=self.parameter_covariance)
if save_channel_estimates:
create_hdf5_dataset(group, 'channel_mean', data=self.channel_mean)
create_hdf5_dataset(group, 'channel_error', data=self.channel_error)
for name in self.names:
subgroup = group.create_group(name)
subbasis_slice = self.basis_sum.slices_by_name[name]
create_hdf5_dataset(subgroup, 'parameter_covariance',\
link=(group['parameter_covariance'],[subbasis_slice]*2))
mean_slices =\
(((slice(None),) * (self.data.ndim - 1)) + (subbasis_slice,))
create_hdf5_dataset(subgroup, 'parameter_mean',\
link=(group['parameter_mean'],mean_slices))
if save_channel_estimates:
create_hdf5_dataset(subgroup, 'channel_mean',\
data=self.subbasis_channel_mean(name=name))
create_hdf5_dataset(subgroup, 'channel_error',\
data=self.subbasis_channel_error(name=name))
self.save_basis_sum(root_group, basis_links=basis_links,\
expander_links=expander_links)
root_group.attrs['degrees_of_freedom'] = self.degrees_of_freedom
root_group.attrs['BPIC'] = self.BPIC
root_group.attrs['DIC'] = self.DIC
root_group.attrs['AIC'] = self.AIC
root_group.attrs['BIC'] = self.BIC
root_group.attrs['normalized_likelihood_bias_statistic'] =\
self.normalized_likelihood_bias_statistic
root_group.attrs['normalized_bias_statistic'] =\
self.normalized_bias_statistic
self.save_priors(root_group, prior_mean_links=prior_mean_links,\
prior_covariance_links=prior_covariance_links)
if self.has_priors:
root_group.attrs['log_evidence_per_data_channel'] =\
self.log_evidence_per_data_channel
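# Illustrative usage sketch (not in the original class; the file name is a
# placeholder and an hdf5 library such as h5py is assumed to be available):
#     import h5py
#     with h5py.File('fitter_results.hdf5', 'w') as hdf5_file:
#         fitter.fill_hdf5_group(hdf5_file)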
def plot_overlap_matrix(self, title='Overlap matrix', fig=None, ax=None,\
show=True, **kwargs):
"""
Plots the overlap matrix of the total basis.
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
image = ax.imshow(self.basis_overlap_matrix, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_parameter_covariance(self, title='Covariance matrix', fig=None,\
ax=None, show=True, **kwargs):
"""
Plots the posterior parameter covariance matrix.
title: (Optional) the title of the plot. default: 'Covariance matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
image = ax.imshow(self.parameter_covariance, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_subbasis_fit(self, nsigma=1, name=None, which_data=None,\
true_curve=None, subtract_truth=False, shorter_error=None,\
x_values=None, title=None, xlabel='x', ylabel='y', fig=None, ax=None,\
show_noise_level=False, noise_level_alpha=0.5, full_error_alpha=0.2,\
colors='b', full_error_first=True, yscale='linear', show=False):
"""
Plots the fit of the contribution to the data from a given subbasis.
name: (string) name of the subbasis under consideration. if None is
given, the full basis is used.
true_curve: 1D numpy.ndarray of the same length as the basis vectors in
the subbasis
subtract_truth: Boolean which determines whether the residuals of a fit
are plotted or just the curves. Can only be True if
true_curve is given or name is None.
shorter_error: 1D numpy.ndarray of the same length as the vectors of
the subbasis containing the error on the given subbasis
x_values: (Optional) x_values to use for plot
title: (Optional) the title of the plot
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: If True, matplotlib.pyplot.show() is called before this function
returns.
"""
if self.multiple_data_curves and (type(which_data) is type(None)):
which_data = 0
if type(name) is type(None):
mean = self.channel_mean
error = self.channel_error
else:
mean = self.subbasis_channel_mean(name=name)
error = self.subbasis_channel_error(name=name)
if isinstance(colors, basestring):
colors = [colors] * 3
if self.multiple_data_curves:
mean = mean[which_data]
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
if type(x_values) is type(None):
x_values = np.arange(len(mean))
if (type(true_curve) is type(None)) and (type(name) is type(None)):
if self.multiple_data_curves:
true_curve = self.data[which_data]
else:
true_curve = self.data
if (type(true_curve) is type(None)) and subtract_truth:
raise ValueError("Truth cannot be subtracted because it is not " +\
"known. Supply it as the true_curve argument " +\
"if you wish for it to be subtracted.")
if subtract_truth:
to_subtract = true_curve
ax.plot(x_values, np.zeros_like(x_values), color='k', linewidth=2,\
label='true')
else:
to_subtract = np.zeros_like(x_values)
if type(true_curve) is not type(None):
ax.plot(x_values, true_curve, color='k', linewidth=2,\
label='true')
ax.plot(x_values, mean - to_subtract, color=colors[0], linewidth=2,\
label='mean')
if full_error_first:
ax.fill_between(x_values, mean - to_subtract - (nsigma * error),\
mean - to_subtract + (nsigma * error), alpha=full_error_alpha,\
color=colors[1])
if show_noise_level:
if type(shorter_error) is not type(None):
ax.fill_between(x_values,\
mean - to_subtract - (nsigma * shorter_error),\
mean - to_subtract + (nsigma * shorter_error),\
alpha=noise_level_alpha, color=colors[2])
elif len(mean) == self.num_channels:
if self.non_diagonal_noise_covariance:
noise_error = np.sqrt(self.error.diagonal)
ax.fill_between(x_values,\
mean - to_subtract - (nsigma * noise_error),\
mean - to_subtract + (nsigma * noise_error),\
alpha=noise_level_alpha, color=colors[2])
else:
ax.fill_between(x_values,\
mean - to_subtract - (nsigma * self.error),\
mean - to_subtract + (nsigma * self.error),\
alpha=noise_level_alpha, color=colors[2])
if not full_error_first:
ax.fill_between(x_values, mean - to_subtract - (nsigma * error),\
mean - to_subtract + (nsigma * error), alpha=full_error_alpha,\
color=colors[1])
ax.set_yscale(yscale)
ax.set_xlabel(xlabel)
ax.set_ylabel(ylabel)
if type(title) is type(None):
if subtract_truth:
ax.set_title('Fit residual')
else:
ax.set_title('Fit curve')
else:
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_overlap_matrix_block(self, row_name=None, column_name=None,\
title='Overlap matrix', fig=None, ax=None, show=True, **kwargs):
"""
Plots a block of the overlap matrix between the given subbases.
row_name: the (string) name of the subbasis whose parameter number will
be represented by the row of the returned matrix.
column_name: the (string) name of the subbasis whose parameter number
will be represented by the column of the returned matrix
title: (Optional) the title of the plot. default: 'Overlap matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
to_show = self.subbases_overlap_matrix(row_name=row_name,\
column_name=column_name)
image = ax.imshow(to_show, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
def plot_parameter_covariance(self, name=None, title='Covariance matrix',\
fig=None, ax=None, show=True, **kwargs):
"""
Plots the posterior parameter covariance matrix.
name: the (string) name of the subbasis whose parameter number
will be represented by the rows and columns of the returned
matrix. If None, full parameter covariance is plotted.
Default: None
title: (Optional) the title of the plot. default: 'Covariance matrix'
fig: the matplotlib.figure object on which the plot should appear
ax: the matplotlib.axes object on which the plot should appear
show: if True, matplotlib.pyplot.show() is called before this function
returns
**kwargs: keyword arguments to supply to matplotlib.pyplot.imshow()
"""
def_kwargs = {'interpolation': None}
def_kwargs.update(**kwargs)
if (type(fig) is type(None)) and (type(ax) is type(None)):
fig = pl.figure()
ax = fig.add_subplot(111)
to_show = self.subbasis_parameter_covariance(name=name)
image = ax.imshow(to_show, **def_kwargs)
pl.colorbar(image)
ax.set_title(title)
if show:
pl.show()
else:
return ax
|
the-stack_0_15266 | # -*- test-case-name: twisted.test.test_ssl -*-
# Copyright (c) Twisted Matrix Laboratories.
# See LICENSE for details.
"""
This module implements Transport Layer Security (TLS) support for Twisted. It
requires U{PyOpenSSL <https://pypi.python.org/pypi/pyOpenSSL>}.
If you wish to establish a TLS connection, please use one of the following
APIs:
- SSL endpoints for L{servers
<twisted.internet.endpoints.SSL4ServerEndpoint>} and L{clients
<twisted.internet.endpoints.SSL4ClientEndpoint>}
- L{startTLS <twisted.internet.interfaces.ITLSTransport.startTLS>}
- L{connectSSL <twisted.internet.interfaces.IReactorSSL.connectSSL>}
- L{listenSSL <twisted.internet.interfaces.IReactorSSL.listenSSL>}
These APIs all require a C{contextFactory} argument that specifies their
security properties, such as certificate, private key, certificate authorities
to verify the peer, allowed TLS protocol versions, cipher suites, and so on.
The recommended value for this argument is a L{CertificateOptions} instance;
see its documentation for an explanation of the available options.
The C{contextFactory} name is a bit of an anachronism now, as context factories
have been replaced with "connection creators", but these objects serve the same
role.
Be warned that implementing your own connection creator (i.e.: value for the
C{contextFactory}) is both difficult and dangerous; the Twisted team has worked
hard to make L{CertificateOptions}' API comprehensible and unsurprising, and
the Twisted team is actively maintaining it to ensure that it becomes more
secure over time.
If you are really absolutely sure that you want to take on the risk of
implementing your own connection creator based on the pyOpenSSL API, see the
L{server connection creator
<twisted.internet.interfaces.IOpenSSLServerConnectionCreator>} and L{client
connection creator
<twisted.internet.interfaces.IOpenSSLClientConnectionCreator>} interfaces.
Developers using Twisted, please ignore the L{Port}, L{Connector}, and
L{Client} classes defined here, as these are details of certain reactors' TLS
implementations, exposed by accident (and remaining here only for compatibility
reasons). If you wish to establish a TLS connection, please use one of the
APIs listed above.
@note: "SSL" (Secure Sockets Layer) is an antiquated synonym for "TLS"
(Transport Layer Security). You may see these terms used interchangeably
throughout the documentation.
"""
# System imports
from OpenSSL import SSL
from zope.interface import implementer, implementer_only, implementedBy
# Twisted imports
from twisted.internet import tcp, interfaces
supported = True
@implementer(interfaces.IOpenSSLContextFactory)
class ContextFactory:
"""A factory for SSL context objects, for server SSL connections."""
isClient = 0
def getContext(self):
"""Return a SSL.Context object. override in subclasses."""
raise NotImplementedError
class DefaultOpenSSLContextFactory(ContextFactory):
"""
L{DefaultOpenSSLContextFactory} is a factory for server-side SSL context
objects. These objects define certain parameters related to SSL
handshakes and the subsequent connection.
@ivar _contextFactory: A callable which will be used to create new
context objects. This is typically L{OpenSSL.SSL.Context}.
"""
_context = None
def __init__(
self,
privateKeyFileName,
certificateFileName,
sslmethod=SSL.SSLv23_METHOD,
_contextFactory=SSL.Context,
):
"""
@param privateKeyFileName: Name of a file containing a private key
@param certificateFileName: Name of a file containing a certificate
@param sslmethod: The SSL method to use
"""
self.privateKeyFileName = privateKeyFileName
self.certificateFileName = certificateFileName
self.sslmethod = sslmethod
self._contextFactory = _contextFactory
# Create a context object right now. This is to force validation of
# the given parameters so that errors are detected earlier rather
# than later.
self.cacheContext()
def cacheContext(self):
if self._context is None:
ctx = self._contextFactory(self.sslmethod)
# Disallow SSLv2! It's insecure! SSLv3 has been around since
# 1996. It's time to move on.
ctx.set_options(SSL.OP_NO_SSLv2)
ctx.use_certificate_file(self.certificateFileName)
ctx.use_privatekey_file(self.privateKeyFileName)
self._context = ctx
def __getstate__(self):
d = self.__dict__.copy()
del d["_context"]
return d
def __setstate__(self, state):
self.__dict__ = state
def getContext(self):
"""
Return an SSL context.
"""
return self._context
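# Hedged usage sketch (not in the original module; the key/certificate paths and
# the protocol factory are placeholders):
#
#     from twisted.internet import reactor
#     contextFactory = DefaultOpenSSLContextFactory('server.key', 'server.crt')
#     reactor.listenSSL(8443, someProtocolFactory, contextFactory)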
@implementer(interfaces.IOpenSSLContextFactory)
class ClientContextFactory:
"""A context factory for SSL clients."""
isClient = 1
# SSLv23_METHOD allows SSLv2, SSLv3, and TLSv1. We disable SSLv2 below,
# though.
method = SSL.SSLv23_METHOD
_contextFactory = SSL.Context
def getContext(self):
ctx = self._contextFactory(self.method)
# See comment in DefaultOpenSSLContextFactory about SSLv2.
ctx.set_options(SSL.OP_NO_SSLv2)
return ctx
@implementer_only(
interfaces.ISSLTransport,
*[i for i in implementedBy(tcp.Client) if i != interfaces.ITLSTransport],
)
class Client(tcp.Client):
"""
I am an SSL client.
"""
def __init__(self, host, port, bindAddress, ctxFactory, connector, reactor=None):
# tcp.Client.__init__ depends on self.ctxFactory being set
self.ctxFactory = ctxFactory
tcp.Client.__init__(self, host, port, bindAddress, connector, reactor)
def _connectDone(self):
self.startTLS(self.ctxFactory)
self.startWriting()
tcp.Client._connectDone(self)
@implementer(interfaces.ISSLTransport)
class Server(tcp.Server):
"""
I am an SSL server.
"""
def __init__(self, *args, **kwargs):
tcp.Server.__init__(self, *args, **kwargs)
self.startTLS(self.server.ctxFactory)
def getPeerCertificate(self):
# ISSLTransport.getPeerCertificate
raise NotImplementedError("Server.getPeerCertificate")
class Port(tcp.Port):
"""
I am an SSL port.
"""
transport = Server
_type = "TLS"
def __init__(
self, port, factory, ctxFactory, backlog=50, interface="", reactor=None
):
tcp.Port.__init__(self, port, factory, backlog, interface, reactor)
self.ctxFactory = ctxFactory
def _getLogPrefix(self, factory):
"""
Override the normal prefix to include an annotation indicating this is a
port for TLS connections.
"""
return tcp.Port._getLogPrefix(self, factory) + " (TLS)"
class Connector(tcp.Connector):
def __init__(
self, host, port, factory, contextFactory, timeout, bindAddress, reactor=None
):
self.contextFactory = contextFactory
tcp.Connector.__init__(self, host, port, factory, timeout, bindAddress, reactor)
# Force some parameter checking in pyOpenSSL. It's better to fail now
# than after we've set up the transport.
contextFactory.getContext()
def _makeTransport(self):
return Client(
self.host,
self.port,
self.bindAddress,
self.contextFactory,
self,
self.reactor,
)
from twisted.internet._sslverify import (
KeyPair,
DistinguishedName,
DN,
Certificate,
CertificateRequest,
PrivateCertificate,
OpenSSLAcceptableCiphers as AcceptableCiphers,
OpenSSLCertificateOptions as CertificateOptions,
OpenSSLDiffieHellmanParameters as DiffieHellmanParameters,
platformTrust,
OpenSSLDefaultPaths,
VerificationError,
optionsForClientTLS,
ProtocolNegotiationSupport,
protocolNegotiationMechanisms,
trustRootFromCertificates,
TLSVersion,
)
__all__ = [
"ContextFactory",
"DefaultOpenSSLContextFactory",
"ClientContextFactory",
"DistinguishedName",
"DN",
"Certificate",
"CertificateRequest",
"PrivateCertificate",
"KeyPair",
"AcceptableCiphers",
"CertificateOptions",
"DiffieHellmanParameters",
"platformTrust",
"OpenSSLDefaultPaths",
"TLSVersion",
"VerificationError",
"optionsForClientTLS",
"ProtocolNegotiationSupport",
"protocolNegotiationMechanisms",
"trustRootFromCertificates",
]
|
the-stack_0_15268 | from molsysmt._private_tools.exceptions import *
from molsysmt.forms.common_gets import *
import numpy as np
from molsysmt.native.molecular_system import molecular_system_components
from molsysmt._private_tools.files_and_directories import temp_filename
form_name='file:mdcrd'
is_form = {
'file:mdcrd':form_name
}
info=["",""]
has = molecular_system_components.copy()
for ii in ['coordinates', 'box']:
has[ii]=True
info = ["AMBER mdcrd file format","https://ambermd.org/FileFormats.php#trajectory"]
def to_molsysmt_MolSys(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.io.molsys import from_mdcrd as mdcrd_to_molsysmt_MolSys
tmp_item, tmp_molecular_system = mdcrd_to_molsysmt_MolSys(item,
molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def to_molsysmt_Topology(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.io.topology import from_mdcrd as mdcrd_to_molsysmt_Topology
tmp_item, tmp_molecular_system = mdcrd_to_molsysmt_Topology(item,
molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def to_molsysmt_Trajectory(item, molecular_system=None, atom_indices='all', frame_indices='all'):
from molsysmt.native.io.trajectory import from_mdcrd as mdcrd_to_molsysmt_Trajectory
tmp_item, tmp_molecular_system = mdcrd_to_molsysmt_Trajectory(item,
molecular_system=molecular_system, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item
def to_file_mdcrd(item, molecular_system=None, atom_indices='all', frame_indices='all', output_filename=None, copy_if_all=True):
tmp_molecular_system = None
if (atom_indices == 'all') and (frame_indices == 'all'):
if copy_if_all:
tmp_item = extract(item, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item)
else:
tmp_item = item
if molecular_system is not None:
tmp_molecular_system = molecular_system
else:
tmp_item = extract(item, atom_indices=atom_indices, frame_indices=frame_indices, output_filename=output_filename)
if molecular_system is not None:
tmp_molecular_system = molecular_system.combine_with_items(tmp_item, atom_indices=atom_indices, frame_indices=frame_indices)
return tmp_item, tmp_molecular_system
def extract(item, atom_indices='all', frame_indices='all', output_filename=None):
if output_filename is None:
output_filename = temp_filename(extension='mdcrd')
if (atom_indices == 'all') and (frame_indices == 'all'):
raise NotImplementedError()
else:
raise NotImplementedError()
return tmp_item
def merge(item_1, item_2):
raise NotImplementedError
def add(to_item, item):
raise NotImplementedError
def append_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError
def concatenate_frames(item, step=None, time=None, coordinates=None, box=None):
raise NotImplementedError
###### Get
# System
def get_n_frames_from_system (item, indices='all', frame_indices='all'):
raise NotImplementedError
def get_n_atoms_from_system (item, indices='all', frame_indices='all'):
raise NotImplementedError
|
the-stack_0_15269 | # -*- coding: utf-8 -*-
import logging
import torch
import torch.cuda
from beaver.data import build_dataset
from beaver.infer import beam_search
from beaver.loss import WarmAdam, LabelSmoothingLoss
from beaver.model import NMTModel
from beaver.utils import Saver
from beaver.utils import calculate_bleu
from beaver.utils import parseopt, get_device, printing_opt
from beaver.data.post_prob import get_prob_idx
logging.basicConfig(format="%(asctime)s - %(message)s", level=logging.INFO)
opt = parseopt.parse_train_args()
device = get_device()
logging.info("\n" + printing_opt(opt))
saver = Saver(opt)
def valid(model, criterion, valid_dataset, step):
model.eval()
total_loss, total = 0.0, 0
hypothesis, references = [], []
for batch in valid_dataset:
scores = model(batch.src, batch.tgt, batch.probs, batch.idxes)
loss = criterion(scores, batch.tgt)
total_loss += loss.data
total += 1
if opt.tf:
_, predictions = scores.topk(k=1, dim=-1)
else:
predictions = beam_search(opt, model, batch.src, valid_dataset.fields)
hypothesis += [valid_dataset.fields["tgt"].decode(p) for p in predictions]
references += [valid_dataset.fields["tgt"].decode(t) for t in batch.tgt]
bleu = calculate_bleu(hypothesis, references)
logging.info("Valid loss: %.2f\tValid BLEU: %3.2f" % (total_loss / total, bleu))
checkpoint = {"model": model.state_dict(), "opt": opt}
saver.save(checkpoint, step, bleu, total_loss / total)
def train(model, criterion, optimizer, train_dataset, valid_dataset):
total_loss = 0.0
model.zero_grad()
for i, batch in enumerate(train_dataset):
scores = model(batch.src, batch.tgt, batch.probs, batch.idxes)
loss = criterion(scores, batch.tgt)
loss.backward()
total_loss += loss.data
if (i + 1) % opt.grad_accum == 0:
optimizer.step()
model.zero_grad()
if optimizer.n_step % opt.report_every == 0:
mean_loss = total_loss / opt.report_every / opt.grad_accum
logging.info("step: %7d\t loss: %7f" % (optimizer.n_step, mean_loss))
total_loss = 0.0
if optimizer.n_step % opt.save_every == 0:
with torch.set_grad_enabled(False):
valid(model, criterion, valid_dataset, optimizer.n_step)
model.train()
del loss
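# Note on the loop above (editorial comment, not in the original script): gradients
# are accumulated over opt.grad_accum batches before each optimizer step, so the
# effective batch size is roughly the per-batch size times opt.grad_accum.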
def main():
logging.info("Build dataset...")
prob_and_idx = get_prob_idx()
train_dataset = build_dataset(opt, opt.train, opt.vocab, device, prob_and_idx, train=True)
valid_dataset = build_dataset(opt, opt.valid, opt.vocab, device, prob_and_idx, train=False)
fields = valid_dataset.fields = train_dataset.fields
logging.info("Build model...")
pad_ids = {"src": fields["src"].pad_id, "tgt": fields["tgt"].pad_id}
vocab_sizes = {"src": len(fields["src"].vocab), "tgt": len(fields["tgt"].vocab)}
model = NMTModel.load_model(opt, pad_ids, vocab_sizes).to(device)
criterion = LabelSmoothingLoss(opt.label_smoothing, vocab_sizes["tgt"], pad_ids["tgt"]).to(device)
n_step = int(opt.train_from.split("-")[-1]) if opt.train_from else 1
optimizer = WarmAdam(model.parameters(), opt.lr, opt.hidden_size, opt.warm_up, n_step)
logging.info("start training...")
train(model, criterion, optimizer, train_dataset, valid_dataset)
if __name__ == '__main__':
main()
|
the-stack_0_15270 | #!/usr/bin/python2
# -*- coding: utf-8 -*-
# Software License Agreement (BSD License)
#
# Copyright (c) 2012, Willow Garage, Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
# * Neither the name of Willow Garage, Inc. nor the names of its
# contributors may be used to endorse or promote products derived
# from this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
"""This file generates shell code for the setup.SHELL scripts to set environment variables."""
from __future__ import print_function
import argparse
import copy
import errno
import os
import platform
import sys
CATKIN_MARKER_FILE = '.catkin'
system = platform.system()
IS_DARWIN = (system == 'Darwin')
IS_WINDOWS = (system == 'Windows')
PATH_TO_ADD_SUFFIX = ['bin']
if IS_WINDOWS:
# while catkin recommends putting dll's into bin, 3rd party packages often put dll's into lib
# since Windows finds dll's via the PATH variable, prepend it with path to lib
PATH_TO_ADD_SUFFIX.extend([['lib', os.path.join('lib', 'x86_64-linux-gnu')]])
# subfolder of workspace prepended to CMAKE_PREFIX_PATH
ENV_VAR_SUBFOLDERS = {
'CMAKE_PREFIX_PATH': '',
'LD_LIBRARY_PATH' if not IS_DARWIN else 'DYLD_LIBRARY_PATH': ['lib', os.path.join('lib', 'x86_64-linux-gnu')],
'PATH': PATH_TO_ADD_SUFFIX,
'PKG_CONFIG_PATH': [os.path.join('lib', 'pkgconfig'), os.path.join('lib', 'x86_64-linux-gnu', 'pkgconfig')],
'PYTHONPATH': 'lib/python2.7/dist-packages',
}
def rollback_env_variables(environ, env_var_subfolders):
"""
Generate shell code to reset environment variables.
by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH.
This does not cover modifications performed by environment hooks.
"""
lines = []
unmodified_environ = copy.copy(environ)
for key in sorted(env_var_subfolders.keys()):
subfolders = env_var_subfolders[key]
if not isinstance(subfolders, list):
subfolders = [subfolders]
value = _rollback_env_variable(unmodified_environ, key, subfolders)
if value is not None:
environ[key] = value
lines.append(assignment(key, value))
if lines:
lines.insert(0, comment('reset environment variables by unrolling modifications based on all workspaces in CMAKE_PREFIX_PATH'))
return lines
def _rollback_env_variable(environ, name, subfolders):
"""
For each catkin workspace in CMAKE_PREFIX_PATH remove the first entry from env[NAME] matching workspace + subfolder.
:param subfolders: list of str '' or subfoldername that may start with '/'
:returns: the updated value of the environment variable.
"""
value = environ[name] if name in environ else ''
env_paths = [path for path in value.split(os.pathsep) if path]
value_modified = False
for subfolder in subfolders:
if subfolder:
if subfolder.startswith(os.path.sep) or (os.path.altsep and subfolder.startswith(os.path.altsep)):
subfolder = subfolder[1:]
if subfolder.endswith(os.path.sep) or (os.path.altsep and subfolder.endswith(os.path.altsep)):
subfolder = subfolder[:-1]
for ws_path in _get_workspaces(environ, include_fuerte=True, include_non_existing=True):
path_to_find = os.path.join(ws_path, subfolder) if subfolder else ws_path
path_to_remove = None
for env_path in env_paths:
env_path_clean = env_path[:-1] if env_path and env_path[-1] in [os.path.sep, os.path.altsep] else env_path
if env_path_clean == path_to_find:
path_to_remove = env_path
break
if path_to_remove:
env_paths.remove(path_to_remove)
value_modified = True
new_value = os.pathsep.join(env_paths)
return new_value if value_modified else None
def _get_workspaces(environ, include_fuerte=False, include_non_existing=False):
"""
Based on CMAKE_PREFIX_PATH return all catkin workspaces.
:param include_fuerte: The flag if paths starting with '/opt/ros/fuerte' should be considered workspaces, ``bool``
"""
# get all cmake prefix paths
env_name = 'CMAKE_PREFIX_PATH'
value = environ[env_name] if env_name in environ else ''
paths = [path for path in value.split(os.pathsep) if path]
# remove non-workspace paths
workspaces = [path for path in paths if os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE)) or (include_fuerte and path.startswith('/opt/ros/fuerte')) or (include_non_existing and not os.path.exists(path))]
return workspaces
def prepend_env_variables(environ, env_var_subfolders, workspaces):
"""Generate shell code to prepend environment variables for the all workspaces."""
lines = []
lines.append(comment('prepend folders of workspaces to environment variables'))
paths = [path for path in workspaces.split(os.pathsep) if path]
prefix = _prefix_env_variable(environ, 'CMAKE_PREFIX_PATH', paths, '')
lines.append(prepend(environ, 'CMAKE_PREFIX_PATH', prefix))
for key in sorted(key for key in env_var_subfolders.keys() if key != 'CMAKE_PREFIX_PATH'):
subfolder = env_var_subfolders[key]
prefix = _prefix_env_variable(environ, key, paths, subfolder)
lines.append(prepend(environ, key, prefix))
return lines
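# Illustrative note (not part of the generated file): for a workspace at the
# hypothetical path /home/user/catkin_ws/devel, the lines produced above would
# look roughly like
#     export CMAKE_PREFIX_PATH="/home/user/catkin_ws/devel:$CMAKE_PREFIX_PATH"
#     export LD_LIBRARY_PATH="/home/user/catkin_ws/devel/lib:$LD_LIBRARY_PATH"
# On Windows, `set NAME=value` style lines are emitted instead (see assignment()
# and prepend() below).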
def _prefix_env_variable(environ, name, paths, subfolders):
"""
Return the prefix to prepend to the environment variable NAME.
Adding any path in NEW_PATHS_STR without creating duplicate or empty items.
"""
value = environ[name] if name in environ else ''
environ_paths = [path for path in value.split(os.pathsep) if path]
checked_paths = []
for path in paths:
if not isinstance(subfolders, list):
subfolders = [subfolders]
for subfolder in subfolders:
path_tmp = path
if subfolder:
path_tmp = os.path.join(path_tmp, subfolder)
# skip nonexistent paths
if not os.path.exists(path_tmp):
continue
# exclude any path already in env and any path we already added
if path_tmp not in environ_paths and path_tmp not in checked_paths:
checked_paths.append(path_tmp)
prefix_str = os.pathsep.join(checked_paths)
if prefix_str != '' and environ_paths:
prefix_str += os.pathsep
return prefix_str
def assignment(key, value):
if not IS_WINDOWS:
return 'export %s="%s"' % (key, value)
else:
return 'set %s=%s' % (key, value)
def comment(msg):
if not IS_WINDOWS:
return '# %s' % msg
else:
return 'REM %s' % msg
def prepend(environ, key, prefix):
if key not in environ or not environ[key]:
return assignment(key, prefix)
if not IS_WINDOWS:
return 'export %s="%s$%s"' % (key, prefix, key)
else:
return 'set %s=%s%%%s%%' % (key, prefix, key)
def find_env_hooks(environ, cmake_prefix_path):
"""Generate shell code with found environment hooks for the all workspaces."""
lines = []
lines.append(comment('found environment hooks in workspaces'))
generic_env_hooks = []
generic_env_hooks_workspace = []
specific_env_hooks = []
specific_env_hooks_workspace = []
generic_env_hooks_by_filename = {}
specific_env_hooks_by_filename = {}
generic_env_hook_ext = 'bat' if IS_WINDOWS else 'sh'
specific_env_hook_ext = environ['CATKIN_SHELL'] if not IS_WINDOWS and 'CATKIN_SHELL' in environ and environ['CATKIN_SHELL'] else None
# remove non-workspace paths
workspaces = [path for path in cmake_prefix_path.split(os.pathsep) if path and os.path.isfile(os.path.join(path, CATKIN_MARKER_FILE))]
for workspace in reversed(workspaces):
env_hook_dir = os.path.join(workspace, 'etc', 'catkin', 'profile.d')
if os.path.isdir(env_hook_dir):
for filename in sorted(os.listdir(env_hook_dir)):
if filename.endswith('.%s' % generic_env_hook_ext):
# remove previous env hook with same name if present
if filename in generic_env_hooks_by_filename:
i = generic_env_hooks.index(generic_env_hooks_by_filename[filename])
generic_env_hooks.pop(i)
generic_env_hooks_workspace.pop(i)
# append env hook
generic_env_hooks.append(os.path.join(env_hook_dir, filename))
generic_env_hooks_workspace.append(workspace)
generic_env_hooks_by_filename[filename] = generic_env_hooks[-1]
elif specific_env_hook_ext is not None and filename.endswith('.%s' % specific_env_hook_ext):
# remove previous env hook with same name if present
if filename in specific_env_hooks_by_filename:
i = specific_env_hooks.index(specific_env_hooks_by_filename[filename])
specific_env_hooks.pop(i)
specific_env_hooks_workspace.pop(i)
# append env hook
specific_env_hooks.append(os.path.join(env_hook_dir, filename))
specific_env_hooks_workspace.append(workspace)
specific_env_hooks_by_filename[filename] = specific_env_hooks[-1]
env_hooks = generic_env_hooks + specific_env_hooks
env_hooks_workspace = generic_env_hooks_workspace + specific_env_hooks_workspace
count = len(env_hooks)
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_COUNT', count))
for i in range(count):
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d' % i, env_hooks[i]))
lines.append(assignment('_CATKIN_ENVIRONMENT_HOOKS_%d_WORKSPACE' % i, env_hooks_workspace[i]))
return lines
def _parse_arguments(args=None):
parser = argparse.ArgumentParser(description='Generates code blocks for the setup.SHELL script.')
parser.add_argument('--extend', action='store_true', help='Skip unsetting previous environment variables to extend context')
parser.add_argument('--local', action='store_true', help='Only consider this prefix path and ignore other prefix path in the environment')
return parser.parse_known_args(args=args)[0]
if __name__ == '__main__':
try:
try:
args = _parse_arguments()
except Exception as e:
print(e, file=sys.stderr)
sys.exit(1)
if not args.local:
# environment at generation time
CMAKE_PREFIX_PATH = r'/home/arijitnoobstar/UAVProjectileCatcher/devel;/opt/ros/melodic'.split(';')
else:
# don't consider any other prefix path than this one
CMAKE_PREFIX_PATH = []
# prepend current workspace if not already part of CPP
base_path = os.path.dirname(__file__)
# CMAKE_PREFIX_PATH uses forward slash on all platforms, but __file__ is platform dependent
# base_path on Windows contains backward slashes, need to be converted to forward slashes before comparison
if os.path.sep != '/':
base_path = base_path.replace(os.path.sep, '/')
if base_path not in CMAKE_PREFIX_PATH:
CMAKE_PREFIX_PATH.insert(0, base_path)
CMAKE_PREFIX_PATH = os.pathsep.join(CMAKE_PREFIX_PATH)
environ = dict(os.environ)
lines = []
if not args.extend:
lines += rollback_env_variables(environ, ENV_VAR_SUBFOLDERS)
lines += prepend_env_variables(environ, ENV_VAR_SUBFOLDERS, CMAKE_PREFIX_PATH)
lines += find_env_hooks(environ, CMAKE_PREFIX_PATH)
print('\n'.join(lines))
# need to explicitly flush the output
sys.stdout.flush()
except IOError as e:
# and catch potential "broken pipe" if stdout is not writable
# which can happen when piping the output to a file but the disk is full
if e.errno == errno.EPIPE:
print(e, file=sys.stderr)
sys.exit(2)
raise
sys.exit(0)
|
the-stack_0_15271 | import fnmatch
import functools
import typing
import os
import pygears
from pygears import reg
from pygears.core.gear import OutSig
from ...base_resolver import ResolverBase, ResolverTypeError
from pygears.util.fileio import find_in_dirs, save_file
from pygears.conf import inject, Inject
from pygears.hdl import hdlmod
class HierarchicalResolver(ResolverBase):
@inject
def __init__(self, node):
self.node = node
if not node.meta_kwds.get('hdl', {}).get('hierarchical', node.hierarchical):
raise ResolverTypeError
@property
def hdl_path_list(self):
return reg[f'{self.lang}gen/include']
@property
def files(self):
files = [self.file_basename]
if 'hdl' in self.node.meta_kwds:
if 'files' in self.node.meta_kwds['hdl']:
for fn in self.node.meta_kwds['hdl']['files']:
if not os.path.splitext(fn)[-1]:
fn = f'{fn}.{self.lang}'
files.append(fn)
return files
@property
@functools.lru_cache()
def module_name(self):
if find_in_dirs(f'{self.hier_path_name}.{self.lang}',
self.hdl_path_list):
return self.hier_path_name + '_hier'
else:
return self.hier_path_name
@property
def file_basename(self):
return f'{self.module_name}.{self.lang}'
def module_context(self, template_env):
context = {
'pygears': pygears,
'module_name': self.module_name,
'intfs': template_env.port_intfs(self.node),
# 'sigs': [s.name for s in self.node.meta_kwds['signals']],
'sigs': self.node.meta_kwds['signals'],
'params': self.node.params,
'inst': [],
'generics': []
}
for port in context['intfs']:
context[f'_{port["name"]}'] = port
context[f'_{port["name"]}_t'] = port['type']
return context
@property
def params(self):
return {}
def get_hier_module(self, template_env):
context = self.module_context(template_env)
for child in self.node.local_intfs:
hmod = hdlmod(child)
contents = hmod.get_inst(template_env)
if contents:
context['inst'].append(contents)
for child in self.node.child:
for s in child.meta_kwds['signals']:
if isinstance(s, OutSig):
name = child.params['sigmap'][s.name]
context['inst'].append(f'logic [{s.width-1}:0] {name};')
hmod = hdlmod(child)
if hasattr(hmod, 'get_inst'):
contents = hmod.get_inst(template_env)
if contents:
if hmod.traced:
context['inst'].append('/*verilator tracing_on*/')
context['inst'].append(contents)
if hmod.traced:
context['inst'].append('/*verilator tracing_off*/')
return template_env.render_local(__file__, "hier_module.j2", context)
def generate(self, template_env, outdir):
save_file(self.file_basename, outdir,
self.get_hier_module(template_env))
|
the-stack_0_15273 | from app.tweet.adapters.repository import PostgresTweetAggregateRepository
from app.tweet.domain.model import Tweet
from uuid import uuid4
import pytest
class TestSave:
@pytest.mark.asyncio
async def test_save(self, postgres_session):
repo = PostgresTweetAggregateRepository(postgres_session)
aggregate = Tweet.new("Hello", uuid4())
await repo.save(aggregate)
print(await repo.find_by_id(aggregate.id))
class TestFindById:
@pytest.mark.asyncio
async def test_not_found(self, postgres_session):
repo = PostgresTweetAggregateRepository(postgres_session)
result = await repo.find_by_id(uuid4())
assert result is None
|
the-stack_0_15274 | #!/usr/bin/env python
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '../'))
import glob
import math
import shutil
import cv2
import matplotlib.pyplot as plt
import numpy as np
from skimage import img_as_ubyte
from skimage.filters import *
from Preprocess.tools.peakdetect import *
dirList = glob.glob("Input/*.pgm")
# dirList = glob.glob("../Input/P168-Fg016-R-C01-R01-fused.jpg")
# dirList = glob.glob("../Input/P123-Fg002-R-C01-R01-fused.jpg")
# dirList = glob.glob('/Users/Khmer/Downloads/sample-test/run_test/*.pgm')
def threshold_li(image):
"""Return threshold value based on adaptation of Li's Minimum Cross Entropy method.
Parameters
----------
image : (N, M) ndarray
Input image.
Returns
-------
threshold : float
Upper threshold value. All pixels with an intensity higher than
this value are assumed to be foreground.
References
----------
.. [1] Li C.H. and Lee C.K. (1993) "Minimum Cross Entropy Thresholding"
Pattern Recognition, 26(4): 617-625
DOI:10.1016/0031-3203(93)90115-D
.. [2] Li C.H. and Tam P.K.S. (1998) "An Iterative Algorithm for Minimum
Cross Entropy Thresholding" Pattern Recognition Letters, 18(8): 771-776
DOI:10.1016/S0167-8655(98)00057-9
.. [3] Sezgin M. and Sankur B. (2004) "Survey over Image Thresholding
Techniques and Quantitative Performance Evaluation" Journal of
Electronic Imaging, 13(1): 146-165
DOI:10.1117/1.1631315
.. [4] ImageJ AutoThresholder code, http://fiji.sc/wiki/index.php/Auto_Threshold
"""
# Make sure image has more than one value
if np.all(image == image.flat[0]):
raise ValueError("threshold_li is expected to work with images "
"having more than one value. The input image seems "
"to have just one value {0}.".format(image.flat[0]))
# Copy to ensure input image is not modified
image = image.copy()
# Requires positive image (because of log(mean))
immin = np.min(image)
image -= immin
imrange = np.max(image)
tolerance = 20 * imrange / 256
# Calculate the mean gray-level
mean = np.mean(image)
# Initial estimate
new_thresh = mean
old_thresh = new_thresh + 2 * tolerance
# Stop the iterations when the difference between the
# new and old threshold values is less than the tolerance
while abs(new_thresh - old_thresh) > tolerance:
old_thresh = new_thresh
threshold = old_thresh + tolerance # range
# Calculate the means of background and object pixels
mean_back = image[image <= threshold].mean()
# print(mean_back)
mean_obj = image[image > threshold].mean()
# print(mean_obj)
temp = (mean_back - mean_obj) / (np.log(mean_back) - np.log(mean_obj))
if temp < 0:
new_thresh = temp - tolerance
else:
new_thresh = temp + tolerance
# print(threshold + immin)
return threshold + immin
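# Hedged self-check (not part of the original pipeline, values are illustrative):
# apply the custom threshold_li above to a tiny synthetic bimodal image and
# binarize with the returned value.
def _threshold_li_demo():
    img = np.concatenate([np.full(50, 10.0), np.full(50, 200.0)])
    t = threshold_li(img)
    return img > t  # the bright half comes out True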
def rotatedRectWithMaxArea(w, h, angle):
"""
Given a rectangle of size wxh that has been rotated by 'angle' (in
radians), computes the width and height of the largest possible
axis-aligned rectangle (maximal area) within the rotated rectangle.
"""
if w <= 0 or h <= 0:
return 0, 0
width_is_longer = w >= h
side_long, side_short = (w, h) if width_is_longer else (h, w)
# since the solutions for angle, -angle and 180-angle are all the same,
# if suffices to look at the first quadrant and the absolute values of sin,cos:
sin_a, cos_a = abs(math.sin(angle)), abs(math.cos(angle))
if side_short <= 2. * sin_a * cos_a * side_long or abs(sin_a - cos_a) < 1e-10:
# half constrained case: two crop corners touch the longer side,
# the other two corners are on the mid-line parallel to the longer line
x = 0.5 * side_short
wr, hr = (x / sin_a, x / cos_a) if width_is_longer else (x / cos_a, x / sin_a)
else:
# fully constrained case: crop touches all 4 sides
cos_2a = cos_a * cos_a - sin_a * sin_a
wr, hr = (w * cos_a - h * sin_a) / cos_2a, (h * cos_a - w * sin_a) / cos_2a
return wr, hr
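# Hedged numeric check (illustrative values, not from the original script):
# rotating a 100x50 rectangle by 90 degrees swaps its sides, so the largest
# inner axis-aligned rectangle is the full 50x100 box.
def _rotated_rect_demo():
    return rotatedRectWithMaxArea(100, 50, math.radians(90))  # ~(50.0, 100.0)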
def rotate_bound(image, angle):
# CREDIT: https://www.pyimagesearch.com/2017/01/02/rotate-images-correctly-with-opencv-and-python/
(h, w) = image.shape[:2]
(cX, cY) = (w // 2, h // 2)
M = cv2.getRotationMatrix2D((cX, cY), -angle, 1.0)
cos = np.abs(M[0, 0])
sin = np.abs(M[0, 1])
nW = int((h * sin) + (w * cos))
nH = int((h * cos) + (w * sin))
M[0, 2] += (nW / 2) - cX
M[1, 2] += (nH / 2) - cY
return cv2.warpAffine(image, M, (nW, nH))
def rotate_max_area(image, angle):
""" image: cv2 image matrix object
angle: in degree
"""
wr, hr = rotatedRectWithMaxArea(image.shape[1], image.shape[0], math.radians(angle))
rotated = rotate_bound(image, angle)
h, w, _ = rotated.shape
y1 = h // 2 - int(hr / 2)
y2 = y1 + int(hr)
x1 = w // 2 - int(wr / 2)
x2 = x1 + int(wr)
return rotated[y1:y2, x1:x2]
def find_degree(image):
min_score = 999999
degree = 0
for d in range(-6, 7):
rotated_image = rotate_max_area(image, d)
# cv2.imwrite('./tr_' + str(d) + '.jpg', rotated_image)
ri_hist = cv2.reduce(rotated_image, 1, cv2.REDUCE_AVG).reshape(-1)
# plt.plot(ri_hist)
# plt.savefig('./tr_' + str(d) + '_h.jpg')
# plt.clf()
# plt.show()
line_peaks = peakdetect(ri_hist, lookahead=30)
score_ne = num_ne = 0
score_po = num_po = 0
for y in line_peaks[0]:
score_ne -= (y[1] * 1)
num_ne += 1
for y in line_peaks[1]:
score_po += (y[1] * 1)
num_po += 1
score = score_ne / num_ne + score_po / num_po
# print("score: ", score, " degree: ", d)
# print(": ", score_ne / num_ne, " : ", score_po / num_po)
if score < min_score:
degree = d
min_score = score
# print('Degree: ', degree)
rotated_image = rotate_max_area(image, degree)
# plt.imshow(rotated_image, cmap=plt.cm.gray)
# plt.show()
return rotated_image
def separate_cha_2(line):
line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
line_peaks = peakdetect(line_hist, lookahead=20)
Hl, Wl = new_line.shape[:2]
cha = []
# for y in line_peaks[0]:
# plt.plot(y[0], y[1], "r*")
# cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
for y in line_peaks[1]:
cha.append(y[0])
# plt.plot(y[0], y[1], "g*")
cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)
cha.insert(0, 0)
cha.append(Wl)
plt.imshow(new_line, cmap=plt.cm.gray)
plt.show()
return cha
def separate_cha(line):
line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
line_peaks = peakdetect(line_hist, lookahead=25)
Hl, Wl = new_line.shape[:2]
cha = []
# for y in line_peaks[0]:
# plt.plot(y[0], y[1], "r*")
# cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
for y in line_peaks[0]:
if y[1] >= 235:
cha.append(y[0])
# plt.plot(y[0], y[1], "g*")
cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)
cha.insert(0, 0)
cha.append(Wl)
# plt.plot(line_hist)
# plt.show()
# plt.imshow(new_line, cmap=plt.cm.gray)
# plt.show()
return cha
def separate_words(line):
line_hist = cv2.reduce(line, 0, cv2.REDUCE_AVG).reshape(-1)
new_line = cv2.cvtColor(line, cv2.COLOR_GRAY2BGR)
line_peaks = peakdetect(line_hist, lookahead=50)
Hl, Wl = new_line.shape[:2]
words = []
for y in line_peaks[0]:
if y[1] == 255:
words.append(y[0])
# plt.plot(y[0], y[1], "r*")
if y[1] == 255:
cv2.line(new_line, (y[0], 0), (y[0], Hl), (255, 0, 0), 3)
# for y in line_peaks[1]:
# plt.plot(y[0], y[1], "g*")
# if y[1] == 255:
# words.append(y[0])
# cv2.line(new_line, (y[0], 0), (y[0], Hl), (0, 255, 0), 3)
words.insert(0, 0)
words.append(Wl)
# plt.imshow(new_line, cmap=plt.cm.gray)
# plt.show()
return words
def crop_blank(img):
min_x, max_x, min_y, max_y = 0, 0, 0, 0
# for line in img:
# wl = True
# for x in line:
# if x != 255:
# wl = False
th, threshed = cv2.threshold(img, 127, 255, cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
pts = cv2.findNonZero(threshed)
ret = cv2.minAreaRect(pts)
(cx, cy), (w, h), ang = ret
if w < h:
crop = img[int(w):int(h), :]
else:
crop = img[int(h):int(w), :]
# plt.imshow(crop, cmap=plt.cm.gray)
# plt.show()
# if x < y:
# if w < h:
# crop = img[w:h, x:y]
# else:
# crop = img[h:w, x:y]
# else:
# if w < h:
# crop = img[w:h, y:x]
# else:
# crop = img[h:w, y:x]
    return crop
for d in dirList:
image = cv2.imread(d)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
# kernel = np.ones((3, 3), np.float32) / 25
# image = cv2.filter2D(image, -1, kernel)
window_size = 59
thresh_sauvola = threshold_sauvola(image, window_size=window_size, k=0.5)
binary_sauvola = image > thresh_sauvola
# binary_global = image > threshold_triangle(image)
# binary_global = image > threshold_li(image)
# binary_global = image > threshold_minimum(image)
# binary_global = image > threshold_li(image)
binary_global = image > threshold_otsu(image)
cv_image = img_as_ubyte(binary_global)
ret, labels = cv2.connectedComponents(cv_image)
# Map component labels to hue val
label_hue = np.uint8(179 * labels / np.max(labels))
blank_ch = 255 * np.ones_like(label_hue)
labeled_img = cv2.merge([label_hue, blank_ch, blank_ch])
# cvt to BGR for display
labeled_img = cv2.cvtColor(labeled_img, cv2.COLOR_HSV2BGR)
# set bg label to black
labeled_img[label_hue == 0] = 0
# cv2.imwrite('./t1.jpg', cv_image)
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(cv_image, connectivity=4)
sizes = stats[:, -1]
max_label = 1
max_size = sizes[1]
for i in range(2, nb_components):
if sizes[i] > max_size:
max_label = i
max_size = sizes[i]
img2 = np.zeros(output.shape)
img2[output == max_label] = 255
# cv2.imwrite('./t2.jpg', img2)
cv2.imwrite('./tmp.jpg', img2)
tmp = cv2.imread('tmp.jpg')
im_bw = cv2.cvtColor(tmp, cv2.COLOR_RGB2GRAY)
im_bw = 255 - im_bw
nb_components, output, stats, centroids = cv2.connectedComponentsWithStats(im_bw, connectivity=4)
sizes = stats[:, -1]
max_label = 1
max_size = sizes[1]
for i in range(2, nb_components):
if sizes[i] > max_size:
max_label = i
max_size = sizes[i]
img3 = np.zeros(output.shape)
img3[output == max_label] = 255
# cv2.imwrite('./t3.jpg', img3)
s_img_2 = img_as_ubyte(binary_sauvola)
# cv2.imwrite('./t1_2.jpg', s_img_2)
s_img_2[img3 == 255] = 255
# cv2.imwrite('./t4.jpg', s_img_2)
new_img = cv2.cvtColor(s_img_2, cv2.COLOR_GRAY2BGR)
rotated = find_degree(new_img)
rotated = cv2.cvtColor(rotated, cv2.COLOR_RGB2GRAY)
hist = cv2.reduce(rotated, 1, cv2.REDUCE_AVG).reshape(-1)
H, W = rotated.shape[:2]
peaks = peakdetect(hist, lookahead=40)
rotated2 = cv2.cvtColor(rotated, cv2.COLOR_GRAY2BGR)
peak = []
for y in peaks[0]:
peak.append(y[0])
# plt.plot(y[0], y[1], "r*")
cv2.line(rotated2, (0, y[0]), (W, y[0]), (255, 0, 0), 3)
# for y in peaks[1]:
# peak.append(y[0])
# plt.plot(y[0], y[1], "g*")
# cv2.line(rotated, (0, y[0]), (W, y[0]), (0, 255, 0), 3)
# plt.plot(hist)
# plt.savefig('hist.jpg')
# plt.clf()
peak.insert(0, 0)
peak.append(H)
# print(peak)
# plt.plot(hist)
# plt.show()
if not os.path.exists(os.path.splitext('segmentation/' + d.split('/')[-1])[0]):
os.makedirs(os.path.splitext('segmentation/' + d.split('/')[-1])[0])
else:
shutil.rmtree(os.path.splitext('segmentation/' + d.split('/')[-1])[0])
os.makedirs(os.path.splitext('segmentation/' + d.split('/')[-1])[0])
# cv2.imwrite(os.path.join(os.path.splitext(d.split('/')[-1])[0], '_t.jpg'), rotated)
# crop_blank(rotated)
# plt.imshow(rotated2, cmap=plt.cm.gray)
# plt.show()
count_line = 0
for y in range(len(peak) - 1):
if not os.path.exists(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line))):
os.makedirs(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line)))
else:
shutil.rmtree(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line)))
os.makedirs(os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line)))
path = os.path.join(os.path.splitext('segmentation/' + d.split('/')[-1])[0], 'line_' + str(count_line))
crop_img = rotated[peak[y]:peak[y + 1], 0:W]
# print(peak[y], peak[y + 1])
# plt.imshow(crop_img, cmap=plt.cm.gray)
# plt.show()
word_peaks = separate_words(crop_img)
# print(word_peaks)
count_line += 1
for i in range(len(word_peaks) - 1):
new_w = crop_img[:, word_peaks[i]: word_peaks[i + 1]]
os.makedirs(os.path.join(path, 'word_' + str(i)))
cv2.line(rotated2, (word_peaks[i], peak[y]), (word_peaks[i], peak[y + 1]), (0, 0, 255), 3)
# print(y0, y[0], word_peaks[i])
cha_peaks = separate_cha(new_w)
if len(cha_peaks) == 0:
continue
for j in range(len(cha_peaks) - 1):
new_c = new_w[:, cha_peaks[j]: cha_peaks[j + 1]]
cv2.imwrite(os.path.join(os.path.join(path, 'word_' + str(i)), str(j) + '.jpg'),
new_c)
# plt.imshow(rotated2, cmap=plt.cm.gray)
# plt.show()
# cv2.imwrite('./d.jpg', rotated2)
print("Successfully process image " + d.split('/')[-1].split('jpg')[0])
|
the-stack_0_15275 | from rest_framework.permissions import BasePermission
class IsNormalUser(BasePermission):
def has_permission(self, request, view):
# allow all POST requests
if not request.user.is_staff:
if request.method == 'POST' or request.method == 'PUT' or request.method == 'DELETE' \
or request.method == 'PATCH':
return False
# Otherwise, only allow authenticated requests
return request.user and request.user.is_authenticated
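# Hedged usage sketch: the view below is illustrative and not part of the
# original project; it only shows how the permission is attached to a DRF
# view through `permission_classes`.
from rest_framework.views import APIView
from rest_framework.response import Response
class ExampleReadOnlyView(APIView):
    permission_classes = [IsNormalUser]
    def get(self, request):
        # non-staff users may read; write methods are rejected by the permission
        return Response({"detail": "read-only access granted"})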
|
the-stack_0_15276 | import math
import random
import libpyDirtMP as prx
prx.init_random(random.randint(1,999999))
acrobot = prx.two_link_acrobot("acrobot")
simulation_step = 0.01
prx.set_simulation_step(simulation_step)
print("Using simulation_step:", simulation_step)
start_state = [0, 0, 0, 0]
goal_state = [math.pi, 0, 0, 0]
obs_pose_1 = prx.transform()
obs_pose_2 = prx.transform()
obs_pose_3 = prx.transform()
obs_pose_4 = prx.transform()
obs_pose_1.setIdentity()
obs_pose_2.setIdentity()
obs_pose_3.setIdentity()
obs_pose_4.setIdentity()
obs_pose_1.translation(prx.vector( 20, 20,0.5))
obs_pose_2.translation(prx.vector(-20, 20,0.5))
obs_pose_3.translation(prx.vector( 20,-20,0.5))
obs_pose_4.translation(prx.vector(-20,-20,0.5))
b1 = prx.box.create_obstacle("b1", 1., 1., 1., obs_pose_1)
b2 = prx.box.create_obstacle("b2", 1., 1., 1., obs_pose_2)
b3 = prx.box.create_obstacle("b3", 1., 1., 1., obs_pose_3)
b4 = prx.box.create_obstacle("b4", 1., 1., 1., obs_pose_4)
obstacles = [b1, b2, b3, b4]
obs_names = ["b1", "b2", "b3", "b4"]
### To have an obstacle-free environment, uncomment the following lines (and comment the above)
# obstacles = []
# obs_names = []
wm = prx.world_model([acrobot], obstacles)
wm.create_context("context", ["acrobot"], obs_names)
context = wm.get_context("context");
planner = prx.dirt("dirt");
planner_spec = prx.dirt_specification(context.system_group,context.collision_group);
planner_spec.blossom_number = 5
planner_spec.use_pruning = False
def acrobot_distance_function(s1, s2):
cost = 0
s1a0 = s1[0] + prx.PRX_PI
s1a1 = s1[1] + prx.PRX_PI
s1a2 = s1[2]
s1a3 = s1[3]
s2a0 = s2[0] + prx.PRX_PI
s2a1 = s2[1] + prx.PRX_PI
s2a2 = s2[2]
s2a3 = s2[3]
a0 = min((2 * prx.PRX_PI) - abs(s1a0 - s2a0), abs(s1a0 - s2a0));
a1 = min((2 * prx.PRX_PI) - abs(s1a1 - s2a1), abs(s1a1 - s2a1));
a2 = s1a2 - s2a2;
a3 = s1a3 - s2a3;
cost = a0 * a0 + a1 * a1 + a2 * a2 + a3 * a3
return math.sqrt(cost);
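# Hedged sanity check (not part of the original planner setup): the angular
# terms wrap around 2*pi, so two states whose first joint differs by a full
# revolution are at distance ~0.
def _distance_wraparound_check():
    s_a = [0.0, 0.0, 0.0, 0.0]
    s_b = [2 * prx.PRX_PI, 0.0, 0.0, 0.0]
    return acrobot_distance_function(s_a, s_b)  # ~0.0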
planner_spec.distance_function = prx.distance_function.set_df(acrobot_distance_function);
planner_spec.min_control_steps = 1
planner_spec.max_control_steps = 50
# planner_spec.random_seed = random.randint(1,999999);
planner_spec.bnb = True;
planner_query = prx.dirt_query(context.system_group.get_state_space(),context.system_group.get_control_space());
planner_query.start_state = context.system_group.get_state_space().make_point()
planner_query.goal_state = context.system_group.get_state_space().make_point()
context.system_group.get_state_space().copy_point_from_vector(planner_query.start_state, start_state);
context.system_group.get_state_space().copy_point_from_vector(planner_query.goal_state, goal_state);
print("Start State:", planner_query.start_state)
print("Goal State:", planner_query.goal_state)
planner_query.goal_region_radius = 0.5;
planner_query.get_visualization = True;
planner.link_and_setup_spec(planner_spec)
planner.preprocess()
planner.link_and_setup_query(planner_query)
### Note: Python slows down computation ==> more time might be needed
# checker = prx.condition_check("time", 60)
checker = prx.condition_check("iterations", 50000)
print("Resolving query...")
planner.resolve_query(checker)
planner.fulfill_query();
### This part is only to visualize the solution
if (planner_query.get_visualization):
vis_group = prx.three_js_group([acrobot], obstacles)
if ( len(planner_query.solution_traj) != 0 ) :
vis_group.add_vis_infos(prx.info_geometry.FULL_LINE, planner_query.solution_traj, "acrobot/ball", context.system_group.get_state_space(), "0x000000");
timestamp = 0.0
for state in planner_query.solution_traj :
context.system_group.get_state_space().copy_from_point(state);
vis_group.snapshot_state(timestamp)
timestamp += simulation_step
vis_group.output_html("py_output.html");
|
the-stack_0_15278 | import constants
import numpy as np
import MySQLdb
import time
import datetime
import os
CONCEPT_START = "START"
def get_file_prefix():
"""获得有效的文件前缀名"""
from datetime import datetime
now = datetime.now()
return "{}_{}_{}".format(now.year, now.month, now.day)
def init_file():
for i in ["_enfuzzy.csv", "_defuzzy.csv", "_record.csv"]:
if not os.path.exists(get_file_prefix() + i):
with open(get_file_prefix() + i, "w") as f:
with open("default" + i, "r") as fo:
f.write(fo.read())
print("create " + get_file_prefix() + i)
def get_valid_id():
fname = get_file_prefix() + "_enfuzzy.csv"
lid = 0
with open(fname, "r") as f:
for line in f:
lid = line.split(",")[0]
return int(lid) + 1
def record_enfuzzy(var, val, concept):
"""记录模糊化过程"""
fname = get_file_prefix() + "_enfuzzy.csv"
get_id = get_valid_id()
with open(fname, "a") as f:
# ~ print("模糊化:::{},{},{},{},{}".format(get_id, var, val, concept, time.mktime(datetime.datetime.now().timetuple())))
f.write("{},{},{},{},{}\n".format(get_id, var, val, concept, time.mktime(datetime.datetime.now().timetuple())))
return get_id
def record_inference(kid, cond, res):
"""记录推理过程"""
fname = get_file_prefix() + "_record.csv"
with open(fname, "a") as f:
# ~ print("推理:::{},{},{},{}".format(kid, cond, res, time.mktime(datetime.datetime.now().timetuple())))
f.write("{},{},{},{}\n".format(kid, cond, res, time.mktime(datetime.datetime.now().timetuple())))
def record_defuzzy(var, concept, val):
"""记录去模糊化过程"""
fname = get_file_prefix() + "_defuzzy.csv"
with open(fname, "a") as f:
# ~ print("去模糊化:::{},{},{},{}".format(var, concept, val, time.mktime(datetime.datetime.now().timetuple())))
f.write("{},{},{},{}\n".format(var, concept, val, time.mktime(datetime.datetime.now().timetuple())))
def search_defuzzy(result):
if result.count("=") != 1:
return 0
var, val = result.split("=")
fname = get_file_prefix() + "_defuzzy.csv"
data = 0
maxTime = 0
with open(fname, "r") as f:
for line in f:
d = line.rstrip("\n").split(",")
if d[0] == var and d[2] == val:
if eval(d[-1]) > maxTime:
maxTime = eval(d[-1])
data = d
return data
def get_explanation(result):
ans = search_defuzzy(result)
if ans:
return fuzzy_explain(ans)
else:
return "CAN NOT EXPLAIN"
def search_record(concept):
fname = get_file_prefix() + "_record.csv"
cond = 0
maxTime = 0
with open(fname, "r") as f:
for line in f:
d = line.rstrip("\n").split(",")
if d[2] == concept:
if maxTime < eval(d[-1]):
maxTime = eval(d[-1])
cond = d
return cond
def get_enfuzzy(enid):
fname = get_file_prefix() + "_enfuzzy.csv"
with open(fname, "r") as f:
for line in f:
d = line.rstrip("\n").split(",")
if d[0] == enid:
return d
return 0
def fuzzy_explain(ans):
defuzzy = ans[1]
inference_stack = [defuzzy]
knowledge_stack = ["defuzzy_{}->{}".format(defuzzy, ans[2])]
curr_concept = inference_stack[-1]
data = ""
while curr_concept != CONCEPT_START:
        # inference step: walk back through the recorded inference chain
data = search_record(curr_concept)
curr_concept = data[1]
inference_stack.append(curr_concept)
knowledge_stack.append(data[0])
else:
        # fuzzification step
enfuzzy_id = data[0]
enfuzzy_data = get_enfuzzy(enfuzzy_id)
inference_stack.pop(-1)
knowledge_stack.pop(-1)
inference_stack.append(curr_concept)
knowledge_stack.append("enfuzzy_{}:{}->{}".format(enfuzzy_data[1], enfuzzy_data[2], enfuzzy_data[3]))
infer_chain = ""
know_chain = ""
while len(inference_stack) > 0:
infer_chain = infer_chain + inference_stack.pop(-1) + "->"
know = knowledge_stack.pop(-1)
try:
x = eval(know)
if type(x) == int:
                # the entry is a knowledge id
know_chain += "knowledge({})".format(x) + " "
except:
know_chain += know + " "
infer_chain += "END"
know_chain += "END"
return "\n".join([infer_chain, know_chain])
def initialize(csr, dbname, user):
csr.execute("USE " + dbname)
csr.execute("DROP table if EXISTS fdb")
csr.execute("DROP table if EXISTS fdb_traffic")
csr.execute("DROP table if EXISTS fdb_light_time")
csr.execute("DROP table if EXISTS ks")
ctine = "CREATE TABLE IF NOT EXISTS "
csr.execute(ctine + "FDB"
"(ID int NOT NULL AUTO_INCREMENT primary key,"
"linguistic_variable varchar(32) NOT NULL,"
"fuzzy_set int NOT NULL,"
"used int NOT NULL default 0,"
"updtime datetime,"
"administrator varchar(32) default \"%s\")" % user)
csr.execute(ctine + "KS"
"(ID int NOT NULL primary key,"
"concv varchar(32) not null,"
"closeness float(3,2) not null,"
"updtime datetime,"
"administrator varchar(32) default \"{}\")".format(user))
def getDomin(csr, ling_var):
"""
    Get the domain of a linguistic variable.
    :param csr: cursor
    :param ling_var: linguistic variable name (str)
    :return: domain of the linguistic variable (numpy array)
"""
csr.execute("SELECT VALUE from fuzzy_concept_" + ling_var)
return np.array(csr.fetchall()).reshape(1, -1)[0]
def fuzzing(csr, ling_var, val, sigma):
"""
    Triangular fuzzification.
    :param csr: database cursor
    :param ling_var: name of the linguistic variable
    :param val: measured crisp value
    :param sigma: half-width of the triangular membership function
    :return: fuzzy set over the variable's domain
"""
cnt = csr.execute("SELECT LingV FROM sum_lingv WHERE Lingv = '%s'" % ling_var)
if not cnt:
raise Exception("There is no such linguistic variable {} in the knowledge database as given!".format(ling_var))
domin = getDomin(csr, ling_var)
fuzzy_set = 1 - abs(domin - val) / sigma
fuzzy_set[fuzzy_set < 0] = 0
return fuzzy_set
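# Standalone sketch (assumed domain, no database access) of the triangular
# membership computation used above: membership = max(0, 1 - |x - val| / sigma).
def _triangle_membership_demo(val=5, sigma=5):
    domin = np.arange(0, 21)  # hypothetical traffic domain 0..20
    fuzzy_set = 1 - np.abs(domin - val) / sigma
    fuzzy_set[fuzzy_set < 0] = 0
    return fuzzy_set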
def insert_into_FDB(dbname, csr, ling_var, fuzzy_set, c_stack):
"""
    Insert a new fact into the FDB (fact database).
    :param dbname:
    :param csr:
    :param ling_var: (linguistic variable name, SQL type) as (str, str)
    :param fuzzy_set: fuzzy set (array)
:return:
"""
    # if the linguistic variable appears for the first time, create a table for it
ctine = "CREATE TABLE IF NOT EXISTS "
csr.execute(ctine + "FDB_" + ling_var[0] + "("
"value " + ling_var[1] + " NOT NULL,primary key(value))")
csr.execute(
"select count(COLUMN_NAME) from information_schema.COLUMNS where table_schema = '{}' and table_name = 'fdb_{}';".format(
dbname, ling_var[0]))
num = csr.fetchone()[0]
domin = getDomin(csr, ling_var[0])
if num == 1:
for val in domin:
csr.execute("INSERT INTO fdb_" + ling_var[0] + " VALUES({})".format(val))
c_stack.append("{}set{}".format(ling_var[0],num))
    # insert the fact into FDB
suc = csr.execute(
"INSERT INTO fdb(linguistic_variable, fuzzy_set, updtime) values(\"{}\",{},now())".format(ling_var[0], num))
    # insert the fuzzy set into the table of the corresponding linguistic variable
try:
csr.execute("ALTER TABLE fdb_{}".format(ling_var[0]) + " ADD set" + str(num) + " float(3,2) not null")
for ind in range(len(fuzzy_set)):
csr.execute("UPDATE fdb_{}".format(ling_var[0]) + " SET set" + str(num)
+ "={}".format(fuzzy_set[ind]) + "where value={}".format(domin[ind]))
except:
pass
return suc
def getSolution(csr, dbname, solution):
"""
    Try to fetch the solution of the problem from the fact database (fdb).
    :param csr:
    :param dbname:
    :param solution: linguistic variable designated as the problem's solution (str)
    :return: fact ids of the solution
"""
csr.execute("select id from fdb where linguistic_variable = '" + solution + "'")
return csr.fetchall()
def defuzzing(csr, ling_var, fuzzy_set):
"""
    Defuzzification.
    :param ling_var: linguistic variable (str)
    :param fuzzy_set: fuzzy set (numpy array)
    :return: crisp value after defuzzification
"""
fuzzy_set = np.array(fuzzy_set)
domin = getDomin(csr, ling_var)
return domin[(fuzzy_set == fuzzy_set.max())[0]].mean()
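# Standalone sketch (assumed domain, no database access) of the defuzzification
# rule used above: take the mean of the domain points with maximal membership.
def _defuzz_demo():
    domin = np.array([0, 10, 20, 30])
    fuzzy_set = np.array([[0.2, 0.9, 0.9, 0.1]])
    return domin[(fuzzy_set == fuzzy_set.max())[0]].mean()  # -> 15.0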
def getfdb_ling_var(csr, id):
"""
    Get the linguistic variable of a fact from its fact id.
    :param csr:
    :param id: fact id
    :return: linguistic variable of the fact (str)
"""
csr.execute("select linguistic_variable from fdb where id = {}".format(id))
return csr.fetchone()[0]
def getfdbFuzzyset(csr, id):
"""
    Get the fuzzy set of a fact from its fact id.
    :param csr:
    :param id: fact id
    :return: fuzzy set of the fact, as a row vector
"""
csr.execute("select linguistic_variable,fuzzy_set from fdb where id = {}".format(id))
ling_var, setid = csr.fetchone()
csr.execute("select set{} from fdb_{}".format(setid, ling_var))
return np.array(csr.fetchall()).reshape([1, -1])
def getUnusedFact(csr):
"""
    Fetch one unused fact from fdb and mark it as used.
    :param csr:
    :return: fact id, or 0 if no unused fact is left
"""
fact = csr.execute("select id from fdb where used=0")
if fact > 0:
fact = csr.fetchone()[0]
csr.execute("update fdb set used=1 where id = {}".format(fact))
return fact
def calCloseness(csr, ling_var, fid, kid):
"""
    Calculate the closeness between a fact's fuzzy set and a knowledge item's fuzzy concept.
    :param csr:
    :param ling_var: linguistic variable
:param fid: fact id
:param kid: knowledge id
:return: closeness
"""
csr.execute("select set{} from fdb_{}".format(fid, ling_var))
fset = np.array(csr.fetchall()).reshape([1, -1])
csr.execute("select FuzCptA from fuzzy_knowledge where id = {}".format(kid))
kconcpt = csr.fetchone()[0]
csr.execute("select {} from fuzzy_concept_{}".format(kconcpt, ling_var))
kset = np.array(csr.fetchall()).reshape([1, -1])
return 1 - np.linalg.norm(fset - kset) / np.sqrt(fset.size)
# return (np.minimum(fset, kset).max() + 1 - np.maximum(fset, kset).min()) / 2
def calCloseness1(fset, kset):
"""
    Calculate the closeness of two given fuzzy sets.
:param fset: fact set
:param kset: knowledge set
:return: closeness
"""
fset = np.array(fset)
kset = np.array(kset)
return (np.minimum(fset, kset).max() + 1 - np.maximum(fset, kset).min()) / 2
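# Hedged miniature example (values are illustrative): the closeness averages the
# height of the pointwise minimum with one minus the lowest pointwise maximum.
def _closeness_demo():
    fset = [0.0, 0.5, 1.0, 0.5, 0.0]  # hypothetical fact fuzzy set
    kset = [0.0, 0.4, 0.9, 0.6, 0.1]  # hypothetical knowledge fuzzy set
    return calCloseness1(fset, kset)  # (0.9 + 1 - 0.0) / 2 = 0.95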
def fillKS(csr, fid):
"""
    For each conclusion variable, insert into KS the matching knowledge item with the highest closeness to the fact.
:param csr:
:param fid: fact id
:return:
"""
csr.execute("select linguistic_variable from fdb where id={}".format(fid))
fact_ling_var = csr.fetchone()[0]
csr.execute("select id,concv,lambda from fuzzy_knowledge where condv=\"{}\"".format(fact_ling_var))
kidlms = np.array(csr.fetchall())
for kidlm in kidlms:
closeness = calCloseness(csr, fact_ling_var, fid, kidlm[0])
if closeness >= kidlm[2]:
# print("insert into KS values({},\"{}\",{},now())".format(kidlm[0], kidlm[1], closeness))
csr.execute(
"insert into KS(id,concv,closeness,updtime) values({},\"{}\",{},now())".format(kidlm[0], kidlm[1],
closeness))
csr.execute("select * from ks")
csr.execute(
"select KS.id,KS.concv,KS.closeness from KS join (select concv,max(closeness) as mc from KS group by concv) b on "
"KS.concv=b.concv and KS.closeness=b.mc")
kidvs = csr.fetchall()
csr.execute("delete from ks")
concv_set = set()
for kidv in kidvs:
if kidv[1] not in concv_set:
concv_set.add(kidv[1])
csr.execute("insert into ks(ID,concv,closeness,updtime) values({},\"{}\",{},now())".format(*kidv))
def getMat(csr, kid):
"""
    Get the fuzzy relation matrix of a given knowledge item.
    :param csr:
    :param kid: knowledge id
    :return: fuzzy relation matrix
"""
csr.execute("select condv,fuzcptA,concv,fuzcptB from fuzzy_knowledge where id ={}".format(kid))
condv, fuzcptA, concv, fuzcptB = csr.fetchone()
cond_domin = getDomin(csr, condv)
conc_domin = getDomin(csr, concv)
mat = np.zeros([len(cond_domin), len(conc_domin)])
for i in range(mat.shape[0]):
for j in range(mat.shape[1]):
csr.execute("select R from matrix_{}_{} where val1={} and val2={}".format(fuzcptA, fuzcptB, cond_domin[i],
conc_domin[j]))
mat[i, j] = csr.fetchone()[0]
return mat
def hypomul(fset, fuz_mat):
"""
    Hypothetical-reasoning step: max-min composition of a fuzzy set with a fuzzy relation matrix.
    :param fset: fact fuzzy set, row vector
:param fuz_mat: fuzzy matrix
:return:
"""
res = np.zeros(fuz_mat.shape[1])
for i in range(res.size):
res[i] = np.minimum(fset, fuz_mat.T[i]).max()
return res
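# Hedged illustrative check (values are made up): max-min composition of a
# fact fuzzy set with a 2x2 fuzzy relation matrix.
def _hypomul_demo():
    fset = np.array([0.2, 0.8])
    fuz_mat = np.array([[0.5, 0.1],
                        [0.3, 0.9]])
    # column 0: max(min(0.2, 0.5), min(0.8, 0.3)) = 0.3
    # column 1: max(min(0.2, 0.1), min(0.8, 0.9)) = 0.8
    return hypomul(fset, fuz_mat)  # -> array([0.3, 0.8])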
def infer_by_knowledge(dbname, csr, k, fid, c_stack):
"""
    Infer from the current fact using the given knowledge item.
    :param csr:
    :param k: knowledge row taken from KS
:param fid: fact id
:return:
"""
# ling_var = getfdb_ling_var(csr,fid)
fset = getfdbFuzzyset(csr, fid)
fuz_mat = getMat(csr, k[0])
# print(k)
# print("mut")
# print(fset)
# print(fuz_mat)
res_set = hypomul(fset, fuz_mat)
csr.execute("select type from sum_lingv where lingv=\"{}\"".format(k[1]))
lingtype = csr.fetchone()[0]
insert_into_FDB(dbname, csr, (k[1], lingtype), res_set, c_stack)
# print("res", res_set)
def infer_by_ks(dbname, csr, fid, c_stack, k_stack):
"""
    Infer from the fact using every knowledge item currently in KS.
:param csr:
:param fid: fact id
:return:
"""
csr.execute("select * from ks")
ksk = csr.fetchall()
for k in ksk:
infer_by_knowledge(dbname, csr, k, fid, c_stack)
k_stack.append(k[0])
csr.execute("delete from ks where id = {}".format(k[0]))
def infer_by_number_table(conn, dbname, user):
"""
    Read the number table and infer from the traffic volume it stores; the number table holds the traffic counts sent by the lower-level controller.
:param conn:
:param dbname:
:param user:
:return:
"""
# initialize(conn, dbname, user)
# print("Succeeded initializing inference engine!")
csr = conn.cursor()
csr.execute("USE " + dbname)
ling_var = ("traffic", "int")
solution = "light_time"
if csr.execute("select number from number where used = 0 and direction = 'NS'"):
val = csr.fetchone()[0]
csr.execute("update number set used = 1 where number = {} and direction = 'NS'".format(val)) #
lt = infer(dbname, user, csr, ling_var, val, solution)
# try:
csr.execute(
"insert into seconds(direction,number,seconds, updtime) values('{}',{},{},now())".format('NS', val, lt))
csr.execute("commit")
# print("insert into seconds(number,seconds, updtime) values({},{},now())".format(val, lt))
# except:
# print("Error in infer_by_number_table!")
# csr.execute("rollback")
def infer(dbname, user, csr, ling_var, val, solution):
"""
    Main inference-engine procedure.
    :param dbname:
    :param user:
    :param csr: database cursor
    :param ling_var: (linguistic variable name, SQL type) as (str, str)
    :param val: crisp value received from the sensor
    :param solution: linguistic variable designated as the problem's solution
    :return: inference result (crisp value)
"""
stack_1 = ["START"]
stack_2 = []
initialize(csr, dbname, user)
fuzzy_set = fuzzing(csr, ling_var[0], val, 5)
insert_into_FDB(dbname, csr, ling_var, fuzzy_set, stack_1)
# insert_into_FDB(conn, dbname, ling_var, fuzzing(conn, dbname, ling_var[0], 6, 2))
solutions = getSolution(csr, dbname, solution)
while len(solutions) == 0:
fid = getUnusedFact(csr)
if fid > 0:
fillKS(csr, fid)
infer_by_ks(dbname, csr, fid, stack_1, stack_2)
else:
return -1
solutions = getSolution(csr, dbname, solution)
result_fuzzy_set = getfdbFuzzyset(csr, solutions[0][0])
defuzzy_data = round(defuzzing(csr, solution, result_fuzzy_set) / 3, 2)
    # TODO: explainer
# print(stack_1)
# print(stack_2)
# assert len(stack_1)-2 == len(stack_2)
enfuzzy_id = record_enfuzzy(ling_var[0], val, stack_1[1])
stack_2.insert(0, enfuzzy_id)
for i in range(len(stack_1)-1):
record_inference(stack_2[i],stack_1[i],stack_1[i+1])
record_defuzzy("light_time", stack_1[-1], defuzzy_data)
return defuzzy_data
def explain(light_val):
return get_explanation("light_time={}".format(light_val))
if __name__ == "__main__":
init_file()
host = constants.HOST
user = constants.USER
dbname = constants.DB
conn = MySQLdb.connect(host, user, constants.PWD)
solution = "light_time"
ling_var = ("traffic", "int")
csr = conn.cursor()
csr.execute("use " + dbname)
val = 5
d = infer(dbname, user, csr, ling_var, val, solution)
print(val, d)
    print(get_explanation("light_time={}".format(d)))  # call the explainer with an assignment-style query
    print(explain(d))  # call the numeric explainer directly; the two calls are fully equivalent
print(explain(15))
print(explain(25))
# val = np.arange(21)
# data = []
# for v in val:
# initialize(csr, dbname, user)
# d = infer(dbname, user, csr, ling_var, v, solution)
# print(v, solution, "=", d)
# data.append(d)
#
# print(get_explanation("light_time={}".format(data[3])))
|
the-stack_0_15279 | import numpy as np
import scipy.io as sio
import matplotlib.pyplot as plt
from tensorflow.python.keras import models, layers, losses, optimizers, utils
from tensorflow.python.keras import backend as K
def PINet_CIFAR10():
## model
input_shape = [32,32,3]
initial_conv_width=3
initial_stride=1
initial_filters=64
initial_pool_width=3
initial_pool_stride=2
use_global_pooling = True
dropout_rate = 0.2
model_input = layers.Input(shape=input_shape)
x = layers.Conv2D(
128,
initial_conv_width,
strides=initial_stride,
padding="same")(model_input)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.MaxPooling2D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
x = layers.Conv2D(
256,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.MaxPooling2D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
x = layers.Conv2D(
512,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
x = layers.MaxPooling2D(
pool_size=initial_pool_width,
strides=initial_pool_stride,
padding="same")(x)
x = layers.Conv2D(
1024,
initial_conv_width,
strides=initial_stride,
padding="same")(x)
x = layers.BatchNormalization()(x)
x = layers.Activation("relu")(x)
if use_global_pooling:
x = layers.GlobalAveragePooling2D()(x)
x_logits1 = layers.Dense(2500, activation="relu")(x)
x_logits1_reshape = layers.Reshape((1,50,50))(x_logits1)
x_logits1_reshape = layers.Permute((2,3,1))(x_logits1_reshape)
x_logits2 = layers.Conv2DTranspose(
3,
50,
strides=initial_stride,
padding="same")(x_logits1_reshape)
x_logits2 = layers.BatchNormalization()(x_logits2)
x_logits2 = layers.Activation("relu")(x_logits2)
model_output = layers.Flatten()(x_logits2)
model = models.Model(model_input, model_output)
return model |
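# Hedged usage sketch: the optimizer and loss below are illustrative defaults,
# not taken from the original training setup; this only builds, compiles and
# inspects the network.
if __name__ == "__main__":
    model = PINet_CIFAR10()
    model.compile(optimizer="adam", loss="mse")
    model.summary()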
the-stack_0_15280 | """ package aries_staticagent """
from setuptools import setup, find_packages
from version import VERSION
def parse_requirements(filename):
"""Load requirements from a pip requirements file."""
    with open(filename) as req_file:
        lineiter = (line.strip() for line in req_file)
        return [line for line in lineiter if line and not line.startswith("#")]
if __name__ == '__main__':
with open('README.md', 'r') as fh:
LONG_DESCRIPTION = fh.read()
setup(
name='aries-staticagent',
version=VERSION,
author='Daniel Bluhm <[email protected]>, '
'Sam Curren <[email protected]>',
description='Python Static Agent Library and Examples for Aries',
long_description=LONG_DESCRIPTION,
long_description_content_type='text/markdown',
url='https://github.com/hyperledger/aries-staticagent-python',
license='Apache 2.0',
packages=find_packages(),
install_requires=parse_requirements('requirements.txt'),
extras_require={
'test': parse_requirements('requirements.dev.txt')
},
python_requires='>=3.6',
classifiers=[
'Programming Language :: Python :: 3',
'License :: OSI Approved :: Apache Software License',
'Operating System :: OS Independent'
]
)
|
the-stack_0_15282 | # import the necessary packages
import argparse
import cv2
# construct the argument parser and parse the arguments
ap = argparse.ArgumentParser()
ap.add_argument("-i", "--image", required=True, help="Path to the image")
args = vars(ap.parse_args())
# load the image, convert it to grayscale, and blur it slightly
image = cv2.imread(args["image"])
gray = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
blurred = cv2.GaussianBlur(gray, (7, 7), 0)
cv2.imshow("Image", image)
# apply Otsu's automatic thresholding -- Otsu's method automatically
# determines the best threshold value `T` for us
(T, threshInv) = cv2.threshold(blurred, 0, 255,
cv2.THRESH_BINARY_INV | cv2.THRESH_OTSU)
cv2.imshow("Threshold", threshInv)
print("Otsu's thresholding value: {}".format(T))
# finally, we can visualize only the masked regions in the image
cv2.imshow("Output", cv2.bitwise_and(image, image, mask=threshInv))
cv2.waitKey(0) |
the-stack_0_15284 | import os
import re
import foobar
_REPO_DIR = os.path.dirname(os.path.dirname(__file__))
def test_version_number_match_with_changelog():
"""__version__ and CHANGELOG.md match for the latest version number."""
    with open(os.path.join(_REPO_DIR, "CHANGELOG.md")) as changelog_file:
        changelog = changelog_file.read()
# latest version number in changelog = the 1st occurrence of '[x.y.z]'
version_changelog = re.search(r"\[\d+\.\d+\.\d+\]", changelog).group().strip("[]")
assert foobar.__version__ == version_changelog, (
f"Make sure both __version__ ({foobar.__version__}) and "
f"CHANGELOG ({version_changelog}) "
"are updated to match the latest version number"
)
|
the-stack_0_15285 | # This file is dual licensed under the terms of the Apache License, Version
# 2.0, and the BSD License. See the LICENSE file in the root of this repository
# for complete details.
import binascii
import os
import pytest
from cryptography.exceptions import (
AlreadyFinalized,
InvalidSignature,
_Reasons,
)
from cryptography.hazmat.primitives.poly1305 import Poly1305
from ...utils import (
load_nist_vectors,
load_vectors_from_file,
raises_unsupported_algorithm,
)
@pytest.mark.supported(
only_if=lambda backend: not backend.poly1305_supported(),
skip_message="Requires OpenSSL without poly1305 support",
)
def test_poly1305_unsupported(backend):
with raises_unsupported_algorithm(_Reasons.UNSUPPORTED_MAC):
Poly1305(b"0" * 32)
@pytest.mark.supported(
only_if=lambda backend: backend.poly1305_supported(),
skip_message="Requires OpenSSL with poly1305 support",
)
class TestPoly1305:
@pytest.mark.parametrize(
"vector",
load_vectors_from_file(
os.path.join("poly1305", "rfc7539.txt"), load_nist_vectors
),
)
def test_vectors(self, vector, backend):
key = binascii.unhexlify(vector["key"])
msg = binascii.unhexlify(vector["msg"])
tag = binascii.unhexlify(vector["tag"])
poly = Poly1305(key)
poly.update(msg)
assert poly.finalize() == tag
assert Poly1305.generate_tag(key, msg) == tag
Poly1305.verify_tag(key, msg, tag)
def test_key_with_no_additional_references(self, backend):
poly = Poly1305(os.urandom(32))
assert len(poly.finalize()) == 16
def test_raises_after_finalize(self, backend):
poly = Poly1305(b"0" * 32)
poly.finalize()
with pytest.raises(AlreadyFinalized):
poly.update(b"foo")
with pytest.raises(AlreadyFinalized):
poly.finalize()
def test_reject_unicode(self, backend):
poly = Poly1305(b"0" * 32)
with pytest.raises(TypeError):
poly.update("") # type:ignore[arg-type]
with pytest.raises(TypeError):
Poly1305.generate_tag(b"0" * 32, "") # type:ignore[arg-type]
def test_verify(self, backend):
poly = Poly1305(b"0" * 32)
poly.update(b"msg")
tag = poly.finalize()
with pytest.raises(AlreadyFinalized):
poly.verify(b"")
poly2 = Poly1305(b"0" * 32)
poly2.update(b"msg")
poly2.verify(tag)
Poly1305.verify_tag(b"0" * 32, b"msg", tag)
def test_invalid_verify(self, backend):
poly = Poly1305(b"0" * 32)
poly.update(b"msg")
with pytest.raises(InvalidSignature):
poly.verify(b"")
p2 = Poly1305(b"0" * 32)
p2.update(b"msg")
with pytest.raises(InvalidSignature):
p2.verify(b"\x00" * 16)
with pytest.raises(InvalidSignature):
Poly1305.verify_tag(b"0" * 32, b"msg", b"\x00" * 16)
def test_verify_reject_unicode(self, backend):
poly = Poly1305(b"0" * 32)
with pytest.raises(TypeError):
poly.verify("") # type:ignore[arg-type]
with pytest.raises(TypeError):
Poly1305.verify_tag(b"0" * 32, b"msg", "") # type:ignore[arg-type]
def test_invalid_key_type(self, backend):
with pytest.raises(TypeError):
Poly1305(object()) # type:ignore[arg-type]
with pytest.raises(TypeError):
Poly1305.generate_tag(object(), b"msg") # type:ignore[arg-type]
def test_invalid_key_length(self, backend):
with pytest.raises(ValueError):
Poly1305(b"0" * 31)
with pytest.raises(ValueError):
Poly1305.generate_tag(b"0" * 31, b"msg")
with pytest.raises(ValueError):
Poly1305(b"0" * 33)
with pytest.raises(ValueError):
Poly1305.generate_tag(b"0" * 33, b"msg")
def test_buffer_protocol(self, backend):
key = binascii.unhexlify(
b"1c9240a5eb55d38af333888604f6b5f0473917c1402b80099dca5cb"
b"c207075c0"
)
msg = binascii.unhexlify(
b"2754776173206272696c6c69672c20616e642074686520736c69746"
b"87920746f7665730a446964206779726520616e642067696d626c65"
b"20696e2074686520776162653a0a416c6c206d696d7379207765726"
b"52074686520626f726f676f7665732c0a416e6420746865206d6f6d"
b"65207261746873206f757467726162652e"
)
key = bytearray(key)
poly = Poly1305(key)
poly.update(bytearray(msg))
assert poly.finalize() == binascii.unhexlify(
b"4541669a7eaaee61e708dc7cbcc5eb62"
)
assert Poly1305.generate_tag(key, msg) == binascii.unhexlify(
b"4541669a7eaaee61e708dc7cbcc5eb62"
)
|
the-stack_0_15288 | import os, datetime
import csv
import pycurl
import sys
import shutil
from openpyxl import load_workbook
import pandas as pd
import download.box
from io import BytesIO
import numpy as np
from download.box import LifespanBox
verbose = True
snapshotdate = datetime.datetime.today().strftime('%m_%d_%Y')
box_temp='/home/petra/UbWinSharedSpace1/boxtemp' #location of local copy of curated data
box = LifespanBox(cache=box_temp)
redcapconfigfile="/home/petra/UbWinSharedSpace1/ccf-nda-behavioral/PycharmToolbox/.boxApp/redcapconfig.csv"
#grab stuff from corrected and curated
#get list of filenames
##########################
#folderlistlabels=['WashU_HCAorBoth','WashU_HCD', 'UCLA_HCAorBoth','UCLA_HCD', 'UMN_HCAorBoth','UMN_HCD', 'MGH_HCAorBoth','Harvard_HCD']
#folderlistnums= [82804729845, 82804015457,82807223120, 82805124019, 82803665867, 82805151056,82761770877, 82803734267]
#Harvard
Harv=82803734267
Harvattn=96013516511
MGH2=82761770877
MGHattn=96148925420
WashUD=82804015457
WashUDattn=96147128675
WashUA=82804729845
WashUAattn=96149947498
UMNA=82803665867
UMNAattn=96153923311
UMND=82805151056
UMNDattn=96155708581
UCLAA=82807223120
UCLAAattn=96154919803
UCLAD=82805124019
UCLADattn=96162759127
harvcleandata, harvcleanscore=curatedandcorrected(Harv,Harvattn)
mghcleandata, mghcleanscore=curatedandcorrected(MGH2,MGHattn)
washudcleandata,washudcleanscore=curatedandcorrected(WashUD,WashUDattn)
washuacleandata,washuacleanscore=curatedandcorrected(WashUA,WashUAattn)
umnacleandata,umnacleanscore=curatedandcorrected(UMNA,UMNAattn)
umndcleandata,umndcleanscore=curatedandcorrected(UMND,UMNDattn)
uclaacleandata,uclaacleanscore=curatedandcorrected(UCLAA,UCLAAattn)
ucladcleandata,ucladcleanscore=curatedandcorrected(UCLAD,UCLADattn)
###stopped here
harvcleandata.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
#box.update_file(497579203898,box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
harvcleanscore.to_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#box.update_file(497530866864,box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
mghcleandata.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
mghcleanscore.to_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#update box files by hand
washudcleandata.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washudcleanscore.to_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
washuacleandata.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
washuacleanscore.to_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umnacleandata.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umnacleanscore.to_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
umndcleandata.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
umndcleanscore.to_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
uclaacleandata.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
uclaacleanscore.to_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
ucladcleandata.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
ucladcleanscore.to_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
#concatenate cleandata for snapshotdate - putting read_csv here in case not loaded into memory
harvcleandata=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleandata=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleandata=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleandata=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleandata=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleandata=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleandata=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleandata=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Raw_Combined_12_12_2019.csv',header=0,low_memory=False)
allrawdataHCAorBoth=pd.concat([mghcleandata,washuacleandata,umnacleandata,uclaacleandata],axis=0)
allrawdataHCD=pd.concat([harvcleandata,washudcleandata,umndcleandata,ucladcleandata],axis=0)
harvcleanscore=pd.read_csv(box_temp+'/Harvard_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
mghcleanscore=pd.read_csv(box_temp+'/MGH_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washudcleanscore=pd.read_csv(box_temp+'/WashU_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
washuacleanscore=pd.read_csv(box_temp+'/WashU_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umnacleanscore=pd.read_csv(box_temp+'/UMN_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
umndcleanscore=pd.read_csv(box_temp+'/UMN_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
uclaacleanscore=pd.read_csv(box_temp+'/UCLA_HCAorBoth_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
ucladcleanscore=pd.read_csv(box_temp+'/UCLA_HCDonly_Toolbox_Scored_Combined_12_12_2019.csv',header=0,low_memory=False)
allscoresHCAorBoth=pd.concat([mghcleanscore,washuacleanscore,umnacleanscore,uclaacleanscore],axis=0)
allscoresHCD=pd.concat([harvcleanscore,washudcleanscore,umndcleanscore,ucladcleanscore],axis=0)
#make csv
allrawdataHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allrawdataHCD.to_csv(box_temp+'/HCD_Toolbox_Raw_Combined_'+snapshotdate+'.csv')
allscoresHCAorBoth.to_csv(box_temp+'/HCAorBoth_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
allscoresHCD.to_csv(box_temp+'/HCD_Toolbox_Scored_Combined_'+snapshotdate+'.csv')
def curatedandcorrected(curatedfolderid,needsattnfolder):
harvardfiles, harvardfolders=foldercontents(curatedfolderid)
#dont grab files that need attention
harvardfolders=harvardfolders.loc[~(harvardfolders.foldername.str.contains('needs_attention'))]
harvardfiles2, harvardfolders2=folderlistcontents(harvardfolders.foldername,harvardfolders.folder_id)
harvardfiles=pd.concat([harvardfiles,harvardfiles2],axis=0,sort=True)
data4process=harvardfiles.loc[~(harvardfiles.filename.str.upper().str.contains('SCORE')==True)]
scores4process=harvardfiles.loc[harvardfiles.filename.str.upper().str.contains('SCORE')==True]
box.download_files(data4process.file_id)
box.download_files(scores4process.file_id)
#trick the catcontents macro to create catable dataset, but dont actually cat until you remove the
#PINS in the corrected file from the curated file
#step1 - separate data4process/scores4process into corrected and old curated data
cdata=data4process.loc[data4process.filename.str.contains('corrected')]
cscores=scores4process.loc[scores4process.filename.str.contains('corrected')]
olddata=data4process.loc[~(data4process.filename.str.contains('corrected'))]
oldscores=scores4process.loc[~(scores4process.filename.str.contains('corrected'))]
#create catable dataset for corrected data
hdatainitcorr=catcontents(cdata,box_temp)
hscoreinitcorr=catcontents(cscores,box_temp)
#get list of ids in this corrected data #60 for Harvard
corrl=findpairs(hdatainitcorr,hscoreinitcorr) #this is the list of ids in both scored and raw corrected data
#create catable dataset for old curated data
hdatainitold=catcontents(olddata,box_temp)
hscoreinitold=catcontents(oldscores,box_temp)
#remove the data with PINS from corrected
hdatainitoldsub=hdatainitold[~(hdatainitold.PIN.isin(corrl))]
hscoreinitoldsub=hscoreinitold[~(hscoreinitold.PIN.isin(corrl))]
#now cat the two datasets together
hdatainit=pd.concat([hdatainitcorr,hdatainitoldsub],axis=0,sort=True) #these have 60 more unique pins than before...good
hscoreinit=pd.concat([hscoreinitcorr,hscoreinitoldsub],axis=0,sort=True) #these have 60 more than before...good
l=findpairs(hdatainit,hscoreinit) #this is the list of ids in both scored and raw data
#set aside those who arebnt in both and those that are in dlist or slist
notbothdatalist=hdatainit[~(hdatainit.PIN.isin(l))]
notbothscorelist=hscoreinit[~(hscoreinit.PIN.isin(l))]
nbs=list(notbothscorelist.PIN.unique())
nbd=list(notbothdatalist.PIN.unique())
hdatainit2=hdatainit[hdatainit.PIN.isin(l)]
hscoreinit2=hscoreinit[hscoreinit.PIN.isin(l)]
#check that this is same as above -- it is
#hdatainit2qc=hdatainit[~(hdatainit.PIN.isin(nbs+nbd))]
#hscoreinit2qc=hscoreinit[~(hscoreinit.PIN.isin(nbs+nbd))]
#find instrument duplications that are not identical
dlist,slist=findwierdos(hdatainit2,hscoreinit2)
dslist=pd.concat([dlist,slist],axis=0)
wierdlist=list(dslist.PIN.unique())
#set aside those who are in the wierdlist
nonidenticaldupdata=hdatainit2.loc[hdatainit2.PIN.isin(wierdlist)]
nonidenticaldupscore=hscoreinit2.loc[hscoreinit2.PIN.isin(wierdlist)]
wierdd=list(dlist.PIN.unique())
wierds=list(slist.PIN.unique())
#so we have the notinboth lists and the wierdlists
#Already set aside the notinbothlists
#if we exclude any wierdlist PINs from both, this should get rid of everything that isnt one-to-one
hdatainit3=hdatainit2.loc[~(hdatainit2.PIN.isin(wierdlist))]
hscoreinit3=hscoreinit2.loc[~(hscoreinit2.PIN.isin(wierdlist))]
#both have 580 unique ids - make them into a list
l3=findpairs(hdatainit3,hscoreinit3) #this is the list of ids in both scored and raw data
dlist,slist=findwierdos(hdatainit3,hscoreinit3)
    #now delete any identical duplicates; check for issues finding wierdos
if dlist.empty and slist.empty:
hdatainit3=hdatainit3.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
hscoreinit3=hscoreinit3.drop_duplicates(subset={'PIN','Inst'})
else:
print('Found Non-Identical Duplications')
print(dlist)
print(slist)
#export scores and data for all pins in dslist or nbs or nbd with flags
notbothdatalist.to_csv(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv')
notbothscorelist.to_csv(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_notinboth_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_notinboth_Scores_'+snapshotdate+'.csv',needsattnfolder)
nonidenticaldupdata.to_csv(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv')
nonidenticaldupscore.to_csv(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/Toolbox_NonidentDups_Data_'+snapshotdate+'.csv',needsattnfolder)
box.upload_file(box_temp+'/Toolbox_NonidentDups_Scores_'+snapshotdate+'.csv',needsattnfolder)
#last but not least...set aside ids not in REDCap, and IDs that need visit numbers
#get reds from hdatatinit3 (should be same as list from hscoreinit3)
#generate hdatainit4 and hscoreinit4 which is relieved of these ids
hdatainit4=subjectsvisits(hdatainit3)
hscoreinit4=subjectsvisits(hscoreinit3)
mv=hscoreinit4.loc[~(hscoreinit4.visit.isin(['V1','V2','V3','X1','X2','X3']))].copy()
mvs=list(mv.subject.unique()) #list of PINs without visit numbers
    check=subjectpairs(hdatainit4,hscoreinit4) #this number will be fewer because V1 and V2 PINs for same subject only counted once
redids=box.getredcapids()
dfcheck=pd.DataFrame(check,columns=['subject'])
boxids=pd.merge(dfcheck,redids,how='left',on='subject',indicator=True)
reds=list(boxids.loc[boxids._merge=='left_only'].subject) #subjects not in redcap
boxandredcap=boxids.loc[boxids._merge=='both'].subject
    #export the otherwise cleanest data ready for snapshotting as the new updated curated file -- then run this for all sites before concatenating
#write code here - has only ids with visit numbers and one to one scores and data correspondence and no wierd duplications
#but check one last time that hdatainit5 and hscoreinit5 is super clean
hdatainit5=hdatainit4.loc[~(hdatainit4.subject.isin(mvs+reds))]
hscoreinit5=hscoreinit4.loc[~(hscoreinit4.subject.isin(mvs+reds))]
#export the lists of ids and reasons they were excluded
df=pd.DataFrame(columns=['reason','affectedIDs'])
df=df.append({'reason': 'PIN In Scores but not Data', 'affectedIDs': nbs}, ignore_index=True)
df=df.append({'reason': 'PIN In Data but not Scores', 'affectedIDs': nbd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Data', 'affectedIDs': wierdd}, ignore_index=True)
df=df.append({'reason': 'PIN/Instrument Non-identical Duplication in Scores', 'affectedIDs': wierds}, ignore_index=True)
df=df.append({'reason': 'PIN/subject in Scores and Data but missing visit', 'affectedIDs': mvs}, ignore_index=True)
df=df.append({'reason': 'subject in Scores and Data but not REDCap ', 'affectedIDs': reds}, ignore_index=True)
df.to_csv(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv')
box.upload_file(box_temp+'/List_of_IDs_and_Reasons_they_in_these_files_'+snapshotdate+'.csv',needsattnfolder)
return hdatainit5,hscoreinit5
#get subject and visit from a PIN in a dataframe
def subjectsvisits(hdatainit3):
hdatainit3['subject']=hdatainit3.PIN.str.strip().str[:10]
hdatainit3['visit']=''
hdatainit3.loc[hdatainit3.PIN.str.contains('v1',case=False),'visit']='V1'
hdatainit3.loc[hdatainit3.PIN.str.contains('v2',case=False),'visit']='V2'
hdatainit3.loc[hdatainit3.PIN.str.contains('v3',case=False),'visit']='V3'
hdatainit3.loc[hdatainit3.PIN.str.contains('x1',case=False),'visit']='X1'
hdatainit3.loc[hdatainit3.PIN.str.contains('x2',case=False),'visit']='X2'
hdatainit3.loc[hdatainit3.PIN.str.contains('x3',case=False),'visit']='X3'
return hdatainit3
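#hedged miniature check (synthetic PINs, not real HCP data): the first ten
#characters become the subject and the v/x suffix becomes the visit code
def _subjectsvisits_demo():
    df = pd.DataFrame({'PIN': ['HCA1234567_V1', 'HCD7654321_X2']})
    return subjectsvisits(df)[['subject', 'visit']]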
#pull id visit combos that arent in both scores and data files
def findpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.PIN.unique():
if i in hdatainit.PIN.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following PINs in scores but not data:')
print(i)
for i in hdatainit.PIN.unique():
if i in hscoreinit.PIN.unique():
pass
else:
print('the following PINs in data but not scores:')
print(i)
return pinsinboth
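#hedged miniature check (synthetic PINs, not real HCP data): findpairs keeps
#only the PINs present in both the raw-data and score frames and prints the rest
def _findpairs_demo():
    d = pd.DataFrame({'PIN': ['HCA0001_V1', 'HCA0002_V1']})
    s = pd.DataFrame({'PIN': ['HCA0001_V1', 'HCA0003_V1']})
    return findpairs(d, s)  # -> ['HCA0001_V1']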
def subjectpairs(hdatainit,hscoreinit):
pinsinboth=[]
for i in hscoreinit.subject.unique():
if i in hdatainit.subject.unique() and isinstance(i,str):
pinsinboth=pinsinboth+[i]
else:
print('the following subjects in scores but not data:')
print(i)
for i in hdatainit.subject.unique():
if i in hscoreinit.subject.unique():
pass
else:
            print('the following subjects in data but not scores:')
print(i)
return pinsinboth
def findwierdos(hdatainit,hscoreinit):
#compare the two types of sort to identify which files have non-identical duplications
sort1data=hdatainit.drop_duplicates(subset={'PIN','Inst','ItemID','Position'},keep='first')
sort1score=hscoreinit.drop_duplicates(subset={'PIN','Inst'})
sort2data=hdatainit.drop_duplicates(subset=set(hdatainit.columns).difference({'filename','file_id'}))
sort2score=hscoreinit.drop_duplicates(subset=set(hscoreinit.columns).difference({'filename','file_id'}))
s1d=sort1data.groupby('PIN').count()
s2d=sort2data.groupby('PIN').count()
databoth=pd.merge(s1d.reset_index()[['PIN','DeviceID']], s2d.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_data=databoth.loc[databoth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
s1s=sort1score.groupby('PIN').count()
s2s=sort2score.groupby('PIN').count()
scoreboth=pd.merge(s1s.reset_index()[['PIN','DeviceID']], s2s.reset_index()[['PIN','DeviceID']],on=['PIN','DeviceID'],how='outer',indicator=True)
wierd_score=scoreboth.loc[scoreboth._merge!='both'].rename(columns={'DeviceID':'Number of Rows'})
return wierd_data,wierd_score
def catcontents(files,cache_space): #dataframe that has filename and file_id as columns
scoresfiles=files.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.filename:
filepath=os.path.join(cache_space,i)
filenum=scoresfiles.loc[scoresfiles.filename==i,'file_id']
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']=i
temp['file_id']=pd.Series(int(filenum.values[0]),index=temp.index)
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
print(filepath+' wouldnt import')
temp=pd.DataFrame()
temp['filename']=pd.Series(i,index=[0])
temp['file_id']=pd.Series(int(filenum.values[0]),index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def catfromlocal(endpoint_temp,scores2cat): #dataframe that has filenames
scoresfiles=scores2cat.copy()
scoresinit=pd.DataFrame()
for i in scoresfiles.fname:
filepath=os.path.join(endpoint_temp,i)
try:
temp=pd.read_csv(filepath,header=0,low_memory=False)
temp['filename']="endpointmachine/"+i
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
except:
print(filepath+' wouldnt import')
temp=pd.DataFrame()
temp['filename']=pd.Series("endpointmachine/"+i,index=[0])
temp['raw_cat_date']=snapshotdate
scoresinit=pd.concat([scoresinit,temp],axis=0,sort=False)
return scoresinit
def folderlistcontents(folderslabels,folderslist):
bdasfilelist=pd.DataFrame()
bdasfolderlist=pd.DataFrame()
for i in range(len(folderslist)):
print('getting file and folder contents of box folder ' +folderslabels[i])
subfiles,subfolders=foldercontents(folderslist[i]) #foldercontents generates two dfs: a df with names and ids of files and a df with names and ids of folders
bdasfilelist=bdasfilelist.append(subfiles)
bdasfolderlist=bdasfolderlist.append(subfolders)
return bdasfilelist,bdasfolderlist
def foldercontents(folder_id):
filelist=[]
fileidlist=[]
folderlist=[]
folderidlist=[]
WUlist=box.client.folder(folder_id=folder_id).get_items(limit=None, offset=0, marker=None, use_marker=False, sort=None, direction=None, fields=None)
for item in WUlist:
if item.type == 'file':
filelist.append(item.name)
fileidlist.append(item.id)
if item.type == 'folder':
folderlist.append(item.name)
folderidlist.append(item.id)
files=pd.DataFrame({'filename':filelist, 'file_id':fileidlist})
folders=pd.DataFrame({'foldername':folderlist, 'folder_id':folderidlist})
return files,folders
|
the-stack_0_15289 | from guild import batch_util
# Flags
max_trials = 5
batch_fail = False
trials_fail = ""
batch_run = batch_util.batch_run()
proto_flags = batch_run.batch_proto.get("flags") or {}
trials_count = batch_run.get("max_trials") or max_trials
trials_fail_list = [int(s) for s in str(trials_fail).split(",") if s]
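# trials_fail is a comma-separated list of 1-based trial indices that should fail;
# each generated trial copies the proto flags and sets a boolean "fail" flag accordingly.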
for i in range(trials_count):
trial_flags = dict(proto_flags)
trial_flags["fail"] = (i + 1) in trials_fail_list
try:
batch_util.run_trial(batch_run, trial_flags)
except SystemExit as e:
if batch_fail:
print("BATCH FAIL")
raise SystemExit(2)
|
the-stack_0_15290 | #!/usr/bin/env python
# coding: utf-8
# Imports
from luigi.parameter import IntParameter
from luigi import LocalTarget, Task
from luigi.format import UTF8
import datetime
import pandas as pd
import re
import os
from configs.Configurations import Configurations
'''bigrams'''
from gensim.models import Phrases
from collections import Counter
from Preprocessor import Preprocessor
'''Plotting'''
import matplotlib.pyplot as plt
class DataVisualizer(Task):
# Date for Output-File prefix
from datetime import date
date = datetime.datetime.now()
configId = IntParameter(default=0)
# Method to declare the Output-File
def output(self):
prefix = self.date.strftime("%Y-%m-%dT%H%M%S")
return LocalTarget("../output/%s_configID_%s_DataVisualizer.csv" % (prefix, self.configId), format=UTF8)
# Method to define the required Task (Importer)
def requires(self):
return Preprocessor(self.configId)
# Prepare preprocessed data for data evaluation
def run(self):
# use configID from commandline
configs = Configurations().configs[self.configId]
# default values if not set otherwise in configs
n_most_freq_words = 20
n_most_freq_words_per_class = 15
n_most_freq_bigrams_per_class = 15
# set values according to configs
if ("n_most_freq_words" in configs):
n_most_freq_words = configs.get("n_most_freq_words")
if ("n_most_freq_words_per_class" in configs):
n_most_freq_words_per_class = configs.get("n_most_freq_words_per_class")
if ("n_most_freq_bigrams_per_class" in configs):
n_most_freq_bigrams_per_class = configs.get("n_most_freq_bigrams_per_class")
# dictionary holding all data_reports
eval_dict = {}
input_df = pd.read_csv(self.input().path)
cleaned_df = pd.DataFrame(columns=('text', 'cleaned_text', 'url', 'title', 'class'))
# convert document['cleaned_text'] from string to list of words
for index, document in input_df.iterrows():
text = document['cleaned_text']
text = re.sub(r"[',\[\]]", "", text)
wordlist = text.split(" ")
row = [document.text, wordlist, document.url, document.title, document['class']]
cleaned_df.loc[index] = row
# Top n most frequent words for all the articles
cl_text_list = cleaned_df['cleaned_text']
wf = self.word_freq(cl_text_list, n_most_freq_words)
eval_dict['n_frequent_words'] = wf.head(n_most_freq_words)
# Avg word count by category
cleaned_df['word_count'] = cleaned_df['cleaned_text'].apply(self.word_count)
avg_wc = cleaned_df.groupby('class').mean().reset_index()
eval_dict['avg_word_count_per_class'] = avg_wc[['class', 'word_count']]
# Preparing the dataframes
# Splitting the df into the different classes
df_menu = cleaned_df.loc[cleaned_df['class'] == 1]
df_no_menu = cleaned_df.loc[cleaned_df['class'] == 0]
# Top n words by category. Taking bigrams into account
text_menu = df_menu['cleaned_text']
text_no_menu = df_no_menu['cleaned_text']
menu = self.word_freq_bigrams(text_menu, top_n=n_most_freq_words_per_class)
no_menu = self.word_freq_bigrams(text_no_menu, top_n=n_most_freq_words_per_class)
df_wf = pd.concat([menu, no_menu], axis=1)
cols = ['menu', 'count', 'no menu', 'count']
df_wf.columns = cols
eval_dict['n_top_words_per_class'] = df_wf
# Top n bigrams by category
menu_bigrams = self.bigram_freq(text_menu, top_n=n_most_freq_bigrams_per_class)
no_menu_bigrams = self.bigram_freq(text_no_menu, top_n=n_most_freq_bigrams_per_class)
df_bigram_wf = pd.concat([menu_bigrams, no_menu_bigrams], axis=1)
df_bigram_wf.columns = cols
eval_dict['n_top_bigrams_per_class'] = df_bigram_wf
#
# Plot the distribution of word count by article
fig, ax = plt.subplots(1, 2, figsize=(12, 10))
fig.suptitle('Distribution of Word Count by Category', fontsize=15)
bins = 200
ax[0].hist(df_menu['word_count'], bins=bins, color='#41924F')
ax[0].set_title('Menu Category', fontsize=13)
ax[0].set_xlim(0, 150)
ax[1].hist(df_no_menu['word_count'], bins=bins, color='#FFC300')
ax[1].set_title('No Menu Category', fontsize=13)
ax[1].set_xlim(0, 150)
# create data report
data_report = "Data report\n\n"
data_report += "configID: %s\n" % self.configId
#
data_report += "\n"
data_report += "Average word count per class\n"
data_report += str(eval_dict['avg_word_count_per_class'].head())
data_report += "\n"
#
data_report += "\n"
data_report += "Top %s frequent words\n" % n_most_freq_words
data_report += str(eval_dict['n_frequent_words'].head(n_most_freq_words))
data_report += "\n"
#
data_report += "\n"
data_report += "Top %s words by category (Taking bigrams into account)\n" % n_most_freq_words_per_class
data_report += str(eval_dict['n_top_words_per_class'].head(n_most_freq_bigrams_per_class))
data_report += "\n"
#
data_report += "\n"
data_report += "Top %s bigrams by category\n" % n_most_freq_words_per_class
data_report += str(eval_dict['n_top_bigrams_per_class'].head(n_most_freq_bigrams_per_class))
data_report += "\n"
# write report to file
prefix = self.date.strftime("%Y-%m-%dT%H%M%S")
filename = "../data/data_report/configID_%s_%s.txt" % (self.configId, prefix)
os.makedirs(os.path.dirname(filename), exist_ok=True)
f = open(filename, "w")
f.write(data_report)
f.close()
plt.savefig("../data/data_report/word_dist_by_class_%s.png" % prefix)
plt.close(fig)
# Write .csv-File
with self.output().open("w") as out:
input_df.to_csv(out, encoding="utf-8")
def word_count(self, text):
return len(str(text).split(' '))
def word_freq(self, clean_text_list, top_n):
"""
Word Frequency
"""
flat = [item for sublist in clean_text_list for item in sublist]
with_counts = Counter(flat)
top = with_counts.most_common(top_n)
word = [each[0] for each in top]
num = [each[1] for each in top]
return pd.DataFrame([word, num]).T
def bigram_freq(self, clean_text_list, top_n):
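# Detect bigrams with gensim Phrases, keep only the joined "word_word" tokens,
# and return the top_n most frequent ones as a two-column DataFrame.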
bigram_model = Phrases(clean_text_list, min_count=2, threshold=1)
w_bigrams = bigram_model[clean_text_list]
flat_w_bigrams = [item for sublist in w_bigrams for item in sublist]
bigrams = []
for each in flat_w_bigrams:
if '_' in each:
bigrams.append(each)
counts = Counter(bigrams)
top = counts.most_common(top_n)
word = [each[0] for each in top]
num = [each[1] for each in top]
return pd.DataFrame([word, num]).T
def word_freq_bigrams(self, clean_text_list, top_n):
"""
Word Frequency With Bigrams
"""
bigram_model = Phrases(clean_text_list, min_count=2, threshold=1)
w_bigrams = bigram_model[clean_text_list]
flat_w_bigrams = [item for sublist in w_bigrams for item in sublist]
with_counts = Counter(flat_w_bigrams)
top = with_counts.most_common(top_n)
word = [each[0] for each in top]
num = [each[1] for each in top]
return pd.DataFrame([word, num]).T
|
the-stack_0_15293 |
import logging
import os
from chirp.common import conf
from chirp.library import album
from chirp.library import audio_file
class Dropbox(object):
def __init__(self, dropbox_path=None):
dropbox_path = dropbox_path or conf.MUSIC_DROPBOX
self._path = dropbox_path
self._dirs = {}
self._all_files = []
# Scan the path and remember all of the subdirectories and
# the MP3 files that they contain.
for basename in os.listdir(dropbox_path):
child_path = os.path.join(dropbox_path, basename)
if os.path.isdir(child_path):
mp3_names = []
for name in os.listdir(child_path):
# Skip dot-files.
if name.startswith("."):
continue
# Must have the right file extension.
if not name.lower().endswith(".mp3"):
continue
mp3_path = os.path.join(child_path, name)
# Only accept things that look like ordinary files.
if os.path.isfile(mp3_path):
mp3_names.append(name)
self._all_files.append(mp3_path)
self._dirs[child_path] = mp3_names
self._all_albums = None
self._all_tracks = None
def files(self):
return list(self._all_files)
def scan_fast(self):
"""Quickly scan all MP3 files in the dropbox.
Returns:
A dict mapping relative file paths to either audio_file.AudioFile
objects, or to None in the case of a corrupted or unreadable file.
"""
# Note the use of ad-hoc relativization in the path.
return dict(
(mp3_path[len(self._path):], audio_file.scan_fast(mp3_path))
for mp3_path in self._all_files)
def albums(self):
"""Return unstandardized versions of all albums in the dropbox."""
if self._all_albums is None:
self._all_albums = []
for path in sorted(self._dirs):
for au in album.from_directory(path):
self._all_albums.append(au)
yield au
else:
for au in self._all_albums:
yield au
def tracks(self):
"""Do a fast scan and return all tracks in the dropbox."""
if self._all_tracks is None:
self._all_tracks = []
for path in self._dirs:
for alb in album.from_directory(path, fast=True):
self._all_tracks.extend(alb.all_au_files)
return self._all_tracks
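# Illustrative usage only (not part of the original module); assumes conf.MUSIC_DROPBOX
# points at a directory of album subdirectories containing MP3 files:
#
#   dropbox = Dropbox()
#   print(dropbox.files())        # paths of every MP3 found during the scan
#   for au in dropbox.albums():   # unstandardized album objects, one per directory
#       print(au)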
|
the-stack_0_15294 | """
We need a new test framework.
"""
from unittest import TestCase
from slack_entities.entities.channel import Channel
class ChannelTestCase(TestCase):
def test_get(self):
# Getting channel by name
channel_1 = Channel.get(name='test')
# Getting channel by id
channel_2 = Channel.get(id=channel_1.id)
self.assertEqual(channel_1, channel_2)
|
the-stack_0_15295 | from flask import request, redirect, abort, jsonify, url_for
from CTFd.models import db, Solves, Challenges, WrongKeys, Keys, Tags, Files
from CTFd import utils
import os
import boto3
import hashlib
import string
from werkzeug.utils import secure_filename
def clean_filename(c):
if c in string.ascii_letters + string.digits + '-' + '_' + '.':
return True
def get_s3_conn(app):
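# Build a boto3 S3 client from the CTFd config; when no explicit access keys are
# configured, fall back to boto3's default credential chain.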
access_key_id = utils.get_app_config('ACCESS_KEY_ID')
secret_access_key = utils.get_app_config('SECRET_ACCESS_KEY')
if access_key_id and secret_access_key:
client = boto3.client(
's3',
aws_access_key_id=access_key_id,
aws_secret_access_key=secret_access_key
)
bucket = utils.get_app_config('BUCKET')
return client, bucket
else:
client = boto3.client('s3')
bucket = utils.get_app_config('BUCKET')
return client, bucket
def load(app):
def upload_file(file, chalid):
s3, bucket = get_s3_conn(app)
filename = ''.join(list(filter(clean_filename, secure_filename(file.filename).replace(' ', '_'))))
if len(filename) <= 0:
return False
md5hash = hashlib.md5(os.urandom(64)).hexdigest()
key = md5hash + '/' + filename
s3.upload_fileobj(file, bucket, key)
db_f = Files(chalid, key)
db.session.add(db_f)
db.session.commit()
return db_f.id, (md5hash + '/' + filename)
def delete_file(filename):
s3, bucket = get_s3_conn(app)
f = Files.query.filter_by(id=filename).first_or_404()
key = f.location
s3.delete_object(Bucket=bucket, Key=key)
db.session.delete(f)
db.session.commit()
return True
def file_handler(path):
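# Replaces CTFd's default file view (registered below on app.view_functions): instead
# of serving uploads from disk, permitted requests are redirected to a presigned S3 URL.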
f = Files.query.filter_by(location=path).first_or_404()
chal = Challenges.query.filter_by(id=f.chal).first()
s3, bucket = get_s3_conn(app)
if utils.is_admin() or chal is None:
key = f.location
url = s3.generate_presigned_url('get_object', Params = {
'Bucket': bucket,
'Key': key, })
return redirect(url)
if utils.user_can_view_challenges():
if not utils.ctftime():
if not utils.view_after_ctf():
abort(403)
if chal.hidden:
abort(403)
key = f.location
url = s3.generate_presigned_url('get_object', Params = {
'Bucket': bucket,
'Key': key, })
return redirect(url)
else:
return redirect(url_for('auth.login'))
utils.upload_file = upload_file
utils.delete_file = delete_file
app.view_functions['views.file_handler'] = file_handler
|
the-stack_0_15296 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Test merkleblock fetch/validation
#
from test_framework.test_framework import DankcoinTestFramework
from test_framework.util import *
class MerkleBlockTest(DankcoinTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 4
def setup_network(self):
self.nodes = []
# Nodes 0/1 are "wallet" nodes
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(1, self.options.tmpdir, ["-debug"]))
# Nodes 2/3 are used for testing
self.nodes.append(start_node(2, self.options.tmpdir, ["-debug"]))
self.nodes.append(start_node(3, self.options.tmpdir, ["-debug", "-txindex"]))
connect_nodes(self.nodes[0], 1)
connect_nodes(self.nodes[0], 2)
connect_nodes(self.nodes[0], 3)
self.is_network_split = False
self.sync_all()
def run_test(self):
print("Mining blocks...")
self.nodes[0].generate(105)
self.sync_all()
chain_height = self.nodes[1].getblockcount()
assert_equal(chain_height, 105)
assert_equal(self.nodes[1].getbalance(), 0)
assert_equal(self.nodes[2].getbalance(), 0)
node0utxos = self.nodes[0].listunspent(1)
tx1 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid1 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx1)["hex"])
tx2 = self.nodes[0].createrawtransaction([node0utxos.pop()], {self.nodes[1].getnewaddress(): 49.99})
txid2 = self.nodes[0].sendrawtransaction(self.nodes[0].signrawtransaction(tx2)["hex"])
assert_raises(JSONRPCException, self.nodes[0].gettxoutproof, [txid1])
self.nodes[0].generate(1)
blockhash = self.nodes[0].getblockhash(chain_height + 1)
self.sync_all()
txlist = []
blocktxn = self.nodes[0].getblock(blockhash, True)["tx"]
txlist.append(blocktxn[1])
txlist.append(blocktxn[2])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1])), [txid1])
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2], blockhash)), txlist)
txin_spent = self.nodes[1].listunspent(1).pop()
tx3 = self.nodes[1].createrawtransaction([txin_spent], {self.nodes[0].getnewaddress(): 49.98})
self.nodes[0].sendrawtransaction(self.nodes[1].signrawtransaction(tx3)["hex"])
self.nodes[0].generate(1)
self.sync_all()
txid_spent = txin_spent["txid"]
txid_unspent = txid1 if txin_spent["txid"] != txid1 else txid2
# We can't find the block from a fully-spent tx
assert_raises(JSONRPCException, self.nodes[2].gettxoutproof, [txid_spent])
# ...but we can if we specify the block
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_spent], blockhash)), [txid_spent])
# ...or if the first tx is not fully-spent
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid_unspent])), [txid_unspent])
try:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid1, txid2])), txlist)
except JSONRPCException:
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[2].gettxoutproof([txid2, txid1])), txlist)
# ...or if we have a -txindex
assert_equal(self.nodes[2].verifytxoutproof(self.nodes[3].gettxoutproof([txid_spent])), [txid_spent])
if __name__ == '__main__':
MerkleBlockTest().main()
|
the-stack_0_15298 | import os
import logging
from django.conf import settings
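# Write log output to logs/web.log in the parent directory of this file's directory.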
log_path = os.path.join(os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'logs'), 'web.log')
# Create the logger
logger = logging.getLogger()
logger.setLevel(logging.DEBUG)
# logger.propagate = 0
formatter = logging.Formatter('%(asctime)s - %(filename)s[line:%(lineno)d] - %(levelname)s - %(message)s')
consoleHandler = logging.StreamHandler()
consoleHandler.setFormatter(formatter)
consoleHandler.setLevel(logging.DEBUG)
# Create a handler that writes to the log file
fileHandler = logging.FileHandler(log_path, mode='w')
fileHandler.setFormatter(formatter)
fileHandler.setLevel(logging.INFO)
if settings.DEBUG:
fileHandler.setLevel(logging.DEBUG)
else:
fileHandler.setLevel(logging.INFO)
if settings.DEBUG:
logger.addHandler(consoleHandler)
logger.addHandler(fileHandler) |
the-stack_0_15300 | # Copyright 2020 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from yaml import dump, load, Dumper, Loader
from data_validation import cli_tools, clients, consts, jellyfish_distance
from data_validation.config_manager import ConfigManager
from data_validation.data_validation import DataValidation
def _get_arg_config_file(args):
"""Return String yaml config file path."""
if not args.config_file:
raise ValueError("YAML Config File was not supplied.")
return args.config_file
def _get_yaml_config_from_file(config_file_path):
"""Return Dict of yaml validation data."""
with open(config_file_path, "r") as yaml_file:
yaml_configs = load(yaml_file.read(), Loader=Loader)
return yaml_configs
def get_aggregate_config(args, config_manager):
"""Return list of formated aggregation objects.
Args:
config_manager (ConfigManager): Validation config manager instance.
"""
aggregate_configs = [config_manager.build_config_count_aggregate()]
if args.count:
col_args = None if args.count == "*" else cli_tools.get_arg_list(args.count)
aggregate_configs += config_manager.build_config_column_aggregates(
"count", col_args, None
)
if args.sum:
col_args = None if args.sum == "*" else cli_tools.get_arg_list(args.sum)
aggregate_configs += config_manager.build_config_column_aggregates(
"sum", col_args, consts.NUMERIC_DATA_TYPES
)
if args.avg:
col_args = None if args.avg == "*" else cli_tools.get_arg_list(args.avg)
aggregate_configs += config_manager.build_config_column_aggregates(
"avg", col_args, consts.NUMERIC_DATA_TYPES
)
if args.min:
col_args = None if args.min == "*" else cli_tools.get_arg_list(args.min)
aggregate_configs += config_manager.build_config_column_aggregates(
"min", col_args, consts.NUMERIC_DATA_TYPES
)
if args.max:
col_args = None if args.max == "*" else cli_tools.get_arg_list(args.max)
aggregate_configs += config_manager.build_config_column_aggregates(
"max", col_args, consts.NUMERIC_DATA_TYPES
)
return aggregate_configs
def build_config_from_args(args, config_manager):
"""Return config manager object ready to execute.
Args:
config_manager (ConfigManager): Validation config manager instance.
"""
config_manager.append_aggregates(get_aggregate_config(args, config_manager))
if config_manager.validation_type in [
consts.GROUPED_COLUMN_VALIDATION,
consts.ROW_VALIDATION,
]:
grouped_columns = cli_tools.get_arg_list(args.grouped_columns)
config_manager.append_query_groups(
config_manager.build_config_grouped_columns(grouped_columns)
)
if config_manager.validation_type in [consts.ROW_VALIDATION]:
primary_keys = cli_tools.get_arg_list(args.primary_keys, default_value=[])
config_manager.append_primary_keys(
config_manager.build_config_grouped_columns(primary_keys)
)
# TODO(GH#18): Add query filter config logic
return config_manager
def build_config_managers_from_args(args):
"""Return a list of config managers ready to execute."""
configs = []
config_type = args.type
source_conn = cli_tools.get_connection(args.source_conn)
target_conn = cli_tools.get_connection(args.target_conn)
labels = cli_tools.get_labels(args.labels)
result_handler_config = None
if args.bq_result_handler:
result_handler_config = cli_tools.get_result_handler(
args.bq_result_handler, args.service_account
)
elif args.result_handler_config:
result_handler_config = cli_tools.get_result_handler(
args.result_handler_config, args.service_account
)
filter_config = []
if args.filters:
filter_config = cli_tools.get_filters(args.filters)
source_client = clients.get_data_client(source_conn)
target_client = clients.get_data_client(target_conn)
threshold = args.threshold if args.threshold else 0.0
is_filesystem = True if source_conn["source_type"] == "FileSystem" else False
tables_list = cli_tools.get_tables_list(
args.tables_list, default_value=[], is_filesystem=is_filesystem
)
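# Build one ConfigManager per table pair so each table's validation is configured
# and executed independently.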
for table_obj in tables_list:
config_manager = ConfigManager.build_config_manager(
config_type,
source_conn,
target_conn,
source_client,
target_client,
table_obj,
labels,
threshold,
result_handler_config=result_handler_config,
filter_config=filter_config,
verbose=args.verbose,
)
configs.append(build_config_from_args(args, config_manager))
return configs
def build_config_managers_from_yaml(args):
"""Returns List[ConfigManager] instances ready to be executed."""
config_managers = []
config_file_path = _get_arg_config_file(args)
yaml_configs = _get_yaml_config_from_file(config_file_path)
source_conn = cli_tools.get_connection(yaml_configs[consts.YAML_SOURCE])
target_conn = cli_tools.get_connection(yaml_configs[consts.YAML_TARGET])
source_client = clients.get_data_client(source_conn)
target_client = clients.get_data_client(target_conn)
for config in yaml_configs[consts.YAML_VALIDATIONS]:
config[consts.CONFIG_SOURCE_CONN] = source_conn
config[consts.CONFIG_TARGET_CONN] = target_conn
config[consts.CONFIG_RESULT_HANDLER] = yaml_configs[consts.YAML_RESULT_HANDLER]
config_manager = ConfigManager(
config, source_client, target_client, verbose=args.verbose
)
config_managers.append(config_manager)
return config_managers
def _compare_match_tables(source_table_map, target_table_map):
"""Return dict config object from matching tables."""
# TODO(dhercher): evaluate if improved comparison and score cutoffs should be used.
table_configs = []
target_keys = target_table_map.keys()
for source_key in source_table_map:
target_key = jellyfish_distance.extract_closest_match(
source_key, target_keys, score_cutoff=0.8
)
if target_key is None:
continue
table_config = {
consts.CONFIG_SCHEMA_NAME: source_table_map[source_key][
consts.CONFIG_SCHEMA_NAME
],
consts.CONFIG_TABLE_NAME: source_table_map[source_key][
consts.CONFIG_TABLE_NAME
],
consts.CONFIG_TARGET_SCHEMA_NAME: target_table_map[target_key][
consts.CONFIG_SCHEMA_NAME
],
consts.CONFIG_TARGET_TABLE_NAME: target_table_map[target_key][
consts.CONFIG_TABLE_NAME
],
}
table_configs.append(table_config)
return table_configs
def get_table_map(client, allowed_schemas=None):
"""Return dict with searchable keys for table matching."""
table_map = {}
table_objs = clients.get_all_tables(client, allowed_schemas=allowed_schemas)
for table_obj in table_objs:
table_key = ".".join([t for t in table_obj if t])
table_map[table_key] = {
consts.CONFIG_SCHEMA_NAME: table_obj[0],
consts.CONFIG_TABLE_NAME: table_obj[1],
}
return table_map
def find_tables_using_string_matching(args):
"""Return JSON String with matched tables for use in validations."""
source_conn = cli_tools.get_connection(args.source_conn)
target_conn = cli_tools.get_connection(args.target_conn)
source_client = clients.get_data_client(source_conn)
target_client = clients.get_data_client(target_conn)
allowed_schemas = cli_tools.get_arg_list(args.allowed_schemas)
source_table_map = get_table_map(source_client, allowed_schemas=allowed_schemas)
target_table_map = get_table_map(target_client)
table_configs = _compare_match_tables(source_table_map, target_table_map)
return json.dumps(table_configs)
def run_raw_query_against_connection(args):
"""Return results of raw query for adhoc usage."""
conn = cli_tools.get_connection(args.conn)
client = clients.get_data_client(conn)
with client.raw_sql(args.query, results=True) as cur:
return cur.fetchall()
def convert_config_to_yaml(args, config_managers):
"""Return dict objects formatted for yaml validations.
Args:
config_managers (list[ConfigManager]): List of config manager instances.
"""
yaml_config = {
consts.YAML_SOURCE: args.source_conn,
consts.YAML_TARGET: args.target_conn,
consts.YAML_RESULT_HANDLER: config_managers[0].result_handler_config,
consts.YAML_VALIDATIONS: [],
}
for config_manager in config_managers:
yaml_config[consts.YAML_VALIDATIONS].append(
config_manager.get_yaml_validation_block()
)
return yaml_config
def run_validation(config_manager, verbose=False):
"""Run a single validation.
Args:
config_manager (ConfigManager): Validation config manager instance.
verbose (bool): Validation setting to log queries run.
"""
validator = DataValidation(
config_manager.config,
validation_builder=None,
result_handler=None,
verbose=verbose,
)
validator.execute()
def run_validations(args, config_managers):
"""Run and manage a series of validations.
Args:
config_managers (list[ConfigManager]): List of config manager instances.
"""
# TODO(issue/31): Add parallel execution logic
for config_manager in config_managers:
run_validation(config_manager, verbose=args.verbose)
def store_yaml_config_file(args, config_managers):
"""Build a YAML config file fromt he supplied configs.
Args:
config_managers (list[ConfigManager]): List of config manager instances.
"""
config_file_path = _get_arg_config_file(args)
yaml_configs = convert_config_to_yaml(args, config_managers)
yaml_config_str = dump(yaml_configs, Dumper=Dumper)
with open(config_file_path, "w") as yaml_file:
yaml_file.write(yaml_config_str)
def run(args):
""" """
config_managers = build_config_managers_from_args(args)
if args.config_file:
store_yaml_config_file(args, config_managers)
else:
run_validations(args, config_managers)
def run_connections(args):
""" Run commands related to connection management."""
if args.connect_cmd == "list":
cli_tools.list_connections()
elif args.connect_cmd == "add":
conn = cli_tools.get_connection_config_from_args(args)
# Test getting a client to validate connection details
_ = clients.get_data_client(conn)
cli_tools.store_connection(args.connection_name, conn)
else:
raise ValueError(f"Connections Argument '{args.connect_cmd}' is not supported")
def main():
# Create Parser and Get Deployment Info
args = cli_tools.get_parsed_args()
if args.command == "run":
run(args)
elif args.command == "connections":
run_connections(args)
elif args.command == "run-config":
config_managers = build_config_managers_from_yaml(args)
run_validations(args, config_managers)
elif args.command == "find-tables":
print(find_tables_using_string_matching(args))
elif args.command == "query":
print(run_raw_query_against_connection(args))
else:
raise ValueError(f"Positional Argument '{args.command}' is not supported")
if __name__ == "__main__":
main()
|
the-stack_0_15303 | from manimlib import *
import networkx as nx
from .algo_vgroup import *
from .algo_node import *
import queue
class AlgoSegTreeNode(object):
def __init__(self, id, l, r, v, left=None, right=None):
self.l = l
self.r = r
self.v = v
self.id = id
self.left = left
self.right = right
class AlgoSegTree(AlgoVGroup):
def __init__(self, scene, datas = [], **kwargs):
self.datas = datas
self.arrows = {}
self.node_objs = {}
self.scene = scene
self.edges = []
self.nodes = []
super().__init__(**kwargs)
self.build_id = 0
self.root = self.build(datas, 0, len(datas)-1)
self.travel_to_nodes(self.root)
self.init_networkx(self.nodes, self.edges)
for k in self.nodes:
n = AlgoNode(str(k["data"]))
p = self.get_node_pos(k["id"])
n.shift(p)
self.node_objs[k["id"]] = n
self.add(n)
for k in self.edges:
self.add_edge_internal(k[0], k[1])
self.center()
def get_build_id(self):
self.build_id += 1
return self.build_id
def travel_to_nodes(self, root):
q = []
q.append(root)
while len(q)>0:
p = q.pop(0)
self.nodes.append({"id":p.id, "data": p.v})
if p.left:
self.edges.append([p.id, p.left.id])
q.append(p.left)
if p.right:
self.edges.append([p.id, p.right.id])
q.append(p.right)
def hide_all(self):
for k in self.node_objs:
self.remove(self.node_objs[k])
for k in self.arrows:
self.remove(self.arrows[k])
def show_node(self, id):
n = self.get_node(id)
self.scene.play(FadeIn(n))
def show_edge(self, i, j):
a = self.arrows[(i, j)]
self.scene.play(FadeIn(a))
def build(self, datas, l, r):
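# Recursively build a segment tree over datas[l..r]: leaves hold single values and
# each internal node stores the sum of its two children.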
if l == r:
return AlgoSegTreeNode(self.get_build_id(), l, r, datas[l])
m = math.floor((l+r)/2)
left = self.build(datas, l, m)
right = self.build(datas, m+1, r)
val = left.v+right.v
return AlgoSegTreeNode(self.get_build_id(), l, r, val, left, right)
def init_networkx(self, nodes, edges):
self.g = nx.Graph()
for k in nodes:
self.g.add_node(k["id"])
for k in edges:
self.g.add_edge(*k)
self.pos_infos = nx.nx_agraph.graphviz_layout(self.g, prog='dot', args='-Grankdir="TB"')
def get_node_pos(self, k):
p = self.pos_infos[k]
ratio = 60
return [p[0]/ratio, p[1]/ratio, 0]
def clear_edges(self):
self.g.clear_edges()
for k in self.arrows:
self.scene.play(FadeOut(k, run_time=0.3))
self.arrows = []
def add_edge_internal(self, i, j):
color = "#6e6e6c"
if i == j:
a = Arrow(self.get_node_pos(i), self.get_node_pos(j)+RIGHT*0.1,
path_arc=np.pi*1.5, thickness=0.03, color=color).scale(0.5)
self.arrows[(i, j)] = a
a.set_color(color)
self.add(a)
else:
a = Arrow(self.get_node_pos(i), self.get_node_pos(j), thickness=0.03, color=color)
self.add(a)
a.set_color(color)
self.arrows[(i, j)] = a
def add_edge(self, i, j):
color = "#6e6e6c"
ni = self.node_objs[i]
nj = self.node_objs[j]
if i == j:
a = Arrow(ni.get_center(), nj.get_center()+RIGHT*0.1, path_arc=np.pi*1.5, thickness=0.03, color=color).scale(0.5)
self.arrows[(i, j)] = a
self.add(a)
self.scene.play(FadeIn(a), run_time=0.3)
else:
a = Arrow(ni.get_center(), nj.get_center(), thickness=0.03, color=color)
self.add(a)
self.arrows[(i, j)] = a
self.scene.play(FadeIn(a), run_time=0.3)
def remove_edge(self, i, j):
a = self.arrows[(i, j)]
self.remove(a)
self.scene.play(FadeOut(a))
del self.arrows[(i, j)]
def get_edge(self, i, j):
return self.arrows[(i, j)]
def get_node(self, i):
return self.node_objs[i]
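# Illustrative usage inside a manim Scene (assumed from this class's constructor;
# not part of the original file):
#
#   tree = AlgoSegTree(self, datas=[1, 3, 5, 7])
#   self.add(tree)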
|
the-stack_0_15305 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""Read pages from Parameter namespace in old wiki and save in new wiki."""
import pywikibot
import pywikibot.pagegenerators
FAC_NS = 102
MACHINE_NS = 116
TABLE_NS = 118
old_site = pywikibot.Site('en', 'siriuswiki')
new_site = pywikibot.Site('en', 'newsiriuswiki')
comment = ('Moving from local wiki')
g = pywikibot.pagegenerators.AllpagesPageGenerator(
site=old_site,
namespace=FAC_NS
)
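# Collect every page title in the FAC namespace of the old wiki, then copy each
# page's wikitext to the same title on the new wiki.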
titles = []
for page in g:
titles.append(page.title())
print(titles)
for title in titles:
old_page = pywikibot.Page(old_site, title)
new_page = pywikibot.Page(new_site, title)
new_page.text = old_page.text
try:
# print(new_page.text)
new_page.save(comment)
except pywikibot.PageNotSaved:
print("Error saving %s" % title)
|
the-stack_0_15307 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
#
# Copyright 2014 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Discovery document tests
Unit tests for objects created from discovery documents.
"""
from __future__ import absolute_import
import six
__author__ = "[email protected] (Joe Gregorio)"
from six import BytesIO, StringIO
from six.moves.urllib.parse import urlparse, parse_qs
import copy
import datetime
import httplib2
import itertools
import json
import os
import pickle
import re
import sys
import unittest2 as unittest
from collections import defaultdict
from parameterized import parameterized
import mock
import google.auth.credentials
from google.auth.transport import mtls
from google.auth.exceptions import MutualTLSChannelError
import google_auth_httplib2
import google.api_core.exceptions
from googleapiclient.discovery import _fix_up_media_upload
from googleapiclient.discovery import _fix_up_method_description
from googleapiclient.discovery import _fix_up_parameters
from googleapiclient.discovery import _urljoin
from googleapiclient.discovery import build
from googleapiclient.discovery import build_from_document
from googleapiclient.discovery import DISCOVERY_URI
from googleapiclient.discovery import key2param
from googleapiclient.discovery import MEDIA_BODY_PARAMETER_DEFAULT_VALUE
from googleapiclient.discovery import MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE
from googleapiclient.discovery import ResourceMethodParameters
from googleapiclient.discovery import STACK_QUERY_PARAMETERS
from googleapiclient.discovery import STACK_QUERY_PARAMETER_DEFAULT_VALUE
from googleapiclient.discovery import V1_DISCOVERY_URI
from googleapiclient.discovery import V2_DISCOVERY_URI
from googleapiclient.discovery_cache import DISCOVERY_DOC_MAX_AGE
from googleapiclient.discovery_cache.base import Cache
from googleapiclient.errors import HttpError
from googleapiclient.errors import InvalidJsonError
from googleapiclient.errors import MediaUploadSizeError
from googleapiclient.errors import ResumableUploadError
from googleapiclient.errors import UnacceptableMimeTypeError
from googleapiclient.errors import UnknownApiNameOrVersion
from googleapiclient.errors import UnknownFileType
from googleapiclient.http import build_http
from googleapiclient.http import BatchHttpRequest
from googleapiclient.http import HttpMock
from googleapiclient.http import HttpMockSequence
from googleapiclient.http import MediaFileUpload
from googleapiclient.http import MediaIoBaseUpload
from googleapiclient.http import MediaUpload
from googleapiclient.http import MediaUploadProgress
from googleapiclient.http import tunnel_patch
from googleapiclient.model import JsonModel
from googleapiclient.schema import Schemas
from oauth2client import GOOGLE_TOKEN_URI
from oauth2client.client import OAuth2Credentials, GoogleCredentials
from googleapiclient import _helpers as util
import uritemplate
DATA_DIR = os.path.join(os.path.dirname(__file__), "data")
def assertUrisEqual(testcase, expected, actual):
"""Test that URIs are the same, up to reordering of query parameters."""
expected = urlparse(expected)
actual = urlparse(actual)
testcase.assertEqual(expected.scheme, actual.scheme)
testcase.assertEqual(expected.netloc, actual.netloc)
testcase.assertEqual(expected.path, actual.path)
testcase.assertEqual(expected.params, actual.params)
testcase.assertEqual(expected.fragment, actual.fragment)
expected_query = parse_qs(expected.query)
actual_query = parse_qs(actual.query)
for name in list(expected_query.keys()):
testcase.assertEqual(expected_query[name], actual_query[name])
for name in list(actual_query.keys()):
testcase.assertEqual(expected_query[name], actual_query[name])
def assert_discovery_uri(testcase, actual, service_name, version, discovery):
"""Assert that discovery URI used was the one that was expected
for a given service and version."""
params = {"api": service_name, "apiVersion": version}
expanded_requested_uri = uritemplate.expand(discovery, params)
assertUrisEqual(testcase, expanded_requested_uri, actual)
def validate_discovery_requests(testcase, http_mock, service_name, version, discovery):
"""Validates that there have > 0 calls to Http Discovery
and that LAST discovery URI used was the one that was expected
for a given service and version."""
testcase.assertTrue(len(http_mock.request_sequence) > 0)
if len(http_mock.request_sequence) > 0:
actual_uri = http_mock.request_sequence[-1][0]
assert_discovery_uri(testcase, actual_uri, service_name, version, discovery)
def datafile(filename):
return os.path.join(DATA_DIR, filename)
def read_datafile(filename, mode="r"):
with open(datafile(filename), mode=mode) as f:
return f.read()
class SetupHttplib2(unittest.TestCase):
def test_retries(self):
# Merely loading googleapiclient.discovery should set the RETRIES to 1.
self.assertEqual(1, httplib2.RETRIES)
class Utilities(unittest.TestCase):
def setUp(self):
self.zoo_root_desc = json.loads(read_datafile("zoo.json", "r"))
self.zoo_get_method_desc = self.zoo_root_desc["methods"]["query"]
self.zoo_animals_resource = self.zoo_root_desc["resources"]["animals"]
self.zoo_insert_method_desc = self.zoo_animals_resource["methods"]["insert"]
self.zoo_schema = Schemas(self.zoo_root_desc)
def test_key2param(self):
self.assertEqual("max_results", key2param("max-results"))
self.assertEqual("x007_bond", key2param("007-bond"))
def _base_fix_up_parameters_test(self, method_desc, http_method, root_desc, schema):
self.assertEqual(method_desc["httpMethod"], http_method)
method_desc_copy = copy.deepcopy(method_desc)
self.assertEqual(method_desc, method_desc_copy)
parameters = _fix_up_parameters(
method_desc_copy, root_desc, http_method, schema
)
self.assertNotEqual(method_desc, method_desc_copy)
for param_name in STACK_QUERY_PARAMETERS:
self.assertEqual(
STACK_QUERY_PARAMETER_DEFAULT_VALUE, parameters[param_name]
)
for param_name, value in six.iteritems(root_desc.get("parameters", {})):
self.assertEqual(value, parameters[param_name])
return parameters
def test_fix_up_parameters_get(self):
parameters = self._base_fix_up_parameters_test(
self.zoo_get_method_desc, "GET", self.zoo_root_desc, self.zoo_schema
)
# Since http_method is 'GET'
self.assertFalse("body" in parameters)
def test_fix_up_parameters_insert(self):
parameters = self._base_fix_up_parameters_test(
self.zoo_insert_method_desc, "POST", self.zoo_root_desc, self.zoo_schema
)
body = {"description": "The request body.", "type": "object", "$ref": "Animal"}
self.assertEqual(parameters["body"], body)
def test_fix_up_parameters_check_body(self):
dummy_root_desc = {}
dummy_schema = {
"Request": {
"properties": {
"description": "Required. Dummy parameter.",
"type": "string",
}
}
}
no_payload_http_method = "DELETE"
with_payload_http_method = "PUT"
invalid_method_desc = {"response": "Who cares"}
valid_method_desc = {
"request": {"key1": "value1", "key2": "value2", "$ref": "Request"}
}
parameters = _fix_up_parameters(
invalid_method_desc, dummy_root_desc, no_payload_http_method, dummy_schema
)
self.assertFalse("body" in parameters)
parameters = _fix_up_parameters(
valid_method_desc, dummy_root_desc, no_payload_http_method, dummy_schema
)
self.assertFalse("body" in parameters)
parameters = _fix_up_parameters(
invalid_method_desc, dummy_root_desc, with_payload_http_method, dummy_schema
)
self.assertFalse("body" in parameters)
parameters = _fix_up_parameters(
valid_method_desc, dummy_root_desc, with_payload_http_method, dummy_schema
)
body = {
"description": "The request body.",
"type": "object",
"$ref": "Request",
"key1": "value1",
"key2": "value2",
}
self.assertEqual(parameters["body"], body)
def test_fix_up_parameters_optional_body(self):
# Request with no parameters
dummy_schema = {"Request": {"properties": {}}}
method_desc = {"request": {"$ref": "Request"}}
parameters = _fix_up_parameters(method_desc, {}, "POST", dummy_schema)
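# No assertion here: the test only verifies that a request schema with no
# properties is handled without raising when parameters are fixed up.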
def _base_fix_up_method_description_test(
self,
method_desc,
initial_parameters,
final_parameters,
final_accept,
final_max_size,
final_media_path_url,
):
fake_root_desc = {
"rootUrl": "http://root/",
"servicePath": "fake/",
"mtlsRootUrl": "http://root/",
}
fake_path_url = "fake-path/"
accept, max_size, media_path_url = _fix_up_media_upload(
method_desc, fake_root_desc, fake_path_url, initial_parameters
)
self.assertEqual(accept, final_accept)
self.assertEqual(max_size, final_max_size)
self.assertEqual(media_path_url, final_media_path_url)
self.assertEqual(initial_parameters, final_parameters)
def test_fix_up_media_upload_no_initial_invalid(self):
invalid_method_desc = {"response": "Who cares"}
self._base_fix_up_method_description_test(
invalid_method_desc, {}, {}, [], 0, None
)
def test_fix_up_media_upload_no_initial_valid_minimal(self):
valid_method_desc = {"mediaUpload": {"accept": []}}
final_parameters = {
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
self._base_fix_up_method_description_test(
valid_method_desc,
{},
final_parameters,
[],
0,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_media_upload_no_initial_valid_full(self):
valid_method_desc = {"mediaUpload": {"accept": ["*/*"], "maxSize": "10GB"}}
final_parameters = {
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
ten_gb = 10 * 2 ** 30
self._base_fix_up_method_description_test(
valid_method_desc,
{},
final_parameters,
["*/*"],
ten_gb,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_media_upload_with_initial_invalid(self):
invalid_method_desc = {"response": "Who cares"}
initial_parameters = {"body": {}}
self._base_fix_up_method_description_test(
invalid_method_desc, initial_parameters, initial_parameters, [], 0, None
)
def test_fix_up_media_upload_with_initial_valid_minimal(self):
valid_method_desc = {"mediaUpload": {"accept": []}}
initial_parameters = {"body": {}}
final_parameters = {
"body": {},
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
self._base_fix_up_method_description_test(
valid_method_desc,
initial_parameters,
final_parameters,
[],
0,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_media_upload_with_initial_valid_full(self):
valid_method_desc = {"mediaUpload": {"accept": ["*/*"], "maxSize": "10GB"}}
initial_parameters = {"body": {}}
final_parameters = {
"body": {},
"media_body": MEDIA_BODY_PARAMETER_DEFAULT_VALUE,
"media_mime_type": MEDIA_MIME_TYPE_PARAMETER_DEFAULT_VALUE,
}
ten_gb = 10 * 2 ** 30
self._base_fix_up_method_description_test(
valid_method_desc,
initial_parameters,
final_parameters,
["*/*"],
ten_gb,
"http://root/upload/fake/fake-path/",
)
def test_fix_up_method_description_get(self):
result = _fix_up_method_description(
self.zoo_get_method_desc, self.zoo_root_desc, self.zoo_schema
)
path_url = "query"
http_method = "GET"
method_id = "bigquery.query"
accept = []
max_size = 0
media_path_url = None
self.assertEqual(
result, (path_url, http_method, method_id, accept, max_size, media_path_url)
)
def test_fix_up_method_description_insert(self):
result = _fix_up_method_description(
self.zoo_insert_method_desc, self.zoo_root_desc, self.zoo_schema
)
path_url = "animals"
http_method = "POST"
method_id = "zoo.animals.insert"
accept = ["image/png"]
max_size = 1024
media_path_url = "https://www.googleapis.com/upload/zoo/v1/animals"
self.assertEqual(
result, (path_url, http_method, method_id, accept, max_size, media_path_url)
)
def test_urljoin(self):
# We want to exhaustively test various URL combinations.
simple_bases = ["https://www.googleapis.com", "https://www.googleapis.com/"]
long_urls = ["foo/v1/bar:custom?alt=json", "/foo/v1/bar:custom?alt=json"]
long_bases = [
"https://www.googleapis.com/foo/v1",
"https://www.googleapis.com/foo/v1/",
]
simple_urls = ["bar:custom?alt=json", "/bar:custom?alt=json"]
final_url = "https://www.googleapis.com/foo/v1/bar:custom?alt=json"
for base, url in itertools.product(simple_bases, long_urls):
self.assertEqual(final_url, _urljoin(base, url))
for base, url in itertools.product(long_bases, simple_urls):
self.assertEqual(final_url, _urljoin(base, url))
def test_ResourceMethodParameters_zoo_get(self):
parameters = ResourceMethodParameters(self.zoo_get_method_desc)
param_types = {
"a": "any",
"b": "boolean",
"e": "string",
"er": "string",
"i": "integer",
"n": "number",
"o": "object",
"q": "string",
"rr": "string",
}
keys = list(param_types.keys())
self.assertEqual(parameters.argmap, dict((key, key) for key in keys))
self.assertEqual(parameters.required_params, [])
self.assertEqual(sorted(parameters.repeated_params), ["er", "rr"])
self.assertEqual(parameters.pattern_params, {"rr": "[a-z]+"})
self.assertEqual(
sorted(parameters.query_params),
["a", "b", "e", "er", "i", "n", "o", "q", "rr"],
)
self.assertEqual(parameters.path_params, set())
self.assertEqual(parameters.param_types, param_types)
enum_params = {"e": ["foo", "bar"], "er": ["one", "two", "three"]}
self.assertEqual(parameters.enum_params, enum_params)
def test_ResourceMethodParameters_zoo_animals_patch(self):
method_desc = self.zoo_animals_resource["methods"]["patch"]
parameters = ResourceMethodParameters(method_desc)
param_types = {"name": "string"}
keys = list(param_types.keys())
self.assertEqual(parameters.argmap, dict((key, key) for key in keys))
self.assertEqual(parameters.required_params, ["name"])
self.assertEqual(parameters.repeated_params, [])
self.assertEqual(parameters.pattern_params, {})
self.assertEqual(parameters.query_params, [])
self.assertEqual(parameters.path_params, set(["name"]))
self.assertEqual(parameters.param_types, param_types)
self.assertEqual(parameters.enum_params, {})
class Discovery(unittest.TestCase):
def test_discovery_http_is_closed(self):
http = HttpMock(datafile("malformed.json"), {"status": "200"})
service = build("plus", "v1", credentials=mock.sentinel.credentials)
http.close.assert_called_once()
class DiscoveryErrors(unittest.TestCase):
def test_tests_should_be_run_with_strict_positional_enforcement(self):
try:
plus = build("plus", "v1", None, static_discovery=False)
self.fail("should have raised a TypeError exception over missing http=.")
except TypeError:
pass
def test_failed_to_parse_discovery_json(self):
self.http = HttpMock(datafile("malformed.json"), {"status": "200"})
try:
plus = build("plus", "v1", http=self.http, cache_discovery=False, static_discovery=False)
self.fail("should have raised an exception over malformed JSON.")
except InvalidJsonError:
pass
def test_unknown_api_name_or_version(self):
http = HttpMockSequence(
[
({"status": "404"}, read_datafile("zoo.json", "rb")),
({"status": "404"}, read_datafile("zoo.json", "rb")),
]
)
with self.assertRaises(UnknownApiNameOrVersion):
plus = build("plus", "v1", http=http, cache_discovery=False)
def test_credentials_and_http_mutually_exclusive(self):
http = HttpMock(datafile("plus.json"), {"status": "200"})
with self.assertRaises(ValueError):
build("plus", "v1", http=http, credentials=mock.sentinel.credentials, static_discovery=False)
def test_credentials_file_and_http_mutually_exclusive(self):
http = HttpMock(datafile("plus.json"), {"status": "200"})
with self.assertRaises(ValueError):
build(
"plus",
"v1",
http=http,
client_options=google.api_core.client_options.ClientOptions(
credentials_file="credentials.json"
),
static_discovery=False,
)
def test_credentials_and_credentials_file_mutually_exclusive(self):
with self.assertRaises(google.api_core.exceptions.DuplicateCredentialArgs):
build(
"plus",
"v1",
credentials=mock.sentinel.credentials,
client_options=google.api_core.client_options.ClientOptions(
credentials_file="credentials.json"
),
static_discovery=False,
)
class DiscoveryFromDocument(unittest.TestCase):
MOCK_CREDENTIALS = mock.Mock(spec=google.auth.credentials.Credentials)
def test_can_build_from_local_document(self):
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
self.assertIsNotNone(plus)
self.assertTrue(hasattr(plus, "activities"))
def test_can_build_from_local_deserialized_document(self):
discovery = read_datafile("plus.json")
discovery = json.loads(discovery)
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
self.assertIsNotNone(plus)
self.assertTrue(hasattr(plus, "activities"))
def test_building_with_base_remembers_base(self):
discovery = read_datafile("plus.json")
base = "https://www.example.com/"
plus = build_from_document(
discovery, base=base, credentials=self.MOCK_CREDENTIALS
)
self.assertEqual("https://www.googleapis.com/plus/v1/", plus._baseUrl)
def test_building_with_optional_http_with_authorization(self):
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
# plus service requires Authorization, hence we expect to see AuthorizedHttp object here
self.assertIsInstance(plus._http, google_auth_httplib2.AuthorizedHttp)
self.assertIsInstance(plus._http.http, httplib2.Http)
self.assertIsInstance(plus._http.http.timeout, int)
self.assertGreater(plus._http.http.timeout, 0)
def test_building_with_optional_http_with_no_authorization(self):
discovery = read_datafile("plus.json")
# Cleanup auth field, so we would use plain http client
discovery = json.loads(discovery)
discovery["auth"] = {}
discovery = json.dumps(discovery)
plus = build_from_document(
discovery, base="https://www.googleapis.com/", credentials=None
)
# plus service requires Authorization
self.assertIsInstance(plus._http, httplib2.Http)
self.assertIsInstance(plus._http.timeout, int)
self.assertGreater(plus._http.timeout, 0)
def test_building_with_explicit_http(self):
http = HttpMock()
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery, base="https://www.googleapis.com/", http=http
)
self.assertEqual(plus._http, http)
def test_building_with_developer_key_skips_adc(self):
discovery = read_datafile("plus.json")
plus = build_from_document(
discovery, base="https://www.googleapis.com/", developerKey="123"
)
self.assertIsInstance(plus._http, httplib2.Http)
# It should not be an AuthorizedHttp, because that would indicate that
# application default credentials were used.
self.assertNotIsInstance(plus._http, google_auth_httplib2.AuthorizedHttp)
def test_building_with_context_manager(self):
discovery = read_datafile("plus.json")
with mock.patch("httplib2.Http") as http:
with build_from_document(discovery, base="https://www.googleapis.com/", credentials=self.MOCK_CREDENTIALS) as plus:
self.assertIsNotNone(plus)
self.assertTrue(hasattr(plus, "activities"))
plus._http.http.close.assert_called_once()
def test_resource_close(self):
discovery = read_datafile("plus.json")
with mock.patch("httplib2.Http") as http:
plus = build_from_document(
discovery,
base="https://www.googleapis.com/",
credentials=self.MOCK_CREDENTIALS,
)
plus.close()
plus._http.http.close.assert_called_once()
def test_api_endpoint_override_from_client_options(self):
discovery = read_datafile("plus.json")
api_endpoint = "https://foo.googleapis.com/"
options = google.api_core.client_options.ClientOptions(
api_endpoint=api_endpoint
)
plus = build_from_document(
discovery, client_options=options, credentials=self.MOCK_CREDENTIALS
)
self.assertEqual(plus._baseUrl, api_endpoint)
def test_api_endpoint_override_from_client_options_mapping_object(self):
discovery = read_datafile("plus.json")
api_endpoint = "https://foo.googleapis.com/"
mapping_object = defaultdict(str)
mapping_object["api_endpoint"] = api_endpoint
plus = build_from_document(discovery, client_options=mapping_object)
self.assertEqual(plus._baseUrl, api_endpoint)
def test_api_endpoint_override_from_client_options_dict(self):
discovery = read_datafile("plus.json")
api_endpoint = "https://foo.googleapis.com/"
plus = build_from_document(
discovery,
client_options={"api_endpoint": api_endpoint},
credentials=self.MOCK_CREDENTIALS,
)
self.assertEqual(plus._baseUrl, api_endpoint)
def test_scopes_from_client_options(self):
discovery = read_datafile("plus.json")
with mock.patch("googleapiclient._auth.default_credentials") as default:
plus = build_from_document(
discovery, client_options={"scopes": ["1", "2"]},
)
default.assert_called_once_with(scopes=["1", "2"], quota_project_id=None)
def test_quota_project_from_client_options(self):
discovery = read_datafile("plus.json")
with mock.patch("googleapiclient._auth.default_credentials") as default:
plus = build_from_document(
discovery,
client_options=google.api_core.client_options.ClientOptions(
quota_project_id="my-project"
),
)
default.assert_called_once_with(scopes=None, quota_project_id="my-project")
def test_credentials_file_from_client_options(self):
discovery = read_datafile("plus.json")
with mock.patch("googleapiclient._auth.credentials_from_file") as default:
plus = build_from_document(
discovery,
client_options=google.api_core.client_options.ClientOptions(
credentials_file="credentials.json"
),
)
default.assert_called_once_with(
"credentials.json", scopes=None, quota_project_id=None
)
REGULAR_ENDPOINT = "https://www.googleapis.com/plus/v1/"
MTLS_ENDPOINT = "https://www.mtls.googleapis.com/plus/v1/"
class DiscoveryFromDocumentMutualTLS(unittest.TestCase):
MOCK_CREDENTIALS = mock.Mock(spec=google.auth.credentials.Credentials)
ADC_CERT_PATH = "adc_cert_path"
ADC_KEY_PATH = "adc_key_path"
ADC_PASSPHRASE = "adc_passphrase"
def check_http_client_cert(self, resource, has_client_cert="false"):
if isinstance(resource._http, google_auth_httplib2.AuthorizedHttp):
certs = list(resource._http.http.certificates.iter(""))
else:
certs = list(resource._http.certificates.iter(""))
if has_client_cert == "true":
self.assertEqual(len(certs), 1)
self.assertEqual(
certs[0], (self.ADC_KEY_PATH, self.ADC_CERT_PATH, self.ADC_PASSPHRASE)
)
else:
self.assertEqual(len(certs), 0)
def client_encrypted_cert_source(self):
return self.ADC_CERT_PATH, self.ADC_KEY_PATH, self.ADC_PASSPHRASE
@parameterized.expand(
[
("never", "true"),
("auto", "true"),
("always", "true"),
("never", "false"),
("auto", "false"),
("always", "false"),
]
)
def test_mtls_not_trigger_if_http_provided(self, use_mtls_env, use_client_cert):
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(discovery, http=httplib2.Http())
self.assertIsNotNone(plus)
self.assertEqual(plus._baseUrl, REGULAR_ENDPOINT)
self.check_http_client_cert(plus, has_client_cert="false")
@parameterized.expand(
[
("never", "true"),
("auto", "true"),
("always", "true"),
("never", "false"),
("auto", "false"),
("always", "false"),
]
)
def test_exception_with_client_cert_source(self, use_mtls_env, use_client_cert):
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
with self.assertRaises(MutualTLSChannelError):
build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
client_options={"client_cert_source": mock.Mock()},
)
@parameterized.expand(
[
("never", "true", REGULAR_ENDPOINT),
("auto", "true", MTLS_ENDPOINT),
("always", "true", MTLS_ENDPOINT),
("never", "false", REGULAR_ENDPOINT),
("auto", "false", REGULAR_ENDPOINT),
("always", "false", MTLS_ENDPOINT),
]
)
def test_mtls_with_provided_client_cert(
self, use_mtls_env, use_client_cert, base_url
):
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
client_options={
"client_encrypted_cert_source": self.client_encrypted_cert_source
},
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert=use_client_cert)
self.assertEqual(plus._baseUrl, base_url)
@parameterized.expand(
[
("never", "true"),
("auto", "true"),
("always", "true"),
("never", "false"),
("auto", "false"),
("always", "false"),
]
)
def test_endpoint_not_switch(self, use_mtls_env, use_client_cert):
# Test endpoint is not switched if user provided api endpoint
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
client_options={
"api_endpoint": "https://foo.googleapis.com",
"client_encrypted_cert_source": self.client_encrypted_cert_source,
},
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert=use_client_cert)
self.assertEqual(plus._baseUrl, "https://foo.googleapis.com")
@parameterized.expand(
[
("never", "true", REGULAR_ENDPOINT),
("auto", "true", MTLS_ENDPOINT),
("always", "true", MTLS_ENDPOINT),
("never", "false", REGULAR_ENDPOINT),
("auto", "false", REGULAR_ENDPOINT),
("always", "false", MTLS_ENDPOINT),
]
)
@mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source", autospec=True
)
@mock.patch(
"google.auth.transport.mtls.default_client_encrypted_cert_source", autospec=True
)
def test_mtls_with_default_client_cert(
self,
use_mtls_env,
use_client_cert,
base_url,
default_client_encrypted_cert_source,
has_default_client_cert_source,
):
has_default_client_cert_source.return_value = True
default_client_encrypted_cert_source.return_value = (
self.client_encrypted_cert_source
)
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
adc_cert_path=self.ADC_CERT_PATH,
adc_key_path=self.ADC_KEY_PATH,
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert=use_client_cert)
self.assertEqual(plus._baseUrl, base_url)
@parameterized.expand(
[
("never", "true", REGULAR_ENDPOINT),
("auto", "true", REGULAR_ENDPOINT),
("always", "true", MTLS_ENDPOINT),
("never", "false", REGULAR_ENDPOINT),
("auto", "false", REGULAR_ENDPOINT),
("always", "false", MTLS_ENDPOINT),
]
)
@mock.patch(
"google.auth.transport.mtls.has_default_client_cert_source", autospec=True
)
def test_mtls_with_no_client_cert(
self, use_mtls_env, use_client_cert, base_url, has_default_client_cert_source
):
has_default_client_cert_source.return_value = False
discovery = read_datafile("plus.json")
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_MTLS_ENDPOINT": use_mtls_env}
):
with mock.patch.dict(
"os.environ", {"GOOGLE_API_USE_CLIENT_CERTIFICATE": use_client_cert}
):
plus = build_from_document(
discovery,
credentials=self.MOCK_CREDENTIALS,
adc_cert_path=self.ADC_CERT_PATH,
adc_key_path=self.ADC_KEY_PATH,
)
self.assertIsNotNone(plus)
self.check_http_client_cert(plus, has_client_cert="false")
self.assertEqual(plus._baseUrl, base_url)
class DiscoveryFromHttp(unittest.TestCase):
def setUp(self):
self.old_environ = os.environ.copy()
def tearDown(self):
os.environ = self.old_environ
def test_userip_is_added_to_discovery_uri(self):
# build() will raise an HttpError on a 400, use this to pick the request uri
# out of the raised exception.
os.environ["REMOTE_ADDR"] = "10.0.0.1"
try:
http = HttpMockSequence(
[({"status": "400"}, read_datafile("zoo.json", "rb"))]
)
zoo = build(
"zoo",
"v1",
http=http,
developerKey=None,
discoveryServiceUrl="http://example.com",
static_discovery=False,
)
self.fail("Should have raised an exception.")
except HttpError as e:
self.assertEqual(e.uri, "http://example.com?userIp=10.0.0.1")
def test_userip_missing_is_not_added_to_discovery_uri(self):
# build() will raise an HttpError on a 400, use this to pick the request uri
# out of the raised exception.
try:
http = HttpMockSequence(
[({"status": "400"}, read_datafile("zoo.json", "rb"))]
)
zoo = build(
"zoo",
"v1",
http=http,
developerKey=None,
discoveryServiceUrl="http://example.com",
static_discovery=False,
)
self.fail("Should have raised an exception.")
except HttpError as e:
self.assertEqual(e.uri, "http://example.com")
def test_key_is_added_to_discovery_uri(self):
# build() will raise an HttpError on a 400, use this to pick the request uri
# out of the raised exception.
try:
http = HttpMockSequence(
[({"status": "400"}, read_datafile("zoo.json", "rb"))]
)
zoo = build(
"zoo",
"v1",
http=http,
developerKey="foo",
discoveryServiceUrl="http://example.com",
static_discovery=False,
)
self.fail("Should have raised an exception.")
except HttpError as e:
self.assertEqual(e.uri, "http://example.com?key=foo")
def test_discovery_loading_from_v2_discovery_uri(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
self.assertTrue(hasattr(zoo, "animals"))
def test_api_endpoint_override_from_client_options(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
api_endpoint = "https://foo.googleapis.com/"
options = google.api_core.client_options.ClientOptions(
api_endpoint=api_endpoint
)
zoo = build(
"zoo", "v1", http=http, cache_discovery=False, client_options=options, static_discovery=False
)
self.assertEqual(zoo._baseUrl, api_endpoint)
def test_api_endpoint_override_from_client_options_dict(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
api_endpoint = "https://foo.googleapis.com/"
zoo = build(
"zoo",
"v1",
http=http,
cache_discovery=False,
client_options={"api_endpoint": api_endpoint},
static_discovery=False,
)
self.assertEqual(zoo._baseUrl, api_endpoint)
def test_discovery_with_empty_version_uses_v2(self):
http = HttpMockSequence([({"status": "200"}, read_datafile("zoo.json", "rb")),])
build("zoo", version=None, http=http, cache_discovery=False, static_discovery=False)
validate_discovery_requests(self, http, "zoo", None, V2_DISCOVERY_URI)
def test_discovery_with_empty_version_preserves_custom_uri(self):
http = HttpMockSequence([({"status": "200"}, read_datafile("zoo.json", "rb")),])
custom_discovery_uri = "https://foo.bar/$discovery"
build(
"zoo",
version=None,
http=http,
cache_discovery=False,
discoveryServiceUrl=custom_discovery_uri,
static_discovery=False,
)
validate_discovery_requests(self, http, "zoo", None, custom_discovery_uri)
def test_discovery_with_valid_version_uses_v1(self):
http = HttpMockSequence([({"status": "200"}, read_datafile("zoo.json", "rb")),])
build("zoo", version="v123", http=http, cache_discovery=False, static_discovery=False)
validate_discovery_requests(self, http, "zoo", "v123", V1_DISCOVERY_URI)
class DiscoveryRetryFromHttp(unittest.TestCase):
def test_repeated_500_retries_and_fails(self):
http = HttpMockSequence(
[
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "503"}, read_datafile("503.json", "rb")),
]
)
with self.assertRaises(HttpError):
with mock.patch("time.sleep") as mocked_sleep:
build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
mocked_sleep.assert_called_once()
# We also want to verify that we stayed with v1 discovery
validate_discovery_requests(self, http, "zoo", "v1", V1_DISCOVERY_URI)
def test_v2_repeated_500_retries_and_fails(self):
http = HttpMockSequence(
[
({"status": "404"}, "Not found"), # last v1 discovery call
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "503"}, read_datafile("503.json", "rb")),
]
)
with self.assertRaises(HttpError):
with mock.patch("time.sleep") as mocked_sleep:
build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
mocked_sleep.assert_called_once()
# We also want to verify that we switched to v2 discovery
validate_discovery_requests(self, http, "zoo", "v1", V2_DISCOVERY_URI)
def test_single_500_retries_and_succeeds(self):
http = HttpMockSequence(
[
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
with mock.patch("time.sleep") as mocked_sleep:
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
self.assertTrue(hasattr(zoo, "animals"))
mocked_sleep.assert_called_once()
# We also want to verify that we stayed with v1 discovery
validate_discovery_requests(self, http, "zoo", "v1", V1_DISCOVERY_URI)
def test_single_500_then_404_retries_and_succeeds(self):
http = HttpMockSequence(
[
({"status": "500"}, read_datafile("500.json", "rb")),
({"status": "404"}, "Not found"), # last v1 discovery call
({"status": "200"}, read_datafile("zoo.json", "rb")),
]
)
with mock.patch("time.sleep") as mocked_sleep:
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
self.assertTrue(hasattr(zoo, "animals"))
mocked_sleep.assert_called_once()
# We also want to verify that we switched to v2 discovery
validate_discovery_requests(self, http, "zoo", "v1", V2_DISCOVERY_URI)
class DiscoveryFromAppEngineCache(unittest.TestCase):
def setUp(self):
self.old_environ = os.environ.copy()
os.environ["APPENGINE_RUNTIME"] = "python27"
def tearDown(self):
os.environ = self.old_environ
def test_appengine_memcache(self):
# Hack module import
self.orig_import = __import__
self.mocked_api = mock.MagicMock()
def import_mock(name, *args, **kwargs):
if name == "google.appengine.api":
return self.mocked_api
return self.orig_import(name, *args, **kwargs)
import_fullname = "__builtin__.__import__"
if sys.version_info[0] >= 3:
import_fullname = "builtins.__import__"
with mock.patch(import_fullname, side_effect=import_mock):
namespace = "google-api-client"
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
self.mocked_api.memcache.get.return_value = None
plus = build("plus", "v1", http=self.http, static_discovery=False)
# memcache.get is called once
url = "https://www.googleapis.com/discovery/v1/apis/plus/v1/rest"
self.mocked_api.memcache.get.assert_called_once_with(
url, namespace=namespace
)
# memcache.set is called once
content = read_datafile("plus.json")
self.mocked_api.memcache.set.assert_called_once_with(
url, content, time=DISCOVERY_DOC_MAX_AGE, namespace=namespace
)
# Returns the cached content this time.
self.mocked_api.memcache.get.return_value = content
# Make sure the contents are returned from the cache.
            # (Otherwise it should throw an error)
self.http = HttpMock(None, {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# memcache.get is called twice
self.mocked_api.memcache.get.assert_has_calls(
[
mock.call(url, namespace=namespace),
mock.call(url, namespace=namespace),
]
)
            # memcache.set is called just once
self.mocked_api.memcache.set.assert_called_once_with(
url, content, time=DISCOVERY_DOC_MAX_AGE, namespace=namespace
)
class DiscoveryFromStaticDocument(unittest.TestCase):
def test_retrieve_from_local_when_static_discovery_true(self):
http = HttpMockSequence([({"status": "400"}, "")])
drive = build("drive", "v3", http=http, cache_discovery=False,
static_discovery=True)
self.assertIsNotNone(drive)
self.assertTrue(hasattr(drive, "files"))
def test_retrieve_from_internet_when_static_discovery_false(self):
http = HttpMockSequence([({"status": "400"}, "")])
with self.assertRaises(HttpError):
build("drive", "v3", http=http, cache_discovery=False,
static_discovery=False)
def test_unknown_api_when_static_discovery_true(self):
with self.assertRaises(UnknownApiNameOrVersion):
build("doesnotexist", "v3", cache_discovery=False,
static_discovery=True)
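# Minimal dict-backed Cache implementation used to observe how build() reads
# from and writes to the discovery cache.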
class DictCache(Cache):
def __init__(self):
self.d = {}
def get(self, url):
return self.d.get(url, None)
def set(self, url, content):
self.d[url] = content
def contains(self, url):
return url in self.d
class DiscoveryFromFileCache(unittest.TestCase):
def test_file_based_cache(self):
cache = mock.Mock(wraps=DictCache())
with mock.patch(
"googleapiclient.discovery_cache.autodetect", return_value=cache
):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# cache.get is called once
url = "https://www.googleapis.com/discovery/v1/apis/plus/v1/rest"
cache.get.assert_called_once_with(url)
# cache.set is called once
content = read_datafile("plus.json")
cache.set.assert_called_once_with(url, content)
# Make sure there is a cache entry for the plus v1 discovery doc.
self.assertTrue(cache.contains(url))
# Make sure the contents are returned from the cache.
            # (Otherwise it should throw an error)
self.http = HttpMock(None, {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# cache.get is called twice
cache.get.assert_has_calls([mock.call(url), mock.call(url)])
            # cache.set is called just once
cache.set.assert_called_once_with(url, content)
class Discovery(unittest.TestCase):
def test_method_error_checking(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
# Missing required parameters
try:
plus.activities().list()
self.fail()
except TypeError as e:
self.assertTrue("Missing" in str(e))
# Missing required parameters even if supplied as None.
try:
plus.activities().list(collection=None, userId=None)
self.fail()
except TypeError as e:
self.assertTrue("Missing" in str(e))
# Parameter doesn't match regex
try:
plus.activities().list(collection="not_a_collection_name", userId="me")
self.fail()
except TypeError as e:
self.assertTrue("not an allowed value" in str(e))
# Unexpected parameter
try:
plus.activities().list(flubber=12)
self.fail()
except TypeError as e:
self.assertTrue("unexpected" in str(e))
def _check_query_types(self, request):
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["q"], ["foo"])
self.assertEqual(q["i"], ["1"])
self.assertEqual(q["n"], ["1.0"])
self.assertEqual(q["b"], ["false"])
self.assertEqual(q["a"], ["[1, 2, 3]"])
self.assertEqual(q["o"], ["{'a': 1}"])
self.assertEqual(q["e"], ["bar"])
def test_type_coercion(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.query(
q="foo", i=1.0, n=1.0, b=0, a=[1, 2, 3], o={"a": 1}, e="bar"
)
self._check_query_types(request)
request = zoo.query(
q="foo", i=1, n=1, b=False, a=[1, 2, 3], o={"a": 1}, e="bar"
)
self._check_query_types(request)
request = zoo.query(
q="foo", i="1", n="1", b="", a=[1, 2, 3], o={"a": 1}, e="bar", er="two"
)
request = zoo.query(
q="foo",
i="1",
n="1",
b="",
a=[1, 2, 3],
o={"a": 1},
e="bar",
er=["one", "three"],
rr=["foo", "bar"],
)
self._check_query_types(request)
# Five is right out.
self.assertRaises(TypeError, zoo.query, er=["one", "five"])
def test_optional_stack_query_parameters(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.query(trace="html", fields="description")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["trace"], ["html"])
self.assertEqual(q["fields"], ["description"])
def test_string_params_value_of_none_get_dropped(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.query(trace=None, fields="description")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertFalse("trace" in q)
def test_model_added_query_parameters(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().get(name="Lion")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["alt"], ["json"])
self.assertEqual(request.headers["accept"], "application/json")
def test_fallback_to_raw_model(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().getmedia(name="Lion")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertTrue("alt" not in q)
self.assertEqual(request.headers["accept"], "*/*")
def test_patch(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().patch(name="lion", body='{"description": "foo"}')
self.assertEqual(request.method, "PATCH")
def test_batch_request_from_discovery(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
# zoo defines a batchPath
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
batch_request = zoo.new_batch_http_request()
self.assertEqual(
batch_request._batch_uri, "https://www.googleapis.com/batchZoo"
)
def test_batch_request_from_default(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
# plus does not define a batchPath
plus = build("plus", "v1", http=self.http, cache_discovery=False, static_discovery=False)
batch_request = plus.new_batch_http_request()
self.assertEqual(batch_request._batch_uri, "https://www.googleapis.com/batch")
def test_tunnel_patch(self):
http = HttpMockSequence(
[
({"status": "200"}, read_datafile("zoo.json", "rb")),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
http = tunnel_patch(http)
zoo = build("zoo", "v1", http=http, cache_discovery=False, static_discovery=False)
resp = zoo.animals().patch(name="lion", body='{"description": "foo"}').execute()
self.assertTrue("x-http-method-override" in resp)
def test_plus_resources(self):
self.http = HttpMock(datafile("plus.json"), {"status": "200"})
plus = build("plus", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(plus, "activities"))
self.assertTrue(getattr(plus, "people"))
def test_oauth2client_credentials(self):
credentials = mock.Mock(spec=GoogleCredentials)
credentials.create_scoped_required.return_value = False
discovery = read_datafile("plus.json")
service = build_from_document(discovery, credentials=credentials)
self.assertEqual(service._http, credentials.authorize.return_value)
def test_google_auth_credentials(self):
credentials = mock.Mock(spec=google.auth.credentials.Credentials)
discovery = read_datafile("plus.json")
service = build_from_document(discovery, credentials=credentials)
self.assertIsInstance(service._http, google_auth_httplib2.AuthorizedHttp)
self.assertEqual(service._http.credentials, credentials)
def test_no_scopes_no_credentials(self):
# Zoo doesn't have scopes
discovery = read_datafile("zoo.json")
service = build_from_document(discovery)
# Should be an ordinary httplib2.Http instance and not AuthorizedHttp.
self.assertIsInstance(service._http, httplib2.Http)
def test_full_featured(self):
# Zoo should exercise all discovery facets
# and should also have no future.json file.
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(zoo, "animals"))
request = zoo.animals().list(name="bat", projection="full")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["name"], ["bat"])
self.assertEqual(q["projection"], ["full"])
def test_nested_resources(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(zoo, "animals"))
request = zoo.my().favorites().list(max_results="5")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["max-results"], ["5"])
@unittest.skipIf(six.PY3, "print is not a reserved name in Python 3")
def test_methods_with_reserved_names(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http)
self.assertTrue(getattr(zoo, "animals"))
request = zoo.global_().print_().assert_(max_results="5")
parsed = urlparse(request.uri)
self.assertEqual(parsed[2], "/zoo/v1/global/print/assert")
def test_top_level_functions(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
self.assertTrue(getattr(zoo, "query"))
request = zoo.query(q="foo")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["q"], ["foo"])
def test_simple_media_uploads(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
doc = getattr(zoo.animals().insert, "__doc__")
self.assertTrue("media_body" in doc)
def test_simple_media_upload_no_max_size_provided(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().crossbreed(media_body=datafile("small.png"))
self.assertEqual("image/png", request.headers["content-type"])
self.assertEqual(b"PNG", request.body[1:4])
def test_simple_media_raise_correct_exceptions(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
try:
zoo.animals().insert(media_body=datafile("smiley.png"))
self.fail("should throw exception if media is too large.")
except MediaUploadSizeError:
pass
try:
zoo.animals().insert(media_body=datafile("small.jpg"))
self.fail("should throw exception if mimetype is unacceptable.")
except UnacceptableMimeTypeError:
pass
def test_simple_media_good_upload(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().insert(media_body=datafile("small.png"))
self.assertEqual("image/png", request.headers["content-type"])
self.assertEqual(b"PNG", request.body[1:4])
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=media&alt=json",
request.uri,
)
def test_simple_media_unknown_mimetype(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
try:
zoo.animals().insert(media_body=datafile("small-png"))
self.fail("should throw exception if mimetype is unknown.")
except UnknownFileType:
pass
request = zoo.animals().insert(
media_body=datafile("small-png"), media_mime_type="image/png"
)
self.assertEqual("image/png", request.headers["content-type"])
self.assertEqual(b"PNG", request.body[1:4])
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=media&alt=json",
request.uri,
)
def test_multipart_media_raise_correct_exceptions(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
try:
zoo.animals().insert(media_body=datafile("smiley.png"), body={})
self.fail("should throw exception if media is too large.")
except MediaUploadSizeError:
pass
try:
zoo.animals().insert(media_body=datafile("small.jpg"), body={})
self.fail("should throw exception if mimetype is unacceptable.")
except UnacceptableMimeTypeError:
pass
    def test_multipart_media_good_upload(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().insert(media_body=datafile("small.png"), body={})
self.assertTrue(request.headers["content-type"].startswith("multipart/related"))
contents = read_datafile("small.png", "rb")
boundary = re.match(b"--=+([^=]+)", request.body).group(1)
self.assertEqual(
request.body.rstrip(b"\n"), # Python 2.6 does not add a trailing \n
b"--==============="
+ boundary
+ b"==\n"
+ b"Content-Type: application/json\n"
+ b"MIME-Version: 1.0\n\n"
+ b'{"data": {}}\n'
+ b"--==============="
+ boundary
+ b"==\n"
+ b"Content-Type: image/png\n"
+ b"MIME-Version: 1.0\n"
+ b"Content-Transfer-Encoding: binary\n\n"
+ contents
+ b"\n--==============="
+ boundary
+ b"==--",
)
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=multipart&alt=json",
request.uri,
)
def test_media_capable_method_without_media(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
request = zoo.animals().insert(body={})
        self.assertEqual(request.headers["content-type"], "application/json")
def test_resumable_multipart_media_good_upload(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body={})
self.assertTrue(request.headers["content-type"].startswith("application/json"))
self.assertEqual('{"data": {}}', request.body)
self.assertEqual(media_upload, request.resumable)
self.assertEqual("image/png", request.resumable.mimetype())
self.assertNotEqual(request.body, None)
self.assertEqual(request.resumable_uri, None)
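        # Mock the resumable upload session: the first response provides the
        # upload location, each 308 reports the byte range received so far,
        # and the final 200 completes the upload.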
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "308", "location": "http://upload.example.com/2"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/3",
"range": "0-12",
},
"",
),
(
{
"status": "308",
"location": "http://upload.example.com/4",
"range": "0-%d" % (media_upload.size() - 2),
},
"",
),
({"status": "200"}, '{"foo": "bar"}'),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(None, body)
self.assertTrue(isinstance(status, MediaUploadProgress))
self.assertEqual(0, status.resumable_progress)
# Two requests should have been made and the resumable_uri should have been
# updated for each one.
self.assertEqual(request.resumable_uri, "http://upload.example.com/2")
self.assertEqual(media_upload, request.resumable)
self.assertEqual(0, request.resumable_progress)
        # This next chunk call should upload the first chunk
status, body = request.next_chunk(http=http)
self.assertEqual(request.resumable_uri, "http://upload.example.com/3")
self.assertEqual(media_upload, request.resumable)
self.assertEqual(13, request.resumable_progress)
# This call will upload the next chunk
status, body = request.next_chunk(http=http)
self.assertEqual(request.resumable_uri, "http://upload.example.com/4")
self.assertEqual(media_upload.size() - 1, request.resumable_progress)
self.assertEqual('{"data": {}}', request.body)
# Final call to next_chunk should complete the upload.
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"foo": "bar"})
self.assertEqual(status, None)
def test_resumable_media_good_upload(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
self.assertEqual(media_upload, request.resumable)
self.assertEqual("image/png", request.resumable.mimetype())
self.assertEqual(request.body, None)
self.assertEqual(request.resumable_uri, None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-12",
},
"",
),
(
{
"status": "308",
"location": "http://upload.example.com/3",
"range": "0-%d" % (media_upload.size() - 2),
},
"",
),
({"status": "200"}, '{"foo": "bar"}'),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(None, body)
self.assertTrue(isinstance(status, MediaUploadProgress))
self.assertEqual(13, status.resumable_progress)
# Two requests should have been made and the resumable_uri should have been
# updated for each one.
self.assertEqual(request.resumable_uri, "http://upload.example.com/2")
self.assertEqual(media_upload, request.resumable)
self.assertEqual(13, request.resumable_progress)
status, body = request.next_chunk(http=http)
self.assertEqual(request.resumable_uri, "http://upload.example.com/3")
self.assertEqual(media_upload.size() - 1, request.resumable_progress)
self.assertEqual(request.body, None)
# Final call to next_chunk should complete the upload.
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"foo": "bar"})
self.assertEqual(status, None)
def test_resumable_media_good_upload_from_execute(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
assertUrisEqual(
self,
"https://www.googleapis.com/upload/zoo/v1/animals?uploadType=resumable&alt=json",
request.uri,
)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-12",
},
"",
),
(
{
"status": "308",
"location": "http://upload.example.com/3",
"range": "0-%d" % media_upload.size(),
},
"",
),
({"status": "200"}, '{"foo": "bar"}'),
]
)
body = request.execute(http=http)
self.assertEqual(body, {"foo": "bar"})
def test_resumable_media_fail_unknown_response_code_first_request(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
http = HttpMockSequence(
[({"status": "400", "location": "http://upload.example.com"}, "")]
)
try:
request.execute(http=http)
self.fail("Should have raised ResumableUploadError.")
except ResumableUploadError as e:
self.assertEqual(400, e.resp.status)
def test_resumable_media_fail_unknown_response_code_subsequent_request(self):
"""Not a multipart upload."""
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("small.png"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "400"}, ""),
]
)
self.assertRaises(HttpError, request.execute, http=http)
self.assertTrue(request._in_error_state)
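        # Recovering from the error state: the next chunk first asks the server
        # how many bytes it already has (range 0-5), then uploads more (0-6).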
http = HttpMockSequence(
[
({"status": "308", "range": "0-5"}, ""),
({"status": "308", "range": "0-6"}, ""),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(
status.resumable_progress,
7,
"Should have first checked length and then tried to PUT more.",
)
self.assertFalse(request._in_error_state)
# Put it back in an error state.
http = HttpMockSequence([({"status": "400"}, "")])
self.assertRaises(HttpError, request.execute, http=http)
self.assertTrue(request._in_error_state)
# Pretend the last request that 400'd actually succeeded.
http = HttpMockSequence([({"status": "200"}, '{"foo": "bar"}')])
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"foo": "bar"})
def test_media_io_base_stream_unlimited_chunksize_resume(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Set up a seekable stream and try to upload in single chunk.
fd = BytesIO(b'01234"56789"')
media_upload = MediaIoBaseUpload(
fd=fd, mimetype="text/plain", chunksize=-1, resumable=True
)
request = zoo.animals().insert(media_body=media_upload, body=None)
# The single chunk fails, restart at the right point.
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-4",
},
"",
),
({"status": "200"}, "echo_request_body"),
]
)
body = request.execute(http=http)
self.assertEqual("56789", body)
def test_media_io_base_stream_chunksize_resume(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Set up a seekable stream and try to upload in chunks.
fd = BytesIO(b"0123456789")
media_upload = MediaIoBaseUpload(
fd=fd, mimetype="text/plain", chunksize=5, resumable=True
)
request = zoo.animals().insert(media_body=media_upload, body=None)
# The single chunk fails, pull the content sent out of the exception.
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "400"}, "echo_request_body"),
]
)
try:
body = request.execute(http=http)
except HttpError as e:
self.assertEqual(b"01234", e.content)
def test_resumable_media_handle_uploads_of_unknown_size(self):
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Create an upload that doesn't know the full size of the media.
class IoBaseUnknownLength(MediaUpload):
def chunksize(self):
return 10
def mimetype(self):
return "image/png"
def size(self):
return None
def resumable(self):
return True
def getbytes(self, begin, length):
return "0123456789"
upload = IoBaseUnknownLength()
request = zoo.animals().insert(media_body=upload, body=None)
status, body = request.next_chunk(http=http)
self.assertEqual(body, {"Content-Range": "bytes 0-9/*", "Content-Length": "10"})
def test_resumable_media_no_streaming_on_unsupported_platforms(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
class IoBaseHasStream(MediaUpload):
def chunksize(self):
return 10
def mimetype(self):
return "image/png"
def size(self):
return None
def resumable(self):
return True
def getbytes(self, begin, length):
return "0123456789"
def has_stream(self):
return True
def stream(self):
raise NotImplementedError()
upload = IoBaseHasStream()
orig_version = sys.version_info
sys.version_info = (2, 6, 5, "final", 0)
request = zoo.animals().insert(media_body=upload, body=None)
# This should raise an exception because stream() will be called.
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
self.assertRaises(NotImplementedError, request.next_chunk, http=http)
sys.version_info = orig_version
def test_resumable_media_handle_uploads_of_unknown_size_eof(self):
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "200"}, "echo_request_headers_as_json"),
]
)
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
fd = BytesIO(b"data goes here")
# Create an upload that doesn't know the full size of the media.
upload = MediaIoBaseUpload(
fd=fd, mimetype="image/png", chunksize=15, resumable=True
)
request = zoo.animals().insert(media_body=upload, body=None)
status, body = request.next_chunk(http=http)
self.assertEqual(
body, {"Content-Range": "bytes 0-13/14", "Content-Length": "14"}
)
def test_resumable_media_handle_resume_of_upload_of_unknown_size(self):
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
({"status": "400"}, ""),
]
)
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
# Create an upload that doesn't know the full size of the media.
fd = BytesIO(b"data goes here")
upload = MediaIoBaseUpload(
fd=fd, mimetype="image/png", chunksize=500, resumable=True
)
request = zoo.animals().insert(media_body=upload, body=None)
# Put it in an error state.
self.assertRaises(HttpError, request.next_chunk, http=http)
http = HttpMockSequence(
[({"status": "400", "range": "0-5"}, "echo_request_headers_as_json")]
)
try:
# Should resume the upload by first querying the status of the upload.
request.next_chunk(http=http)
except HttpError as e:
expected = {"Content-Range": "bytes */14", "content-length": "0"}
self.assertEqual(
expected,
json.loads(e.content.decode("utf-8")),
"Should send an empty body when requesting the current upload status.",
)
def test_pickle(self):
sorted_resource_keys = [
"_baseUrl",
"_developerKey",
"_dynamic_attrs",
"_http",
"_model",
"_requestBuilder",
"_resourceDesc",
"_rootDesc",
"_schema",
"animals",
"global_",
"load",
"loadNoTemplate",
"my",
"new_batch_http_request",
"query",
"scopedAnimals",
]
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
self.assertEqual(sorted(zoo.__dict__.keys()), sorted_resource_keys)
pickled_zoo = pickle.dumps(zoo)
new_zoo = pickle.loads(pickled_zoo)
self.assertEqual(sorted(new_zoo.__dict__.keys()), sorted_resource_keys)
self.assertTrue(hasattr(new_zoo, "animals"))
self.assertTrue(callable(new_zoo.animals))
self.assertTrue(hasattr(new_zoo, "global_"))
self.assertTrue(callable(new_zoo.global_))
self.assertTrue(hasattr(new_zoo, "load"))
self.assertTrue(callable(new_zoo.load))
self.assertTrue(hasattr(new_zoo, "loadNoTemplate"))
self.assertTrue(callable(new_zoo.loadNoTemplate))
self.assertTrue(hasattr(new_zoo, "my"))
self.assertTrue(callable(new_zoo.my))
self.assertTrue(hasattr(new_zoo, "query"))
self.assertTrue(callable(new_zoo.query))
self.assertTrue(hasattr(new_zoo, "scopedAnimals"))
self.assertTrue(callable(new_zoo.scopedAnimals))
self.assertEqual(sorted(zoo._dynamic_attrs), sorted(new_zoo._dynamic_attrs))
self.assertEqual(zoo._baseUrl, new_zoo._baseUrl)
self.assertEqual(zoo._developerKey, new_zoo._developerKey)
self.assertEqual(zoo._requestBuilder, new_zoo._requestBuilder)
self.assertEqual(zoo._resourceDesc, new_zoo._resourceDesc)
self.assertEqual(zoo._rootDesc, new_zoo._rootDesc)
# _http, _model and _schema won't be equal since we will get new
# instances upon un-pickling
def _dummy_zoo_request(self):
zoo_contents = read_datafile("zoo.json")
zoo_uri = uritemplate.expand(DISCOVERY_URI, {"api": "zoo", "apiVersion": "v1"})
if "REMOTE_ADDR" in os.environ:
zoo_uri = util._add_query_parameter(
zoo_uri, "userIp", os.environ["REMOTE_ADDR"]
)
http = build_http()
original_request = http.request
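        # Serve the zoo discovery document from the local data file and pass
        # every other request through to the real http.request.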
def wrapped_request(uri, method="GET", *args, **kwargs):
if uri == zoo_uri:
return httplib2.Response({"status": "200"}), zoo_contents
return original_request(uri, method=method, *args, **kwargs)
http.request = wrapped_request
return http
def _dummy_token(self):
access_token = "foo"
client_id = "some_client_id"
client_secret = "cOuDdkfjxxnv+"
refresh_token = "1/0/a.df219fjls0"
token_expiry = datetime.datetime.utcnow()
user_agent = "refresh_checker/1.0"
return OAuth2Credentials(
access_token,
client_id,
client_secret,
refresh_token,
token_expiry,
GOOGLE_TOKEN_URI,
user_agent,
)
def test_pickle_with_credentials(self):
credentials = self._dummy_token()
http = self._dummy_zoo_request()
http = credentials.authorize(http)
self.assertTrue(hasattr(http.request, "credentials"))
zoo = build("zoo", "v1", http=http, static_discovery=False)
pickled_zoo = pickle.dumps(zoo)
new_zoo = pickle.loads(pickled_zoo)
self.assertEqual(sorted(zoo.__dict__.keys()), sorted(new_zoo.__dict__.keys()))
new_http = new_zoo._http
self.assertFalse(hasattr(new_http.request, "credentials"))
def test_resumable_media_upload_no_content(self):
self.http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=self.http, static_discovery=False)
media_upload = MediaFileUpload(datafile("empty"), resumable=True)
request = zoo.animals().insert(media_body=media_upload, body=None)
self.assertEqual(media_upload, request.resumable)
self.assertEqual(request.body, None)
self.assertEqual(request.resumable_uri, None)
http = HttpMockSequence(
[
({"status": "200", "location": "http://upload.example.com"}, ""),
(
{
"status": "308",
"location": "http://upload.example.com/2",
"range": "0-0",
},
"",
),
]
)
status, body = request.next_chunk(http=http)
self.assertEqual(None, body)
self.assertTrue(isinstance(status, MediaUploadProgress))
self.assertEqual(0, status.progress())
class Next(unittest.TestCase):
def test_next_successful_none_on_no_next_page_token(self):
self.http = HttpMock(datafile("tasks.json"), {"status": "200"})
tasks = build("tasks", "v1", http=self.http)
request = tasks.tasklists().list()
self.assertEqual(None, tasks.tasklists().list_next(request, {}))
def test_next_successful_none_on_empty_page_token(self):
self.http = HttpMock(datafile("tasks.json"), {"status": "200"})
tasks = build("tasks", "v1", http=self.http)
request = tasks.tasklists().list()
next_request = tasks.tasklists().list_next(request, {"nextPageToken": ""})
self.assertEqual(None, next_request)
def test_next_successful_with_next_page_token(self):
self.http = HttpMock(datafile("tasks.json"), {"status": "200"})
tasks = build("tasks", "v1", http=self.http)
request = tasks.tasklists().list()
next_request = tasks.tasklists().list_next(request, {"nextPageToken": "123abc"})
parsed = list(urlparse(next_request.uri))
q = parse_qs(parsed[4])
self.assertEqual(q["pageToken"][0], "123abc")
def test_next_successful_with_next_page_token_alternate_name(self):
self.http = HttpMock(datafile("bigquery.json"), {"status": "200"})
bigquery = build("bigquery", "v2", http=self.http)
request = bigquery.tabledata().list(datasetId="", projectId="", tableId="")
next_request = bigquery.tabledata().list_next(request, {"pageToken": "123abc"})
parsed = list(urlparse(next_request.uri))
q = parse_qs(parsed[4])
self.assertEqual(q["pageToken"][0], "123abc")
def test_next_successful_with_next_page_token_in_body(self):
self.http = HttpMock(datafile("logging.json"), {"status": "200"})
logging = build("logging", "v2", http=self.http)
request = logging.entries().list(body={})
next_request = logging.entries().list_next(request, {"nextPageToken": "123abc"})
body = JsonModel().deserialize(next_request.body)
self.assertEqual(body["pageToken"], "123abc")
def test_next_with_method_with_no_properties(self):
self.http = HttpMock(datafile("latitude.json"), {"status": "200"})
service = build("latitude", "v1", http=self.http, static_discovery=False)
service.currentLocation().get()
def test_next_nonexistent_with_no_next_page_token(self):
self.http = HttpMock(datafile("drive.json"), {"status": "200"})
drive = build("drive", "v3", http=self.http)
drive.changes().watch(body={})
self.assertFalse(callable(getattr(drive.changes(), "watch_next", None)))
def test_next_successful_with_next_page_token_required(self):
self.http = HttpMock(datafile("drive.json"), {"status": "200"})
drive = build("drive", "v3", http=self.http)
request = drive.changes().list(pageToken="startPageToken")
next_request = drive.changes().list_next(request, {"nextPageToken": "123abc"})
parsed = list(urlparse(next_request.uri))
q = parse_qs(parsed[4])
self.assertEqual(q["pageToken"][0], "123abc")
class MediaGet(unittest.TestCase):
def test_get_media(self):
http = HttpMock(datafile("zoo.json"), {"status": "200"})
zoo = build("zoo", "v1", http=http, static_discovery=False)
request = zoo.animals().get_media(name="Lion")
parsed = urlparse(request.uri)
q = parse_qs(parsed[4])
self.assertEqual(q["alt"], ["media"])
self.assertEqual(request.headers["accept"], "*/*")
http = HttpMockSequence([({"status": "200"}, "standing in for media")])
response = request.execute(http=http)
self.assertEqual(b"standing in for media", response)
if __name__ == "__main__":
unittest.main()
|
the-stack_0_15309 | import unittest
import unittest.mock
from programy.clients.render.renderer import RichMediaRenderer
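# Renderer stub that records the userid and payload passed to each handle_*
# callback so tests can assert on what was rendered.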
class MockRichMediaRenderer(RichMediaRenderer):
def __init__(self, config):
RichMediaRenderer.__init__(self, config)
def handle_text(self, userid, text):
self._userid = userid
self._text = text
return None
def handle_url_button(self, userid, button):
self._userid = userid
self._button = button
return None
def handle_postback_button(self, userid, button):
self._userid = userid
self._button = button
return None
def handle_link(self, userid, link):
self._userid = userid
self._link = link
return None
def handle_image(self, userid, image):
self._userid = userid
self._image = image
return None
def handle_video(self, userid, video):
self._userid = userid
self._video = video
return None
def handle_card(self, userid, card):
self._userid = userid
self._card = card
return None
def handle_carousel(self, userid, carousel):
self._userid = userid
self._carousel = carousel
return None
def handle_reply(self, userid, reply):
self._userid = userid
self._reply = reply
return None
def handle_delay(self, userid, delay):
self._userid = userid
self._delay = delay
return None
def handle_split(self, userid, split):
self._userid = userid
self._split = split
return None
def handle_list(self, userid, list):
self._userid = userid
self._list = list
return None
def handle_ordered_list(self, userid, items):
self._userid = userid
        self._list = items
return None
def handle_location(self, userid, location):
self._userid = userid
self._location = location
return None
def handle_tts(self, userid, text):
self._userid = userid
self._text = text
return None
class OpenChatBotRichMediaRendererTests(unittest.TestCase):
def test_card(self):
mock_config = unittest.mock.Mock()
renderer = MockRichMediaRenderer(mock_config)
self.assertIsNotNone(renderer)
renderer.render("testuser", """
<card>
<image>https://www.ikea.com/fr/fr/images/products/strandmon-fauteuil-enfant-gris__0574584_PE668407_S4.JPG</image>
<title>Fauteuil enfant, Visslegris</title>
<subtitle>Quand ils peuvent imiter les adultes, les enfants sesentent spéciaux et importants. C'est pourquoi nous avons créé une version miniature du fauteuil STRANDMON, l'un de nos produits favoris.</subtitle>
<button>
<text>Acheter en ligne</text>
<url>https://serv-api.target2sell.com/1.1/R/cookie/OFCBMN5RRHSG5L/1200/OFCBMN5RRHSG5L-1200-5/20343224/1/viewTogether-%7BtypeOfContextList%3A%5B%22current%22%2C%22view%22%5D%7D/f082e51f-561d-47f7-c0cb-13735e58bfc1</url>
</button>
</card>""")
self.assertEqual(renderer._userid, "testuser")
self.assertIsNotNone(renderer._card)
self.assertEqual("card", renderer._card['type'])
self.assertEqual(renderer._card['image'], "https://www.ikea.com/fr/fr/images/products/strandmon-fauteuil-enfant-gris__0574584_PE668407_S4.JPG")
self.assertEqual(renderer._card['title'], "Fauteuil enfant, Visslegris")
self.assertEqual(renderer._card['subtitle'], "Quand ils peuvent imiter les adultes, les enfants sesentent spéciaux et importants. C'est pourquoi nous avons créé une version miniature du fauteuil STRANDMON, l'un de nos produits favoris.")
self.assertEqual(len(renderer._card['buttons']), 1)
button1 = renderer._card['buttons'][0]
self.assertEqual("button", button1['type'])
self.assertEqual(button1['text'], "Acheter en ligne")
self.assertEqual(button1['url'], "https://serv-api.target2sell.com/1.1/R/cookie/OFCBMN5RRHSG5L/1200/OFCBMN5RRHSG5L-1200-5/20343224/1/viewTogether-%7BtypeOfContextList%3A%5B%22current%22%2C%22view%22%5D%7D/f082e51f-561d-47f7-c0cb-13735e58bfc1")
|
the-stack_0_15311 | # coding: utf-8
from __future__ import unicode_literals
import re
from .common import InfoExtractor
from ..compat import (
compat_urllib_request,
)
from ..utils import (
parse_iso8601,
)
class SportDeutschlandIE(InfoExtractor):
_VALID_URL = r'https?://sportdeutschland\.tv/(?P<sport>[^/?#]+)/(?P<id>[^?#/]+)(?:$|[?#])'
_TESTS = [{
'url': 'http://sportdeutschland.tv/badminton/live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
'info_dict': {
'id': 'live-li-ning-badminton-weltmeisterschaft-2014-kopenhagen',
'ext': 'mp4',
'title': 're:Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen',
'categories': ['Badminton'],
'view_count': int,
'thumbnail': 're:^https?://.*\.jpg$',
'description': 're:Die Badminton-WM 2014 aus Kopenhagen bei Sportdeutschland\.TV',
'timestamp': int,
'upload_date': 're:^201408[23][0-9]$',
},
'params': {
'skip_download': 'Live stream',
},
}, {
'url': 'http://sportdeutschland.tv/li-ning-badminton-wm-2014/lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
'info_dict': {
'id': 'lee-li-ning-badminton-weltmeisterschaft-2014-kopenhagen-herren-einzel-wei-vs',
'ext': 'mp4',
'upload_date': '20140825',
'description': 'md5:60a20536b57cee7d9a4ec005e8687504',
'timestamp': 1408976060,
'title': 'Li-Ning Badminton Weltmeisterschaft 2014 Kopenhagen: Herren Einzel, Wei Lee vs. Keun Lee',
'thumbnail': 're:^https?://.*\.jpg$',
'view_count': int,
'categories': ['Li-Ning Badminton WM 2014'],
}
}]
def _real_extract(self, url):
mobj = re.match(self._VALID_URL, url)
video_id = mobj.group('id')
sport_id = mobj.group('sport')
api_url = 'http://splink.tv/api/permalinks/%s/%s' % (
sport_id, video_id)
req = compat_urllib_request.Request(api_url, headers={
'Accept': 'application/vnd.vidibus.v2.html+json',
'Referer': url,
})
data = self._download_json(req, video_id)
categories = list(data.get('section', {}).get('tags', {}).values())
asset = data['asset']
assets_info = self._download_json(asset['url'], video_id)
formats = []
smil_url = assets_info['video']
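        # A SMIL playlist is resolved into HLS (m3u8) and RTMP formats; a plain
        # video URL is used directly.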
if '.smil' in smil_url:
m3u8_url = smil_url.replace('.smil', '.m3u8')
formats.extend(
self._extract_m3u8_formats(m3u8_url, video_id, ext='mp4'))
smil_doc = self._download_xml(
smil_url, video_id, note='Downloading SMIL metadata')
base_url = smil_doc.find('./head/meta').attrib['base']
formats.extend([{
'format_id': 'rmtp',
'url': base_url,
'play_path': n.attrib['src'],
'ext': 'flv',
'preference': -100,
'format_note': 'Seems to fail at example stream',
} for n in smil_doc.findall('./body/video')])
else:
formats.append({'url': smil_url})
self._sort_formats(formats)
return {
'id': video_id,
'formats': formats,
'title': asset['title'],
'thumbnail': asset.get('image'),
'description': asset.get('teaser'),
'categories': categories,
'view_count': asset.get('views'),
'rtmp_live': asset.get('live'),
'timestamp': parse_iso8601(asset.get('date')),
}
|
the-stack_0_15313 | import os, setuptools
dir_path = os.path.dirname(os.path.realpath(__file__))
with open(os.path.join(dir_path, 'requirements.txt')) as f:
required_packages = f.read().splitlines()
with open(os.path.join(dir_path, 'README.md'), "r") as fh:
long_description = fh.read()
setuptools.setup(
name='FINE',
version='1.0.0',
author='Lara Welder',
author_email='[email protected]',
description='Framework for integrated energy systems assessment',
long_description=long_description,
long_description_content_type="text/markdown",
url='https://github.com/FZJ-IEK3-VSA/FINE',
include_package_data=True,
packages=setuptools.find_packages(),
install_requires=required_packages,
setup_requires=['setuptools-git'],
classifiers=[
'Development Status :: 4 - Beta',
'Intended Audience :: End Users/Desktop',
'Intended Audience :: Science/Research',
'License :: OSI Approved :: MIT License',
'Natural Language :: English',
'Operating System :: OS Independent',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.4',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Topic :: Scientific/Engineering :: Mathematics',
'Topic :: Software Development :: Libraries :: Python Modules'],
keywords=['energy system', 'optimization'],
)
|
the-stack_0_15314 | # import pemfc_dash
# import pemfc_dash.main
from pemfc_dash.main import app
server = app.server
if __name__ == "__main__":
# [print(num, x) for num, x in enumerate(dl.ID_LIST) ]
app.run_server(debug=True, use_reloader=False)
# app.run_server(debug=True, use_reloader=False,
# host="0.0.0.0", port=int(os.environ.get("PORT", 8080)))
|
the-stack_0_15315 | import re
from rest_framework import serializers
from rest_framework.validators import UniqueTogetherValidator
from api.helpers.utils import StatusChoices
from users.serializers import UserSerializer
from flights.serializers import FlightSerializer
from .models import Booking
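# A valid ticket number is exactly six alphanumeric characters.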
def is_valid_ticket(value):
if re.search(r"^[a-zA-Z0-9]{6}$", value) is None:
raise serializers.ValidationError('Ticket number invalid please provide a valid ticket')
class BookingSerializer(serializers.ModelSerializer):
"""Booking serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
class Meta:
model = Booking
fields = '__all__'
validators = [
UniqueTogetherValidator(
queryset=Booking.objects.all(),
fields=('flight_id', 'passenger_id'),
message='Ticket already booked'
)
]
extra_kwargs = {'flight_status': {'read_only': True}}
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['flight_id'].error_messages[
'does_not_exist'] = 'Flight with the id "{pk_value}" does not exist'
class TicketSerializer(serializers.ModelSerializer):
"""Ticket serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
flight_status = serializers.CharField(source='get_flight_status_display')
passenger = UserSerializer(read_only=True, source='passenger_id')
flight = FlightSerializer(read_only=True, source='flight_id')
class Meta:
model = Booking
exclude = ('flight_id', 'passenger_id')
class TicketStatusSerializer(serializers.ModelSerializer):
"""Ticket status serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
ticket = serializers.CharField(write_only=True,
source='ticket_number',
validators=[is_valid_ticket])
ticket_number = serializers.CharField(read_only=True)
class Meta:
model = Booking
fields = ('ticket_number', 'ticket')
class TicketReservationSerializer(serializers.ModelSerializer):
"""Ticket reservation serializer
Arguments:
ModelSerializer {serializer} -- rest framework model serializer
"""
class Meta:
model = Booking
fields = ('flight_status', 'amount_paid', 'reserved_at')
def validate(self, data):
        if 'amount_paid' not in data:
raise serializers.ValidationError('Field amount paid is required')
if data['amount_paid'] != self.instance.flight_id.flight_cost.amount:
raise serializers.ValidationError('Amount paid is not equal to the flight cost')
return data
class BookingReservationsSerializer(serializers.ModelSerializer):
date = serializers.DateField(required=True, write_only=True)
status = serializers.ChoiceField(required=True,
write_only=True,
choices=[(choice.value, choice.name)
for choice in StatusChoices])
flight_status = serializers.CharField(read_only=True, source='get_flight_status_display')
class Meta:
model = Booking
fields = '__all__'
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
for field in self.fields:
if field not in ('date', 'status'):
self.fields[field].read_only = True
|
the-stack_0_15316 | """Tests for the Battery data frame"""
import json
import os
import h5py
import pandas as pd
from pandas import HDFStore
from pytest import fixture
from batdata.data import BatteryDataset
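# Minimal three-row dataset shared by the serialization tests below.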
@fixture()
def test_df():
return BatteryDataset(raw_data=pd.DataFrame({
'current': [1, 0, -1],
'voltage': [2, 2, 2]
}), metadata={'name': 'Test data'})
def test_write_hdf(tmpdir, test_df):
"""Test whether the contents of the HDF5 file are reasonably understandable"""
# Write the HDF file
out_path = os.path.join(tmpdir, 'test.h5')
test_df.to_batdata_hdf(out_path)
# Investigate the contents
with h5py.File(out_path) as f:
assert 'metadata' in f.attrs
assert json.loads(f.attrs['metadata'])['name'] == 'Test data'
assert 'raw_data' in f
# Test writing to an already-open HDFStore
store = HDFStore(out_path, 'r+')
test_df.to_batdata_hdf(store)
def test_read_hdf(tmpdir, test_df):
# Write it
out_path = os.path.join(tmpdir, 'test.h5')
test_df.to_batdata_hdf(out_path)
# Test reading only the metadata
metadata = BatteryDataset.get_metadata_from_hdf5(out_path)
assert metadata.name == 'Test data'
# Read it
data = BatteryDataset.from_batdata_hdf(out_path)
assert data.metadata.name == 'Test data'
# Test reading from an already-open file
store = HDFStore(out_path, 'r')
data = BatteryDataset.from_batdata_hdf(store)
assert data.metadata.name == 'Test data'
def test_dict(test_df):
# Test writing it
d = test_df.to_batdata_dict()
assert d['metadata']['name'] == 'Test data'
assert 'raw_data' in d
# Test reading it
data = BatteryDataset.from_batdata_dict(d)
assert len(data.raw_data) == 3
assert data.metadata.name == 'Test data'
|
the-stack_0_15320 | import os
import re
import tempfile
import pytest
from analyzer import util
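# Representative Stack Overflow comment (with CSV-style doubled quotes) used as
# surrounding noise in several sanitization tests.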
comment = (
'@S.Jovan The expected result should look sth. like this:\n[\n{ ""key1"": str10, ""key2"": str20, ""key3"": str30 },\n{ ""key1"": str11, ""key2"": str21, ""key3"": str31 },\n{ ""key1"": str12, ""key2"": str22, ""key3"": str32 },\n...'
)
PREDICTIONS_SAMPLE = os.path.abspath(
os.path.join(
os.path.dirname(__file__), 'sanitized_comments_predictions.csv'))
NEG_COUNTS = 21
POS_COUNTS = 21
NEUTRAL_COUNTS = 166
def setup_function(function):
global post_base_text, post_expected_text, code_segment, pre_segment, blockquote_segment
post_base_text = "Hi, I have a problem. Here is my code:{}{}{}Can anyone help me?"
code_segment = "<code> for i in range(10):\n print(10)\n#wupwup!</code>"
pre_segment = "<pre> for val in elems:\n\n\n #do something\nprint(val)</pre>"
blockquote_segment = r"<blockquote>Gzipped data: \x1f\x8b\x08\x00\xf9w[Y\x02\xff%\x8e=\x0e\xc30\x08F\xaf\x82\x98\x91\x05\xe6\xc7\xa6c\xf7\x9e\xa0\xca\x96\xa5[\x86lQ\xee^\xdcN\xf0\xf4\xc1\x83\x0b?\xf8\x00|=\xe7D\x02<\n\xde\x17\xee\xab\xb85%\x82L\x02\xcb\xa6N\xa0\x7fri\xae\xd5K\xe1$\xe83\xc3\x08\x86Z\x81\xa9g-y\x88\xf6\x9a\xf5E\xde\x99\x7f\x96\xb1\xd5\x99\xb3\xfcb\x99\x121D\x1bG\xe7^.\xdcWPO\xdc\xdb\xfd\x05\x0ev\x15\x1d\x99\x00\x00\x00</blockquote>"
def test_sanitize_post_md_code_pattern_is_not_greedy():
"""Test that the markdown code pattern does not remove too much."""
post = ("`this is code` but a greedy```other code``` pattern\nwould remove"
"`this whole post`"
"```along with``` this as well```hehe```")
expected = "but a greedy pattern would remove this as well"
sanitized = util.sanitize_post(post)
assert sanitized == expected
def test_sanitize_post_replaces_all_whitespace_with_single_spaces():
sanitized = util.sanitize_post(
post_base_text.format(code_segment, pre_segment, blockquote_segment))
counter = 0
for ws in re.findall('\s+', sanitized):
counter += 1
assert ws == ' '
assert counter # meta assert
def test_sanitize_post_removes_url():
https_url = "https://hello.world#aweseaf45we23.com"
http_url = "http://blabla.com#badonk"
c = "{} and other stuff {} awesome donk {}\n\nhurrdurr".format(
comment, https_url, http_url)
sanitized = util.sanitize_post(c)
assert https_url not in sanitized
assert http_url not in sanitized
def test_sanitize_post_removes_single_backtick_code():
markdown_code = '`for i in range(10):\n print(i)`'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_post(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_post_removes_triple_backtick_code():
markdown_code = '```for i in range(10):\n print(i)```'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_post(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_post_removes_blockquote_segments():
text = post_base_text.format(blockquote_segment, "\n", "")
expected_text = post_base_text.format("", " ", "")
sanitized = util.sanitize_post(text)
assert sanitized == expected_text
def test_sanitize_post_removes_linefeeds():
text = "This is a text with \r\n some \u2028 nbbbb \u2029 random \n linefeeds \r and carriege returns \r\n hello \n"
sanitized = util.sanitize_post(text)
assert '\n' not in sanitized
assert '\r' not in sanitized
assert '\u2028' not in sanitized
assert '\u2029' not in sanitized
def test_sanitize_post_removes_code_segments():
text = post_base_text.format("\n", code_segment, "\n")
# the two newlines are replaced with single space
expected_text = post_base_text.format(" ", "", "")
res = util.sanitize_post(text)
assert res == expected_text
def test_sanitize_post_removes_pre_segments():
text = post_base_text.format("\n", pre_segment, "\n")
# the two newlines are replaced with single space
expected_text = post_base_text.format(" ", "", "")
res = util.sanitize_post(text)
assert res == expected_text
def test_sanitize_post_removes_code_pre_and_tags():
text = post_base_text.format("</a href=https://url.com>", code_segment,
pre_segment)
expected_text = post_base_text.format("", "", "")
res = util.sanitize_post(text)
assert res == expected_text
@pytest.mark.timeout(0.2)
def test_sanitize_post_handles_tag_case_mismatch():
"""Previous version of sanitize post froze due to case mismatch in tags.
    In this particular case, it was the <pre> ... </prE> that caused exponential
backtracking (we think) to kick in.
"""
text =\
'''<p><em>"I didn't like this because I have only two C files and it seemed very odd to split the source base at the language level like this"</em></p>
<p>Why does it seem odd? Consider this project:</p>
<pre>
project1\src\java
project1\src\cpp
project1\src\python
</pre>
<p>Or, if you decide to split things up into modules:</p>
<p><pre>
project1\module1\src\java
project1\module1\src\cpp
project1\module2\src\java
project1\module2\src\python
</prE></p>
<p>I guess it's a matter of personal taste, but the above structure is fairly common, and I think it works quite well once you get used to it.</p>'''
util.sanitize_post(text)
def test_sanitize_comment_replaces_all_whitespace_with_single_spaces():
sanitized = util.sanitize_comment(comment)
counter = 0
    for ws in re.findall(r'\s+', sanitized):
counter += 1
assert ws == ' '
assert counter # meta assert
def test_sanitize_comment_removes_url():
https_url = "https://hello.world#aweseaf45we23.com"
http_url = "http://blabla.com#badonk"
c = "{} and other stuff {} awesome donk {}\n\nhurrdurr".format(
comment, https_url, http_url)
sanitized = util.sanitize_comment(c)
assert https_url not in sanitized
assert http_url not in sanitized
def test_sanitize_comment_leaves_user_mentions():
sanitized = util.sanitize_comment(comment)
assert '@S.Jovan' in sanitized
def test_sanitize_comment_strips_leading_and_trailing_ws():
text = " there is leading whitespace here <code>some\ncode</code> "
sanitized = util.sanitize_comment(text)
assert sanitized == sanitized.strip()
def test_sanitize_comment_removes_single_backtick_code():
markdown_code = '`for i in range(10):\n print(i)`'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_comment(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_comment_removes_triple_backtick_code():
markdown_code = '```for i in range(10):\n print(i)```'
c = "{} blablabla bla 234 d23r23 {}\nAnd just the finishing touch.".format(
comment, markdown_code)
sanitized = util.sanitize_comment(c)
assert markdown_code not in sanitized
assert '`' not in sanitized
# and some subpatterns
assert 'for i in range' not in sanitized
assert 'range(10)' not in sanitized
def test_sanitize_comment_removes_markdown_formatting():
random_md = "This is ```for i in range(t)``` just a **test** to see that _some_ `inline code` and **other\nmarkdown** stuff is removed."
sanitized_md = "This is just a test to see that some and other markdown stuff is removed."
text = post_base_text.format("", random_md, "")
expected = post_base_text.format("", sanitized_md, "")
sanitized = util.sanitize_comment(text)
assert sanitized == expected
def test_sanitize_real_post():
"""Test sanitizing a real post (answer) from SO, authored by Simon Larsén."""
text =\
"""<p>You can do this in just two lines.</p>
<pre><code>with open('path/to/file') as f:
line_lists = [list(line.strip()) for line in f]
</code></pre>
<p><code>list</code> on a <code>str</code> object will return a list where each character is an element (as a <code>char</code>). <code>line</code> is stripped first, which removes leading and trailing whitespace. This is assuming that you actually want the characters as <code>char</code>. If you want them parsed to <code>int</code>, this will work:</p>
<pre><code>with open('path/to/file') as f:
line_lists = [[int(x) for x in line.strip()] for line in f]
</code></pre>
<p>Mind you that there should be some error checking here, the above example will crash if any of the characters cannot be parsed to int.</p>
"""
expected = "You can do this in just two lines. on a object will return a list where each character is an element (as a ). is stripped first, which removes leading and trailing whitespace. This is assuming that you actually want the characters as . If you want them parsed to , this will work: Mind you that there should be some error checking here, the above example will crash if any of the characters cannot be parsed to int."
sanitized = util.sanitize_post(text)
assert sanitized == expected
def test_yield_batches():
expected = [[0, 1, 2], [3, 4, 5], [6, 7, 8]]
it = (i for i in range(9))
actual = [batch for batch in util.yield_batches(it, 3)]
assert actual == expected
|
the-stack_0_15321 | """Holder for the (test kind, list of tests) pair with additional metadata their execution."""
from __future__ import absolute_import
import itertools
import threading
import time
from . import report as _report
from . import summary as _summary
from .. import config as _config
from .. import selector as _selector
def synchronized(method):
"""Provide decorator to enfore instance lock ownership when calling the method."""
def synced(self, *args, **kwargs):
"""Sync an instance lock."""
lock = getattr(self, "_lock")
with lock:
return method(self, *args, **kwargs)
return synced
class Suite(object): # pylint: disable=too-many-instance-attributes
"""A suite of tests of a particular kind (e.g. C++ unit tests, dbtests, jstests)."""
def __init__(self, suite_name, suite_config, suite_options=_config.SuiteOptions.ALL_INHERITED):
"""Initialize the suite with the specified name and configuration."""
self._lock = threading.RLock()
self._suite_name = suite_name
self._suite_config = suite_config
self._suite_options = suite_options
self.test_kind = self.get_test_kind_config()
self.tests, self.excluded = self._get_tests_for_kind(self.test_kind)
self.return_code = None # Set by the executor.
self._suite_start_time = None
self._suite_end_time = None
self._test_start_times = []
self._test_end_times = []
self._reports = []
# We keep a reference to the TestReports from the currently running jobs so that we can
# report intermediate results.
self._partial_reports = None
def _get_tests_for_kind(self, test_kind):
"""Return the tests to run based on the 'test_kind'-specific filtering policy."""
selector_config = self.get_selector_config()
# The mongos_test doesn't have to filter anything, the selector_config is just the
# arguments to the mongos program to be used as the test case.
if test_kind == "mongos_test":
mongos_options = selector_config # Just for easier reading.
if not isinstance(mongos_options, dict):
raise TypeError("Expected dictionary of arguments to mongos")
return [mongos_options], []
return _selector.filter_tests(test_kind, selector_config)
def get_name(self):
"""Return the name of the test suite."""
return self._suite_name
def get_display_name(self):
"""Return the name of the test suite with a unique identifier for its SuiteOptions."""
if self.options.description is None:
return self.get_name()
return "{} ({})".format(self.get_name(), self.options.description)
def get_selector_config(self):
"""Return the "selector" section of the YAML configuration."""
if "selector" not in self._suite_config:
return {}
selector = self._suite_config["selector"].copy()
if self.options.include_tags is not None:
if "include_tags" in selector:
selector["include_tags"] = {
"$allOf": [
selector["include_tags"],
self.options.include_tags,
]
}
elif "exclude_tags" in selector:
selector["exclude_tags"] = {
"$anyOf": [
selector["exclude_tags"],
{"$not": self.options.include_tags},
]
}
else:
selector["include_tags"] = self.options.include_tags
return selector
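    # Illustrative example (not from the original file) of the merge above: with
    # a YAML selector of {"include_tags": "tagA"} and
    # self.options.include_tags == "tagB", this method returns
    #     {"include_tags": {"$allOf": ["tagA", "tagB"]}}
    # whereas a YAML selector of only {"exclude_tags": "tagC"} becomes
    #     {"exclude_tags": {"$anyOf": ["tagC", {"$not": "tagB"}]}}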
def get_executor_config(self):
"""Return the "executor" section of the YAML configuration."""
return self._suite_config["executor"]
def get_test_kind_config(self):
"""Return the "test_kind" section of the YAML configuration."""
return self._suite_config["test_kind"]
@property
def options(self):
"""Get the options."""
return self._suite_options.resolve()
def with_options(self, suite_options):
"""Return a Suite instance with the specified resmokelib.config.SuiteOptions."""
return Suite(self._suite_name, self._suite_config, suite_options)
@synchronized
def record_suite_start(self):
"""Record the start time of the suite."""
self._suite_start_time = time.time()
@synchronized
def record_suite_end(self):
"""Record the end time of the suite."""
self._suite_end_time = time.time()
@synchronized
def record_test_start(self, partial_reports):
"""Record the start time of an execution.
The result is stored in the TestReports for currently running jobs.
"""
self._test_start_times.append(time.time())
self._partial_reports = partial_reports
@synchronized
def record_test_end(self, report):
"""Record the end time of an execution."""
self._test_end_times.append(time.time())
self._reports.append(report)
self._partial_reports = None
@synchronized
def get_active_report(self):
"""Return the partial report of the currently running execution, if there is one."""
if not self._partial_reports:
return None
return _report.TestReport.combine(*self._partial_reports)
@synchronized
def get_reports(self):
"""Return the list of reports.
If there's an execution currently in progress, then a report for the partial results
is included in the returned list.
"""
if self._partial_reports is not None:
return self._reports + [self.get_active_report()]
return self._reports
@synchronized
def summarize(self, sb):
"""Append a summary of the suite onto the string builder 'sb'."""
if not self._reports and not self._partial_reports:
sb.append("No tests ran.")
summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
elif not self._reports and self._partial_reports:
summary = self.summarize_latest(sb)
elif len(self._reports) == 1 and not self._partial_reports:
summary = self._summarize_execution(0, sb)
else:
summary = self._summarize_repeated(sb)
summarized_group = " %ss: %s" % (self.test_kind, "\n ".join(sb))
if summary.num_run == 0:
sb.append("Suite did not run any tests.")
return
# Override the 'time_taken' attribute of the summary if we have more accurate timing
# information available.
if self._suite_start_time is not None and self._suite_end_time is not None:
time_taken = self._suite_end_time - self._suite_start_time
summary = summary._replace(time_taken=time_taken)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
sb.append(summarized_group)
@synchronized
def summarize_latest(self, sb):
"""Return a summary of the latest execution of the suite.
Also append a summary of that execution onto the string builder 'sb'.
If there's an execution currently in progress, then the partial
summary of that execution is appended to 'sb'.
"""
if self._partial_reports is None:
return self._summarize_execution(-1, sb)
active_report = _report.TestReport.combine(*self._partial_reports)
# Use the current time as the time that this suite finished running.
end_time = time.time()
return self._summarize_report(active_report, self._test_start_times[-1], end_time, sb)
def _summarize_repeated(self, sb):
"""Return the summary information of all executions.
Also append each execution's summary onto the string builder 'sb' and
information of how many repetitions there were.
"""
reports = self.get_reports() # Also includes the combined partial reports.
num_iterations = len(reports)
start_times = self._test_start_times[:]
end_times = self._test_end_times[:]
if self._partial_reports:
end_times.append(time.time()) # Add an end time in this copy for the partial reports.
total_time_taken = end_times[-1] - start_times[0]
sb.append("Executed %d times in %0.2f seconds:" % (num_iterations, total_time_taken))
combined_summary = _summary.Summary(0, 0.0, 0, 0, 0, 0)
for iteration in xrange(num_iterations):
# Summarize each execution as a bulleted list of results.
bulleter_sb = []
summary = self._summarize_report(reports[iteration], start_times[iteration],
end_times[iteration], bulleter_sb)
combined_summary = _summary.combine(combined_summary, summary)
for (i, line) in enumerate(bulleter_sb):
# Only bullet first line, indent others.
prefix = "* " if i == 0 else " "
sb.append(prefix + line)
return combined_summary
def _summarize_execution(self, iteration, sb):
"""Return the summary information of the execution given by 'iteration'.
Also append a summary of that execution onto the string builder 'sb'.
"""
return self._summarize_report(self._reports[iteration], self._test_start_times[iteration],
self._test_end_times[iteration], sb)
def _summarize_report(self, report, start_time, end_time, sb):
"""Return the summary information of the execution.
The summary is for 'report' that started at 'start_time' and finished at 'end_time'.
Also append a summary of that execution onto the string builder 'sb'.
"""
time_taken = end_time - start_time
# Tests that were interrupted are treated as failures because (1) the test has already been
# started and therefore isn't skipped and (2) the test has yet to finish and therefore
# cannot be said to have succeeded.
num_failed = report.num_failed + report.num_interrupted
num_run = report.num_succeeded + report.num_errored + num_failed
num_skipped = len(self.tests) + report.num_dynamic - num_run
if report.num_succeeded == num_run and num_skipped == 0:
sb.append("All %d test(s) passed in %0.2f seconds." % (num_run, time_taken))
return _summary.Summary(num_run, time_taken, num_run, 0, 0, 0)
summary = _summary.Summary(num_run, time_taken, report.num_succeeded, num_skipped,
num_failed, report.num_errored)
sb.append("%d test(s) ran in %0.2f seconds"
" (%d succeeded, %d were skipped, %d failed, %d errored)" % summary)
if num_failed > 0:
sb.append("The following tests failed (with exit code):")
for test_info in itertools.chain(report.get_failed(), report.get_interrupted()):
sb.append(" %s (%d)" % (test_info.test_id, test_info.return_code))
if report.num_errored > 0:
sb.append("The following tests had errors:")
for test_info in report.get_errored():
sb.append(" %s" % (test_info.test_id))
return summary
@staticmethod
def log_summaries(logger, suites, time_taken):
"""Log summary of all suites."""
sb = []
sb.append("Summary of all suites: %d suites ran in %0.2f seconds" % (len(suites),
time_taken))
for suite in suites:
suite_sb = []
suite.summarize(suite_sb)
sb.append(" %s: %s" % (suite.get_display_name(), "\n ".join(suite_sb)))
logger.info("=" * 80)
logger.info("\n".join(sb))
|
the-stack_0_15324 | # Copyright 2020 The Cirq Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Code for generating random quantum circuits."""
import dataclasses
import itertools
from typing import (
Any,
Callable,
Container,
Dict,
Iterable,
List,
Sequence,
TYPE_CHECKING,
Tuple,
Union,
Optional,
cast,
Iterator,
)
import networkx as nx
import numpy as np
from cirq import circuits, devices, ops, protocols, value
from cirq._doc import document
if TYPE_CHECKING:
import cirq
QidPairT = Tuple['cirq.Qid', 'cirq.Qid']
GridQubitPairT = Tuple['cirq.GridQubit', 'cirq.GridQubit']
@dataclasses.dataclass(frozen=True)
class GridInteractionLayer(Container[GridQubitPairT]):
"""A layer of aligned or staggered two-qubit interactions on a grid.
Layers of this type have two different basic structures,
aligned:
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
*-* *-* *-*
and staggered:
*-* *-* *-*
* *-* *-* *
*-* *-* *-*
* *-* *-* *
*-* *-* *-*
* *-* *-* *
Other variants are obtained by offsetting these lattices to the right by
some number of columns, and/or transposing into the vertical orientation.
There are a total of 4 aligned and 4 staggered variants.
The 2x2 unit cells for the aligned and staggered versions of this layer
are, respectively:
*-*
*-*
and
*-*
* *-
with left/top qubits at (0, 0) and (1, 0) in the aligned case, or
(0, 0) and (1, 1) in the staggered case. Other variants have the same unit
cells after transposing and offsetting.
Args:
col_offset: Number of columns by which to shift the basic lattice.
vertical: Whether gates should be oriented vertically rather than
horizontally.
stagger: Whether to stagger gates in neighboring rows.
"""
col_offset: int = 0
vertical: bool = False
stagger: bool = False
def __contains__(self, pair) -> bool:
"""Checks whether a pair is in this layer."""
if self.vertical:
# Transpose row, col coords for vertical orientation.
a, b = pair
pair = devices.GridQubit(a.col, a.row), devices.GridQubit(b.col, b.row)
a, b = sorted(pair)
# qubits should be 1 column apart.
if (a.row != b.row) or (b.col != a.col + 1):
return False
# mod to get the position in the 2 x 2 unit cell with column offset.
pos = a.row % 2, (a.col - self.col_offset) % 2
return pos == (0, 0) or pos == (1, self.stagger)
def _json_dict_(self) -> Dict[str, Any]:
return protocols.obj_to_dict_helper(self, ['col_offset', 'vertical', 'stagger'])
def __repr__(self) -> str:
return (
'cirq.experiments.GridInteractionLayer('
f'col_offset={self.col_offset}, '
f'vertical={self.vertical}, '
f'stagger={self.stagger})'
)
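# Illustrative usage sketch (assumed coordinates, not part of the original
# module): checking which horizontally adjacent pairs a staggered layer
# activates.
#
#     layer = GridInteractionLayer(col_offset=0, vertical=False, stagger=True)
#     (cirq.GridQubit(0, 0), cirq.GridQubit(0, 1)) in layer  # True, (0, 0) slot
#     (cirq.GridQubit(1, 1), cirq.GridQubit(1, 2)) in layer  # True, row 1 is shifted
#     (cirq.GridQubit(1, 0), cirq.GridQubit(1, 1)) in layer  # False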
GRID_STAGGERED_PATTERN = (
GridInteractionLayer(col_offset=0, vertical=True, stagger=True), # A
GridInteractionLayer(col_offset=1, vertical=True, stagger=True), # B
GridInteractionLayer(col_offset=1, vertical=False, stagger=True), # C
GridInteractionLayer(col_offset=0, vertical=False, stagger=True), # D
GridInteractionLayer(col_offset=1, vertical=False, stagger=True), # C
GridInteractionLayer(col_offset=0, vertical=False, stagger=True), # D
GridInteractionLayer(col_offset=0, vertical=True, stagger=True), # A
GridInteractionLayer(col_offset=1, vertical=True, stagger=True), # B
)
document(
GRID_STAGGERED_PATTERN,
"""A pattern of two-qubit gates that is hard to simulate.
This pattern of gates was used in the paper
https://www.nature.com/articles/s41586-019-1666-5
to demonstrate quantum supremacy.
""",
)
HALF_GRID_STAGGERED_PATTERN = (
GridInteractionLayer(col_offset=0, vertical=True, stagger=True), # A
GridInteractionLayer(col_offset=1, vertical=True, stagger=True), # B
GridInteractionLayer(col_offset=1, vertical=False, stagger=True), # C
GridInteractionLayer(col_offset=0, vertical=False, stagger=True), # D
)
document(
HALF_GRID_STAGGERED_PATTERN,
"""A pattern that is half of GRID_STAGGERED_PATTERN.
    It activates each link in a grid once in a staggered way, which permits
easier simulation.
""",
)
GRID_ALIGNED_PATTERN = (
GridInteractionLayer(col_offset=0, vertical=False, stagger=False), # E
GridInteractionLayer(col_offset=1, vertical=False, stagger=False), # F
GridInteractionLayer(col_offset=0, vertical=True, stagger=False), # G
GridInteractionLayer(col_offset=1, vertical=True, stagger=False), # H
)
document(
GRID_ALIGNED_PATTERN,
"""A pattern of two-qubit gates that is easy to simulate.
This pattern of gates was used in the paper
https://www.nature.com/articles/s41586-019-1666-5
to evaluate the performance of a quantum computer.
""",
)
def random_rotations_between_two_qubit_circuit(
q0: 'cirq.Qid',
q1: 'cirq.Qid',
depth: int,
two_qubit_op_factory: Callable[
['cirq.Qid', 'cirq.Qid', 'np.random.RandomState'], 'cirq.OP_TREE'
] = lambda a, b, _: ops.CZPowGate()(a, b),
single_qubit_gates: Sequence['cirq.Gate'] = (
ops.X ** 0.5,
ops.Y ** 0.5,
ops.PhasedXPowGate(phase_exponent=0.25, exponent=0.5),
),
add_final_single_qubit_layer: bool = True,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> 'cirq.Circuit':
"""Generate a random two-qubit quantum circuit.
This construction uses a similar structure to those in the paper
https://www.nature.com/articles/s41586-019-1666-5.
The generated circuit consists of a number of "cycles", this number being
specified by `depth`. Each cycle is actually composed of two sub-layers:
a layer of single-qubit gates followed by a layer of two-qubit gates,
controlled by their respective arguments, see below.
Args:
q0: The first qubit
q1: The second qubit
depth: The number of cycles.
two_qubit_op_factory: A callable that returns a two-qubit operation.
These operations will be generated with calls of the form
`two_qubit_op_factory(q0, q1, prng)`, where `prng` is the
pseudorandom number generator.
single_qubit_gates: Single-qubit gates are selected randomly from this
sequence. No qubit is acted upon by the same single-qubit gate in
consecutive cycles. If only one choice of single-qubit gate is
given, then this constraint is not enforced.
add_final_single_qubit_layer: Whether to include a final layer of
single-qubit gates after the last cycle (subject to the same
non-consecutivity constraint).
seed: A seed or random state to use for the pseudorandom number
generator.
"""
prng = value.parse_random_state(seed)
circuit = circuits.Circuit()
previous_single_qubit_layer = circuits.Moment()
single_qubit_layer_factory = _single_qubit_gates_arg_to_factory(
single_qubit_gates=single_qubit_gates, qubits=(q0, q1), prng=prng
)
for _ in range(depth):
single_qubit_layer = single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
circuit += single_qubit_layer
circuit += two_qubit_op_factory(q0, q1, prng)
previous_single_qubit_layer = single_qubit_layer
if add_final_single_qubit_layer:
circuit += single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
return circuit
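# Illustrative usage sketch (assumed qubits and depth, not part of the original
# module):
#
#     q0, q1 = cirq.LineQubit.range(2)
#     circuit = random_rotations_between_two_qubit_circuit(q0, q1, depth=4, seed=1234)
#     # Expected layout: each cycle contributes a single-qubit moment followed by
#     # a CZ moment, plus one final single-qubit layer (2 * depth + 1 moments).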
def generate_library_of_2q_circuits(
n_library_circuits: int,
two_qubit_gate: 'cirq.Gate',
*,
max_cycle_depth: int = 100,
q0: 'cirq.Qid' = devices.LineQubit(0),
q1: 'cirq.Qid' = devices.LineQubit(1),
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List['cirq.Circuit']:
"""Generate a library of two-qubit Circuits.
For single-qubit gates, this uses PhasedXZGates where the axis-in-XY-plane is one
of eight eighth turns and the Z rotation angle is one of eight eighth turns. This
provides 8*8=64 total choices, each implementable with one PhasedXZGate. This is
appropriate for architectures with microwave single-qubit control.
Args:
n_library_circuits: The number of circuits to generate.
two_qubit_gate: The two qubit gate to use in the circuits.
max_cycle_depth: The maximum cycle_depth in the circuits to generate. If you are using XEB,
this must be greater than or equal to the maximum value in `cycle_depths`.
q0: The first qubit to use when constructing the circuits.
q1: The second qubit to use when constructing the circuits
random_state: A random state or seed used to deterministically sample the random circuits.
"""
rs = value.parse_random_state(random_state)
exponents = np.linspace(0, 7 / 4, 8)
single_qubit_gates = [
ops.PhasedXZGate(x_exponent=0.5, z_exponent=z, axis_phase_exponent=a)
for a, z in itertools.product(exponents, repeat=2)
]
return [
random_rotations_between_two_qubit_circuit(
q0,
q1,
depth=max_cycle_depth,
two_qubit_op_factory=lambda a, b, _: two_qubit_gate(a, b),
single_qubit_gates=single_qubit_gates,
seed=rs,
)
for _ in range(n_library_circuits)
]
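# Illustrative usage sketch (assumed parameters, not part of the original
# module): a small library of sqrt-iSWAP circuits for parallel XEB.
#
#     library = generate_library_of_2q_circuits(
#         n_library_circuits=20,
#         two_qubit_gate=cirq.ISWAP ** 0.5,
#         max_cycle_depth=50,
#         random_state=52,
#     )
#     # Each entry is a cirq.Circuit on cirq.LineQubit(0) and cirq.LineQubit(1).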
def _get_active_pairs(graph: nx.Graph, grid_layer: GridInteractionLayer):
"""Extract pairs of qubits from a device graph and a GridInteractionLayer."""
for edge in graph.edges:
if edge in grid_layer:
yield edge
@dataclasses.dataclass(frozen=True)
class CircuitLibraryCombination:
"""For a given layer (specifically, a set of pairs of qubits), `combinations` is a 2d array
of shape (n_combinations, len(pairs)) where each row represents a combination (with replacement)
of two-qubit circuits. The actual values are indices into a list of library circuits.
`layer` is used for record-keeping. This is the GridInteractionLayer if using
`get_random_combinations_for_device`, the Moment if using
    `get_random_combinations_for_layer_circuit` and omitted if using
`get_random_combinations_for_pairs`.
"""
layer: Optional[Any]
    combinations: np.ndarray
pairs: List[QidPairT]
def _get_random_combinations(
n_library_circuits: int,
n_combinations: int,
*,
pair_gen: Iterator[Tuple[List[QidPairT], Any]],
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For qubit pairs, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
This helper function should be called by one of
    `get_random_combinations_for_device`,
`get_random_combinations_for_layer_circuit`, or
`get_random_combinations_for_pairs` which define
appropriate `pair_gen` arguments.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
pair_gen: A generator that yields tuples of (pairs, layer_meta) where pairs is a list
of qubit pairs and layer_meta is additional data describing the "layer" assigned
to the CircuitLibraryCombination.layer field.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to a layer
generated from `pair_gen`. Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))`. This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
rs = value.parse_random_state(random_state)
combinations_by_layer = []
for pairs, layer in pair_gen:
combinations = rs.randint(0, n_library_circuits, size=(n_combinations, len(pairs)))
combinations_by_layer.append(
CircuitLibraryCombination(layer=layer, combinations=combinations, pairs=pairs)
)
return combinations_by_layer
def get_random_combinations_for_device(
n_library_circuits: int,
n_combinations: int,
device_graph: nx.Graph,
*,
pattern: Sequence[GridInteractionLayer] = HALF_GRID_STAGGERED_PATTERN,
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For a given device, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
device_graph: A graph whose nodes are qubits and whose edges represent
the possibility of doing a two-qubit gate. This combined with the
`pattern` argument determines which two qubit pairs are activated
when.
pattern: A sequence of `GridInteractionLayer`, each of which has
a particular set of qubits that are activated simultaneously. These
pairs of qubits are deduced by combining this argument with `device_graph`.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to an interaction
layer in `pattern` where there is a non-zero number of pairs which would be activated.
Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))` where `len(pairs)` may
be different for each entry (i.e. for each layer in `pattern`). This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
def pair_gen():
for layer in pattern:
pairs = sorted(_get_active_pairs(device_graph, layer))
if len(pairs) == 0:
continue
yield pairs, layer
return _get_random_combinations(
n_library_circuits=n_library_circuits,
n_combinations=n_combinations,
random_state=random_state,
pair_gen=pair_gen(),
)
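# Illustrative usage sketch (assumed 3x2 grid, not part of the original module):
#
#     qubits = cirq.GridQubit.rect(3, 2)
#     graph = nx.Graph((a, b) for a in qubits for b in qubits if a.is_adjacent(b))
#     combs = get_random_combinations_for_device(
#         n_library_circuits=20, n_combinations=10, device_graph=graph, random_state=9)
#     # One CircuitLibraryCombination per GridInteractionLayer in the pattern that
#     # activates at least one edge of the graph.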
def get_random_combinations_for_pairs(
n_library_circuits: int,
n_combinations: int,
all_pairs: List[List[QidPairT]],
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For an explicit nested list of pairs, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
all_pairs: A nested list of qubit pairs. The outer list should represent a "layer"
where the inner pairs should all be able to be activated simultaneously.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to an interaction
        layer in the outer list of `all_pairs`. Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))` where `len(pairs)` may
be different for each entry. This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
def pair_gen():
for pairs in all_pairs:
yield pairs, None
return _get_random_combinations(
n_library_circuits=n_library_circuits,
n_combinations=n_combinations,
random_state=random_state,
pair_gen=pair_gen(),
)
def _pairs_from_moment(moment: 'cirq.Moment') -> List[QidPairT]:
"""Helper function in `get_random_combinations_for_layer_circuit` pair generator.
The moment should contain only two qubit operations, which define a list of qubit pairs.
"""
pairs: List[QidPairT] = []
for op in moment.operations:
if len(op.qubits) != 2:
raise ValueError("Layer circuit contains non-2-qubit operations.")
qpair = cast(QidPairT, op.qubits)
pairs.append(qpair)
return pairs
def get_random_combinations_for_layer_circuit(
n_library_circuits: int,
n_combinations: int,
layer_circuit: 'cirq.Circuit',
random_state: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> List[CircuitLibraryCombination]:
"""For a layer circuit, prepare a set of combinations to efficiently sample
parallel two-qubit XEB circuits.
Args:
n_library_circuits: The number of circuits in your library. Likely the value
passed to `generate_library_of_2q_circuits`.
n_combinations: The number of combinations (with replacement) to generate
using the library circuits. Since this function returns a
`CircuitLibraryCombination`, the combinations will be represented
by indexes between 0 and `n_library_circuits-1` instead of the circuits
themselves. The more combinations, the more precise of an estimate for XEB
fidelity estimation, but a corresponding increase in the number of circuits
you must sample.
layer_circuit: A calibration-style circuit where each Moment represents a layer.
Two qubit operations indicate the pair should be activated. This circuit should
only contain Moments which only contain two-qubit operations.
random_state: A random-state-like object to seed the random combination generation.
Returns:
A list of `CircuitLibraryCombination`, each corresponding to a moment in `layer_circuit`.
Each object has a `combinations` matrix of circuit
indices of shape `(n_combinations, len(pairs))` where `len(pairs)` may
        be different for each entry (i.e. for each moment). This
returned list can be provided to `sample_2q_xeb_circuits` to efficiently
sample parallel XEB circuits.
"""
def pair_gen():
for moment in layer_circuit.moments:
yield _pairs_from_moment(moment), moment
return _get_random_combinations(
n_library_circuits=n_library_circuits,
n_combinations=n_combinations,
random_state=random_state,
pair_gen=pair_gen(),
)
def get_grid_interaction_layer_circuit(
device_graph: nx.Graph,
pattern: Sequence[GridInteractionLayer] = HALF_GRID_STAGGERED_PATTERN,
two_qubit_gate=ops.ISWAP ** 0.5,
) -> 'cirq.Circuit':
"""Create a circuit representation of a grid interaction pattern on a given device topology.
The resulting circuit is deterministic, of depth len(pattern), and consists of `two_qubit_gate`
applied to each pair in `pattern` restricted to available connections in `device_graph`.
Args:
device_graph: A graph whose nodes are qubits and whose edges represent the possibility of
doing a two-qubit gate. This combined with the `pattern` argument determines which
two qubit pairs are activated when.
pattern: A sequence of `GridInteractionLayer`, each of which has a particular set of
qubits that are activated simultaneously. These pairs of qubits are deduced by
combining this argument with `device_graph`.
two_qubit_gate: The two qubit gate to use in constructing the circuit layers.
"""
moments = []
for layer in pattern:
pairs = sorted(_get_active_pairs(device_graph, layer))
if len(pairs) == 0:
continue
moments += [circuits.Moment(two_qubit_gate.on(*pair) for pair in pairs)]
return circuits.Circuit(moments)
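# Illustrative usage sketch (assumed device graph, not part of the original
# module):
#
#     layer_circuit = get_grid_interaction_layer_circuit(graph)
#     # One Moment per GridInteractionLayer in HALF_GRID_STAGGERED_PATTERN that
#     # activates at least one edge of `graph`; it can then be passed to
#     # get_random_combinations_for_layer_circuit to choose XEB combinations.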
def random_rotations_between_grid_interaction_layers_circuit(
qubits: Iterable['cirq.GridQubit'],
depth: int,
*, # forces keyword arguments
two_qubit_op_factory: Callable[
['cirq.GridQubit', 'cirq.GridQubit', 'np.random.RandomState'], 'cirq.OP_TREE'
] = lambda a, b, _: ops.CZPowGate()(a, b),
pattern: Sequence[GridInteractionLayer] = GRID_STAGGERED_PATTERN,
single_qubit_gates: Sequence['cirq.Gate'] = (
ops.X ** 0.5,
ops.Y ** 0.5,
ops.PhasedXPowGate(phase_exponent=0.25, exponent=0.5),
),
add_final_single_qubit_layer: bool = True,
seed: 'cirq.RANDOM_STATE_OR_SEED_LIKE' = None,
) -> 'cirq.Circuit':
"""Generate a random quantum circuit of a particular form.
This construction is based on the circuits used in the paper
https://www.nature.com/articles/s41586-019-1666-5.
The generated circuit consists of a number of "cycles", this number being
specified by `depth`. Each cycle is actually composed of two sub-layers:
a layer of single-qubit gates followed by a layer of two-qubit gates,
controlled by their respective arguments, see below. The pairs of qubits
in a given entangling layer is controlled by the `pattern` argument,
see below.
Args:
qubits: The qubits to use.
depth: The number of cycles.
two_qubit_op_factory: A callable that returns a two-qubit operation.
These operations will be generated with calls of the form
`two_qubit_op_factory(q0, q1, prng)`, where `prng` is the
pseudorandom number generator.
pattern: A sequence of GridInteractionLayers, each of which determine
which pairs of qubits are entangled. The layers in a pattern are
iterated through sequentially, repeating until `depth` is reached.
single_qubit_gates: Single-qubit gates are selected randomly from this
sequence. No qubit is acted upon by the same single-qubit gate in
consecutive cycles. If only one choice of single-qubit gate is
given, then this constraint is not enforced.
add_final_single_qubit_layer: Whether to include a final layer of
single-qubit gates after the last cycle.
seed: A seed or random state to use for the pseudorandom number
generator.
"""
prng = value.parse_random_state(seed)
qubits = list(qubits)
coupled_qubit_pairs = _coupled_qubit_pairs(qubits)
circuit = circuits.Circuit()
previous_single_qubit_layer = circuits.Moment()
single_qubit_layer_factory = _single_qubit_gates_arg_to_factory(
single_qubit_gates=single_qubit_gates, qubits=qubits, prng=prng
)
for i in range(depth):
single_qubit_layer = single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
circuit += single_qubit_layer
two_qubit_layer = _two_qubit_layer(
coupled_qubit_pairs, two_qubit_op_factory, pattern[i % len(pattern)], prng
)
circuit += two_qubit_layer
previous_single_qubit_layer = single_qubit_layer
if add_final_single_qubit_layer:
circuit += single_qubit_layer_factory.new_layer(previous_single_qubit_layer)
return circuit
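# Illustrative usage sketch (assumed qubits and depth, not part of the original
# module):
#
#     qubits = cirq.GridQubit.rect(3, 3)
#     circuit = random_rotations_between_grid_interaction_layers_circuit(
#         qubits, depth=10, seed=1234)
#     # Entangling layers cycle through GRID_STAGGERED_PATTERN, repeating every
#     # len(pattern) == 8 cycles.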
def _coupled_qubit_pairs(
qubits: List['cirq.GridQubit'],
) -> List[GridQubitPairT]:
pairs = []
qubit_set = set(qubits)
for qubit in qubits:
def add_pair(neighbor: 'cirq.GridQubit'):
if neighbor in qubit_set:
pairs.append((qubit, neighbor))
add_pair(devices.GridQubit(qubit.row, qubit.col + 1))
add_pair(devices.GridQubit(qubit.row + 1, qubit.col))
return pairs
class _RandomSingleQubitLayerFactory:
def __init__(
self,
qubits: Sequence['cirq.Qid'],
single_qubit_gates: Sequence['cirq.Gate'],
prng: 'np.random.RandomState',
) -> None:
self.qubits = qubits
self.single_qubit_gates = single_qubit_gates
self.prng = prng
def new_layer(self, previous_single_qubit_layer: 'cirq.Moment') -> 'cirq.Moment':
def random_gate(qubit: 'cirq.Qid') -> 'cirq.Gate':
excluded_op = previous_single_qubit_layer.operation_at(qubit)
excluded_gate = excluded_op.gate if excluded_op is not None else None
g = self.single_qubit_gates[self.prng.randint(0, len(self.single_qubit_gates))]
while g is excluded_gate:
g = self.single_qubit_gates[self.prng.randint(0, len(self.single_qubit_gates))]
return g
return circuits.Moment(random_gate(q).on(q) for q in self.qubits)
class _FixedSingleQubitLayerFactory:
def __init__(self, fixed_single_qubit_layer: Dict['cirq.Qid', 'cirq.Gate']) -> None:
self.fixed_single_qubit_layer = fixed_single_qubit_layer
def new_layer(self, previous_single_qubit_layer: 'cirq.Moment') -> 'cirq.Moment':
return circuits.Moment(v.on(q) for q, v in self.fixed_single_qubit_layer.items())
_SingleQubitLayerFactory = Union[_FixedSingleQubitLayerFactory, _RandomSingleQubitLayerFactory]
def _single_qubit_gates_arg_to_factory(
single_qubit_gates: Sequence['cirq.Gate'],
qubits: Sequence['cirq.Qid'],
prng: 'np.random.RandomState',
) -> _SingleQubitLayerFactory:
"""Parse the `single_qubit_gates` argument for circuit generation functions.
If only one single qubit gate is provided, it will be used everywhere.
Otherwise, we use the factory that excludes operations that were used
in the previous layer. This check is done by gate identity, not equality.
"""
if len(set(single_qubit_gates)) == 1:
return _FixedSingleQubitLayerFactory({q: single_qubit_gates[0] for q in qubits})
return _RandomSingleQubitLayerFactory(qubits, single_qubit_gates, prng)
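# Note (illustrative, not from the original module): the "previous gate"
# exclusion in _RandomSingleQubitLayerFactory compares by object identity
# (`is`), so two equal but distinct gate objects, e.g. ops.X ** 0.5 constructed
# twice, are not treated as the same choice.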
def _two_qubit_layer(
coupled_qubit_pairs: List[GridQubitPairT],
two_qubit_op_factory: Callable[
['cirq.GridQubit', 'cirq.GridQubit', 'np.random.RandomState'], 'cirq.OP_TREE'
],
layer: GridInteractionLayer,
prng: 'np.random.RandomState',
) -> 'cirq.OP_TREE':
for a, b in coupled_qubit_pairs:
if (a, b) in layer:
yield two_qubit_op_factory(a, b, prng)
|
the-stack_0_15326 | import sys
import pytest
import shutil
from pathlib import Path
from cookiecutter import main
CCDS_ROOT = Path(__file__).parents[1].resolve()
args = {
'project_name': 'AwesomeProject',
'author_name': 'AwesomeName',
'description': 'A very awesome project.',
'open_source_license': 'BSD-3-Clause',
'python_interpreter': 'python',
'version': '0.1.0'
}
def system_check(basename):
platform = sys.platform
if 'linux' in platform:
basename = basename.lower()
return basename
@pytest.fixture(scope='class', params=[{}, args])
def default_baked_project(tmpdir_factory, request):
temp = tmpdir_factory.mktemp('data-project')
out_dir = Path(temp).resolve()
pytest.param = request.param
main.cookiecutter(
str(CCDS_ROOT),
no_input=True,
extra_context=pytest.param,
output_dir=out_dir
)
project_name = pytest.param.get('project_name') or 'project_name'
# project name gets converted to lower case on Linux but not Mac
project_name = system_check(project_name)
project_path = out_dir/project_name
request.cls.project_path = project_path
yield
# cleanup after
shutil.rmtree(out_dir)
|
the-stack_0_15327 | import tty
import sys
import curses
import datetime
import locale
from decimal import Decimal
import getpass
import logging
import electrum_mona
from electrum_mona.util import format_satoshis
from electrum_mona.bitcoin import is_address, COIN, TYPE_ADDRESS
from electrum_mona.transaction import TxOutput
from electrum_mona.wallet import Wallet
from electrum_mona.storage import WalletStorage
from electrum_mona.network import NetworkParameters, TxBroadcastError, BestEffortRequestFailed
from electrum_mona.interface import deserialize_server
from electrum_mona.logging import console_stderr_handler
_ = lambda x:x # i18n
class ElectrumGui:
def __init__(self, config, daemon, plugins):
self.config = config
self.network = daemon.network
storage = WalletStorage(config.get_wallet_path())
if not storage.file_exists():
print("Wallet not found. try 'electrum-mona create'")
exit()
if storage.is_encrypted():
password = getpass.getpass('Password:', stream=None)
storage.decrypt(password)
self.wallet = Wallet(storage)
self.wallet.start_network(self.network)
self.contacts = self.wallet.contacts
locale.setlocale(locale.LC_ALL, '')
self.encoding = locale.getpreferredencoding()
self.stdscr = curses.initscr()
curses.noecho()
curses.cbreak()
curses.start_color()
curses.use_default_colors()
curses.init_pair(1, curses.COLOR_WHITE, curses.COLOR_BLUE)
curses.init_pair(2, curses.COLOR_WHITE, curses.COLOR_CYAN)
curses.init_pair(3, curses.COLOR_BLACK, curses.COLOR_WHITE)
self.stdscr.keypad(1)
self.stdscr.border(0)
self.maxy, self.maxx = self.stdscr.getmaxyx()
self.set_cursor(0)
self.w = curses.newwin(10, 50, 5, 5)
console_stderr_handler.setLevel(logging.CRITICAL)
self.tab = 0
self.pos = 0
self.popup_pos = 0
self.str_recipient = ""
self.str_description = ""
self.str_amount = ""
self.str_fee = ""
self.history = None
if self.network:
self.network.register_callback(self.update, ['wallet_updated', 'network_updated'])
self.tab_names = [_("History"), _("Send"), _("Receive"), _("Addresses"), _("Contacts"), _("Banner")]
self.num_tabs = len(self.tab_names)
def set_cursor(self, x):
try:
curses.curs_set(x)
except Exception:
pass
def restore_or_create(self):
pass
def verify_seed(self):
pass
def get_string(self, y, x):
self.set_cursor(1)
curses.echo()
self.stdscr.addstr( y, x, " "*20, curses.A_REVERSE)
s = self.stdscr.getstr(y,x)
curses.noecho()
self.set_cursor(0)
return s
def update(self, event, *args):
self.update_history()
if self.tab == 0:
self.print_history()
self.refresh()
def print_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
if self.history is None:
self.update_history()
self.print_list(self.history[::-1], format_str%( _("Date"), _("Description"), _("Amount"), _("Balance")))
def update_history(self):
width = [20, 40, 14, 14]
delta = (self.maxx - sum(width) - 4)/3
format_str = "%"+"%d"%width[0]+"s"+"%"+"%d"%(width[1]+delta)+"s"+"%"+"%d"%(width[2]+delta)+"s"+"%"+"%d"%(width[3]+delta)+"s"
b = 0
self.history = []
for hist_item in self.wallet.get_history():
if hist_item.tx_mined_status.conf:
timestamp = hist_item.tx_mined_status.timestamp
try:
time_str = datetime.datetime.fromtimestamp(timestamp).isoformat(' ')[:-3]
except Exception:
time_str = "------"
else:
time_str = 'unconfirmed'
label = self.wallet.get_label(hist_item.txid)
if len(label) > 40:
label = label[0:37] + '...'
self.history.append(format_str % (time_str, label, format_satoshis(hist_item.value, whitespaces=True),
format_satoshis(hist_item.balance, whitespaces=True)))
def print_balance(self):
if not self.network:
msg = _("Offline")
elif self.network.is_connected():
if not self.wallet.up_to_date:
msg = _("Synchronizing...")
else:
c, u, x = self.wallet.get_balance()
msg = _("Balance")+": %f "%(Decimal(c) / COIN)
if u:
msg += " [%f unconfirmed]"%(Decimal(u) / COIN)
if x:
msg += " [%f unmatured]"%(Decimal(x) / COIN)
else:
msg = _("Not connected")
self.stdscr.addstr( self.maxy -1, 3, msg)
for i in range(self.num_tabs):
self.stdscr.addstr( 0, 2 + 2*i + len(''.join(self.tab_names[0:i])), ' '+self.tab_names[i]+' ', curses.A_BOLD if self.tab == i else 0)
self.stdscr.addstr(self.maxy -1, self.maxx-30, ' '.join([_("Settings"), _("Network"), _("Quit")]))
def print_receive(self):
addr = self.wallet.get_receiving_address()
self.stdscr.addstr(2, 1, "Address: "+addr)
self.print_qr(addr)
def print_contacts(self):
messages = map(lambda x: "%20s %45s "%(x[0], x[1][1]), self.contacts.items())
self.print_list(messages, "%19s %15s "%("Key", "Value"))
def print_addresses(self):
fmt = "%-35s %-30s"
messages = map(lambda addr: fmt % (addr, self.wallet.labels.get(addr,"")), self.wallet.get_addresses())
self.print_list(messages, fmt % ("Address", "Label"))
def print_edit_line(self, y, label, text, index, size):
text += " "*(size - len(text) )
self.stdscr.addstr( y, 2, label)
self.stdscr.addstr( y, 15, text, curses.A_REVERSE if self.pos%6==index else curses.color_pair(1))
def print_send_tab(self):
self.stdscr.clear()
self.print_edit_line(3, _("Pay to"), self.str_recipient, 0, 40)
self.print_edit_line(5, _("Description"), self.str_description, 1, 40)
self.print_edit_line(7, _("Amount"), self.str_amount, 2, 15)
self.print_edit_line(9, _("Fee"), self.str_fee, 3, 15)
self.stdscr.addstr( 12, 15, _("[Send]"), curses.A_REVERSE if self.pos%6==4 else curses.color_pair(2))
self.stdscr.addstr( 12, 25, _("[Clear]"), curses.A_REVERSE if self.pos%6==5 else curses.color_pair(2))
self.maxpos = 6
def print_banner(self):
if self.network and self.network.banner:
banner = self.network.banner
banner = banner.replace('\r', '')
self.print_list(banner.split('\n'))
def print_qr(self, data):
import qrcode
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
s = StringIO()
self.qr = qrcode.QRCode()
self.qr.add_data(data)
self.qr.print_ascii(out=s, invert=False)
msg = s.getvalue()
lines = msg.split('\n')
try:
for i, l in enumerate(lines):
l = l.encode("utf-8")
self.stdscr.addstr(i+5, 5, l, curses.color_pair(3))
except curses.error:
m = 'error. screen too small?'
m = m.encode(self.encoding)
self.stdscr.addstr(5, 1, m, 0)
def print_list(self, lst, firstline = None):
lst = list(lst)
self.maxpos = len(lst)
if not self.maxpos: return
if firstline:
firstline += " "*(self.maxx -2 - len(firstline))
self.stdscr.addstr( 1, 1, firstline )
for i in range(self.maxy-4):
msg = lst[i] if i < len(lst) else ""
msg += " "*(self.maxx - 2 - len(msg))
m = msg[0:self.maxx - 2]
m = m.encode(self.encoding)
self.stdscr.addstr( i+2, 1, m, curses.A_REVERSE if i == (self.pos % self.maxpos) else 0)
def refresh(self):
if self.tab == -1: return
self.stdscr.border(0)
self.print_balance()
self.stdscr.refresh()
def main_command(self):
c = self.stdscr.getch()
print(c)
cc = curses.unctrl(c).decode()
if c == curses.KEY_RIGHT: self.tab = (self.tab + 1)%self.num_tabs
elif c == curses.KEY_LEFT: self.tab = (self.tab - 1)%self.num_tabs
elif c == curses.KEY_DOWN: self.pos +=1
elif c == curses.KEY_UP: self.pos -= 1
elif c == 9: self.pos +=1 # tab
elif cc in ['^W', '^C', '^X', '^Q']: self.tab = -1
elif cc in ['^N']: self.network_dialog()
elif cc == '^S': self.settings_dialog()
else: return c
if self.pos<0: self.pos=0
if self.pos>=self.maxpos: self.pos=self.maxpos - 1
def run_tab(self, i, print_func, exec_func):
while self.tab == i:
self.stdscr.clear()
print_func()
self.refresh()
c = self.main_command()
if c: exec_func(c)
def run_history_tab(self, c):
if c == 10:
out = self.run_popup('',["blah","foo"])
def edit_str(self, target, c, is_num=False):
# detect backspace
cc = curses.unctrl(c).decode()
if c in [8, 127, 263] and target:
target = target[:-1]
elif not is_num or cc in '0123456789.':
target += cc
return target
def run_send_tab(self, c):
if self.pos%6 == 0:
self.str_recipient = self.edit_str(self.str_recipient, c)
if self.pos%6 == 1:
self.str_description = self.edit_str(self.str_description, c)
if self.pos%6 == 2:
self.str_amount = self.edit_str(self.str_amount, c, True)
elif self.pos%6 == 3:
self.str_fee = self.edit_str(self.str_fee, c, True)
elif self.pos%6==4:
if c == 10: self.do_send()
elif self.pos%6==5:
if c == 10: self.do_clear()
def run_receive_tab(self, c):
if c == 10:
out = self.run_popup('Address', ["Edit label", "Freeze", "Prioritize"])
def run_contacts_tab(self, c):
if c == 10 and self.contacts:
out = self.run_popup('Address', ["Copy", "Pay to", "Edit label", "Delete"]).get('button')
key = list(self.contacts.keys())[self.pos%len(self.contacts.keys())]
if out == "Pay to":
self.tab = 1
self.str_recipient = key
self.pos = 2
elif out == "Edit label":
s = self.get_string(6 + self.pos, 18)
if s:
self.wallet.labels[key] = s
def run_banner_tab(self, c):
self.show_message(repr(c))
pass
def main(self):
tty.setraw(sys.stdin)
try:
while self.tab != -1:
self.run_tab(0, self.print_history, self.run_history_tab)
self.run_tab(1, self.print_send_tab, self.run_send_tab)
self.run_tab(2, self.print_receive, self.run_receive_tab)
self.run_tab(3, self.print_addresses, self.run_banner_tab)
self.run_tab(4, self.print_contacts, self.run_contacts_tab)
self.run_tab(5, self.print_banner, self.run_banner_tab)
except curses.error as e:
raise Exception("Error with curses. Is your screen too small?") from e
finally:
tty.setcbreak(sys.stdin)
curses.nocbreak()
self.stdscr.keypad(0)
curses.echo()
curses.endwin()
def do_clear(self):
self.str_amount = ''
self.str_recipient = ''
self.str_fee = ''
self.str_description = ''
def do_send(self):
if not is_address(self.str_recipient):
self.show_message(_('Invalid Monacoin address'))
return
try:
amount = int(Decimal(self.str_amount) * COIN)
except Exception:
self.show_message(_('Invalid Amount'))
return
try:
fee = int(Decimal(self.str_fee) * COIN)
except Exception:
self.show_message(_('Invalid Fee'))
return
if self.wallet.has_password():
password = self.password_dialog()
if not password:
return
else:
password = None
try:
tx = self.wallet.mktx([TxOutput(TYPE_ADDRESS, self.str_recipient, amount)],
password, self.config, fee)
except Exception as e:
self.show_message(repr(e))
return
if self.str_description:
self.wallet.labels[tx.txid()] = self.str_description
self.show_message(_("Please wait..."), getchar=False)
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
self.show_message(msg)
except BestEffortRequestFailed as e:
msg = repr(e)
self.show_message(msg)
else:
self.show_message(_('Payment sent.'))
self.do_clear()
#self.update_contacts_tab()
def show_message(self, message, getchar = True):
w = self.w
w.clear()
w.border(0)
for i, line in enumerate(message.split('\n')):
w.addstr(2+i,2,line)
w.refresh()
if getchar: c = self.stdscr.getch()
def run_popup(self, title, items):
return self.run_dialog(title, list(map(lambda x: {'type':'button','label':x}, items)), interval=1, y_pos = self.pos+3)
def network_dialog(self):
if not self.network:
return
net_params = self.network.get_parameters()
host, port, protocol = net_params.host, net_params.port, net_params.protocol
proxy_config, auto_connect = net_params.proxy, net_params.auto_connect
srv = 'auto-connect' if auto_connect else self.network.default_server
out = self.run_dialog('Network', [
{'label':'server', 'type':'str', 'value':srv},
{'label':'proxy', 'type':'str', 'value':self.config.get('proxy', '')},
], buttons = 1)
if out:
if out.get('server'):
server = out.get('server')
auto_connect = server == 'auto-connect'
if not auto_connect:
try:
host, port, protocol = deserialize_server(server)
except Exception:
self.show_message("Error:" + server + "\nIn doubt, type \"auto-connect\"")
return False
if out.get('server') or out.get('proxy'):
proxy = electrum_mona.network.deserialize_proxy(out.get('proxy')) if out.get('proxy') else proxy_config
net_params = NetworkParameters(host, port, protocol, proxy, auto_connect)
self.network.run_from_another_thread(self.network.set_parameters(net_params))
def settings_dialog(self):
fee = str(Decimal(self.config.fee_per_kb()) / COIN)
out = self.run_dialog('Settings', [
{'label':'Default fee', 'type':'satoshis', 'value': fee }
], buttons = 1)
if out:
if out.get('Default fee'):
fee = int(Decimal(out['Default fee']) * COIN)
self.config.set_key('fee_per_kb', fee, True)
def password_dialog(self):
out = self.run_dialog('Password', [
{'label':'Password', 'type':'password', 'value':''}
], buttons = 1)
return out.get('Password')
def run_dialog(self, title, items, interval=2, buttons=None, y_pos=3):
self.popup_pos = 0
self.w = curses.newwin( 5 + len(list(items))*interval + (2 if buttons else 0), 50, y_pos, 5)
w = self.w
out = {}
while True:
w.clear()
w.border(0)
w.addstr( 0, 2, title)
num = len(list(items))
numpos = num
if buttons: numpos += 2
for i in range(num):
item = items[i]
label = item.get('label')
if item.get('type') == 'list':
value = item.get('value','')
elif item.get('type') == 'satoshis':
value = item.get('value','')
elif item.get('type') == 'str':
value = item.get('value','')
elif item.get('type') == 'password':
value = '*'*len(item.get('value',''))
else:
value = ''
if value is None:
value = ''
if len(value)<20:
value += ' '*(20-len(value))
if 'value' in item:
w.addstr( 2+interval*i, 2, label)
w.addstr( 2+interval*i, 15, value, curses.A_REVERSE if self.popup_pos%numpos==i else curses.color_pair(1) )
else:
w.addstr( 2+interval*i, 2, label, curses.A_REVERSE if self.popup_pos%numpos==i else 0)
if buttons:
w.addstr( 5+interval*i, 10, "[ ok ]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-2) else curses.color_pair(2))
w.addstr( 5+interval*i, 25, "[cancel]", curses.A_REVERSE if self.popup_pos%numpos==(numpos-1) else curses.color_pair(2))
w.refresh()
c = self.stdscr.getch()
if c in [ord('q'), 27]: break
elif c in [curses.KEY_LEFT, curses.KEY_UP]: self.popup_pos -= 1
elif c in [curses.KEY_RIGHT, curses.KEY_DOWN]: self.popup_pos +=1
else:
i = self.popup_pos%numpos
if buttons and c==10:
if i == numpos-2:
return out
elif i == numpos -1:
return {}
item = items[i]
_type = item.get('type')
if _type == 'str':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item.get('value')
elif _type == 'password':
item['value'] = self.edit_str(item['value'], c)
out[item.get('label')] = item ['value']
elif _type == 'satoshis':
item['value'] = self.edit_str(item['value'], c, True)
out[item.get('label')] = item.get('value')
elif _type == 'list':
choices = item.get('choices')
try:
j = choices.index(item.get('value'))
except Exception:
j = 0
new_choice = choices[(j + 1)% len(choices)]
item['value'] = new_choice
out[item.get('label')] = item.get('value')
elif _type == 'button':
out['button'] = item.get('label')
break
return out
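    # Illustrative usage sketch (not part of the original file; the dialog items
    # below are hypothetical). Each item dict carries 'label', 'type' ('str',
    # 'password', 'satoshis', 'list' or 'button') and usually a 'value';
    # run_dialog returns a dict keyed by label with every edited field, or {}
    # when the user presses cancel:
    #
    #     out = self.run_dialog('Example', [
    #         {'label': 'Host', 'type': 'str', 'value': 'localhost'},
    #         {'label': 'Tor', 'type': 'list', 'value': 'off', 'choices': ['off', 'on']},
    #     ], buttons=1)
    #     if out:
    #         host = out.get('Host')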
|
the-stack_0_15331 | # Set up an NN to recognize clothing
# Use the Fashion-MNIST train split (60,000 images) for training and the test split (10,000) for evaluation
# We will also used ReLU
from __future__ import absolute_import, division, print_function
# Import Tensorflow
import tensorflow as tf
import tensorflow_datasets as tfds
tf.compat.v1.logging.set_verbosity(tf.compat.v1.logging.ERROR) # updated
# Helper Libraries
import math
import numpy as np
import matplotlib.pyplot as plt
# Improve progress bar display
import tqdm
import tqdm.auto
tqdm.tqdm = tqdm.auto.tqdm
#print(tf.__version__)
# Load dataset and metadata
dataset, metadata = tfds.load('fashion_mnist', as_supervised = True, with_info = True)
train_dataset = dataset['train']
test_dataset = dataset['test']
class_names = metadata.features['label'].names
print("Class names: {}" .format(class_names))
# Explore Data
num_train_examples = metadata.splits['train'].num_examples
num_test_examples = metadata.splits['test'].num_examples
print("Number of training examples: {}" .format(num_train_examples)) # 60000
print("Number of test examples: {}" .format(num_test_examples)) # 10000
# Preprocess the data
# Image has pixels with values [0, 255] ---- NORMALIZATION
def normalize(images, labels):
images = tf.cast(images, tf.float32) # cast it as float
    images /= 255  # scale pixel values from [0, 255] to [0, 1]
return images, labels
# The map function applies normalize to each element in the following datasets
train_dataset = train_dataset.map(normalize)
test_dataset = test_dataset.map(normalize)
"""
# Plot the first image of test_dataset
for image, label in test_dataset.take(1):
break
image = image.numpy().reshape((28, 28))
# Plot the image
plt.figure()
plt.imshow(image, cmap = plt.cm.binary)
plt.colorbar()
plt.grid(False)
plt.show()
# Display the first 25 images from the test set along with their class names
plt.figure(figsize=(10, 10))
i = 0
for (image, label) in test_dataset.take(25):
image = image.numpy().reshape((28, 28))
plt.subplot(5, 5, i+1)
plt.xticks([])
plt.yticks([])
plt.grid(False)
plt.imshow(image, cmap = plt.cm.binary)
plt.xlabel(class_names[label])
i +=1
plt.show()
"""
# Build the model
# 1 - Set up Layers
model = tf.keras.Sequential([
tf.keras.layers.Flatten(input_shape = (28, 28, 1)), # Image from 2d array of 28X28 to 1D of 784
tf.keras.layers.Dense(128, activation = tf.nn.relu), # Densely connected hidden Layer of 128 Neurons
tf.keras.layers.Dense(10, activation = tf.nn.softmax) # 10-node softmax layer, each node is a clothing class
])# Input-hidden-output
# 2 - Compile the model
model.compile(optimizer = 'adam',
loss = 'sparse_categorical_crossentropy',
metrics = ['accuracy']) # images that are correctly classified
# 3 - Train the model
BATCH_SIZE = 32
train_dataset = train_dataset.repeat().shuffle(num_train_examples).batch(BATCH_SIZE)
test_dataset = test_dataset.batch(BATCH_SIZE)
model.fit(train_dataset, epochs = 5, steps_per_epoch = math.ceil(num_train_examples / BATCH_SIZE))
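# Worked numbers (illustrative, not from the original): with 60,000 training
# examples and BATCH_SIZE = 32, steps_per_epoch = ceil(60000 / 32) = 1875
# batches per epoch, repeated for 5 epochs.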
# Notice the training accuracy improving across epochs, reaching about 0.89
# 4 - Evaluate Accuracy
test_loss, test_accuracy = model.evaluate(test_dataset, steps = math.ceil(num_test_examples / 32))
print("Accuracy on test dataset: ", test_accuracy) # 0,87
# 5 - Predictions and Exploration
for test_images, test_labels in test_dataset.take(1):
test_images = test_images.numpy()
test_labels = test_labels.numpy()
predictions = model.predict(test_images)
print(predictions.shape) # (32,10) 32 answers 10 classes
print(predictions[0]) # For 1st image
print(np.argmax(predictions[0])) # argmax picks the class with the largest predicted probability (class 4 here)
test_labels[0]
# Plot the results on full 10 channel set
def plot_image(i, predictions_array, true_labels, images):
predictions_array, true_label, img = predictions_array[i], true_labels[i], images[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
plt.imshow(img[..., 0], cmap = plt.cm.binary)
predicted_label = np.argmax(predictions_array)
if predicted_label == true_label:
color = 'blue'
else:
color = 'red'
plt.xlabel("{} {:2.0f}% ({})".format(class_names[predicted_label],
100 * np.max(predictions_array),
class_names[true_label]),
color = color)
def plot_value_array(i, predictions_array, true_label):
predictions_array, true_label = predictions_array[i], true_label[i]
plt.grid(False)
plt.xticks([])
plt.yticks([])
thisplot = plt.bar(range(10), predictions_array, color = "#777777")
plt.ylim([0,1])
predicted_label = np.argmax(predictions_array)
thisplot[predicted_label].set_color('red')
thisplot[true_label].set_color('blue')
# Check the result for a certain picture
"""
i = 12 # a Pullover
plt.figure(figsize=(6, 3))
plt.subplot(1, 2, 1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(1, 2, 2)
plot_value_array(i, predictions, test_labels)
"""
# Plot the first X test images, their predicted label, and the true label
# Color correct predictions in blue, incorrect predictions in red
num_rows = 5
num_cols = 3
num_images = num_rows*num_cols
plt.figure(figsize=(2*2*num_cols, 2*num_rows))
for i in range(num_images):
plt.subplot(num_rows, 2*num_cols, 2*i+1)
plot_image(i, predictions, test_labels, test_images)
plt.subplot(num_rows, 2*num_cols, 2*i+2)
plot_value_array(i, predictions, test_labels)
plt.show()
# Grab an image from the test dataset
img = test_images[0]
print(img.shape)
# Add the image to a batch where it's the only member.
img = np.array([img])
print(img.shape)
# now predict
predictions_single = model.predict(img)
print(predictions_single)
plot_value_array(0, predictions_single, test_labels)
_ = plt.xticks(range(10), class_names, rotation=45)
plt.show()
print(np.argmax(predictions_single[0])) |
the-stack_0_15333 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('web', '0011_card_how_to_obtain'),
]
operations = [
migrations.AddField(
model_name='account',
name='stars',
field=models.PositiveIntegerField(null=True, verbose_name='Stars', blank=True),
preserve_default=True,
),
migrations.AlterField(
model_name='card',
name='how_to_obtain',
field=models.TextField(help_text="For event or special songs cards. Leave empty if it's only obtainable in recruitment.", null=True, verbose_name='How to get it?', blank=True),
preserve_default=True,
),
]
|
the-stack_0_15335 | if __name__ == '__main__' and __package__ is None:
import sys
from os import path
sys.path.append( path.dirname( path.dirname( path.abspath(__file__) ) ))
import pickle
import numpy as np
import matplotlib.pyplot as plt
import cv2
from data.transforms.image import de_transform
from vision.multiview import coord2pix, pix2coord
import scipy
from matplotlib.patches import Circle
import time
import math
RGB_MATCHING_COLOR = '#0066cc'
BASELINE_MATCHING_COLOR = 'y'
OURS_MATCHING_COLOR = 'r'
GROUNDTRUTH_COLOR = 'g'
def de_normalize(pts, H, W, engine='numpy'):
"""
Args:
pts: *N x 2 (x, y -> W, H)
"""
pts_ = pts.copy()
    if engine == 'torch':
        import torch  # local import: torch is only needed for this code path
        WH = torch.tensor([W, H], dtype=pts.dtype, device=pts.device)
        return (pts + 1) * (WH - 1) / 2.
pts_[..., 0] = (pts[..., 0] + 1) * (W - 1) / 2.
pts_[..., 1] = (pts[..., 1] + 1) * (H - 1) / 2.
return pts_
def normalize(pts, H, W):
"""
Args:
pts: *N x 2 (x, y -> W, H)
"""
pts_ = pts.copy()
pts_[..., 0] = -1. + 2. * pts[..., 0] / (W - 1)
pts_[..., 1] = -1. + 2. * pts[..., 1] / (H - 1)
return pts_
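# Round-trip sanity sketch (not part of the original script), assuming a 256x256
# image: normalize() maps pixel coordinates into [-1, 1] and de_normalize() maps
# them back.
#
#     pts = np.array([[0.0, 0.0], [255.0, 255.0], [127.5, 127.5]])
#     npts = normalize(pts, H=256, W=256)       # -> [[-1, -1], [1, 1], [0, 0]]
#     back = de_normalize(npts, H=256, W=256)   # -> recovers pts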
def BGR2Lab(image):
return cv2.cvtColor(image, cv2.COLOR_BGR2Lab)
def Lab2ab(image):
_, A, B = cv2.split(image)
return np.stack([A, B])
class Output(object):
def __init__(self, pkl_path):
with open(pkl_path,"rb") as f:
output = pickle.load(f)
img1 = output['img1'][0]
img1 = de_transform(img1).transpose(1,2,0)
img2 = output['img2'][0]
img2 = de_transform(img2).transpose(1,2,0)
self.img1 = img1[:, :, ::-1]
self.img2 = img2[:, :, ::-1]
img1_ab = Lab2ab(BGR2Lab(img1)).transpose(1,2,0)
img2_ab = Lab2ab(BGR2Lab(img2)).transpose(1,2,0)
self.img1_ab = img1_ab
self.img2_ab = img2_ab
self.depth = output['depth']
self.corr_pos_pred = output['corr_pos_pred']
self.sample_locs = output['sample_locs']
self.img1_path = output['img1_path']
self.img2_path = output['img2_path']
self.camera = output['camera'][0]
self.other_camera = output['other_camera'][0]
self.heatmap_pred = output['heatmap_pred']
self.batch_locs = output['batch_locs']
self.points_2d = output['points-2d']
self.H, self.W = img1.shape[:2]
def calc_color_score(self, x, y):
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
ref_point = self.img1_ab[int(y), int(x), :]
color_score = []
max_score_id = None
max_score = -1
for i in range(0, 64):
pos = self.sample_locs[i][int(cy)][int(cx)]
depos = de_normalize(pos, self.H, self.W)
source_point = self.img2_ab[int(depos[1]), int(depos[0]), :]
color_score.append(np.dot(ref_point, source_point))
if color_score[-1] > max_score:
max_score = color_score[-1]
max_score_id = (int(depos[0]), int(depos[1]))
color_score = color_score / sum(color_score)
return color_score, max_score_id
class Complex_Draw(object):
def __init__(self, output, b_output):
self.output = output
self.b_output = b_output
self.ref_img = output.img1
assert output.img1_path == b_output.img1_path
def draw_sample_ax(self, ax, x, y):
output = self.output
b_output = self.b_output
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
ax.clear()
# update the line positions
ax.imshow(self.ref_img)
self.lx.set_ydata(y)
self.ly.set_xdata(x)
circ = Circle((x, y), 3, color=GROUNDTRUTH_COLOR)
ax.add_patch(circ)
self.txt.set_text('x=%1.1f, y=%1.1f; g: groundtruth; y: baseline; r: prediction' % (x, y))
def draw_dist_ax(self, ax, x, y):
output = self.output
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
color_score, max_score_id = output.calc_color_score(x, y)
xrange = np.arange(0, 64)
ax.clear()
lines_color = {
'feat. matching': OURS_MATCHING_COLOR,
'rgb matching' : '#0066cc',
'non-fusion feat. matching': BASELINE_MATCHING_COLOR,
}
lines_data = {
'feat. matching': output.depth[:, cy, cx],
'rgb matching' : color_score,
'non-fusion feat. matching': self.b_output.depth[:, cy, cx],
}
ax.clear()
for label, line in lines_data.items():
ax.plot(xrange[1:-1], line[1:-1], color=lines_color[label], label=label)
ax.set_yscale('log')
ax.set_ylabel('similarity (log)')
ax.tick_params(bottom=False, top=True)
ax.tick_params(labelbottom=False, labeltop=True)
ax.legend()
return max_score_id
def draw_other_ax(self, ax, x, y, max_score_id, joint_id=None):
output = self.output
b_output = self.b_output
cx, cy = int(coord2pix(x, 4)), int(coord2pix(y, 4))
xx, yy = output.corr_pos_pred[cy][cx]
bxx, byy = self.b_output.corr_pos_pred[cy][cx]
ax.clear()
ax.imshow(output.img2)
circ = Circle(max_score_id, 3, color=RGB_MATCHING_COLOR)
ax.add_patch(circ)
# draw epipolar lines
line_start1 = de_normalize(output.sample_locs[1][int(cy)][int(cx)], output.H, output.W)
line_start2 = de_normalize(output.sample_locs[63][int(cy)][int(cx)], output.H, output.W)
ax.plot([line_start1[0], line_start2[0]], [line_start1[1], line_start2[1]], alpha=0.5, color='b', zorder=1)
# draw groundtruth points
# for i in range(17):
gx, gy = output.points_2d[output.other_camera][joint_id][0], output.points_2d[output.other_camera][joint_id][1]
circ = Circle((gx, gy), 3, color=GROUNDTRUTH_COLOR, zorder=2)
ax.add_patch(circ)
# draw baseline predicted point
circ = Circle((pix2coord(bxx, 4), pix2coord(byy, 4)), 3, color=BASELINE_MATCHING_COLOR, zorder=2)
ax.add_patch(circ)
# draw predicted point
circ = Circle((pix2coord(xx, 4), pix2coord(yy, 4)), 3, color=OURS_MATCHING_COLOR, zorder=3)
ax.add_patch(circ)
def dist(x1, y1, x2, y2):
return math.sqrt((x1 - x2)**2 + (y1-y2) **2)
flag = True
        # keep only cases where our prediction is clearly better than the baseline (1.5x margin)
        if dist(pix2coord(xx, 4), pix2coord(yy, 4), gx, gy) * 1.5 > dist(pix2coord(bxx, 4), pix2coord(byy, 4), gx, gy):
            flag = False
        # skip cases where the baseline is already close to the groundtruth (threshold: 5 px)
        if dist(pix2coord(bxx, 4), pix2coord(byy, 4), gx, gy) < 5:
            flag = False
if flag:
print('img1 path: ', output.img1_path)
print('img2 path: ', output.img2_path)
print('pred - gt: ', dist(pix2coord(xx, 4), pix2coord(yy,4), gx, gy))
print('baseline - gt', dist(pix2coord(bxx, 4), pix2coord(byy,4), gx, gy))
txt = self.sample_ax.text(0, 0, '', va="bottom", ha="left")
txt.set_text('g: groundtruth; y: baseline; r: our prediction')
return flag
def draw_heatmap_ax(self, ax):
output = self.output
ax.clear()
ax.imshow(output.heatmap_pred.max(0))
def draw(self, x, y, save_path, joint_id=None):
self.fig, self.axs = plt.subplots(2, 2, squeeze=True, figsize=(12, 8))
self.sample_ax = self.axs[0, 0]
self.dist_ax = self.axs[0, 1]
self.other_ax = self.axs[1, 0]
self.heatmap_ax = self.axs[1, 1]
self.lx = self.sample_ax.axhline(color='k') # the horiz line
self.ly = self.sample_ax.axvline(color='k') # the vert line
self.txt = self.sample_ax.text(0, 0, '', va="bottom", ha="left")
output = self.output
self.draw_sample_ax(self.sample_ax, x, y)
max_score_id = self.draw_dist_ax(self.dist_ax, x, y)
flag = self.draw_other_ax(self.other_ax, x, y, max_score_id, joint_id)
if not flag:
plt.close()
return flag
self.draw_heatmap_ax(self.heatmap_ax)
plt.savefig(save_path) #, transparent=True)
print('saved for ', save_path)
return flag
class Easy_Draw(Complex_Draw):
def __init__(self, output, b_output):
self.output = output
self.b_output = b_output
self.ref_img = output.img1
assert output.img1_path == b_output.img1_path
def draw(self, x, y, save_path):
self.fig, self.ax = plt.subplots(1, figsize=(12, 8))
output = self.output
self.draw_dist_ax(self.ax, x, y)
plt.savefig(save_path, transparent=True)
print('saved for ', save_path)
root_dir = "outs/epipolar/keypoint_h36m_fixed/visualizations/h36m/"
# for i in range(4,5):
i = 1
j = 2
ours_pkl = root_dir + "output_{}.pkl".format(i)
baseline_pkl = root_dir + "output_baseline_{}.pkl".format(i)
complex_output = root_dir + "{}_joint{}_output.eps"
easy_output = root_dir + "easy_output/{}_joint{}_easy_output.eps"
output = Output(ours_pkl)
b_output = Output(baseline_pkl)
cd = Complex_Draw(output, b_output)
ed = Easy_Draw(output, b_output)
flag = cd.draw(x=output.points_2d[output.camera][j][0], y=output.points_2d[output.camera][j][1], save_path=complex_output.format(i, j), joint_id=j)
if flag:
ed.draw(x=output.points_2d[output.camera][j][0], y=output.points_2d[output.camera][j][1], save_path=easy_output.format(i, j))
fig, ax = plt.subplots()
plt.imshow(output.img1)
ax.axis('off')
fig.savefig(root_dir+'original/{}_ref_img.eps'.format(i),bbox_inches='tight', pad_inches=0)
fig, ax = plt.subplots()
ax.axis('off')
plt.imshow(output.img2)
fig.savefig(root_dir+'original/{}_source_img.eps'.format(i),bbox_inches='tight', pad_inches=0)
print('saved original images')
|
the-stack_0_15336 | from django.db import models
from django.utils.text import slugify
from django.core.validators import MinValueValidator, MinLengthValidator
from django.db.models.fields import SlugField
from django.contrib.auth.models import User
# Create your models here.
class Person(models.Model):
DIRECTOR = 'DR'
DEAN_OF_ACADEMIC_AFFAIRS = 'DOAA'
DEAN_OF_FACULTY_AFFAIRS = 'DOFA'
DEAN_OF_STUDENT_AFFAIRS = 'DOSA'
HEAD_OF_DEPARTMENT = 'HOD'
FACULTY = 'F'
VISITING_FACULTY = 'VF'
REGISTRAR = 'RG'
HEAD_OF_STAFF = 'HOS'
STAFF = 'S'
COMPUTER_SCIENCE_AND_ENGINEERING = 'CSE'
ELECTRONICS_AND_COMMUNICATION_ENGINEERING = 'ECE'
MECHANICAL_AND_MECHATRONICS_ENGINEERING = 'ME'
HUMANITIES_AND_SOCIAL_SCIENCES = 'HSS'
MATHEMATICS = 'MH'
PHYSICS = 'PH'
NON_TEACHING_STAFF = 'NTS'
PERSON_ROLES = (
('DR', 'Director'),
('DOAA', 'Dean of Academic Affairs'),
('DOFA', 'Dean of Faculty Affairs'),
('DOSA', 'Dean of Student Affairs'),
('HOD', 'Head of Department'),
('F', 'Faculty'),
('VF', 'Visiting Faculty'),
('RG', 'Registrar'),
('HOS', 'Head of Staff'),
('S', 'Staff'),
)
DEPARTMENT = (
('CSE', 'Computer Science and Engineering'),
('ECE', 'Electronics and Communication Engineering'),
('ME', 'Mechanical and Mechatronics Engineering'),
('HSS', 'Humanities and Social Sciences'),
('MH', 'Mathematics'),
('PH', 'Physics'),
('NTS', 'Non Teaching Staff'),
)
user = models.OneToOneField(User, on_delete=models.CASCADE, related_name='person')
faculty_id = models.CharField(max_length=10, blank=False, null=False, unique=True, default='0')
leave_count = models.IntegerField(validators=[MinValueValidator(0)], blank=False, null=False, default=22)
department = models.CharField(max_length=3, choices=DEPARTMENT, blank=False, null=False, default='CSE')
first_name = models.CharField(max_length=50, validators=[MinLengthValidator(1)], blank=False, null=False)
last_name = models.CharField(max_length=50, blank=True, null=False)
email = models.EmailField(blank=False, null=False, unique=True)
    office_no = models.IntegerField(blank=False, null=False, default=0)
role = models.CharField(max_length=5, choices=PERSON_ROLES, default='F')
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
# slug = models.SlugField(unique=True)
class Meta:
ordering = ['id']
verbose_name = 'Person'
verbose_name_plural = 'Persons'
def is_director(self):
return self.role == 'DR'
    def is_dean(self):
        return self.role in ('DOAA', 'DOFA', 'DOSA')
def is_hod(self):
return self.role == 'HOD'
def is_valid(self):
if self.email.split('@')[1] != 'lnmiit.ac.in':
return False
return len(self.first_name) > 0 and self.user is not None and self.leave_count >= 0
def __str__(self):
return f'{self.id}. {self.first_name} {self.last_name}'
class Application(models.Model):
PENDING = 'P'
APPROVED = 'A'
REJECTED = 'R'
APPLICATION_STATUS = (
('P', 'Pending'),
('A', 'Approved'),
('R', 'Rejected'),
)
person = models.ForeignKey(Person, on_delete=models.CASCADE, related_name='applicant', default=1)
status = models.CharField(max_length=1, choices=APPLICATION_STATUS, default='P')
start_date = models.DateField(blank=False, null=False)
end_date = models.DateField(blank=False, null=False)
hasClasses = models.BooleanField(blank=False, null=False, default=False)
rescheduled_date = models.DateField(blank=True, null=True)
up_next = models.ForeignKey(Person, on_delete=models.CASCADE, related_name='up_next', default=1)
comments = models.TextField(blank=True, null=False)
updated_at = models.DateTimeField(auto_now=True)
created_at = models.DateTimeField(auto_now_add=True)
# slug = models.SlugField(unique=True)
class Meta:
ordering = ['start_date', 'end_date']
def is_valid(self):
return self.person.is_valid() and self.start_date < self.end_date
def __str__(self):
return f'{self.id}. {self.person.first_name} {self.person.last_name} - {self.get_status_display()}'
|
the-stack_0_15339 | #! /usr/bin/env python
# -*- coding: utf-8 -*-
# __author__ = "xh"
# Date: 2019/11/13
from core import info_collection
from conf import settings
import urllib.request
import urllib.parse, urllib.error
import os, sys
import json
import datetime
class ArgvHandler(object):
def __init__(self, argv_list):
self.argvs = argv_list
        self.parse_argvs()
    def parse_argvs(self):
if len(self.argvs) > 1:
if hasattr(self, self.argvs[1]):
func = getattr(self, self.argvs[1])
func()
else:
self.help_msg()
else:
self.help_msg()
def help_msg(self):
msg = '''
        collect_data     collect asset data
        run_forever      ...
        get_asset_id     get the locally stored asset id
        report_asset     report asset data to the server
'''
print(msg)
def collect_data(self):
obj = info_collection.InfoCollection()
asset_data = obj.collect()
print("asset", asset_data)
return asset_data
def get_asset_id(self):
pass
def load_asset_id(self, sn=None):
asset_id_file = settings.Params["asset_id"]
has_asset_id = False
if os.path.isfile(asset_id_file):
asset_id = open(asset_id_file).read().strip()
if asset_id.isdigit():
return asset_id
else:
has_asset_id = False
else:
has_asset_id = False
    def __update_asset_id(self, new_asset_id):
        '''Persist the asset id returned by the server to the local asset id file.'''
asset_id_file = settings.Params["asset_id"]
with open(asset_id_file, "w", encoding="utf-8") as f:
f.write(str(new_asset_id))
    def log_record(self, log, action_type=None):
        '''Write log entries to the local log file.'''
        f = open(settings.Params["log_file"], "a", encoding="utf-8")
        if type(log) is str:
            pass
        if type(log) is dict:
            if "info" in log:
                for msg in log["info"]:
                    log_format = "%s\tINFO\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
                    f.write(log_format)
            if "error" in log:
                for msg in log["error"]:
                    log_format = "%s\tERROR\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
                    f.write(log_format)
            if "warning" in log:
                for msg in log["warning"]:
                    log_format = "%s\tWARNING\t%s\n" % (datetime.datetime.now().strftime("%Y-%m-%d-%H:%M:%S"), msg)
                    f.write(log_format)
        f.close()
def report_asset(self):
obj = info_collection.InfoCollection()
asset_data = obj.collect()
asset_id = self.load_asset_id(asset_data["sn"])
        if asset_id:  # the asset has been reported before: attach the locally stored asset_id and report straight to the main asset database
asset_data["asset_id"] = asset_id
post_url = "asset_report"
        else:  # first report for this asset: it has to go to the pending-approval area first
asset_data["asset_id"] = None
post_url = "asset_report_with_no_id"
data = {"asset_data": json.dumps(asset_data)}
        response = self.__submit_data(post_url, data, method="post")
        print("server response:", response)
        if "asset_id" in str(response):
            # assumes the server replies with a JSON body that contains the new asset id
            self.__update_asset_id(json.loads(response)["asset_id"])
def __submit_data(self, action_type, data, method):
'''
发达数据到目标主机
:param action_type: url
:param data: 数据
:param method: 请求方式
:return:
'''
if action_type in settings.Params["urls"]:
if type(settings.Params["port"]) is int:
url = "http://%s:%s%s" % (
settings.Params["server"], settings.Params["port"], settings.Params["urls"][action_type])
else:
url = "http://%s%s" % (settings.Params["server"], settings.Params["urls"][action_type])
if method == "get":
args = ""
                for k, v in data.items():
args += "&%s=%s" % (k, v)
args = args[1:]
url_with_args = "%s?%s" % (url, args)
try:
req = urllib.request.urlopen(url_with_args, timeout=settings.Params["request_timeout"])
callback = req.read()
return callback
                except urllib.error.URLError as e:
sys.exit("\033[31;1m%s\033[0m" % e)
elif method == "post":
try:
data_encode = urllib.parse.urlencode(data).encode()
req = urllib.request.urlopen(url=url, data=data_encode, timeout=settings.Params['request_timeout'])
callback = req.read()
print("\033[31;1m[%s]:[%s]\033[0m response:\n%s" % (method, url, callback))
return callback
except Exception as e:
sys.exit("\033[31;1m%s\033[0m" % e)
else:
raise KeyError
|
the-stack_0_15340 | '''
Copyright 2019 The Microsoft DeepSpeed Team
'''
import math
import torch
import torch.distributed as dist
try:
from deepspeed.git_version_info import version
from deepspeed.moe.utils import is_moe_param
from deepspeed.ops.adam import DeepSpeedCPUAdam
from deepspeed.ops.op_builder import UtilsBuilder
from deepspeed.runtime.zero.config import ZERO_OPTIMIZATION_GRADIENTS
except ImportError:
pass
from packaging import version as pkg_version
from torch._six import inf
from torch.distributed.distributed_c10d import _get_global_rank
from torch.optim import Optimizer
from colossalai.core import global_context as gpc
from colossalai.registry import OPTIMIZER_WRAPPERS
from colossalai.utils import report_memory_usage
from ._utils import is_model_parallel_parameter
from .loss_scaler import LossScaler, DynamicLossScaler
from ...context.parallel_mode import ParallelMode
# Toggle this to true to enable correctness test
# with gradient partitioning and without
pg_correctness_test = False
def input(msg):
return
def split_half_float_double(tensors):
dtypes = [
"torch.cuda.HalfTensor",
"torch.cuda.FloatTensor",
"torch.cuda.DoubleTensor"
]
buckets = []
for i, dtype in enumerate(dtypes):
bucket = [t for t in tensors if t.type() == dtype]
if bucket:
buckets.append(bucket)
return buckets
def isclose(a, b, rtol=1e-09, atol=0.0):
return abs(a - b) <= max(rtol * max(abs(a), abs(b)), atol)
def lcm(x, y):
    from math import gcd  # fractions.gcd was removed in Python 3.9; math.gcd is the Python 3 home
return x * y // gcd(x, y)
def get_alignment_padding(tensor_list, alignment):
num_elements = sum([tensor.numel() for tensor in tensor_list])
remainder = num_elements % alignment
return (alignment - remainder) if remainder else remainder
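# Worked example (illustrative): three tensors with 3 + 4 + 3 = 10 elements and
# alignment = 4 leave remainder 10 % 4 = 2, so 4 - 2 = 2 padding elements are
# needed to reach the next multiple of the alignment:
#
#     get_alignment_padding([torch.zeros(3), torch.zeros(4), torch.zeros(3)], 4)  # == 2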
def move_to_cpu(tensor_list):
for tensor in tensor_list:
tensor.data = tensor.data.cpu()
def print_rank_msg(msg):
print(f"rank {dist.get_rank()} - {msg}")
@OPTIMIZER_WRAPPERS.register_module
class ZeroRedundancyOptimizer_Level_2(Optimizer):
"""
    ZeroRedundancyOptimizer_Level_2 is designed to reduce the memory footprint
required for training large deep learning models.
For more details please see ZeRO: Memory Optimization Towards Training A Trillion Parameter Models
https://arxiv.org/abs/1910.02054
"""
def __init__(self,
init_optimizer,
dp_parallel_mode=ParallelMode.DATA,
static_loss_scale=1.0,
dynamic_loss_scale=False,
dynamic_loss_args=None,
verbose=False,
contiguous_gradients=True,
reduce_bucket_size=500000000,
allgather_bucket_size=5000000000,
reduce_scatter=True,
overlap_comm=False,
cpu_offload=False,
clip_grad=0.0,
allreduce_always_fp32=False,
postscale_gradients=True,
gradient_predivide_factor=1.0,
gradient_accumulation_steps=1,
ignore_unused_parameters=True,
round_robin_gradients=False,
fp16_master_weights_and_gradients=False):
# mpu = None is removed from the parameter list
# tensor parallel will be automatically detected later
# LSG: default arguments for compatibility
has_moe_layers = False
partition_grads = True
expert_parallel_group = None
expert_data_parallel_group = None
self.timers = None
self.defaults = init_optimizer.defaults
dp_process_group = gpc.get_group(dp_parallel_mode)
if gpc.get_world_size(dp_parallel_mode) == 1:
partition_grads = False # for compatibility with dp size = 1
self.verbose = verbose
if dist.get_rank() == 0 and self.verbose:
print(f"Reduce bucket size {reduce_bucket_size}")
print(f"Allgather bucket size {allgather_bucket_size}")
print(f"CPU Offload: {cpu_offload}")
print(
f'Round robin gradient partitioning: {round_robin_gradients}')
        # The fused optimizer does all the work. We need this layer for two reasons:
        # 1. maintain same user API from apex.fp16_utils
        # 2. keep common stuff here in case we need to add new fused optimizer later
        # differences from apex.fp16_utils:
        # - assume all model params in fp16
        # - assume all params require grad
        # - flat by groups, not keeping state. TODO: remove state explicitly?
        # - master grad and unflat master weight never exist. TODO: a way to save out unflat master?
        if not torch.cuda.is_available():
raise SystemError("Cannot use fp16 without CUDA.")
self.optimizer = init_optimizer
# Load pre-built or JIT compile (un)flatten ops
util_ops = UtilsBuilder().load()
self.flatten = util_ops.flatten
self.unflatten = util_ops.unflatten
# ZeRO stage 1 (False) or 2 (True)
self.partition_gradients = partition_grads
self.reduce_scatter = reduce_scatter
self.overlap_comm = overlap_comm
self.cpu_offload = cpu_offload
self.deepspeed_adam_offload = cpu_offload
self.device = torch.cuda.current_device() if not self.cpu_offload else 'cpu'
self.dp_process_group = dp_process_group
# expert parallel group
self.ep_process_group = expert_parallel_group
# data parallel group for experts
self.expert_dp_process_group = expert_data_parallel_group
# data parallel size for non-experts
dp_size = dist.get_world_size(group=self.dp_process_group)
# For MoE models this maybe different for different param group
# It will be modified during MoE setup later in the init
self.real_dp_process_group = [
dp_process_group for i in range(len(self.optimizer.param_groups))
]
self.partition_count = [dp_size for i in range(
len(self.optimizer.param_groups))]
self.is_gradient_accumulation_boundary = True
# CPU-Offload requires contiguous gradients
self.contiguous_gradients = contiguous_gradients or cpu_offload
self.has_moe_layers = has_moe_layers
if self.has_moe_layers:
self._configure_moe_settings()
if not gpc.is_initialized(ParallelMode.TENSOR) or gpc.get_world_size(ParallelMode.TENSOR) == 1:
self.model_parallel_group = None
self.model_parallel_rank = 0
else:
self.model_parallel_group = gpc.get_group(ParallelMode.TENSOR)
self.model_parallel_rank = gpc.get_local_rank(ParallelMode.TENSOR)
self.overflow = False
self.clip_grad = clip_grad
self.allreduce_always_fp32 = allreduce_always_fp32
self.gradient_predivide_factor = gradient_predivide_factor
self.postscale_gradients = postscale_gradients
self.gradient_accumulation_steps = gradient_accumulation_steps
self.micro_step_id = 0
self.ignore_unused_parameters = ignore_unused_parameters
self.round_robin_gradients = round_robin_gradients
self.extra_large_param_to_reduce = None
self.fp16_master_weights_and_gradients = fp16_master_weights_and_gradients
if self.fp16_master_weights_and_gradients:
assert self.cpu_offload and type(self.optimizer) in [
DeepSpeedCPUAdam], f"fp16_master_and_gradients requires optimizer to support keeping fp16 master and gradients while keeping the optimizer states in fp32. Currenty only supported using ZeRO-Offload with DeepSpeedCPUAdam. But current setting is ZeRO-Offload:{self.cpu_offload} and optimizer type {type(self.optimizer)}. Either disable fp16_master_weights_and_gradients or enable ZeRO-2 Offload with DeepSpeedCPUAdam"
if self.reduce_scatter:
assert not self.allreduce_always_fp32, "allreduce_always_fp32 is not yet supported with ZeRO-2 with reduce scatter enabled"
assert self.gradient_predivide_factor == 1.0, "gradient_predivide_factor != 1.0 is not yet supported with ZeRO-2 with reduce scatter enabled"
assert self.postscale_gradients, "pre-scale gradients is not yet supported with ZeRO-2 with reduce scatter enabled"
# param flattened by groups
self.fp16_groups = []
self.fp16_groups_flat = []
# param partitioned by data parallel degree
# this will contain a list of equal sized tensors
# each of which will be updated by a different process
self.parallel_partitioned_fp16_groups = []
# a single 32-bit partition of the parallel partitioned parameters
# that this process will update
self.single_partition_of_fp32_groups = []
# param partition info
# These are the parameters in each group that will not be updated by this process directly
self.params_not_in_partition = []
# These are the parameters that will be updated by this process directly
self.params_in_partition = []
        # Offset from the first parameter in the self.params_in_partition
# the parameter boundaries may not align with partition boundaries
# so we need to keep track of the offset
self.first_offset = []
# number of elements per partition in each group
self.partition_size = []
        # align nccl all-gather send buffers to 4-byte boundary
# 4-byte alignment/sizeof(fp16) = 2
self.nccl_start_alignment_factor = 2
assert (
allgather_bucket_size % self.nccl_start_alignment_factor == 0), f"allgather_bucket_size must be a multiple of nccl_start_alignment_factor, {self.nccl_start_alignment_factor} "
self.all_reduce_print = False
self.dtype = self.optimizer.param_groups[0]['params'][0].dtype
self.round_robin_fp16_groups = []
        self.round_robin_fp16_indices = []
# padding on each partition for alignment purposes
self.groups_padding = []
# loop to deal with groups
for i, param_group in enumerate(self.optimizer.param_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
# push this group to list before modify
# TODO: Explore simplification that avoids the extra book-keeping by pushing the reordered group
self.fp16_groups.append(param_group['params'])
# Record padding required to align group to world size
if partition_id == dist.get_world_size(
group=self.real_dp_process_group[i]) - 1:
padding = get_alignment_padding(self.fp16_groups[i],
self.partition_count[i])
else:
padding = 0
self.groups_padding.append(padding)
# not sure why apex was cloning the weights before flattening
# removing cloning here
if self.verbose:
report_memory_usage(f"Before moving param group {i} to CPU")
# move all the parameters to cpu to free up GPU space for creating flat buffer
move_to_cpu(self.fp16_groups[i])
if self.verbose:
report_memory_usage(f"After moving param group {i} to CPU")
# Reorder group parameters for load balancing of gradient partitioning during backward among ranks.
# This ensures that gradients are reduced in a fashion such that ownership round robins among the ranks.
# For example, rather than 3 gradients (g_n+2, g_n+1, g_n) that are reduced consecutively belonging
# to the same rank, instead they will belong to 3 ranks (r_m+2, r_m+1, r_m).
if self.round_robin_gradients:
round_robin_tensors, round_robin_indices = self._round_robin_reorder(
self.fp16_groups[i],
dist.get_world_size(group=self.real_dp_process_group[i])
)
else:
round_robin_tensors = self.fp16_groups[i]
round_robin_indices = list(range(len(self.fp16_groups[i])))
self.round_robin_fp16_groups.append(round_robin_tensors)
            self.round_robin_fp16_indices.append(round_robin_indices)
# create flat buffer in CPU and move to GPU
self.fp16_groups_flat.append(
self.flatten_dense_tensors_aligned(
self.round_robin_fp16_groups[i],
self.nccl_start_alignment_factor *
dist.get_world_size(group=self.real_dp_process_group[i])).cuda(
torch.cuda.current_device()))
if self.verbose:
report_memory_usage(
f"After flattening and moving param group {i} to GPU")
if dist.get_rank(group=self.real_dp_process_group[i]) == 0:
report_memory_usage(
f"After Flattening and after emptying param group {i} cache")
# set model fp16 weight to slices of flattened buffer
self._update_model_fp16_weights(i)
# divide the flat weights into near equal partition equal to the data parallel degree
# each process will compute on a different part of the partition
data_parallel_partitions = self.get_data_parallel_partitions(
self.fp16_groups_flat[i],
i)
self.parallel_partitioned_fp16_groups.append(
data_parallel_partitions)
# verify that data partition start locations are 4-byte aligned
for partitioned_data in data_parallel_partitions:
assert (partitioned_data.data_ptr() %
(2 * self.nccl_start_alignment_factor) == 0)
# a partition of the fp32 master weights that will be updated by this process
if not fp16_master_weights_and_gradients:
self.single_partition_of_fp32_groups.append(
self.parallel_partitioned_fp16_groups[i][partition_id].to(
self.device).clone().float().detach())
else:
self.single_partition_of_fp32_groups.append(
self.parallel_partitioned_fp16_groups[i][partition_id].to(
self.device).clone().half().detach())
# modify optimizer of have flat master weight
self.single_partition_of_fp32_groups[
i].requires_grad = True # keep this in case internal optimizer uses it
param_group['params'] = [self.single_partition_of_fp32_groups[i]]
partition_size = len(self.fp16_groups_flat[i]) / dist.get_world_size(
group=self.real_dp_process_group[i])
params_in_partition, params_not_in_partition, first_offset = self.get_partition_info(
self.round_robin_fp16_groups[i],
partition_size,
partition_id)
self.partition_size.append(partition_size)
self.params_in_partition.append(params_in_partition)
self.params_not_in_partition.append(params_not_in_partition)
self.first_offset.append(first_offset)
for rank in range(dist.get_world_size()):
if dist.get_rank() == rank and self.verbose:
print(
f"Rank: {rank} partition count {self.partition_count} and sizes{[(p.numel(), self.is_moe_param_group[i] if hasattr(self, 'is_moe_param_group') else False) for i, p in enumerate(self.single_partition_of_fp32_groups)]} "
)
dist.barrier()
# exit(0)
self.reduce_bucket_size = int(reduce_bucket_size)
self.allgather_bucket_size = int(allgather_bucket_size)
self.reduction_event = torch.cuda.Event(
enable_timing=False, blocking=False)
self.reduction_stream = torch.cuda.Stream()
self.cpu_computation_stream = torch.cuda.Stream()
self.copy_grad_stream = torch.cuda.Stream()
self.callback_queued = False
self.param_dict = {}
# map between param_id and bool to specify if a param is in this partition
self.is_param_in_current_partition = {}
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.elements_in_ipg_bucket = 0
self.params_already_reduced = []
self._release_ipg_buffers()
self.previous_reduced_grads = None
self.ipg_bucket_has_moe_params = False
# simplified param id
self.param_id = {}
largest_param_numel = 0
count = 0
for i, params_group in enumerate(self.fp16_groups):
for param in params_group:
unique_id = id(param)
self.param_id[unique_id] = count
self.param_dict[count] = param
self.params_already_reduced.append(False)
if param.numel() > largest_param_numel:
largest_param_numel = param.numel()
count = count + 1
for param_group in self.params_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(
param)] = True
for param_group in self.params_not_in_partition:
for param in param_group:
self.is_param_in_current_partition[self.get_param_id(
param)] = False
if self.cpu_offload:
self.accumulated_grads_in_cpu = {}
self.norm_for_param_grads = {}
self.local_overflow = False
self.grad_position = {}
self.temp_grad_buffer_for_cpu_offload = torch.zeros(
largest_param_numel,
device=self.device,
dtype=self.dtype).pin_memory()
self.temp_grad_buffer_for_gpu_offload = torch.zeros(
largest_param_numel,
device=torch.cuda.current_device(),
dtype=self.dtype)
for i, params_group in enumerate(self.fp16_groups):
self.get_grad_position(i,
self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i])
# mapping from parameter to partition that it belongs to
self.param_to_partition_ids = {}
# stores if a partition has been reduced in this step
self.is_partition_reduced = {}
# number of grads in partition that still need to be computed
self.remaining_grads_in_partition = {}
# total number of grads in partition
self.total_grads_in_partition = {}
# stores if a grad in a partition has been computed or not
self.is_grad_computed = {}
# stores the offset at which a parameter gradient needs to be inserted in a partition
self.grad_partition_insertion_offset = {}
# the offset in the gradient at which it must be inserted at the beginning of the partition
self.grad_start_offset = {}
# will store the averaged gradients required by this partition
self.averaged_gradients = {}
# store index of first parameter in each partition
self.first_param_index_in_partition = {}
# initializes all data structures for implementing gradient partitioning
self.initialize_gradient_partitioning_data_structures()
# resets the data structure value for the next backward propagation
self.reset_partition_gradient_structures()
# creates backward hooks for gradient partitioning
if self.partition_gradients or self.overlap_comm:
self.create_reduce_and_remove_grad_hooks()
        # we may have a way of fusing dynamic scale. Not supported for now.
if self.dtype == torch.float or not dynamic_loss_scale:
loss_scale_value = 1.0 if self.dtype == torch.float else static_loss_scale
self.dynamic_loss_scale = False
self.loss_scaler = LossScaler(scale=loss_scale_value)
cur_iter = 0
else:
if dynamic_loss_args is None:
self.loss_scaler = DynamicLossScaler()
else:
self.loss_scaler = DynamicLossScaler(**dynamic_loss_args)
self.dynamic_loss_scale = True
if self.verbose:
report_memory_usage("Before initializing optimizer states")
self.initialize_optimizer_states()
if self.verbose:
report_memory_usage("After initializing optimizer states")
if dist.get_rank() == 0:
print(f"optimizer state initialized")
if dist.get_rank(group=self.dp_process_group) == 0:
report_memory_usage(f"After initializing ZeRO optimizer")
def _configure_moe_settings(self):
assert self.contiguous_gradients, "Contiguous Gradients in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
assert self.reduce_scatter, "Reduce Scatter in ZeRO Stage 2 must be set to True for MoE. Other code paths are not tested with MoE"
def is_moe_group(group):
return 'moe' in group and group['moe']
assert any([is_moe_group(group) for group in
self.optimizer.param_groups]), "The model has moe layers, but None of the param groups are marked as MoE. Create a param group with 'moe' key set to True before creating optimizer"
self.is_moe_param_group = []
for i, group in enumerate(self.optimizer.param_groups):
if is_moe_group(group):
assert all(
[is_moe_param(param) for param in group['params']]), "All params in MoE group must be MoE params"
self.real_dp_process_group[i] = self.expert_dp_process_group
self.partition_count[i] = dist.get_world_size(
group=self.expert_dp_process_group)
self.is_moe_param_group.append(True)
else:
self.is_moe_param_group.append(False)
assert self.expert_dp_process_group is not None, "Expert data parallel group should be configured with MoE"
assert self.ep_process_group is not None, "Expert parallel group should be configured with MoE"
def _update_model_fp16_weights(self, group_index):
updated_params = self.unflatten(self.fp16_groups_flat[group_index],
self.round_robin_fp16_groups[group_index])
for p, q in zip(self.round_robin_fp16_groups[group_index], updated_params):
p.data = q.data
# set model fp16 weight to slices of reordered flattened buffer
for param_index, param in enumerate(self.fp16_groups[group_index]):
            new_index = self.round_robin_fp16_indices[group_index][param_index]
param.data = self.round_robin_fp16_groups[group_index][new_index].data
def _round_robin_reorder(self, tensor_list, num_partitions):
# disable round robin if need to debug something
# return tensor_list, list(range(len(tensor_list)))
partition_tensors = {}
for i, tensor in enumerate(tensor_list):
j = i % num_partitions
if not j in partition_tensors:
partition_tensors[j] = []
partition_tensors[j].append((i, tensor))
reordered_tensors = []
reordered_indices = {}
for partition_index in partition_tensors.keys():
for i, (original_index, tensor) in enumerate(partition_tensors[partition_index]):
reordered_indices[original_index] = len(reordered_tensors)
reordered_tensors.append(tensor)
return reordered_tensors, reordered_indices
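    # Worked example (illustrative, not from the original source): with tensors
    # [t0, t1, t2, t3, t4] and num_partitions = 2, grouping by i % 2 gives
    # partition 0 -> [t0, t2, t4] and partition 1 -> [t1, t3], so the method
    # returns reordered_tensors = [t0, t2, t4, t1, t3] and
    # reordered_indices = {0: 0, 2: 1, 4: 2, 1: 3, 3: 4}
    # (original index -> position in the reordered list).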
def _release_ipg_buffers(self):
if self.contiguous_gradients:
self.ipg_buffer = None
self.grads_in_partition = None
self.grads_in_partition_offset = 0
def initialize_optimizer_states(self):
for i, group in enumerate(self.fp16_groups):
single_grad_partition = torch.zeros(
int(self.partition_size[i]),
dtype=self.single_partition_of_fp32_groups[i].dtype,
device=self.device)
self.single_partition_of_fp32_groups[
i].grad = single_grad_partition.pin_memory(
) if self.cpu_offload else single_grad_partition
self.optimizer.step()
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None # class init
return
#########################################################################
#################### ZeRO Stage 1 - reduce gradients ####################
#########################################################################
def reduce_gradients(self, pipeline_parallel=False):
world_size = dist.get_world_size(self.dp_process_group)
my_rank = dist.get_rank(self.dp_process_group)
# with PP we must create ipg buffer, since backward is handled outside zero
if pipeline_parallel and self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=torch.cuda.current_device())
self.ipg_buffer.append(buf_0)
self.ipg_index = 0
if not self.overlap_comm:
for i, group in enumerate(self.fp16_groups):
for param in group:
if param.grad is not None:
self.reduce_ready_partitions_and_remove_grads(param, i)
# reduce any pending grads in either hook/non-hook case
self.overlapping_partition_gradients_reduce_epilogue()
#########################################################################
#########################ZeRO Partition Gradients########################
#########################################################################
def get_first_param_index(self, group_id, param_group, partition_id):
for index, param in enumerate(param_group):
param_id = self.get_param_id(param)
if partition_id in self.param_to_partition_ids[group_id][param_id]:
return index
return None
def initialize_gradient_partitioning_data_structures(self):
for i, param_group in enumerate(self.round_robin_fp16_groups):
total_partitions = dist.get_world_size(
group=self.real_dp_process_group[i])
self.param_to_partition_ids[i] = {}
self.is_partition_reduced[i] = {}
self.total_grads_in_partition[i] = {}
self.remaining_grads_in_partition[i] = {}
self.is_grad_computed[i] = {}
self.grad_partition_insertion_offset[i] = {}
self.grad_start_offset[i] = {}
self.first_param_index_in_partition[i] = {}
for partition_id in range(total_partitions):
self.is_grad_computed[i][partition_id] = {}
self.grad_partition_insertion_offset[i][partition_id] = {}
self.grad_start_offset[i][partition_id] = {}
self.total_grads_in_partition[i][partition_id] = 0
self.initialize_gradient_partition(
i, param_group, partition_id)
self.is_partition_reduced[i][partition_id] = False
self.first_param_index_in_partition[i][
partition_id] = self.get_first_param_index(
i,
param_group,
partition_id)
def independent_gradient_partition_epilogue(self):
if self.verbose:
self.report_ipg_memory_usage(
f"In ipg_epilogue before reduce_ipg_grads", 0)
self.reduce_ipg_grads()
if self.verbose:
self.report_ipg_memory_usage(
f"In ipg_epilogue after reduce_ipg_grads", 0)
# if dist.get_rank() == 0:
# print()("Params already reduced %s", self.params_already_reduced)
for i in range(len(self.params_already_reduced)):
self.params_already_reduced[i] = False
if self.overlap_comm:
torch.cuda.synchronize()
# It is safe to clear previously reduced grads of other partitions
self._clear_previous_reduced_grads()
if self.cpu_offload is False:
for i, _ in enumerate(self.fp16_groups):
if not i in self.averaged_gradients or self.averaged_gradients[i] is None:
self.averaged_gradients[i] = self.get_flat_partition(
self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=torch.cuda.current_device(),
return_tensor_list=True)
else:
avg_new = self.get_flat_partition(self.params_in_partition[i],
self.first_offset[i],
self.partition_size[i],
dtype=self.dtype,
device=torch.cuda.current_device(),
return_tensor_list=True)
for accumulated_grad, new_avg_grad in zip(self.averaged_gradients[i], avg_new):
accumulated_grad.add_(new_avg_grad)
self._release_ipg_buffers()
# No need to keep the gradients anymore.
# All gradients required by the step
# are in self.averaged_gradients
self.zero_grad()
if self.verbose:
report_memory_usage(f"End ipg_epilogue")
# resets all partition to no reduced
# sets remaining grads to the total number of grads in each partition
# set is grad computed to false for all grads in partition
def reset_partition_gradient_structures(self):
for i, _ in enumerate(self.fp16_groups):
total_partitions = dist.get_world_size(
group=self.real_dp_process_group[i])
for partition_id in range(total_partitions):
self.is_partition_reduced[i][partition_id] = False
self.remaining_grads_in_partition[i][
partition_id] = self.total_grads_in_partition[i][partition_id]
for param_id in self.is_grad_computed[i][partition_id]:
self.is_grad_computed[i][partition_id][param_id] = False
def initialize_gradient_partition(self, i, param_group, partition_id):
def set_key_value_list(dictionary, key, value):
if key in dictionary:
dictionary[key].append(value)
else:
dictionary[key] = [value]
def increment_value(dictionary, key):
if key in dictionary:
dictionary[key] += 1
else:
dictionary[key] = 1
partition_size = self.partition_size[i]
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for param in param_group:
param_size = param.numel()
param_id = self.get_param_id(param)
if (current_index >= start_index and current_index < end_index):
set_key_value_list(self.param_to_partition_ids[i],
param_id,
partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][
param_id] = current_index - start_index
self.grad_start_offset[i][partition_id][param_id] = 0
elif start_index > current_index and start_index < (current_index +
param_size):
assert (
first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition"
first_offset = start_index - current_index
set_key_value_list(self.param_to_partition_ids[i],
param_id,
partition_id)
increment_value(self.total_grads_in_partition[i], partition_id)
self.is_grad_computed[i][partition_id][param_id] = False
self.grad_partition_insertion_offset[i][partition_id][param_id] = 0
self.grad_start_offset[i][partition_id][param_id] = first_offset
current_index = current_index + param_size
def overlapping_partition_gradients_reduce_epilogue(self):
self.independent_gradient_partition_epilogue()
def create_reduce_and_remove_grad_hooks(self):
self.grad_accs = []
for i, param_group in enumerate(self.fp16_groups):
for param in param_group:
if param.requires_grad:
def wrapper(param, i):
param_tmp = param.expand_as(param)
grad_acc = param_tmp.grad_fn.next_functions[0][0]
def reduce_partition_and_remove_grads(*notneeded):
self.reduce_ready_partitions_and_remove_grads(
param, i)
grad_acc.register_hook(
reduce_partition_and_remove_grads)
self.grad_accs.append(grad_acc)
wrapper(param, i)
def get_param_id(self, param):
unique_id = id(param)
return self.param_id[unique_id]
def report_ipg_memory_usage(self, tag, param_elems):
elem_count = self.elements_in_ipg_bucket + param_elems
percent_of_bucket_size = (
100.0 * elem_count) // self.reduce_bucket_size
if self.verbose:
report_memory_usage(
f"{tag}: elems in_bucket {self.elements_in_ipg_bucket} param {param_elems} max_percent {percent_of_bucket_size}"
)
# create a flat tensor aligned at the alignment boundary
def flatten_dense_tensors_aligned(self, tensor_list, alignment):
num_elements = 0
for tensor in tensor_list:
num_elements = num_elements + tensor.numel()
remaining = num_elements % alignment
if remaining:
elements_to_add = alignment - remaining
pad_tensor = torch.zeros(elements_to_add,
device=tensor_list[0].device,
dtype=tensor_list[0].dtype)
padded_tensor_list = tensor_list + [pad_tensor]
num_elements = num_elements + elements_to_add
else:
padded_tensor_list = tensor_list
return self.flatten(padded_tensor_list)
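    # Sketch of the padding math (illustrative): flattening tensors with 3 + 4 = 7
    # elements at alignment = 4 gives remaining = 7 % 4 = 3, so a zero tensor of
    # 4 - 3 = 1 element is appended and the flat buffer holds 8 elements, a
    # multiple of the alignment, which keeps the data-parallel partitions equal.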
############### Independent Partition Gradient ########################
def reduce_independent_p_g_buckets_and_remove_grads(self, param, i):
if self.elements_in_ipg_bucket + param.numel() > self.reduce_bucket_size:
self.report_ipg_memory_usage("In ipg_remove_grads before reduce_ipg_grads",
param.numel())
self.reduce_ipg_grads()
if self.contiguous_gradients and self.overlap_comm:
# Swap ipg_index between 0 and 1
self.ipg_index = 1 - self.ipg_index
self.report_ipg_memory_usage("In ipg_remove_grads after reduce_ipg_grads",
param.numel())
param_id = self.get_param_id(param)
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
if param.numel() > self.reduce_bucket_size:
self.extra_large_param_to_reduce = param
elif self.contiguous_gradients:
# keeping the gradients contiguous to prevent memory fragmentation, and avoid flattening
new_grad_tensor = self.ipg_buffer[self.ipg_index].narrow(
0,
self.elements_in_ipg_bucket,
param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
self.elements_in_ipg_bucket += param.numel()
assert param.grad is not None, f"rank {dist.get_rank()} - Invalid to reduce Param {param_id} with None gradient"
self.grads_in_ipg_bucket.append(param.grad)
self.params_in_ipg_bucket.append((i, param, param_id))
# make sure the average tensor function knows how to average the gradients
if is_moe_param(param):
self.ipg_bucket_has_moe_params = True
self.report_ipg_memory_usage("End ipg_remove_grads", 0)
def print_rank_0(self, message):
if dist.get_rank() == 0 and self.verbose:
print(message)
def gradient_reduction_w_predivide(self, tensor):
dp_world_size = dist.get_world_size(group=self.dp_process_group)
tensor_to_allreduce = tensor
if self.allreduce_always_fp32:
tensor_to_allreduce = tensor.float()
if self.postscale_gradients:
if self.gradient_predivide_factor != 1.0:
tensor_to_allreduce.mul_(1. / self.gradient_predivide_factor)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.gradient_predivide_factor != dp_world_size:
tensor_to_allreduce.mul_(
self.gradient_predivide_factor / dp_world_size)
else:
tensor_to_allreduce.div_(dp_world_size)
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
if self.allreduce_always_fp32 and tensor is not tensor_to_allreduce:
tensor.copy_(tensor_to_allreduce)
return tensor
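    # Numeric sketch (illustrative): with postscale_gradients=True, predivide
    # factor f and data-parallel world size N, the tensor is scaled by 1/f,
    # all-reduced (summed across N ranks), then scaled by f/N, i.e.
    # (sum_i g_i / f) * f / N = mean(g); when f == N the final scale is skipped
    # because it would be 1. With postscale_gradients=False the tensor is divided
    # by N before the all-reduce, which also yields the mean gradient.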
def average_tensor(self, tensor):
if self.overlap_comm:
torch.cuda.synchronize()
stream = self.reduction_stream
else:
stream = torch.cuda.current_stream()
with torch.cuda.stream(stream):
if not self.reduce_scatter:
self.gradient_reduction_w_predivide(tensor)
return
# Accumulate destination ranks and bucket offsets for each gradient slice.
# Note: potential future optimization, record access pattern of parameters
# in backward pass and partition gradients w.r.t. access pattern so that our
# bucket is guaranteed to be contiguous w.r.t. ranks
rank_and_offsets = []
real_dp_process_group = []
curr_size = 0
prev_id = -1
process_group = self.dp_process_group
# count = 0
for i, param, param_id in self.params_in_ipg_bucket:
process_group = self.dp_process_group
# Averages gradients at parameter level if ipg has a moe param
# Otherwise averaging is done at the entire buffer level at the end of the loop
if self.ipg_bucket_has_moe_params:
process_group = self.expert_dp_process_group if is_moe_param(
param) else self.dp_process_group
param.grad.data.div_(
dist.get_world_size(group=process_group))
partition_ids = self.param_to_partition_ids[i][param_id]
partition_size = self.partition_size[i]
# Get all partition ids + their offsets
partition_ids_w_offsets = []
for partition_id in partition_ids:
offset = self.grad_start_offset[i][partition_id][param_id]
partition_ids_w_offsets.append((partition_id, offset))
partition_ids_w_offsets.sort(key=lambda t: t[1])
# Calculate rank and offsets for grad slices
for idx in range(len(partition_ids_w_offsets)):
partition_id, offset = partition_ids_w_offsets[idx]
# if dist.get_rank() == 0 and count < 100:
# print(f"Rank {dist.get_rank()} rank offet id {idx} calculated dp size {dist.get_world_size(group=process_group)} real dp size {dist.get_world_size(self.real_dp_process_group[i])} and dst: {partition_id}")
# count += 1
# Calculate numel for grad slice depending on partition location
if idx == len(partition_ids_w_offsets) - 1:
# Last partition_id uses its own offset
numel = param.numel() - offset
else:
# Set numel to next partition's offset
numel = partition_ids_w_offsets[idx + 1][1] - offset
# Merge bucket ranges if they belong to the same rank
if partition_id == prev_id:
prev_pid, prev_size, prev_numel = rank_and_offsets[-1]
rank_and_offsets[-1] = (prev_pid,
prev_size, prev_numel + numel)
else:
rank_and_offsets.append(
(partition_id, curr_size, numel))
real_dp_process_group.append(process_group)
curr_size += numel
prev_id = partition_id
if not self.ipg_bucket_has_moe_params:
tensor.div_(dist.get_world_size(group=self.dp_process_group))
async_handles = []
for i, (dst, bucket_offset, numel) in enumerate(rank_and_offsets):
grad_slice = tensor.narrow(0, int(bucket_offset), int(numel))
# if dist.get_rank() == 0:
# print(f"Rank {dist.get_rank()} rank offet id {i} real dp size {dist.get_world_size(group=real_dp_process_group[i])} and dst: {dst}")
# dist.barrier()
# dist.barrier()
dst_rank = _get_global_rank(real_dp_process_group[i], dst)
async_handle = dist.reduce(grad_slice,
dst=dst_rank,
group=real_dp_process_group[i],
async_op=True)
async_handles.append(async_handle)
for handle in async_handles:
handle.wait()
##############################################################################
############################# CPU Offload Methods#############################
##############################################################################
def get_grad_position(self, group_id, tensor_list, first_offset, partition_size):
current_offset = 0
for i, tensor in enumerate(tensor_list):
param_id = self.get_param_id(tensor)
param_start_offset = 0
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
param_start_offset = first_offset
            # we don't need all elements of the tensor
if num_elements > (partition_size - current_offset):
num_elements = partition_size - current_offset
self.grad_position[param_id] = [
int(group_id),
int(param_start_offset),
int(current_offset),
int(num_elements)
]
current_offset += num_elements
def update_overflow_tracker_for_param_grad(self, param):
if param.grad is not None and self._has_inf_or_nan(param.grad.data):
self.local_overflow = True
def async_accumulate_grad_in_cpu_via_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
        # copy to a preexisting buffer to avoid memory allocation penalty
dest_buffer = self.temp_grad_buffer_for_gpu_offload.view(-1).narrow(
0,
0,
param.numel())
# buffer for storing gradients for this parameter in CPU
def buffer_to_accumulate_to_in_cpu():
if not self.fp16_master_weights_and_gradients:
return torch.zeros(param.numel(),
dtype=param.dtype,
device=self.device).pin_memory()
else:
return self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(
0,
dest_offset,
num_elements)
# accumulate gradients into param.grad, or into the parts of it that belong to this partition
def accumulate_gradients():
if not self.fp16_master_weights_and_gradients:
dest_buffer.copy_(self.accumulated_grads_in_cpu[param_id].view(-1),
non_blocking=True)
param.grad.data.view(-1).add_(dest_buffer)
else:
dest_buffer.narrow(0,
source_offset,
num_elements).copy_(
self.accumulated_grads_in_cpu[param_id].view(-1),
non_blocking=True)
param.grad.data.view(-1).narrow(
0,
source_offset,
num_elements).add_(dest_buffer.narrow(0,
source_offset,
num_elements))
# move accumulated gradients back to CPU
def copy_gradients_to_cpu():
if not self.fp16_master_weights_and_gradients:
self.accumulated_grads_in_cpu[param_id].data.copy_(
param.grad.data.view(-1),
non_blocking=True)
else:
self.accumulated_grads_in_cpu[param_id].data.copy_(
param.grad.data.view(-1).narrow(0,
source_offset,
num_elements),
non_blocking=True)
if param_id not in self.accumulated_grads_in_cpu:
self.accumulated_grads_in_cpu[param_id] = buffer_to_accumulate_to_in_cpu(
)
if self.micro_step_id > 0:
accumulate_gradients()
# at the boundary we will send 32bit directly
if not self.is_gradient_accumulation_boundary:
copy_gradients_to_cpu()
def set_norm_for_param_grad(self, param):
param_id = self.get_param_id(param)
accumulated_grad = self.accumulated_grads_in_cpu[
param_id] if self.gradient_accumulation_steps > 1 else param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(
-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(
2)
def set_norm_for_param_grad_in_gpu(self, param):
param_id = self.get_param_id(param)
accumulated_grad = param.grad
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
start = source_offset
accumulated_grad = accumulated_grad.view(
-1).narrow(0, start, num_elements)
self.norm_for_param_grads[param_id] = accumulated_grad.data.double().norm(
2)
def async_inplace_copy_grad_to_fp32_buffer_from_gpu(self, param):
param_id = self.get_param_id(param)
[i, source_offset, dest_offset, num_elements] = self.grad_position[param_id]
dest_tensor = self.single_partition_of_fp32_groups[i].grad.view(-1).narrow(
0,
dest_offset,
num_elements)
src_tensor = param.grad.view(-1).narrow(0, source_offset, num_elements)
if not self.fp16_master_weights_and_gradients:
src_tensor = src_tensor.float()
dest_tensor.copy_(src_tensor, non_blocking=True)
param.grad = None # offload only
def complete_grad_norm_calculation_for_cpu_offload(self, params):
total_norm = 0.0
norm_type = 2.0
for p in params:
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_id = self.get_param_id(p)
# Some models have trainable parameters that are skipped during training;
# their backward hooks in self.create_reduce_and_remove_grad_hooks() never run,
# so they have no entry in norm_for_param_grads.
if param_id in self.norm_for_param_grads:
param_norm = self.norm_for_param_grads[param_id]
total_norm += param_norm.item() ** 2
else:
# Unused parameters in modules may be unexpected, so raise an
# explicit error message when this occurs, together with an option
# to suppress the error.
assert self.ignore_unused_parameters, """
This assert indicates that your module has parameters that
were not used in producing the loss.
You can avoid this assert by
(1) enabling the ignore_unused_parameters option in the zero_optimization config;
(2) making sure all trainable parameters and `forward` function
outputs participate in calculating the loss.
"""
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda,
op=torch.distributed.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
if total_norm == float(
'inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
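# Worked example of the norm reduction above (illustrative only): if rank 0's
# local sum of squared per-param grad norms is 9.0 and rank 1's is 16.0, the
# all_reduce(SUM) over the data-parallel group leaves 25.0 on every rank, and
# total_norm = 25.0 ** (1/2) = 5.0. A result of inf or NaN is mapped to -1 as a
# sentinel so callers can treat the step as an overflow.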
############################################################################################
def copy_grads_in_partition(self, param):
if self.cpu_offload:
if self.gradient_accumulation_steps > 1:
self.async_accumulate_grad_in_cpu_via_gpu(param)
if self.is_gradient_accumulation_boundary:
self.set_norm_for_param_grad_in_gpu(param)
self.update_overflow_tracker_for_param_grad(param)
self.async_inplace_copy_grad_to_fp32_buffer_from_gpu(param)
return
# print(f"ID {self.get_param_id(param)} grad norm {param.grad.norm()}")
if self.grads_in_partition is None:
self.grads_in_partition_offset = 0
total_size = 0
for group in self.params_in_partition:
for param_in_partition in group:
total_size += param_in_partition.numel()
if self.verbose:
report_memory_usage(
f"before copying {total_size} gradients into partition")
self.grads_in_partition = torch.empty(int(total_size),
dtype=self.dtype,
device=torch.cuda.current_device())
if self.verbose:
report_memory_usage(
f"after copying {total_size} gradients into partition")
# The allreduce buffer will be rewritten. Copy the gradients in this partition to a new buffer.
new_grad_tensor = self.grads_in_partition.view(-1).narrow(
0,
self.grads_in_partition_offset,
param.numel())
new_grad_tensor.copy_(param.grad.view(-1))
param.grad.data = new_grad_tensor.data.view_as(param.grad)
# print(f"Grad norm after copy to contiguous_buffer {param.grad.data.norm()}")
self.grads_in_partition_offset += param.numel()
def reduce_ipg_grads(self):
if self.contiguous_gradients:
if self.extra_large_param_to_reduce is not None:
assert len(
self.params_in_ipg_bucket) == 1, "more than 1 param in ipg bucket, this shouldn't happen"
_, _, param_id = self.params_in_ipg_bucket[0]
assert self.get_param_id(
self.extra_large_param_to_reduce) == param_id, "param in ipg bucket does not match extra-large param"
self.average_tensor(
self.extra_large_param_to_reduce.grad.view(-1))
self.extra_large_param_to_reduce = None
else:
self.average_tensor(self.ipg_buffer[self.ipg_index])
else:
self.buffered_reduce_fallback(
None,
self.grads_in_ipg_bucket,
elements_per_buffer=self.elements_in_ipg_bucket)
if self.overlap_comm:
stream = self.reduction_stream
elif self.cpu_offload:
# TODO: copy_grad_stream is disabled because of race with reduce. This hurts perf and should be fixed.
# torch.cuda.synchronize()
# stream = self.copy_grad_stream
stream = torch.cuda.current_stream()
else:
stream = torch.cuda.current_stream()
with torch.cuda.stream(stream):
for _, param, param_id in self.params_in_ipg_bucket:
assert self.params_already_reduced[param_id] == False, \
f"The parameter {param_id} has already been reduced. \
Gradient computed twice for this partition. \
Multiple gradient reduction is currently not supported"
self.params_already_reduced[param_id] = True
if self.partition_gradients:
if not self.is_param_in_current_partition[param_id]:
if self.overlap_comm and self.contiguous_gradients is False:
# Clear grads of other partitions during the next reduction
# to avoid clearing them before the reduction is complete.
if self.previous_reduced_grads is None:
self.previous_reduced_grads = []
self.previous_reduced_grads.append(param)
else:
param.grad = None # only if self.partition_gradients
elif self.contiguous_gradients:
self.copy_grads_in_partition(param)
self.grads_in_ipg_bucket = []
self.params_in_ipg_bucket = []
self.ipg_bucket_has_moe_params = False
self.elements_in_ipg_bucket = 0
#####################################################################
def reduce_ready_partitions_and_remove_grads(self, param, i):
if self.partition_gradients or self.is_gradient_accumulation_boundary:
self.reduce_independent_p_g_buckets_and_remove_grads(param, i)
def zero_reduced_gradients(self, partition_id, i):
def are_all_related_partitions_reduced(params_id):
for partition_id in self.param_to_partition_ids[i][params_id]:
if not self.is_partition_reduced[i][partition_id]:
return False
return True
for params_id in self.is_grad_computed[i][partition_id]:
if are_all_related_partitions_reduced(params_id):
self.param_dict[params_id].grad = None # dead code
def flatten_and_print(self, message, tensors, start=0, n=5):
flatten_tensor = self.flatten(tensors)
def print_func():
print(flatten_tensor.contiguous().view(-1).narrow(0, start, n))
self.sequential_execution(print_func, message)
def get_grads_to_reduce(self, i, partition_id):
def get_reducable_portion(key):
grad = self.param_dict[key].grad
total_elements = grad.numel()
start = self.grad_start_offset[i][partition_id][key]
num_elements = min(
total_elements - start,
self.partition_size[i] -
self.grad_partition_insertion_offset[i][partition_id][key])
if not pg_correctness_test:
if num_elements == total_elements:
return grad
else:
return grad.contiguous().view(-1).narrow(0,
int(start),
int(num_elements))
else:
if num_elements == total_elements:
return grad.clone()
else:
return grad.clone().contiguous().view(-1).narrow(
0,
int(start),
int(num_elements))
grads_to_reduce = []
for key in self.is_grad_computed[i][partition_id]:
grad = get_reducable_portion(key)
grads_to_reduce.append(grad)
return grads_to_reduce
def sequential_execution(self, function, message, group=None):
if group is None:
group = self.dp_process_group
if dist.get_rank(group=group) == 0:
print(message)
for id in range(dist.get_world_size(group=group)):
if id == dist.get_rank(group=group):
function()
dist.barrier(group=group)
def set_none_gradients_to_zero(self, i, partition_id):
for param_id in self.is_grad_computed[i][partition_id]:
param = self.param_dict[param_id]
if param.grad is None:
param.grad = torch.zeros_like(param)
##################### Reduction Related Methods #############################
def allreduce_bucket(self, bucket, allreduce_always_fp32=False, rank=None, log=None):
rank = None
tensor = self.flatten(bucket)
tensor_to_allreduce = tensor
if pg_correctness_test:
allreduce_always_fp32 = True
if allreduce_always_fp32:
tensor_to_allreduce = tensor.float()
tensor_to_allreduce.div_(
dist.get_world_size(group=self.dp_process_group))
if rank is None:
# "All Reducing"
dist.all_reduce(tensor_to_allreduce, group=self.dp_process_group)
else:
global_rank = _get_global_rank(self.dp_process_group, rank)
dist.reduce(tensor_to_allreduce, global_rank,
group=self.dp_process_group)
if allreduce_always_fp32 and tensor is not tensor_to_allreduce:
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
tensor.copy_(tensor_to_allreduce)
return tensor
def _clear_previous_reduced_grads(self):
if self.previous_reduced_grads is not None:
for param in self.previous_reduced_grads:
param.grad = None # overlap enabled
self.previous_reduced_grads = None
# if rank is specified do a reduction instead of an allreduce
def allreduce_and_copy(self, small_bucket, rank=None, log=None):
if self.overlap_comm:
torch.cuda.synchronize()
# It is safe to clear the previously reduced grads of other partitions
self._clear_previous_reduced_grads()
stream = self.reduction_stream
else:
stream = torch.cuda.current_stream()
with torch.cuda.stream(stream):
allreduced = self.allreduce_bucket(
small_bucket, rank=rank, log=log)
if rank is None or rank == dist.get_rank(group=self.dp_process_group):
for buf, synced in zip(small_bucket, self.unflatten(allreduced, small_bucket)):
buf.copy_(synced)
def allreduce_no_retain(self,
bucket,
numel_per_bucket=500000000,
rank=None,
log=None):
small_bucket = []
numel = 0
for tensor in bucket:
small_bucket.append(tensor)
numel = numel + tensor.numel()
if numel > numel_per_bucket:
self.allreduce_and_copy(small_bucket, rank=rank, log=None)
small_bucket = []
if len(small_bucket) > 0:
self.allreduce_and_copy(small_bucket, rank=rank, log=log)
# allows using reduction of gradients instead of using all_reduce
def buffered_reduce_fallback(self,
rank,
grads,
elements_per_buffer=500000000,
log=None):
split_buckets = split_half_float_double(grads)
for i, bucket in enumerate(split_buckets):
self.allreduce_no_retain(bucket,
numel_per_bucket=elements_per_buffer,
rank=rank,
log=log)
#############################################################################
#############################################################################
#############################################################################
# views the tensor as multiple partitions and returns
# those partitions
def get_data_parallel_partitions(self, tensor, group_id):
partitions = []
dp = dist.get_world_size(group=self.real_dp_process_group[group_id])
dp_id = dist.get_rank(group=self.real_dp_process_group[group_id])
total_num_elements = tensor.numel()
base_size = total_num_elements // dp
remaining = total_num_elements % dp
start = 0
for id in range(dp):
partition_size = base_size
if id < remaining:
partition_size = partition_size + 1
partitions.append(tensor.narrow(0, start, partition_size))
start = start + partition_size
return partitions
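# Illustrative example (not part of the original code): with a flat tensor of 10
# elements and a data-parallel world size of 4, base_size = 2 and remaining = 2,
# so ranks 0 and 1 get 3-element partitions starting at offsets 0 and 3, while
# ranks 2 and 3 get 2-element partitions starting at offsets 6 and 8. Each entry
# is a narrow() view into the same storage, so no data is copied here.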
def get_partition_info(self, tensor_list, partition_size, partition_id):
params_in_partition = []
params_not_in_partition = []
start_index = partition_size * partition_id
end_index = partition_size * (partition_id + 1)
current_index = 0
first_offset = 0
for tensor in tensor_list:
tensor_size = tensor.numel()
if (current_index >= start_index and current_index < end_index):
params_in_partition.append(tensor)
elif start_index > current_index and start_index < (current_index +
tensor_size):
params_in_partition.append(tensor)
assert (
first_offset == 0), "This can happen either zero or only once as this must be the first tensor in the partition"
first_offset = start_index - current_index
else:
params_not_in_partition.append(tensor)
current_index = current_index + tensor_size
return params_in_partition, params_not_in_partition, first_offset
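# Illustrative example (not part of the original code): with tensors of sizes
# [4, 5, 3], partition_size = 6 and partition_id = 1 (so the partition covers
# flat indices 6..11), the first tensor (indices 0..3) is not in the partition,
# the second tensor (indices 4..8) overlaps it starting 2 elements in, and the
# third tensor (indices 9..11) lies fully inside it. The function then returns
# params_in_partition = [t1, t2], params_not_in_partition = [t0], first_offset = 2.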
def zero_grad(self, set_grads_to_None=True):
"""
Zero FP16 parameter grads.
"""
# FP32 grad should never exist.
# For speed, set model fp16 grad to None by default
for group in self.fp16_groups:
for p in group:
if set_grads_to_None:
p.grad = None # epilogue and in step
else:
if p.grad is not None:
p.grad.detach_()
p.grad.zero_()
def _model_parallel_all_reduce(self, tensor, op):
""" Perform all reduce within model parallel group, if any.
"""
if self.model_parallel_group is None:
pass
else:
torch.distributed.all_reduce(tensor=tensor,
op=op,
group=self.model_parallel_group)
def get_grad_norm_direct(self, gradients, params, norm_type=2):
"""Clips gradient norm of an iterable of parameters.
This is adapted from ``torch.nn.utils.clip_grad.clip_grad_norm_`` and
added functionality to handle model parallel parameters. Note that
the gradients are modified in place.
Arguments:
parameters (Iterable[Tensor] or Tensor): an iterable of Tensors or a
single Tensor that will have gradients normalized
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Total norm of the parameters (viewed as a single vector).
"""
norm_type = float(norm_type)
if norm_type == inf:
total_norm = max(g.data.abs().max() for g in gradients)
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.MAX,
group=self.dp_process_group)
# Take max across all GPUs.
self._model_parallel_all_reduce(tensor=total_norm_cuda,
op=torch.distributed.ReduceOp.MAX)
total_norm = total_norm_cuda[0].item()
else:
total_norm = 0.0
# if dist.get_rank() == 0:
# print()(f"Total Norm begining {total_norm}")
for g, p in zip(gradients, params):
if is_model_parallel_parameter(p) or (self.model_parallel_rank == 0):
param_norm = g.data.double().norm(2)
total_norm += param_norm.item() ** 2
# Sum across all model parallel GPUs.
total_norm_cuda = torch.cuda.FloatTensor([float(total_norm)])
torch.distributed.all_reduce(total_norm_cuda,
op=torch.distributed.ReduceOp.SUM,
group=self.dp_process_group)
self._model_parallel_all_reduce(tensor=total_norm_cuda,
op=torch.distributed.ReduceOp.SUM)
total_norm = total_norm_cuda[0].item() ** (1. / norm_type)
if total_norm == float(
'inf') or total_norm == -float('inf') or total_norm != total_norm:
total_norm = -1
return total_norm
# creates a flat fused tensor from the tensor list starting at the first_offset
# in the first tensor of the list. If there are not enough elements in the tensor
# list then the flat tensor will be padded with zeros
def get_flat_partition(self,
tensor_list,
first_offset,
partition_size,
dtype,
device,
return_tensor_list=False):
flat_tensor_list = []
current_size = 0
for i, tensor in enumerate(tensor_list):
if tensor.grad is None:
tensor.grad = torch.zeros_like(tensor)
tensor = tensor.grad
num_elements = tensor.numel()
tensor_offset = 0
# we need to offset to get to the right element
if i == 0 and first_offset > 0:
tensor_offset = first_offset
num_elements = num_elements - tensor_offset
# we don't need all elements of the tensor
if num_elements > (partition_size - current_size):
num_elements = partition_size - current_size
# we need a narrow view of the tensor based on the tensor offset and number of elements that
# we need from this tensor
if tensor_offset > 0 or num_elements < tensor.numel():
flat_tensor_list.append(tensor.contiguous().view(-1).narrow(
0,
int(tensor_offset),
int(num_elements)))
else:
flat_tensor_list.append(tensor)
current_size = current_size + num_elements
# this means it's the last partition and it does not align with the dp boundary. We need to pad before flattening.
if current_size < partition_size:
flat_tensor_list.append(
torch.zeros(int(partition_size - current_size),
dtype=dtype,
device=device))
if return_tensor_list:
return flat_tensor_list
return self.flatten(flat_tensor_list)
def free_grad_in_param_list(self, param_list):
for p in param_list:
p.grad = None # in step
def reset_cpu_buffers(self):
self.norm_for_param_grads = {}
self.local_overflow = False
def log_timers(self, timer_names):
if self.timers is None:
return
self.timers.log(names=list(timer_names))
def start_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).start()
def stop_timers(self, timer_names):
if self.timers is None:
return
for name in timer_names:
self.timers(name).stop()
def step(self, closure=None):
"""
Not supporting closure.
"""
self.micro_step_id = -1
if self.verbose:
report_memory_usage(f"In step before checking overflow")
# First compute the norm for all groups so we know if there is an overflow
self.check_overflow(self.partition_gradients)
OPTIMIZER_ALLGATHER = 'optimizer_allgather'
OPTIMIZER_GRADIENTS = 'optimizer_gradients'
OPTIMIZER_STEP = 'optimizer_step'
timer_names = [OPTIMIZER_ALLGATHER,
OPTIMIZER_GRADIENTS, OPTIMIZER_STEP]
prev_scale = self.loss_scale
self._update_scale(self.overflow)
if self.overflow:
if self.verbose:
report_memory_usage('After overflow before clearing gradients')
self.zero_grad()
if self.cpu_offload:
self.reset_cpu_buffers()
else:
self.averaged_gradients = {}
if self.verbose:
report_memory_usage('After overflow after clearing gradients')
print(
"[deepspeed] fp16 dynamic loss scale overflow! Rank {} Skipping step. Attempted loss scale: {}, "
"reducing to {}".format(dist.get_rank(),
prev_scale,
self.loss_scale))
self.start_timers(timer_names)
self.stop_timers(timer_names)
return
self.start_timers([OPTIMIZER_GRADIENTS])
norm_groups = []
single_partition_grad_groups = []
skip = False
for i, group in enumerate(self.fp16_groups):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
if self.cpu_offload:
norm_groups.append(
self.complete_grad_norm_calculation_for_cpu_offload(
self.params_in_partition[i]))
single_grad_partition = self.single_partition_of_fp32_groups[i].grad
else:
norm_groups.append(
self.get_grad_norm_direct(self.averaged_gradients[i],
self.params_in_partition[i]))
# free gradients for all the parameters that are not updated by this process
self.free_grad_in_param_list(self.params_not_in_partition[i])
# create a flat gradient for the parameters updated by this process
# If we are the last partition, ensure the grads match the partition size; if not, pad with zero tensors
if partition_id == dist.get_world_size(
group=self.real_dp_process_group[i]) - 1:
single_grad_partition = self.flatten_dense_tensors_aligned(
self.averaged_gradients[i],
int(self.partition_size[i])).to(
self.single_partition_of_fp32_groups[i].dtype)
else:
single_grad_partition = self.flatten(self.averaged_gradients[i]).to(
self.single_partition_of_fp32_groups[i].dtype)
assert single_grad_partition.numel() == self.partition_size[i], \
"averaged gradients have different number of elements that partition size {} {} {} {}".format(
single_grad_partition.numel(), self.partition_size[i], i, partition_id)
self.single_partition_of_fp32_groups[i].grad = single_grad_partition
# release all the gradients since we have already created the necessary copy in dp_grad_partition
self.free_grad_in_param_list(self.params_in_partition[i])
self.averaged_gradients[i] = None
single_partition_grad_groups.append(single_grad_partition)
if self.has_moe_layers:
self._average_expert_grad_norms(norm_groups)
self.unscale_and_clip_grads(single_partition_grad_groups, norm_groups)
self.stop_timers([OPTIMIZER_GRADIENTS])
self.start_timers([OPTIMIZER_STEP])
if self.deepspeed_adam_offload:
from deepspeed.ops.adam import DeepSpeedCPUAdam
if type(self.optimizer) == DeepSpeedCPUAdam and self.dtype == torch.half:
fp16_param_groups = [
fp16_partitions[partition_id]
for fp16_partitions in self.parallel_partitioned_fp16_groups
]
self.optimizer.step(fp16_param_groups=fp16_param_groups)
else:
self.optimizer.step()
for fp16_partitions, fp32_partition in zip(self.parallel_partitioned_fp16_groups,
self.single_partition_of_fp32_groups):
fp16_partitions[partition_id].data.copy_(
fp32_partition.data)
else:
self.optimizer.step()
# get rid of the fp32 gradients. Not needed anymore
if not self.cpu_offload:
for group in self.single_partition_of_fp32_groups:
group.grad = None # in step
for fp16_partitions, fp32_partition in zip(self.parallel_partitioned_fp16_groups,
self.single_partition_of_fp32_groups):
fp16_partitions[partition_id].data.copy_(fp32_partition.data)
self.stop_timers([OPTIMIZER_STEP])
if self.cpu_offload:
self.reset_cpu_buffers()
self.start_timers([OPTIMIZER_ALLGATHER])
# gather the updated weights from everyone
for group_id, partitioned_params in enumerate(self.parallel_partitioned_fp16_groups):
# Sequential AllGather Best of both worlds
dp_world_size = dist.get_world_size(
group=self.real_dp_process_group[group_id])
num_shards = max(
1,
partitioned_params[partition_id].numel() * dp_world_size //
self.allgather_bucket_size)
shard_size = partitioned_params[partition_id].numel() // num_shards
num_elements = shard_size
assert shard_size * \
num_shards <= partitioned_params[partition_id].numel()
for shard_id in range(num_shards):
if shard_id == (num_shards - 1):
num_elements = partitioned_params[partition_id].numel(
) - shard_id * shard_size
shard_list = []
for dp_id in range(dp_world_size):
curr_shard = partitioned_params[dp_id].narrow(
0,
shard_id * shard_size,
num_elements).detach()
shard_list.append(curr_shard)
dist.all_gather(shard_list,
shard_list[partition_id],
group=self.real_dp_process_group[group_id])
self.stop_timers([OPTIMIZER_ALLGATHER])
# TODO: we probably don't need this? just to be safe
for i in range(len(norm_groups)):
self._update_model_fp16_weights(i)
self.log_timers(timer_names)
if self.verbose:
report_memory_usage('After zero_optimizer step')
return
def _average_expert_grad_norms(self, norm_groups):
for i, norm in enumerate(norm_groups):
if self.is_moe_param_group[i]:
scaled_norm = norm * 1.0 / float(
dist.get_world_size(group=self.ep_process_group))
scaled_norm_tensor = torch.tensor(scaled_norm,
device='cuda',
dtype=torch.float)
dist.all_reduce(scaled_norm_tensor,
group=self.ep_process_group)
norm_groups[i] = scaled_norm_tensor.item()
def unscale_and_clip_grads(self, grad_groups_flat, norm_groups):
total_norm = 0.0
for norm in norm_groups:
total_norm += norm ** 2.0
total_norm = math.sqrt(total_norm)
# compute combined scale factor for this group
combined_scale = self.loss_scale
if self.clip_grad > 0.:
# norm is in fact norm*scale
clip = ((total_norm / self.loss_scale) + 1e-6) / self.clip_grad
if clip > 1:
combined_scale = clip * self.loss_scale
for grad in grad_groups_flat:
if isinstance(grad, list):
sub_partitions = grad
for g in sub_partitions:
g.data.mul_(1. / combined_scale)
else:
grad.data.mul_(1. / combined_scale)
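# Worked example (illustrative only): with norm_groups = [3.0, 4.0] the combined
# total_norm is sqrt(9 + 16) = 5.0. These norms were computed on loss-scaled
# gradients, so with loss_scale = 2.0 and clip_grad = 1.0 we get
# clip = (5.0 / 2.0 + 1e-6) / 1.0 ~= 2.5 > 1, hence combined_scale = 2.5 * 2.0 = 5.0.
# Multiplying every gradient by 1 / 5.0 both removes the loss scale and clips the
# global (unscaled) gradient norm to clip_grad = 1.0 in a single pass.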
def _check_overflow(self, partition_gradients=True):
self.overflow = self.has_overflow(partition_gradients)
# `params` is a list / generator of torch.Variable
def has_overflow_serial(self, params, is_grad_list=False):
for p in params:
if p.grad is not None and self._has_inf_or_nan(p.grad.data):
return True
return False
def has_overflow_partitioned_grads_serial(self):
for i in range(len(self.fp16_groups)):
for j, grad in enumerate(self.averaged_gradients[i]):
if grad is not None and self._has_inf_or_nan(grad.data, j):
return True
return False
def has_overflow(self, partition_gradients=True):
if partition_gradients:
overflow = self.local_overflow if self.cpu_offload else self.has_overflow_partitioned_grads_serial(
)
overflow_gpu = torch.cuda.ByteTensor([overflow])
'''This will capture overflow across all data parallel and expert parallel processes,
since expert parallel processes are a subset of the data parallel processes'''
torch.distributed.all_reduce(overflow_gpu,
op=torch.distributed.ReduceOp.MAX,
group=self.dp_process_group)
else:
params = []
for group in self.fp16_groups:
for param in group:
params.append(param)
overflow = self.has_overflow_serial(
params, is_grad_list=partition_gradients)
overflow_gpu = torch.cuda.ByteTensor([overflow])
# Since each model parallel GPU carries only part of the model,
# make sure overflow flag is synced across all the model parallel GPUs
self._model_parallel_all_reduce(tensor=overflow_gpu,
op=torch.distributed.ReduceOp.MAX)
overflow = overflow_gpu[0].item()
return bool(overflow)
# `x` is a torch.Tensor
@staticmethod
def _has_inf_or_nan(x, j=None):
try:
# if x is half, the .float() incurs an additional deep copy, but it's necessary if
# Pytorch's .sum() creates a one-element tensor of the same type as x
# (which is true for some recent versions of pytorch).
cpu_sum = float(x.float().sum())
# More efficient version that can be used if .sum() returns a Python scalar
# cpu_sum = float(x.sum())
except RuntimeError as instance:
# We want to check if inst is actually an overflow exception.
# RuntimeError could come from a different error.
# If so, we still want the exception to propagate.
if "value cannot be converted" not in instance.args[0]:
raise
return True
else:
if cpu_sum == float('inf') or cpu_sum == -float('inf') or cpu_sum != cpu_sum:
return True
return False
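# Illustrative behaviour of the check above (not part of the original code):
# _has_inf_or_nan(torch.tensor([1.0, float('inf')])) -> True, since the sum is inf
# _has_inf_or_nan(torch.tensor([1.0, float('nan')])) -> True, since NaN != NaN
# _has_inf_or_nan(torch.tensor([1.0, 2.0])) -> False, since the sum is finite
# The RuntimeError branch catches the "value cannot be converted" error that the
# float() conversion can raise and treats it as an overflow as well.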
def backward(self, loss, retain_graph=False):
"""
:attr:`backward` performs the following steps:
1. fp32_loss = loss.float()
2. scaled_loss = fp32_loss*loss_scale
3. scaled_loss.backward(), which accumulates scaled gradients into the ``.grad`` attributes of the model's fp16 leaves
"""
self.micro_step_id += 1
if self.contiguous_gradients:
self.ipg_buffer = []
buf_0 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=torch.cuda.current_device())
self.ipg_buffer.append(buf_0)
# Use double buffers to avoid data access conflict when overlap_comm is enabled.
if self.overlap_comm:
buf_1 = torch.empty(int(self.reduce_bucket_size),
dtype=self.dtype,
device=torch.cuda.current_device())
self.ipg_buffer.append(buf_1)
self.ipg_index = 0
self.loss_scaler.backward(loss.float(), retain_graph=retain_graph)
def check_overflow(self, partition_gradients=True):
self._check_overflow(partition_gradients)
def _update_scale(self, has_overflow=False):
self.loss_scaler.update_scale(has_overflow)
# Promote state so it can be retrieved or set via "fp16_optimizer_instance.state"
def _get_state(self):
return self.optimizer.state
def _set_state(self, value):
self.optimizer.state = value
state = property(_get_state, _set_state)
# Promote param_groups so it can be retrieved or set via "fp16_optimizer_instance.param_groups"
# (for example, to adjust the learning rate)
def _get_param_groups(self):
return self.optimizer.param_groups
def _set_param_groups(self, value):
self.optimizer.param_groups = value
param_groups = property(_get_param_groups, _set_param_groups)
# Promote loss scale so it can be retrieved or set via "fp16_optimizer_instance.loss_scale"
def _get_loss_scale(self):
return self.loss_scaler.loss_scale
def _set_loss_scale(self, value):
self.loss_scaler.cur_scale = value
loss_scale = property(_get_loss_scale, _set_loss_scale)
cur_scale = property(_get_loss_scale, _set_loss_scale)
# Return group tensor after removing paddings that are added for alignment to DP world size.
# This method works on the assumption that each group contains a single flattened tensor.
def _get_groups_without_padding(self, groups_with_padding):
groups_without_padding = []
for i, group in enumerate(groups_with_padding):
lean_length = group.numel() - self.groups_padding[i]
groups_without_padding.append(group[:lean_length])
return groups_without_padding
# Return optimizer state after removing paddings that are added for alignment.
def _get_state_without_padding(self, state_with_padding, padding):
lean_state = {}
for key, value in state_with_padding.items():
if torch.is_tensor(value):
lean_length = value.numel() - padding
lean_state[key] = value[:lean_length]
else:
lean_state[key] = value
return lean_state
# Return base optimizer states.
# This method assumes that each param group contains a single flattened tensor.
def _get_base_optimizer_state(self):
optimizer_groups_state = []
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
lean_optimizer_state = self._get_state_without_padding(
self.optimizer.state[p],
self.groups_padding[i])
optimizer_groups_state.append(lean_optimizer_state)
return optimizer_groups_state
def state_dict(self):
"""
Returns a dict containing the current state of this :class:`FP16_Optimizer` instance.
This dict contains attributes of :class:`FP16_Optimizer`, as well as the state_dict
of the contained Pytorch optimizer.
Example::
checkpoint = {}
checkpoint['model'] = model.state_dict()
checkpoint['optimizer'] = optimizer.state_dict()
torch.save(checkpoint, "saved.pth")
"""
state_dict = {}
state_dict['loss_scaler'] = self.loss_scaler
state_dict['dynamic_loss_scale'] = self.dynamic_loss_scale
state_dict['overflow'] = self.overflow
state_dict['base_optimizer_state'] = self._get_base_optimizer_state()
state_dict['zero_stage'] = ZERO_OPTIMIZATION_GRADIENTS
state_dict['partition_count'] = self.partition_count
state_dict['ds_version'] = version
# Remove paddings for DP alignment to enable loading for other alignment values
fp32_groups_without_padding = self._get_groups_without_padding(
self.single_partition_of_fp32_groups)
state_dict['single_partition_of_fp32_groups'] = fp32_groups_without_padding
# if self.cpu_offload:
# state_dict_tmp = async_copy_to(state_dict,
# 'cpu',
# torch.cuda.current_stream())
# state_dict = state_dict_tmp
return state_dict
# Restore base optimizer fp32 weights from checkpoint by:
# 1) Merging fp32 weights from checkpoints of all partitions
# 2) Extracting fp32 weights for current partition from merged weights
# 3) Using extracted weights to update base optimizer weights directly.
def _restore_from_fp32_weights(self, all_state_dict):
merged_single_partition_of_fp32_groups = []
for i in range(len(self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(group=self.real_dp_process_group[i])
merged_partitions = [
sd['single_partition_of_fp32_groups'][i] for sd in all_state_dict
]
flat_merged_partitions = self.flatten_dense_tensors_aligned(
merged_partitions,
self.nccl_start_alignment_factor *
dist.get_world_size(group=self.real_dp_process_group[i]))
dp_partitions = self.get_data_parallel_partitions(
flat_merged_partitions, i)
merged_single_partition_of_fp32_groups.append(
dp_partitions[partition_id])
for current, saved in zip(self.single_partition_of_fp32_groups, merged_single_partition_of_fp32_groups):
current.data.copy_(saved.data)
# Restore base optimizer fp32 weights from ZeRO fp16 weights
def _restore_from_fp16_weights(self):
for group_id, (fp16_partitions, fp32_partition) in enumerate(
zip(self.parallel_partitioned_fp16_groups, self.single_partition_of_fp32_groups)):
partition_id = dist.get_rank(
group=self.real_dp_process_group[group_id])
fp32_partition.data.copy_(fp16_partitions[partition_id].data)
# Refresh the fp32 master params from the fp16 copies.
def refresh_fp32_params(self):
self._restore_from_fp16_weights()
# Extract optimizer state for current partition from merged states of all partitions
def _partition_base_optimizer_state(self, state_key, all_partition_states, group_id):
partition_id = dist.get_rank(
group=self.real_dp_process_group[group_id])
alignment = dist.get_world_size(
group=self.real_dp_process_group[group_id])
if torch.is_tensor(all_partition_states[0]):
flat_merged_partitions = self.flatten_dense_tensors_aligned(
all_partition_states,
alignment)
dp_partitions = self.get_data_parallel_partitions(flat_merged_partitions,
group_id)
return dp_partitions[partition_id]
else:
# Assume non-tensor states are not partitioned and equal across ranks, so return first one
return all_partition_states[0]
# Restore base optimizer state from checkpoint by
# 1) Merging optimizer state from checkpoints of all partitions
# 2) Extracting optimizer state for current partition from the merged state
# 3) Using the extracted value to directly update the base optimizer.
def _restore_base_optimizer_state(self, all_state_dict):
base_optimizer_group_states = []
for i in range(len(self.optimizer.param_groups)):
partition_states = {}
all_partition_group_states = [
sd['base_optimizer_state'][i] for sd in all_state_dict
]
for key in all_partition_group_states[0].keys():
all_partition_states = [
all_states[key] for all_states in all_partition_group_states
]
partition_states[key] = self._partition_base_optimizer_state(
key,
all_partition_states,
i)
base_optimizer_group_states.append(partition_states)
for i, group in enumerate(self.optimizer.param_groups):
p = group['params'][0]
for key, saved in base_optimizer_group_states[i].items():
if torch.is_tensor(self.optimizer.state[p][key]):
self.optimizer.state[p][key].data.copy_(saved.data)
else:
self.optimizer.state[p][key] = saved
def load_state_dict(self,
state_dict_list,
load_optimizer_states=True,
load_from_fp32_weights=False):
r"""Loading ZeRO checkpoint
Arguments:
state_dict_list: List of all saved ZeRO checkpoints, one for each saved partition.
Note that the number of saved partitions may differ from number of loading partitions to support
changing GPU count, specifically DP world size, between saving and loading checkpoints.
load_optimizer_states: Boolean indicating whether or not to load base optimizer states
load_from_fp32_weights: Boolean indicating whether to initialize fp32 master weights from fp32
copies in checkpoints (no precision loss) or from model's fp16 copies (with precision loss).
"""
"""
Loads a state_dict created by an earlier call to state_dict().
If ``fp16_optimizer_instance`` was constructed from some ``init_optimizer``,
whose parameters in turn came from ``model``, it is expected that the user
will call ``model.load_state_dict()`` before
``fp16_optimizer_instance.load_state_dict()`` is called.
Example::
model = torch.nn.Linear(D_in, D_out).cuda().half()
optimizer = torch.optim.SGD(model.parameters(), lr=1e-3)
optimizer = FP16_Optimizer(optimizer, static_loss_scale = 128.0)
...
checkpoint = torch.load("saved.pth")
model.load_state_dict(checkpoint['model'])
optimizer.load_state_dict(checkpoint['optimizer'])
"""
# I think it should actually be ok to reload the optimizer before the model.
self.loss_scaler = state_dict_list[0]['loss_scaler']
self.dynamic_loss_scale = state_dict_list[0]['dynamic_loss_scale']
self.overflow = state_dict_list[0]['overflow']
# zero stage 1 mode
if not self.partition_gradients:
required_version = pkg_version.parse("0.3.17")
ckpt_version = state_dict_list[0].get("ds_version", False)
error_str = f"ZeRO stage 1 changed in {required_version} and is not backwards compatible " \
"with older stage 1 checkpoints. If you'd like to load an old ZeRO-1 checkpoint " \
"please set 'legacy_stage1': true in your zero config json. This old version of " \
"stage 1 will be removed in v0.4.0."
assert ckpt_version, f"Empty ds_version! {error_str}"
assert required_version <= pkg_version.parse(
ckpt_version), f"Old version: {ckpt_version} {error_str}"
if load_optimizer_states:
self._restore_base_optimizer_state(state_dict_list)
# At this point, the optimizer's references to the model's fp32 parameters are up to date.
# The optimizer's hyperparameters and internal buffers are also up to date.
# However, the fp32 master copies of the model's fp16 params stored by the optimizer are still
# out of date. There are two options.
# 1: Refresh the master params from the model's fp16 params.
# This requires less storage but incurs precision loss.
# 2: Save and restore the fp32 master copies separately.
# We choose option 1 if changing DP degree and option 2 otherwise.
#
# Pytorch Optimizer.load_state_dict casts saved buffers (e.g. momentum) to the type and device
# of their associated parameters, because it's possible those buffers might not exist yet in
# the current optimizer instance. In our case, as long as the current FP16_Optimizer has been
# constructed in the same way as the one whose state_dict we are loading, the same master params
# are guaranteed to exist, so we can just copy_() from the saved master params.
if load_from_fp32_weights:
self._restore_from_fp32_weights(state_dict_list)
else:
self._restore_from_fp16_weights()
def allreduce_gradients(self):
self.overlapping_partition_gradients_reduce_epilogue()
def _handle_overflow(cpu_sum, x, i):
import math
rank = torch.distributed.get_rank()
if rank == 0:
t_i = -1
for v_i, v in enumerate(x.data.contiguous().view(-1)):
if not math.isfinite(float(v)):
t_i = v_i
break
print(
f"rank {rank} detected overflow {cpu_sum} in tensor {i}:{t_i} shape {x.shape}"
)
def estimate_zero2_model_states_mem_needs(total_params,
num_gpus_per_node=1,
num_nodes=1,
cpu_offload=True,
additional_buffer_factor=1.5):
total_gpus = num_nodes * num_gpus_per_node
if cpu_offload:
gpu_mem = 2 * total_params
cpu_mem = total_params * \
max(4 * total_gpus, 16) * additional_buffer_factor
else:
gpu_mem = 4 * total_params + int(16 * total_params / total_gpus)
cpu_mem = total_params * 4 * num_gpus_per_node * additional_buffer_factor
return int(cpu_mem), int(gpu_mem)
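# Worked example of the byte estimates above (illustrative only): for a 1e9-param
# model on 1 node with 4 GPUs (total_gpus = 4) and additional_buffer_factor = 1.5:
# - cpu_offload=True: gpu_mem = 2 * 1e9 = 2.0e9 B (~1.9 GB per GPU) and
#   cpu_mem = 1e9 * max(16, 16) * 1.5 = 2.4e10 B (~22.4 GB of host RAM).
# - cpu_offload=False: gpu_mem = 4 * 1e9 + 16 * 1e9 / 4 = 8.0e9 B (~7.5 GB) and
#   cpu_mem = 1e9 * 4 * 4 * 1.5 = 2.4e10 B (~22.4 GB).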
def model_to_params(model):
# shared params calculated only once
total_params = sum(
dict((p.data_ptr(),
p.numel()) for p in model.parameters()).values())
return total_params
def estimate_zero2_model_states_mem_needs_all_live(model,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If you have an actual model object, use this function and everything will be derived
automatically.
If it's a hypothetical model, use ``estimate_zero2_model_states_mem_needs_all_cold`` where you have to pass
the ``total_params`` explicitly.
Args:
- ``model``: ``nn.Module`` object
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
total_params = model_to_params(model)
estimate_zero2_model_states_mem_needs_all_cold(
total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
additional_buffer_factor=additional_buffer_factor)
def estimate_zero2_model_states_mem_needs_all_cold(total_params,
num_gpus_per_node=1,
num_nodes=1,
additional_buffer_factor=1.5):
"""
Print out estimates on memory usage requirements for ZeRO 2 params, optim states and gradients
for a given ``model`` and hardware setup.
If it's a hypothetical model, use this function where you have to pass
the ``total_params`` and ``largest_layer_params`` explicitly.
If you have an actual model object, use ``estimate_zero2_model_states_mem_needs_all_live`` and everything
will be derived automatically.
Args:
- ``total_params``: total model params
- ``num_gpus_per_node``: how many gpus per node (defaults to 1)
- ``num_nodes``: how many nodes (defaults to 1),
- ``additional_buffer_factor``: estimation factor (defaults to 1.5):
"""
def format_options(cpu_offload):
enabled = []
enabled.append(f"cpu_offload={1 if cpu_offload else 0}")
return ", ".join(enabled)
nodes_str = "nodes" if num_nodes > 1 else "node"
gpus_str = "GPUs" if num_gpus_per_node > 1 else "GPU"
print(
"Estimated memory needed for params, optim states and gradients for a:\n"
f"HW: Setup with {num_nodes} {nodes_str}, {num_gpus_per_node} {gpus_str} per node.\n"
f"SW: Model with {int(total_params / 1e6)}M total params.")
print(" per CPU | per GPU | Options")
for cpu_offload in [True, False]:
cpu_mem, gpu_mem = estimate_zero2_model_states_mem_needs(
total_params=total_params,
num_gpus_per_node=num_gpus_per_node,
num_nodes=num_nodes,
cpu_offload=cpu_offload,
additional_buffer_factor=additional_buffer_factor
)
options_str = format_options(cpu_offload=cpu_offload)
print(
f" {cpu_mem / 2 ** 30:7.2f}GB | {gpu_mem / 2 ** 30:6.2f}GB | {options_str}")
|
the-stack_0_15343 | from time import sleep
from json import dumps
from kafka import KafkaProducer
producer = KafkaProducer(bootstrap_servers=['localhost:9092'], value_serializer=lambda x: dumps(x).encode('utf-8'))
print("Please insert a number --> 'stop' to exit")
input_user = input()
index = 0
while input_user != "stop":
data = {"id": "PXL"+str(index), "number" : input_user}
producer.send("pxl_data", value=data)
print(f"Sending data: {data}")
index += 1
print("Insert new data (stop to exit)")
input_user = input()
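# A minimal consumer sketch for reading the records produced above (illustrative
# only, not part of the original script). It assumes the same local broker and
# the 'pxl_data' topic; it is wrapped in a function so nothing runs unless it is
# called explicitly.
def consume_pxl_data():
    from json import loads
    from kafka import KafkaConsumer
    consumer = KafkaConsumer("pxl_data",
                             bootstrap_servers=["localhost:9092"],
                             auto_offset_reset="earliest",
                             value_deserializer=lambda m: loads(m.decode("utf-8")))
    for message in consumer:
        # Each message.value is the dict sent above, e.g. {"id": "PXL0", "number": "42"}
        print(f"Received: {message.value}")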
|
the-stack_0_15344 | # -*- coding: utf-8 -*-
"""
pygments.lexers.agile
~~~~~~~~~~~~~~~~~~~~~
Lexers for agile languages.
:copyright: Copyright 2006-2009 by the Pygments team, see AUTHORS.
:license: BSD, see LICENSE for details.
"""
import re
try:
set
except NameError:
from sets import Set as set
from pygments.lexer import Lexer, RegexLexer, ExtendedRegexLexer, \
LexerContext, include, combined, do_insertions, bygroups, using
from pygments.token import Error, Text, \
Comment, Operator, Keyword, Name, String, Number, Generic, Punctuation
from pygments.util import get_bool_opt, get_list_opt, shebang_matches
from pygments import unistring as uni
__all__ = ['PythonLexer', 'PythonConsoleLexer', 'PythonTracebackLexer',
'RubyLexer', 'RubyConsoleLexer', 'PerlLexer', 'LuaLexer',
'MiniDLexer', 'IoLexer', 'TclLexer', 'ClojureLexer',
'Python3Lexer', 'Python3TracebackLexer']
# b/w compatibility
from pygments.lexers.functional import SchemeLexer
line_re = re.compile('.*?\n')
class PythonLexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code.
"""
name = 'Python'
aliases = ['python', 'py']
filenames = ['*.py', '*.pyw', '*.sc', 'SConstruct', 'SConscript']
mimetypes = ['text/x-python', 'application/x-python']
tokens = {
'root': [
(r'\n', Text),
(r'^(\s*)("""(?:.|\n)*?""")', bygroups(Text, String.Doc)),
(r"^(\s*)('''(?:.|\n)*?''')", bygroups(Text, String.Doc)),
(r'[^\S\n]+', Text),
(r'#.*$', Comment),
(r'[]{}:(),;[]', Punctuation),
(r'\\\n', Text),
(r'\\', Text),
(r'(in|is|and|or|not)\b', Operator.Word),
(r'!=|==|<<|>>|[-~+/*%=<>&^|.]', Operator),
include('keywords'),
(r'(def)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)((?:\s|\\\s)+)', bygroups(Keyword, Text), 'classname'),
(r'(from)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'fromimport'),
(r'(import)((?:\s|\\\s)+)', bygroups(Keyword.Namespace, Text), 'import'),
include('builtins'),
include('backtick'),
('(?:[rR]|[uU][rR]|[rR][uU])"""', String, 'tdqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'''", String, 'tsqs'),
('(?:[rR]|[uU][rR]|[rR][uU])"', String, 'dqs'),
("(?:[rR]|[uU][rR]|[rR][uU])'", String, 'sqs'),
('[uU]?"""', String, combined('stringescape', 'tdqs')),
("[uU]?'''", String, combined('stringescape', 'tsqs')),
('[uU]?"', String, combined('stringescape', 'dqs')),
("[uU]?'", String, combined('stringescape', 'sqs')),
include('name'),
include('numbers'),
],
'keywords': [
(r'(assert|break|continue|del|elif|else|except|exec|'
r'finally|for|global|if|lambda|pass|print|raise|'
r'return|try|while|yield|as|with)\b', Keyword),
],
'builtins': [
(r'(?<!\.)(__import__|abs|all|any|apply|basestring|bin|bool|buffer|'
r'bytearray|bytes|callable|chr|classmethod|cmp|coerce|compile|'
r'complex|delattr|dict|dir|divmod|enumerate|eval|execfile|exit|'
r'file|filter|float|frozenset|getattr|globals|hasattr|hash|hex|id|'
r'input|int|intern|isinstance|issubclass|iter|len|list|locals|'
r'long|map|max|min|next|object|oct|open|ord|pow|property|range|'
r'raw_input|reduce|reload|repr|reversed|round|set|setattr|slice|'
r'sorted|staticmethod|str|sum|super|tuple|type|unichr|unicode|'
r'vars|xrange|zip)\b', Name.Builtin),
(r'(?<!\.)(self|None|Ellipsis|NotImplemented|False|True'
r')\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|DeprecationWarning|EOFError|EnvironmentError|'
r'Exception|FloatingPointError|FutureWarning|GeneratorExit|IOError|'
r'ImportError|ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplemented|NotImplementedError|OSError|OverflowError|'
r'OverflowWarning|PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StandardError|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
],
'numbers': [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+[eE][+-]?[0-9]+', Number.Float),
(r'0\d+', Number.Oct),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+L', Number.Integer.Long),
(r'\d+', Number.Integer)
],
'backtick': [
('`.*?`', String.Backtick),
],
'name': [
(r'@[a-zA-Z0-9_.]+', Name.Decorator),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
],
'funcname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Function, '#pop')
],
'classname': [
('[a-zA-Z_][a-zA-Z0-9_]*', Name.Class, '#pop')
],
'import': [
(r'((?:\s|\\\s)+)(as)((?:\s|\\\s)+)',
bygroups(Text, Keyword.Namespace, Text)),
(r'[a-zA-Z_][a-zA-Z0-9_.]*', Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
],
'fromimport': [
(r'((?:\s|\\\s)+)(import)\b', bygroups(Text, Keyword.Namespace), '#pop'),
(r'[a-zA-Z_.][a-zA-Z0-9_.]*', Name.Namespace),
],
'stringescape': [
(r'\\([\\abfnrtv"\']|\n|N{.*?}|u[a-fA-F0-9]{4}|'
r'U[a-fA-F0-9]{8}|x[a-fA-F0-9]{2}|[0-7]{1,3})', String.Escape)
],
'strings': [
(r'%(\([a-zA-Z0-9_]+\))?[-#0 +]*([0-9]+|[*])?(\.([0-9]+|[*]))?'
'[hlL]?[diouxXeEfFgGcrs%]', String.Interpol),
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
],
'nl': [
(r'\n', String)
],
'dqs': [
(r'"', String, '#pop'),
(r'\\\\|\\"|\\\n', String.Escape), # included here again for raw strings
include('strings')
],
'sqs': [
(r"'", String, '#pop'),
(r"\\\\|\\'|\\\n", String.Escape), # included here again for raw strings
include('strings')
],
'tdqs': [
(r'"""', String, '#pop'),
include('strings'),
include('nl')
],
'tsqs': [
(r"'''", String, '#pop'),
include('strings'),
include('nl')
],
}
def analyse_text(text):
return shebang_matches(text, r'pythonw?(2\.\d)?')
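# A minimal usage sketch (not part of the original module): highlighting a code
# string with this lexer typically goes through pygments.highlight together with
# a formatter, e.g.
#
#     from pygments import highlight
#     from pygments.formatters import TerminalFormatter
#     print(highlight("print('hi')\n", PythonLexer(), TerminalFormatter()))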
class Python3Lexer(RegexLexer):
"""
For `Python <http://www.python.org>`_ source code (version 3.0).
*New in Pygments 0.10.*
"""
name = 'Python 3'
aliases = ['python3', 'py3']
filenames = [] # Nothing until Python 3 gets widespread
mimetypes = ['text/x-python3', 'application/x-python3']
flags = re.MULTILINE | re.UNICODE
uni_name = "[%s][%s]*" % (uni.xid_start, uni.xid_continue)
tokens = PythonLexer.tokens.copy()
tokens['keywords'] = [
(r'(assert|break|continue|del|elif|else|except|'
r'finally|for|global|if|lambda|pass|raise|'
r'return|try|while|yield|as|with|True|False|None)\b', Keyword),
]
tokens['builtins'] = [
(r'(?<!\.)(__import__|abs|all|any|bin|bool|bytearray|bytes|'
r'chr|classmethod|cmp|compile|complex|delattr|dict|dir|'
r'divmod|enumerate|eval|filter|float|format|frozenset|getattr|'
r'globals|hasattr|hash|hex|id|input|int|isinstance|issubclass|'
r'iter|len|list|locals|map|max|memoryview|min|next|object|oct|'
r'open|ord|pow|print|property|range|repr|reversed|round|'
r'set|setattr|slice|sorted|staticmethod|str|sum|super|tuple|type|'
r'vars|zip)\b', Name.Builtin),
(r'(?<!\.)(self|Ellipsis|NotImplemented)\b', Name.Builtin.Pseudo),
(r'(?<!\.)(ArithmeticError|AssertionError|AttributeError|'
r'BaseException|BufferError|BytesWarning|DeprecationWarning|'
r'EOFError|EnvironmentError|Exception|FloatingPointError|'
r'FutureWarning|GeneratorExit|IOError|ImportError|'
r'ImportWarning|IndentationError|IndexError|KeyError|'
r'KeyboardInterrupt|LookupError|MemoryError|NameError|'
r'NotImplementedError|OSError|OverflowError|'
r'PendingDeprecationWarning|ReferenceError|'
r'RuntimeError|RuntimeWarning|StopIteration|'
r'SyntaxError|SyntaxWarning|SystemError|SystemExit|TabError|'
r'TypeError|UnboundLocalError|UnicodeDecodeError|'
r'UnicodeEncodeError|UnicodeError|UnicodeTranslateError|'
r'UnicodeWarning|UserWarning|ValueError|VMSError|Warning|'
r'WindowsError|ZeroDivisionError)\b', Name.Exception),
]
tokens['numbers'] = [
(r'(\d+\.\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'0[oO][0-7]+', Number.Oct),
(r'0[bB][01]+', Number.Bin),
(r'0[xX][a-fA-F0-9]+', Number.Hex),
(r'\d+', Number.Integer)
]
tokens['backtick'] = []
tokens['name'] = [
(r'@[a-zA-Z0-9_]+', Name.Decorator),
(uni_name, Name),
]
tokens['funcname'] = [
(uni_name, Name.Function, '#pop')
]
tokens['classname'] = [
(uni_name, Name.Class, '#pop')
]
tokens['import'] = [
(r'(\s+)(as)(\s+)', bygroups(Text, Keyword, Text)),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
(r'(\s*)(,)(\s*)', bygroups(Text, Operator, Text)),
(r'', Text, '#pop') # all else: go back
]
tokens['fromimport'] = [
(r'(\s+)(import)\b', bygroups(Text, Keyword), '#pop'),
(r'\.', Name.Namespace),
(uni_name, Name.Namespace),
]
# don't highlight "%s" substitutions
tokens['strings'] = [
(r'[^\\\'"%\n]+', String),
# quotes, percents and backslashes must be parsed one at a time
(r'[\'"\\]', String),
# unhandled string formatting sign
(r'%', String)
# newlines are an error (use "nl" state)
]
def analyse_text(text):
return shebang_matches(text, r'pythonw?3(\.\d)?')
class PythonConsoleLexer(Lexer):
"""
For Python console output or doctests, such as:
.. sourcecode:: pycon
>>> a = 'foo'
>>> print a
foo
>>> 1 / 0
Traceback (most recent call last):
File "<stdin>", line 1, in <module>
ZeroDivisionError: integer division or modulo by zero
Additional options:
`python3`
Use Python 3 lexer for code. Default is ``False``.
*New in Pygments 1.0.*
"""
name = 'Python console session'
aliases = ['pycon']
mimetypes = ['text/x-python-doctest']
def __init__(self, **options):
self.python3 = get_bool_opt(options, 'python3', False)
Lexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
if self.python3:
pylexer = Python3Lexer(**self.options)
tblexer = Python3TracebackLexer(**self.options)
else:
pylexer = PythonLexer(**self.options)
tblexer = PythonTracebackLexer(**self.options)
curcode = ''
insertions = []
curtb = ''
tbindex = 0
tb = 0
for match in line_re.finditer(text):
line = match.group()
if line.startswith('>>> ') or line.startswith('... '):
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:4])]))
curcode += line[4:]
elif line.rstrip() == '...':
tb = 0
insertions.append((len(curcode),
[(0, Generic.Prompt, '...')]))
curcode += line[3:]
else:
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
if (line.startswith('Traceback (most recent call last):') or
re.match(r' File "[^"]+", line \d+\n$', line)):
tb = 1
curtb = line
tbindex = match.start()
elif line == 'KeyboardInterrupt\n':
yield match.start(), Name.Class, line
elif tb:
curtb += line
if not (line.startswith(' ') or line.strip() == '...'):
tb = 0
for i, t, v in tblexer.get_tokens_unprocessed(curtb):
yield tbindex+i, t, v
else:
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
pylexer.get_tokens_unprocessed(curcode)):
yield item
class PythonTracebackLexer(RegexLexer):
"""
For Python tracebacks.
*New in Pygments 0.7.*
"""
name = 'Python Traceback'
aliases = ['pytb']
filenames = ['*.pytb']
mimetypes = ['text/x-python-traceback']
tokens = {
'root': [
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
# SyntaxError starts with this.
(r'^(?= File "[^"]+", line \d+\n)', Generic.Traceback, 'intb'),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name.Identifier, Text)),
(r'^( File )("[^"]+")(, line )(\d+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(PythonLexer), Text)),
(r'^([ \t]*)(...)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Name.Class, Text, Name.Identifier, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Name.Class, Text), '#pop')
],
}
class Python3TracebackLexer(RegexLexer):
"""
For Python 3.0 tracebacks, with support for chained exceptions.
*New in Pygments 1.0.*
"""
name = 'Python 3.0 Traceback'
aliases = ['py3tb']
filenames = ['*.py3tb']
mimetypes = ['text/x-python3-traceback']
tokens = {
'root': [
(r'\n', Text),
(r'^Traceback \(most recent call last\):\n', Generic.Traceback, 'intb'),
(r'^During handling of the above exception, another '
r'exception occurred:\n\n', Generic.Traceback),
(r'^The above exception was the direct cause of the '
r'following exception:\n\n', Generic.Traceback),
],
'intb': [
(r'^( File )("[^"]+")(, line )(\d+)(, in )(.+)(\n)',
bygroups(Text, Name.Builtin, Text, Number, Text, Name.Identifier, Text)),
(r'^( )(.+)(\n)',
bygroups(Text, using(Python3Lexer), Text)),
(r'^([ \t]*)(...)(\n)',
bygroups(Text, Comment, Text)), # for doctests...
(r'^(.+)(: )(.+)(\n)',
bygroups(Name.Class, Text, Name.Identifier, Text), '#pop'),
(r'^([a-zA-Z_][a-zA-Z0-9_]*)(:?\n)',
bygroups(Name.Class, Text), '#pop')
],
}
class RubyLexer(ExtendedRegexLexer):
"""
For `Ruby <http://www.ruby-lang.org>`_ source code.
"""
name = 'Ruby'
aliases = ['rb', 'ruby']
filenames = ['*.rb', '*.rbw', 'Rakefile', '*.rake', '*.gemspec', '*.rbx']
mimetypes = ['text/x-ruby', 'application/x-ruby']
flags = re.DOTALL | re.MULTILINE
def heredoc_callback(self, match, ctx):
# okay, this is the hardest part of parsing Ruby...
# match: 1 = <<-?, 2 = quote? 3 = name 4 = quote? 5 = rest of line
start = match.start(1)
yield start, Operator, match.group(1) # <<-?
yield match.start(2), String.Heredoc, match.group(2) # quote ", ', `
yield match.start(3), Name.Constant, match.group(3) # heredoc name
yield match.start(4), String.Heredoc, match.group(4) # quote again
heredocstack = ctx.__dict__.setdefault('heredocstack', [])
outermost = not bool(heredocstack)
heredocstack.append((match.group(1) == '<<-', match.group(3)))
ctx.pos = match.start(5)
ctx.end = match.end(5)
# this may find other heredocs
for i, t, v in self.get_tokens_unprocessed(context=ctx):
yield i, t, v
ctx.pos = match.end()
if outermost:
# this is the outer heredoc again, now we can process them all
for tolerant, hdname in heredocstack:
lines = []
for match in line_re.finditer(ctx.text, ctx.pos):
if tolerant:
check = match.group().strip()
else:
check = match.group().rstrip()
if check == hdname:
for amatch in lines:
yield amatch.start(), String.Heredoc, amatch.group()
yield match.start(), Name.Constant, match.group()
ctx.pos = match.end()
break
else:
lines.append(match)
else:
# end of heredoc not found -- error!
for amatch in lines:
yield amatch.start(), Error, amatch.group()
ctx.end = len(ctx.text)
del heredocstack[:]
def gen_rubystrings_rules():
def intp_regex_callback(self, match, ctx):
yield match.start(1), String.Regex, match.group(1) # begin
nctx = LexerContext(match.group(3), 0, ['interpolated-regex'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Regex, match.group(4) # end[mixounse]*
ctx.pos = match.end()
def intp_string_callback(self, match, ctx):
yield match.start(1), String.Other, match.group(1)
nctx = LexerContext(match.group(3), 0, ['interpolated-string'])
for i, t, v in self.get_tokens_unprocessed(context=nctx):
yield match.start(3)+i, t, v
yield match.start(4), String.Other, match.group(4) # end
ctx.pos = match.end()
states = {}
states['strings'] = [
# easy ones
(r'\:([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)', String.Symbol),
(r":'(\\\\|\\'|[^'])*'", String.Symbol),
(r"'(\\\\|\\'|[^'])*'", String.Single),
(r':"', String.Symbol, 'simple-sym'),
(r'"', String.Double, 'simple-string'),
(r'(?<!\.)`', String.Backtick, 'simple-backtick'),
]
# double-quoted string and symbol
for name, ttype, end in ('string', String.Double, '"'), \
('sym', String.Symbol, '"'), \
('backtick', String.Backtick, '`'):
states['simple-'+name] = [
include('string-intp-escaped'),
(r'[^\\%s#]+' % end, ttype),
(r'[\\#]', ttype),
(end, ttype, '#pop'),
]
# braced quoted strings
for lbrace, rbrace, name in ('\\{', '\\}', 'cb'), \
('\\[', '\\]', 'sb'), \
('\\(', '\\)', 'pa'), \
('<', '>', 'ab'):
states[name+'-intp-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
include('string-intp-escaped'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[QWx]?' + lbrace, String.Other,
name+'-intp-string'))
states[name+'-string'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Other),
(r'(?<!\\)' + lbrace, String.Other, '#push'),
(r'(?<!\\)' + rbrace, String.Other, '#pop'),
(r'[\\#' + lbrace + rbrace + ']', String.Other),
(r'[^\\#' + lbrace + rbrace + ']+', String.Other),
]
states['strings'].append((r'%[qsw]' + lbrace, String.Other,
name+'-string'))
states[name+'-regex'] = [
(r'\\[\\' + lbrace + rbrace + ']', String.Regex),
(r'(?<!\\)' + lbrace, String.Regex, '#push'),
(r'(?<!\\)' + rbrace + '[mixounse]*', String.Regex, '#pop'),
include('string-intp'),
(r'[\\#' + lbrace + rbrace + ']', String.Regex),
(r'[^\\#' + lbrace + rbrace + ']+', String.Regex),
]
states['strings'].append((r'%r' + lbrace, String.Regex,
name+'-regex'))
# these must come after %<brace>!
states['strings'] += [
# %r regex
(r'(%r([^a-zA-Z0-9]))([^\2\\]*(?:\\.[^\2\\]*)*)(\2[mixounse]*)',
intp_regex_callback),
# regular fancy strings with qsw
(r'%[qsw]([^a-zA-Z0-9])([^\1\\]*(?:\\.[^\1\\]*)*)\1', String.Other),
(r'(%[QWx]([^a-zA-Z0-9]))([^\2\\]*(?:\\.[^\2\\]*)*)(\2)',
intp_string_callback),
# special forms of fancy strings after operators or
# in method calls with braces
(r'(?<=[-+/*%=<>&!^|~,(])(\s*)(%([\t ])(?:[^\3\\]*(?:\\.[^\3\\]*)*)\3)',
bygroups(Text, String.Other, None)),
# and because of fixed width lookbehinds the whole thing a
# second time for line startings...
(r'^(\s*)(%([\t ])(?:[^\3\\]*(?:\\.[^\3\\]*)*)\3)',
bygroups(Text, String.Other, None)),
# all regular fancy strings without qsw
(r'(%([^a-zA-Z0-9\s]))([^\2\\]*(?:\\.[^\2\\]*)*)(\2)',
intp_string_callback),
]
return states
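    # For reference (illustrative only): the states generated above cover Ruby
    # percent literals such as %q{...}, %Q[...], %w(...), %x<...> and %r{...}mix,
    # using any of the four bracket pairs as delimiters.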
tokens = {
'root': [
(r'#.*?$', Comment.Single),
(r'=begin\s.*?\n=end', Comment.Multiline),
# keywords
(r'(BEGIN|END|alias|begin|break|case|defined\?|'
r'do|else|elsif|end|ensure|for|if|in|next|redo|'
r'rescue|raise|retry|return|super|then|undef|unless|until|when|'
r'while|yield)\b', Keyword),
# start of function, class and module names
(r'(module)(\s+)([a-zA-Z_][a-zA-Z0-9_]*(::[a-zA-Z_][a-zA-Z0-9_]*)*)',
bygroups(Keyword, Text, Name.Namespace)),
(r'(def)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'def(?=[*%&^`~+-/\[<>=])', Keyword, 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
# special methods
(r'(initialize|new|loop|include|extend|raise|attr_reader|'
r'attr_writer|attr_accessor|attr|catch|throw|private|'
r'module_function|public|protected|true|false|nil)\b', Keyword.Pseudo),
(r'(not|and|or)\b', Operator.Word),
(r'(autoload|block_given|const_defined|eql|equal|frozen|include|'
r'instance_of|is_a|iterator|kind_of|method_defined|nil|'
r'private_method_defined|protected_method_defined|'
r'public_method_defined|respond_to|tainted)\?', Name.Builtin),
(r'(chomp|chop|exit|gsub|sub)!', Name.Builtin),
(r'(?<!\.)(Array|Float|Integer|String|__id__|__send__|abort|ancestors|'
r'at_exit|autoload|binding|callcc|caller|'
r'catch|chomp|chop|class_eval|class_variables|'
r'clone|const_defined\?|const_get|const_missing|const_set|constants|'
r'display|dup|eval|exec|exit|extend|fail|fork|'
r'format|freeze|getc|gets|global_variables|gsub|'
r'hash|id|included_modules|inspect|instance_eval|'
r'instance_method|instance_methods|'
r'instance_variable_get|instance_variable_set|instance_variables|'
r'lambda|load|local_variables|loop|'
r'method|method_missing|methods|module_eval|name|'
r'object_id|open|p|print|printf|private_class_method|'
r'private_instance_methods|'
r'private_methods|proc|protected_instance_methods|'
r'protected_methods|public_class_method|'
r'public_instance_methods|public_methods|'
r'putc|puts|raise|rand|readline|readlines|require|'
r'scan|select|self|send|set_trace_func|singleton_methods|sleep|'
r'split|sprintf|srand|sub|syscall|system|taint|'
r'test|throw|to_a|to_s|trace_var|trap|type|untaint|untrace_var|'
r'warn)\b', Name.Builtin),
(r'__(FILE|LINE)__\b', Name.Builtin.Pseudo),
# normal heredocs
(r'(?<!\w)(<<-?)(["`\']?)([a-zA-Z_]\w*)(\2)(.*?\n)', heredoc_callback),
# empty string heredocs
(r'(<<-?)("|\')()(\2)(.*?\n)', heredoc_callback),
(r'__END__', Comment.Preproc, 'end-part'),
# multiline regex (after keywords or assignments)
(r'(?:^|(?<=[=<>~!])|'
r'(?<=(?:\s|;)when\s)|'
r'(?<=(?:\s|;)or\s)|'
r'(?<=(?:\s|;)and\s)|'
r'(?<=(?:\s|;|\.)index\s)|'
r'(?<=(?:\s|;|\.)scan\s)|'
r'(?<=(?:\s|;|\.)sub\s)|'
r'(?<=(?:\s|;|\.)sub!\s)|'
r'(?<=(?:\s|;|\.)gsub\s)|'
r'(?<=(?:\s|;|\.)gsub!\s)|'
r'(?<=(?:\s|;|\.)match\s)|'
r'(?<=(?:\s|;)if\s)|'
r'(?<=(?:\s|;)elsif\s)|'
r'(?<=^when\s)|'
r'(?<=^index\s)|'
r'(?<=^scan\s)|'
r'(?<=^sub\s)|'
r'(?<=^gsub\s)|'
r'(?<=^sub!\s)|'
r'(?<=^gsub!\s)|'
r'(?<=^match\s)|'
r'(?<=^if\s)|'
r'(?<=^elsif\s)'
r')(\s*)(/)(?!=)', bygroups(Text, String.Regex), 'multiline-regex'),
# multiline regex (in method calls)
(r'(?<=\(|,)/', String.Regex, 'multiline-regex'),
# multiline regex (this time the funny no whitespace rule)
(r'(\s+)(/[^\s=])', String.Regex, 'multiline-regex'),
# lex numbers and ignore following regular expressions which
# are division operators in fact (grrrr. i hate that. any
# better ideas?)
# since pygments 0.7 we also eat a "?" operator after numbers
# so that the char operator does not work. Chars are not allowed
# there so that you can use the ternary operator.
# stupid example:
# x>=0?n[x]:""
(r'(0_?[0-7]+(?:_[0-7]+)*)(\s*)([/?])?',
bygroups(Number.Oct, Text, Operator)),
(r'(0x[0-9A-Fa-f]+(?:_[0-9A-Fa-f]+)*)(\s*)([/?])?',
bygroups(Number.Hex, Text, Operator)),
(r'(0b[01]+(?:_[01]+)*)(\s*)([/?])?',
bygroups(Number.Bin, Text, Operator)),
(r'([\d]+(?:_\d+)*)(\s*)([/?])?',
bygroups(Number.Integer, Text, Operator)),
# Names
(r'@@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Class),
(r'@[a-zA-Z_][a-zA-Z0-9_]*', Name.Variable.Instance),
(r'\$[a-zA-Z0-9_]+', Name.Variable.Global),
(r'\$[!@&`\'+~=/\\,;.<>_*$?:"]', Name.Variable.Global),
(r'\$-[0adFiIlpvw]', Name.Variable.Global),
(r'::', Operator),
include('strings'),
# chars
(r'\?(\\[MC]-)*' # modifiers
r'(\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})|\S)'
r'(?!\w)',
String.Char),
(r'[A-Z][a-zA-Z0-9_]+', Name.Constant),
# this is needed because ruby attributes can look
# like keywords (class) or like this: ` ?!?
(r'(\.|::)([a-zA-Z_]\w*[\!\?]?|[*%&^`~+-/\[<>=])',
bygroups(Operator, Name)),
(r'[a-zA-Z_][\w_]*[\!\?]?', Name),
(r'(\[|\]|\*\*|<<?|>>?|>=|<=|<=>|=~|={3}|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&!^|~]=?', Operator),
(r'[(){};,/?:\\]', Punctuation),
(r'\s+', Text)
],
'funcname': [
(r'\(', Punctuation, 'defexpr'),
(r'(?:([a-zA-Z_][a-zA-Z0-9_]*)(\.))?'
r'([a-zA-Z_][\w_]*[\!\?]?|\*\*?|[-+]@?|'
r'[/%&|^`~]|\[\]=?|<<|>>|<=?>|>=?|===?)',
bygroups(Name.Class, Operator, Name.Function), '#pop'),
(r'', Text, '#pop')
],
'classname': [
(r'\(', Punctuation, 'defexpr'),
(r'<<', Operator, '#pop'),
(r'[A-Z_][\w_]*', Name.Class, '#pop'),
(r'', Text, '#pop')
],
'defexpr': [
(r'(\))(\.|::)?', bygroups(Punctuation, Operator), '#pop'),
(r'\(', Operator, '#push'),
include('root')
],
'in-intp': [
('}', String.Interpol, '#pop'),
include('root'),
],
'string-intp': [
(r'#{', String.Interpol, 'in-intp'),
(r'#@@?[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol),
(r'#\$[a-zA-Z_][a-zA-Z0-9_]*', String.Interpol)
],
'string-intp-escaped': [
include('string-intp'),
(r'\\([\\abefnrstv#"\']|x[a-fA-F0-9]{1,2}|[0-7]{1,3})', String.Escape)
],
'interpolated-regex': [
include('string-intp'),
(r'[\\#]', String.Regex),
(r'[^\\#]+', String.Regex),
],
'interpolated-string': [
include('string-intp'),
(r'[\\#]', String.Other),
(r'[^\\#]+', String.Other),
],
'multiline-regex': [
include('string-intp'),
(r'\\\\', String.Regex),
(r'\\/', String.Regex),
(r'[\\#]', String.Regex),
(r'[^\\/#]+', String.Regex),
(r'/[mixounse]*', String.Regex, '#pop'),
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
tokens.update(gen_rubystrings_rules())
def analyse_text(text):
return shebang_matches(text, r'ruby(1\.\d)?')
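# A minimal usage sketch (illustrative; assumes the standard Pygments API is
# available alongside this module):
#
#   from pygments import highlight
#   from pygments.formatters import TerminalFormatter
#   print(highlight("puts 'hello'", RubyLexer(), TerminalFormatter()))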
class RubyConsoleLexer(Lexer):
"""
For Ruby interactive console (**irb**) output like:
.. sourcecode:: rbcon
irb(main):001:0> a = 1
=> 1
irb(main):002:0> puts a
1
=> nil
"""
name = 'Ruby irb session'
aliases = ['rbcon', 'irb']
mimetypes = ['text/x-ruby-shellsession']
    _prompt_re = re.compile(r'irb\([a-zA-Z_][a-zA-Z0-9_]*\):\d{3}:\d+[>*"\'] '
                            r'|>> |\?> ')
def get_tokens_unprocessed(self, text):
rblexer = RubyLexer(**self.options)
curcode = ''
insertions = []
for match in line_re.finditer(text):
line = match.group()
m = self._prompt_re.match(line)
if m is not None:
end = m.end()
insertions.append((len(curcode),
[(0, Generic.Prompt, line[:end])]))
curcode += line[end:]
else:
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
curcode = ''
insertions = []
yield match.start(), Generic.Output, line
if curcode:
for item in do_insertions(insertions,
rblexer.get_tokens_unprocessed(curcode)):
yield item
class PerlLexer(RegexLexer):
"""
For `Perl <http://www.perl.org>`_ source code.
"""
name = 'Perl'
aliases = ['perl', 'pl']
filenames = ['*.pl', '*.pm']
mimetypes = ['text/x-perl', 'application/x-perl']
flags = re.DOTALL | re.MULTILINE
# TODO: give this a perl guy who knows how to parse perl...
tokens = {
'balanced-regex': [
(r'/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
(r'\\(\\\\|[^\\])*\\[egimosx]*', String.Regex, '#pop'),
(r'{(\\\\|\\}|[^}])*}[egimosx]*', String.Regex, '#pop'),
(r'<(\\\\|\\>|[^>])*>[egimosx]*', String.Regex, '#pop'),
(r'\[(\\\\|\\\]|[^\]])*\][egimosx]*', String.Regex, '#pop'),
(r'\((\\\\|\\\)|[^\)])*\)[egimosx]*', String.Regex, '#pop'),
(r'@(\\\\|\\\@|[^\@])*@[egimosx]*', String.Regex, '#pop'),
(r'%(\\\\|\\\%|[^\%])*%[egimosx]*', String.Regex, '#pop'),
(r'\$(\\\\|\\\$|[^\$])*\$[egimosx]*', String.Regex, '#pop'),
(r'!(\\\\|\\!|[^!])*![egimosx]*', String.Regex, '#pop'),
],
'root': [
(r'\#.*?$', Comment.Single),
(r'^=[a-zA-Z0-9]+\s+.*?\n=cut', Comment.Multiline),
(r'(case|continue|do|else|elsif|for|foreach|if|last|my|'
r'next|our|redo|reset|then|unless|until|while|use|'
r'print|new|BEGIN|END|return)\b', Keyword),
(r'(format)(\s+)([a-zA-Z0-9_]+)(\s*)(=)(\s*\n)',
bygroups(Keyword, Text, Name, Text, Punctuation, Text), 'format'),
(r'(eq|lt|gt|le|ge|ne|not|and|or|cmp)\b', Operator.Word),
# common delimiters
(r's/(\\\\|\\/|[^/])*/(\\\\|\\/|[^/])*/[egimosx]*', String.Regex),
(r's!(\\\\|\\!|[^!])*!(\\\\|\\!|[^!])*![egimosx]*', String.Regex),
(r's\\(\\\\|[^\\])*\\(\\\\|[^\\])*\\[egimosx]*', String.Regex),
(r's@(\\\\|\\@|[^@])*@(\\\\|\\@|[^@])*@[egimosx]*', String.Regex),
(r's%(\\\\|\\%|[^%])*%(\\\\|\\%|[^%])*%[egimosx]*', String.Regex),
# balanced delimiters
(r's{(\\\\|\\}|[^}])*}\s*', String.Regex, 'balanced-regex'),
(r's<(\\\\|\\>|[^>])*>\s*', String.Regex, 'balanced-regex'),
(r's\[(\\\\|\\\]|[^\]])*\]\s*', String.Regex, 'balanced-regex'),
(r's\((\\\\|\\\)|[^\)])*\)\s*', String.Regex, 'balanced-regex'),
(r'm?/(\\\\|\\/|[^/\n])*/[gcimosx]*', String.Regex),
(r'((?<==~)|(?<=\())\s*/(\\\\|\\/|[^/])*/[gcimosx]*', String.Regex),
(r'\s+', Text),
(r'(abs|accept|alarm|atan2|bind|binmode|bless|caller|chdir|'
r'chmod|chomp|chop|chown|chr|chroot|close|closedir|connect|'
r'continue|cos|crypt|dbmclose|dbmopen|defined|delete|die|'
r'dump|each|endgrent|endhostent|endnetent|endprotoent|'
r'endpwent|endservent|eof|eval|exec|exists|exit|exp|fcntl|'
r'fileno|flock|fork|format|formline|getc|getgrent|getgrgid|'
r'getgrnam|gethostbyaddr|gethostbyname|gethostent|getlogin|'
r'getnetbyaddr|getnetbyname|getnetent|getpeername|getpgrp|'
r'getppid|getpriority|getprotobyname|getprotobynumber|'
r'getprotoent|getpwent|getpwnam|getpwuid|getservbyname|'
r'getservbyport|getservent|getsockname|getsockopt|glob|gmtime|'
r'goto|grep|hex|import|index|int|ioctl|join|keys|kill|last|'
r'lc|lcfirst|length|link|listen|local|localtime|log|lstat|'
r'map|mkdir|msgctl|msgget|msgrcv|msgsnd|my|next|no|oct|open|'
r'opendir|ord|our|pack|package|pipe|pop|pos|printf|'
r'prototype|push|quotemeta|rand|read|readdir|'
r'readline|readlink|readpipe|recv|redo|ref|rename|require|'
r'reverse|rewinddir|rindex|rmdir|scalar|seek|seekdir|'
r'select|semctl|semget|semop|send|setgrent|sethostent|setnetent|'
r'setpgrp|setpriority|setprotoent|setpwent|setservent|'
r'setsockopt|shift|shmctl|shmget|shmread|shmwrite|shutdown|'
r'sin|sleep|socket|socketpair|sort|splice|split|sprintf|sqrt|'
r'srand|stat|study|substr|symlink|syscall|sysopen|sysread|'
r'sysseek|system|syswrite|tell|telldir|tie|tied|time|times|tr|'
r'truncate|uc|ucfirst|umask|undef|unlink|unpack|unshift|untie|'
r'utime|values|vec|wait|waitpid|wantarray|warn|write'
r')\b', Name.Builtin),
(r'((__(DATA|DIE|WARN)__)|(STD(IN|OUT|ERR)))\b', Name.Builtin.Pseudo),
(r'<<([\'"]?)([a-zA-Z_][a-zA-Z0-9_]*)\1;?\n.*?\n\2\n', String),
(r'__END__', Comment.Preproc, 'end-part'),
(r'\$\^[ADEFHILMOPSTWX]', Name.Variable.Global),
(r"\$[\\\"\[\]'&`+*.,;=%~?@$!<>(^|/-](?!\w)", Name.Variable.Global),
(r'[$@%#]+', Name.Variable, 'varname'),
(r'0_?[0-7]+(_[0-7]+)*', Number.Oct),
(r'0x[0-9A-Fa-f]+(_[0-9A-Fa-f]+)*', Number.Hex),
(r'0b[01]+(_[01]+)*', Number.Bin),
(r'\d+', Number.Integer),
(r"'(\\\\|\\'|[^'])*'", String),
(r'"(\\\\|\\"|[^"])*"', String),
(r'`(\\\\|\\`|[^`])*`', String.Backtick),
            (r'<([^\s>]+)>', String.Regex),
(r'(q|qq|qw|qr|qx)\{', String.Other, 'cb-string'),
(r'(q|qq|qw|qr|qx)\(', String.Other, 'rb-string'),
(r'(q|qq|qw|qr|qx)\[', String.Other, 'sb-string'),
(r'(q|qq|qw|qr|qx)\<', String.Other, 'lt-string'),
(r'(q|qq|qw|qr|qx)(.)[.\n]*?\1', String.Other),
(r'package\s+', Keyword, 'modulename'),
(r'sub\s+', Keyword, 'funcname'),
(r'(\[\]|\*\*|::|<<|>>|>=|<=|<=>|={3}|!=|=~|'
r'!~|&&?|\|\||\.{1,3})', Operator),
(r'[-+/*%=<>&^|!\\~]=?', Operator),
(r'[\(\)\[\]:;,<>/\?\{\}]', Punctuation), # yes, there's no shortage
# of punctuation in Perl!
(r'(?=\w)', Name, 'name'),
],
'format': [
(r'\.\n', String.Interpol, '#pop'),
(r'[^\n]*\n', String.Interpol),
],
'varname': [
(r'\s+', Text),
(r'\{', Punctuation, '#pop'), # hash syntax?
(r'\)|,', Punctuation, '#pop'), # argument specifier
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name.Variable, '#pop'),
],
'name': [
(r'[a-zA-Z0-9_]+::', Name.Namespace),
(r'[a-zA-Z0-9_:]+', Name, '#pop'),
(r'[A-Z_]+(?=[^a-zA-Z0-9_])', Name.Constant, '#pop'),
(r'(?=[^a-zA-Z0-9_])', Text, '#pop'),
],
'modulename': [
(r'[a-zA-Z_][\w_]*', Name.Namespace, '#pop')
],
'funcname': [
(r'[a-zA-Z_][\w_]*[\!\?]?', Name.Function),
(r'\s+', Text),
# argument declaration
(r'(\([$@%]*\))(\s*)', bygroups(Punctuation, Text)),
(r'.*?{', Punctuation, '#pop'),
(r';', Punctuation, '#pop'),
],
'cb-string': [
(r'\\[\{\}\\]', String.Other),
(r'\\', String.Other),
(r'\{', String.Other, 'cb-string'),
(r'\}', String.Other, '#pop'),
(r'[^\{\}\\]+', String.Other)
],
'rb-string': [
(r'\\[\(\)\\]', String.Other),
(r'\\', String.Other),
(r'\(', String.Other, 'rb-string'),
(r'\)', String.Other, '#pop'),
(r'[^\(\)]+', String.Other)
],
'sb-string': [
(r'\\[\[\]\\]', String.Other),
(r'\\', String.Other),
(r'\[', String.Other, 'sb-string'),
(r'\]', String.Other, '#pop'),
(r'[^\[\]]+', String.Other)
],
'lt-string': [
(r'\\[\<\>\\]', String.Other),
(r'\\', String.Other),
(r'\<', String.Other, 'lt-string'),
(r'\>', String.Other, '#pop'),
            (r'[^\<\>]+', String.Other)
],
'end-part': [
(r'.+', Comment.Preproc, '#pop')
]
}
def analyse_text(text):
if shebang_matches(text, r'perl(\d\.\d\.\d)?'):
return True
if 'my $' in text:
return 0.9
return 0.1 # who knows, might still be perl!
class LuaLexer(RegexLexer):
"""
For `Lua <http://www.lua.org>`_ source code.
Additional options accepted:
`func_name_highlighting`
If given and ``True``, highlight builtin function names
(default: ``True``).
`disabled_modules`
If given, must be a list of module names whose function names
should not be highlighted. By default all modules are highlighted.
To get a list of allowed modules have a look into the
`_luabuiltins` module:
.. sourcecode:: pycon
>>> from pygments.lexers._luabuiltins import MODULES
>>> MODULES.keys()
['string', 'coroutine', 'modules', 'io', 'basic', ...]
"""
name = 'Lua'
aliases = ['lua']
filenames = ['*.lua']
mimetypes = ['text/x-lua', 'application/x-lua']
tokens = {
'root': [
(r'(?s)--\[(=*)\[.*?\]\1\]', Comment.Multiline),
('--.*$', Comment.Single),
(r'(?i)(\d*\.\d+|\d+\.\d*)(e[+-]?\d+)?', Number.Float),
(r'(?i)\d+e[+-]?\d+', Number.Float),
('(?i)0x[0-9a-f]*', Number.Hex),
(r'\d+', Number.Integer),
(r'\n', Text),
(r'[^\S\n]', Text),
(r'(?s)\[(=*)\[.*?\]\1\]', String.Multiline),
(r'[\[\]\{\}\(\)\.,:;]', Punctuation),
(r'(==|~=|<=|>=|\.\.|\.\.\.|[=+\-*/%^<>#])', Operator),
(r'(and|or|not)\b', Operator.Word),
('(break|do|else|elseif|end|for|if|in|repeat|return|then|until|'
r'while)\b', Keyword),
(r'(local)\b', Keyword.Declaration),
(r'(true|false|nil)\b', Keyword.Constant),
(r'(function)(\s+)', bygroups(Keyword, Text), 'funcname'),
(r'(class)(\s+)', bygroups(Keyword, Text), 'classname'),
(r'[A-Za-z_][A-Za-z0-9_]*(\.[A-Za-z_][A-Za-z0-9_]*)?', Name),
# multiline strings
(r'(?s)\[(=*)\[(.*?)\]\1\]', String),
("'", String.Single, combined('stringescape', 'sqs')),
('"', String.Double, combined('stringescape', 'dqs'))
],
'funcname': [
('[A-Za-z_][A-Za-z0-9_]*', Name.Function, '#pop'),
# inline function
('\(', Punctuation, '#pop'),
],
'classname': [
('[A-Za-z_][A-Za-z0-9_]*', Name.Class, '#pop')
],
# if I understand correctly, every character is valid in a lua string,
# so this state is only for later corrections
'string': [
('.', String)
],
'stringescape': [
(r'''\\([abfnrtv\\"']|\d{1,3})''', String.Escape)
],
'sqs': [
("'", String, '#pop'),
include('string')
],
'dqs': [
('"', String, '#pop'),
include('string')
]
}
def __init__(self, **options):
self.func_name_highlighting = get_bool_opt(
options, 'func_name_highlighting', True)
self.disabled_modules = get_list_opt(options, 'disabled_modules', [])
self._functions = set()
if self.func_name_highlighting:
from pygments.lexers._luabuiltins import MODULES
            for mod, func in MODULES.items():
if mod not in self.disabled_modules:
self._functions.update(func)
RegexLexer.__init__(self, **options)
def get_tokens_unprocessed(self, text):
for index, token, value in \
RegexLexer.get_tokens_unprocessed(self, text):
if token is Name:
if value in self._functions:
yield index, Name.Builtin, value
continue
elif '.' in value:
a, b = value.split('.')
yield index, Name, a
yield index + len(a), Punctuation, u'.'
yield index + len(a) + 1, Name, b
continue
yield index, token, value
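# Illustrative use of the options documented in the class docstring (assumes the
# lexer is driven through the normal Pygments API):
#
#   lexer = LuaLexer(disabled_modules=['string', 'io'])
#   # dotted names such as string.format are then emitted as plain Name tokens
#   # instead of Name.Builtin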
class MiniDLexer(RegexLexer):
"""
For `MiniD <http://www.dsource.org/projects/minid>`_ (a D-like scripting
language) source.
"""
name = 'MiniD'
filenames = ['*.md']
aliases = ['minid']
mimetypes = ['text/x-minidsrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# Keywords
(r'(as|assert|break|case|catch|class|continue|coroutine|default'
r'|do|else|finally|for|foreach|function|global|namespace'
r'|if|import|in|is|local|module|return|super|switch'
r'|this|throw|try|vararg|while|with|yield)\b', Keyword),
(r'(false|true|null)\b', Keyword.Constant),
# FloatLiteral
(r'([0-9][0-9_]*)?\.[0-9_]+([eE][+\-]?[0-9_]+)?', Number.Float),
# IntegerLiteral
# -- Binary
(r'0[Bb][01_]+', Number),
# -- Octal
(r'0[Cc][0-7_]+', Number.Oct),
# -- Hexadecimal
(r'0[xX][0-9a-fA-F_]+', Number.Hex),
# -- Decimal
(r'(0|[1-9][0-9_]*)', Number.Integer),
# CharacterLiteral
(r"""'(\\['"?\\abfnrtv]|\\x[0-9a-fA-F]{2}|\\[0-9]{1,3}"""
r"""|\\u[0-9a-fA-F]{4}|\\U[0-9a-fA-F]{8}|.)'""",
String.Char
),
# StringLiteral
# -- WysiwygString
(r'@"(""|.)*"', String),
# -- AlternateWysiwygString
(r'`(``|.)*`', String),
# -- DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Tokens
(
r'(~=|\^=|%=|\*=|==|!=|>>>=|>>>|>>=|>>|>=|<=>|\?=|-\>'
r'|<<=|<<|<=|\+\+|\+=|--|-=|\|\||\|=|&&|&=|\.\.|/=)'
r'|[-/.&$@|\+<>!()\[\]{}?,;:=*%^~#\\]', Punctuation
),
# Identifier
(r'[a-zA-Z_]\w*', Name),
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
],
}
class IoLexer(RegexLexer):
"""
For `Io <http://iolanguage.com/>`_ (a small, prototype-based
programming language) source.
*New in Pygments 0.10.*
"""
name = 'Io'
filenames = ['*.io']
aliases = ['io']
mimetypes = ['text/x-iosrc']
tokens = {
'root': [
(r'\n', Text),
(r'\s+', Text),
# Comments
(r'//(.*?)\n', Comment.Single),
(r'#(.*?)\n', Comment.Single),
(r'/(\\\n)?[*](.|\n)*?[*](\\\n)?/', Comment.Multiline),
(r'/\+', Comment.Multiline, 'nestedcomment'),
# DoubleQuotedString
(r'"(\\\\|\\"|[^"])*"', String),
# Operators
(r'::=|:=|=|\(|\)|;|,|\*|-|\+|>|<|@|!|/|\||\^|\.|%|&|\[|\]|\{|\}',
Operator),
# keywords
(r'(clone|do|doFile|doString|method|for|if|else|elseif|then)\b',
Keyword),
# constants
(r'(nil|false|true)\b', Name.Constant),
# names
            (r'(Object|list|List|Map|args|Sequence|Coroutine|File)\b',
             Name.Builtin),
('[a-zA-Z_][a-zA-Z0-9_]*', Name),
# numbers
(r'(\d+\.?\d*|\d*\.\d+)([eE][+-]?[0-9]+)?', Number.Float),
(r'\d+', Number.Integer)
],
'nestedcomment': [
(r'[^+/]+', Comment.Multiline),
(r'/\+', Comment.Multiline, '#push'),
(r'\+/', Comment.Multiline, '#pop'),
(r'[+/]', Comment.Multiline),
]
}
class TclLexer(RegexLexer):
"""
For Tcl source code.
*New in Pygments 0.10.*
"""
keyword_cmds_re = (
r'\b(after|apply|array|break|catch|continue|elseif|else|error|'
r'eval|expr|for|foreach|global|if|namespace|proc|rename|return|'
r'set|switch|then|trace|unset|update|uplevel|upvar|variable|'
r'vwait|while)\b'
)
builtin_cmds_re = (
r'\b(append|bgerror|binary|cd|chan|clock|close|concat|dde|dict|'
r'encoding|eof|exec|exit|fblocked|fconfigure|fcopy|file|'
r'fileevent|flush|format|gets|glob|history|http|incr|info|interp|'
r'join|lappend|lassign|lindex|linsert|list|llength|load|loadTk|'
r'lrange|lrepeat|lreplace|lreverse|lsearch|lset|lsort|mathfunc|'
r'mathop|memory|msgcat|open|package|pid|pkg::create|pkg_mkIndex|'
r'platform|platform::shell|puts|pwd|re_syntax|read|refchan|'
r'regexp|registry|regsub|scan|seek|socket|source|split|string|'
r'subst|tell|time|tm|unknown|unload)\b'
)
name = 'Tcl'
aliases = ['tcl']
filenames = ['*.tcl']
mimetypes = ['text/x-tcl', 'text/x-script.tcl', 'application/x-tcl']
def _gen_command_rules(keyword_cmds_re, builtin_cmds_re, context=""):
return [
(keyword_cmds_re, Keyword, 'params' + context),
(builtin_cmds_re, Name.Builtin, 'params' + context),
(r'([\w\.\-]+)', Name.Variable, 'params' + context),
(r'#', Comment, 'comment'),
]
tokens = {
'root': [
include('command'),
include('basic'),
include('data'),
],
'command': _gen_command_rules(keyword_cmds_re, builtin_cmds_re),
'command-in-brace': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-brace"),
'command-in-bracket': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-bracket"),
'command-in-paren': _gen_command_rules(keyword_cmds_re,
builtin_cmds_re,
"-in-paren"),
'basic': [
(r'\(', Keyword, 'paren'),
(r'\[', Keyword, 'bracket'),
(r'\{', Keyword, 'brace'),
(r'"', String.Double, 'string'),
(r'(eq|ne|in|ni)\b', Operator.Word),
(r'!=|==|<<|>>|<=|>=|&&|\|\||\*\*|[-+~!*/%<>&^|?:]', Operator),
],
'data': [
(r'\s+', Text),
(r'0x[a-fA-F0-9]+', Number.Hex),
(r'0[0-7]+', Number.Oct),
(r'\d+\.\d+', Number.Float),
(r'\d+', Number.Integer),
(r'\$([\w\.\-\:]+)', Name.Variable),
(r'([\w\.\-\:]+)', Text),
],
'params': [
(r';', Keyword, '#pop'),
(r'\n', Text, '#pop'),
(r'(else|elseif|then)', Keyword),
include('basic'),
include('data'),
],
'params-in-brace': [
(r'}', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-paren': [
(r'\)', Keyword, ('#pop', '#pop')),
include('params')
],
'params-in-bracket': [
(r'\]', Keyword, ('#pop', '#pop')),
include('params')
],
'string': [
(r'\[', String.Double, 'string-square'),
(r'(\\\\|\\[0-7]+|\\.|[^"])', String.Double),
(r'"', String.Double, '#pop')
],
'string-square': [
(r'\[', String.Double, 'string-square'),
(r'(\\\\|\\[0-7]+|\\.|[^\]])', String.Double),
(r'\]', String.Double, '#pop')
],
'brace': [
(r'}', Keyword, '#pop'),
include('command-in-brace'),
include('basic'),
include('data'),
],
'paren': [
(r'\)', Keyword, '#pop'),
include('command-in-paren'),
include('basic'),
include('data'),
],
'bracket': [
(r'\]', Keyword, '#pop'),
include('command-in-bracket'),
include('basic'),
include('data'),
],
'comment': [
(r'.*[^\\]\n', Comment, '#pop'),
(r'.*\\\n', Comment),
],
}
def analyse_text(text):
return shebang_matches(text, r'(tcl)')
class ClojureLexer(RegexLexer):
"""
Lexer for `Clojure <http://clojure.org/>`_ source code.
*New in Pygments 0.11.*
"""
name = 'Clojure'
aliases = ['clojure', 'clj']
filenames = ['*.clj']
mimetypes = ['text/x-clojure', 'application/x-clojure']
keywords = [
'fn', 'def', 'defn', 'defmacro', 'defmethod', 'defmulti', 'defn-',
'defstruct',
'if', 'cond',
'let', 'for'
]
builtins = [
'.', '..',
'*', '+', '-', '->', '..', '/', '<', '<=', '=', '==', '>', '>=',
'accessor', 'agent', 'agent-errors', 'aget', 'alength', 'all-ns',
'alter', 'and', 'append-child', 'apply', 'array-map', 'aset',
'aset-boolean', 'aset-byte', 'aset-char', 'aset-double', 'aset-float',
'aset-int', 'aset-long', 'aset-short', 'assert', 'assoc', 'await',
'await-for', 'bean', 'binding', 'bit-and', 'bit-not', 'bit-or',
'bit-shift-left', 'bit-shift-right', 'bit-xor', 'boolean', 'branch?',
'butlast', 'byte', 'cast', 'char', 'children', 'class',
'clear-agent-errors', 'comment', 'commute', 'comp', 'comparator',
'complement', 'concat', 'conj', 'cons', 'constantly',
'construct-proxy', 'contains?', 'count', 'create-ns', 'create-struct',
'cycle', 'dec', 'deref', 'difference', 'disj', 'dissoc', 'distinct',
'doall', 'doc', 'dorun', 'doseq', 'dosync', 'dotimes', 'doto',
'double', 'down', 'drop', 'drop-while', 'edit', 'end?', 'ensure',
'eval', 'every?', 'false?', 'ffirst', 'file-seq', 'filter', 'find',
'find-doc', 'find-ns', 'find-var', 'first', 'float', 'flush',
'fnseq', 'frest', 'gensym', 'get', 'get-proxy-class',
'hash-map', 'hash-set', 'identical?', 'identity', 'if-let', 'import',
'in-ns', 'inc', 'index', 'insert-child', 'insert-left', 'insert-right',
'inspect-table', 'inspect-tree', 'instance?', 'int', 'interleave',
'intersection', 'into', 'into-array', 'iterate', 'join', 'key', 'keys',
'keyword', 'keyword?', 'last', 'lazy-cat', 'lazy-cons', 'left',
'lefts', 'line-seq', 'list', 'list*', 'load', 'load-file',
'locking', 'long', 'loop', 'macroexpand', 'macroexpand-1',
'make-array', 'make-node', 'map', 'map-invert', 'map?', 'mapcat',
'max', 'max-key', 'memfn', 'merge', 'merge-with', 'meta', 'min',
'min-key', 'name', 'namespace', 'neg?', 'new', 'newline', 'next',
'nil?', 'node', 'not', 'not-any?', 'not-every?', 'not=', 'ns-imports',
'ns-interns', 'ns-map', 'ns-name', 'ns-publics', 'ns-refers',
'ns-resolve', 'ns-unmap', 'nth', 'nthrest', 'or', 'parse', 'partial',
'path', 'peek', 'pop', 'pos?', 'pr', 'pr-str', 'print', 'print-str',
'println', 'println-str', 'prn', 'prn-str', 'project', 'proxy',
'proxy-mappings', 'quot', 'rand', 'rand-int', 'range', 're-find',
're-groups', 're-matcher', 're-matches', 're-pattern', 're-seq',
'read', 'read-line', 'reduce', 'ref', 'ref-set', 'refer', 'rem',
'remove', 'remove-method', 'remove-ns', 'rename', 'rename-keys',
'repeat', 'replace', 'replicate', 'resolve', 'rest', 'resultset-seq',
'reverse', 'rfirst', 'right', 'rights', 'root', 'rrest', 'rseq',
'second', 'select', 'select-keys', 'send', 'send-off', 'seq',
'seq-zip', 'seq?', 'set', 'short', 'slurp', 'some', 'sort',
'sort-by', 'sorted-map', 'sorted-map-by', 'sorted-set',
'special-symbol?', 'split-at', 'split-with', 'str', 'string?',
'struct', 'struct-map', 'subs', 'subvec', 'symbol', 'symbol?',
'sync', 'take', 'take-nth', 'take-while', 'test', 'time', 'to-array',
'to-array-2d', 'tree-seq', 'true?', 'union', 'up', 'update-proxy',
'val', 'vals', 'var-get', 'var-set', 'var?', 'vector', 'vector-zip',
'vector?', 'when', 'when-first', 'when-let', 'when-not',
'with-local-vars', 'with-meta', 'with-open', 'with-out-str',
'xml-seq', 'xml-zip', 'zero?', 'zipmap', 'zipper']
# valid names for identifiers
    # strictly speaking, the only restriction is that a name cannot consist
    # entirely of numbers, but this pattern should be good enough for now
valid_name = r'[a-zA-Z0-9!$%&*+,/:<=>?@^_~-]+'
tokens = {
'root' : [
# the comments - always starting with semicolon
# and going to the end of the line
(r';.*$', Comment.Single),
# whitespaces - usually not relevant
(r'\s+', Text),
# numbers
(r'-?\d+\.\d+', Number.Float),
(r'-?\d+', Number.Integer),
# support for uncommon kinds of numbers -
# have to figure out what the characters mean
#(r'(#e|#i|#b|#o|#d|#x)[\d.]+', Number),
# strings, symbols and characters
(r'"(\\\\|\\"|[^"])*"', String),
(r"'" + valid_name, String.Symbol),
(r"\\([()/'\".'_!§$%& ?;=+-]{1}|[a-zA-Z0-9]+)", String.Char),
# constants
(r'(#t|#f)', Name.Constant),
# special operators
(r"('|#|`|,@|,|\.)", Operator),
# highlight the keywords
('(%s)' % '|'.join([
re.escape(entry) + ' ' for entry in keywords]),
Keyword
),
# first variable in a quoted string like
# '(this is syntactic sugar)
(r"(?<='\()" + valid_name, Name.Variable),
(r"(?<=#\()" + valid_name, Name.Variable),
# highlight the builtins
("(?<=\()(%s)" % '|'.join([
re.escape(entry) + ' ' for entry in builtins]),
Name.Builtin
),
# the remaining functions
(r'(?<=\()' + valid_name, Name.Function),
# find the remaining variables
(valid_name, Name.Variable),
# Clojure accepts vector notation
(r'(\[|\])', Punctuation),
# Clojure accepts map notation
(r'(\{|\})', Punctuation),
# the famous parentheses!
(r'(\(|\))', Punctuation),
],
}
|
the-stack_0_15345 | from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import time
import os
import warnings
import numpy as np
from keras import Model
from keras import layers
from keras.optimizers import SGD, Adam
from keras.callbacks import ModelCheckpoint, EarlyStopping
from ConfigSpace import ConfigurationSpace
from ConfigSpace import UniformFloatHyperparameter, CategoricalHyperparameter, InCondition
from alphaml.engine.components.models.base_model import BaseClassificationModel
from alphaml.engine.components.data_preprocessing.image_preprocess import preprocess
from alphaml.engine.components.data_manager import DataManager
class BaseImageClassificationModel(BaseClassificationModel):
def __init__(self):
self.base_model = None
self.model_name = None
self.work_size = None
self.min_size = None
self.default_size = None
super().__init__()
def set_model_config(self, inputshape, classnum, *args, **kwargs):
self.inputshape = inputshape
self.classnum = classnum
@staticmethod
def set_training_space(cs: ConfigurationSpace):
'''
Set hyperparameters for training
'''
batch_size = CategoricalHyperparameter('batch_size', [16, 32], default_value=32)
keep_prob = UniformFloatHyperparameter('keep_prob', 0, 0.99, default_value=0.5)
cs.add_hyperparameters([batch_size, keep_prob])
@staticmethod
def set_optimizer_space(cs: ConfigurationSpace):
'''
Set hyperparameters for optimizers
'''
optimizer = CategoricalHyperparameter('optimizer', ['SGD', 'Adam'], default_value='Adam')
sgd_lr = UniformFloatHyperparameter('sgd_lr', 0.00001, 0.1,
default_value=0.005, log=True) # log scale
sgd_decay = UniformFloatHyperparameter('sgd_decay', 0.0001, 0.1,
default_value=0.05, log=True) # log scale
sgd_momentum = UniformFloatHyperparameter('sgd_momentum', 0.3, 0.99, default_value=0.9)
adam_lr = UniformFloatHyperparameter('adam_lr', 0.00001, 0.1,
default_value=0.005, log=True) # log scale
adam_decay = UniformFloatHyperparameter('adam_decay', 0.0001, 0.1,
default_value=0.05, log=True) # log scale
sgd_lr_cond = InCondition(child=sgd_lr, parent=optimizer, values=['SGD'])
sgd_decay_cond = InCondition(child=sgd_decay, parent=optimizer, values=['SGD'])
sgd_momentum_cond = InCondition(child=sgd_momentum, parent=optimizer, values=['SGD'])
adam_lr_cond = InCondition(child=adam_lr, parent=optimizer, values=['Adam'])
adam_decay_cond = InCondition(child=adam_decay, parent=optimizer, values=['Adam'])
cs.add_hyperparameters([optimizer, sgd_lr, sgd_decay, sgd_momentum, adam_lr, adam_decay])
cs.add_conditions([sgd_lr_cond, sgd_decay_cond, sgd_momentum_cond, adam_lr_cond, adam_decay_cond])
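    # A rough sketch of how the two hooks above might be combined (hyperparameter
    # names as defined here; the surrounding AutoML framework may wire this up
    # differently):
    #
    #   cs = ConfigurationSpace()
    #   BaseImageClassificationModel.set_training_space(cs)
    #   BaseImageClassificationModel.set_optimizer_space(cs)
    #   config = cs.sample_configuration()  # e.g. {'batch_size': 32, 'optimizer': 'Adam', ...}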
def validate_inputshape(self):
if self.inputshape[0] < self.min_size or self.inputshape[1] < self.min_size:
raise ValueError(
"The minimum inputshape of " + self.model_name + " is " + str((self.min_size, self.min_size)) +
", while " + str(self.inputshape[0:2]) + " given.")
if self.inputshape[0] < self.work_size or self.inputshape[1] < self.work_size:
warnings.warn(
"The minimum recommended inputshape of the model is " + str((self.work_size, self.work_size)) +
", while " + str(self.inputshape[0:2]) + " given.")
def parse_monitor(self):
if self.metricstr == 'mse':
self.monitor = 'mean_squared_error'
elif self.metricstr == 'mae':
            self.monitor = 'mean_absolute_error'
else:
self.monitor = self.metricstr
return
def load_data(self, data, **kwargs):
trainpregen, validpregen = preprocess(data)
self.metricstr = kwargs['metric']
self.parse_monitor()
if data.train_X is None and data.train_y is None:
if hasattr(data, 'train_valid_dir') or (hasattr(data, 'train_dir') and hasattr(data, 'valid_dir')):
if hasattr(data, 'train_valid_dir'):
self.train_gen = trainpregen.flow_from_directory(data.train_valid_dir,
target_size=self.inputshape[:2],
batch_size=self.batch_size, subset='training')
self.valid_gen = trainpregen.flow_from_directory(data.train_valid_dir,
target_size=self.inputshape[:2],
batch_size=self.batch_size, subset='validation')
self.classnum = self.train_gen.num_classes
self.monitor = 'val_' + self.monitor
else:
self.train_gen = trainpregen.flow_from_directory(data.train_dir, target_size=self.inputshape[:2],
batch_size=self.batch_size)
self.valid_gen = validpregen.flow_from_directory(data.valid_dir, target_size=self.inputshape[:2],
batch_size=self.batch_size)
self.classnum = self.train_gen.num_classes
self.monitor = 'val_' + self.monitor
else:
raise ValueError("Invalid data input!")
else:
if data.val_X is None and data.val_y is None:
if_valid = False
else:
if_valid = True
self.train_gen = trainpregen.flow(data.train_X, data.train_y, batch_size=self.batch_size)
if if_valid:
self.valid_gen = validpregen.flow(data.val_X, data.val_y, batch_size=self.batch_size)
self.monitor = 'val_' + self.monitor
else:
self.valid_gen = None
self.monitor = self.monitor
def fit(self, data: DataManager, **kwargs):
if self.base_model is None:
raise AttributeError("Base model is not defined!")
if self.optimizer == 'SGD':
optimizer = SGD(self.sgd_lr, self.sgd_momentum, self.sgd_decay, nesterov=True)
elif self.optimizer == 'Adam':
optimizer = Adam(self.adam_lr, decay=self.adam_decay)
else:
raise ValueError('No optimizer named %s defined' % str(self.optimizer))
timestr = time.strftime('%Y-%m-%d-%H:%M:%S', time.localtime(time.time()))
# build model
if self.classnum == 1:
final_activation = 'sigmoid'
loss = 'binary_crossentropy'
else:
final_activation = 'softmax'
loss = 'categorical_crossentropy'
y = self.base_model.output
y = layers.Dropout(1 - self.keep_prob)(y)
y = layers.Dense(self.classnum, activation=final_activation, name='Dense_final')(y)
model = Model(inputs=self.base_model.input, outputs=y)
# TODO: save models after training
save_dir = 'dl_models'
if not os.path.exists(save_dir):
os.makedirs(save_dir)
modelpath = os.path.join(save_dir, 'model_%s.hdf5' % timestr)
checkpoint = ModelCheckpoint(filepath=modelpath,
monitor=self.monitor,
save_best_only=True,
period=1)
earlystop = EarlyStopping(monitor=self.monitor, patience=12)
model.compile(optimizer=optimizer, loss=loss, metrics=[self.metricstr])
model.fit_generator(generator=self.train_gen,
epochs=200,
validation_data=self.valid_gen,
callbacks=[checkpoint, earlystop])
self.estimator = model
self.best_result = checkpoint.best
return self, modelpath
def predict_proba(self, data: DataManager):
if self.estimator is None:
raise TypeError("Unsupported estimator type 'NoneType'!")
_, testpregen = preprocess()
if hasattr(data, 'test_dir'):
self.test_gen = testpregen.flow_from_directory(data.test_dir, target_size=self.inputshape[:2],
batch_size=32)
else:
            # ImageDataGenerator.flow() has no target_size argument; test_X is
            # expected to already match self.inputshape
            self.test_gen = testpregen.flow(data.test_X, batch_size=32)
pred = self.estimator.predict_generator(self.test_gen)
return pred
def predict(self, data: DataManager):
pred = self.predict_proba(data)
pred = np.argmax(pred, axis=-1)
return pred
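# A hypothetical subclass sketch (illustrative only; the Keras application and the
# size attributes chosen here are assumptions, not part of the original project):
#
#   from keras.applications import ResNet50
#
#   class ResNet50Classifier(BaseImageClassificationModel):
#       def __init__(self):
#           super().__init__()
#           self.model_name = 'ResNet50'
#           self.min_size = 32
#           self.work_size = 224
#           self.default_size = 224
#
#       def set_model_config(self, inputshape, classnum, *args, **kwargs):
#           super().set_model_config(inputshape, classnum)
#           self.validate_inputshape()
#           self.base_model = ResNet50(include_top=False, pooling='avg',
#                                      weights=None, input_shape=self.inputshape)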
|
the-stack_0_15346 | import cntk as C
import numpy as np
from helpers import *
from cntk.layers import *
from cntk.layers.sequence import *
from cntk.layers.typing import *
from cntk.debugging import debug_model
import pickle
import importlib
import os
class PolyMath:
def __init__(self, config_file):
data_config = importlib.import_module(config_file).data_config
model_config = importlib.import_module(config_file).model_config
self.word_count_threshold = data_config['word_count_threshold']
self.char_count_threshold = data_config['char_count_threshold']
self.word_size = data_config['word_size']
self.abs_path = os.path.dirname(os.path.abspath(__file__))
pickle_file = os.path.join(self.abs_path, data_config['pickle_file'])
with open(pickle_file, 'rb') as vf:
known, self.vocab, self.chars = pickle.load(vf)
self.wg_dim = known
self.wn_dim = len(self.vocab) - known
self.c_dim = len(self.chars)
self.a_dim = 1
self.hidden_dim = model_config['hidden_dim']
self.w2v_hidden_dim = model_config['w2v_hidden_dim']
self.convs = model_config['char_convs']
self.dropout = model_config['dropout']
self.char_emb_dim = model_config['char_emb_dim']
self.highway_layers = model_config['highway_layers']
self.two_step = model_config['two_step']
self.use_cudnn = model_config['use_cudnn']
self.use_sparse = True
# Source and target inputs to the model
inputAxis = C.Axis('inputAxis')
outputAxis = C.Axis('outputAxis')
InputSequence = C.layers.SequenceOver[inputAxis]
OutputSequence = C.layers.SequenceOver[outputAxis]
print('dropout', self.dropout)
print('use_cudnn', self.use_cudnn)
print('use_sparse', self.use_sparse)
def charcnn(self, x):
conv_out = C.layers.Sequential([
C.layers.Embedding(self.char_emb_dim),
C.layers.Dropout(self.dropout),
C.layers.Convolution2D((5,self.char_emb_dim), self.convs, activation=C.relu, init=C.glorot_uniform(), bias=True, init_bias=0, name='charcnn_conv')])(x)
return C.reduce_max(conv_out, axis=1) # workaround cudnn failure in GlobalMaxPooling
def embed(self):
# load glove
npglove = np.zeros((self.wg_dim, self.w2v_hidden_dim), dtype=np.float32)
with open(os.path.join(self.abs_path, 'glove.6B.100d.txt'), encoding='utf-8') as f:
for line in f:
parts = line.split()
word = parts[0].lower()
if word in self.vocab:
npglove[self.vocab[word],:] = np.asarray([float(p) for p in parts[1:]])
glove = C.constant(npglove)
nonglove = C.parameter(shape=(len(self.vocab) - self.wg_dim, self.w2v_hidden_dim), init=C.glorot_uniform(), name='TrainableE')
def func(wg, wn):
return C.times(wg, glove) + C.times(wn, nonglove)
return func
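    # The GloVe file read above is assumed to be in the standard space-separated
    # text format, one token per line, e.g.
    #   the 0.418 0.24968 -0.41242 ...   (100 floats for the 6B.100d variant)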
def input_layer(self,cgw,cnw,cc,qgw,qnw,qc):
cgw_ph = C.placeholder()
cnw_ph = C.placeholder()
cc_ph = C.placeholder()
qgw_ph = C.placeholder()
qnw_ph = C.placeholder()
qc_ph = C.placeholder()
input_chars = C.placeholder(shape=(1,self.word_size,self.c_dim))
input_glove_words = C.placeholder(shape=(self.wg_dim,))
input_nonglove_words = C.placeholder(shape=(self.wn_dim,))
# we need to reshape because GlobalMaxPooling/reduce_max is retaining a trailing singleton dimension
# todo GlobalPooling/reduce_max should have a keepdims default to False
embedded = C.splice(
C.reshape(self.charcnn(input_chars), self.convs),
self.embed()(input_glove_words, input_nonglove_words), name='splice_embed')
processed = C.layers.Sequential([For(range(2), lambda: OptimizedRnnStack(self.hidden_dim, bidirectional=True, use_cudnn=self.use_cudnn, name='input_rnn'))])(embedded)
qce = C.one_hot(qc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
cce = C.one_hot(cc_ph, num_classes=self.c_dim, sparse_output=self.use_sparse)
q_processed = processed.clone(C.CloneMethod.share, {input_chars:qce, input_glove_words:qgw_ph, input_nonglove_words:qnw_ph})
c_processed = processed.clone(C.CloneMethod.share, {input_chars:cce, input_glove_words:cgw_ph, input_nonglove_words:cnw_ph})
return C.as_block(
C.combine([c_processed, q_processed]),
[(cgw_ph, cgw),(cnw_ph, cnw),(cc_ph, cc),(qgw_ph, qgw),(qnw_ph, qnw),(qc_ph, qc)],
'input_layer',
'input_layer')
def gated_attention_gru_layer(self, context, query):
q_processed = C.placeholder(shape=(2*self.hidden_dim,))
c_processed = C.placeholder(shape=(2*self.hidden_dim,))
#gate weight
Wg = C.parameter(shape=(4*self.hidden_dim, 4*self.hidden_dim))
att_gru = C.layers.GRU(2*self.hidden_dim)
attention_model = C.layers.AttentionModel(self.hidden_dim, name='attention_model')
@C.Function
def out_func0(att_input, enc_input):
enc_input2 = enc_input
@C.Function
            def gru_with_attention(dh, x):
c_att = attention_model(att_input, x)
x = C.splice(x, c_att)
x = C.element_times(x, C.sigmoid(C.times(x, Wg)))
return att_gru(dh, x)
            att_context = Recurrence(gru_with_attention)(enc_input2)
return att_context
att_context = out_func0(q_processed, c_processed)
return C.as_block(
att_context,
[(c_processed, context), (q_processed, query)],
'gated_attention_gru_layer',
'gated_attention_gru_layer')
def matching_attention_layer(self, attention_context):
att_context = C.placeholder(shape=(2*self.hidden_dim,))
#matching layer
matching_model = C.layers.AttentionModel(attention_dim=self.hidden_dim, name='attention_model')
#gate weight
Wg = C.parameter(shape=(2*self.hidden_dim, 2*self.hidden_dim))
#gru
att_gru = C.layers.GRU(self.hidden_dim)
@C.Function
def out_func1(att_input, enc_input):
enc_input2 = enc_input
@C.Function
def bigru_with_match(dh, x):
c_att = matching_model(att_input, dh)
x = C.splice(x, c_att)
x = C.element_times(x, C.sigmoid(C.times(x, Wg)))
return att_gru(dh, x)
return C.splice(C.layers.Recurrence(bigru_with_match)(enc_input2),
C.layers.Recurrence(bigru_with_match, go_backwards=True)(enc_input2),
name="bigru_with_match")
match_context = out_func1(att_context, att_context)
return C.as_block(
match_context,
[(att_context, attention_context)],
'matching_attention_layer',
'matching_attention_layer')
def output_layer(self, query, match_context):
q_processed = C.placeholder(shape=(2*self.hidden_dim,))
mat_context = C.placeholder(shape=(2*self.hidden_dim,))
#output layer
r_q = question_pooling(q_processed, 2*self.hidden_dim) #shape n*(2*self.hidden_dim)
p1_logits = attention_weight(mat_context, r_q, 2*self.hidden_dim)
attention_pool = C.sequence.reduce_sum(p1_logits * mat_context)
state = C.layers.GRU(2*self.hidden_dim)(attention_pool, r_q)
p2_logits = attention_weight(mat_context, state, 2*self.hidden_dim)
@C.Function
def start_ave_point(p1_logits, p2_logits, point):
@C.Function
def start_ave(last, now):
now = now + last - last
new_start = now * C.sequence.gather(p2_logits, point)
point = C.sequence.future_value(point)
return new_start
start_logits_ave = C.layers.Recurrence(start_ave)(p1_logits)
return start_logits_ave
point = C.sequence.is_first(p1_logits)
point = C.layers.Sequential([For(range(2), lambda: C.layers.Recurrence(C.plus))])(point)
point = C.greater(C.constant(16), point)
start_logits_ave = start_ave_point(p1_logits, p2_logits, point)
@C.Function
def end_ave_point(p1_logits, p2_logits, point):
@C.Function
def end_ave(last, now):
now = now + last - last
new_end = now * C.sequence.gather(p2_logits, point)
point = C.sequence.past_value(point)
return new_end
end_logits_ave = C.layers.Recurrence(end_ave, go_backwards=True)(p2_logits)
return end_logits_ave
point = C.sequence.is_last(p1_logits)
point = C.layers.Sequential([For(range(2), lambda: C.layers.Recurrence(C.plus, go_backwards=True))])(point)
point = C.greater(C.constant(16),point)
end_logits_ave = end_ave_point(p1_logits, p2_logits, point)
start_logits = seq_hardmax(start_logits_ave)
end_logits = seq_hardmax(end_logits_ave)
'''
start_logits = seq_hardmax(p1_logits)
end_logits = seq_hardmax(p2_logits)
'''
return C.as_block(
C.combine([start_logits, end_logits]),
[(q_processed, query), (mat_context, match_context)],
'output_layer',
'output_layer')
def model(self):
c = C.Axis.new_unique_dynamic_axis('c')
q = C.Axis.new_unique_dynamic_axis('q')
b = C.Axis.default_batch_axis()
cgw = C.input_variable(self.wg_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cgw')
cnw = C.input_variable(self.wn_dim, dynamic_axes=[b,c], is_sparse=self.use_sparse, name='cnw')
qgw = C.input_variable(self.wg_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qgw')
qnw = C.input_variable(self.wn_dim, dynamic_axes=[b,q], is_sparse=self.use_sparse, name='qnw')
cc = C.input_variable((1,self.word_size), dynamic_axes=[b,c], name='cc')
qc = C.input_variable((1,self.word_size), dynamic_axes=[b,q], name='qc')
ab = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ab')
ae = C.input_variable(self.a_dim, dynamic_axes=[b,c], name='ae')
#input layer
c_processed, q_processed = self.input_layer(cgw,cnw,cc,qgw,qnw,qc).outputs
# attention layer
att_context = self.gated_attention_gru_layer(c_processed, q_processed)
        # self-matching attention layer
match_context = self.matching_attention_layer(att_context)
# output layer
start_logits, end_logits = self.output_layer(q_processed, match_context).outputs
# loss
start_loss = seq_loss(start_logits, ab)
end_loss = seq_loss(end_logits, ae)
#paper_loss = start_loss + end_loss
new_loss = all_spans_loss(start_logits, ab, end_logits, ae)
return C.combine([start_logits, end_logits]), new_loss
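# A rough training sketch (illustrative; the learner and feeding logic are
# assumptions, not taken from the original project):
#
#   polymath = PolyMath('config')
#   z, loss = polymath.model()
#   learner = C.adadelta(z.parameters)
#   trainer = C.Trainer(z, (loss, None), learner)
#   # feed minibatches that bind the cgw/cnw/cc/qgw/qnw/qc/ab/ae inputs declared above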
|
the-stack_0_15347 | """
Misc tools for implementing data structures
"""
import re
import collections
import numbers
import codecs
import csv
import types
from datetime import datetime, timedelta
from numpy.lib.format import read_array, write_array
import numpy as np
import pandas as pd
import pandas.algos as algos
import pandas.lib as lib
import pandas.tslib as tslib
from pandas import compat
from pandas.compat import StringIO, BytesIO, range, long, u, zip, map
from pandas.core.config import get_option
from pandas.core import array as pa
class PandasError(Exception):
pass
class SettingWithCopyError(ValueError):
pass
class SettingWithCopyWarning(Warning):
pass
class AmbiguousIndexError(PandasError, KeyError):
pass
_POSSIBLY_CAST_DTYPES = set([np.dtype(t).name
for t in ['O', 'int8',
'uint8', 'int16', 'uint16', 'int32',
'uint32', 'int64', 'uint64']])
_NS_DTYPE = np.dtype('M8[ns]')
_TD_DTYPE = np.dtype('m8[ns]')
_INT64_DTYPE = np.dtype(np.int64)
_DATELIKE_DTYPES = set([np.dtype(t) for t in ['M8[ns]', '<M8[ns]', '>M8[ns]',
'm8[ns]', '<m8[ns]', '>m8[ns]']])
# define abstract base classes to enable isinstance type checking on our
# objects
def create_pandas_abc_type(name, attr, comp):
@classmethod
def _check(cls, inst):
return getattr(inst, attr, None) in comp
dct = dict(__instancecheck__=_check,
__subclasscheck__=_check)
meta = type("ABCBase", (type,), dct)
return meta(name, tuple(), dct)
ABCSeries = create_pandas_abc_type("ABCSeries", "_typ", ("series",))
ABCDataFrame = create_pandas_abc_type("ABCDataFrame", "_typ", ("dataframe",))
ABCPanel = create_pandas_abc_type("ABCPanel", "_typ", ("panel",))
ABCSparseSeries = create_pandas_abc_type("ABCSparseSeries", "_subtyp",
('sparse_series',
'sparse_time_series'))
ABCSparseArray = create_pandas_abc_type("ABCSparseArray", "_subtyp",
('sparse_array', 'sparse_series'))
class _ABCGeneric(type):
def __instancecheck__(cls, inst):
return hasattr(inst, "_data")
ABCGeneric = _ABCGeneric("ABCGeneric", tuple(), {})
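# Illustrative behaviour of the duck-typed ABCs defined above:
#
#   >>> isinstance(pd.Series([1, 2]), ABCSeries)
#   True
#   >>> isinstance(np.array([1, 2]), ABCSeries)
#   False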
def bind_method(cls, name, func):
"""Bind a method to class, python 2 and python 3 compatible.
Parameters
----------
cls : type
class to receive bound method
name : basestring
name of method on class instance
func : function
function to be bound as method
Returns
-------
None
"""
# only python 2 has bound/unbound method issue
if not compat.PY3:
setattr(cls, name, types.MethodType(func, None, cls))
else:
setattr(cls, name, func)
class CategoricalDtypeType(type):
"""
the type of CategoricalDtype, this metaclass determines subclass ability
"""
def __init__(cls, name, bases, attrs):
pass
class CategoricalDtype(object):
__meta__ = CategoricalDtypeType
"""
A np.dtype duck-typed class, suitable for holding a custom categorical dtype.
THIS IS NOT A REAL NUMPY DTYPE, but essentially a sub-class of np.object
"""
name = 'category'
names = None
type = CategoricalDtypeType
subdtype = None
kind = 'O'
str = '|O08'
num = 100
shape = tuple()
itemsize = 8
base = np.dtype('O')
isbuiltin = 0
isnative = 0
def __unicode__(self):
return self.name
def __str__(self):
"""
Return a string representation for a particular Object
Invoked by str(df) in both py2/py3.
Yields Bytestring in Py2, Unicode String in py3.
"""
if compat.PY3:
return self.__unicode__()
return self.__bytes__()
def __bytes__(self):
"""
Return a string representation for a particular object.
Invoked by bytes(obj) in py3 only.
Yields a bytestring in both py2/py3.
"""
from pandas.core.config import get_option
encoding = get_option("display.encoding")
return self.__unicode__().encode(encoding, 'replace')
def __repr__(self):
"""
Return a string representation for a particular object.
Yields Bytestring in Py2, Unicode String in py3.
"""
return str(self)
def __hash__(self):
# make myself hashable
return hash(str(self))
def __eq__(self, other):
if isinstance(other, compat.string_types):
return other == self.name
return isinstance(other, CategoricalDtype)
def isnull(obj):
"""Detect missing values (NaN in numeric arrays, None/NaN in object arrays)
Parameters
----------
arr : ndarray or object value
Object to check for null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is null or if an array is
given which of the element is null.
See also
--------
pandas.notnull: boolean inverse of pandas.isnull
"""
return _isnull(obj)
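# For example (illustrative): with the default behaviour,
#   isnull(np.array([1.0, np.nan, np.inf]))  ->  array([False,  True, False])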
def _isnull_new(obj):
if lib.isscalar(obj):
return lib.checknull(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=isnull))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike(np.asarray(obj))
else:
return obj is None
def _isnull_old(obj):
"""Detect missing values. Treat None, NaN, INF, -INF as null.
Parameters
----------
arr: ndarray or object value
Returns
-------
boolean ndarray or boolean
"""
if lib.isscalar(obj):
return lib.checknull_old(obj)
# hack (for now) because MI registers as ndarray
elif isinstance(obj, pd.MultiIndex):
raise NotImplementedError("isnull is not defined for MultiIndex")
elif isinstance(obj, (ABCSeries, np.ndarray)):
return _isnull_ndarraylike_old(obj)
elif isinstance(obj, ABCGeneric):
return obj._constructor(obj._data.isnull(func=_isnull_old))
elif isinstance(obj, list) or hasattr(obj, '__array__'):
return _isnull_ndarraylike_old(np.asarray(obj))
else:
return obj is None
_isnull = _isnull_new
def _use_inf_as_null(key):
"""Option change callback for null/inf behaviour
Choose which replacement for numpy.isnan / -numpy.isfinite is used.
Parameters
----------
flag: bool
True means treat None, NaN, INF, -INF as null (old way),
False means None and NaN are null, but INF, -INF are not null
(new way).
Notes
-----
This approach to setting global module values is discussed and
approved here:
* http://stackoverflow.com/questions/4859217/
programmatically-creating-variables-in-python/4859312#4859312
"""
flag = get_option(key)
if flag:
globals()['_isnull'] = _isnull_old
else:
globals()['_isnull'] = _isnull_new
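# Illustrative effect (the option key is registered elsewhere in pandas and is
# shown here as an assumption):
#
#   pd.set_option('mode.use_inf_as_null', True)
#   isnull(np.inf)   # -> True, INF is now treated as null
#   pd.set_option('mode.use_inf_as_null', False)
#   isnull(np.inf)   # -> False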
def _isnull_ndarraylike(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj(values.ravel())
result[...] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = np.isnan(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def _isnull_ndarraylike_old(obj):
values = getattr(obj, 'values', obj)
dtype = values.dtype
if dtype.kind in ('O', 'S', 'U'):
# Working around NumPy ticket 1542
shape = values.shape
if values.dtype.kind in ('S', 'U'):
result = np.zeros(values.shape, dtype=bool)
else:
result = np.empty(shape, dtype=bool)
vec = lib.isnullobj_old(values.ravel())
result[:] = vec.reshape(shape)
elif dtype in _DATELIKE_DTYPES:
# this is the NaT pattern
result = values.view('i8') == tslib.iNaT
else:
result = ~np.isfinite(values)
# box
if isinstance(obj, ABCSeries):
from pandas import Series
result = Series(result, index=obj.index, name=obj.name, copy=False)
return result
def notnull(obj):
"""Replacement for numpy.isfinite / -numpy.isnan which is suitable for use
on object arrays.
Parameters
----------
arr : ndarray or object value
Object to check for *not*-null-ness
Returns
-------
isnulled : array-like of bool or bool
Array or bool indicating whether an object is *not* null or if an array
is given which of the element is *not* null.
See also
--------
pandas.isnull : boolean inverse of pandas.notnull
"""
res = isnull(obj)
if np.isscalar(res):
return not res
return ~res
def _is_null_datelike_scalar(other):
""" test whether the object is a null datelike, e.g. Nat
but guard against passing a non-scalar """
if other is pd.NaT or other is None:
return True
elif np.isscalar(other):
# a timedelta
if hasattr(other,'dtype'):
return other.view('i8') == tslib.iNaT
elif is_integer(other) and other == tslib.iNaT:
return True
return isnull(other)
return False
def array_equivalent(left, right):
"""
True if two arrays, left and right, have equal non-NaN elements, and NaNs in
corresponding locations. False otherwise. It is assumed that left and right
are NumPy arrays of the same dtype. The behavior of this function
(particularly with respect to NaNs) is not defined if the dtypes are
different.
Parameters
----------
left, right : ndarrays
Returns
-------
b : bool
Returns True if the arrays are equivalent.
Examples
--------
>>> array_equivalent(np.array([1, 2, nan]), np.array([1, 2, nan]))
True
>>> array_equivalent(np.array([1, nan, 2]), np.array([1, 2, nan]))
False
"""
left, right = np.asarray(left), np.asarray(right)
if left.shape != right.shape: return False
# NaNs occur only in object arrays, float or complex arrays.
if issubclass(left.dtype.type, np.object_):
return ((left == right) | (pd.isnull(left) & pd.isnull(right))).all()
if issubclass(left.dtype.type, (np.floating, np.complexfloating)):
return ((left == right) | (np.isnan(left) & np.isnan(right))).all()
return np.array_equal(left, right)
def _iterable_not_string(x):
return (isinstance(x, collections.Iterable) and
not isinstance(x, compat.string_types))
def flatten(l):
"""Flatten an arbitrarily nested sequence.
Parameters
----------
l : sequence
The non string sequence to flatten
Notes
-----
This doesn't consider strings sequences.
Returns
-------
flattened : generator
"""
for el in l:
if _iterable_not_string(el):
for s in flatten(el):
yield s
else:
yield el
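# Example (illustrative):
#   >>> list(flatten([1, [2, [3, 'ab']], 'cd']))
#   [1, 2, 3, 'ab', 'cd']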
def mask_missing(arr, values_to_mask):
"""
Return a masking array of same size/shape as arr
with entries equaling any member of values_to_mask set to True
"""
if not isinstance(values_to_mask, (list, np.ndarray)):
values_to_mask = [values_to_mask]
try:
values_to_mask = np.array(values_to_mask, dtype=arr.dtype)
except Exception:
values_to_mask = np.array(values_to_mask, dtype=object)
na_mask = isnull(values_to_mask)
nonna = values_to_mask[~na_mask]
mask = None
for x in nonna:
if mask is None:
mask = arr == x
# if x is a string and arr is not, then we get False and we must
# expand the mask to size arr.shape
if np.isscalar(mask):
mask = np.zeros(arr.shape, dtype=bool)
else:
mask |= arr == x
if na_mask.any():
if mask is None:
mask = isnull(arr)
else:
mask |= isnull(arr)
return mask
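# A minimal usage sketch (illustrative only): mask_missing marks entries that
# equal any of the requested values, treating NaN/None via isnull.
def _example_mask_missing():  # pragma: no cover - illustrative only
    arr = np.array([1.0, 2.0, np.nan, 4.0])
    # True where the value equals 2.0 or is NaN
    return mask_missing(arr, [2.0, np.nan])  # [False, True, True, False]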
def _pickle_array(arr):
arr = arr.view(np.ndarray)
buf = BytesIO()
write_array(buf, arr)
return buf.getvalue()
def _unpickle_array(bytes):
arr = read_array(BytesIO(bytes))
# All datetimes should be stored as M8[ns]. When unpickling with
# numpy1.6, it will read these as M8[us]. So this ensures all
# datetime64 types are read as MS[ns]
if is_datetime64_dtype(arr):
arr = arr.view(_NS_DTYPE)
return arr
def _view_wrapper(f, arr_dtype=None, out_dtype=None, fill_wrap=None):
def wrapper(arr, indexer, out, fill_value=np.nan):
if arr_dtype is not None:
arr = arr.view(arr_dtype)
if out_dtype is not None:
out = out.view(out_dtype)
if fill_wrap is not None:
fill_value = fill_wrap(fill_value)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _convert_wrapper(f, conv_dtype):
def wrapper(arr, indexer, out, fill_value=np.nan):
arr = arr.astype(conv_dtype)
f(arr, indexer, out, fill_value=fill_value)
return wrapper
def _take_2d_multi_generic(arr, indexer, out, fill_value, mask_info):
# this is not ideal, performance-wise, but it's better than raising
# an exception (best to optimize in Cython to avoid getting here)
row_idx, col_idx = indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
if fill_value is not None:
if row_needs:
out[row_mask, :] = fill_value
if col_needs:
out[:, col_mask] = fill_value
for i in range(len(row_idx)):
u_ = row_idx[i]
for j in range(len(col_idx)):
v = col_idx[j]
out[i, j] = arr[u_, v]
def _take_nd_generic(arr, indexer, out, axis, fill_value, mask_info):
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
if arr.dtype != out.dtype:
arr = arr.astype(out.dtype)
if arr.shape[axis] > 0:
arr.take(_ensure_platform_int(indexer), axis=axis, out=out)
if needs_masking:
outindexer = [slice(None)] * arr.ndim
outindexer[axis] = mask
out[tuple(outindexer)] = fill_value
_take_1d_dict = {
('int8', 'int8'): algos.take_1d_int8_int8,
('int8', 'int32'): algos.take_1d_int8_int32,
('int8', 'int64'): algos.take_1d_int8_int64,
('int8', 'float64'): algos.take_1d_int8_float64,
('int16', 'int16'): algos.take_1d_int16_int16,
('int16', 'int32'): algos.take_1d_int16_int32,
('int16', 'int64'): algos.take_1d_int16_int64,
('int16', 'float64'): algos.take_1d_int16_float64,
('int32', 'int32'): algos.take_1d_int32_int32,
('int32', 'int64'): algos.take_1d_int32_int64,
('int32', 'float64'): algos.take_1d_int32_float64,
('int64', 'int64'): algos.take_1d_int64_int64,
('int64', 'float64'): algos.take_1d_int64_float64,
('float32', 'float32'): algos.take_1d_float32_float32,
('float32', 'float64'): algos.take_1d_float32_float64,
('float64', 'float64'): algos.take_1d_float64_float64,
('object', 'object'): algos.take_1d_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_1d_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_1d_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_1d_int64_int64, np.int64, np.int64, np.int64)
}
_take_2d_axis0_dict = {
('int8', 'int8'): algos.take_2d_axis0_int8_int8,
('int8', 'int32'): algos.take_2d_axis0_int8_int32,
('int8', 'int64'): algos.take_2d_axis0_int8_int64,
('int8', 'float64'): algos.take_2d_axis0_int8_float64,
('int16', 'int16'): algos.take_2d_axis0_int16_int16,
('int16', 'int32'): algos.take_2d_axis0_int16_int32,
('int16', 'int64'): algos.take_2d_axis0_int16_int64,
('int16', 'float64'): algos.take_2d_axis0_int16_float64,
('int32', 'int32'): algos.take_2d_axis0_int32_int32,
('int32', 'int64'): algos.take_2d_axis0_int32_int64,
('int32', 'float64'): algos.take_2d_axis0_int32_float64,
('int64', 'int64'): algos.take_2d_axis0_int64_int64,
('int64', 'float64'): algos.take_2d_axis0_int64_float64,
('float32', 'float32'): algos.take_2d_axis0_float32_float32,
('float32', 'float64'): algos.take_2d_axis0_float32_float64,
('float64', 'float64'): algos.take_2d_axis0_float64_float64,
('object', 'object'): algos.take_2d_axis0_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_axis0_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_axis0_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis0_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_axis1_dict = {
('int8', 'int8'): algos.take_2d_axis1_int8_int8,
('int8', 'int32'): algos.take_2d_axis1_int8_int32,
('int8', 'int64'): algos.take_2d_axis1_int8_int64,
('int8', 'float64'): algos.take_2d_axis1_int8_float64,
('int16', 'int16'): algos.take_2d_axis1_int16_int16,
('int16', 'int32'): algos.take_2d_axis1_int16_int32,
('int16', 'int64'): algos.take_2d_axis1_int16_int64,
('int16', 'float64'): algos.take_2d_axis1_int16_float64,
('int32', 'int32'): algos.take_2d_axis1_int32_int32,
('int32', 'int64'): algos.take_2d_axis1_int32_int64,
('int32', 'float64'): algos.take_2d_axis1_int32_float64,
('int64', 'int64'): algos.take_2d_axis1_int64_int64,
('int64', 'float64'): algos.take_2d_axis1_int64_float64,
('float32', 'float32'): algos.take_2d_axis1_float32_float32,
('float32', 'float64'): algos.take_2d_axis1_float32_float64,
('float64', 'float64'): algos.take_2d_axis1_float64_float64,
('object', 'object'): algos.take_2d_axis1_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_axis1_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_axis1_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_axis1_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
_take_2d_multi_dict = {
('int8', 'int8'): algos.take_2d_multi_int8_int8,
('int8', 'int32'): algos.take_2d_multi_int8_int32,
('int8', 'int64'): algos.take_2d_multi_int8_int64,
('int8', 'float64'): algos.take_2d_multi_int8_float64,
('int16', 'int16'): algos.take_2d_multi_int16_int16,
('int16', 'int32'): algos.take_2d_multi_int16_int32,
('int16', 'int64'): algos.take_2d_multi_int16_int64,
('int16', 'float64'): algos.take_2d_multi_int16_float64,
('int32', 'int32'): algos.take_2d_multi_int32_int32,
('int32', 'int64'): algos.take_2d_multi_int32_int64,
('int32', 'float64'): algos.take_2d_multi_int32_float64,
('int64', 'int64'): algos.take_2d_multi_int64_int64,
('int64', 'float64'): algos.take_2d_multi_int64_float64,
('float32', 'float32'): algos.take_2d_multi_float32_float32,
('float32', 'float64'): algos.take_2d_multi_float32_float64,
('float64', 'float64'): algos.take_2d_multi_float64_float64,
('object', 'object'): algos.take_2d_multi_object_object,
('bool', 'bool'):
_view_wrapper(algos.take_2d_multi_bool_bool, np.uint8, np.uint8),
('bool', 'object'):
_view_wrapper(algos.take_2d_multi_bool_object, np.uint8, None),
('datetime64[ns]', 'datetime64[ns]'):
_view_wrapper(algos.take_2d_multi_int64_int64, np.int64, np.int64,
fill_wrap=np.int64)
}
def _get_take_nd_function(ndim, arr_dtype, out_dtype, axis=0, mask_info=None):
if ndim <= 2:
tup = (arr_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
return func
tup = (out_dtype.name, out_dtype.name)
if ndim == 1:
func = _take_1d_dict.get(tup, None)
elif ndim == 2:
if axis == 0:
func = _take_2d_axis0_dict.get(tup, None)
else:
func = _take_2d_axis1_dict.get(tup, None)
if func is not None:
func = _convert_wrapper(func, out_dtype)
return func
def func(arr, indexer, out, fill_value=np.nan):
_take_nd_generic(arr, indexer, out, axis=axis,
fill_value=fill_value, mask_info=mask_info)
return func
def take_nd(arr, indexer, axis=0, out=None, fill_value=np.nan,
mask_info=None, allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
Parameters
----------
arr : ndarray
Input array
indexer : ndarray
1-D array of indices to take, subarrays corresponding to -1 value
        indices are filled with fill_value
axis : int, default 0
Axis to take from
out : ndarray or None, default None
Optional output array, must be appropriate type to hold input and
fill_value together, if indexer has any -1 value entries; call
common._maybe_promote to determine this type for any fill_value
fill_value : any, default np.nan
Fill value to replace -1 values with
mask_info : tuple of (ndarray, boolean)
If provided, value should correspond to:
(indexer != -1, (indexer != -1).any())
If not provided, it will be computed internally if necessary
allow_fill : boolean, default True
If False, indexer is assumed to contain no -1 values so no filling
will be done. This short-circuits computation of a mask. Result is
undefined if allow_fill == False and -1 is present in indexer.
"""
if indexer is None:
indexer = np.arange(arr.shape[axis], dtype=np.int64)
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
indexer = _ensure_int64(indexer)
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
mask, needs_masking = mask_info
else:
mask = indexer == -1
needs_masking = mask.any()
mask_info = mask, needs_masking
if needs_masking:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
flip_order = False
if arr.ndim == 2:
if arr.flags.f_contiguous:
flip_order = True
if flip_order:
arr = arr.T
axis = arr.ndim - axis - 1
if out is not None:
out = out.T
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = list(arr.shape)
out_shape[axis] = len(indexer)
out_shape = tuple(out_shape)
if arr.flags.f_contiguous and axis == arr.ndim - 1:
# minor tweak that can make an order-of-magnitude difference
# for dataframes initialized directly from 2-d ndarrays
# (s.t. df.values is c-contiguous and df._data.blocks[0] is its
# f-contiguous transpose)
out = np.empty(out_shape, dtype=dtype, order='F')
else:
out = np.empty(out_shape, dtype=dtype)
func = _get_take_nd_function(arr.ndim, arr.dtype, out.dtype,
axis=axis, mask_info=mask_info)
func(arr, indexer, out, fill_value)
if flip_order:
out = out.T
return out
take_1d = take_nd
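# A minimal usage sketch (illustrative only): positions indexed by -1 are
# filled with fill_value, promoting the dtype when necessary (here int64 ->
# float64 so that NaN can be represented).
def _example_take_nd():  # pragma: no cover - illustrative only
    arr = np.array([10, 20, 30], dtype=np.int64)
    return take_nd(arr, np.array([0, 2, -1]))  # [10.0, 30.0, nan]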
def take_2d_multi(arr, indexer, out=None, fill_value=np.nan,
mask_info=None, allow_fill=True):
"""
Specialized Cython take which sets NaN values in one pass
"""
if indexer is None or (indexer[0] is None and indexer[1] is None):
row_idx = np.arange(arr.shape[0], dtype=np.int64)
col_idx = np.arange(arr.shape[1], dtype=np.int64)
indexer = row_idx, col_idx
dtype, fill_value = arr.dtype, arr.dtype.type()
else:
row_idx, col_idx = indexer
if row_idx is None:
row_idx = np.arange(arr.shape[0], dtype=np.int64)
else:
row_idx = _ensure_int64(row_idx)
if col_idx is None:
col_idx = np.arange(arr.shape[1], dtype=np.int64)
else:
col_idx = _ensure_int64(col_idx)
indexer = row_idx, col_idx
if not allow_fill:
dtype, fill_value = arr.dtype, arr.dtype.type()
mask_info = None, False
else:
# check for promotion based on types only (do this first because
# it's faster than computing a mask)
dtype, fill_value = _maybe_promote(arr.dtype, fill_value)
if dtype != arr.dtype and (out is None or out.dtype != dtype):
# check if promotion is actually required based on indexer
if mask_info is not None:
(row_mask, col_mask), (row_needs, col_needs) = mask_info
else:
row_mask = row_idx == -1
col_mask = col_idx == -1
row_needs = row_mask.any()
col_needs = col_mask.any()
mask_info = (row_mask, col_mask), (row_needs, col_needs)
if row_needs or col_needs:
if out is not None and out.dtype != dtype:
raise TypeError('Incompatible type for fill_value')
else:
# if not, then depromote, set fill_value to dummy
# (it won't be used but we don't want the cython code
# to crash when trying to cast it to dtype)
dtype, fill_value = arr.dtype, arr.dtype.type()
# at this point, it's guaranteed that dtype can hold both the arr values
# and the fill_value
if out is None:
out_shape = len(row_idx), len(col_idx)
out = np.empty(out_shape, dtype=dtype)
func = _take_2d_multi_dict.get((arr.dtype.name, out.dtype.name), None)
if func is None and arr.dtype != out.dtype:
func = _take_2d_multi_dict.get((out.dtype.name, out.dtype.name), None)
if func is not None:
func = _convert_wrapper(func, out.dtype)
if func is None:
def func(arr, indexer, out, fill_value=np.nan):
_take_2d_multi_generic(arr, indexer, out,
fill_value=fill_value, mask_info=mask_info)
func(arr, indexer, out=out, fill_value=fill_value)
return out
_diff_special = {
'float64': algos.diff_2d_float64,
'float32': algos.diff_2d_float32,
'int64': algos.diff_2d_int64,
'int32': algos.diff_2d_int32,
'int16': algos.diff_2d_int16,
'int8': algos.diff_2d_int8,
}
def diff(arr, n, axis=0):
""" difference of n between self,
        analogous to s - s.shift(n) """
n = int(n)
dtype = arr.dtype
na = np.nan
if is_timedelta64_dtype(arr) or is_datetime64_dtype(arr):
dtype = 'timedelta64[ns]'
arr = arr.view('i8')
na = tslib.iNaT
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif issubclass(dtype.type, np.bool_):
dtype = np.object_
out_arr = np.empty(arr.shape, dtype=dtype)
na_indexer = [slice(None)] * arr.ndim
na_indexer[axis] = slice(None, n) if n >= 0 else slice(n, None)
out_arr[tuple(na_indexer)] = na
if arr.ndim == 2 and arr.dtype.name in _diff_special:
f = _diff_special[arr.dtype.name]
f(arr, out_arr, n, axis)
else:
res_indexer = [slice(None)] * arr.ndim
res_indexer[axis] = slice(n, None) if n >= 0 else slice(None, n)
res_indexer = tuple(res_indexer)
lag_indexer = [slice(None)] * arr.ndim
lag_indexer[axis] = slice(None, -n) if n > 0 else slice(-n, None)
lag_indexer = tuple(lag_indexer)
# need to make sure that we account for na for datelike/timedelta
# we don't actually want to subtract these i8 numbers
if dtype == 'timedelta64[ns]':
res = arr[res_indexer]
lag = arr[lag_indexer]
mask = (arr[res_indexer] == na) | (arr[lag_indexer] == na)
if mask.any():
res = res.copy()
res[mask] = 0
lag = lag.copy()
lag[mask] = 0
result = res - lag
result[mask] = na
out_arr[res_indexer] = result
else:
out_arr[res_indexer] = arr[res_indexer] - arr[lag_indexer]
return out_arr
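# A minimal usage sketch (illustrative only): integer input is upcast to
# float64 so the leading NaN(s) introduced by the shift can be represented.
def _example_diff():  # pragma: no cover - illustrative only
    arr = np.array([1, 3, 6], dtype=np.int64)
    return diff(arr, 1)  # [nan, 2.0, 3.0]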
def _coerce_to_dtypes(result, dtypes):
""" given a dtypes and a result set, coerce the result elements to the
dtypes
"""
if len(result) != len(dtypes):
raise AssertionError("_coerce_to_dtypes requires equal len arrays")
from pandas.tseries.timedeltas import _coerce_scalar_to_timedelta_type
def conv(r, dtype):
try:
if isnull(r):
pass
elif dtype == _NS_DTYPE:
r = lib.Timestamp(r)
elif dtype == _TD_DTYPE:
r = _coerce_scalar_to_timedelta_type(r)
elif dtype == np.bool_:
# messy. non 0/1 integers do not get converted.
if is_integer(r) and r not in [0,1]:
return int(r)
r = bool(r)
elif dtype.kind == 'f':
r = float(r)
elif dtype.kind == 'i':
r = int(r)
except:
pass
return r
return [conv(r, dtype) for r, dtype in zip(result, dtypes)]
def _infer_dtype_from_scalar(val):
""" interpret the dtype from a scalar, upcast floats and ints
return the new value and the dtype """
dtype = np.object_
# a 1-element ndarray
if isinstance(val, pa.Array):
if val.ndim != 0:
raise ValueError(
"invalid ndarray passed to _infer_dtype_from_scalar")
dtype = val.dtype
val = val.item()
elif isinstance(val, compat.string_types):
# If we create an empty array using a string to infer
# the dtype, NumPy will only allocate one character per entry
        # so this is kind of bad. Alternatively we could use np.repeat
        # instead of np.empty (but then you still don't want things
        # coming out as np.str_!)
dtype = np.object_
elif isinstance(val, (np.datetime64, datetime)) and getattr(val,'tz',None) is None:
val = lib.Timestamp(val).value
dtype = np.dtype('M8[ns]')
elif isinstance(val, (np.timedelta64, timedelta)):
val = tslib.convert_to_timedelta(val,'ns')
dtype = np.dtype('m8[ns]')
elif is_bool(val):
dtype = np.bool_
    # provide implicit upcast on scalars
elif is_integer(val):
dtype = np.int64
elif is_float(val):
dtype = np.float64
elif is_complex(val):
dtype = np.complex_
return dtype, val
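# A minimal usage sketch (illustrative only): scalars are mapped to upcast
# numpy dtypes, while strings fall back to object.
def _example_infer_dtype_from_scalar():  # pragma: no cover - illustrative only
    return (_infer_dtype_from_scalar(3),      # (np.int64, 3)
            _infer_dtype_from_scalar(3.5),    # (np.float64, 3.5)
            _infer_dtype_from_scalar('abc'))  # (np.object_, 'abc')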
def _maybe_cast_scalar(dtype, value):
""" if we a scalar value and are casting to a dtype that needs nan -> NaT
conversion
"""
if np.isscalar(value) and dtype in _DATELIKE_DTYPES and isnull(value):
return tslib.iNaT
return value
def _maybe_promote(dtype, fill_value=np.nan):
# if we passed an array here, determine the fill value by dtype
if isinstance(fill_value, np.ndarray):
if issubclass(fill_value.dtype.type, (np.datetime64, np.timedelta64)):
fill_value = tslib.iNaT
else:
# we need to change to object type as our
# fill_value is of object type
if fill_value.dtype == np.object_:
dtype = np.dtype(np.object_)
fill_value = np.nan
# returns tuple of (dtype, fill_value)
if issubclass(dtype.type, (np.datetime64, np.timedelta64)):
# for now: refuse to upcast datetime64
# (this is because datetime64 will not implicitly upconvert
# to object correctly as of numpy 1.6.1)
if isnull(fill_value):
fill_value = tslib.iNaT
else:
if issubclass(dtype.type, np.datetime64):
try:
fill_value = lib.Timestamp(fill_value).value
except:
# the proper thing to do here would probably be to upcast
# to object (but numpy 1.6.1 doesn't do this properly)
fill_value = tslib.iNaT
else:
fill_value = tslib.iNaT
elif is_float(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
dtype = np.float64
elif is_bool(fill_value):
if not issubclass(dtype.type, np.bool_):
dtype = np.object_
elif is_integer(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, np.integer):
# upcast to prevent overflow
arr = np.asarray(fill_value)
if arr != arr.astype(dtype):
dtype = arr.dtype
elif is_complex(fill_value):
if issubclass(dtype.type, np.bool_):
dtype = np.object_
elif issubclass(dtype.type, (np.integer, np.floating)):
dtype = np.complex128
else:
dtype = np.object_
# in case we have a string that looked like a number
if issubclass(np.dtype(dtype).type, compat.string_types):
dtype = np.object_
return dtype, fill_value
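# A minimal usage sketch (illustrative only): int64 cannot hold NaN so the
# dtype is promoted to float64, while datetime64[ns] keeps its dtype and the
# missing value is mapped to iNaT.
def _example_maybe_promote():  # pragma: no cover - illustrative only
    return (_maybe_promote(np.dtype('int64')),           # (float64, nan)
            _maybe_promote(np.dtype('M8[ns]'), np.nan))  # (M8[ns], iNaT)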
def _maybe_upcast_putmask(result, mask, other, dtype=None, change=None):
""" a safe version of put mask that (potentially upcasts the result
return the result
if change is not None, then MUTATE the change (and change the dtype)
return a changed flag
"""
if mask.any():
other = _maybe_cast_scalar(result.dtype, other)
def changeit():
# try to directly set by expanding our array to full
# length of the boolean
try:
om = other[mask]
om_at = om.astype(result.dtype)
if (om == om_at).all():
new_other = result.values.copy()
new_other[mask] = om_at
result[:] = new_other
return result, False
except:
pass
# we are forced to change the dtype of the result as the input
# isn't compatible
r, fill_value = _maybe_upcast(
result, fill_value=other, dtype=dtype, copy=True)
np.putmask(r, mask, other)
# we need to actually change the dtype here
if change is not None:
# if we are trying to do something unsafe
# like put a bigger dtype in a smaller one, use the smaller one
# pragma: no cover
if change.dtype.itemsize < r.dtype.itemsize:
raise AssertionError(
"cannot change dtype of input to smaller size")
change.dtype = r.dtype
change[:] = r
return r, True
# we want to decide whether putmask will work
# if we have nans in the False portion of our mask then we need to
        # upcast (possibly); otherwise we DON'T want to upcast (e.g. if we
        # have values, say integers, in the success portion then it's ok not
        # to upcast)
new_dtype, fill_value = _maybe_promote(result.dtype, other)
if new_dtype != result.dtype:
# we have a scalar or len 0 ndarray
# and its nan and we are changing some values
if (np.isscalar(other) or
(isinstance(other, np.ndarray) and other.ndim < 1)):
if isnull(other):
return changeit()
# we have an ndarray and the masking has nans in it
else:
if isnull(other[mask]).any():
return changeit()
try:
np.putmask(result, mask, other)
except:
return changeit()
return result, False
def _maybe_upcast(values, fill_value=np.nan, dtype=None, copy=False):
""" provide explict type promotion and coercion
Parameters
----------
values : the ndarray that we want to maybe upcast
fill_value : what we want to fill with
dtype : if None, then use the dtype of the values, else coerce to this type
copy : if True always make a copy even if no upcast is required
"""
if dtype is None:
dtype = values.dtype
new_dtype, fill_value = _maybe_promote(dtype, fill_value)
if new_dtype != values.dtype:
values = values.astype(new_dtype)
elif copy:
values = values.copy()
return values, fill_value
def _possibly_cast_item(obj, item, dtype):
chunk = obj[item]
if chunk.values.dtype != dtype:
if dtype in (np.object_, np.bool_):
obj[item] = chunk.astype(np.object_)
elif not issubclass(dtype, (np.integer, np.bool_)): # pragma: no cover
raise ValueError("Unexpected dtype encountered: %s" % dtype)
def _possibly_downcast_to_dtype(result, dtype):
""" try to cast to the specified dtype (e.g. convert back to bool/int
or could be an astype of float64->float32
"""
if np.isscalar(result):
return result
trans = lambda x: x
if isinstance(dtype, compat.string_types):
if dtype == 'infer':
inferred_type = lib.infer_dtype(_ensure_object(result.ravel()))
if inferred_type == 'boolean':
dtype = 'bool'
elif inferred_type == 'integer':
dtype = 'int64'
elif inferred_type == 'datetime64':
dtype = 'datetime64[ns]'
elif inferred_type == 'timedelta64':
dtype = 'timedelta64[ns]'
# try to upcast here
elif inferred_type == 'floating':
dtype = 'int64'
if issubclass(result.dtype.type, np.number):
trans = lambda x: x.round()
else:
dtype = 'object'
if isinstance(dtype, compat.string_types):
dtype = np.dtype(dtype)
try:
# don't allow upcasts here (except if empty)
if dtype.kind == result.dtype.kind:
if result.dtype.itemsize <= dtype.itemsize and np.prod(result.shape):
return result
if issubclass(dtype.type, np.floating):
return result.astype(dtype)
elif dtype == np.bool_ or issubclass(dtype.type, np.integer):
# if we don't have any elements, just astype it
if not np.prod(result.shape):
return trans(result).astype(dtype)
# do a test on the first element, if it fails then we are done
r = result.ravel()
arr = np.array([r[0]])
if not np.allclose(arr, trans(arr).astype(dtype)):
return result
# a comparable, e.g. a Decimal may slip in here
elif not isinstance(r[0], (np.integer, np.floating, np.bool, int,
float, bool)):
return result
if (issubclass(result.dtype.type, (np.object_, np.number)) and
notnull(result).all()):
new_result = trans(result).astype(dtype)
try:
if np.allclose(new_result, result):
return new_result
except:
# comparison of an object dtype with a number type could
# hit here
if (new_result == result).all():
return new_result
# a datetimelike
elif dtype.kind in ['M','m'] and result.dtype.kind in ['i']:
try:
result = result.astype(dtype)
except:
pass
except:
pass
return result
def _lcd_dtypes(a_dtype, b_dtype):
""" return the lcd dtype to hold these types """
if is_datetime64_dtype(a_dtype) or is_datetime64_dtype(b_dtype):
return _NS_DTYPE
elif is_timedelta64_dtype(a_dtype) or is_timedelta64_dtype(b_dtype):
return _TD_DTYPE
elif is_complex_dtype(a_dtype):
if is_complex_dtype(b_dtype):
return a_dtype
return np.float64
elif is_integer_dtype(a_dtype):
if is_integer_dtype(b_dtype):
if a_dtype.itemsize == b_dtype.itemsize:
return a_dtype
return np.int64
return np.float64
elif is_float_dtype(a_dtype):
if is_float_dtype(b_dtype):
if a_dtype.itemsize == b_dtype.itemsize:
return a_dtype
else:
return np.float64
elif is_integer(b_dtype):
return np.float64
return np.object
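# A minimal usage sketch (illustrative only): mixing integer and float dtypes
# requires float64 to hold both without loss.
def _example_lcd_dtypes():  # pragma: no cover - illustrative only
    return _lcd_dtypes(np.dtype('int64'), np.dtype('float64'))  # float64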
def _fill_zeros(result, x, y, name, fill):
"""
    If this is a reversed op, then flip x, y.
    If we have an integer value (or array) in y and it contains 0's,
    fill them with the fill value, mask the NaNs from x, and
    return the result.
"""
if fill is not None:
if name.startswith('r'):
x,y = y,x
if not isinstance(y, np.ndarray):
dtype, value = _infer_dtype_from_scalar(y)
y = pa.empty(result.shape, dtype=dtype)
y.fill(value)
if is_integer_dtype(y):
if (y.ravel() == 0).any():
shape = result.shape
result = result.ravel().astype('float64')
# GH 7325, mask and nans must be broadcastable
signs = np.sign(result)
mask = ((y == 0) & ~np.isnan(x)).ravel()
np.putmask(result, mask, fill)
# if we have a fill of inf, then sign it
# correctly
# GH 6178
if np.isinf(fill):
                    np.putmask(result, (signs < 0) & mask, -fill)
result = result.reshape(shape)
return result
def _interp_wrapper(f, wrap_dtype, na_override=None):
def wrapper(arr, mask, limit=None):
view = arr.view(wrap_dtype)
f(view, mask, limit=limit)
return wrapper
_pad_1d_datetime = _interp_wrapper(algos.pad_inplace_int64, np.int64)
_pad_2d_datetime = _interp_wrapper(algos.pad_2d_inplace_int64, np.int64)
_backfill_1d_datetime = _interp_wrapper(algos.backfill_inplace_int64,
np.int64)
_backfill_2d_datetime = _interp_wrapper(algos.backfill_2d_inplace_int64,
np.int64)
def pad_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _pad_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
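# A minimal usage sketch (illustrative only): pad_1d forward-fills missing
# values in place and also returns the (same) array.
def _example_pad_1d():  # pragma: no cover - illustrative only
    values = np.array([1.0, np.nan, np.nan, 4.0])
    return pad_1d(values)  # [1.0, 1.0, 1.0, 4.0]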
def backfill_1d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _backfill_1d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_1d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
_method(values, mask, limit=limit)
return values
def pad_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'pad_2d_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _pad_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.pad_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.pad_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for pad_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def backfill_2d(values, limit=None, mask=None, dtype=None):
if dtype is None:
dtype = values.dtype
_method = None
if is_float_dtype(values):
_method = getattr(algos, 'backfill_2d_inplace_%s' % dtype.name, None)
elif dtype in _DATELIKE_DTYPES or is_datetime64_dtype(values):
_method = _backfill_2d_datetime
elif is_integer_dtype(values):
values = _ensure_float64(values)
_method = algos.backfill_2d_inplace_float64
elif values.dtype == np.object_:
_method = algos.backfill_2d_inplace_object
if _method is None:
raise ValueError('Invalid dtype for backfill_2d [%s]' % dtype.name)
if mask is None:
mask = isnull(values)
mask = mask.view(np.uint8)
if np.all(values.shape):
_method(values, mask, limit=limit)
else:
# for test coverage
pass
return values
def _clean_interp_method(method, order=None, **kwargs):
valid = ['linear', 'time', 'index', 'values', 'nearest', 'zero', 'slinear',
'quadratic', 'cubic', 'barycentric', 'polynomial',
'krogh', 'piecewise_polynomial',
'pchip', 'spline']
if method in ('spline', 'polynomial') and order is None:
raise ValueError("You must specify the order of the spline or "
"polynomial.")
if method not in valid:
raise ValueError("method must be one of {0}."
"Got '{1}' instead.".format(valid, method))
return method
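# A minimal usage sketch (illustrative only): most methods pass straight
# through, but 'spline' and 'polynomial' require an explicit order.
def _example_clean_interp_method():  # pragma: no cover - illustrative only
    method = _clean_interp_method('linear')  # 'linear'
    try:
        _clean_interp_method('spline')
    except ValueError:
        pass  # raised because no order was given
    return method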
def interpolate_1d(xvalues, yvalues, method='linear', limit=None,
fill_value=None, bounds_error=False, **kwargs):
"""
Logic for the 1-d interpolation. The result should be 1-d, inputs
xvalues and yvalues will each be 1-d arrays of the same length.
Bounds_error is currently hardcoded to False since non-scipy ones don't
    take it as an argument.
"""
# Treat the original, non-scipy methods first.
invalid = isnull(yvalues)
valid = ~invalid
valid_y = yvalues[valid]
valid_x = xvalues[valid]
new_x = xvalues[invalid]
if method == 'time':
if not getattr(xvalues, 'is_all_dates', None):
# if not issubclass(xvalues.dtype.type, np.datetime64):
raise ValueError('time-weighted interpolation only works '
'on Series or DataFrames with a '
'DatetimeIndex')
method = 'values'
def _interp_limit(invalid, limit):
"""mask off values that won't be filled since they exceed the limit"""
all_nans = np.where(invalid)[0]
violate = [invalid[x:x + limit + 1] for x in all_nans]
violate = np.array([x.all() & (x.size > limit) for x in violate])
return all_nans[violate] + limit
xvalues = getattr(xvalues, 'values', xvalues)
yvalues = getattr(yvalues, 'values', yvalues)
if limit:
violate_limit = _interp_limit(invalid, limit)
if valid.any():
firstIndex = valid.argmax()
valid = valid[firstIndex:]
invalid = invalid[firstIndex:]
result = yvalues.copy()
if valid.all():
return yvalues
else:
# have to call np.array(xvalues) since xvalues could be an Index
        # which can't be mutated
result = np.empty_like(np.array(xvalues), dtype=np.float64)
result.fill(np.nan)
return result
if method in ['linear', 'time', 'index', 'values']:
if method in ('values', 'index'):
inds = np.asarray(xvalues)
# hack for DatetimeIndex, #1646
if issubclass(inds.dtype.type, np.datetime64):
inds = inds.view(pa.int64)
if inds.dtype == np.object_:
inds = lib.maybe_convert_objects(inds)
else:
inds = xvalues
inds = inds[firstIndex:]
result[firstIndex:][invalid] = np.interp(inds[invalid], inds[valid],
yvalues[firstIndex:][valid])
if limit:
result[violate_limit] = np.nan
return result
sp_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'barycentric', 'krogh', 'spline', 'polynomial',
'piecewise_polynomial', 'pchip']
if method in sp_methods:
new_x = new_x[firstIndex:]
xvalues = xvalues[firstIndex:]
result[firstIndex:][invalid] = _interpolate_scipy_wrapper(
valid_x, valid_y, new_x, method=method, fill_value=fill_value,
bounds_error=bounds_error, **kwargs)
if limit:
result[violate_limit] = np.nan
return result
def _interpolate_scipy_wrapper(x, y, new_x, method, fill_value=None,
bounds_error=False, order=None, **kwargs):
"""
passed off to scipy.interpolate.interp1d. method is scipy's kind.
Returns an array interpolated at new_x. Add any new methods to
the list in _clean_interp_method
"""
try:
from scipy import interpolate
from pandas import DatetimeIndex
except ImportError:
raise ImportError('{0} interpolation requires Scipy'.format(method))
new_x = np.asarray(new_x)
# ignores some kwargs that could be passed along.
alt_methods = {
'barycentric': interpolate.barycentric_interpolate,
'krogh': interpolate.krogh_interpolate,
'piecewise_polynomial': interpolate.piecewise_polynomial_interpolate,
}
if getattr(x, 'is_all_dates', False):
        # GH 5975, scipy.interp1d can't handle datetime64s
x, new_x = x.values.astype('i8'), new_x.astype('i8')
try:
alt_methods['pchip'] = interpolate.pchip_interpolate
except AttributeError:
if method == 'pchip':
raise ImportError("Your version of scipy does not support "
"PCHIP interpolation.")
interp1d_methods = ['nearest', 'zero', 'slinear', 'quadratic', 'cubic',
'polynomial']
if method in interp1d_methods:
if method == 'polynomial':
method = order
terp = interpolate.interp1d(x, y, kind=method, fill_value=fill_value,
bounds_error=bounds_error)
new_y = terp(new_x)
elif method == 'spline':
terp = interpolate.UnivariateSpline(x, y, k=order)
new_y = terp(new_x)
else:
# GH 7295: need to be able to write for some reason
# in some circumstances: check all three
if not x.flags.writeable:
x = x.copy()
if not y.flags.writeable:
y = y.copy()
if not new_x.flags.writeable:
new_x = new_x.copy()
method = alt_methods[method]
new_y = method(x, y, new_x)
return new_y
def interpolate_2d(values, method='pad', axis=0, limit=None, fill_value=None, dtype=None):
""" perform an actual interpolation of values, values will be make 2-d if
needed fills inplace, returns the result
"""
transf = (lambda x: x) if axis == 0 else (lambda x: x.T)
# reshape a 1 dim if needed
ndim = values.ndim
if values.ndim == 1:
if axis != 0: # pragma: no cover
raise AssertionError("cannot interpolate on a ndim == 1 with "
"axis != 0")
values = values.reshape(tuple((1,) + values.shape))
if fill_value is None:
mask = None
else: # todo create faster fill func without masking
mask = mask_missing(transf(values), fill_value)
method = _clean_fill_method(method)
if method == 'pad':
values = transf(pad_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
else:
values = transf(backfill_2d(transf(values), limit=limit, mask=mask, dtype=dtype))
# reshape back
if ndim == 1:
values = values[0]
return values
def _consensus_name_attr(objs):
name = objs[0].name
for obj in objs[1:]:
if obj.name != name:
return None
return name
_fill_methods = {'pad': pad_1d, 'backfill': backfill_1d}
def _get_fill_func(method):
method = _clean_fill_method(method)
return _fill_methods[method]
#----------------------------------------------------------------------
# Lots of little utilities
def _validate_date_like_dtype(dtype):
try:
typ = np.datetime_data(dtype)[0]
except ValueError as e:
raise TypeError('%s' % e)
if typ != 'generic' and typ != 'ns':
raise ValueError('%r is too specific of a frequency, try passing %r'
% (dtype.name, dtype.type.__name__))
def _invalidate_string_dtypes(dtype_set):
"""Change string like dtypes to object for ``DataFrame.select_dtypes()``."""
non_string_dtypes = dtype_set - _string_dtypes
if non_string_dtypes != dtype_set:
raise TypeError("string dtypes are not allowed, use 'object' instead")
def _get_dtype_from_object(dtype):
"""Get a numpy dtype.type-style object.
Notes
-----
If nothing can be found, returns ``object``.
"""
# type object from a dtype
if isinstance(dtype, type) and issubclass(dtype, np.generic):
return dtype
elif isinstance(dtype, np.dtype): # dtype object
try:
_validate_date_like_dtype(dtype)
except TypeError:
# should still pass if we don't have a datelike
pass
return dtype.type
elif isinstance(dtype, compat.string_types):
if dtype == 'datetime' or dtype == 'timedelta':
dtype += '64'
elif dtype == 'category':
return CategoricalDtypeType
try:
return _get_dtype_from_object(getattr(np, dtype))
except AttributeError:
# handles cases like _get_dtype(int)
# i.e., python objects that are valid dtypes (unlike user-defined
# types, in general)
pass
return _get_dtype_from_object(np.dtype(dtype))
def _get_info_slice(obj, indexer):
"""Slice the info axis of `obj` with `indexer`."""
if not hasattr(obj, '_info_axis_number'):
raise TypeError('object of type %r has no info axis' %
type(obj).__name__)
slices = [slice(None)] * obj.ndim
slices[obj._info_axis_number] = indexer
return tuple(slices)
def _maybe_box(indexer, values, obj, key):
# if we have multiples coming back, box em
if isinstance(values, np.ndarray):
return obj[indexer.get_loc(key)]
# return the value
return values
def _maybe_box_datetimelike(value):
# turn a datetime like into a Timestamp/timedelta as needed
if isinstance(value, np.datetime64):
value = tslib.Timestamp(value)
elif isinstance(value, np.timedelta64):
pass
return value
_values_from_object = lib.values_from_object
def _possibly_convert_objects(values, convert_dates=True,
convert_numeric=True,
convert_timedeltas=True):
""" if we have an object dtype, try to coerce dates and/or numbers """
# if we have passed in a list or scalar
if isinstance(values, (list, tuple)):
values = np.array(values, dtype=np.object_)
if not hasattr(values, 'dtype'):
values = np.array([values], dtype=np.object_)
# convert dates
if convert_dates and values.dtype == np.object_:
# we take an aggressive stance and convert to datetime64[ns]
if convert_dates == 'coerce':
new_values = _possibly_cast_to_datetime(
values, 'M8[ns]', coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_datetime=convert_dates)
# convert timedeltas
if convert_timedeltas and values.dtype == np.object_:
if convert_timedeltas == 'coerce':
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
            new_values = _possibly_cast_to_timedelta(values, coerce=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
else:
values = lib.maybe_convert_objects(
values, convert_timedelta=convert_timedeltas)
# convert to numeric
if values.dtype == np.object_:
if convert_numeric:
try:
new_values = lib.maybe_convert_numeric(
values, set(), coerce_numeric=True)
# if we are all nans then leave me alone
if not isnull(new_values).all():
values = new_values
except:
pass
else:
# soft-conversion
values = lib.maybe_convert_objects(values)
return values
def _possibly_castable(arr):
# return False to force a non-fastpath
# check datetime64[ns]/timedelta64[ns] are valid
# otherwise try to coerce
kind = arr.dtype.kind
if kind == 'M' or kind == 'm':
return arr.dtype in _DATELIKE_DTYPES
return arr.dtype.name not in _POSSIBLY_CAST_DTYPES
def _possibly_convert_platform(values):
""" try to do platform conversion, allow ndarray or list here """
if isinstance(values, (list, tuple)):
values = lib.list_to_object_array(values)
if getattr(values, 'dtype', None) == np.object_:
if hasattr(values, 'values'):
values = values.values
values = lib.maybe_convert_objects(values)
return values
def _possibly_cast_to_datetime(value, dtype, coerce=False):
""" try to cast the array/value to a datetimelike dtype, converting float
nan to iNaT
"""
if dtype is not None:
if isinstance(dtype, compat.string_types):
dtype = np.dtype(dtype)
is_datetime64 = is_datetime64_dtype(dtype)
is_timedelta64 = is_timedelta64_dtype(dtype)
if is_datetime64 or is_timedelta64:
# force the dtype if needed
if is_datetime64 and dtype != _NS_DTYPE:
if dtype.name == 'datetime64[ns]':
dtype = _NS_DTYPE
else:
raise TypeError(
"cannot convert datetimelike to dtype [%s]" % dtype)
elif is_timedelta64 and dtype != _TD_DTYPE:
if dtype.name == 'timedelta64[ns]':
dtype = _TD_DTYPE
else:
raise TypeError(
"cannot convert timedeltalike to dtype [%s]" % dtype)
if np.isscalar(value):
if value == tslib.iNaT or isnull(value):
value = tslib.iNaT
else:
value = np.array(value,copy=False)
# have a scalar array-like (e.g. NaT)
if value.ndim == 0:
value = tslib.iNaT
# we have an array of datetime or timedeltas & nulls
elif np.prod(value.shape) and value.dtype != dtype:
try:
if is_datetime64:
from pandas.tseries.tools import to_datetime
value = to_datetime(value, coerce=coerce).values
elif is_timedelta64:
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
value = _possibly_cast_to_timedelta(value, coerce='compat', dtype=dtype)
except:
pass
else:
is_array = isinstance(value, np.ndarray)
# catch a datetime/timedelta that is not of ns variety
# and no coercion specified
if (is_array and value.dtype.kind in ['M','m']):
dtype = value.dtype
if dtype.kind == 'M' and dtype != _NS_DTYPE:
value = value.astype(_NS_DTYPE)
elif dtype.kind == 'm' and dtype != _TD_DTYPE:
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
value = _possibly_cast_to_timedelta(value, coerce='compat')
# only do this if we have an array and the dtype of the array is not
# setup already we are not an integer/object, so don't bother with this
# conversion
elif (is_array and not (
issubclass(value.dtype.type, np.integer) or
value.dtype == np.object_)):
pass
# try to infer if we have a datetimelike here
# otherwise pass thru
else:
value = _possibly_infer_to_datetimelike(value)
return value
def _possibly_infer_to_datetimelike(value):
# we might have a array (or single object) that is datetime like,
# and no dtype is passed don't change the value unless we find a
# datetime/timedelta set
# this is pretty strict in that a datetime/timedelta is REQUIRED
# in addition to possible nulls/string likes
# ONLY strings are NOT datetimelike
v = value
if not is_list_like(v):
v = [v]
v = np.array(v,copy=False)
shape = v.shape
if not v.ndim == 1:
v = v.ravel()
if len(v):
def _try_datetime(v):
# safe coerce to datetime64
try:
return tslib.array_to_datetime(v, raise_=True).reshape(shape)
except:
return v
def _try_timedelta(v):
# safe coerce to timedelta64
# will try first with a string & object conversion
from pandas.tseries.timedeltas import to_timedelta
try:
return to_timedelta(v).values.reshape(shape)
except:
# this is for compat with numpy < 1.7
# but string-likes will fail here
from pandas.tseries.timedeltas import \
_possibly_cast_to_timedelta
try:
return _possibly_cast_to_timedelta(v, coerce='compat').reshape(shape)
except:
return v
# do a quick inference for perf
sample = v[:min(3,len(v))]
inferred_type = lib.infer_dtype(sample)
if inferred_type in ['datetime', 'datetime64']:
value = _try_datetime(v)
elif inferred_type in ['timedelta', 'timedelta64']:
value = _try_timedelta(v)
# its possible to have nulls intermixed within the datetime or timedelta
# these will in general have an inferred_type of 'mixed', so have to try
# both datetime and timedelta
# try timedelta first to avoid spurious datetime conversions
# e.g. '00:00:01' is a timedelta but technically is also a datetime
elif inferred_type in ['mixed']:
if lib.is_possible_datetimelike_array(_ensure_object(v)):
value = _try_timedelta(v)
if lib.infer_dtype(value) in ['mixed']:
value = _try_datetime(v)
return value
def _is_bool_indexer(key):
if isinstance(key, (ABCSeries, np.ndarray)):
if key.dtype == np.object_:
key = np.asarray(_values_from_object(key))
if not lib.is_bool_array(key):
if isnull(key).any():
raise ValueError('cannot index with vector containing '
'NA / NaN values')
return False
return True
elif key.dtype == np.bool_:
return True
elif isinstance(key, list):
try:
arr = np.asarray(key)
return arr.dtype == np.bool_ and len(arr) == len(key)
except TypeError: # pragma: no cover
return False
return False
def _default_index(n):
from pandas.core.index import Int64Index
values = np.arange(n, dtype=np.int64)
result = values.view(Int64Index)
result.name = None
result.is_unique = True
return result
def ensure_float(arr):
if issubclass(arr.dtype.type, (np.integer, np.bool_)):
arr = arr.astype(float)
return arr
def _mut_exclusive(**kwargs):
item1, item2 = kwargs.items()
label1, val1 = item1
label2, val2 = item2
if val1 is not None and val2 is not None:
raise TypeError('mutually exclusive arguments: %r and %r' %
(label1, label2))
elif val1 is not None:
return val1
else:
return val2
def _any_none(*args):
for arg in args:
if arg is None:
return True
return False
def _all_not_none(*args):
for arg in args:
if arg is None:
return False
return True
def _try_sort(iterable):
listed = list(iterable)
try:
return sorted(listed)
except Exception:
return listed
def _count_not_none(*args):
return sum(x is not None for x in args)
#------------------------------------------------------------------------------
# miscellaneous python tools
def rands(n):
"""Generates a random alphanumeric string of length *n*"""
from random import Random
import string
return ''.join(Random().sample(string.ascii_letters + string.digits, n))
def adjoin(space, *lists):
"""
Glues together two sets of strings using the amount of space requested.
The idea is to prettify.
"""
out_lines = []
newLists = []
lengths = [max(map(len, x)) + space for x in lists[:-1]]
# not the last one
lengths.append(max(map(len, lists[-1])))
maxLen = max(map(len, lists))
for i, lst in enumerate(lists):
nl = [x.ljust(lengths[i]) for x in lst]
nl.extend([' ' * lengths[i]] * (maxLen - len(lst)))
newLists.append(nl)
toJoin = zip(*newLists)
for lines in toJoin:
out_lines.append(_join_unicode(lines))
return _join_unicode(out_lines, sep='\n')
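# A minimal usage sketch (illustrative only): every column but the last is
# padded with the requested extra spacing before the rows are joined.
def _example_adjoin():  # pragma: no cover - illustrative only
    # two rows, newline separated, with the first column left-justified
    return adjoin(2, ['a', 'bb'], ['ccc', 'd'])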
def _join_unicode(lines, sep=''):
try:
return sep.join(lines)
except UnicodeDecodeError:
sep = compat.text_type(sep)
return sep.join([x.decode('utf-8') if isinstance(x, str) else x
for x in lines])
def iterpairs(seq):
"""
Parameters
----------
seq: sequence
Returns
-------
iterator returning overlapping pairs of elements
Examples
--------
    >>> list(iterpairs([1, 2, 3, 4]))
    [(1, 2), (2, 3), (3, 4)]
"""
# input may not be sliceable
seq_it = iter(seq)
seq_it_next = iter(seq)
next(seq_it_next)
return zip(seq_it, seq_it_next)
def split_ranges(mask):
""" Generates tuples of ranges which cover all True value in mask
>>> list(split_ranges([1,0,0,1,0]))
[(0, 1), (3, 4)]
"""
ranges = [(0, len(mask))]
for pos, val in enumerate(mask):
        if not val:  # this pos should be omitted, split off the prefix range
r = ranges.pop()
if pos > r[0]: # yield non-zero range
yield (r[0], pos)
if pos + 1 < len(mask): # save the rest for processing
ranges.append((pos + 1, len(mask)))
if ranges:
yield ranges[-1]
def indent(string, spaces=4):
dent = ' ' * spaces
return '\n'.join([dent + x for x in string.split('\n')])
def banner(message):
"""
Return 80-char width message declaration with = bars on top and bottom.
"""
bar = '=' * 80
return '%s\n%s\n%s' % (bar, message, bar)
def _long_prod(vals):
result = long(1)
for x in vals:
result *= x
return result
class groupby(dict):
"""
A simple groupby different from the one in itertools.
Does not require the sequence elements to be sorted by keys,
however it is slower.
"""
def __init__(self, seq, key=lambda x: x):
for value in seq:
k = key(value)
self.setdefault(k, []).append(value)
try:
__iter__ = dict.iteritems
except AttributeError: # pragma: no cover
# Python 3
def __iter__(self):
return iter(dict.items(self))
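# A minimal usage sketch (illustrative only): values are grouped by the key
# function, preserving the input order within each group.
def _example_groupby():  # pragma: no cover - illustrative only
    g = groupby(['apple', 'ant', 'bee'], key=lambda s: s[0])
    return g['a'], g['b']  # (['apple', 'ant'], ['bee'])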
def map_indices_py(arr):
"""
Returns a dictionary with (element, index) pairs for each element in the
given array/list
"""
return dict([(x, i) for i, x in enumerate(arr)])
def union(*seqs):
result = set([])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result |= seq
return type(seqs[0])(list(result))
def difference(a, b):
return type(a)(list(set(a) - set(b)))
def intersection(*seqs):
result = set(seqs[0])
for seq in seqs:
if not isinstance(seq, set):
seq = set(seq)
result &= seq
return type(seqs[0])(list(result))
def _asarray_tuplesafe(values, dtype=None):
from pandas.core.index import Index
if not (isinstance(values, (list, tuple))
or hasattr(values, '__array__')):
values = list(values)
elif isinstance(values, Index):
return values.values
if isinstance(values, list) and dtype in [np.object_, object]:
return lib.list_to_object_array(values)
result = np.asarray(values, dtype=dtype)
if issubclass(result.dtype.type, compat.string_types):
result = np.asarray(values, dtype=object)
if result.ndim == 2:
if isinstance(values, list):
return lib.list_to_object_array(values)
else:
# Making a 1D array that safely contains tuples is a bit tricky
# in numpy, leading to the following
try:
result = np.empty(len(values), dtype=object)
result[:] = values
except ValueError:
# we have a list-of-list
result[:] = [tuple(x) for x in values]
return result
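# A minimal usage sketch (illustrative only): a list of tuples stays a 1-D
# object array of tuples instead of being broadcast into a 2-D array.
def _example_asarray_tuplesafe():  # pragma: no cover - illustrative only
    result = _asarray_tuplesafe([(1, 2), (3, 4)])
    return result.shape, result[0]  # ((2,), (1, 2))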
def _index_labels_to_array(labels):
if isinstance(labels, (compat.string_types, tuple)):
labels = [labels]
if not isinstance(labels, (list, np.ndarray)):
try:
labels = list(labels)
except TypeError: # non-iterable
labels = [labels]
labels = _asarray_tuplesafe(labels)
return labels
def _maybe_make_list(obj):
if obj is not None and not isinstance(obj, (tuple, list)):
return [obj]
return obj
is_bool = lib.is_bool
is_integer = lib.is_integer
is_float = lib.is_float
is_complex = lib.is_complex
def is_iterator(obj):
# python 3 generators have __next__ instead of next
return hasattr(obj, 'next') or hasattr(obj, '__next__')
def is_number(obj):
return isinstance(obj, (numbers.Number, np.number))
def _get_dtype(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype
if isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype)
return arr_or_dtype.dtype
def _get_dtype_type(arr_or_dtype):
if isinstance(arr_or_dtype, np.dtype):
return arr_or_dtype.type
if isinstance(arr_or_dtype, type):
return np.dtype(arr_or_dtype).type
return arr_or_dtype.dtype.type
def _is_any_int_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.integer)
def is_integer_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) and
not issubclass(tipo, (np.datetime64, np.timedelta64)))
def _is_int_or_datetime_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, np.integer) or
issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_datetime64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.datetime64)
def is_datetime64_ns_dtype(arr_or_dtype):
tipo = _get_dtype(arr_or_dtype)
return tipo == _NS_DTYPE
def is_timedelta64_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.timedelta64)
def is_timedelta64_ns_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return tipo == _TD_DTYPE
def _is_datetime_or_timedelta_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, (np.datetime64, np.timedelta64))
needs_i8_conversion = _is_datetime_or_timedelta_dtype
def is_numeric_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return (issubclass(tipo, (np.number, np.bool_))
and not issubclass(tipo, (np.datetime64, np.timedelta64)))
def is_float_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.floating)
def _is_floating_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
    return issubclass(tipo, np.floating)
def is_bool_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.bool_)
def is_categorical_dtype(arr_or_dtype):
if hasattr(arr_or_dtype,'dtype'):
arr_or_dtype = arr_or_dtype.dtype
if isinstance(arr_or_dtype, CategoricalDtype):
return True
try:
return arr_or_dtype == 'category'
except:
return False
def is_complex_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.complexfloating)
def is_object_dtype(arr_or_dtype):
tipo = _get_dtype_type(arr_or_dtype)
return issubclass(tipo, np.object_)
def is_re(obj):
return isinstance(obj, re._pattern_type)
def is_re_compilable(obj):
try:
re.compile(obj)
except TypeError:
return False
else:
return True
def is_list_like(arg):
return (hasattr(arg, '__iter__') and
not isinstance(arg, compat.string_and_binary_types))
def _is_sequence(x):
try:
iter(x)
len(x) # it has a length
return not isinstance(x, compat.string_and_binary_types)
except (TypeError, AttributeError):
return False
_string_dtypes = frozenset(map(_get_dtype_from_object, (compat.binary_type,
compat.text_type)))
_ensure_float64 = algos.ensure_float64
_ensure_float32 = algos.ensure_float32
_ensure_int64 = algos.ensure_int64
_ensure_int32 = algos.ensure_int32
_ensure_int16 = algos.ensure_int16
_ensure_int8 = algos.ensure_int8
_ensure_platform_int = algos.ensure_platform_int
_ensure_object = algos.ensure_object
def _astype_nansafe(arr, dtype, copy=True):
""" return a view if copy is False, but
need to be very careful as the result shape could change! """
if not isinstance(dtype, np.dtype):
dtype = np.dtype(dtype)
if is_datetime64_dtype(arr):
if dtype == object:
return tslib.ints_to_pydatetime(arr.view(np.int64))
elif dtype == np.int64:
return arr.view(dtype)
elif dtype != _NS_DTYPE:
raise TypeError("cannot astype a datetimelike from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_NS_DTYPE)
elif is_timedelta64_dtype(arr):
if dtype == np.int64:
return arr.view(dtype)
elif dtype == object:
return arr.astype(object)
# in py3, timedelta64[ns] are int64
elif ((compat.PY3 and dtype not in [_INT64_DTYPE, _TD_DTYPE]) or
(not compat.PY3 and dtype != _TD_DTYPE)):
# allow frequency conversions
if dtype.kind == 'm':
mask = isnull(arr)
result = arr.astype(dtype).astype(np.float64)
result[mask] = np.nan
return result
raise TypeError("cannot astype a timedelta from [%s] to [%s]" %
(arr.dtype, dtype))
return arr.astype(_TD_DTYPE)
elif (np.issubdtype(arr.dtype, np.floating) and
np.issubdtype(dtype, np.integer)):
if np.isnan(arr).any():
raise ValueError('Cannot convert NA to integer')
elif arr.dtype == np.object_ and np.issubdtype(dtype.type, np.integer):
# work around NumPy brokenness, #1987
return lib.astype_intsafe(arr.ravel(), dtype).reshape(arr.shape)
elif issubclass(dtype.type, compat.text_type):
# in Py3 that's str, in Py2 that's unicode
return lib.astype_unicode(arr.ravel()).reshape(arr.shape)
elif issubclass(dtype.type, compat.string_types):
return lib.astype_str(arr.ravel()).reshape(arr.shape)
if copy:
return arr.astype(dtype)
return arr.view(dtype)
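# A minimal usage sketch (illustrative only): casting a float array that
# contains NaN to an integer dtype is refused rather than silently truncated.
def _example_astype_nansafe():  # pragma: no cover - illustrative only
    arr = np.array([1.0, np.nan])
    try:
        _astype_nansafe(arr, np.int64)
    except ValueError:
        pass  # raises 'Cannot convert NA to integer'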
def _clean_fill_method(method):
if method is None:
return None
method = method.lower()
if method == 'ffill':
method = 'pad'
if method == 'bfill':
method = 'backfill'
if method not in ['pad', 'backfill']:
msg = ('Invalid fill method. Expecting pad (ffill) or backfill '
'(bfill). Got %s' % method)
raise ValueError(msg)
return method
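# A minimal usage sketch (illustrative only): the user-facing aliases map to
# the internal method names used by the fill routines above.
def _example_clean_fill_method():  # pragma: no cover - illustrative only
    return _clean_fill_method('ffill'), _clean_fill_method('bfill')
    # ('pad', 'backfill')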
def _all_none(*args):
for arg in args:
if arg is not None:
return False
return True
class UTF8Recoder:
"""
Iterator that reads an encoded stream and reencodes the input to UTF-8
"""
def __init__(self, f, encoding):
self.reader = codecs.getreader(encoding)(f)
def __iter__(self):
return self
def read(self, bytes=-1):
return self.reader.read(bytes).encode('utf-8')
def readline(self):
return self.reader.readline().encode('utf-8')
def next(self):
return next(self.reader).encode("utf-8")
# Python 3 iterator
__next__ = next
def _get_handle(path, mode, encoding=None, compression=None):
"""Gets file handle for given path and mode.
NOTE: Under Python 3.2, getting a compressed file handle means reading in
the entire file, decompressing it and decoding it to ``str`` all at once
and then wrapping it in a StringIO.
"""
if compression is not None:
if encoding is not None and not compat.PY3:
msg = 'encoding + compression not yet supported in Python 2'
raise ValueError(msg)
if compression == 'gzip':
import gzip
f = gzip.GzipFile(path, 'rb')
elif compression == 'bz2':
import bz2
f = bz2.BZ2File(path, 'rb')
else:
raise ValueError('Unrecognized compression type: %s' %
compression)
if compat.PY3_2:
# gzip and bz2 don't work with TextIOWrapper in 3.2
encoding = encoding or get_option('display.encoding')
f = StringIO(f.read().decode(encoding))
elif compat.PY3:
from io import TextIOWrapper
f = TextIOWrapper(f, encoding=encoding)
return f
else:
if compat.PY3:
if encoding:
f = open(path, mode, encoding=encoding)
else:
f = open(path, mode, errors='replace')
else:
f = open(path, mode)
return f
if compat.PY3: # pragma: no cover
def UnicodeReader(f, dialect=csv.excel, encoding="utf-8", **kwds):
# ignore encoding
return csv.reader(f, dialect=dialect, **kwds)
def UnicodeWriter(f, dialect=csv.excel, encoding="utf-8", **kwds):
return csv.writer(f, dialect=dialect, **kwds)
else:
class UnicodeReader:
"""
A CSV reader which will iterate over lines in the CSV file "f",
which is encoded in the given encoding.
On Python 3, this is replaced (below) by csv.reader, which handles
unicode.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
f = UTF8Recoder(f, encoding)
self.reader = csv.reader(f, dialect=dialect, **kwds)
def next(self):
row = next(self.reader)
return [compat.text_type(s, "utf-8") for s in row]
# python 3 iterator
__next__ = next
def __iter__(self): # pragma: no cover
return self
class UnicodeWriter:
"""
A CSV writer which will write rows to CSV file "f",
which is encoded in the given encoding.
"""
def __init__(self, f, dialect=csv.excel, encoding="utf-8", **kwds):
# Redirect output to a queue
self.queue = StringIO()
self.writer = csv.writer(self.queue, dialect=dialect, **kwds)
self.stream = f
self.encoder = codecs.getincrementalencoder(encoding)()
self.quoting = kwds.get("quoting", None)
def writerow(self, row):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
row = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerow([s for s in row])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
def writerows(self, rows):
def _check_as_is(x):
return (self.quoting == csv.QUOTE_NONNUMERIC and
is_number(x)) or isinstance(x, str)
for i, row in enumerate(rows):
rows[i] = [x if _check_as_is(x)
else pprint_thing(x).encode('utf-8') for x in row]
self.writer.writerows([[s for s in row] for row in rows])
# Fetch UTF-8 output from the queue ...
data = self.queue.getvalue()
data = data.decode("utf-8")
# ... and reencode it into the target encoding
data = self.encoder.encode(data)
# write to the target stream
self.stream.write(data)
# empty queue
self.queue.truncate(0)
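# Illustrative Python 2 usage (hypothetical path; not part of the original
# module): write unicode rows to a UTF-8 encoded CSV file.
#
#   with open('out.csv', 'wb') as f:
#       writer = UnicodeWriter(f, encoding='utf-8')
#       writer.writerow([u'name', u'caf\xe9'])
#       writer.writerows([[u'a', 1], [u'b', 2]])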
def _concat_compat(to_concat, axis=0):
# filter empty arrays
nonempty = [x for x in to_concat if x.shape[axis] > 0]
# If all arrays are empty, there's nothing to convert, just short-cut to
# the concatenation, #3121.
#
    # Creating an empty array directly is tempting, but the gains would be
    # marginal given that it would still require shape & dtype calculation,
    # while np.concatenate (which already implements both) is compiled.
if nonempty:
is_datetime64 = [x.dtype == _NS_DTYPE for x in nonempty]
if all(is_datetime64):
# work around NumPy 1.6 bug
new_values = np.concatenate([x.view(np.int64) for x in nonempty],
axis=axis)
return new_values.view(_NS_DTYPE)
        elif any(is_datetime64):
            to_concat = [_to_pydatetime(x) for x in nonempty]
    return np.concatenate(to_concat, axis=axis)
def _to_pydatetime(x):
if x.dtype == _NS_DTYPE:
shape = x.shape
x = tslib.ints_to_pydatetime(x.view(np.int64).ravel())
x = x.reshape(shape)
return x
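# Illustrative behaviour of _concat_compat (not part of the original module):
#   - concatenating only datetime64[ns] arrays preserves the datetime64[ns]
#     dtype (the i8 views are concatenated to work around the NumPy 1.6 bug);
#   - mixing datetime64[ns] with other dtypes first converts the datetime
#     arrays to object arrays of datetime.datetime via _to_pydatetime, so the
#     result is an object ndarray.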
def _where_compat(mask, arr1, arr2):
if arr1.dtype == _NS_DTYPE and arr2.dtype == _NS_DTYPE:
new_vals = np.where(mask, arr1.view('i8'), arr2.view('i8'))
return new_vals.view(_NS_DTYPE)
import pandas.tslib as tslib
if arr1.dtype == _NS_DTYPE:
arr1 = tslib.ints_to_pydatetime(arr1.view('i8'))
if arr2.dtype == _NS_DTYPE:
arr2 = tslib.ints_to_pydatetime(arr2.view('i8'))
return np.where(mask, arr1, arr2)
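# Illustrative behaviour of _where_compat (not part of the original module):
# an np.where wrapper that keeps the datetime64[ns] dtype when both value
# arrays are datetime64[ns], and otherwise converts any datetime64[ns] input
# to object arrays of datetime.datetime before dispatching to np.where.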
def sentinel_factory():
class Sentinel(object):
pass
return Sentinel()
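# Illustrative usage (hypothetical names; not part of the original module):
# a sentinel is a unique "no value passed" default that cannot collide with
# None or any value a caller might legitimately pass.
#
#   _no_default = sentinel_factory()
#
#   def get_value(mapping, key, default=_no_default):
#       if default is _no_default:
#           return mapping[key]          # no fallback supplied; may raise
#       return mapping.get(key, default)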
def in_interactive_session():
""" check if we're running in an interactive shell
returns True if running under python/ipython interactive shell
"""
def check_main():
import __main__ as main
return (not hasattr(main, '__file__') or
get_option('mode.sim_interactive'))
try:
return __IPYTHON__ or check_main()
except:
return check_main()
def in_qtconsole():
"""
check if we're inside an IPython qtconsole
DEPRECATED: This is no longer needed, or working, in IPython 3 and above.
"""
try:
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', "")
)
if 'qtconsole' in front_end.lower():
return True
except:
return False
return False
def in_ipnb():
"""
check if we're inside an IPython Notebook
DEPRECATED: This is no longer used in pandas, and won't work in IPython 3
and above.
"""
try:
ip = get_ipython()
front_end = (
ip.config.get('KernelApp', {}).get('parent_appname', "") or
ip.config.get('IPKernelApp', {}).get('parent_appname', "")
)
if 'notebook' in front_end.lower():
return True
except:
return False
return False
def in_ipython_frontend():
"""
    check if we're inside an IPython zmq frontend
"""
try:
ip = get_ipython()
return 'zmq' in str(type(ip)).lower()
except:
pass
return False
# Unicode consolidation
# ---------------------
#
# pprinting utility functions for generating Unicode text or
# bytes(3.x)/str(2.x) representations of objects.
# Try to use these as much as possible rather than rolling your own.
#
# When to use
# -----------
#
# 1) If you're writing code internal to pandas (no I/O directly involved),
# use pprint_thing().
#
#    It will always return unicode text which can be handled by other
# parts of the package without breakage.
#
# 2) If you need to send something to the console, use console_encode().
#
# console_encode() should (hopefully) choose the right encoding for you
# based on the encoding set in option "display.encoding"
#
# 3) if you need to write something out to file, use
# pprint_thing_encoded(encoding).
#
# If no encoding is specified, it defaults to utf-8. Since encoding pure
# ascii with utf-8 is a no-op you can safely use the default utf-8 if you're
# working with straight ascii.
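# Illustrative usage of the three entry points above (hypothetical `obj`;
# not part of the original module):
#
#   pprint_thing(obj)                            # internal code -> unicode text
#   console_encode(obj)                          # console output, honours display.encoding
#   pprint_thing_encoded(obj, encoding='utf-8')  # writing to a file -> encoded bytes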
def _pprint_seq(seq, _nest_lvl=0, **kwds):
"""
internal. pprinter for iterables. you should probably use pprint_thing()
    rather than calling this directly.
bounds length of printed sequence, depending on options
"""
if isinstance(seq, set):
fmt = u("set([%s])")
else:
fmt = u("[%s]") if hasattr(seq, '__setitem__') else u("(%s)")
nitems = get_option("max_seq_items") or len(seq)
s = iter(seq)
r = []
for i in range(min(nitems, len(seq))): # handle sets, no slicing
r.append(pprint_thing(next(s), _nest_lvl + 1, **kwds))
body = ", ".join(r)
if nitems < len(seq):
body += ", ..."
elif isinstance(seq, tuple) and len(seq) == 1:
body += ','
return fmt % body
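# Illustrative usage (not part of the original module):
#   _pprint_seq([1, 2, 3])   -> u'[1, 2, 3]'
#   _pprint_seq((1,))        -> u'(1,)'
#   _pprint_seq(set([1, 2])) -> u'set([1, 2])'   (element order not guaranteed)
#   _pprint_seq([1, 2, 3])   -> u'[1, 2, ...]'   if "max_seq_items" is set to 2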
def _pprint_dict(seq, _nest_lvl=0, **kwds):
"""
    internal. pprinter for dicts. you should probably use pprint_thing()
    rather than calling this directly.
"""
fmt = u("{%s}")
pairs = []
pfmt = u("%s: %s")
nitems = get_option("max_seq_items") or len(seq)
for k, v in list(seq.items())[:nitems]:
pairs.append(pfmt % (pprint_thing(k, _nest_lvl + 1, **kwds),
pprint_thing(v, _nest_lvl + 1, **kwds)))
if nitems < len(seq):
return fmt % (", ".join(pairs) + ", ...")
else:
return fmt % ", ".join(pairs)
def pprint_thing(thing, _nest_lvl=0, escape_chars=None, default_escapes=False,
quote_strings=False):
"""
This function is the sanctioned way of converting objects
to a unicode representation.
properly handles nested sequences containing unicode strings
(unicode(object) does not)
Parameters
----------
thing : anything to be formatted
_nest_lvl : internal use only. pprint_thing() is mutually-recursive
        with _pprint_seq and _pprint_dict; this argument is used to keep
        track of the current nesting level, and limit it.
escape_chars : list or dict, optional
Characters to escape. If a dict is passed the values are the
replacements
default_escapes : bool, default False
        Whether the input escape characters replace or add to the defaults
Returns
-------
result - unicode object on py2, str on py3. Always Unicode.
"""
def as_escaped_unicode(thing, escape_chars=escape_chars):
# Unicode is fine, else we try to decode using utf-8 and 'replace'
# if that's not it either, we have no way of knowing and the user
# should deal with it himself.
try:
result = compat.text_type(thing) # we should try this first
except UnicodeDecodeError:
# either utf-8 or we replace errors
result = str(thing).decode('utf-8', "replace")
translate = {'\t': r'\t',
'\n': r'\n',
'\r': r'\r',
}
if isinstance(escape_chars, dict):
if default_escapes:
translate.update(escape_chars)
else:
translate = escape_chars
escape_chars = list(escape_chars.keys())
else:
escape_chars = escape_chars or tuple()
for c in escape_chars:
result = result.replace(c, translate[c])
return compat.text_type(result)
if (compat.PY3 and hasattr(thing, '__next__')) or hasattr(thing, 'next'):
return compat.text_type(thing)
elif (isinstance(thing, dict) and
_nest_lvl < get_option("display.pprint_nest_depth")):
result = _pprint_dict(thing, _nest_lvl, quote_strings=True)
elif _is_sequence(thing) and _nest_lvl < \
get_option("display.pprint_nest_depth"):
result = _pprint_seq(thing, _nest_lvl, escape_chars=escape_chars,
quote_strings=quote_strings)
elif isinstance(thing, compat.string_types) and quote_strings:
if compat.PY3:
fmt = "'%s'"
else:
fmt = "u'%s'"
result = fmt % as_escaped_unicode(thing)
else:
result = as_escaped_unicode(thing)
return compat.text_type(result) # always unicode
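# Illustrative usage on Python 3 (not part of the original module):
#   pprint_thing([1, 'a'])                     -> "[1, a]"
#   pprint_thing([1, 'a'], quote_strings=True) -> "[1, 'a']"
#   pprint_thing({'a': 1})                     -> "{'a': 1}"  (dict keys/values quoted)
#   pprint_thing('a\tb', escape_chars=['\t'])  -> 'a\\tb'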
def pprint_thing_encoded(object, encoding='utf-8', errors='replace', **kwds):
value = pprint_thing(object) # get unicode representation of object
return value.encode(encoding, errors, **kwds)
def console_encode(object, **kwds):
"""
this is the sanctioned way to prepare something for
    sending *to the console*. It delegates to pprint_thing() to get
    a unicode representation of the object and relies on the global
    encoding set in display.encoding. Use this everywhere you output
    to the console.
"""
return pprint_thing_encoded(object,
get_option("display.encoding"))
def load(path): # TODO remove in 0.13
"""
Load pickled pandas object (or any other pickled object) from the specified
file path
Warning: Loading pickled data received from untrusted sources can be
unsafe. See: http://docs.python.org/2.7/library/pickle.html
Parameters
----------
path : string
File path
Returns
-------
unpickled : type of object stored in file
"""
import warnings
warnings.warn("load is deprecated, use read_pickle", FutureWarning)
from pandas.io.pickle import read_pickle
return read_pickle(path)
def save(obj, path): # TODO remove in 0.13
"""
Pickle (serialize) object to input file path
Parameters
----------
obj : any object
path : string
File path
"""
import warnings
warnings.warn("save is deprecated, use obj.to_pickle", FutureWarning)
from pandas.io.pickle import to_pickle
return to_pickle(obj, path)
def _maybe_match_name(a, b):
a_name = getattr(a, 'name', None)
b_name = getattr(b, 'name', None)
if a_name == b_name:
return a_name
return None
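# Illustrative usage (not part of the original module): used when combining
# two pandas objects to decide on the name of the result.
#   _maybe_match_name(Series([1], name='x'), Series([2], name='x')) -> 'x'
#   _maybe_match_name(Series([1], name='x'), Series([2], name='y')) -> None
#   _maybe_match_name(Series([1], name='x'), [1, 2])                -> None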
the-stack_0_15348 | import pytest
from subprocess import call
import os
import yaml
"""
test metafunc
This module tests the metafunc command-line interface.
It also shows how to run tests where failure is expected
(i.e., checking that invalid parameters are handled).
"""
class TestCLI:
"""
simple metafunc test class
    Runs metafunc from the command line directly using
    subprocess.call and checks the return codes of the
    invoked commands.
"""
@classmethod
def setup_class(self):
""" setup any state specific to the execution of the given class (which
usually contains tests).
"""
def testSetup(self):
"""
        Run 'metafunc setup -n test' and check that it exits successfully.
"""
command = ["metafunc", "setup", "-n", "test"]
pwd = os.path.abspath(os.path.dirname(__file__))
rc = call(command, cwd=pwd)
assert rc == 0
@pytest.mark.parametrize(
"test_input_config,expected",
[("test/config.yaml", 0), ("config_wrong.yaml", 1)],
)
def test_run(self, test_input_config, expected):
"""
        Run 'metafunc run <config>' and check the exit code against the
        expected value.
"""
command_prefix = ["metafunc", "run"]
pwd = os.path.abspath(os.path.dirname(__file__))
command = command_prefix + [test_input_config]
rc = call(command, cwd=pwd)
assert rc == expected
        # clean up run data
        # Config files here specify a resultdir where the snakemake run results
        # will be written. Here we find it for each individual run and delete
        # the directory after a successful run.
config_data = yaml.safe_load(open(os.path.join(pwd, test_input_config)))
print(config_data)
resultdir = config_data["resultdir"]
rc = call(["rm", "-rf", resultdir], cwd=pwd)
assert rc == 0
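        # Note: test/config.yaml is presumably generated by the
        # `metafunc setup -n test` call in testSetup above; this test only
        # relies on it being valid YAML with a `resultdir` key. A minimal,
        # hypothetical config could look like:
        #
        #   resultdir: results/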
@classmethod
def teardown_class(self):
""" teardown any state that was previously setup with a call to
setup_class.
"""
pwd = os.path.abspath(os.path.dirname(__file__))
rc = call(["rm", "-rf", "test"], cwd=pwd)
assert rc == 0