prompt (large_string, lengths 70 – 991k)
completion (large_string, lengths 0 – 1.02k)
<|file_name|>test_TurtleSVGMachine.py<|end_file_name|><|fim▁begin|>from turtlelsystem.TurtleSVGMachine import TurtleSVGMachine from nose.tools import assert_almost_equal <|fim▁hole|> assert_almost_equal(turtle.x, 20.0) def test_backward(): turtle = TurtleSVGMachine(width = 20, height = 20) turtle.do_command("BACKWARD 10") assert_almost_equal(turtle.x, 0.0) def test_left(): turtle = TurtleSVGMachine() turtle.do_command("LEFT 30") assert_almost_equal(turtle.theta, 30.0) def test_right(): turtle = TurtleSVGMachine() turtle.do_command("RIGHT 30") assert_almost_equal(turtle.theta, 330.0)<|fim▁end|>
def test_forward(): turtle = TurtleSVGMachine(width = 20, height = 20) turtle.do_command("FORWARD 10")
<|file_name|>httpserver_test.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python from __future__ import absolute_import, division, print_function, with_statement from tornado import netutil from tornado.escape import json_decode, json_encode, utf8, _unicode, recursive_unicode, native_str from tornado import gen from tornado.http1connection import HTTP1Connection from tornado.httpserver import HTTPServer from tornado.httputil import HTTPHeaders, HTTPMessageDelegate, HTTPServerConnectionDelegate, ResponseStartLine from tornado.iostream import IOStream from tornado.log import gen_log from tornado.netutil import ssl_options_to_context from tornado.simple_httpclient import SimpleAsyncHTTPClient from tornado.testing import AsyncHTTPTestCase, AsyncHTTPSTestCase, AsyncTestCase, ExpectLog, gen_test from tornado.test.util import unittest, skipOnTravis from tornado.web import Application, RequestHandler, asynchronous, stream_request_body from contextlib import closing import datetime import gzip import os import shutil import socket import ssl import sys import tempfile from io import BytesIO def read_stream_body(stream, callback): """Reads an HTTP response from `stream` and runs callback with its headers and body.""" chunks = [] class Delegate(HTTPMessageDelegate): def headers_received(self, start_line, headers): self.headers = headers def data_received(self, chunk): chunks.append(chunk) def finish(self): callback((self.headers, b''.join(chunks))) conn = HTTP1Connection(stream, True) conn.read_response(Delegate()) class HandlerBaseTestCase(AsyncHTTPTestCase): def get_app(self): return Application([('/', self.__class__.Handler)]) def fetch_json(self, *args, **kwargs): response = self.fetch(*args, **kwargs) response.rethrow() return json_decode(response.body) class HelloWorldRequestHandler(RequestHandler): def initialize(self, protocol="http"): self.expected_protocol = protocol def get(self): if self.request.protocol != self.expected_protocol: raise Exception("unexpected protocol") self.finish("Hello world") def post(self): self.finish("Got %d bytes in POST" % len(self.request.body)) # In pre-1.0 versions of openssl, SSLv23 clients always send SSLv2 # ClientHello messages, which are rejected by SSLv3 and TLSv1 # servers. Note that while the OPENSSL_VERSION_INFO was formally # introduced in python3.2, it was present but undocumented in # python 2.7 skipIfOldSSL = unittest.skipIf( getattr(ssl, 'OPENSSL_VERSION_INFO', (0, 0)) < (1, 0), "old version of ssl module and/or openssl") class BaseSSLTest(AsyncHTTPSTestCase): def get_app(self): return Application([('/', HelloWorldRequestHandler, dict(protocol="https"))]) class SSLTestMixin(object): def get_ssl_options(self): return dict(ssl_version=self.get_ssl_version(), # type: ignore **AsyncHTTPSTestCase.get_ssl_options()) def get_ssl_version(self): raise NotImplementedError() def test_ssl(self): response = self.fetch('/') self.assertEqual(response.body, b"Hello world") def test_large_post(self): response = self.fetch('/', method='POST', body='A' * 5000) self.assertEqual(response.body, b"Got 5000 bytes in POST") def test_non_ssl_request(self): # Make sure the server closes the connection when it gets a non-ssl # connection, rather than waiting for a timeout or otherwise # misbehaving. 
with ExpectLog(gen_log, '(SSL Error|uncaught exception)'): with ExpectLog(gen_log, 'Uncaught exception', required=False): self.http_client.fetch( self.get_url("/").replace('https:', 'http:'), self.stop, request_timeout=3600, connect_timeout=3600) response = self.wait() self.assertEqual(response.code, 599) def test_error_logging(self): # No stack traces are logged for SSL errors. with ExpectLog(gen_log, 'SSL Error') as expect_log: self.http_client.fetch( self.get_url("/").replace("https:", "http:"), self.stop) response = self.wait() self.assertEqual(response.code, 599) self.assertFalse(expect_log.logged_stack) # Python's SSL implementation differs significantly between versions. # For example, SSLv3 and TLSv1 throw an exception if you try to read # from the socket before the handshake is complete, but the default # of SSLv23 allows it. class SSLv23Test(BaseSSLTest, SSLTestMixin): def get_ssl_version(self): return ssl.PROTOCOL_SSLv23 @skipIfOldSSL class SSLv3Test(BaseSSLTest, SSLTestMixin): def get_ssl_version(self): return ssl.PROTOCOL_SSLv3 @skipIfOldSSL class TLSv1Test(BaseSSLTest, SSLTestMixin): def get_ssl_version(self): return ssl.PROTOCOL_TLSv1 @unittest.skipIf(not hasattr(ssl, 'SSLContext'), 'ssl.SSLContext not present') class SSLContextTest(BaseSSLTest, SSLTestMixin): def get_ssl_options(self): context = ssl_options_to_context( AsyncHTTPSTestCase.get_ssl_options(self)) assert isinstance(context, ssl.SSLContext) return context class BadSSLOptionsTest(unittest.TestCase): def test_missing_arguments(self): application = Application() self.assertRaises(KeyError, HTTPServer, application, ssl_options={ "keyfile": "/__missing__.crt", }) def test_missing_key(self): """A missing SSL key should cause an immediate exception.""" application = Application() module_dir = os.path.dirname(__file__) existing_certificate = os.path.join(module_dir, 'test.crt') existing_key = os.path.join(module_dir, 'test.key') self.assertRaises((ValueError, IOError), HTTPServer, application, ssl_options={ "certfile": "/__mising__.crt", }) self.assertRaises((ValueError, IOError), HTTPServer, application, ssl_options={ "certfile": existing_certificate, "keyfile": "/__missing__.key" }) # This actually works because both files exist HTTPServer(application, ssl_options={ "certfile": existing_certificate, "keyfile": existing_key, }) class MultipartTestHandler(RequestHandler): def post(self): self.finish({"header": self.request.headers["X-Header-Encoding-Test"], "argument": self.get_argument("argument"), "filename": self.request.files["files"][0].filename, "filebody": _unicode(self.request.files["files"][0]["body"]), }) # This test is also called from wsgi_test class HTTPConnectionTest(AsyncHTTPTestCase): def get_handlers(self): return [("/multipart", MultipartTestHandler), ("/hello", HelloWorldRequestHandler)] def get_app(self): return Application(self.get_handlers()) def raw_fetch(self, headers, body, newline=b"\r\n"): with closing(IOStream(socket.socket())) as stream: stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() stream.write( newline.join(headers + [utf8("Content-Length: %d" % len(body))]) + newline + newline + body) read_stream_body(stream, self.stop) headers, body = self.wait() return body def test_multipart_form(self): # Encodings here are tricky: Headers are latin1, bodies can be # anything (we use utf8 by default). 
response = self.raw_fetch([ b"POST /multipart HTTP/1.0", b"Content-Type: multipart/form-data; boundary=1234567890", b"X-Header-encoding-test: \xe9", ], b"\r\n".join([ b"Content-Disposition: form-data; name=argument", b"", u"\u00e1".encode("utf-8"), b"--1234567890", u'Content-Disposition: form-data; name="files"; filename="\u00f3"'.encode("utf8"), b"", u"\u00fa".encode("utf-8"), b"--1234567890--", b"", ])) data = json_decode(response) self.assertEqual(u"\u00e9", data["header"]) self.assertEqual(u"\u00e1", data["argument"]) self.assertEqual(u"\u00f3", data["filename"]) self.assertEqual(u"\u00fa", data["filebody"]) def test_newlines(self): # We support both CRLF and bare LF as line separators. for newline in (b"\r\n", b"\n"): response = self.raw_fetch([b"GET /hello HTTP/1.0"], b"", newline=newline) self.assertEqual(response, b'Hello world') def test_100_continue(self): # Run through a 100-continue interaction by hand: # When given Expect: 100-continue, we get a 100 response after the # headers, and then the real response after the body. stream = IOStream(socket.socket(), io_loop=self.io_loop) stream.connect(("127.0.0.1", self.get_http_port()), callback=self.stop) self.wait() stream.write(b"\r\n".join([b"POST /hello HTTP/1.1", b"Content-Length: 1024", b"Expect: 100-continue", b"Connection: close", b"\r\n"]), callback=self.stop) self.wait() stream.read_until(b"\r\n\r\n", self.stop) data = self.wait() self.assertTrue(data.startswith(b"HTTP/1.1 100 "), data) stream.write(b"a" * 1024) stream.read_until(b"\r\n", self.stop) first_line = self.wait() self.assertTrue(first_line.startswith(b"HTTP/1.1 200"), first_line) stream.read_until(b"\r\n\r\n", self.stop) header_data = self.wait() headers = HTTPHeaders.parse(native_str(header_data.decode('latin1'))) stream.read_bytes(int(headers["Content-Length"]), self.stop) body = self.wait() self.assertEqual(body, b"Got 1024 bytes in POST") stream.close() class EchoHandler(RequestHandler): def get(self): self.write(recursive_unicode(self.request.arguments)) def post(self): self.write(recursive_unicode(self.request.arguments)) class TypeCheckHandler(RequestHandler): def prepare(self): self.errors = {} fields = [ ('method', str), ('uri', str), ('version', str), ('remote_ip', str), ('protocol', str), ('host', str), ('path', str), ('query', str), ] for field, expected_type in fields: self.check_type(field, getattr(self.request, field), expected_type) self.check_type('header_key', list(self.request.headers.keys())[0], str) self.check_type('header_value', list(self.request.headers.values())[0], str) self.check_type('cookie_key', list(self.request.cookies.keys())[0], str) self.check_type('cookie_value', list(self.request.cookies.values())[0].value, str) # secure cookies self.check_type('arg_key', list(self.request.arguments.keys())[0], str) self.check_type('arg_value', list(self.request.arguments.values())[0][0], bytes) def post(self): self.check_type('body', self.request.body, bytes) self.write(self.errors) def get(self): self.write(self.errors) def check_type(self, name, obj, expected_type): actual_type = type(obj) if expected_type != actual_type: self.errors[name] = "expected %s, got %s" % (expected_type, actual_type) class HTTPServerTest(AsyncHTTPTestCase): def get_app(self): return Application([("/echo", EchoHandler), ("/typecheck", TypeCheckHandler), ("//doubleslash", EchoHandler), ]) def test_query_string_encoding(self): response = self.fetch("/echo?foo=%C3%A9") data = json_decode(response.body) self.assertEqual(data, {u"foo": [u"\u00e9"]}) def 
test_empty_query_string(self): response = self.fetch("/echo?foo=&foo=") data = json_decode(response.body) self.assertEqual(data, {u"foo": [u"", u""]}) def test_empty_post_parameters(self): response = self.fetch("/echo", method="POST", body="foo=&bar=") data = json_decode(response.body) self.assertEqual(data, {u"foo": [u""], u"bar": [u""]}) def test_types(self): headers = {"Cookie": "foo=bar"} response = self.fetch("/typecheck?foo=bar", headers=headers) data = json_decode(response.body) self.assertEqual(data, {}) response = self.fetch("/typecheck", method="POST", body="foo=bar", headers=headers) data = json_decode(response.body) self.assertEqual(data, {}) def test_double_slash(self): # urlparse.urlsplit (which tornado.httpserver used to use # incorrectly) would parse paths beginning with "//" as # protocol-relative urls. response = self.fetch("//doubleslash") self.assertEqual(200, response.code) self.assertEqual(json_decode(response.body), {}) def test_malformed_body(self): # parse_qs is pretty forgiving, but it will fail on python 3 # if the data is not utf8. On python 2 parse_qs will work, # but then the recursive_unicode call in EchoHandler will # fail. if str is bytes: return with ExpectLog(gen_log, 'Invalid x-www-form-urlencoded body'): response = self.fetch( '/echo', method="POST", headers={'Content-Type': 'application/x-www-form-urlencoded'}, body=b'\xe9') self.assertEqual(200, response.code) self.assertEqual(b'{}', response.body) class HTTPServerRawTest(AsyncHTTPTestCase): def get_app(self): return Application([ ('/echo', EchoHandler), ]) def setUp(self): super(HTTPServerRawTest, self).setUp() self.stream = IOStream(socket.socket())<|fim▁hole|> self.wait() def tearDown(self): self.stream.close() super(HTTPServerRawTest, self).tearDown() def test_empty_request(self): self.stream.close() self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop) self.wait() def test_malformed_first_line(self): with ExpectLog(gen_log, '.*Malformed HTTP request line'): self.stream.write(b'asdf\r\n\r\n') # TODO: need an async version of ExpectLog so we don't need # hard-coded timeouts here. self.io_loop.add_timeout(datetime.timedelta(seconds=0.05), self.stop) self.wait() def test_malformed_headers(self): with ExpectLog(gen_log, '.*Malformed HTTP headers'): self.stream.write(b'GET / HTTP/1.0\r\nasdf\r\n\r\n') self.io_loop.add_timeout(datetime.timedelta(seconds=0.05), self.stop) self.wait() def test_chunked_request_body(self): # Chunked requests are not widely supported and we don't have a way # to generate them in AsyncHTTPClient, but HTTPServer will read them. self.stream.write(b"""\ POST /echo HTTP/1.1 Transfer-Encoding: chunked Content-Type: application/x-www-form-urlencoded 4 foo= 3 bar 0 """.replace(b"\n", b"\r\n")) read_stream_body(self.stream, self.stop) headers, response = self.wait() self.assertEqual(json_decode(response), {u'foo': [u'bar']}) def test_chunked_request_uppercase(self): # As per RFC 2616 section 3.6, "Transfer-Encoding" header's value is # case-insensitive. 
self.stream.write(b"""\ POST /echo HTTP/1.1 Transfer-Encoding: Chunked Content-Type: application/x-www-form-urlencoded 4 foo= 3 bar 0 """.replace(b"\n", b"\r\n")) read_stream_body(self.stream, self.stop) headers, response = self.wait() self.assertEqual(json_decode(response), {u'foo': [u'bar']}) def test_invalid_content_length(self): with ExpectLog(gen_log, '.*Only integer Content-Length is allowed'): self.stream.write(b"""\ POST /echo HTTP/1.1 Content-Length: foo bar """.replace(b"\n", b"\r\n")) self.stream.read_until_close(self.stop) self.wait() class XHeaderTest(HandlerBaseTestCase): class Handler(RequestHandler): def get(self): self.write(dict(remote_ip=self.request.remote_ip, remote_protocol=self.request.protocol)) def get_httpserver_options(self): return dict(xheaders=True) def test_ip_headers(self): self.assertEqual(self.fetch_json("/")["remote_ip"], "127.0.0.1") valid_ipv4 = {"X-Real-IP": "4.4.4.4"} self.assertEqual( self.fetch_json("/", headers=valid_ipv4)["remote_ip"], "4.4.4.4") valid_ipv4_list = {"X-Forwarded-For": "127.0.0.1, 4.4.4.4"} self.assertEqual( self.fetch_json("/", headers=valid_ipv4_list)["remote_ip"], "4.4.4.4") valid_ipv6 = {"X-Real-IP": "2620:0:1cfe:face:b00c::3"} self.assertEqual( self.fetch_json("/", headers=valid_ipv6)["remote_ip"], "2620:0:1cfe:face:b00c::3") valid_ipv6_list = {"X-Forwarded-For": "::1, 2620:0:1cfe:face:b00c::3"} self.assertEqual( self.fetch_json("/", headers=valid_ipv6_list)["remote_ip"], "2620:0:1cfe:face:b00c::3") invalid_chars = {"X-Real-IP": "4.4.4.4<script>"} self.assertEqual( self.fetch_json("/", headers=invalid_chars)["remote_ip"], "127.0.0.1") invalid_chars_list = {"X-Forwarded-For": "4.4.4.4, 5.5.5.5<script>"} self.assertEqual( self.fetch_json("/", headers=invalid_chars_list)["remote_ip"], "127.0.0.1") invalid_host = {"X-Real-IP": "www.google.com"} self.assertEqual( self.fetch_json("/", headers=invalid_host)["remote_ip"], "127.0.0.1") def test_scheme_headers(self): self.assertEqual(self.fetch_json("/")["remote_protocol"], "http") https_scheme = {"X-Scheme": "https"} self.assertEqual( self.fetch_json("/", headers=https_scheme)["remote_protocol"], "https") https_forwarded = {"X-Forwarded-Proto": "https"} self.assertEqual( self.fetch_json("/", headers=https_forwarded)["remote_protocol"], "https") bad_forwarded = {"X-Forwarded-Proto": "unknown"} self.assertEqual( self.fetch_json("/", headers=bad_forwarded)["remote_protocol"], "http") class SSLXHeaderTest(AsyncHTTPSTestCase, HandlerBaseTestCase): def get_app(self): return Application([('/', XHeaderTest.Handler)]) def get_httpserver_options(self): output = super(SSLXHeaderTest, self).get_httpserver_options() output['xheaders'] = True return output def test_request_without_xprotocol(self): self.assertEqual(self.fetch_json("/")["remote_protocol"], "https") http_scheme = {"X-Scheme": "http"} self.assertEqual( self.fetch_json("/", headers=http_scheme)["remote_protocol"], "http") bad_scheme = {"X-Scheme": "unknown"} self.assertEqual( self.fetch_json("/", headers=bad_scheme)["remote_protocol"], "https") class ManualProtocolTest(HandlerBaseTestCase): class Handler(RequestHandler): def get(self): self.write(dict(protocol=self.request.protocol)) def get_httpserver_options(self): return dict(protocol='https') def test_manual_protocol(self): self.assertEqual(self.fetch_json('/')['protocol'], 'https') @unittest.skipIf(not hasattr(socket, 'AF_UNIX') or sys.platform == 'cygwin', "unix sockets not supported on this platform") class UnixSocketTest(AsyncTestCase): """HTTPServers can listen on Unix sockets 
too. Why would you want to do this? Nginx can proxy to backends listening on unix sockets, for one thing (and managing a namespace for unix sockets can be easier than managing a bunch of TCP port numbers). Unfortunately, there's no way to specify a unix socket in a url for an HTTP client, so we have to test this by hand. """ def setUp(self): super(UnixSocketTest, self).setUp() self.tmpdir = tempfile.mkdtemp() self.sockfile = os.path.join(self.tmpdir, "test.sock") sock = netutil.bind_unix_socket(self.sockfile) app = Application([("/hello", HelloWorldRequestHandler)]) self.server = HTTPServer(app, io_loop=self.io_loop) self.server.add_socket(sock) self.stream = IOStream(socket.socket(socket.AF_UNIX), io_loop=self.io_loop) self.stream.connect(self.sockfile, self.stop) self.wait() def tearDown(self): self.stream.close() self.server.stop() shutil.rmtree(self.tmpdir) super(UnixSocketTest, self).tearDown() def test_unix_socket(self): self.stream.write(b"GET /hello HTTP/1.0\r\n\r\n") self.stream.read_until(b"\r\n", self.stop) response = self.wait() self.assertEqual(response, b"HTTP/1.1 200 OK\r\n") self.stream.read_until(b"\r\n\r\n", self.stop) headers = HTTPHeaders.parse(self.wait().decode('latin1')) self.stream.read_bytes(int(headers["Content-Length"]), self.stop) body = self.wait() self.assertEqual(body, b"Hello world") def test_unix_socket_bad_request(self): # Unix sockets don't have remote addresses so they just return an # empty string. with ExpectLog(gen_log, "Malformed HTTP message from"): self.stream.write(b"garbage\r\n\r\n") self.stream.read_until_close(self.stop) response = self.wait() self.assertEqual(response, b"") class KeepAliveTest(AsyncHTTPTestCase): """Tests various scenarios for HTTP 1.1 keep-alive support. These tests don't use AsyncHTTPClient because we want to control connection reuse and closing. """ def get_app(self): class HelloHandler(RequestHandler): def get(self): self.finish('Hello world') def post(self): self.finish('Hello world') class LargeHandler(RequestHandler): def get(self): # 512KB should be bigger than the socket buffers so it will # be written out in chunks. self.write(''.join(chr(i % 256) * 1024 for i in range(512))) class FinishOnCloseHandler(RequestHandler): @asynchronous def get(self): self.flush() def on_connection_close(self): # This is not very realistic, but finishing the request # from the close callback has the right timing to mimic # some errors seen in the wild. self.finish('closed') return Application([('/', HelloHandler), ('/large', LargeHandler), ('/finish_on_close', FinishOnCloseHandler)]) def setUp(self): super(KeepAliveTest, self).setUp() self.http_version = b'HTTP/1.1' def tearDown(self): # We just closed the client side of the socket; let the IOLoop run # once to make sure the server side got the message. 
self.io_loop.add_timeout(datetime.timedelta(seconds=0.001), self.stop) self.wait() if hasattr(self, 'stream'): self.stream.close() super(KeepAliveTest, self).tearDown() # The next few methods are a crude manual http client def connect(self): self.stream = IOStream(socket.socket(), io_loop=self.io_loop) self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() def read_headers(self): self.stream.read_until(b'\r\n', self.stop) first_line = self.wait() self.assertTrue(first_line.startswith(b'HTTP/1.1 200'), first_line) self.stream.read_until(b'\r\n\r\n', self.stop) header_bytes = self.wait() headers = HTTPHeaders.parse(header_bytes.decode('latin1')) return headers def read_response(self): self.headers = self.read_headers() self.stream.read_bytes(int(self.headers['Content-Length']), self.stop) body = self.wait() self.assertEqual(b'Hello world', body) def close(self): self.stream.close() del self.stream def test_two_requests(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\n\r\n') self.read_response() self.stream.write(b'GET / HTTP/1.1\r\n\r\n') self.read_response() self.close() def test_request_close(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\nConnection: close\r\n\r\n') self.read_response() self.stream.read_until_close(callback=self.stop) data = self.wait() self.assertTrue(not data) self.close() # keepalive is supported for http 1.0 too, but it's opt-in def test_http10(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'GET / HTTP/1.0\r\n\r\n') self.read_response() self.stream.read_until_close(callback=self.stop) data = self.wait() self.assertTrue(not data) self.assertTrue('Connection' not in self.headers) self.close() def test_http10_keepalive(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.close() def test_http10_keepalive_extra_crlf(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.close() def test_pipelined_requests(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n') self.read_response() self.read_response() self.close() def test_pipelined_cancel(self): self.connect() self.stream.write(b'GET / HTTP/1.1\r\n\r\nGET / HTTP/1.1\r\n\r\n') # only read once self.read_response() self.close() def test_cancel_during_download(self): self.connect() self.stream.write(b'GET /large HTTP/1.1\r\n\r\n') self.read_headers() self.stream.read_bytes(1024, self.stop) self.wait() self.close() def test_finish_while_closed(self): self.connect() self.stream.write(b'GET /finish_on_close HTTP/1.1\r\n\r\n') self.read_headers() self.close() def test_keepalive_chunked(self): self.http_version = b'HTTP/1.0' self.connect() self.stream.write(b'POST / HTTP/1.0\r\nConnection: keep-alive\r\n' b'Transfer-Encoding: chunked\r\n' b'\r\n0\r\n') self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.stream.write(b'GET / HTTP/1.0\r\nConnection: keep-alive\r\n\r\n') 
self.read_response() self.assertEqual(self.headers['Connection'], 'Keep-Alive') self.close() class GzipBaseTest(object): def get_app(self): return Application([('/', EchoHandler)]) def post_gzip(self, body): bytesio = BytesIO() gzip_file = gzip.GzipFile(mode='w', fileobj=bytesio) gzip_file.write(utf8(body)) gzip_file.close() compressed_body = bytesio.getvalue() return self.fetch('/', method='POST', body=compressed_body, headers={'Content-Encoding': 'gzip'}) def test_uncompressed(self): response = self.fetch('/', method='POST', body='foo=bar') self.assertEquals(json_decode(response.body), {u'foo': [u'bar']}) class GzipTest(GzipBaseTest, AsyncHTTPTestCase): def get_httpserver_options(self): return dict(decompress_request=True) def test_gzip(self): response = self.post_gzip('foo=bar') self.assertEquals(json_decode(response.body), {u'foo': [u'bar']}) class GzipUnsupportedTest(GzipBaseTest, AsyncHTTPTestCase): def test_gzip_unsupported(self): # Gzip support is opt-in; without it the server fails to parse # the body (but parsing form bodies is currently just a log message, # not a fatal error). with ExpectLog(gen_log, "Unsupported Content-Encoding"): response = self.post_gzip('foo=bar') self.assertEquals(json_decode(response.body), {}) class StreamingChunkSizeTest(AsyncHTTPTestCase): # 50 characters long, and repetitive so it can be compressed. BODY = b'01234567890123456789012345678901234567890123456789' CHUNK_SIZE = 16 def get_http_client(self): # body_producer doesn't work on curl_httpclient, so override the # configured AsyncHTTPClient implementation. return SimpleAsyncHTTPClient(io_loop=self.io_loop) def get_httpserver_options(self): return dict(chunk_size=self.CHUNK_SIZE, decompress_request=True) class MessageDelegate(HTTPMessageDelegate): def __init__(self, connection): self.connection = connection def headers_received(self, start_line, headers): self.chunk_lengths = [] def data_received(self, chunk): self.chunk_lengths.append(len(chunk)) def finish(self): response_body = utf8(json_encode(self.chunk_lengths)) self.connection.write_headers( ResponseStartLine('HTTP/1.1', 200, 'OK'), HTTPHeaders({'Content-Length': str(len(response_body))})) self.connection.write(response_body) self.connection.finish() def get_app(self): class App(HTTPServerConnectionDelegate): def start_request(self, server_conn, request_conn): return StreamingChunkSizeTest.MessageDelegate(request_conn) return App() def fetch_chunk_sizes(self, **kwargs): response = self.fetch('/', method='POST', **kwargs) response.rethrow() chunks = json_decode(response.body) self.assertEqual(len(self.BODY), sum(chunks)) for chunk_size in chunks: self.assertLessEqual(chunk_size, self.CHUNK_SIZE, 'oversized chunk: ' + str(chunks)) self.assertGreater(chunk_size, 0, 'empty chunk: ' + str(chunks)) return chunks def compress(self, body): bytesio = BytesIO() gzfile = gzip.GzipFile(mode='w', fileobj=bytesio) gzfile.write(body) gzfile.close() compressed = bytesio.getvalue() if len(compressed) >= len(body): raise Exception("body did not shrink when compressed") return compressed def test_regular_body(self): chunks = self.fetch_chunk_sizes(body=self.BODY) # Without compression we know exactly what to expect. self.assertEqual([16, 16, 16, 2], chunks) def test_compressed_body(self): self.fetch_chunk_sizes(body=self.compress(self.BODY), headers={'Content-Encoding': 'gzip'}) # Compression creates irregular boundaries so the assertions # in fetch_chunk_sizes are as specific as we can get. 
def test_chunked_body(self): def body_producer(write): write(self.BODY[:20]) write(self.BODY[20:]) chunks = self.fetch_chunk_sizes(body_producer=body_producer) # HTTP chunk boundaries translate to application-visible breaks self.assertEqual([16, 4, 16, 14], chunks) def test_chunked_compressed(self): compressed = self.compress(self.BODY) self.assertGreater(len(compressed), 20) def body_producer(write): write(compressed[:20]) write(compressed[20:]) self.fetch_chunk_sizes(body_producer=body_producer, headers={'Content-Encoding': 'gzip'}) class MaxHeaderSizeTest(AsyncHTTPTestCase): def get_app(self): return Application([('/', HelloWorldRequestHandler)]) def get_httpserver_options(self): return dict(max_header_size=1024) def test_small_headers(self): response = self.fetch("/", headers={'X-Filler': 'a' * 100}) response.rethrow() self.assertEqual(response.body, b"Hello world") def test_large_headers(self): with ExpectLog(gen_log, "Unsatisfiable read", required=False): response = self.fetch("/", headers={'X-Filler': 'a' * 1000}) # 431 is "Request Header Fields Too Large", defined in RFC # 6585. However, many implementations just close the # connection in this case, resulting in a 599. self.assertIn(response.code, (431, 599)) @skipOnTravis class IdleTimeoutTest(AsyncHTTPTestCase): def get_app(self): return Application([('/', HelloWorldRequestHandler)]) def get_httpserver_options(self): return dict(idle_connection_timeout=0.1) def setUp(self): super(IdleTimeoutTest, self).setUp() self.streams = [] def tearDown(self): super(IdleTimeoutTest, self).tearDown() for stream in self.streams: stream.close() def connect(self): stream = IOStream(socket.socket()) stream.connect(('127.0.0.1', self.get_http_port()), self.stop) self.wait() self.streams.append(stream) return stream def test_unused_connection(self): stream = self.connect() stream.set_close_callback(self.stop) self.wait() def test_idle_after_use(self): stream = self.connect() stream.set_close_callback(lambda: self.stop("closed")) # Use the connection twice to make sure keep-alives are working for i in range(2): stream.write(b"GET / HTTP/1.1\r\n\r\n") stream.read_until(b"\r\n\r\n", self.stop) self.wait() stream.read_bytes(11, self.stop) data = self.wait() self.assertEqual(data, b"Hello world") # Now let the timeout trigger and close the connection. data = self.wait() self.assertEqual(data, "closed") class BodyLimitsTest(AsyncHTTPTestCase): def get_app(self): class BufferedHandler(RequestHandler): def put(self): self.write(str(len(self.request.body))) @stream_request_body class StreamingHandler(RequestHandler): def initialize(self): self.bytes_read = 0 def prepare(self): if 'expected_size' in self.request.arguments: self.request.connection.set_max_body_size( int(self.get_argument('expected_size'))) if 'body_timeout' in self.request.arguments: self.request.connection.set_body_timeout( float(self.get_argument('body_timeout'))) def data_received(self, data): self.bytes_read += len(data) def put(self): self.write(str(self.bytes_read)) return Application([('/buffered', BufferedHandler), ('/streaming', StreamingHandler)]) def get_httpserver_options(self): return dict(body_timeout=3600, max_body_size=4096) def get_http_client(self): # body_producer doesn't work on curl_httpclient, so override the # configured AsyncHTTPClient implementation. 
return SimpleAsyncHTTPClient(io_loop=self.io_loop) def test_small_body(self): response = self.fetch('/buffered', method='PUT', body=b'a' * 4096) self.assertEqual(response.body, b'4096') response = self.fetch('/streaming', method='PUT', body=b'a' * 4096) self.assertEqual(response.body, b'4096') def test_large_body_buffered(self): with ExpectLog(gen_log, '.*Content-Length too long'): response = self.fetch('/buffered', method='PUT', body=b'a' * 10240) self.assertEqual(response.code, 599) def test_large_body_buffered_chunked(self): with ExpectLog(gen_log, '.*chunked body too large'): response = self.fetch('/buffered', method='PUT', body_producer=lambda write: write(b'a' * 10240)) self.assertEqual(response.code, 599) def test_large_body_streaming(self): with ExpectLog(gen_log, '.*Content-Length too long'): response = self.fetch('/streaming', method='PUT', body=b'a' * 10240) self.assertEqual(response.code, 599) def test_large_body_streaming_chunked(self): with ExpectLog(gen_log, '.*chunked body too large'): response = self.fetch('/streaming', method='PUT', body_producer=lambda write: write(b'a' * 10240)) self.assertEqual(response.code, 599) def test_large_body_streaming_override(self): response = self.fetch('/streaming?expected_size=10240', method='PUT', body=b'a' * 10240) self.assertEqual(response.body, b'10240') def test_large_body_streaming_chunked_override(self): response = self.fetch('/streaming?expected_size=10240', method='PUT', body_producer=lambda write: write(b'a' * 10240)) self.assertEqual(response.body, b'10240') @gen_test def test_timeout(self): stream = IOStream(socket.socket()) try: yield stream.connect(('127.0.0.1', self.get_http_port())) # Use a raw stream because AsyncHTTPClient won't let us read a # response without finishing a body. stream.write(b'PUT /streaming?body_timeout=0.1 HTTP/1.0\r\n' b'Content-Length: 42\r\n\r\n') with ExpectLog(gen_log, 'Timeout reading body'): response = yield stream.read_until_close() self.assertEqual(response, b'') finally: stream.close() @gen_test def test_body_size_override_reset(self): # The max_body_size override is reset between requests. stream = IOStream(socket.socket()) try: yield stream.connect(('127.0.0.1', self.get_http_port())) # Use a raw stream so we can make sure it's all on one connection. stream.write(b'PUT /streaming?expected_size=10240 HTTP/1.1\r\n' b'Content-Length: 10240\r\n\r\n') stream.write(b'a' * 10240) headers, response = yield gen.Task(read_stream_body, stream) self.assertEqual(response, b'10240') # Without the ?expected_size parameter, we get the old default value stream.write(b'PUT /streaming HTTP/1.1\r\n' b'Content-Length: 10240\r\n\r\n') with ExpectLog(gen_log, '.*Content-Length too long'): data = yield stream.read_until_close() self.assertEqual(data, b'') finally: stream.close() class LegacyInterfaceTest(AsyncHTTPTestCase): def get_app(self): # The old request_callback interface does not implement the # delegate interface, and writes its response via request.write # instead of request.connection.write_headers. def handle_request(request): self.http1 = request.version.startswith("HTTP/1.") if not self.http1: # This test will be skipped if we're using HTTP/2, # so just close it out cleanly using the modern interface. 
request.connection.write_headers( ResponseStartLine('', 200, 'OK'), HTTPHeaders()) request.connection.finish() return message = b"Hello world" request.write(utf8("HTTP/1.1 200 OK\r\n" "Content-Length: %d\r\n\r\n" % len(message))) request.write(message) request.finish() return handle_request def test_legacy_interface(self): response = self.fetch('/') if not self.http1: self.skipTest("requires HTTP/1.x") self.assertEqual(response.body, b"Hello world")<|fim▁end|>
self.stream.connect(('127.0.0.1', self.get_http_port()), self.stop)
<|file_name|>_trustregion_ncg.py<|end_file_name|><|fim▁begin|>"""Newton-CG trust-region optimization.""" from __future__ import division, print_function, absolute_import import math import numpy as np import scipy.linalg from ._trustregion import (_minimize_trust_region, BaseQuadraticSubproblem) __all__ = [] def _minimize_trust_ncg(fun, x0, args=(), jac=None, hess=None, hessp=None, **trust_region_options): """<|fim▁hole|> the Newton conjugate gradient trust-region algorithm. Options ------- initial_trust_radius : float Initial trust-region radius. max_trust_radius : float Maximum value of the trust-region radius. No steps that are longer than this value will be proposed. eta : float Trust region related acceptance stringency for proposed steps. gtol : float Gradient norm must be less than `gtol` before successful termination. """ if jac is None: raise ValueError('Jacobian is required for Newton-CG trust-region ' 'minimization') if hess is None and hessp is None: raise ValueError('Either the Hessian or the Hessian-vector product ' 'is required for Newton-CG trust-region minimization') return _minimize_trust_region(fun, x0, args=args, jac=jac, hess=hess, hessp=hessp, subproblem=CGSteihaugSubproblem, **trust_region_options) class CGSteihaugSubproblem(BaseQuadraticSubproblem): """Quadratic subproblem solved by a conjugate gradient method""" def solve(self, trust_radius): """ Solve the subproblem using a conjugate gradient method. Parameters ---------- trust_radius : float We are allowed to wander only this far away from the origin. Returns ------- p : ndarray The proposed step. hits_boundary : bool True if the proposed step is on the boundary of the trust region. Notes ----- This is algorithm (7.2) of Nocedal and Wright 2nd edition. Only the function that computes the Hessian-vector product is required. The Hessian itself is not required, and the Hessian does not need to be positive semidefinite. """ # get the norm of jacobian and define the origin p_origin = np.zeros_like(self.jac) # define a default tolerance tolerance = min(0.5, math.sqrt(self.jac_mag)) * self.jac_mag # Stop the method if the search direction # is a direction of nonpositive curvature. if self.jac_mag < tolerance: hits_boundary = False return p_origin, hits_boundary # init the state for the first iteration z = p_origin r = self.jac d = -r # Search for the min of the approximation of the objective function. while True: # do an iteration Bd = self.hessp(d) dBd = np.dot(d, Bd) if dBd <= 0: # Look at the two boundary points. # Find both values of t to get the boundary points such that # ||z + t d|| == trust_radius # and then choose the one with the predicted min value. ta, tb = self.get_boundaries_intersections(z, d, trust_radius) pa = z + ta * d pb = z + tb * d if self(pa) < self(pb): p_boundary = pa else: p_boundary = pb hits_boundary = True return p_boundary, hits_boundary r_squared = np.dot(r, r) alpha = r_squared / dBd z_next = z + alpha * d if scipy.linalg.norm(z_next) >= trust_radius: # Find t >= 0 to get the boundary point such that # ||z + t d|| == trust_radius ta, tb = self.get_boundaries_intersections(z, d, trust_radius) p_boundary = z + tb * d hits_boundary = True return p_boundary, hits_boundary r_next = r + alpha * Bd r_next_squared = np.dot(r_next, r_next) if math.sqrt(r_next_squared) < tolerance: hits_boundary = False return z_next, hits_boundary beta_next = r_next_squared / r_squared d_next = -r_next + beta_next * d # update the state for the next iteration z = z_next r = r_next d = d_next<|fim▁end|>
Minimization of scalar function of one or more variables using
<|file_name|>parse_fdr.py<|end_file_name|><|fim▁begin|>''' Parse execution data log stream. Allows access to selected parts of program memory at the time of recorded events. ''' # Copyright (c) 2012-2013 Wladimir J. van der Laan # # Permission is hereby granted, free of charge, to any person obtaining a # copy of this software and associated documentation files (the "Software"), # to deal in the Software without restriction, including without limitation # the rights to use, copy, modify, merge, publish, distribute, sub license, # and/or sell copies of the Software, and to permit persons to whom the # Software is furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice (including the # next paragraph) shall be included in all copies or substantial portions # of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NON-INFRINGEMENT. IN NO EVENT SHALL # THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING # FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER # DEALINGS IN THE SOFTWARE. from __future__ import print_function, division, unicode_literals import os, sys, struct from collections import namedtuple from bisect import bisect_right from binascii import b2a_hex LITTLE_ENDIAN = b'<' BIG_ENDIAN = b'>' # target architecture description ENDIAN = LITTLE_ENDIAN DEBUG = False RECTYPE_CHAR = b'B' # always 8 bit MAGIC_CHAR = b'I' # always 32 bit WORD_CHAR = b'I' # 32 bit ADDR_CHAR = b'I' # 32/64 bit SHORT_STRING_SIZE_CHAR = b'B' # struct specifiers for decoding<|fim▁hole|>RANGE_SPEC = struct.Struct(ENDIAN + ADDR_CHAR + ADDR_CHAR) SHORT_STRING_SIZE_SPEC = struct.Struct(ENDIAN + SHORT_STRING_SIZE_CHAR) FDR_MAGIC = 0x8e1aaa8f FDR_VERSION = 1 class RTYPE: ''' FDR record types ''' RANGE_DATA = 0 RANGE_TEMP_DATA = 1 ADD_UPDATED_RANGE = 2 REMOVE_UPDATED_RANGE = 3 EVENT = 4 COMMENT = 5 def read_spec(f, spec): return spec.unpack(f.read(spec.size)) def read_short_string(f): (size,) = read_spec(f, SHORT_STRING_SIZE_SPEC) return f.read(size) Event = namedtuple('Event', ['event_type', 'parameters']) Comment = namedtuple('Comment', ['data']) Parameter = namedtuple('Parameter', ['name','value']) class FDRLoader(object): ''' High-level interface for playing back FDR files. The object is an iterable that returns event records: - Event(...) in case of an event - Comment(...) in case of an comment Also it can be subscripted to return the current contents of a memory range, like fdr[ptr:ptr+4] to return a range, or just fdr[ptr] to return one byte. An IndexError will be raised if either the start or stop is out of range (or not up to date at the time of this event). 
''' def __init__(self, input_file): self.f = open(input_file, 'rb') magic,version = read_spec(self.f, HDR_SPEC) if magic != FDR_MAGIC: raise ValueError('Magic value %08x not recognized (should be %08x)' % (magic, FDR_MAGIC)) if version != FDR_VERSION: raise ValueError('Version %08x not recognized (should be %08x)' % (version, FDR_VERSION)) # Stored memory ranges self.stored = [] # Active memory ranges self.updated_ranges = [] # Temporary data self.temp_ranges = [] # Cached list of starting addresses for bisection self.updated_ranges_start = [] self.temp_ranges_start = [] # IMPORTANT precondition: all ranges must be non-overlapping def _flush_temps(self): self.temp_ranges = [] self.temp_ranges_start = [] def __iter__(self): f = self.f while True: try: rt, = read_spec(f, RECTYPE_SPEC) except struct.error: # could not parse entire structure; end of file allowed here break if rt == RTYPE.RANGE_DATA: addr_start,addr_end = read_spec(f, RANGE_SPEC) data = f.read(addr_end - addr_start) if DEBUG: print('RANGE_DATA 0x%08x 0x%08x %s...' % (addr_start, addr_end, b2a_hex(data[0:16]))) # TODO update self.stored self.update(addr_start, addr_end, data) elif rt == RTYPE.RANGE_TEMP_DATA: addr_start,addr_end = read_spec(f, RANGE_SPEC) data = f.read(addr_end - addr_start) if DEBUG: print('RANGE_TEMP_DATA 0x%08x 0x%08x %s...' % (addr_start, addr_end, b2a_hex(data[0:16]))) self.temp_ranges.append((addr_start, addr_end, data)) elif rt == RTYPE.ADD_UPDATED_RANGE: addr_start,addr_end = read_spec(f, RANGE_SPEC) if DEBUG: print('ADD_UPDATED_RANGE 0x%08x 0x%08x' % (addr_start, addr_end)) self.updated_ranges.append((addr_start, addr_end, bytearray(addr_end - addr_start))) self.updated_ranges.sort() self.updated_ranges_start = [r[0] for r in self.updated_ranges] elif rt == RTYPE.REMOVE_UPDATED_RANGE: addr_start,addr_end = read_spec(f, RANGE_SPEC) i = bisect_right(self.updated_ranges_start, addr_start) - 1 if DEBUG: print('REMOVE_UPDATED_RANGE 0x%08x 0x%08x (%i)' % (addr_start, addr_end, i)) assert(self.updated_ranges[i][0] == addr_start and self.updated_ranges[i][1] == addr_end) del self.updated_ranges[i] # keep cached list of ranges up-to-date self.updated_ranges_start = [r[0] for r in self.updated_ranges] #self.updated_ranges.remove((addr_start, addr_end)) elif rt == RTYPE.EVENT: event_type = read_short_string(f) num_parameters, = read_spec(f, WORD_SPEC) parameters = {} for i in range(num_parameters): par = Parameter( name=read_short_string(f), value=read_spec(f, ADDR_SPEC)[0]) parameters[par.name] = par parstr = ' '.join([('%s=0x%x' % par) for par in parameters.itervalues()]) self.temp_ranges.sort() self.temp_ranges_start = [r[0] for r in self.temp_ranges] if DEBUG: print('EVENT %s %s' % (event_type, parstr)) yield Event(event_type, parameters) self._flush_temps() elif rt == RTYPE.COMMENT: size, = read_spec(f, ADDR_SPEC) comment = f.read(size) if DEBUG: print('COMMENT') yield Comment(comment) else: raise ValueError('Unexpected record type %i' % rt) def __getitem__(self, key): ''' Get one byte or a range of bytes from this memory map. 
''' # Support slicing as well as single lookups if isinstance(key, slice): start = key.start stop = key.stop if key.step is not None: raise KeyError('Extended slices not supported') else: start = key stop = key+1 try: return self.fetch(self.temp_ranges_start, self.temp_ranges, start, stop) except IndexError,e: # need to convert to str explicitly because struct won't work with bytearray return str(self.fetch(self.updated_ranges_start, self.updated_ranges, start, stop)) def fetch(self, ranges_start, ranges, start, stop): '''Look up in stored or temp ranges''' # XXX we don't handle the case of a request spanning multiple consecutive ranges idx = bisect_right(ranges_start, start) - 1 if idx < 0: raise IndexError('Start address 0x%x out of range' % (start)) (range_start, range_end, range_data) = ranges[idx] if stop > range_end: raise IndexError('End address 0x%x out of range (ends 0x%x)' % (stop, range_end)) return range_data[start-range_start:stop-range_start] def update(self, start, stop, data): ''' Update a stored memory range. ''' idx = bisect_right(self.updated_ranges_start, start) - 1 if idx < 0: raise IndexError('Start address 0x%x out of range' % (start)) (range_start, range_end, range_data) = self.updated_ranges[idx] if stop > range_end: raise IndexError('End address 0x%x out of range (ends 0x%x)' % (stop, range_end)) range_data[start-range_start:stop-range_start] = data<|fim▁end|>
RECTYPE_SPEC = struct.Struct(ENDIAN + RECTYPE_CHAR) HDR_SPEC = struct.Struct(ENDIAN + MAGIC_CHAR + WORD_CHAR) WORD_SPEC = struct.Struct(ENDIAN + WORD_CHAR) ADDR_SPEC = struct.Struct(ENDIAN + ADDR_CHAR)
<|file_name|>trimetrics.cpp<|end_file_name|><|fim▁begin|>#include "trimetrics.hpp" #include <math.h> void Metric2DTri::draw(int /*xwin*/, int /*ywin*/ ) { // draw metric information glCallList(drawingList); // draw moused point glBegin(GL_POINTS); glPointSize(5.0); glColor3f(0.0,0.0,0.0); glVertex3f(currX, currY, 0);<|fim▁hole|> void Metric2DTri::mouseEvent(QMouseEvent *e, int xmax, int ymax, bool) { // convert window coords to world coords int ywin = ymax - e->y(); int xwin = e->x(); double nodes[3][3] = { {-.5,1,0}, {-.5,0,0}, {.5,0,0}}; nodes[0][0] = 2*xRange*(double)xwin/(double)xmax - xRange; nodes[0][1] = yRange*(double)ywin/(double)ymax; currX = nodes[0][0]; currY = nodes[0][1]; // calculate metric currMetricVal = (*func)(3, nodes); // emit value changed emit current_val_changed(); } void Metric2DTri::generate_plot() { // create a drawing list and delete old one if it exists if(drawingList) glDeleteLists(drawingList,1); drawingList = glGenLists(1); glNewList(drawingList, GL_COMPILE); { double nodes[3][3] = { {-.5,1,0}, {-.5,0,0}, {.5,0,0}}; glPointSize(4.0); // coordinates can range between (-xRange, xRange) and (0, yRange) double hscan , vscan; hscan = vscan = sqrt((double)NUM_POINTS); // scan vertically for(int i=0; i<vscan; i++) { nodes[0][1] = (double)i/(double)vscan * yRange; // scan horizontally for(int j=0; j<hscan; j++) { nodes[0][0] = (double)j/(double)hscan * 2 * xRange - xRange; // calculate metric double val = (*func)(3, nodes); // set color based on value glColor3f( (colorFactor-val)*(colorFactor-val), val*val,2*(colorFactor-val)*val); // draw the point glBegin(GL_POINTS); glVertex3d(nodes[0][0], nodes[0][1], nodes[0][2]); glEnd(); } } // draw fixed nodes glPointSize(5.0); glColor3f(0,0,0); glBegin(GL_POINTS); glVertex3d(-.5,0,0); glVertex3d( .5,0,0); glEnd(); } glEndList(); }<|fim▁end|>
glEnd(); }
<|file_name|>find_anon_type.rs<|end_file_name|><|fim▁begin|>use rustc_hir as hir; use rustc_hir::intravisit::{self, NestedVisitorMap, Visitor}; use rustc_hir::Node; use rustc_middle::hir::map::Map; use rustc_middle::middle::resolve_lifetime as rl; use rustc_middle::ty::{self, Region, TyCtxt}; /// This function calls the `visit_ty` method for the parameters /// corresponding to the anonymous regions. The `nested_visitor.found_type` /// contains the anonymous type. /// /// # Arguments /// region - the anonymous region corresponding to the anon_anon conflict /// br - the bound region corresponding to the above region which is of type `BrAnon(_)` /// /// # Example /// ``` /// fn foo(x: &mut Vec<&u8>, y: &u8) /// { x.push(y); } /// ``` /// The function returns the nested type corresponding to the anonymous region /// for e.g., `&u8` and `Vec<&u8>`. pub(crate) fn find_anon_type( tcx: TyCtxt<'tcx>, region: Region<'tcx>, br: &ty::BoundRegionKind, ) -> Option<(&'tcx hir::Ty<'tcx>, &'tcx hir::FnDecl<'tcx>)> { if let Some(anon_reg) = tcx.is_suitable_region(region) { let hir_id = tcx.hir().local_def_id_to_hir_id(anon_reg.def_id); let fndecl = match tcx.hir().get(hir_id) { Node::Item(&hir::Item { kind: hir::ItemKind::Fn(ref m, ..), .. }) | Node::TraitItem(&hir::TraitItem { kind: hir::TraitItemKind::Fn(ref m, ..), .. }) | Node::ImplItem(&hir::ImplItem { kind: hir::ImplItemKind::Fn(ref m, ..), .. }) => { &m.decl } _ => return None, }; fndecl .inputs .iter() .find_map(|arg| find_component_for_bound_region(tcx, arg, br)) .map(|ty| (ty, &**fndecl)) } else { None } } // This method creates a FindNestedTypeVisitor which returns the type corresponding // to the anonymous region. fn find_component_for_bound_region( tcx: TyCtxt<'tcx>, arg: &'tcx hir::Ty<'tcx>, br: &ty::BoundRegionKind, ) -> Option<&'tcx hir::Ty<'tcx>> { let mut nested_visitor = FindNestedTypeVisitor { tcx, bound_region: *br, found_type: None, current_index: ty::INNERMOST, }; nested_visitor.visit_ty(arg); nested_visitor.found_type } // The FindNestedTypeVisitor captures the corresponding `hir::Ty` of the // anonymous region. The example above would lead to a conflict between // the two anonymous lifetimes for &u8 in x and y respectively. This visitor // would be invoked twice, once for each lifetime, and would // walk the types like &mut Vec<&u8> and &u8 looking for the HIR // where that lifetime appears. This allows us to highlight the // specific part of the type in the error message. struct FindNestedTypeVisitor<'tcx> { tcx: TyCtxt<'tcx>, // The bound_region corresponding to the Refree(freeregion) // associated with the anonymous region we are looking for. bound_region: ty::BoundRegionKind, // The type where the anonymous lifetime appears // for e.g., Vec<`&u8`> and <`&u8`> found_type: Option<&'tcx hir::Ty<'tcx>>, current_index: ty::DebruijnIndex, } impl Visitor<'tcx> for FindNestedTypeVisitor<'tcx> { type Map = Map<'tcx>; fn nested_visit_map(&mut self) -> NestedVisitorMap<Self::Map> { NestedVisitorMap::OnlyBodies(self.tcx.hir()) } fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) { match arg.kind { hir::TyKind::BareFn(_) => { self.current_index.shift_in(1); intravisit::walk_ty(self, arg); self.current_index.shift_out(1); return; } hir::TyKind::TraitObject(bounds, ..) 
=> { for bound in bounds { self.current_index.shift_in(1); self.visit_poly_trait_ref(bound, hir::TraitBoundModifier::None); self.current_index.shift_out(1); } } hir::TyKind::Rptr(ref lifetime, _) => { // the lifetime of the TyRptr let hir_id = lifetime.hir_id; match (self.tcx.named_region(hir_id), self.bound_region) { // Find the index of the anonymous region that was part of the // error. We will then search the function parameters for a bound // region at the right depth with the same index ( Some(rl::Region::LateBoundAnon(debruijn_index, _, anon_index)), ty::BrAnon(br_index), ) => { debug!( "LateBoundAnon depth = {:?} anon_index = {:?} br_index={:?}", debruijn_index, anon_index, br_index ); if debruijn_index == self.current_index && anon_index == br_index { self.found_type = Some(arg); return; // we can stop visiting now } } // Find the index of the named region that was part of the // error. We will then search the function parameters for a bound // region at the right depth with the same index (Some(rl::Region::EarlyBound(_, id, _)), ty::BrNamed(def_id, _)) => { debug!("EarlyBound id={:?} def_id={:?}", id, def_id); if id == def_id { self.found_type = Some(arg); return; // we can stop visiting now } } // Find the index of the named region that was part of the // error. We will then search the function parameters for a bound // region at the right depth with the same index ( Some(rl::Region::LateBound(debruijn_index, _, id, _)), ty::BrNamed(def_id, _), ) => { debug!( "FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", debruijn_index ); debug!("LateBound id={:?} def_id={:?}", id, def_id); if debruijn_index == self.current_index && id == def_id { self.found_type = Some(arg); return; // we can stop visiting now } } ( Some( rl::Region::Static | rl::Region::Free(_, _) | rl::Region::EarlyBound(_, _, _) | rl::Region::LateBound(_, _, _, _) | rl::Region::LateBoundAnon(_, _, _), ) | None, _, ) => { debug!("no arg found"); } } } // Checks if it is of type `hir::TyKind::Path` which corresponds to a struct. hir::TyKind::Path(_) => { let subvisitor = &mut TyPathVisitor { tcx: self.tcx, found_it: false, bound_region: self.bound_region, current_index: self.current_index, }; intravisit::walk_ty(subvisitor, arg); // call walk_ty; as visit_ty is empty, // this will visit only outermost type if subvisitor.found_it { self.found_type = Some(arg); } } _ => {} } // walk the embedded contents: e.g., if we are visiting `Vec<&Foo>`, // go on to visit `&Foo` intravisit::walk_ty(self, arg); } } // The visitor captures the corresponding `hir::Ty` of the anonymous region // in the case of structs ie. `hir::TyKind::Path`. // This visitor would be invoked for each lifetime corresponding to a struct, // and would walk the types like Vec<Ref> in the above example and Ref looking for the HIR // where that lifetime appears. This allows us to highlight the // specific part of the type in the error message. 
struct TyPathVisitor<'tcx> { tcx: TyCtxt<'tcx>, found_it: bool, bound_region: ty::BoundRegionKind, current_index: ty::DebruijnIndex, } impl Visitor<'tcx> for TyPathVisitor<'tcx> { type Map = Map<'tcx>; fn nested_visit_map(&mut self) -> NestedVisitorMap<Map<'tcx>> { NestedVisitorMap::OnlyBodies(self.tcx.hir()) } <|fim▁hole|> ( Some(rl::Region::LateBoundAnon(debruijn_index, _, anon_index)), ty::BrAnon(br_index), ) => { if debruijn_index == self.current_index && anon_index == br_index { self.found_it = true; return; } } (Some(rl::Region::EarlyBound(_, id, _)), ty::BrNamed(def_id, _)) => { debug!("EarlyBound id={:?} def_id={:?}", id, def_id); if id == def_id { self.found_it = true; return; // we can stop visiting now } } (Some(rl::Region::LateBound(debruijn_index, _, id, _)), ty::BrNamed(def_id, _)) => { debug!("FindNestedTypeVisitor::visit_ty: LateBound depth = {:?}", debruijn_index,); debug!("id={:?}", id); debug!("def_id={:?}", def_id); if debruijn_index == self.current_index && id == def_id { self.found_it = true; return; // we can stop visiting now } } ( Some( rl::Region::Static | rl::Region::EarlyBound(_, _, _) | rl::Region::LateBound(_, _, _, _) | rl::Region::LateBoundAnon(_, _, _) | rl::Region::Free(_, _), ) | None, _, ) => { debug!("no arg found"); } } } fn visit_ty(&mut self, arg: &'tcx hir::Ty<'tcx>) { // ignore nested types // // If you have a type like `Foo<'a, &Ty>` we // are only interested in the immediate lifetimes ('a). // // Making `visit_ty` empty will ignore the `&Ty` embedded // inside, it will get reached by the outer visitor. debug!("`Ty` corresponding to a struct is {:?}", arg); } }<|fim▁end|>
fn visit_lifetime(&mut self, lifetime: &hir::Lifetime) { match (self.tcx.named_region(lifetime.hir_id), self.bound_region) { // the lifetime of the TyPath!
<|file_name|>sensor.py<|end_file_name|><|fim▁begin|>"""Support for GTFS (Google/General Transport Format Schema).""" from __future__ import annotations import datetime import logging import os import threading from typing import Any, Callable import pygtfs from sqlalchemy.sql import text import voluptuous as vol from homeassistant.components.sensor import PLATFORM_SCHEMA, SensorEntity from homeassistant.const import ( ATTR_ATTRIBUTION, CONF_NAME, CONF_OFFSET, DEVICE_CLASS_TIMESTAMP, STATE_UNKNOWN, ) from homeassistant.core import HomeAssistant import homeassistant.helpers.config_validation as cv from homeassistant.helpers.typing import ConfigType, DiscoveryInfoType from homeassistant.util import slugify import homeassistant.util.dt as dt_util _LOGGER = logging.getLogger(__name__) ATTR_ARRIVAL = "arrival" ATTR_BICYCLE = "trip_bikes_allowed_state" ATTR_DAY = "day" ATTR_FIRST = "first" ATTR_DROP_OFF_DESTINATION = "destination_stop_drop_off_type_state" ATTR_DROP_OFF_ORIGIN = "origin_stop_drop_off_type_state" ATTR_INFO = "info" ATTR_OFFSET = CONF_OFFSET ATTR_LAST = "last" ATTR_LOCATION_DESTINATION = "destination_station_location_type_name" ATTR_LOCATION_ORIGIN = "origin_station_location_type_name" ATTR_PICKUP_DESTINATION = "destination_stop_pickup_type_state" ATTR_PICKUP_ORIGIN = "origin_stop_pickup_type_state" ATTR_ROUTE_TYPE = "route_type_name" ATTR_TIMEPOINT_DESTINATION = "destination_stop_timepoint_exact" ATTR_TIMEPOINT_ORIGIN = "origin_stop_timepoint_exact" ATTR_WHEELCHAIR = "trip_wheelchair_access_available" ATTR_WHEELCHAIR_DESTINATION = "destination_station_wheelchair_boarding_available" ATTR_WHEELCHAIR_ORIGIN = "origin_station_wheelchair_boarding_available" CONF_DATA = "data" CONF_DESTINATION = "destination" CONF_ORIGIN = "origin" CONF_TOMORROW = "include_tomorrow" DEFAULT_NAME = "GTFS Sensor" DEFAULT_PATH = "gtfs"<|fim▁hole|>DROP_OFF_TYPE_DEFAULT = STATE_UNKNOWN DROP_OFF_TYPE_OPTIONS = { 0: "Regular", 1: "Not Available", 2: "Call Agency", 3: "Contact Driver", } ICON = "mdi:train" ICONS = { 0: "mdi:tram", 1: "mdi:subway", 2: "mdi:train", 3: "mdi:bus", 4: "mdi:ferry", 5: "mdi:train-variant", 6: "mdi:gondola", 7: "mdi:stairs", 100: "mdi:train", 101: "mdi:train", 102: "mdi:train", 103: "mdi:train", 104: "mdi:train-car", 105: "mdi:train", 106: "mdi:train", 107: "mdi:train", 108: "mdi:train", 109: "mdi:train", 110: "mdi:train-variant", 111: "mdi:train-variant", 112: "mdi:train-variant", 113: "mdi:train-variant", 114: "mdi:train-variant", 115: "mdi:train-variant", 116: "mdi:train-variant", 117: "mdi:train-variant", 200: "mdi:bus", 201: "mdi:bus", 202: "mdi:bus", 203: "mdi:bus", 204: "mdi:bus", 205: "mdi:bus", 206: "mdi:bus", 207: "mdi:bus", 208: "mdi:bus", 209: "mdi:bus", 400: "mdi:subway-variant", 401: "mdi:subway-variant", 402: "mdi:subway", 403: "mdi:subway-variant", 404: "mdi:subway-variant", 405: "mdi:subway-variant", 700: "mdi:bus", 701: "mdi:bus", 702: "mdi:bus", 703: "mdi:bus", 704: "mdi:bus", 705: "mdi:bus", 706: "mdi:bus", 707: "mdi:bus", 708: "mdi:bus", 709: "mdi:bus", 710: "mdi:bus", 711: "mdi:bus", 712: "mdi:bus-school", 713: "mdi:bus-school", 714: "mdi:bus", 715: "mdi:bus", 716: "mdi:bus", 800: "mdi:bus", 900: "mdi:tram", 901: "mdi:tram", 902: "mdi:tram", 903: "mdi:tram", 904: "mdi:tram", 905: "mdi:tram", 906: "mdi:tram", 1000: "mdi:ferry", 1100: "mdi:airplane", 1200: "mdi:ferry", 1300: "mdi:airplane", 1400: "mdi:gondola", 1500: "mdi:taxi", 1501: "mdi:taxi", 1502: "mdi:ferry", 1503: "mdi:train-variant", 1504: "mdi:bicycle-basket", 1505: "mdi:taxi", 1506: "mdi:car-multiple", 
1507: "mdi:taxi", 1700: "mdi:train-car", 1702: "mdi:horse-variant", } LOCATION_TYPE_DEFAULT = "Stop" LOCATION_TYPE_OPTIONS = { 0: "Station", 1: "Stop", 2: "Station Entrance/Exit", 3: "Other", } PICKUP_TYPE_DEFAULT = STATE_UNKNOWN PICKUP_TYPE_OPTIONS = { 0: "Regular", 1: "None Available", 2: "Call Agency", 3: "Contact Driver", } ROUTE_TYPE_OPTIONS = { 0: "Tram", 1: "Subway", 2: "Rail", 3: "Bus", 4: "Ferry", 5: "Cable Tram", 6: "Aerial Lift", 7: "Funicular", 100: "Railway Service", 101: "High Speed Rail Service", 102: "Long Distance Trains", 103: "Inter Regional Rail Service", 104: "Car Transport Rail Service", 105: "Sleeper Rail Service", 106: "Regional Rail Service", 107: "Tourist Railway Service", 108: "Rail Shuttle (Within Complex)", 109: "Suburban Railway", 110: "Replacement Rail Service", 111: "Special Rail Service", 112: "Lorry Transport Rail Service", 113: "All Rail Services", 114: "Cross-Country Rail Service", 115: "Vehicle Transport Rail Service", 116: "Rack and Pinion Railway", 117: "Additional Rail Service", 200: "Coach Service", 201: "International Coach Service", 202: "National Coach Service", 203: "Shuttle Coach Service", 204: "Regional Coach Service", 205: "Special Coach Service", 206: "Sightseeing Coach Service", 207: "Tourist Coach Service", 208: "Commuter Coach Service", 209: "All Coach Services", 400: "Urban Railway Service", 401: "Metro Service", 402: "Underground Service", 403: "Urban Railway Service", 404: "All Urban Railway Services", 405: "Monorail", 700: "Bus Service", 701: "Regional Bus Service", 702: "Express Bus Service", 703: "Stopping Bus Service", 704: "Local Bus Service", 705: "Night Bus Service", 706: "Post Bus Service", 707: "Special Needs Bus", 708: "Mobility Bus Service", 709: "Mobility Bus for Registered Disabled", 710: "Sightseeing Bus", 711: "Shuttle Bus", 712: "School Bus", 713: "School and Public Service Bus", 714: "Rail Replacement Bus Service", 715: "Demand and Response Bus Service", 716: "All Bus Services", 800: "Trolleybus Service", 900: "Tram Service", 901: "City Tram Service", 902: "Local Tram Service", 903: "Regional Tram Service", 904: "Sightseeing Tram Service", 905: "Shuttle Tram Service", 906: "All Tram Services", 1000: "Water Transport Service", 1100: "Air Service", 1200: "Ferry Service", 1300: "Aerial Lift Service", 1400: "Funicular Service", 1500: "Taxi Service", 1501: "Communal Taxi Service", 1502: "Water Taxi Service", 1503: "Rail Taxi Service", 1504: "Bike Taxi Service", 1505: "Licensed Taxi Service", 1506: "Private Hire Service Vehicle", 1507: "All Taxi Services", 1700: "Miscellaneous Service", 1702: "Horse-drawn Carriage", } TIMEPOINT_DEFAULT = True TIMEPOINT_OPTIONS = {0: False, 1: True} WHEELCHAIR_ACCESS_DEFAULT = STATE_UNKNOWN WHEELCHAIR_ACCESS_OPTIONS = {1: True, 2: False} WHEELCHAIR_BOARDING_DEFAULT = STATE_UNKNOWN WHEELCHAIR_BOARDING_OPTIONS = {1: True, 2: False} PLATFORM_SCHEMA = PLATFORM_SCHEMA.extend( { # type: ignore vol.Required(CONF_ORIGIN): cv.string, vol.Required(CONF_DESTINATION): cv.string, vol.Required(CONF_DATA): cv.string, vol.Optional(CONF_NAME): cv.string, vol.Optional(CONF_OFFSET, default=0): cv.time_period, vol.Optional(CONF_TOMORROW, default=False): cv.boolean, } ) def get_next_departure( schedule: Any, start_station_id: Any, end_station_id: Any, offset: cv.time_period, include_tomorrow: bool = False, ) -> dict: """Get the next departure for the given schedule.""" now = dt_util.now().replace(tzinfo=None) + offset now_date = now.strftime(dt_util.DATE_STR_FORMAT) yesterday = now - datetime.timedelta(days=1) 
yesterday_date = yesterday.strftime(dt_util.DATE_STR_FORMAT) tomorrow = now + datetime.timedelta(days=1) tomorrow_date = tomorrow.strftime(dt_util.DATE_STR_FORMAT) # Fetch all departures for yesterday, today and optionally tomorrow, # up to an overkill maximum in case of a departure every minute for those # days. limit = 24 * 60 * 60 * 2 tomorrow_select = tomorrow_where = tomorrow_order = "" if include_tomorrow: limit = int(limit / 2 * 3) tomorrow_name = tomorrow.strftime("%A").lower() tomorrow_select = f"calendar.{tomorrow_name} AS tomorrow," tomorrow_where = f"OR calendar.{tomorrow_name} = 1" tomorrow_order = f"calendar.{tomorrow_name} DESC," sql_query = f""" SELECT trip.trip_id, trip.route_id, time(origin_stop_time.arrival_time) AS origin_arrival_time, time(origin_stop_time.departure_time) AS origin_depart_time, date(origin_stop_time.departure_time) AS origin_depart_date, origin_stop_time.drop_off_type AS origin_drop_off_type, origin_stop_time.pickup_type AS origin_pickup_type, origin_stop_time.shape_dist_traveled AS origin_dist_traveled, origin_stop_time.stop_headsign AS origin_stop_headsign, origin_stop_time.stop_sequence AS origin_stop_sequence, origin_stop_time.timepoint AS origin_stop_timepoint, time(destination_stop_time.arrival_time) AS dest_arrival_time, time(destination_stop_time.departure_time) AS dest_depart_time, destination_stop_time.drop_off_type AS dest_drop_off_type, destination_stop_time.pickup_type AS dest_pickup_type, destination_stop_time.shape_dist_traveled AS dest_dist_traveled, destination_stop_time.stop_headsign AS dest_stop_headsign, destination_stop_time.stop_sequence AS dest_stop_sequence, destination_stop_time.timepoint AS dest_stop_timepoint, calendar.{yesterday.strftime("%A").lower()} AS yesterday, calendar.{now.strftime("%A").lower()} AS today, {tomorrow_select} calendar.start_date AS start_date, calendar.end_date AS end_date FROM trips trip INNER JOIN calendar calendar ON trip.service_id = calendar.service_id INNER JOIN stop_times origin_stop_time ON trip.trip_id = origin_stop_time.trip_id INNER JOIN stops start_station ON origin_stop_time.stop_id = start_station.stop_id INNER JOIN stop_times destination_stop_time ON trip.trip_id = destination_stop_time.trip_id INNER JOIN stops end_station ON destination_stop_time.stop_id = end_station.stop_id WHERE (calendar.{yesterday.strftime("%A").lower()} = 1 OR calendar.{now.strftime("%A").lower()} = 1 {tomorrow_where} ) AND start_station.stop_id = :origin_station_id AND end_station.stop_id = :end_station_id AND origin_stop_sequence < dest_stop_sequence AND calendar.start_date <= :today AND calendar.end_date >= :today ORDER BY calendar.{yesterday.strftime("%A").lower()} DESC, calendar.{now.strftime("%A").lower()} DESC, {tomorrow_order} origin_stop_time.departure_time LIMIT :limit """ result = schedule.engine.execute( text(sql_query), origin_station_id=start_station_id, end_station_id=end_station_id, today=now_date, limit=limit, ) # Create lookup timetable for today and possibly tomorrow, taking into # account any departures from yesterday scheduled after midnight, # as long as all departures are within the calendar date range. 
timetable = {} yesterday_start = today_start = tomorrow_start = None yesterday_last = today_last = "" for row in result: if row["yesterday"] == 1 and yesterday_date >= row["start_date"]: extras = {"day": "yesterday", "first": None, "last": False} if yesterday_start is None: yesterday_start = row["origin_depart_date"] if yesterday_start != row["origin_depart_date"]: idx = f"{now_date} {row['origin_depart_time']}" timetable[idx] = {**row, **extras} yesterday_last = idx if row["today"] == 1: extras = {"day": "today", "first": False, "last": False} if today_start is None: today_start = row["origin_depart_date"] extras["first"] = True if today_start == row["origin_depart_date"]: idx_prefix = now_date else: idx_prefix = tomorrow_date idx = f"{idx_prefix} {row['origin_depart_time']}" timetable[idx] = {**row, **extras} today_last = idx if ( "tomorrow" in row and row["tomorrow"] == 1 and tomorrow_date <= row["end_date"] ): extras = {"day": "tomorrow", "first": False, "last": None} if tomorrow_start is None: tomorrow_start = row["origin_depart_date"] extras["first"] = True if tomorrow_start == row["origin_depart_date"]: idx = f"{tomorrow_date} {row['origin_depart_time']}" timetable[idx] = {**row, **extras} # Flag last departures. for idx in filter(None, [yesterday_last, today_last]): timetable[idx]["last"] = True _LOGGER.debug("Timetable: %s", sorted(timetable.keys())) item = {} for key in sorted(timetable.keys()): if dt_util.parse_datetime(key) > now: item = timetable[key] _LOGGER.debug( "Departure found for station %s @ %s -> %s", start_station_id, key, item ) break if item == {}: return {} # Format arrival and departure dates and times, accounting for the # possibility of times crossing over midnight. origin_arrival = now if item["origin_arrival_time"] > item["origin_depart_time"]: origin_arrival -= datetime.timedelta(days=1) origin_arrival_time = ( f"{origin_arrival.strftime(dt_util.DATE_STR_FORMAT)} " f"{item['origin_arrival_time']}" ) origin_depart_time = f"{now_date} {item['origin_depart_time']}" dest_arrival = now if item["dest_arrival_time"] < item["origin_depart_time"]: dest_arrival += datetime.timedelta(days=1) dest_arrival_time = ( f"{dest_arrival.strftime(dt_util.DATE_STR_FORMAT)} " f"{item['dest_arrival_time']}" ) dest_depart = dest_arrival if item["dest_depart_time"] < item["dest_arrival_time"]: dest_depart += datetime.timedelta(days=1) dest_depart_time = ( f"{dest_depart.strftime(dt_util.DATE_STR_FORMAT)} " f"{item['dest_depart_time']}" ) depart_time = dt_util.parse_datetime(origin_depart_time) arrival_time = dt_util.parse_datetime(dest_arrival_time) origin_stop_time = { "Arrival Time": origin_arrival_time, "Departure Time": origin_depart_time, "Drop Off Type": item["origin_drop_off_type"], "Pickup Type": item["origin_pickup_type"], "Shape Dist Traveled": item["origin_dist_traveled"], "Headsign": item["origin_stop_headsign"], "Sequence": item["origin_stop_sequence"], "Timepoint": item["origin_stop_timepoint"], } destination_stop_time = { "Arrival Time": dest_arrival_time, "Departure Time": dest_depart_time, "Drop Off Type": item["dest_drop_off_type"], "Pickup Type": item["dest_pickup_type"], "Shape Dist Traveled": item["dest_dist_traveled"], "Headsign": item["dest_stop_headsign"], "Sequence": item["dest_stop_sequence"], "Timepoint": item["dest_stop_timepoint"], } return { "trip_id": item["trip_id"], "route_id": item["route_id"], "day": item["day"], "first": item["first"], "last": item["last"], "departure_time": depart_time, "arrival_time": arrival_time, "origin_stop_time": 
origin_stop_time, "destination_stop_time": destination_stop_time, } def setup_platform( hass: HomeAssistant, config: ConfigType, add_entities: Callable[[list], None], discovery_info: DiscoveryInfoType | None = None, ) -> None: """Set up the GTFS sensor.""" gtfs_dir = hass.config.path(DEFAULT_PATH) data = config[CONF_DATA] origin = config.get(CONF_ORIGIN) destination = config.get(CONF_DESTINATION) name = config.get(CONF_NAME) offset = config.get(CONF_OFFSET) include_tomorrow = config[CONF_TOMORROW] if not os.path.exists(gtfs_dir): os.makedirs(gtfs_dir) if not os.path.exists(os.path.join(gtfs_dir, data)): _LOGGER.error("The given GTFS data file/folder was not found") return (gtfs_root, _) = os.path.splitext(data) sqlite_file = f"{gtfs_root}.sqlite?check_same_thread=False" joined_path = os.path.join(gtfs_dir, sqlite_file) gtfs = pygtfs.Schedule(joined_path) # pylint: disable=no-member if not gtfs.feeds: pygtfs.append_feed(gtfs, os.path.join(gtfs_dir, data)) add_entities( [GTFSDepartureSensor(gtfs, name, origin, destination, offset, include_tomorrow)] ) class GTFSDepartureSensor(SensorEntity): """Implementation of a GTFS departure sensor.""" _attr_device_class = DEVICE_CLASS_TIMESTAMP def __init__( self, gtfs: Any, name: Any | None, origin: Any, destination: Any, offset: datetime.timedelta, include_tomorrow: bool, ) -> None: """Initialize the sensor.""" self._pygtfs = gtfs self.origin = origin self.destination = destination self._include_tomorrow = include_tomorrow self._offset = offset self._custom_name = name self._available = False self._icon = ICON self._name = "" self._state: str | None = None self._attributes = {} self._agency = None self._departure = {} self._destination = None self._origin = None self._route = None self._trip = None self.lock = threading.Lock() self.update() @property def name(self) -> str: """Return the name of the sensor.""" return self._name @property def state(self) -> str | None: # type: ignore """Return the state of the sensor.""" return self._state @property def available(self) -> bool: """Return True if entity is available.""" return self._available @property def extra_state_attributes(self) -> dict: """Return the state attributes.""" return self._attributes @property def icon(self) -> str: """Icon to use in the frontend, if any.""" return self._icon def update(self) -> None: """Get the latest data from GTFS and update the states.""" with self.lock: # Fetch valid stop information once if not self._origin: stops = self._pygtfs.stops_by_id(self.origin) if not stops: self._available = False _LOGGER.warning("Origin stop ID %s not found", self.origin) return self._origin = stops[0] if not self._destination: stops = self._pygtfs.stops_by_id(self.destination) if not stops: self._available = False _LOGGER.warning( "Destination stop ID %s not found", self.destination ) return self._destination = stops[0] self._available = True # Fetch next departure self._departure = get_next_departure( self._pygtfs, self.origin, self.destination, self._offset, self._include_tomorrow, ) # Define the state as a UTC timestamp with ISO 8601 format if not self._departure: self._state = None else: self._state = dt_util.as_utc( self._departure["departure_time"] ).isoformat() # Fetch trip and route details once, unless updated if not self._departure: self._trip = None else: trip_id = self._departure["trip_id"] if not self._trip or self._trip.trip_id != trip_id: _LOGGER.debug("Fetching trip details for %s", trip_id) self._trip = self._pygtfs.trips_by_id(trip_id)[0] route_id = 
self._departure["route_id"] if not self._route or self._route.route_id != route_id: _LOGGER.debug("Fetching route details for %s", route_id) self._route = self._pygtfs.routes_by_id(route_id)[0] # Fetch agency details exactly once if self._agency is None and self._route: _LOGGER.debug("Fetching agency details for %s", self._route.agency_id) try: self._agency = self._pygtfs.agencies_by_id(self._route.agency_id)[0] except IndexError: _LOGGER.warning( "Agency ID '%s' was not found in agency table, " "you may want to update the routes database table " "to fix this missing reference", self._route.agency_id, ) self._agency = False # Assign attributes, icon and name self.update_attributes() if self._route: self._icon = ICONS.get(self._route.route_type, ICON) else: self._icon = ICON name = ( f"{getattr(self._agency, 'agency_name', DEFAULT_NAME)} " f"{self.origin} to {self.destination} next departure" ) if not self._departure: name = f"{DEFAULT_NAME}" self._name = self._custom_name or name def update_attributes(self) -> None: """Update state attributes.""" # Add departure information if self._departure: self._attributes[ATTR_ARRIVAL] = dt_util.as_utc( self._departure["arrival_time"] ).isoformat() self._attributes[ATTR_DAY] = self._departure["day"] if self._departure[ATTR_FIRST] is not None: self._attributes[ATTR_FIRST] = self._departure["first"] elif ATTR_FIRST in self._attributes: del self._attributes[ATTR_FIRST] if self._departure[ATTR_LAST] is not None: self._attributes[ATTR_LAST] = self._departure["last"] elif ATTR_LAST in self._attributes: del self._attributes[ATTR_LAST] else: if ATTR_ARRIVAL in self._attributes: del self._attributes[ATTR_ARRIVAL] if ATTR_DAY in self._attributes: del self._attributes[ATTR_DAY] if ATTR_FIRST in self._attributes: del self._attributes[ATTR_FIRST] if ATTR_LAST in self._attributes: del self._attributes[ATTR_LAST] # Add contextual information self._attributes[ATTR_OFFSET] = self._offset.total_seconds() / 60 if self._state is None: self._attributes[ATTR_INFO] = ( "No more departures" if self._include_tomorrow else "No more departures today" ) elif ATTR_INFO in self._attributes: del self._attributes[ATTR_INFO] if self._agency: self._attributes[ATTR_ATTRIBUTION] = self._agency.agency_name elif ATTR_ATTRIBUTION in self._attributes: del self._attributes[ATTR_ATTRIBUTION] # Add extra metadata key = "agency_id" if self._agency and key not in self._attributes: self.append_keys(self.dict_for_table(self._agency), "Agency") key = "origin_station_stop_id" if self._origin and key not in self._attributes: self.append_keys(self.dict_for_table(self._origin), "Origin Station") self._attributes[ATTR_LOCATION_ORIGIN] = LOCATION_TYPE_OPTIONS.get( self._origin.location_type, LOCATION_TYPE_DEFAULT ) self._attributes[ATTR_WHEELCHAIR_ORIGIN] = WHEELCHAIR_BOARDING_OPTIONS.get( self._origin.wheelchair_boarding, WHEELCHAIR_BOARDING_DEFAULT ) key = "destination_station_stop_id" if self._destination and key not in self._attributes: self.append_keys( self.dict_for_table(self._destination), "Destination Station" ) self._attributes[ATTR_LOCATION_DESTINATION] = LOCATION_TYPE_OPTIONS.get( self._destination.location_type, LOCATION_TYPE_DEFAULT ) self._attributes[ ATTR_WHEELCHAIR_DESTINATION ] = WHEELCHAIR_BOARDING_OPTIONS.get( self._destination.wheelchair_boarding, WHEELCHAIR_BOARDING_DEFAULT ) # Manage Route metadata key = "route_id" if not self._route and key in self._attributes: self.remove_keys("Route") elif self._route and ( key not in self._attributes or self._attributes[key] != 
self._route.route_id ): self.append_keys(self.dict_for_table(self._route), "Route") self._attributes[ATTR_ROUTE_TYPE] = ROUTE_TYPE_OPTIONS[ self._route.route_type ] # Manage Trip metadata key = "trip_id" if not self._trip and key in self._attributes: self.remove_keys("Trip") elif self._trip and ( key not in self._attributes or self._attributes[key] != self._trip.trip_id ): self.append_keys(self.dict_for_table(self._trip), "Trip") self._attributes[ATTR_BICYCLE] = BICYCLE_ALLOWED_OPTIONS.get( self._trip.bikes_allowed, BICYCLE_ALLOWED_DEFAULT ) self._attributes[ATTR_WHEELCHAIR] = WHEELCHAIR_ACCESS_OPTIONS.get( self._trip.wheelchair_accessible, WHEELCHAIR_ACCESS_DEFAULT ) # Manage Stop Times metadata prefix = "origin_stop" if self._departure: self.append_keys(self._departure["origin_stop_time"], prefix) self._attributes[ATTR_DROP_OFF_ORIGIN] = DROP_OFF_TYPE_OPTIONS.get( self._departure["origin_stop_time"]["Drop Off Type"], DROP_OFF_TYPE_DEFAULT, ) self._attributes[ATTR_PICKUP_ORIGIN] = PICKUP_TYPE_OPTIONS.get( self._departure["origin_stop_time"]["Pickup Type"], PICKUP_TYPE_DEFAULT ) self._attributes[ATTR_TIMEPOINT_ORIGIN] = TIMEPOINT_OPTIONS.get( self._departure["origin_stop_time"]["Timepoint"], TIMEPOINT_DEFAULT ) else: self.remove_keys(prefix) prefix = "destination_stop" if self._departure: self.append_keys(self._departure["destination_stop_time"], prefix) self._attributes[ATTR_DROP_OFF_DESTINATION] = DROP_OFF_TYPE_OPTIONS.get( self._departure["destination_stop_time"]["Drop Off Type"], DROP_OFF_TYPE_DEFAULT, ) self._attributes[ATTR_PICKUP_DESTINATION] = PICKUP_TYPE_OPTIONS.get( self._departure["destination_stop_time"]["Pickup Type"], PICKUP_TYPE_DEFAULT, ) self._attributes[ATTR_TIMEPOINT_DESTINATION] = TIMEPOINT_OPTIONS.get( self._departure["destination_stop_time"]["Timepoint"], TIMEPOINT_DEFAULT ) else: self.remove_keys(prefix) @staticmethod def dict_for_table(resource: Any) -> dict: """Return a dictionary for the SQLAlchemy resource given.""" return { col: getattr(resource, col) for col in resource.__table__.columns.keys() } def append_keys(self, resource: dict, prefix: str | None = None) -> None: """Properly format key val pairs to append to attributes.""" for attr, val in resource.items(): if val == "" or val is None or attr == "feed_id": continue key = attr if prefix and not key.startswith(prefix): key = f"{prefix} {key}" key = slugify(key) self._attributes[key] = val def remove_keys(self, prefix: str) -> None: """Remove attributes whose key starts with prefix.""" self._attributes = { k: v for k, v in self._attributes.items() if not k.startswith(prefix) }<|fim▁end|>
BICYCLE_ALLOWED_DEFAULT = STATE_UNKNOWN
BICYCLE_ALLOWED_OPTIONS = {1: True, 2: False}
<|file_name|>plugin.py<|end_file_name|><|fim▁begin|># -*- coding: UTF-8 -*- ## Zap-History Browser by AliAbdul from Components.ActionMap import ActionMap from Components.config import config, ConfigInteger, ConfigSelection, ConfigSubsection, getConfigListEntry from Components.ConfigList import ConfigListScreen from Components.Label import Label from Components.Language import language<|fim▁hole|>from Components.MultiContent import MultiContentEntryText from enigma import eListboxPythonMultiContent, eServiceCenter, gFont, getDesktop, eSize from os import environ from Plugins.Plugin import PluginDescriptor from Screens.ChannelSelection import ChannelSelection from Screens.ParentalControlSetup import ProtectedScreen from Screens.Screen import Screen from Tools.Directories import resolveFilename, SCOPE_LANGUAGE, SCOPE_PLUGINS import gettext ################################################ def localeInit(): lang = language.getLanguage() environ["LANGUAGE"] = lang[:2] gettext.bindtextdomain("enigma2", resolveFilename(SCOPE_LANGUAGE)) gettext.textdomain("enigma2") gettext.bindtextdomain("ZapHistoryBrowser", "%s%s" % (resolveFilename(SCOPE_PLUGINS), "Extensions/ZapHistoryBrowser/locale/")) def _(txt): t = gettext.dgettext("ZapHistoryBrowser", txt) if t == txt: t = gettext.gettext(txt) return t localeInit() language.addCallback(localeInit) ################################################ config.plugins.ZapHistoryConfigurator = ConfigSubsection() config.plugins.ZapHistoryConfigurator.enable_zap_history = ConfigSelection(choices = {"off": _("disabled"), "on": _("enabled"), "parental_lock": _("disabled at parental lock")}, default="on") config.plugins.ZapHistoryConfigurator.maxEntries_zap_history = ConfigInteger(default=20, limits=(1, 60)) ################################################ def addToHistory(instance, ref): if config.plugins.ZapHistoryConfigurator.enable_zap_history.value == "off": return if config.ParentalControl.configured.value and config.plugins.ZapHistoryConfigurator.enable_zap_history.value == "parental_lock": if parentalControl.getProtectionLevel(ref.toCompareString()) != -1: return if instance.servicePath is not None: tmp = instance.servicePath[:] tmp.append(ref) try: del instance.history[instance.history_pos+1:] except: pass instance.history.append(tmp) hlen = len(instance.history) if hlen > config.plugins.ZapHistoryConfigurator.maxEntries_zap_history.value: del instance.history[0] hlen -= 1 instance.history_pos = hlen-1 ChannelSelection.addToHistory = addToHistory ################################################ class ZapHistoryConfigurator(ConfigListScreen, Screen): skin = """ <screen position="center,center" size="420,70" title="%s" > <widget name="config" position="0,0" size="420,70" scrollbarMode="showOnDemand" /> </screen>""" % _("Zap-History Configurator") def __init__(self, session): Screen.__init__(self, session) self.session = session ConfigListScreen.__init__(self, [ getConfigListEntry(_("Enable zap history:"), config.plugins.ZapHistoryConfigurator.enable_zap_history), getConfigListEntry(_("Maximum zap history entries:"), config.plugins.ZapHistoryConfigurator.maxEntries_zap_history)]) self["actions"] = ActionMap(["OkCancelActions"], {"ok": self.save, "cancel": self.exit}, -2) def save(self): for x in self["config"].list: x[1].save() self.close() def exit(self): for x in self["config"].list: x[1].cancel() self.close() ################################################ class ZapHistoryBrowserList(MenuList): def __init__(self, list, enableWrapAround=True): 
MenuList.__init__(self, list, enableWrapAround, eListboxPythonMultiContent) desktopSize = getDesktop(0).size() if desktopSize.width() == 1920: self.l.setItemHeight(30) self.l.setFont(0, gFont("Regular", 28)) self.l.setFont(1, gFont("Regular", 25)) elif desktopSize.width() == 1280: self.l.setItemHeight(21) self.l.setFont(0, gFont("Regular", 21)) self.l.setFont(1, gFont("Regular", 16)) else: self.l.setItemHeight(21) self.l.setFont(0, gFont("Regular", 21)) self.l.setFont(1, gFont("Regular", 16)) def ZapHistoryBrowserListEntry(serviceName, eventName): desktopSize = getDesktop(0).size() if desktopSize.width() == 1920: res = [serviceName] res.append(MultiContentEntryText(pos=(0, 0), size=(230, 30), font=0, text=serviceName)) res.append(MultiContentEntryText(pos=(240, 0), size=(550, 30), font=1, text=eventName)) return res elif desktopSize.width() == 1280: res = [serviceName] res.append(MultiContentEntryText(pos=(0, 0), size=(180, 22), font=0, text=serviceName)) res.append(MultiContentEntryText(pos=(190, 0), size=(550, 16), font=1, text=eventName)) return res else: res = [serviceName] res.append(MultiContentEntryText(pos=(0, 0), size=(180, 22), font=0, text=serviceName)) res.append(MultiContentEntryText(pos=(190, 0), size=(550, 16), font=1, text=eventName)) return res ################################################ class ZapHistoryBrowser(Screen, ProtectedScreen): skin = """ <screen position="670,440" size="560,210" title="%s" > <ePixmap pixmap="skin_default/buttons/red.png" position="0,0" size="140,40" transparent="1" alphatest="on" /> <ePixmap pixmap="skin_default/buttons/green.png" position="140,0" size="140,40" transparent="1" alphatest="on" /> <ePixmap pixmap="skin_default/buttons/yellow.png" position="280,0" size="140,40" transparent="1" alphatest="on" /> <ePixmap pixmap="skin_default/buttons/blue.png" position="420,0" size="140,40" transparent="1" alphatest="on" /> <widget name="key_red" position="0,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" /> <widget name="key_green" position="140,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" /> <widget name="key_yellow" position="280,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" /> <widget name="key_blue" position="420,0" zPosition="1" size="140,40" font="Regular;20" valign="center" halign="center" backgroundColor="#1f771f" transparent="1" /> <widget name="list" position="0,40" size="560,180" scrollbarMode="showOnDemand" /> </screen>""" % _("Zap-History") def __init__(self, session, servicelist): Screen.__init__(self, session) ProtectedScreen.__init__(self) self.session = session self.servicelist = servicelist self.serviceHandler = eServiceCenter.getInstance() self.allowChanges = True self["list"] = ZapHistoryBrowserList([]) self["key_red"] = Label(_("Clear")) self["key_green"] = Label(_("Delete")) self["key_yellow"] = Label(_("Zap+Close")) self["key_blue"] = Label(_("Config")) self["actions"] = ActionMap(["OkCancelActions", "ColorActions"], { "ok": self.zap, "cancel": self.close, "red": self.clear, "green": self.delete, "yellow": self.zapAndClose, "blue": self.config }, prio=-1) self.onLayoutFinish.append(self.buildList) def buildList(self): list = [] for x in self.servicelist.history: if len(x) == 2: # Single-Bouquet ref = x[1] else: # Multi-Bouquet ref = x[2] info = self.serviceHandler.info(ref) if info: name = 
info.getName(ref).replace('\xc2\x86', '').replace('\xc2\x87', '') event = info.getEvent(ref) if event is not None: eventName = event.getEventName() if eventName is None: eventName = "" else: eventName = "" else: name = "N/A" eventName = "" list.append(ZapHistoryBrowserListEntry(name, eventName)) list.reverse() self["list"].setList(list) def zap(self): length = len(self.servicelist.history) if length > 0: self.servicelist.history_pos = (length - self["list"].getSelectionIndex()) - 1 self.servicelist.setHistoryPath() def clear(self): if self.allowChanges: for i in range(0, len(self.servicelist.history)): del self.servicelist.history[0] self.buildList() self.servicelist.history_pos = 0 def delete(self): if self.allowChanges: length = len(self.servicelist.history) if length > 0: idx = (length - self["list"].getSelectionIndex()) - 1 del self.servicelist.history[idx] self.buildList() currRef = self.session.nav.getCurrentlyPlayingServiceReference() idx = 0 for x in self.servicelist.history: if len(x) == 2: # Single-Bouquet ref = x[1] else: # Multi-Bouquet ref = x[2] if ref == currRef: self.servicelist.history_pos = idx break else: idx += 1 def zapAndClose(self): self.zap() self.close() def config(self): if self.allowChanges: self.session.open(ZapHistoryConfigurator) def isProtected(self): return config.ParentalControl.setuppinactive.value and config.ParentalControl.configured.value def pinEntered(self, result): if result is None: self.allowChanges = False elif not result: self.allowChanges = False else: self.allowChanges = True ################################################ def main(session, servicelist, **kwargs): session.open(ZapHistoryBrowser, servicelist) def Plugins(**kwargs): return PluginDescriptor(name=_("Zap-History Browser"), where=PluginDescriptor.WHERE_EXTENSIONSMENU, fnc=main)<|fim▁end|>
from Components.MenuList import MenuList
<|file_name|>XmlAttribute.java<|end_file_name|><|fim▁begin|>package org.polyglotted.xpathstax.model; import com.google.common.base.Splitter; import com.google.common.collect.Iterables; import org.codehaus.stax2.XMLStreamReader2; import org.polyglotted.xpathstax.data.Value; import javax.annotation.concurrent.ThreadSafe; import java.util.Iterator; import java.util.Map.Entry; import java.util.concurrent.atomic.AtomicInteger; import static com.google.common.base.Preconditions.checkArgument; import static com.google.common.base.Preconditions.checkNotNull; @SuppressWarnings("WeakerAccess") @ThreadSafe public class XmlAttribute { private static final String NP_SPACE = String.valueOf((char) 22); private static final String EQUALS = "="; private static final Splitter SPACE_SPLITTER = Splitter.on(" ").trimResults().omitEmptyStrings(); private static final Splitter NPSPACE_SPLITTER = Splitter.on(NP_SPACE).trimResults().omitEmptyStrings(); private static final Splitter EQUALS_SPLITTER = Splitter.on(EQUALS).trimResults().omitEmptyStrings(); public static final XmlAttribute EMPTY = XmlAttribute.from(""); private final StringBuffer buffer = new StringBuffer(); private AtomicInteger count = new AtomicInteger(0); public static XmlAttribute from(String attributeString) { XmlAttribute attr = new XmlAttribute(); Iterable<String> attributes = SPACE_SPLITTER.split(attributeString); for (String value : attributes) { Iterator<String> iter = splitByEquals(value); attr.add(iter.next(), iter.hasNext() ? iter.next() : ""); } return attr; } public static XmlAttribute from(XMLStreamReader2 xmlr) { XmlAttribute attr = new XmlAttribute(); for (int i = 0; i < xmlr.getAttributeCount(); i++) { attr.add(xmlr.getAttributeLocalName(i), xmlr.getAttributeValue(i)); } return attr; } public void add(String name, String value) { checkArgument(!name.contains(EQUALS)); buffer.append(buildKey(name)); buffer.append(buildValue(value)); count.incrementAndGet(); } public int count() { return count.get(); } public boolean contains(String name) { return buffer.indexOf(buildKey(name)) >= 0; } public boolean contains(String name, String value) { return buffer.indexOf(buildKey(name) + buildValue(value)) >= 0; } public boolean contains(XmlAttribute inner) { if (inner == null) return false; if (inner == this) return true; if (inner.count() == 1) { return buffer.indexOf(inner.buffer.toString()) >= 0; } boolean result = true;<|fim▁hole|> for (String part : NPSPACE_SPLITTER.split(inner.buffer)) { if (buffer.indexOf(NP_SPACE + part) < 0) { result = false; break; } } return result; } public Value get(String name) { String result = null; final String key = buildKey(name); int keyIndex = buffer.indexOf(key); if (keyIndex >= 0) { int fromIndex = keyIndex + key.length(); int lastIndex = buffer.indexOf(NP_SPACE, fromIndex); result = (lastIndex >= 0) ? 
buffer.substring(fromIndex, lastIndex) : buffer.substring(fromIndex); } return Value.of(result); } public Iterable<Entry<String, Value>> iterate() { return Iterables.transform(NPSPACE_SPLITTER.split(buffer), AttrEntry::new); } @Override public String toString() { return buffer.toString(); } private static String buildKey(String name) { return NP_SPACE + checkNotNull(name) + EQUALS; } private static String buildValue(String value) { return checkNotNull(value).replaceAll("'", "").replaceAll("\"", ""); } private static Iterator<String> splitByEquals(String value) { Iterator<String> iter = EQUALS_SPLITTER.split(value).iterator(); checkArgument(iter.hasNext(), "unable to parse attribute " + value); return iter; } private static class AttrEntry implements Entry<String, Value> { private final String key; private final Value value; AttrEntry(String data) { Iterator<String> iter = splitByEquals(data); this.key = iter.next(); this.value = iter.hasNext() ? Value.of(iter.next()) : Value.of(null); } @Override public String getKey() { return key; } @Override public Value getValue() { return value; } @Override public Value setValue(Value value) { throw new UnsupportedOperationException(); } } }<|fim▁end|>
<|file_name|>test_botparse.py<|end_file_name|><|fim▁begin|>from boten.core import BaseBot import payloads class TestBot(BaseBot): def command_arg_bot(self, user_name): yield "hello {}".format(user_name) def command_no_arg_bot(self): yield "hello" <|fim▁hole|> def command_two_message_bot(self): yield "message1" yield "message2" def foo(self): pass def test_available_commands(): bot = TestBot({}) available_commands = bot.commands assert "arg_bot" in available_commands assert "no_arg_bot" in available_commands assert "optional_arg_bot" in available_commands assert "two_message_bot" in available_commands assert "foo" not in available_commands def test_arg_bot_with_arg(): bot = TestBot({}) response = list(bot.run_command(payloads.arg_bot_with_arg)) assert response[0] == "hello derp" def test_arg_bot_with_no_args(): bot = TestBot({}) response = list(bot.run_command(payloads.arg_bot_with_no_args)) assert response[0].startswith("Got TypeError") # Help message def test_no_arg_bot_without_arg(): bot = TestBot({}) response = list(bot.run_command(payloads.no_arg_bot_without_arg)) assert response[0] == "hello" def test_no_arg_bot_with_arg(): bot = TestBot({}) response = list(bot.run_command(payloads.no_arg_bot_with_arg)) assert response[0].startswith("Got TypeError") # Help message def test_optional_arg_bot_with_optional_arg(): bot = TestBot({}) response = list(bot.run_command(payloads.optional_arg_bot_with_optional_arg)) assert response[0] == 'hello derp' def test_optional_arg_bot_with_no_arg(): bot = TestBot({}) response = list(bot.run_command(payloads.optional_arg_bot_with_no_arg)) assert response[0] == 'hello default' def test_two_message_bot(): bot = TestBot({}) response = list(bot.run_command(payloads.two_message_bot)) assert len(response) == 2 def test_help_subcommand(): bot = TestBot({}) response = list(bot.run_command(payloads.no_arg_bot_with_arg)) assert response[0].startswith("Got TypeError") # Help message<|fim▁end|>
    def command_optional_arg_bot(self, optional="default"):
        yield "hello {}".format(optional)
<|file_name|>LDBDClient.py<|end_file_name|><|fim▁begin|>""" The LDBDClient module provides an API for connecting to and making requests of a LDBDServer. This module requires U{pyGlobus<http://www-itg.lbl.gov/gtg/projects/pyGlobus/>}. This file is part of the Grid LSC User Environment (GLUE) GLUE is free software: you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program. If not, see <http://www.gnu.org/licenses/>. """ from pycbc_glue import git_version __date__ = git_version.date __version__ = git_version.id import sys import os import exceptions import types import re import cPickle import xml.parsers.expat from pyGlobus import io from pyGlobus import security def version(): return __version__ class SimpleLWXMLParser: """ A very simple LIGO_LW XML parser class that reads the only keeps tables that do not contain the strings sngl_ or multi_ The class is not very robust as can have problems if the line breaks do not appear in the standard places in the XML file. """ def __init__(self): """ Constructs an instance. The private variable ignore_pat determines what tables we ignore. """ self.__p = xml.parsers.expat.ParserCreate() self.__in_table = 0 self.__silent = 0 self.__ignore_pat = re.compile(r'.*(sngl_|multi_).*', re.IGNORECASE) self.__p.StartElementHandler = self.start_element self.__p.EndElementHandler = self.end_element def __del__(self): """ Destroys an instance by shutting down and deleting the parser. """ self.__p("",1) del self.__p def start_element(self, name, attrs): """ Callback for start of an XML element. Checks to see if we are about to start a table that matches the ignore pattern. @param name: the name of the tag being opened @type name: string @param attrs: a dictionary of the attributes for the tag being opened @type attrs: dictionary """ if name.lower() == "table": for attr in attrs.keys(): if attr.lower() == "name": if self.__ignore_pat.search(attrs[attr]): self.__in_table = 1 def end_element(self, name): """ Callback for the end of an XML element. If the ignore flag is set, reset it so we start outputing the table again. @param name: the name of the tag being closed @type name: string """ if name.lower() == "table": if self.__in_table: self.__in_table = 0 def parse_line(self, line): """ For each line we are passed, call the XML parser. Returns the line if we are outside one of the ignored tables, otherwise returns the empty string. @param line: the line of the LIGO_LW XML file to be parsed @type line: string @return: the line of XML passed in or the null string @rtype: string """ self.__p.Parse(line) if self.__in_table: self.__silent = 1 if not self.__silent: ret = line else: ret = "" if not self.__in_table:<|fim▁hole|> class LDBDClientException(Exception): """Exceptions returned by server""" def __init__(self,args=None): self.args = args class LDBDClient(object): def __init__(self, host, port, identity): """ Open a connection to a LDBD Server and return an instance of class LDBDClient. One of the public methods can then be called to send a request to the server. 
@param host: the host on which the LDBD Server runs @type host: string @param port: port on which the LDBD Server listens @type port: integer @param identity: string which the LDBD Server identifies itself @type identity: string @return: Instance of LDBDClient """ try: self.__connect__(host,port,identity) except Exception, e: raise def __del__(self): """ Disconnect from the LDBD server. @return: None """ self.__disconnect__() def __connect__(self,host,port,identity): """ Attempt to open a connection to the LDBD Server using the 'host' and 'port' and expecting the server to identify itself with a corresponding host certificate. A IOException is raised if the connection cannot be made, but this is caught by the __init__ method above and turned into a LDBDClient exception. @param host: the host on which the LDBD Server runs @type host: string @param port: port on which the LDBD Server listens @type port: integer @param identity: string which the LDBD Server identifies itself @type identity: string @return: None """ # remove the globus tcp port range environment variable if set try: port_range = os.environ["GLOBUS_TCP_PORT_RANGE"] os.environ["GLOBUS_TCP_PORT_RANGE"] = "" except: pass self.host = host self.port = port self.identity = identity # redirect stdout and stderror for now try: f = open("/dev/null", "w") sys.stdout = f sys.stderr = f except: pass try: # create TCPIOAttr instance clientAttr = io.TCPIOAttr() authData = io.AuthData() soc = io.GSITCPSocket() if identity is None: # try an unauthenticated connection clientAttr.set_authentication_mode( io.ioc.GLOBUS_IO_SECURE_AUTHENTICATION_MODE_NONE) clientAttr.set_authorization_mode( io.ioc.GLOBUS_IO_SECURE_AUTHORIZATION_MODE_NONE, authData) clientAttr.set_channel_mode( io.ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_CLEAR) clientAttr.set_delegation_mode( io.ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_NONE) else: # set authentication mode to be GSSAPI clientAttr.set_authentication_mode( io.ioc.GLOBUS_IO_SECURE_AUTHENTICATION_MODE_GSSAPI) # set expected identity authData.set_identity(identity) # set authorization, channel, and delegation modes clientAttr.set_authorization_mode( io.ioc.GLOBUS_IO_SECURE_AUTHORIZATION_MODE_IDENTITY, authData) clientAttr.set_channel_mode( io.ioc.GLOBUS_IO_SECURE_CHANNEL_MODE_CLEAR) clientAttr.set_delegation_mode( io.ioc.GLOBUS_IO_SECURE_DELEGATION_MODE_LIMITED_PROXY) soc.connect(host, port, clientAttr) self.socket = soc self.sfile = soc.makefile("rw") finally: sys.stdout = sys.__stdout__ sys.stderr = sys.__stderr__ f.close() def __disconnect__(self): """ Disconnect from the LDBD Server. @return: None """ try: self.socket.shutdown(2) except: pass def __response__(self): """ Read the response sent back by the LDBD Server. Parse out the return code with 0 for success and non-zero for error, and then the list of strings representing the returned result(s). @return: tuple containing the integer error code and the list of strings representing the output from the server """ f = self.sfile response = "" # Read in 512 byte chunks until there is nothing left to read. # This blocks until the socket is ready for reading and until # 512 bytes are received. If the message is less then 512 bytes # this will block until the server closes the socket. Since # the server always shuts down the socket after sending its # reply this should continue to work for now. 
while 1: input = f.read(size = 512, waitForBytes = 512) response += input if len(input) < 512: break # the response from the server must always end in a null byte try: if response[-1] != '\0': msg = "Bad server reponse format. Contact server administrator." raise LDBDClientException, msg except: msg = "Connection refused. The server may be down or you may not have" + \ "authorization to access this server. Contact server administrator." raise LDBDClientException, msg # delete the last \0 before splitting into strings response = response[0:-1] try: stringList = response.split('\0') code = int(stringList[0]) output = stringList[1:] except Exception, e: msg = "Error parsing response from server : %s" % e try: f.close() except: pass raise LDBDClientException, msg f.close() return code, output def ping(self): """ Ping the LDBD Server and return any message received back as a string. @return: message received (may be empty) from LDBD Server as a string """ msg = "PING\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error pinging server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply def query(self,sql): """ Execute an SQL query on the server and fetch the resulting XML file back. @return: message received (may be empty) from LDBD Server as a string """ msg = "QUERY\0" + sql + "\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error executing query on server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply def insert(self,xmltext): """ Insert the LIGO_LW metadata in the xmltext string into the database. @return: message received (may be empty) from LDBD Server as a string """ msg = "INSERT\0" + xmltext + "\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error executing insert on server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply def insertmap(self,xmltext,lfnpfn_dict): """ Insert the LIGO_LW metadata in the xmltext string into the database. @return: message received (may be empty) from LDBD Server as a string """ pmsg = cPickle.dumps(lfnpfn_dict) msg = "INSERTMAP\0" + xmltext + "\0" + pmsg + "\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error executing insert on server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply def insertdmt(self,xmltext): """ Insert the LIGO_LW metadata in the xmltext string into the database. @return: message received (may be empty) from LDBD Server as a string """ msg = "INSERTDMT\0" + xmltext + "\0" self.sfile.write(msg) ret, output = self.__response__() reply = str(output[0]) if ret: msg = "Error executing insert on server %d:%s" % (ret, reply) raise LDBDClientException, msg return reply<|fim▁end|>
            self.__silent = 0
        return ret
<|file_name|>CheckboxBinding.js<|end_file_name|><|fim▁begin|>import Binding from 'virtualdom/items/Element/Binding/Binding'; import handleDomEvent from 'virtualdom/items/Element/Binding/shared/handleDomEvent'; var CheckboxBinding = Binding.extend({ name: 'checked', render: function () { var node = this.element.node; node.addEventListener( 'change', handleDomEvent, false ); if ( node.attachEvent ) { node.addEventListener( 'click', handleDomEvent, false ); } }, unrender: function () { var node = this.element.node; node.removeEventListener( 'change', handleDomEvent, false ); node.removeEventListener( 'click', handleDomEvent, false ); },<|fim▁hole|> return this.element.node.checked; } }); export default CheckboxBinding;<|fim▁end|>
getValue: function () {
<|file_name|>gridded_mappable_point.py<|end_file_name|><|fim▁begin|>import unittest import transaction import os import csv from pyramid import testing from thesis.models import DBSession from sqlalchemy import create_engine from thesis.models import ( Base, GriddedMappablePoint, Layer ) class TestGriddedMappableItem(unittest.TestCase): def setUp(self): self.config = testing.setUp() engine = create_engine('postgresql+psycopg2://thesis_db_user:[email protected]:5432/thesis_test_db') DBSession.configure(bind=engine) Base.metadata.create_all(engine) with transaction.manager: # Add TestLayer1 test_layer_1 = Layer(name='TestLayer1') test_layer_1.mappable_points = [ GriddedMappablePoint('Point(30 10)'), GriddedMappablePoint('Point(20 10)'), ] DBSession.add(test_layer_1) # Add TestLayer2 test_layer_2 = Layer(name='TestLayer2') test_layer_2.mappable_points = [ GriddedMappablePoint('Point(10 15)'), GriddedMappablePoint('Point(10 15)'), GriddedMappablePoint('Point(30 15)'), ] DBSession.add(test_layer_2) # Add Emu Layer tests_path = os.path.dirname(os.path.abspath(__file__)) test_fixtures_path = os.path.join(tests_path, 'fixtures') emu_csv_path = os.path.join(test_fixtures_path, 'emu.csv') emu_layer = Layer(name='Emu') with open(emu_csv_path, 'rb') as csvfile: emu_reader = csv.reader(csvfile) rownum = 0 header = None for row in emu_reader: # Save header row. if rownum == 0: header = row else: colnum = 0 latitude = 0 longitude = 0 for col in row: column_label = header[colnum] if column_label == "LNGDEC": longitude = col elif column_label == "LATDEC": latitude = col # print '%-8s: %s' % (column_label, col) colnum += 1 if longitude and latitude: mappable_point = GriddedMappablePoint('Point(%s %s)' % (longitude, latitude)) emu_layer.mappable_points.append(mappable_point) rownum += 1 DBSession.add(emu_layer) def tearDown(self): DBSession.remove() testing.tearDown() engine = create_engine('postgresql+psycopg2://thesis_db_user:[email protected]:5432/thesis_test_db') DBSession.configure(bind=engine) # Drop all the models Base.metadata.drop_all(engine) def test_search_layers_by_name(self): test_layer_1 = DBSession.query(Layer).\ filter_by(name='TestLayer1').one() self.assertEqual(test_layer_1.name, 'TestLayer1') self.assertEqual(len(test_layer_1.mappable_points), 2) test_layer_2 = DBSession.query(Layer).\ filter_by(name='TestLayer2').one() self.assertEqual(test_layer_2.name, 'TestLayer2') self.assertEqual(len(test_layer_2.mappable_points), 3) def test_emu_fixure_loaded(self): test_emu_layer = DBSession.query(Layer).\ filter_by(name='Emu').one() self.assertGreater(len(test_emu_layer.mappable_points), 5) def test_get_layer_points_as_geo_json(self): test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one() test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one() q = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=1)<|fim▁hole|> self.assertEqual(result[0].cluster_size, 1) self.assertEqual(result[1].cluster_size, 1) q2 = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=100) result2 = q2.all() # self.assertEqual(result2[0].locations, '{"type":"MultiPoint","coordinates":[[30,10],[20,10]]}') self.assertEqual(result2[0].cluster_size, 2) q3 = GriddedMappablePoint.get_points_as_geojson(test_layer_2, grid_size=1) result3 = q3.all() # self.assertEqual(result3[0].locations, '{"type":"MultiPoint","coordinates":[[10,15],[10,15]]}') # self.assertEqual(result3[1].locations, '{"type":"MultiPoint","coordinates":[[30,15]]}') 
self.assertEqual(result3[0].cluster_size, 2) self.assertEqual(result3[1].cluster_size, 1) def test_get_cluster_centroids_as_geo_json(self): test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one() test_layer_2 = DBSession.query(Layer).filter_by(name='TestLayer2').one() q = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=1) result = q.all() self.assertEqual(result[0].centroid, '{"type":"Point","coordinates":[20,10]}') self.assertEqual(result[1].centroid, '{"type":"Point","coordinates":[30,10]}') q2 = GriddedMappablePoint.get_points_as_geojson(test_layer_1, grid_size=100) result2 = q2.one() self.assertEqual(result2.centroid, '{"type":"Point","coordinates":[25,10]}') q3 = GriddedMappablePoint.get_points_as_geojson(test_layer_2, grid_size=100) result3 = q3.one() self.assertEqual(result3.centroid, '{"type":"Point","coordinates":[16.6666666666667,15]}') def test_get_layer_points_as_wkt(self): test_layer_1 = DBSession.query(Layer).filter_by(name='TestLayer1').one() q = GriddedMappablePoint.get_points_as_wkt(test_layer_1, grid_size=1) result = q.all() # self.assertEqual(result[0].locations, 'MULTIPOINT(20 10)') # self.assertEqual(result[1].locations, 'MULTIPOINT(30 10)') def test_normalise_grid_size(self): grid_size_1 = GriddedMappablePoint.normalise_grid_size(10) self.assertEqual(grid_size_1, 8) grid_size_2 = GriddedMappablePoint.normalise_grid_size(0.00001) self.assertEqual(grid_size_2, 0) grid_size_3 = GriddedMappablePoint.normalise_grid_size(0.9) self.assertEqual(grid_size_3, 0.5) grid_size_4 = GriddedMappablePoint.normalise_grid_size(1.1) self.assertEqual(grid_size_4, 1)<|fim▁end|>
        result = q.all()
        # self.assertEqual(result[0].locations, '{"type":"MultiPoint","coordinates":[[20,10]]}')
        # self.assertEqual(result[1].locations, '{"type":"MultiPoint","coordinates":[[30,10]]}')
<|file_name|>generated_content.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! The generated content assignment phase. //! //! This phase handles CSS counters, quotes, and ordered lists per CSS § 12.3-12.5. It cannot be //! done in parallel and is therefore a sequential pass that runs on as little of the flow tree //! as possible. use context::LayoutContext; use flow::InorderFlowTraversal; use flow::{self, AFFECTS_COUNTERS, Flow, HAS_COUNTER_AFFECTING_CHILDREN, ImmutableFlowUtils}; use fragment::{Fragment, GeneratedContentInfo, SpecificFragmentInfo, UnscannedTextFragmentInfo}; use gfx::display_list::OpaqueNode; use script_layout_interface::restyle_damage::{RESOLVE_GENERATED_CONTENT, RestyleDamage}; use script_layout_interface::wrapper_traits::PseudoElementType; use smallvec::SmallVec; use std::collections::{HashMap, LinkedList}; use std::sync::Arc; use style::computed_values::content::ContentItem; use style::computed_values::{display, list_style_type}; use style::dom::TRestyleDamage; use style::properties::{ComputedValues, ServoComputedValues}; use text::TextRunScanner; // Decimal styles per CSS-COUNTER-STYLES § 6.1: static DECIMAL: [char; 10] = [ '0', '1', '2', '3', '4', '5', '6', '7', '8', '9' ]; // TODO(pcwalton): `decimal-leading-zero` static ARABIC_INDIC: [char; 10] = [ '٠', '١', '٢', '٣', '٤', '٥', '٦', '٧', '٨', '٩' ]; // TODO(pcwalton): `armenian`, `upper-armenian`, `lower-armenian` static BENGALI: [char; 10] = [ '০', '১', '২', '৩', '৪', '৫', '৬', '৭', '৮', '৯' ]; static CAMBODIAN: [char; 10] = [ '០', '១', '២', '៣', '៤', '៥', '៦', '៧', '៨', '៩' ]; // TODO(pcwalton): Suffix for CJK decimal. 
static CJK_DECIMAL: [char; 10] = [ '〇', '一', '二', '三', '四', '五', '六', '七', '八', '九' ]; static DEVANAGARI: [char; 10] = [ '०', '१', '२', '३', '४', '५', '६', '७', '८', '९' ]; // TODO(pcwalton): `georgian` static GUJARATI: [char; 10] = ['૦', '૧', '૨', '૩', '૪', '૫', '૬', '૭', '૮', '૯']; static GURMUKHI: [char; 10] = ['੦', '੧', '੨', '੩', '੪', '੫', '੬', '੭', '੮', '੯']; // TODO(pcwalton): `hebrew` static KANNADA: [char; 10] = ['೦', '೧', '೨', '೩', '೪', '೫', '೬', '೭', '೮', '೯']; static LAO: [char; 10] = ['໐', '໑', '໒', '໓', '໔', '໕', '໖', '໗', '໘', '໙']; static MALAYALAM: [char; 10] = ['൦', '൧', '൨', '൩', '൪', '൫', '൬', '൭', '൮', '൯']; static MONGOLIAN: [char; 10] = ['᠐', '᠑', '᠒', '᠓', '᠔', '᠕', '᠖', '᠗', '᠘', '᠙']; static MYANMAR: [char; 10] = ['၀', '၁', '၂', '၃', '၄', '၅', '၆', '၇', '၈', '၉']; static ORIYA: [char; 10] = ['୦', '୧', '୨', '୩', '୪', '୫', '୬', '୭', '୮', '୯']; static PERSIAN: [char; 10] = ['۰', '۱', '۲', '۳', '۴', '۵', '۶', '۷', '۸', '۹']; // TODO(pcwalton): `lower-roman`, `upper-roman` static TELUGU: [char; 10] = ['౦', '౧', '౨', '౩', '౪', '౫', '౬', '౭', '౮', '౯']; static THAI: [char; 10] = ['๐', '๑', '๒', '๓', '๔', '๕', '๖', '๗', '๘', '๙']; static TIBETAN: [char; 10] = ['༠', '༡', '༢', '༣', '༤', '༥', '༦', '༧', '༨', '༩']; // Alphabetic styles per CSS-COUNTER-STYLES § 6.2: static LOWER_ALPHA: [char; 26] = [ 'a', 'b', 'c', 'd', 'e', 'f', 'g', 'h', 'i', 'j', 'k', 'l', 'm', 'n', 'o', 'p', 'q', 'r', 's', 't', 'u', 'v', 'w', 'x', 'y', 'z' ]; static UPPER_ALPHA: [char; 26] = [ 'A', 'B', 'C', 'D', 'E', 'F', 'G', 'H', 'I', 'J', 'K', 'L', 'M', 'N', 'O', 'P', 'Q', 'R', 'S', 'T', 'U', 'V', 'W', 'X', 'Y', 'Z' ]; static CJK_EARTHLY_BRANCH: [char; 12] = [ '子', '丑', '寅', '卯', '辰', '巳', '午', '未', '申', '酉', '戌', '亥' ]; static CJK_HEAVENLY_STEM: [char; 10] = [ '甲', '乙', '丙', '丁', '戊', '己', '庚', '辛', '壬', '癸' ]; static LOWER_GREEK: [char; 24] = [ 'α', 'β', 'γ', 'δ', 'ε', 'ζ', 'η', 'θ', 'ι', 'κ', 'λ', 'μ', 'ν', 'ξ', 'ο', 'π', 'ρ', 'σ', 'τ', 'υ', 'φ', 'χ', 'ψ', 'ω' ]; static HIRAGANA: [char; 48] = [ 'あ', 'い', 'う', 'え', 'お', 'か', 'き', 'く', 'け', 'こ', 'さ', 'し', 'す', 'せ', 'そ', 'た', 'ち', 'つ', 'て', 'と', 'な', 'に', 'ぬ', 'ね', 'の', 'は', 'ひ', 'ふ', 'へ', 'ほ', 'ま', 'み', 'む', 'め', 'も', 'や', 'ゆ', 'よ', 'ら', 'り', 'る', 'れ', 'ろ', 'わ', 'ゐ', 'ゑ', 'を', 'ん' ]; static HIRAGANA_IROHA: [char; 47] = [ 'い', 'ろ', 'は', 'に', 'ほ', 'へ', 'と', 'ち', 'り', 'ぬ', 'る', 'を', 'わ', 'か', 'よ', 'た', 'れ', 'そ', 'つ', 'ね', 'な', 'ら', 'む', 'う', 'ゐ', 'の', 'お', 'く', 'や', 'ま', 'け', 'ふ', 'こ', 'え', 'て', 'あ', 'さ', 'き', 'ゆ', 'め', 'み', 'し', 'ゑ', 'ひ', 'も', 'せ', 'す' ]; static KATAKANA: [char; 48] = [ 'ア', 'イ', 'ウ', 'エ', 'オ', 'カ', 'キ', 'ク', 'ケ', 'コ', 'サ', 'シ', 'ス', 'セ', 'ソ', 'タ', 'チ', 'ツ', 'テ', 'ト', 'ナ', 'ニ', 'ヌ', 'ネ', 'ノ', 'ハ', 'ヒ', 'フ', 'ヘ', 'ホ', 'マ', 'ミ', 'ム', 'メ', 'モ', 'ヤ', 'ユ', 'ヨ', 'ラ', 'リ', 'ル', 'レ', 'ロ', 'ワ', 'ヰ', 'ヱ', 'ヲ', 'ン' ]; static KATAKANA_IROHA: [char; 47] = [ 'イ', 'ロ', 'ハ', 'ニ', 'ホ', 'ヘ', 'ト', 'チ', 'リ', 'ヌ', 'ル', 'ヲ', 'ワ', 'カ', 'ヨ', 'タ', 'レ', 'ソ', 'ツ', 'ネ', 'ナ', 'ラ', 'ム', 'ウ', 'ヰ', 'ノ', 'オ', 'ク', 'ヤ', 'マ', 'ケ', 'フ', 'コ', 'エ', 'テ', 'ア', 'サ', 'キ', 'ユ', 'メ', 'ミ', 'シ', 'ヱ', 'ヒ', 'モ', 'セ', 'ス' ]; /// The generated content resolution traversal. pub struct ResolveGeneratedContent<'a> { /// The layout context. layout_context: &'a LayoutContext<'a>, /// The counter representing an ordered list item. list_item: Counter, /// Named CSS counters. counters: HashMap<String, Counter>, /// The level of quote nesting. quote: u32, } impl<'a> ResolveGeneratedContent<'a> { /// Creates a new generated content resolution traversal. 
pub fn new(layout_context: &'a LayoutContext<'a>) -> ResolveGeneratedContent<'a> { ResolveGeneratedContent { layout_context: layout_context, list_item: Counter::new(), counters: HashMap::new(), quote: 0, } } } impl<'a> InorderFlowTraversal for ResolveGeneratedContent<'a> { #[inline] fn process(&mut self, flow: &mut Flow, level: u32) { let mut mutator = ResolveGeneratedContentFragmentMutator { traversal: self, level: level, is_block: flow.is_block_like(), incremented: false, }; flow.mutate_fragments(&mut |fragment| mutator.mutate_fragment(fragment)) } #[inline] fn should_process(&mut self, flow: &mut Flow) -> bool { flow::base(flow).restyle_damage.intersects(RESOLVE_GENERATED_CONTENT) || flow::base(flow).flags.intersects(AFFECTS_COUNTERS | HAS_COUNTER_AFFECTING_CHILDREN) } } /// The object that mutates the generated content fragments. struct ResolveGeneratedContentFragmentMutator<'a,'b:'a> { /// The traversal. traversal: &'a mut ResolveGeneratedContent<'b>, /// The level we're at in the flow tree. level: u32, /// Whether this flow is a block flow. is_block: bool, /// Whether we've incremented the counter yet. incremented: bool, } impl<'a,'b> ResolveGeneratedContentFragmentMutator<'a,'b> { fn mutate_fragment(&mut self, fragment: &mut Fragment) { // We only reset and/or increment counters once per flow. This avoids double-incrementing // counters on list items (once for the main fragment and once for the marker). if !self.incremented { self.reset_and_increment_counters_as_necessary(fragment); } let mut list_style_type = fragment.style().get_list().list_style_type; if fragment.style().get_box().display != display::T::list_item { list_style_type = list_style_type::T::none } let mut new_info = None; { let info = if let SpecificFragmentInfo::GeneratedContent(ref mut info) = fragment.specific { info } else { return }; match **info { GeneratedContentInfo::ListItem => { new_info = self.traversal.list_item.render(self.traversal.layout_context, fragment.node, fragment.pseudo.clone(), fragment.style.clone(), list_style_type, RenderingMode::Suffix(".\u{00a0}")) } GeneratedContentInfo::Empty | GeneratedContentInfo::ContentItem(ContentItem::String(_)) => { // Nothing to do here. 
} GeneratedContentInfo::ContentItem(ContentItem::Counter(ref counter_name, counter_style)) => { let temporary_counter = Counter::new(); let counter = self.traversal .counters .get(&*counter_name) .unwrap_or(&temporary_counter); new_info = counter.render(self.traversal.layout_context, fragment.node, fragment.pseudo.clone(), fragment.style.clone(), counter_style, RenderingMode::Plain) } GeneratedContentInfo::ContentItem(ContentItem::Counters(ref counter_name, ref separator, counter_style)) => { let temporary_counter = Counter::new(); let counter = self.traversal .counters .get(&*counter_name) .unwrap_or(&temporary_counter); new_info = counter.render(self.traversal.layout_context, fragment.node, fragment.pseudo, fragment.style.clone(), counter_style, RenderingMode::All(&separator)); } GeneratedContentInfo::ContentItem(ContentItem::OpenQuote) => { new_info = render_text(self.traversal.layout_context, fragment.node, fragment.pseudo, fragment.style.clone(), self.quote(&*fragment.style, false)); self.traversal.quote += 1 } GeneratedContentInfo::ContentItem(ContentItem::CloseQuote) => { if self.traversal.quote >= 1 { self.traversal.quote -= 1 } new_info = render_text(self.traversal.layout_context, fragment.node, fragment.pseudo, fragment.style.clone(), self.quote(&*fragment.style, true)); } GeneratedContentInfo::ContentItem(ContentItem::NoOpenQuote) => { self.traversal.quote += 1 } GeneratedContentInfo::ContentItem(ContentItem::NoCloseQuote) => { if self.traversal.quote >= 1 { self.traversal.quote -= 1 } } } }; fragment.specific = match new_info { Some(new_info) => new_info, // If the fragment did not generate any content, replace it with a no-op placeholder // so that it isn't processed again on the next layout. FIXME (mbrubeck): When // processing an inline flow, this traversal should be allowed to insert or remove // fragments. Then we can just remove these fragments rather than adding placeholders. None => SpecificFragmentInfo::GeneratedContent(box GeneratedContentInfo::Empty) }; } fn reset_and_increment_counters_as_necessary(&mut self, fragment: &mut Fragment) { let mut list_style_type = fragment.style().get_list().list_style_type; if !self.is_block || fragment.style().get_box().display != display::T::list_item { list_style_type = list_style_type::T::none } match list_style_type { list_style_type::T::disc | list_style_type::T::none | list_style_type::T::circle | list_style_type::T::square | list_style_type::T::disclosure_open | list_style_type::T::disclosure_closed => {} _ => self.traversal.list_item.increment(self.level, 1), } // Truncate down counters. 
for (_, counter) in &mut self.traversal.counters { counter.truncate_to_level(self.level); } self.traversal.list_item.truncate_to_level(self.level); for &(ref counter_name, value) in &fragment.style().get_counters().counter_reset.0 { if let Some(ref mut counter) = self.traversal.counters.get_mut(counter_name) { counter.reset(self.level, value); continue } let mut counter = Counter::new(); counter.reset(self.level, value); self.traversal.counters.insert((*counter_name).clone(), counter); } for &(ref counter_name, value) in &fragment.style().get_counters().counter_increment.0 { if let Some(ref mut counter) = self.traversal.counters.get_mut(counter_name) { counter.increment(self.level, value); continue } let mut counter = Counter::new(); counter.increment(self.level, value); self.traversal.counters.insert((*counter_name).clone(), counter); } self.incremented = true } fn quote(&self, style: &ServoComputedValues, close: bool) -> String { let quotes = &style.get_list().quotes; if quotes.0.is_empty() { return String::new() } let &(ref open_quote, ref close_quote) = if self.traversal.quote as usize >= quotes.0.len() { quotes.0.last().unwrap() } else { &quotes.0[self.traversal.quote as usize] }; if close { close_quote.clone() } else { open_quote.clone() } } } /// A counter per CSS 2.1 § 12.4. struct Counter { /// The values at each level. values: Vec<CounterValue>, } impl Counter { fn new() -> Counter { Counter { values: Vec::new(), } } fn reset(&mut self, level: u32, value: i32) { // Do we have an instance of the counter at this level? If so, just mutate it. if let Some(ref mut existing_value) = self.values.last_mut() { if level == existing_value.level { existing_value.value = value; return } } // Otherwise, push a new instance of the counter. self.values.push(CounterValue { level: level, value: value, }) } fn truncate_to_level(&mut self, level: u32) { if let Some(position) = self.values.iter().position(|value| value.level > level) { self.values.truncate(position) } } fn increment(&mut self, level: u32, amount: i32) { if let Some(ref mut value) = self.values.last_mut() { value.value += amount; return } self.values.push(CounterValue { level: level, value: amount, }) } fn render(&self, layout_context: &LayoutContext, node: OpaqueNode, pseudo: PseudoElementType<()>, style: Arc<ServoComputedValues>, list_style_type: list_style_type::T, mode: RenderingMode) -> Option<SpecificFragmentInfo> { let mut string = String::new(); match mode { RenderingMode::Plain => { let value = match self.values.last() { Some(ref value) => value.value, None => 0, }; push_representation(value, list_style_type, &mut string) } RenderingMode::Suffix(suffix) => { let value = match self.values.last() { Some(ref value) => value.value, None => 0, }; push_representation(value, list_style_type, &mut string); string.push_str(suffix) } RenderingMode::All(separator) => { let mut first = true; for value in &self.values { if !first { string.push_str(separator) } first = false; push_representation(value.value, list_style_type, &mut string) } } } if string.is_empty() { None } else { render_text(layout_context, node, pseudo, style, string) } } } /// How a counter value is to be rendered. enum RenderingMode<'a> { /// The innermost counter value is rendered with no extra decoration. Plain, /// The innermost counter value is rendered with the given string suffix. Suffix(&'a str), /// All values of the counter are rendered with the given separator string between them. All(&'a str), } /// The value of a counter at a given level. 
struct CounterValue { /// The level of the flow tree that this corresponds to. level: u32, /// The value of the counter at this level. value: i32, } /// Creates fragment info for a literal string. fn render_text(layout_context: &LayoutContext, node: OpaqueNode, pseudo: PseudoElementType<()>, style: Arc<ServoComputedValues>, string: String) -> Option<SpecificFragmentInfo> { let mut fragments = LinkedList::new(); let info = SpecificFragmentInfo::UnscannedText( box UnscannedTextFragmentInfo::new(string, None)); fragments.push_back(Fragment::from_opaque_node_and_style(node, pseudo, style.clone(), style, RestyleDamage::rebuild_and_reflow(), info)); // FIXME(pcwalton): This should properly handle multiple marker fragments. This could happen // due to text run splitting. let fragments = TextRunScanner::new().scan_for_runs(&mut layout_context.font_context(), fragments); if fragments.is_empty() { None } else { Some(fragments.fragments.into_iter().next().unwrap().specific) } } /// Appends string that represents the value rendered using the system appropriate for the given /// `list-style-type` onto the given string. fn push_representation(value: i32, list_style_type: list_style_type::T, accumulator: &mut String) { match list_style_type { list_style_type::T::none => {} list_style_type::T::disc | list_style_type::T::circle | list_style_type::T::square | list_style_type::T::disclosure_open | list_style_type::T::disclosure_closed => {<|fim▁hole|> list_style_type::T::decimal => push_numeric_representation(value, &DECIMAL, accumulator), list_style_type::T::arabic_indic => { push_numeric_representation(value, &ARABIC_INDIC, accumulator) } list_style_type::T::bengali => push_numeric_representation(value, &BENGALI, accumulator), list_style_type::T::cambodian | list_style_type::T::khmer => { push_numeric_representation(value, &CAMBODIAN, accumulator) } list_style_type::T::cjk_decimal => { push_numeric_representation(value, &CJK_DECIMAL, accumulator) } list_style_type::T::devanagari => { push_numeric_representation(value, &DEVANAGARI, accumulator) } list_style_type::T::gujarati => push_numeric_representation(value, &GUJARATI, accumulator), list_style_type::T::gurmukhi => push_numeric_representation(value, &GURMUKHI, accumulator), list_style_type::T::kannada => push_numeric_representation(value, &KANNADA, accumulator), list_style_type::T::lao => push_numeric_representation(value, &LAO, accumulator), list_style_type::T::malayalam => { push_numeric_representation(value, &MALAYALAM, accumulator) } list_style_type::T::mongolian => { push_numeric_representation(value, &MONGOLIAN, accumulator) } list_style_type::T::myanmar => push_numeric_representation(value, &MYANMAR, accumulator), list_style_type::T::oriya => push_numeric_representation(value, &ORIYA, accumulator), list_style_type::T::persian => push_numeric_representation(value, &PERSIAN, accumulator), list_style_type::T::telugu => push_numeric_representation(value, &TELUGU, accumulator), list_style_type::T::thai => push_numeric_representation(value, &THAI, accumulator), list_style_type::T::tibetan => push_numeric_representation(value, &TIBETAN, accumulator), list_style_type::T::lower_alpha => { push_alphabetic_representation(value, &LOWER_ALPHA, accumulator) } list_style_type::T::upper_alpha => { push_alphabetic_representation(value, &UPPER_ALPHA, accumulator) } list_style_type::T::cjk_earthly_branch => { push_alphabetic_representation(value, &CJK_EARTHLY_BRANCH, accumulator) } list_style_type::T::cjk_heavenly_stem => { push_alphabetic_representation(value, 
&CJK_HEAVENLY_STEM, accumulator) } list_style_type::T::lower_greek => { push_alphabetic_representation(value, &LOWER_GREEK, accumulator) } list_style_type::T::hiragana => { push_alphabetic_representation(value, &HIRAGANA, accumulator) } list_style_type::T::hiragana_iroha => { push_alphabetic_representation(value, &HIRAGANA_IROHA, accumulator) } list_style_type::T::katakana => { push_alphabetic_representation(value, &KATAKANA, accumulator) } list_style_type::T::katakana_iroha => { push_alphabetic_representation(value, &KATAKANA_IROHA, accumulator) } } } /// Returns the static character that represents the value rendered using the given list-style, if /// possible. pub fn static_representation(list_style_type: list_style_type::T) -> char { match list_style_type { list_style_type::T::disc => '•', list_style_type::T::circle => '◦', list_style_type::T::square => '▪', list_style_type::T::disclosure_open => '▾', list_style_type::T::disclosure_closed => '‣', _ => panic!("No static representation for this list-style-type!"), } } /// Pushes the string that represents the value rendered using the given *alphabetic system* onto /// the accumulator per CSS-COUNTER-STYLES § 3.1.4. fn push_alphabetic_representation(value: i32, system: &[char], accumulator: &mut String) { let mut abs_value = handle_negative_value(value, accumulator); let mut string: SmallVec<[char; 8]> = SmallVec::new(); while abs_value != 0 { // Step 1. abs_value = abs_value - 1; // Step 2. string.push(system[abs_value % system.len()]); // Step 3. abs_value = abs_value / system.len(); } accumulator.extend(string.iter().cloned().rev()) } /// Pushes the string that represents the value rendered using the given *numeric system* onto the /// accumulator per CSS-COUNTER-STYLES § 3.1.5. fn push_numeric_representation(value: i32, system: &[char], accumulator: &mut String) { let mut abs_value = handle_negative_value(value, accumulator); // Step 1. if abs_value == 0 { accumulator.push(system[0]); return } // Step 2. let mut string: SmallVec<[char; 8]> = SmallVec::new(); while abs_value != 0 { // Step 2.1. string.push(system[abs_value % system.len()]); // Step 2.2. abs_value = abs_value / system.len(); } // Step 3. accumulator.extend(string.iter().cloned().rev()) } /// If the system uses a negative sign, handle negative values per CSS-COUNTER-STYLES § 2. /// /// Returns the absolute value of the counter. fn handle_negative_value(value: i32, accumulator: &mut String) -> usize { // 3. If the counter value is negative and the counter style uses a negative sign, instead // generate an initial representation using the absolute value of the counter value. if value < 0 { // TODO: Support different negative signs using the 'negative' descriptor. // https://drafts.csswg.org/date/2015-07-16/css-counter-styles/#counter-style-negative accumulator.push('-'); value.abs() as usize } else { value as usize } }<|fim▁end|>
accumulator.push(static_representation(list_style_type)) }
<|file_name|>layout_aligned_opaque.rs<|end_file_name|><|fim▁begin|>#[repr(packed, C)] pub struct PackedStruct { pub arg1: usize, pub arg2: *mut u8, } #[repr(packed, C)] pub union PackedUnion { pub variant1: usize, pub variant2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(1), C)] pub union OpaqueAlign1Union { pub variant1: usize, pub variant2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(4), C)] pub union OpaqueAlign4Union { pub variant1: usize, pub variant2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(16), C)] pub union OpaqueAlign16Union { pub variant1: usize, pub variant2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(1), C)] pub struct OpaqueAlign1Struct { pub arg1: usize, pub arg2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(2), C)] pub struct OpaqueAlign2Struct { pub arg1: usize, pub arg2: *mut u8, } // Opaque because aligned_n is not defined.<|fim▁hole|>pub struct OpaqueAlign4Struct { pub arg1: usize, pub arg2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(8), C)] pub struct OpaqueAlign8Struct { pub arg1: usize, pub arg2: *mut u8, } // Opaque because aligned_n is not defined. #[repr(align(32), C)] pub struct OpaqueAlign32Struct { pub arg1: usize, pub arg2: *mut u8, }<|fim▁end|>
#[repr(align(4), C)]
<|file_name|>light.py<|end_file_name|><|fim▁begin|>"""Support for Hive light devices.""" from datetime import timedelta from homeassistant.components.light import ( ATTR_BRIGHTNESS, ATTR_COLOR_TEMP, ATTR_HS_COLOR, SUPPORT_BRIGHTNESS, SUPPORT_COLOR, SUPPORT_COLOR_TEMP, LightEntity, ) from homeassistant.helpers.entity import DeviceInfo import homeassistant.util.color as color_util from . import HiveEntity, refresh_system from .const import ATTR_MODE, DOMAIN PARALLEL_UPDATES = 0 SCAN_INTERVAL = timedelta(seconds=15) async def async_setup_entry(hass, entry, async_add_entities): """Set up Hive thermostat based on a config entry.""" hive = hass.data[DOMAIN][entry.entry_id] devices = hive.session.deviceList.get("light") entities = [] if devices: for dev in devices: entities.append(HiveDeviceLight(hive, dev)) async_add_entities(entities, True) class HiveDeviceLight(HiveEntity, LightEntity): """Hive Active Light Device.""" @property def unique_id(self): """Return unique ID of entity.""" return self._unique_id @property def device_info(self) -> DeviceInfo: """Return device information.""" return DeviceInfo( identifiers={(DOMAIN, self.device["device_id"])}, manufacturer=self.device["deviceData"]["manufacturer"], model=self.device["deviceData"]["model"], name=self.device["device_name"], sw_version=self.device["deviceData"]["version"], via_device=(DOMAIN, self.device["parentDevice"]), ) @property def name(self): """Return the display name of this light.""" return self.device["haName"] @property def available(self): """Return if the device is available.""" return self.device["deviceData"]["online"] @property def extra_state_attributes(self): """Show Device Attributes.""" return { ATTR_MODE: self.attributes.get(ATTR_MODE), } @property def brightness(self):<|fim▁hole|> def min_mireds(self): """Return the coldest color_temp that this light supports.""" return self.device.get("min_mireds") @property def max_mireds(self): """Return the warmest color_temp that this light supports.""" return self.device.get("max_mireds") @property def color_temp(self): """Return the CT color value in mireds.""" return self.device["status"].get("color_temp") @property def hs_color(self): """Return the hs color value.""" if self.device["status"]["mode"] == "COLOUR": rgb = self.device["status"].get("hs_color") return color_util.color_RGB_to_hs(*rgb) return None @property def is_on(self): """Return true if light is on.""" return self.device["status"]["state"] @refresh_system async def async_turn_on(self, **kwargs): """Instruct the light to turn on.""" new_brightness = None new_color_temp = None new_color = None if ATTR_BRIGHTNESS in kwargs: tmp_new_brightness = kwargs.get(ATTR_BRIGHTNESS) percentage_brightness = (tmp_new_brightness / 255) * 100 new_brightness = int(round(percentage_brightness / 5.0) * 5.0) if new_brightness == 0: new_brightness = 5 if ATTR_COLOR_TEMP in kwargs: tmp_new_color_temp = kwargs.get(ATTR_COLOR_TEMP) new_color_temp = round(1000000 / tmp_new_color_temp) if ATTR_HS_COLOR in kwargs: get_new_color = kwargs.get(ATTR_HS_COLOR) hue = int(get_new_color[0]) saturation = int(get_new_color[1]) new_color = (hue, saturation, 100) await self.hive.light.turnOn( self.device, new_brightness, new_color_temp, new_color ) @refresh_system async def async_turn_off(self, **kwargs): """Instruct the light to turn off.""" await self.hive.light.turnOff(self.device) @property def supported_features(self): """Flag supported features.""" supported_features = None if self.device["hiveType"] == "warmwhitelight": supported_features = 
SUPPORT_BRIGHTNESS elif self.device["hiveType"] == "tuneablelight": supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP elif self.device["hiveType"] == "colourtuneablelight": supported_features = SUPPORT_BRIGHTNESS | SUPPORT_COLOR_TEMP | SUPPORT_COLOR return supported_features async def async_update(self): """Update all Node data from Hive.""" await self.hive.session.updateData(self.device) self.device = await self.hive.light.getLight(self.device) self.attributes.update(self.device.get("attributes", {}))<|fim▁end|>
"""Brightness of the light (an integer in the range 1-255).""" return self.device["status"]["brightness"] @property
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! aobench: Ambient Occlusion Renderer benchmark. //! //! Based on [aobench](https://code.google.com/archive/p/aobench/) by Syoyo //! Fujita. #![deny(warnings, rust_2018_idioms)] #![allow(non_snake_case, non_camel_case_types)] #![cfg_attr( feature = "cargo-clippy", allow(<|fim▁hole|> clippy::cast_possible_truncation, clippy::cast_sign_loss, clippy::identity_op, clippy::erasing_op ) )] pub mod ambient_occlusion; pub mod geometry; pub mod image; pub mod intersection; pub mod random; pub mod scene; #[cfg(feature = "ispc")] pub mod ispc_; pub mod scalar; pub mod scalar_parallel; pub mod tiled; pub mod tiled_parallel; pub mod vector; pub mod vector_parallel; pub use self::image::Image; pub use self::scene::Scene;<|fim▁end|>
clippy::many_single_char_names, clippy::similar_names, clippy::cast_precision_loss, clippy::inline_always,
<|file_name|>RegistryClient.java<|end_file_name|><|fim▁begin|>/*** *Sensap contribution * @author George * */ package eu.sensap.farEdge.dataRoutingClient.registry; import java.io.IOException; import java.util.UUID; import org.slf4j.Logger; import org.slf4j.LoggerFactory; import com.fasterxml.jackson.annotation.JsonInclude.Include; import com.fasterxml.jackson.databind.ObjectMapper; import com.sun.jersey.api.client.Client; import com.sun.jersey.api.client.ClientResponse; import com.sun.jersey.api.client.WebResource; import com.sun.jersey.api.client.filter.HTTPBasicAuthFilter; import eu.faredge.edgeInfrastructure.registry.messages.RegistrationResult; import eu.faredge.edgeInfrastructure.registry.messages.RegistrationResultStatusEnum; import eu.faredge.edgeInfrastructure.registry.models.dsm.DSM; //import eu.faredge.edgeInfrastructure.registry.models.DataSourceManifest; import eu.sensap.farEdge.dataRoutingClient.interfaces.DeviceRegisterInterface; import eu.sensap.farEdge.dataRoutingClient.models.Credentials; //import eu.sensap.farEdge.dataRoutingClient.models.RegistrationResult; /*** * This class supports the basic registry operations * There are three operations: * 1. Registers a device with a specific UUID, Credentials, and Configuration Environments * 2. Unregisters e registered device * 3. Asks if a device is registered */ public class RegistryClient implements DeviceRegisterInterface { private final Logger log = LoggerFactory.getLogger(this.getClass()); private Credentials credentials; //Credentials for registry connection private String registryUri; //the end point from registry service private DSM dsm; //Data source Manifest for the device //TODO to be deleted // private ConfigurationEnv configurationEnv; //configuration environmental values for registry and message bus connection // private String dsd; //Data source definition for the data source // private String macAddress; // device macAddress @Override public void create(String registryUri) { this.setRegistryUri(registryUri); // this.setCredentials(credentials); } public RegistryClient(String registryUri) { create(registryUri); } // The public registration method @Override public RegistrationResult registerDevice(DSM dsm, Credentials credentials) { log.debug(" +--Request for registration for DSM with URI =" + dsm.getUri()); this.setDsm(dsm); this.setCredentials(credentials); //call post Method to connect with registry RegistrationResult result= this.postResource(this.registryUri, dsm, credentials); log.debug(" +--Registration returned status " + result.getStatus()); return result; } //the public method for unregister @Override public RegistrationResult unRegisterDevice(String id, Credentials credentials) { log.debug("Request for registration for DSM with id =" + id); // System.out.println("client:registryclient:unRegisterDevice==>dsmUri=" + uri + " registryUri=" + this.getRegistryUri()); // call post Method RegistrationResult result = this.deleteResource(this.registryUri, id,credentials); // System.out.println("Client:RegistryCLient:unregister response=" + result.getResult()); log.debug("Unregister returned status " + result.getStatus()); return result; } // public method for returning the registration status (true false) @Override public boolean isRegistered(DSM dsm, Credentials credentials) { log.debug("check if DSM is registered with id=" + dsm.getId()); // call post Method RegistrationResult result = this.postResource(this.registryUri, dsm,credentials); log.debug("Registration status for dsm_id=" + dsm.getId() + " is " + 
result.getStatus()); if (result.getStatus()==RegistrationResultStatusEnum.SUCCESS) return true; else return false; } // postResource posts postData data to the specific Rest (URI) private <T> RegistrationResult postResource (String uri, T postData, Credentials credentials) { log.debug(" +--Request for post to uri=" + uri); try { // create and initialize client for REST call Client client = Client.create(); client.addFilter(new HTTPBasicAuthFilter(credentials.getUser(),credentials.getPassword())); WebResource webResource = client.resource(uri); // serialize 'postData' Object to String ObjectMapper mapper = new ObjectMapper(); mapper.setSerializationInclusion(Include.NON_NULL); String request = mapper.writeValueAsString(postData); log.debug(" +--dsm=" + request); // call resource and get Results ClientResponse response = webResource.type("application/json").post(ClientResponse.class,request); //TODO why do I need this? if (response.getStatus() != 201) { log.debug("Response from rest{" + uri + "} has status " + response.getStatus()); client.destroy(); } //Get results as registration results RegistrationResult results = this.getRegistrationResults(response); //destroy client client.destroy(); log.debug(" +--Response from rest{" + uri + "} has status " + results.getStatus()); return results; } catch (Exception e) { // TODO only for skipping registry //e.printStackTrace(); RegistrationResult results = new RegistrationResult(); results.setStatus(RegistrationResultStatusEnum.SUCCESS); results.setBody(this.getFakeDsmId()); results.setStatusMessage("Get a fake registration"); log.debug(" +--Getting Fake registration"); return results; } // catch (Exception e) // { // e.printStackTrace(); // RegistrationResult results = new RegistrationResult(); // results.setStatus(RegistrationResultStatusEnum.FAIL); // results.setStatusMessage("Error creating-initializing-calling resource or parsing the response"); // // log.debug("Error creating-initializing-calling resource or parsing the response"); // // return results; // } } private String getFakeDsmId() { UUID id = UUID.randomUUID(); return "dsm://"+id.toString(); } private <T> RegistrationResult deleteResource (String uri, String postData, Credentials credentials) { log.debug("Request for delete to uri=" + uri + ". Delete the id:" + postData); try { // create and initialize client for REST call Client client = Client.create(); client.addFilter(new HTTPBasicAuthFilter(credentials.getUser(),credentials.getPassword())); WebResource webResource = client.resource(uri).queryParam("id", postData); // call resource and get Results ClientResponse response = webResource.type("application/json").delete(ClientResponse.class); //TODO why do I need this? 
if (response.getStatus() != 200) { client.destroy(); //throw new RuntimeException("Failed : HTTP error code : " + response.getStatus()); } //Get results as registration results RegistrationResult results = this.getRegistrationResults(response); //destroy client.destroy(); log.debug("Response from rest{" + uri + "} has status " + results.getStatus()); return results; } catch (Exception e) { e.printStackTrace(); RegistrationResult results = new RegistrationResult(); results.setStatus(RegistrationResultStatusEnum.FAIL); results.setStatusMessage("Error creating-initializing-calling resource or parsing the response"); log.debug("Error creating-initializing-calling resource or parsing the response"); return results; } } private RegistrationResult getRegistrationResults(ClientResponse response) { ObjectMapper mapper = new ObjectMapper(); String createresponseString = response.getEntity(String.class); RegistrationResult res = new RegistrationResult(); try { res = mapper.readValue(createresponseString, res.getClass()); } catch (IOException e) { // TODO Auto-generated catch block e.printStackTrace(); return null; } // TODO: Transformations from Client Response to Registration Result Class // RegistrationResult res = new RegistrationResult(); if (response.getStatus()== 400) { res.setStatus(RegistrationResultStatusEnum.NOTFOUND); } else { res.setStatus(RegistrationResultStatusEnum.SUCCESS); } res.setStatusMessage(Integer.toString(response.getStatus())); return res; } // // private void createDsm() // { // dsm = new DSM(); // dsm.setDataSourceDefinitionReferenceID(dsd); // dsm.setMacAddress(macAddress); // dsm.setUri(configurationEnv.getEdgeUri()+ macAddress); // dsm.setDataSourceDefinitionInterfaceParameters(createDsdIp()); // } // // // private DataSourceDefinitionInterfaceParameters createDsdIp () // { // DataSourceDefinitionInterfaceParameters dsdip = new DataSourceDefinitionInterfaceParameters(); // Set<Parameter> paramSet = null; // // dsdip.setDescr(configurationEnv.getTopic()); // // Parameter top = new Parameter(); <|fim▁hole|>// top.setKey("topic"); // top.setValue(configurationEnv.getTopic()); // paramSet.add(top); // // Set<String> keys = configurationEnv.getKafkaProps().stringPropertyNames(); // for (String key : keys) { // Parameter e = new Parameter(); // e.setKey(key); // e.setValue(configurationEnv.getKafkaProps().getProperty(key)); // paramSet.add(e); // System.out.println(key + " : " + configurationEnv.getKafkaProps().getProperty(key)); // } // // dsdip.setParameter(paramSet); // // return dsdip; // } // // Getters and setters public Credentials getCredentials() { return credentials; } public void setCredentials(Credentials credentials) { this.credentials = credentials; } public DSM getDsm() { return dsm; } public void setDsm(DSM dsm) { this.dsm = dsm; } public String getRegistryUri() { return registryUri; } public void setRegistryUri(String registryUri) { this.registryUri = registryUri; } // public String getDsd() { // return dsd; // } // // public void setDsd(String dsd) { // this.dsd = dsd; // } // // public String getMacAddress() { // return macAddress; // } // // public void setMacAddress(String macAddress) { // this.macAddress = macAddress; // } // // public ConfigurationEnv getConfigurationEnv() // { // return configurationEnv; // } // // public void setConfigurationEnv(ConfigurationEnv configurationEnv) // { // this.configurationEnv = configurationEnv; // } }<|fim▁end|>
<|file_name|>spi_dummy.rs<|end_file_name|><|fim▁begin|>///! A dummy SPI client to test the SPI implementation use sam4l; use hil::gpio; use hil::spi_master::{self, SpiMaster}; #[allow(unused_variables,dead_code)] pub struct DummyCB { val: u8 } pub static mut FLOP: bool = false; pub static mut buf1: [u8; 8] = [0, 0, 0, 0, 0, 0, 0, 0]; pub static mut buf2: [u8; 8] = [8, 7, 6, 5, 4, 3, 2, 1]; impl spi_master::SpiCallback for DummyCB { #[allow(unused_variables,dead_code)] fn read_write_done(&'static self) { unsafe { FLOP = !FLOP; let len: usize = buf1.len(); if FLOP { sam4l::spi::SPI.read_write_bytes(Some(&mut buf1), Some(&mut buf2), len); } else { sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len); } } } } pub static mut SPICB: DummyCB = DummyCB{val: 0x55 as u8}; // This test first asserts the Firestorm's pin 2, then initiates a continuous // SPI transfer of 8 bytes. // // The first SPI transfer outputs [8, 7, 6, 5, 4, 3, 2, 1] then echoes whatever<|fim▁hole|>// To test with a logic analyzer, connect probes to pin 2 on the Firestorm, and // the SPI MOSI and CLK pins (exposed on the Firestorm's 22-pin header). Setup // the logic analyzer to trigger sampling on assertion of pin 2, then restart // the board. pub unsafe fn spi_dummy_test() { let pin2 : &mut gpio::GPIOPin = &mut sam4l::gpio::PA[16]; pin2.enable_output(); pin2.set(); sam4l::spi::SPI.set_active_peripheral(sam4l::spi::Peripheral::Peripheral1); sam4l::spi::SPI.init(&SPICB); sam4l::spi::SPI.enable(); let len = buf2.len(); sam4l::spi::SPI.read_write_bytes(Some(&mut buf2), Some(&mut buf1), len); }<|fim▁end|>
// input it receives from the slave on peripheral 1 continuously.
//
<|file_name|>dialog.py<|end_file_name|><|fim▁begin|>import pygame def arrow_image(color): img = pygame.Surface((7, 6)) img.fill((226, 59, 252)) img.set_colorkey((226, 59, 252), pygame.RLEACCEL) pygame.draw.polygon(img, color, ((0, 0), (3, 3), (6, 0))) return img class Menu(object): def __init__(self, font, options): self.font = font self.options = options self.option = 0 self.height = len(self.options)*(self.font.get_height())+(len(self.options)-1)*3 self.width = 0 for o in self.options: w = (len(o)+1)*self.font.get_width() if w > self.width: self.width = w def draw(self, surface, pos, background=None, border=None): ypos = pos[1] i = 0 if background: pygame.draw.rect(surface, background, (pos[0]-4, pos[1]-4, self.width+8, self.height+6)) if border: pygame.draw.rect(surface, border, (pos[0]-4, pos[1]-4, self.width+8, self.height+8), 1) for opt in self.options: if i == self.option: icon = ">" else: icon = " " ren = self.font.render(icon + opt) surface.blit(ren, (pos[0], ypos)) ypos += ren.get_height()+3 i += 1 def move_cursor(self, dir): if dir > 0: if self.option < len(self.options)-1: self.option += 1 elif dir < 0: if self.option > 0: self.option -= 1 def get_option(self): return self.option, self.options[self.option] class DialogBox(object): def __init__(self, size, background_color, border_color, font): self.dialog = [] self.image = pygame.Surface(size) self.font = font self.size = size self.background_color = background_color self.border_color = border_color self.update_box() self.text_pos = 0 self.shown = False self.scroll_delay = 1 self.frame = 0 self.down_arrow = arrow_image(font.color) self.curr_dialog=0 def set_scrolldelay(self, delay): self.scroll_delay = delay def set_dialog(self, dialog_list): self.page = 0 self.pages = len(dialog_list) self.dialog = dialog_list self.shown = True self.text_pos = 0 def update_box(self): self.image.fill(self.background_color)<|fim▁hole|> if (self.curr_dialog==0): return if (self.text_pos >= len(self.curr_dialog)): if self.page < self.pages-1: self.page += 1 self.text_pos = 0 else: self.shown = False else: self.text_pos = len(self.curr_dialog) def draw(self, surface, pos): if self.shown and self.page < self.pages: self.update_box() self.curr_dialog = self.dialog[self.page] xpos = 4 ypos = 4 if self.text_pos < len(self.curr_dialog): self.frame -= 1 if self.frame <= 0: self.text_pos += 1 self.frame = self.scroll_delay else: self.image.blit(self.down_arrow, (self.image.get_width()-12, self.image.get_height()-8)) dialog = self.curr_dialog[:self.text_pos] for word in dialog.split(" "): ren = self.font.render(word + " ") w = ren.get_width() if xpos > self.image.get_width()-w: ypos += ren.get_height()+3 xpos = 4 self.image.blit(ren, (xpos, ypos)) xpos += w surface.blit(self.image, pos) def over(self): return self.shown != True def close(self): self.shown = False self.page = self.pages<|fim▁end|>
pygame.draw.rect(self.image, self.border_color, (0, 0, self.size[0]-1, self.size[1]-1), 1) def progress(self):
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>fn function() {<|fim▁hole|> println!("called `function()`"); } // A module named `my` mod my { // A module can contain items like functions #[allow(dead_code)] fn function() { println!("called `my::function()`"); } // Modules can be nested mod nested { #[allow(dead_code)] fn function() { println!("called `my::nested::function()`"); } } } fn main() { function(); // Error! `my::function` is private // my::function(); // TODO ^ Comment out this line }<|fim▁end|>
<|file_name|>api.rs<|end_file_name|><|fim▁begin|>//! Use this module to interact with Bitstamp exchange. //! Please see examples for more informations. use hyper_native_tls::NativeTlsClient; use hyper::Client; use hyper::header::ContentType; use hyper::net::HttpsConnector; use serde_json::Value; use serde_json::value::Map; use std::collections::HashMap; use std::io::Read; use std::thread; use std::time::Duration; use coinnect::Credentials; use exchange::Exchange; use error::*; use helpers; use types::Pair; use bitstamp::utils; use types::*; header! { #[doc(hidden)] (KeyHeader, "Key") => [String] } header! { #[doc(hidden)] (SignHeader, "Sign") => [String] } header! { #[doc(hidden)] (ContentHeader, "Content-Type") => [String] } #[derive(Debug)] pub struct BitstampApi { last_request: i64, // unix timestamp in ms, to avoid ban api_key: String, api_secret: String, customer_id: String, http_client: Client, burst: bool, } impl BitstampApi { /// Create a new BitstampApi by providing an API key & API secret pub fn new<C: Credentials>(creds: C) -> Result<BitstampApi> { if creds.exchange() != Exchange::Bitstamp { return Err(ErrorKind::InvalidConfigType(Exchange::Bitstamp, creds.exchange()).into()); } //TODO: Handle correctly TLS errors with error_chain. let ssl = match NativeTlsClient::new() { Ok(res) => res, Err(_) => return Err(ErrorKind::TlsError.into()), }; let connector = HttpsConnector::new(ssl); Ok(BitstampApi { last_request: 0, api_key: creds.get("api_key").unwrap_or_default(), api_secret: creds.get("api_secret").unwrap_or_default(), customer_id: creds.get("customer_id").unwrap_or_default(), http_client: Client::with_connector(connector), burst: false, // No burst by default }) } /// The number of calls in a given period is limited. In order to avoid a ban we limit /// by default the number of api requests. /// This function sets or removes the limitation. /// Burst false implies no block. /// Burst true implies there is a control over the number of calls allowed to the exchange pub fn set_burst(&mut self, burst: bool) { self.burst = burst } fn block_or_continue(&self) { if ! 
self.burst { let threshold: u64 = 1000; // 600 requests per 10 mins = 1 request per second let offset: u64 = helpers::get_unix_timestamp_ms() as u64 - self.last_request as u64; if offset < threshold { let wait_ms = Duration::from_millis(threshold - offset); thread::sleep(wait_ms); } } } fn public_query(&mut self, params: &HashMap<&str, &str>) -> Result<Map<String, Value>> { let method: &str = params .get("method") .ok_or_else(|| "Missing \"method\" field.")?; let pair: &str = params.get("pair").ok_or_else(|| "Missing \"pair\" field.")?; let url: String = utils::build_url(method, pair); self.block_or_continue(); let mut response = self.http_client.get(&url).send()?; self.last_request = helpers::get_unix_timestamp_ms(); let mut buffer = String::new(); response.read_to_string(&mut buffer)?; utils::deserialize_json(&buffer) } /// /// /// #Examples /// /// ```json /// extern crate coinnect; /// use coinnect::bitstamp::BitstampApi; /// let mut api = BitstampApi::new("", ""); /// let result = api.private_query("balance", "btcusd"); /// assert_eq!(true, true); /// ``` fn private_query(&mut self, params: &HashMap<&str, &str>) -> Result<Map<String, Value>> { let method: &str = params .get("method") .ok_or_else(|| "Missing \"method\" field.")?; let pair: &str = params.get("pair").ok_or_else(|| "Missing \"pair\" field.")?; let url: String = utils::build_url(method, pair); let nonce = utils::generate_nonce(None); let signature = utils::build_signature(&nonce, &self.customer_id, &self.api_key, &self.api_secret)?; let copy_api_key = self.api_key.clone(); let mut post_params: &mut HashMap<&str, &str> = &mut HashMap::new(); post_params.insert("key", &copy_api_key); post_params.insert("signature", &signature); post_params.insert("nonce", &nonce); // copy params into post_params .... bit of a hack but will do for now params.iter().for_each(|(k,v)| { post_params.insert(k,v); }); helpers::strip_empties(&mut post_params); let post_data = helpers::url_encode_hashmap(post_params); let mut response = self.http_client .post(&url) .header(ContentType::form_url_encoded()) .body(&post_data) .send()?; let mut buffer = String::new(); response.read_to_string(&mut buffer)?; utils::deserialize_json(&buffer) } /// Sample output : /// /// ```json /// { /// "BTC_LTC":{ /// "last":"0.0251","lowestAsk":"0.02589999","highestBid":"0.0251", /// "percentChange":"0.02390438","baseVolume":"6.16485315","quoteVolume":"245.82513926"}, /// "BTC_NXT":{ /// "last":"0.00005730","lowestAsk":"0.00005710","highestBid":"0.00004903", /// "percentChange":"0.16701570","baseVolume":"0.45347489","quoteVolume":"9094"}, /// ... } /// ``` pub fn return_ticker(&mut self, pair: Pair) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let mut params: HashMap<&str, &str> = HashMap::new(); params.insert("pair", pair_name); params.insert("method", "ticker"); self.public_query(&params) } /// Sample output : /// /// ```json /// {"asks":[[0.00007600,1164],[0.00007620,1300], ... ], "bids":[[0.00006901,200], /// [0.00006900,408], ... 
], "timestamp": "1234567890"} /// ``` pub fn return_order_book(&mut self, pair: Pair) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let mut params: HashMap<&str, &str> = HashMap::new(); params.insert("method", "order_book"); params.insert("pair", pair_name); self.public_query(&params) } /// Sample output : /// /// ```json /// [{"date":"2014-02-10 04:23:23","type":"buy","rate":"0.00007600","amount":"140", /// "total":"0.01064"}, /// {"date":"2014-02-10 01:19:37","type":"buy","rate":"0.00007600","amount":"655", /// "total":"0.04978"}, ... ] /// ``` pub fn return_trade_history(&mut self, pair: Pair) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let mut params: HashMap<&str, &str> = HashMap::new(); params.insert("pair", pair_name); params.insert("method", "transactions"); self.public_query(&params) } /// Returns all of your available balances. /// /// Sample output: /// /// ```json /// {"BTC":"0.59098578","LTC":"3.31117268", ... } /// ``` pub fn return_balances(&mut self) -> Result<Map<String, Value>> { let mut params = HashMap::new(); params.insert("method", "balance"); params.insert("pair", ""); self.private_query(&params) } /// Add a buy limit order to the exchange /// limit_price : If the order gets executed, a new sell order will be placed, /// with "limit_price" as its price. /// daily_order (Optional) : Opens buy limit order which will be canceled /// at 0:00 UTC unless it already has been executed. Possible value: True pub fn buy_limit(&mut self, pair: Pair, amount: Volume, price: Price, price_limit: Option<Price>, daily_order: Option<bool>) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let amount_string = amount.to_string(); let price_string = price.to_string(); let price_limit_string = match price_limit { Some(limit) => limit.to_string(), None => "".to_string(), }; let mut params = HashMap::new(); params.insert("method", "buy"); params.insert("pair", pair_name); params.insert("amount", &amount_string); params.insert("price", &price_string); params.insert("limit_price", &price_limit_string); if let Some(order) = daily_order { let daily_order_str = if order { "True" } else { "" }; // False is not a possible value params.insert("daily_order", daily_order_str); } self.private_query(&params) } /// Add a sell limit order to the exchange /// limit_price : If the order gets executed, a new sell order will be placed, /// with "limit_price" as its price. /// daily_order (Optional) : Opens sell limit order which will be canceled /// at 0:00 UTC unless it already has been executed. 
Possible value: True pub fn sell_limit(&mut self, pair: Pair, amount: Volume, price: Price, price_limit: Option<Price>, daily_order: Option<bool>) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let amount_string = amount.to_string(); let price_string = price.to_string(); let price_limit_string = match price_limit { Some(limit) => limit.to_string(), None => "".to_string(), }; let mut params = HashMap::new(); params.insert("method", "sell"); params.insert("pair", pair_name); params.insert("amount", &amount_string); params.insert("price", &price_string); params.insert("limit_price", &price_limit_string); if let Some(order) = daily_order { let daily_order_str = if order { "True" } else { "" }; // False is not a possible value params.insert("daily_order", daily_order_str); } self.private_query(&params) } /// Add a market buy order to the exchange /// By placing a market order you acknowledge that the execution of your order depends /// on the market conditions and that these conditions may be subject to sudden changes /// that cannot be foreseen. pub fn buy_market(&mut self, pair: Pair, amount: Volume) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let amount_string = amount.to_string(); let mut params = HashMap::new(); params.insert("method", "buy/market"); params.insert("pair", pair_name); params.insert("amount", &amount_string); self.private_query(&params) } /// Add a market sell order to the exchange /// By placing a market order you acknowledge that the execution of your order depends /// on the market conditions and that these conditions may be subject to sudden changes /// that cannot be foreseen. pub fn sell_market(&mut self, pair: Pair, amount: Volume) -> Result<Map<String, Value>> { let pair_name = match utils::get_pair_string(&pair) { Some(name) => name, None => return Err(ErrorKind::PairUnsupported.into()), }; let amount_string = amount.to_string(); let mut params = HashMap::new(); params.insert("method", "sell/market"); params.insert("pair", pair_name); params.insert("amount", &amount_string); self.private_query(&params) } } #[cfg(test)] mod bitstamp_api_tests { use super::*; #[test] fn should_block_or_not_block_when_enabled_or_disabled() { let mut api = BitstampApi { last_request: helpers::get_unix_timestamp_ms(), api_key: "".to_string(), api_secret: "".to_string(), customer_id: "".to_string(), http_client: Client::new(), burst: false, }; <|fim▁hole|> let start = helpers::get_unix_timestamp_ms(); api.block_or_continue(); api.last_request = helpers::get_unix_timestamp_ms(); let difference = api.last_request - start; assert!(difference >= 999); assert!(difference < 10000); api.set_burst(true); let start = helpers::get_unix_timestamp_ms(); api.block_or_continue(); api.last_request = helpers::get_unix_timestamp_ms(); let difference = api.last_request - start; assert!(difference < 10); counter = counter + 1; if counter >= 3 { break; } } } }<|fim▁end|>
let mut counter = 0; loop { api.set_burst(false);
<|file_name|>LatchDemo_1.java<|end_file_name|><|fim▁begin|>package com.andyadc.concurrency.latch; import java.util.ArrayList; import java.util.List; import java.util.Random; import java.util.concurrent.*; /** * @author andaicheng * @version 2017/3/10 */ public class LatchDemo_1 { public static void main(String[] args) throws Exception { int num = 10; //发令枪只只响一次 CountDownLatch begin = new CountDownLatch(1); //参与跑步人数 CountDownLatch end = new CountDownLatch(num); ExecutorService es = Executors.newFixedThreadPool(num); //记录跑步成绩 List<Future<Integer>> futures = new ArrayList<>(); for (int i = 0; i < num; i++) { futures.add(es.submit(new Runner(begin, end))); } //预备 TimeUnit.SECONDS.sleep(10); //发令枪响 begin.countDown(); //等待跑者跑完 end.await(); int sum = 0; for (Future<Integer> f : futures) { sum += f.get(); } System.out.println("平均分数: " + (float) (sum / num)); } } class Runner implements Callable<Integer> { //开始信号 private CountDownLatch begin; //结束信号 private CountDownLatch end; public Runner(CountDownLatch begin, CountDownLatch end) { this.begin = begin; this.end = end; } @Override public Integer call() throws Exception { //跑步成绩 int score = new Random().nextInt(10); //等待发令枪响 <|fim▁hole|> TimeUnit.SECONDS.sleep(score); //跑步结束 end.countDown(); System.out.println("score:" + score); return score; } }<|fim▁end|>
begin.await();
<|file_name|>unix.rs<|end_file_name|><|fim▁begin|>use futures::future::{self, Either, FutureExt}; use log::info; use std::io; use tokio::signal::unix::{signal, SignalKind}; /// Create a monitor future for signals /// /// It will exit when received `SIGTERM` or `SIGINT`. pub async fn create_signal_monitor() -> io::Result<()> { // Future resolving to two signal streams. Can fail if setting up signal monitoring fails let mut sigterm = signal(SignalKind::terminate())?; let mut sigint = signal(SignalKind::interrupt())?; let signal_name = match future::select(sigterm.recv().boxed(), sigint.recv().boxed()).await { Either::Left(..) => "SIGTERM", Either::Right(..) => "SIGINT", }; info!("received {}, exiting", signal_name); Ok(())<|fim▁hole|>}<|fim▁end|>
<|file_name|>cloudlet_edge_redirector.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python # Very basic script template. Use this to build new # examples for use in the api-kickstart repository # """ Copyright 2015 Akamai Technologies, Inc. All Rights Reserved. Licensed under the Apache License, Version 2.0 (the "License"); you may not use this file except in compliance with the License. You may obtain a copy of the License at http://www.apache.org/licenses/LICENSE-2.0 Unless required by applicable law or agreed to in writing, software distributed under the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the License for the specific language governing permissions and limitations under the License. """ import requests, logging, json from http_calls import EdgeGridHttpCaller from random import randint from akamai.edgegrid import EdgeGridAuth from config import EdgeGridConfig from urlparse import urljoin import urllib session = requests.Session() debug = False verbose = False section_name = "cloudlet" config = EdgeGridConfig({"verbose":debug},section_name) if hasattr(config, "debug") and config.debug: debug = True if hasattr(config, "verbose") and config.verbose: verbose = True # Set the config options session.auth = EdgeGridAuth( client_token=config.client_token, client_secret=config.client_secret, access_token=config.access_token ) # Set the baseurl based on config.host baseurl = '%s://%s/' % ('https', config.host) httpCaller = EdgeGridHttpCaller(session, debug, verbose, baseurl) if __name__ == "__main__": # Get the list of cloudlets to pick the one we want to use endpoint_result = httpCaller.getResult("/cloudlets/api/v2/cloudlet-info") # Result for edge redirector: # { # "location": "/cloudlets/api/v2/cloudlet-info/2", # "cloudletId": 2, # "cloudletCode": "SA", # "apiVersion": "2.0", # "cloudletName": "SAASACCESS" #}, # Get the group ID for the cloudlet we're looking to create<|fim▁hole|> # Result for group info: # "groupName": "API Bootcamp", # "location": "/cloudlets/api/v2/group-info/77649", # "parentId": 64867, # "capabilities": [ # { # "cloudletId": 0, # "cloudletCode": "ER", # "capabilities": [ # "View", # "Edit", # "Activate", # "Internal", # "AdvancedEdit" # ] # }, sample_post_body = { "cloudletId": 0, "groupId": 77649, "name": "APIBootcampERv1", "description": "Testing the creation of a policy" } sample_post_result = httpCaller.postResult('/cloudlets/api/v2/policies', json.dumps(sample_post_body)) #{ #"cloudletCode": "SA", #"cloudletId": 2, #"name": "APIBootcampEdgeRedirect", #"propertyName": null, #"deleted": false, #"lastModifiedDate": 1458765299155, #"description": "Testing the creation of a policy", #"apiVersion": "2.0", #"lastModifiedBy": "advocate2", #"serviceVersion": null, #"createDate": 1458765299155, #"location": "/cloudlets/api/v2/policies/11434", #"createdBy": "advocate2", #"activations": [ #{ #"serviceVersion": null, #"policyInfo": { #"status": "inactive", #"name": "APIBootcampEdgeRedirect", #"statusDetail": null, #"detailCode": 0, #"version": 0, #"policyId": 11434, #"activationDate": 0, #"activatedBy": null #}, #"network": "prod", #"apiVersion": "2.0", #"propertyInfo": null #}, #{ #"serviceVersion": null, #"policyInfo": { #"status": "inactive", #"name": "APIBootcampEdgeRedirect", #"statusDetail": null, #"detailCode": 0, #"version": 0, #"policyId": 11434, #"activationDate": 0, #"activatedBy": null #}, #"network": "staging", # "apiVersion": "2.0", #"propertyInfo": null #} #], # "groupId": 77649, 
# "policyId": 11434 <<<<<<<<<<< # } # Activate by associating with a specific property sample_post_url = "/cloudlets/api/v2/policies/11442/versions/1/activations" sample_post_body = { "network": "staging", "additionalPropertyNames": [ "akamaiapibootcamp.com" ] } sample_post_result = httpCaller.postResult(sample_post_url, json.dumps(sample_post_body)) # Next, add the behavior for cloudlets # PUT the update to activate the cloudlet<|fim▁end|>
endpoint_result = httpCaller.getResult("/cloudlets/api/v2/group-info")
<|file_name|>EFA19C34C9DC95F7511AF979CAD72884A6746A3B.js<|end_file_name|><|fim▁begin|>this.NesDb = this.NesDb || {}; NesDb[ 'EFA19C34C9DC95F7511AF979CAD72884A6746A3B' ] = { "$": { "name": "Arkanoid", "altname": "アルカノイド", "class": "Licensed", "subclass": "3rd-Party", "catalog": "TFC-AN-5400-10", "publisher": "Taito", "developer": "Taito", "region": "Japan", "players": "1", "date": "1986-12-26" }, "peripherals": [ { "device": [ { "$": { "type": "arkanoid", "name": "Vaus Controller" } } ] } ], "cartridge": [ { "$": { "system": "Famicom", "crc": "D89E5A67", "sha1": "EFA19C34C9DC95F7511AF979CAD72884A6746A3B", "dump": "ok", "dumper": "bootgod", "datedumped": "2007-06-29" }, "board": [ { "$": { "type": "TAITO-CNROM", "pcb": "FC-010", "mapper": "3" }, "prg": [ { "$": { "size": "32k", "crc": "35893B67", "sha1": "7BB46BD1070F09DBBBA3AA9A05E6265FCF9A3376" } } ], "chr": [ { "$": { "size": "16k", "crc": "C5789B20", "sha1": "58551085A755781030EAFA8C0E9238C9B1A50F5B" } } ], "chip": [ { "$": { "type": "74xx161" } } ], "pad": [ { "$": { "h": "0", "v": "1" } } ] } ] } ], "gameGenieCodes": [ { "name": "Player 1 start with 1 life", "codes": [ [ "PAOPUGLA" ] ] }, { "name": "Player 1 start with 6 lives", "codes": [ [ "TAOPUGLA" ] ] }, { "name": "Player 1 start with 9 lives", "codes": [ [ "PAOPUGLE" ] ] }, { "name": "Infinite lives, players 1 & 2", "codes": [ [ "OZNEATVK" ] ] }, { "name": "Player 1 start at level 5", "codes": [ [ "IAOONGPA" ] ] }, { "name": "Player 1 start at level 10", "codes": [ [ "ZAOONGPE" ] ] }, { "name": "Player 1 start at level 15", "codes": [ [ "YAOONGPE" ] ] }, { "name": "Player 1 start at level 20", "codes": [ [ "GPOONGPA" ] ] },<|fim▁hole|> "PPOONGPE" ] ] }, { "name": "Player 1 start at level 30", "codes": [ [ "TPOONGPE" ] ] }, { "name": "No bat enhancement capsules", "codes": [ [ "SXNAIAAX" ] ] }, { "name": "No lasers", "codes": [ [ "SXVATAAX" ] ] } ] };<|fim▁end|>
{ "name": "Player 1 start at level 25", "codes": [ [
<|file_name|>htmlmetaelement.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at https://mozilla.org/MPL/2.0/. */ use crate::dom::attr::Attr; use crate::dom::bindings::cell::DomRefCell; use crate::dom::bindings::codegen::Bindings::HTMLMetaElementBinding; use crate::dom::bindings::codegen::Bindings::HTMLMetaElementBinding::HTMLMetaElementMethods; use crate::dom::bindings::codegen::Bindings::NodeBinding::NodeMethods; use crate::dom::bindings::inheritance::Castable; use crate::dom::bindings::root::{DomRoot, MutNullableDom}; use crate::dom::bindings::str::DOMString; use crate::dom::cssstylesheet::CSSStyleSheet; use crate::dom::document::Document; use crate::dom::element::{AttributeMutation, Element}; use crate::dom::htmlelement::HTMLElement; use crate::dom::htmlheadelement::HTMLHeadElement; use crate::dom::node::{ document_from_node, stylesheets_owner_from_node, window_from_node, BindContext, Node, UnbindContext, }; use crate::dom::virtualmethods::VirtualMethods; use dom_struct::dom_struct; use html5ever::{LocalName, Prefix}; use parking_lot::RwLock; use servo_arc::Arc; use servo_config::pref; use std::sync::atomic::AtomicBool; use style::media_queries::MediaList; use style::str::HTML_SPACE_CHARACTERS; use style::stylesheets::{CssRule, CssRules, Origin, Stylesheet, StylesheetContents, ViewportRule}; #[dom_struct] pub struct HTMLMetaElement { htmlelement: HTMLElement, #[ignore_malloc_size_of = "Arc"] stylesheet: DomRefCell<Option<Arc<Stylesheet>>>, cssom_stylesheet: MutNullableDom<CSSStyleSheet>, } impl HTMLMetaElement { fn new_inherited( local_name: LocalName, prefix: Option<Prefix>, document: &Document, ) -> HTMLMetaElement { HTMLMetaElement { htmlelement: HTMLElement::new_inherited(local_name, prefix, document), stylesheet: DomRefCell::new(None), cssom_stylesheet: MutNullableDom::new(None), } } #[allow(unrooted_must_root)] pub fn new( local_name: LocalName, prefix: Option<Prefix>, document: &Document, ) -> DomRoot<HTMLMetaElement> { Node::reflect_node( Box::new(HTMLMetaElement::new_inherited(local_name, prefix, document)), document, HTMLMetaElementBinding::Wrap, ) } pub fn get_stylesheet(&self) -> Option<Arc<Stylesheet>> { self.stylesheet.borrow().clone() } pub fn get_cssom_stylesheet(&self) -> Option<DomRoot<CSSStyleSheet>> { self.get_stylesheet().map(|sheet| { self.cssom_stylesheet.or_init(|| { CSSStyleSheet::new( &window_from_node(self), self.upcast::<Element>(), "text/css".into(), None, // todo handle location None, // todo handle title sheet, ) }) }) } fn process_attributes(&self) { let element = self.upcast::<Element>(); if let Some(ref name) = element.get_name() { let name = name.to_ascii_lowercase(); let name = name.trim_matches(HTML_SPACE_CHARACTERS); if name == "viewport" { self.apply_viewport(); } if name == "referrer" { self.apply_referrer(); } } } #[allow(unrooted_must_root)] fn apply_viewport(&self) { if !pref!(layout.viewport.enabled) { return; } let element = self.upcast::<Element>(); if let Some(ref content) = element.get_attribute(&ns!(), &local_name!("content")) { let content = content.value(); if !content.is_empty() { if let Some(translated_rule) = ViewportRule::from_meta(&**content) { let stylesheets_owner = stylesheets_owner_from_node(self); let document = document_from_node(self); let shared_lock = document.style_shared_lock(); let rule = CssRule::Viewport(Arc::new(shared_lock.wrap(translated_rule))); let sheet = 
Arc::new(Stylesheet { contents: StylesheetContents { rules: CssRules::new(vec![rule], shared_lock), origin: Origin::Author, namespaces: Default::default(), quirks_mode: document.quirks_mode(), url_data: RwLock::new(window_from_node(self).get_url()), source_map_url: RwLock::new(None), source_url: RwLock::new(None), }, media: Arc::new(shared_lock.wrap(MediaList::empty())), shared_lock: shared_lock.clone(), disabled: AtomicBool::new(false), }); *self.stylesheet.borrow_mut() = Some(sheet.clone()); stylesheets_owner.add_stylesheet(self.upcast(), sheet); } } } } fn process_referrer_attribute(&self) { let element = self.upcast::<Element>();<|fim▁hole|> let name = name.trim_matches(HTML_SPACE_CHARACTERS); if name == "referrer" { self.apply_referrer(); } } } /// <https://html.spec.whatwg.org/multipage/#meta-referrer> fn apply_referrer(&self) { if let Some(parent) = self.upcast::<Node>().GetParentElement() { if let Some(head) = parent.downcast::<HTMLHeadElement>() { head.set_document_referrer(); } } } } impl HTMLMetaElementMethods for HTMLMetaElement { // https://html.spec.whatwg.org/multipage/#dom-meta-name make_getter!(Name, "name"); // https://html.spec.whatwg.org/multipage/#dom-meta-name make_atomic_setter!(SetName, "name"); // https://html.spec.whatwg.org/multipage/#dom-meta-content make_getter!(Content, "content"); // https://html.spec.whatwg.org/multipage/#dom-meta-content make_setter!(SetContent, "content"); } impl VirtualMethods for HTMLMetaElement { fn super_type(&self) -> Option<&dyn VirtualMethods> { Some(self.upcast::<HTMLElement>() as &dyn VirtualMethods) } fn bind_to_tree(&self, context: &BindContext) { if let Some(ref s) = self.super_type() { s.bind_to_tree(context); } if context.tree_connected { self.process_attributes(); } } fn attribute_mutated(&self, attr: &Attr, mutation: AttributeMutation) { if let Some(s) = self.super_type() { s.attribute_mutated(attr, mutation); } self.process_referrer_attribute(); } fn unbind_from_tree(&self, context: &UnbindContext) { if let Some(ref s) = self.super_type() { s.unbind_from_tree(context); } if context.tree_connected { self.process_referrer_attribute(); if let Some(s) = self.stylesheet.borrow_mut().take() { stylesheets_owner_from_node(self).remove_stylesheet(self.upcast(), &s); } } } }<|fim▁end|>
        if let Some(ref name) = element.get_name() {
            let name = name.to_ascii_lowercase();
<|file_name|>request.go<|end_file_name|><|fim▁begin|>package model import ( "time" ) type Request struct { Id uint32 MailingListId uint32 FirstName string LastName string<|fim▁hole|> Email string IpAddress string ApprovalStatus string CreationDate time.Time }<|fim▁end|>
Room string
<|file_name|>const.py<|end_file_name|><|fim▁begin|>"""Define constants for the Ambient PWS component.""" DOMAIN = "ambient_station" ATTR_LAST_DATA = "last_data"<|fim▁hole|> CONF_APP_KEY = "app_key" DATA_CLIENT = "data_client" TOPIC_UPDATE = "update" TYPE_BINARY_SENSOR = "binary_sensor" TYPE_SENSOR = "sensor"<|fim▁end|>
<|file_name|>helpers.rs<|end_file_name|><|fim▁begin|>// Copyrighttape Technologies LLC. // // Licensed under the Apache License, Version 2.0 (the "License"); // you may not use this file except in compliance with the License. // You may obtain a copy of the License at // // http://www.apache.org/licenses/LICENSE-2.0 // // Unless required by applicable law or agreed to in writing, software // distributed under the License is distributed on an "AS IS" BASIS, // WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. // See the License for the specific language governing permissions and // limitations under the License. extern crate rustc_serialize; extern crate tempdir; use std::env; use std::fs::File; use std::io::Read; use std::io::Write; use std::path::Path; use std::process::Command; use std::sync::Arc; use std::time::UNIX_EPOCH; use self::rustc_serialize::hex::FromHex; use self::tempdir::TempDir; use crate::libexecutor::block::{BlockBody, ClosedBlock, OpenBlock}; use crate::libexecutor::command; use crate::libexecutor::executor::Executor; use crate::types::header::OpenHeader; use crate::types::transaction::SignedTransaction; use cita_crypto::PrivKey; use cita_types::traits::LowerHex; use cita_types::{Address, U256}; use cita_vm::{state::MemoryDB, state::State}; use crossbeam_channel::{Receiver, Sender}; use libproto::blockchain; use util::AsMillis; const SCRIPTS_DIR: &str = "../../scripts"; pub fn get_temp_state() -> State<MemoryDB> { let db = Arc::new(MemoryDB::new(false)); State::new(db).unwrap() } pub fn solc(name: &str, source: &str) -> (Vec<u8>, Vec<u8>) { // input and output of solc command let output_dir = TempDir::new("solc_output").unwrap().into_path(); let contract_file = output_dir.join("contract.sol"); let deploy_code_file = output_dir.join([name, ".bin"].join("")); let runtime_code_file = output_dir.join([name, ".bin-runtime"].join("")); // prepare contract file let mut file = File::create(contract_file.clone()).unwrap(); let mut content = String::new(); file.write_all(source.as_ref()).expect("failed to write"); // execute solc command Command::new("solc") .arg(contract_file.clone()) .arg("--bin") .arg("--bin-runtime") .arg("-o") .arg(output_dir) .output() .expect("failed to execute solc"); // read deploy code File::open(deploy_code_file) .expect("failed to open deploy code file!") .read_to_string(&mut content) .expect("failed to read binary"); let deploy_code = content.as_str().from_hex().unwrap(); // read runtime code let mut content = String::new(); File::open(runtime_code_file) .expect("failed to open deploy code file!") .read_to_string(&mut content) .expect("failed to read binary"); let runtime_code = content.from_hex().unwrap(); (deploy_code, runtime_code) } pub fn init_executor() -> Executor { let (_fsm_req_sender, fsm_req_receiver) = crossbeam_channel::unbounded(); let (fsm_resp_sender, _fsm_resp_receiver) = crossbeam_channel::unbounded(); let (_command_req_sender, command_req_receiver) = crossbeam_channel::bounded(0); let (command_resp_sender, _command_resp_receiver) = crossbeam_channel::bounded(0); init_executor2( fsm_req_receiver, fsm_resp_sender, command_req_receiver, command_resp_sender, ) } pub fn init_executor2( fsm_req_receiver: Receiver<OpenBlock>, fsm_resp_sender: Sender<ClosedBlock>, command_req_receiver: Receiver<command::Command>, command_resp_sender: Sender<command::CommandResp>, ) -> Executor { // FIXME temp dir should be removed automatically, but at present it is not let tempdir = 
TempDir::new("init_executor").unwrap().into_path();<|fim▁hole|> data_path.push("data"); env::set_var("DATA_PATH", data_path); let executor = Executor::init( genesis_path.to_str().unwrap(), tempdir.to_str().unwrap().to_string(), fsm_req_receiver, fsm_resp_sender, command_req_receiver, command_resp_sender, false, ); executor } pub fn create_block( executor: &Executor, to: Address, data: &Vec<u8>, nonce: (u32, u32), privkey: &PrivKey, ) -> OpenBlock { let mut block = OpenBlock::default(); block.set_parent_hash(executor.get_current_hash()); block.set_timestamp(AsMillis::as_millis(&UNIX_EPOCH.elapsed().unwrap())); block.set_number(executor.get_current_height() + 1); // header.proof= ?; let mut body = BlockBody::default(); let mut txs = Vec::new(); for i in nonce.0..nonce.1 { let mut tx = blockchain::Transaction::new(); if to == Address::from(0) { tx.set_to(String::from("")); } else { tx.set_to(to.lower_hex()); } tx.set_nonce(U256::from(i).lower_hex()); tx.set_data(data.clone()); tx.set_valid_until_block(100); tx.set_quota(1844674); let stx = tx.sign(*privkey); let new_tx = SignedTransaction::create(&stx).unwrap(); txs.push(new_tx); } body.set_transactions(txs); block.set_body(body); block } pub fn generate_contract() -> Vec<u8> { let source = r#" pragma solidity ^0.4.8; contract ConstructSol { uint a; event LogCreate(address contractAddr); event A(uint); function ConstructSol(){ LogCreate(this); } function set(uint _a) { a = _a; A(a); } function get() returns (uint) { return a; } } "#; let (data, _) = solc("ConstructSol", source); data } pub fn generate_block_header() -> OpenHeader { OpenHeader::default() } pub fn generate_block_body() -> BlockBody { let mut stx = SignedTransaction::default(); stx.data = vec![1; 200]; let transactions = vec![stx; 200]; BlockBody { transactions } } pub fn generate_default_block() -> OpenBlock { let block_body = generate_block_body(); let block_header = generate_block_header(); OpenBlock { body: block_body, header: block_header, } }<|fim▁end|>
    let genesis_path = Path::new(SCRIPTS_DIR).join("config_tool/genesis/genesis.json");
    let mut data_path = tempdir.clone();
<|file_name|>AllocateDatabaseServerV4RequestToDBStackConverterTest.java<|end_file_name|><|fim▁begin|>package com.sequenceiq.redbeams.converter.stack; import static org.assertj.core.api.Assertions.assertThat; import static org.junit.jupiter.api.Assertions.assertEquals; import static org.junit.jupiter.api.Assertions.assertThrows; import static org.mockito.ArgumentMatchers.any; import static org.mockito.ArgumentMatchers.anyInt; import static org.mockito.ArgumentMatchers.anyList; import static org.mockito.ArgumentMatchers.anyString; import static org.mockito.ArgumentMatchers.eq; import static org.mockito.Mockito.mock; import static org.mockito.Mockito.never; import static org.mockito.Mockito.verify; import static org.mockito.Mockito.when; import java.io.IOException; import java.security.cert.X509Certificate; import java.time.Instant; import java.util.HashMap; import java.util.HashSet; import java.util.List; import java.util.Map; import java.util.Set; import org.junit.jupiter.api.BeforeEach; import org.junit.jupiter.api.Test; import org.junit.jupiter.api.extension.ExtendWith; import org.junit.jupiter.params.ParameterizedTest; import org.junit.jupiter.params.provider.MethodSource; import org.mockito.Answers; import org.mockito.InjectMocks; import org.mockito.Mock; import org.mockito.junit.jupiter.MockitoExtension; import org.mockito.junit.jupiter.MockitoSettings; import org.mockito.quality.Strictness; import org.springframework.test.util.ReflectionTestUtils; import com.sequenceiq.cloudbreak.api.endpoint.v4.common.DatabaseVendor; import com.sequenceiq.cloudbreak.auth.CrnUser; import com.sequenceiq.cloudbreak.auth.altus.EntitlementService; import com.sequenceiq.cloudbreak.auth.crn.Crn; import com.sequenceiq.cloudbreak.auth.crn.CrnTestUtil; import com.sequenceiq.cloudbreak.auth.security.CrnUserDetailsService; import com.sequenceiq.cloudbreak.cloud.model.CloudSubnet; import com.sequenceiq.cloudbreak.cloud.model.StackTags; import com.sequenceiq.cloudbreak.common.exception.BadRequestException; import com.sequenceiq.cloudbreak.common.mappable.CloudPlatform; import com.sequenceiq.cloudbreak.common.mappable.MappableBase; import com.sequenceiq.cloudbreak.common.mappable.ProviderParameterCalculator; import com.sequenceiq.cloudbreak.common.mappable.ProviderParametersBase; import com.sequenceiq.cloudbreak.common.service.Clock; import com.sequenceiq.cloudbreak.tag.CostTagging; import com.sequenceiq.environment.api.v1.environment.model.response.DetailedEnvironmentResponse; import com.sequenceiq.environment.api.v1.environment.model.response.EnvironmentNetworkResponse; import com.sequenceiq.environment.api.v1.environment.model.response.LocationResponse; import com.sequenceiq.environment.api.v1.environment.model.response.SecurityAccessResponse; import com.sequenceiq.environment.api.v1.environment.model.response.TagResponse; import com.sequenceiq.redbeams.api.endpoint.v4.databaseserver.requests.AllocateDatabaseServerV4Request; import com.sequenceiq.redbeams.api.endpoint.v4.databaseserver.requests.SslConfigV4Request; import com.sequenceiq.redbeams.api.endpoint.v4.databaseserver.requests.SslMode; import com.sequenceiq.redbeams.api.endpoint.v4.databaseserver.responses.SslCertificateType; import com.sequenceiq.redbeams.api.endpoint.v4.stacks.DatabaseServerV4StackRequest; import com.sequenceiq.redbeams.api.endpoint.v4.stacks.NetworkV4StackRequest; import com.sequenceiq.redbeams.api.endpoint.v4.stacks.SecurityGroupV4StackRequest; import com.sequenceiq.redbeams.api.endpoint.v4.stacks.aws.AwsNetworkV4Parameters; import 
com.sequenceiq.redbeams.api.model.common.DetailedDBStackStatus; import com.sequenceiq.redbeams.api.model.common.Status; import com.sequenceiq.redbeams.configuration.DatabaseServerSslCertificateConfig; import com.sequenceiq.redbeams.configuration.SslCertificateEntry; import com.sequenceiq.redbeams.domain.stack.DBStack; import com.sequenceiq.redbeams.domain.stack.SslConfig; import com.sequenceiq.redbeams.service.AccountTagService; import com.sequenceiq.redbeams.service.EnvironmentService; import com.sequenceiq.redbeams.service.PasswordGeneratorService; import com.sequenceiq.redbeams.service.UserGeneratorService; import com.sequenceiq.redbeams.service.UuidGeneratorService; import com.sequenceiq.redbeams.service.crn.CrnService; import com.sequenceiq.redbeams.service.network.NetworkParameterAdder; import com.sequenceiq.redbeams.service.network.SubnetChooserService; import com.sequenceiq.redbeams.service.network.SubnetListerService; @ExtendWith(MockitoExtension.class) @MockitoSettings(strictness = Strictness.LENIENT) class AllocateDatabaseServerV4RequestToDBStackConverterTest { private static final String OWNER_CRN = "crn:cdp:iam:us-west-1:cloudera:user:external/[email protected]"; private static final String ENVIRONMENT_CRN = "myenv"; private static final String CLUSTER_CRN = "crn:cdp:datahub:us-west-1:cloudera:stack:id"; private static final String ENVIRONMENT_NAME = "myenv-amazing-env"; private static final Instant NOW = Instant.now(); private static final Map<String, Object> ALLOCATE_REQUEST_PARAMETERS = Map.of("key", "value"); private static final Map<String, Object> SUBNET_ID_REQUEST_PARAMETERS = Map.of("netkey", "netvalue"); private static final String PASSWORD = "password"; private static final String USERNAME = "username"; private static final String DEFAULT_SECURITY_GROUP_ID = "defaultSecurityGroupId"; private static final String UNKNOWN_CLOUD_PLATFORM = "UnknownCloudPlatform"; private static final String USER_EMAIL = "userEmail"; private static final CloudPlatform AWS_CLOUD_PLATFORM = CloudPlatform.AWS; private static final CloudPlatform AZURE_CLOUD_PLATFORM = CloudPlatform.AZURE; private static final String CLOUD_PROVIDER_IDENTIFIER_V2 = "cert-id-2"; private static final String CLOUD_PROVIDER_IDENTIFIER_V3 = "cert-id-3"; private static final String CERT_PEM_V2 = "super-cert-2"; private static final String CERT_PEM_V3 = "super-cert-3"; private static final String REGION = "myRegion"; private static final String DATABASE_VENDOR = "postgres"; private static final String REDBEAMS_DB_MAJOR_VERSION = "10"; private static final String FIELD_DB_SERVICE_SUPPORTED_PLATFORMS = "dbServiceSupportedPlatforms"; private static final String FIELD_REDBEAMS_DB_MAJOR_VERSION = "redbeamsDbMajorVersion"; private static final String FIELD_SSL_ENABLED = "sslEnabled"; private static final int MAX_VERSION = 3; private static final int VERSION_1 = 1; private static final int VERSION_2 = 2; private static final int VERSION_3 = 3; private static final int NO_CERTS = 0; private static final int SINGLE_CERT = 1; private static final int TWO_CERTS = 2; private static final int THREE_CERTS = 3; @Mock private EnvironmentService environmentService; @Mock(answer = Answers.RETURNS_DEEP_STUBS) private ProviderParameterCalculator providerParameterCalculator; @Mock private Clock clock; @Mock private SubnetListerService subnetListerService; @Mock private SubnetChooserService subnetChooserService; @Mock private UserGeneratorService userGeneratorService; @Mock private PasswordGeneratorService passwordGeneratorService; @Mock 
private NetworkParameterAdder networkParameterAdder; @Mock private UuidGeneratorService uuidGeneratorService; @Mock private CrnUserDetailsService crnUserDetailsService; @Mock private CostTagging costTagging; @Mock private CrnService crnService; @Mock private EntitlementService entitlementService; @Mock private AccountTagService accountTagService; @Mock private DatabaseServerSslCertificateConfig databaseServerSslCertificateConfig; @Mock private X509Certificate x509Certificate; @InjectMocks private AllocateDatabaseServerV4RequestToDBStackConverter underTest; private AllocateDatabaseServerV4Request allocateRequest; private NetworkV4StackRequest networkRequest; private DatabaseServerV4StackRequest databaseServerRequest; private SecurityGroupV4StackRequest securityGroupRequest; private SslCertificateEntry sslCertificateEntryV2; private SslCertificateEntry sslCertificateEntryV3; @BeforeEach public void setUp() { ReflectionTestUtils.setField(underTest, FIELD_DB_SERVICE_SUPPORTED_PLATFORMS, Set.of("AWS", "AZURE")); ReflectionTestUtils.setField(underTest, FIELD_REDBEAMS_DB_MAJOR_VERSION, REDBEAMS_DB_MAJOR_VERSION); ReflectionTestUtils.setField(underTest, FIELD_SSL_ENABLED, true); allocateRequest = new AllocateDatabaseServerV4Request(); networkRequest = new NetworkV4StackRequest(); allocateRequest.setNetwork(networkRequest); databaseServerRequest = new DatabaseServerV4StackRequest(); allocateRequest.setDatabaseServer(databaseServerRequest); securityGroupRequest = new SecurityGroupV4StackRequest(); databaseServerRequest.setSecurityGroup(securityGroupRequest); when(crnUserDetailsService.loadUserByUsername(OWNER_CRN)).thenReturn(getCrnUser()); when(uuidGeneratorService.randomUuid()).thenReturn("uuid"); when(accountTagService.list()).thenReturn(new HashMap<>()); when(uuidGeneratorService.uuidVariableParts(anyInt())).thenReturn("parts"); when(entitlementService.internalTenant(anyString())).thenReturn(true); sslCertificateEntryV2 = new SslCertificateEntry(VERSION_2, CLOUD_PROVIDER_IDENTIFIER_V2, CERT_PEM_V2, x509Certificate); sslCertificateEntryV3 = new SslCertificateEntry(VERSION_3, CLOUD_PROVIDER_IDENTIFIER_V3, CERT_PEM_V3, x509Certificate); when(databaseServerSslCertificateConfig.getMaxVersionByCloudPlatformAndRegion(anyString(), eq(REGION))).thenReturn(MAX_VERSION); when(clock.getCurrentInstant()).thenReturn(NOW); when(crnService.createCrn(any(DBStack.class))).thenReturn(CrnTestUtil.getDatabaseServerCrnBuilder() .setAccountId("accountid") .setResource("1") .build()); } @Test void conversionTestWhenOptionalElementsAreProvided() throws IOException { setupAllocateRequest(true); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(SINGLE_CERT); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(AWS_CLOUD_PLATFORM.name(), REGION, VERSION_3)) .thenReturn(sslCertificateEntryV3); DetailedEnvironmentResponse environment = DetailedEnvironmentResponse.builder() .withCloudPlatform(AWS_CLOUD_PLATFORM.name()) .withCrn(ENVIRONMENT_CRN) .withLocation(LocationResponse.LocationResponseBuilder.aLocationResponse().withName(REGION).build()) .withName(ENVIRONMENT_NAME) .withTag(new TagResponse()) .build(); when(environmentService.getByCrn(ENVIRONMENT_CRN)).thenReturn(environment); DBStack dbStack = underTest.convert(allocateRequest, OWNER_CRN); assertEquals(allocateRequest.getName(), dbStack.getName()); assertEquals(allocateRequest.getEnvironmentCrn(), dbStack.getEnvironmentId()); assertEquals(REGION, 
dbStack.getRegion()); assertEquals(AWS_CLOUD_PLATFORM.name(), dbStack.getCloudPlatform()); assertEquals(AWS_CLOUD_PLATFORM.name(), dbStack.getPlatformVariant()); assertEquals(1, dbStack.getParameters().size()); assertEquals("value", dbStack.getParameters().get("key")); assertEquals(Crn.safeFromString(OWNER_CRN), dbStack.getOwnerCrn()); assertEquals(USER_EMAIL, dbStack.getUserName()); assertEquals(Status.REQUESTED, dbStack.getStatus()); assertEquals(DetailedDBStackStatus.PROVISION_REQUESTED, dbStack.getDbStackStatus().getDetailedDBStackStatus()); assertEquals(NOW.toEpochMilli(), dbStack.getDbStackStatus().getCreated().longValue()); assertEquals("n-uuid", dbStack.getNetwork().getName()); assertEquals(1, dbStack.getNetwork().getAttributes().getMap().size()); assertEquals("netvalue", dbStack.getNetwork().getAttributes().getMap().get("netkey")); assertEquals("dbsvr-uuid", dbStack.getDatabaseServer().getName()); assertEquals(databaseServerRequest.getInstanceType(), dbStack.getDatabaseServer().getInstanceType()); assertEquals(DatabaseVendor.fromValue(databaseServerRequest.getDatabaseVendor()), dbStack.getDatabaseServer().getDatabaseVendor()); assertEquals("org.postgresql.Driver", dbStack.getDatabaseServer().getConnectionDriver()); assertEquals(databaseServerRequest.getStorageSize(), dbStack.getDatabaseServer().getStorageSize()); assertEquals(databaseServerRequest.getRootUserName(), dbStack.getDatabaseServer().getRootUserName()); assertEquals(databaseServerRequest.getRootUserPassword(), dbStack.getDatabaseServer().getRootPassword()); assertEquals(2, dbStack.getDatabaseServer().getAttributes().getMap().size()); assertEquals("dbvalue", dbStack.getDatabaseServer().getAttributes().getMap().get("dbkey")); assertEquals(REDBEAMS_DB_MAJOR_VERSION, dbStack.getDatabaseServer().getAttributes().getMap().get("engineVersion")); assertEquals(securityGroupRequest.getSecurityGroupIds(), dbStack.getDatabaseServer().getSecurityGroup().getSecurityGroupIds()); assertEquals(dbStack.getTags().get(StackTags.class).getUserDefinedTags().get("DistroXKey1"), "DistroXValue1"); verifySsl(dbStack, Set.of(CERT_PEM_V3), CLOUD_PROVIDER_IDENTIFIER_V3); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); verify(providerParameterCalculator).get(allocateRequest); verify(providerParameterCalculator).get(networkRequest); verify(subnetListerService, never()).listSubnets(any(), any()); verify(subnetChooserService, never()).chooseSubnets(anyList(), any(), any()); verify(networkParameterAdder, never()).addSubnetIds(any(), any(), any(), any()); verify(userGeneratorService, never()).generateUserName(); verify(passwordGeneratorService, never()).generatePassword(any()); } private CrnUser getCrnUser() { return new CrnUser("", "", "", USER_EMAIL, "", ""); } @Test void conversionTestWhenOptionalElementsGenerated() throws IOException { setupAllocateRequest(false); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(SINGLE_CERT); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(AWS_CLOUD_PLATFORM.name(), REGION, VERSION_3)) .thenReturn(sslCertificateEntryV3); List<CloudSubnet> cloudSubnets = List.of( new CloudSubnet("subnet-1", "", "az-a", ""), new CloudSubnet("subnet-2", "", "az-b", "") ); DetailedEnvironmentResponse environment = DetailedEnvironmentResponse.builder() .withCloudPlatform(AWS_CLOUD_PLATFORM.name()) .withName(ENVIRONMENT_NAME) 
.withCrn(ENVIRONMENT_CRN) .withTag(new TagResponse()) .withLocation(LocationResponse.LocationResponseBuilder.aLocationResponse().withName(REGION).build()) .withSecurityAccess(SecurityAccessResponse.builder().withDefaultSecurityGroupId(DEFAULT_SECURITY_GROUP_ID).build()) .withNetwork(EnvironmentNetworkResponse.builder() .withSubnetMetas( Map.of( "subnet-1", cloudSubnets.get(0), "subnet-2", cloudSubnets.get(1) ) ) .build()) .build(); when(environmentService.getByCrn(ENVIRONMENT_CRN)).thenReturn(environment); when(subnetListerService.listSubnets(any(), any())).thenReturn(cloudSubnets); when(subnetChooserService.chooseSubnets(any(), any(), any())).thenReturn(cloudSubnets); when(networkParameterAdder.addSubnetIds(any(), any(), any(), any())).thenReturn(SUBNET_ID_REQUEST_PARAMETERS); when(userGeneratorService.generateUserName()).thenReturn(USERNAME); when(passwordGeneratorService.generatePassword(any())).thenReturn(PASSWORD); DBStack dbStack = underTest.convert(allocateRequest, OWNER_CRN); assertEquals(ENVIRONMENT_NAME + "-dbstck-parts", dbStack.getName()); assertEquals(PASSWORD, dbStack.getDatabaseServer().getRootPassword()); assertEquals(USERNAME, dbStack.getDatabaseServer().getRootUserName()); assertEquals("n-uuid", dbStack.getNetwork().getName()); assertEquals(1, dbStack.getNetwork().getAttributes().getMap().size()); assertEquals("netvalue", dbStack.getNetwork().getAttributes().getMap().get("netkey")); assertThat(dbStack.getDatabaseServer().getSecurityGroup().getSecurityGroupIds()).hasSize(1); assertEquals(dbStack.getDatabaseServer().getSecurityGroup().getSecurityGroupIds().iterator().next(), DEFAULT_SECURITY_GROUP_ID); assertEquals(dbStack.getTags().get(StackTags.class).getUserDefinedTags().get("DistroXKey1"), "DistroXValue1"); verifySsl(dbStack, Set.of(CERT_PEM_V3), CLOUD_PROVIDER_IDENTIFIER_V3); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); verify(providerParameterCalculator).get(allocateRequest); verify(providerParameterCalculator, never()).get(networkRequest); verify(subnetListerService).listSubnets(any(), any()); verify(subnetChooserService).chooseSubnets(anyList(), any(), any()); verify(networkParameterAdder).addSubnetIds(any(), any(), any(), any()); verify(userGeneratorService).generateUserName(); verify(passwordGeneratorService).generatePassword(any()); } @Test void conversionTestWhenRequestAndEnvironmentCloudPlatformsDiffer() {<|fim▁hole|> allocateRequest.setEnvironmentCrn(ENVIRONMENT_CRN); DetailedEnvironmentResponse environment = DetailedEnvironmentResponse.builder() .withCloudPlatform(UNKNOWN_CLOUD_PLATFORM) .build(); when(environmentService.getByCrn(ENVIRONMENT_CRN)).thenReturn(environment); BadRequestException badRequestException = assertThrows(BadRequestException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(badRequestException).hasMessage("Cloud platform of the request AWS and the environment " + UNKNOWN_CLOUD_PLATFORM + " do not match."); } @Test void conversionTestWhenUnsupportedCloudPlatform() { allocateRequest.setCloudPlatform(CloudPlatform.YARN); allocateRequest.setTags(new HashMap<>()); allocateRequest.setEnvironmentCrn(ENVIRONMENT_CRN); DetailedEnvironmentResponse environment = DetailedEnvironmentResponse.builder() .withCloudPlatform(CloudPlatform.YARN.name()) .build(); when(environmentService.getByCrn(ENVIRONMENT_CRN)).thenReturn(environment); BadRequestException badRequestException = assertThrows(BadRequestException.class, () -> 
underTest.convert(allocateRequest, OWNER_CRN)); assertThat(badRequestException).hasMessage("Cloud platform YARN not supported yet."); } static Object[][] conversionTestWhenSslDisabledDataProvider() { return new Object[][]{ // testCaseName fieldSslEnabled sslConfigV4Request {"false, null", false, null}, {"true, null", true, null}, {"false, request with sslMode=null", false, new SslConfigV4Request()}, {"true, request with sslMode=null", true, new SslConfigV4Request()}, {"false, request with sslMode=DISABLED", false, createSslConfigV4Request(SslMode.DISABLED)}, {"true, request with sslMode=DISABLED", true, createSslConfigV4Request(SslMode.DISABLED)}, {"false, request with sslMode=ENABLED", false, createSslConfigV4Request(SslMode.ENABLED)}, }; } @ParameterizedTest(name = "{0}") @MethodSource("conversionTestWhenSslDisabledDataProvider") void conversionTestWhenSslDisabled(String testCaseName, boolean fieldSslEnabled, SslConfigV4Request sslConfigV4Request) { setupMinimalValid(sslConfigV4Request, AWS_CLOUD_PLATFORM); ReflectionTestUtils.setField(underTest, FIELD_SSL_ENABLED, fieldSslEnabled); DBStack dbStack = underTest.convert(allocateRequest, OWNER_CRN); SslConfig sslConfig = dbStack.getSslConfig(); assertThat(sslConfig).isNotNull(); Set<String> sslCertificates = sslConfig.getSslCertificates(); assertThat(sslCertificates).isNotNull(); assertThat(sslCertificates).isEmpty(); assertThat(sslConfig.getSslCertificateType()).isEqualTo(SslCertificateType.NONE); } private void setupMinimalValid(SslConfigV4Request sslConfigV4Request, CloudPlatform cloudPlatform) { allocateRequest.setEnvironmentCrn(ENVIRONMENT_CRN); allocateRequest.setTags(new HashMap<>()); allocateRequest.setClusterCrn(CLUSTER_CRN); allocateRequest.setSslConfig(sslConfigV4Request); DetailedEnvironmentResponse environment = DetailedEnvironmentResponse.builder() .withCloudPlatform(cloudPlatform.name()) .withCrn(ENVIRONMENT_CRN) .withLocation(LocationResponse.LocationResponseBuilder.aLocationResponse().withName(REGION).build()) .withName(ENVIRONMENT_NAME) .withTag(new TagResponse()) .build(); when(environmentService.getByCrn(ENVIRONMENT_CRN)).thenReturn(environment); databaseServerRequest.setDatabaseVendor(DATABASE_VENDOR); } @Test void conversionTestWhenSslEnabledAndAwsAndNoCerts() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(NO_CERTS); DBStack dbStack = underTest.convert(allocateRequest, OWNER_CRN); verifySsl(dbStack, Set.of(), null); verify(databaseServerSslCertificateConfig, never()).getCertByCloudPlatformAndRegionAndVersion(anyString(), anyString(), anyInt()); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); } @Test void conversionTestWhenSslEnabledAndAwsAndSingleCert() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); conversionTestWhenSslEnabledAndSingleCertReturnedInternal(AWS_CLOUD_PLATFORM.name(), SINGLE_CERT); } private void conversionTestWhenSslEnabledAndSingleCertReturnedInternal(String cloudPlatform, int numOfCerts) { when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(cloudPlatform, REGION)).thenReturn(numOfCerts); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(cloudPlatform, REGION, VERSION_3)).thenReturn(sslCertificateEntryV3); DBStack dbStack = underTest.convert(allocateRequest, 
OWNER_CRN); verifySsl(dbStack, Set.of(CERT_PEM_V3), CLOUD_PROVIDER_IDENTIFIER_V3); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); } @Test void conversionTestWhenSslEnabledAndAwsAndSingleCertErrorNullCert() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(SINGLE_CERT); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(AWS_CLOUD_PLATFORM.name(), REGION, VERSION_3)).thenReturn(null); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException).hasMessage("Could not find SSL certificate version 3 for cloud platform \"AWS\""); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); } @Test void conversionTestWhenSslEnabledAndAwsAndSingleCertErrorVersionMismatch() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(SINGLE_CERT); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(AWS_CLOUD_PLATFORM.name(), REGION, VERSION_3)) .thenReturn(sslCertificateEntryV2); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException).hasMessage("SSL certificate version mismatch for cloud platform \"AWS\": expected=3, actual=2"); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); } @Test void conversionTestWhenSslEnabledAndAwsAndSingleCertErrorBlankCloudProviderIdentifier() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(SINGLE_CERT); SslCertificateEntry sslCertificateEntryV3Broken = new SslCertificateEntry(VERSION_3, "", CERT_PEM_V3, x509Certificate); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(AWS_CLOUD_PLATFORM.name(), REGION, VERSION_3)) .thenReturn(sslCertificateEntryV3Broken); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException).hasMessage("Blank CloudProviderIdentifier in SSL certificate version 3 for cloud platform \"AWS\""); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); } @Test void conversionTestWhenSslEnabledAndAwsAndSingleCertErrorBlankPem() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AWS_CLOUD_PLATFORM.name(), REGION)).thenReturn(SINGLE_CERT); SslCertificateEntry sslCertificateEntryV3Broken = new SslCertificateEntry(VERSION_3, CLOUD_PROVIDER_IDENTIFIER_V3, "", x509Certificate); when(databaseServerSslCertificateConfig.getCertByCloudPlatformAndRegionAndVersion(AWS_CLOUD_PLATFORM.name(), REGION, VERSION_3)) 
.thenReturn(sslCertificateEntryV3Broken); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException).hasMessage("Blank PEM in SSL certificate version 3 for cloud platform \"AWS\""); verify(databaseServerSslCertificateConfig, never()).getCertsByCloudPlatformAndRegionAndVersions(anyString(), anyString(), any()); } @Test void conversionTestWhenSslEnabledAndAzureAndSingleCert() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); conversionTestWhenSslEnabledAndSingleCertReturnedInternal(AZURE_CLOUD_PLATFORM.name(), SINGLE_CERT); } @Test void conversionTestWhenSslEnabledAndAwsAndTwoCerts() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); conversionTestWhenSslEnabledAndSingleCertReturnedInternal(AWS_CLOUD_PLATFORM.name(), TWO_CERTS); } @Test void conversionTestWhenSslEnabledAndAwsAndThreeCerts() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AWS_CLOUD_PLATFORM); conversionTestWhenSslEnabledAndSingleCertReturnedInternal(AWS_CLOUD_PLATFORM.name(), THREE_CERTS); } @Test void conversionTestWhenSslEnabledAndAzureAndTwoCerts() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); conversionTestWhenSslEnabledAndTwoCertsReturnedInternal(AZURE_CLOUD_PLATFORM.name(), TWO_CERTS); } private void conversionTestWhenSslEnabledAndTwoCertsReturnedInternal(String cloudPlatform, int numOfCerts) { when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(cloudPlatform, REGION)).thenReturn(numOfCerts); when(databaseServerSslCertificateConfig.getCertsByCloudPlatformAndRegionAndVersions(cloudPlatform, REGION, VERSION_2, VERSION_3)) .thenReturn(Set.of(sslCertificateEntryV2, sslCertificateEntryV3)); DBStack dbStack = underTest.convert(allocateRequest, OWNER_CRN); verifySsl(dbStack, Set.of(CERT_PEM_V2, CERT_PEM_V3), CLOUD_PROVIDER_IDENTIFIER_V3); verify(databaseServerSslCertificateConfig, never()).getCertByCloudPlatformAndRegionAndVersion(anyString(), anyString(), anyInt()); } @Test void conversionTestWhenSslEnabledAndAzureAndTwoCertsErrorNullCert() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AZURE_CLOUD_PLATFORM.name(), REGION)).thenReturn(TWO_CERTS); Set<SslCertificateEntry> certs = new HashSet<>(); certs.add(sslCertificateEntryV3); certs.add(null); when(databaseServerSslCertificateConfig.getCertsByCloudPlatformAndRegionAndVersions(AZURE_CLOUD_PLATFORM.name(), REGION, VERSION_2, VERSION_3)) .thenReturn(certs); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException) .hasMessage("Could not find SSL certificate(s) when requesting versions [2, 3] for cloud platform \"AZURE\": expected 2 certificates, got 1"); verify(databaseServerSslCertificateConfig, never()).getCertByCloudPlatformAndRegionAndVersion(anyString(), anyString(), anyInt()); } @Test void conversionTestWhenSslEnabledAndAzureAndTwoCertsErrorFewerCerts() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AZURE_CLOUD_PLATFORM.name(), REGION)).thenReturn(TWO_CERTS); 
when(databaseServerSslCertificateConfig.getCertsByCloudPlatformAndRegionAndVersions(AZURE_CLOUD_PLATFORM.name(), REGION, VERSION_2, VERSION_3)) .thenReturn(Set.of(sslCertificateEntryV3)); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException) .hasMessage("Could not find SSL certificate(s) when requesting versions [2, 3] for cloud platform \"AZURE\": expected 2 certificates, got 1"); verify(databaseServerSslCertificateConfig, never()).getCertByCloudPlatformAndRegionAndVersion(anyString(), anyString(), anyInt()); } @Test void conversionTestWhenSslEnabledAndAzureAndTwoCertsErrorDuplicatedCertPem() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); SslCertificateEntry sslCertificateEntryV2DuplicateOfV3 = new SslCertificateEntry(VERSION_2, CLOUD_PROVIDER_IDENTIFIER_V3, CERT_PEM_V3, x509Certificate); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AZURE_CLOUD_PLATFORM.name(), REGION)).thenReturn(TWO_CERTS); when(databaseServerSslCertificateConfig.getCertsByCloudPlatformAndRegionAndVersions(AZURE_CLOUD_PLATFORM.name(), REGION, VERSION_2, VERSION_3)) .thenReturn(Set.of(sslCertificateEntryV2DuplicateOfV3, sslCertificateEntryV3)); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException) .hasMessage("Received duplicated SSL certificate PEM when requesting versions [2, 3] for cloud platform \"AZURE\""); verify(databaseServerSslCertificateConfig, never()).getCertByCloudPlatformAndRegionAndVersion(anyString(), anyString(), anyInt()); } @Test void conversionTestWhenSslEnabledAndAzureAndTwoCertsErrorVersionMismatch() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); SslCertificateEntry sslCertificateEntryV2Broken = new SslCertificateEntry(VERSION_1, CLOUD_PROVIDER_IDENTIFIER_V2, CERT_PEM_V2, x509Certificate); when(databaseServerSslCertificateConfig.getNumberOfCertsByCloudPlatformAndRegion(AZURE_CLOUD_PLATFORM.name(), REGION)).thenReturn(TWO_CERTS); when(databaseServerSslCertificateConfig.getCertsByCloudPlatformAndRegionAndVersions(AZURE_CLOUD_PLATFORM.name(), REGION, VERSION_2, VERSION_3)) .thenReturn(Set.of(sslCertificateEntryV2Broken, sslCertificateEntryV3)); IllegalStateException illegalStateException = assertThrows(IllegalStateException.class, () -> underTest.convert(allocateRequest, OWNER_CRN)); assertThat(illegalStateException) .hasMessage("Could not find SSL certificate version 2 for cloud platform \"AZURE\""); verify(databaseServerSslCertificateConfig, never()).getCertByCloudPlatformAndRegionAndVersion(anyString(), anyString(), anyInt()); } @Test void conversionTestWhenSslEnabledAndAzureAndThreeCerts() { setupMinimalValid(createSslConfigV4Request(SslMode.ENABLED), AZURE_CLOUD_PLATFORM); conversionTestWhenSslEnabledAndTwoCertsReturnedInternal(AZURE_CLOUD_PLATFORM.name(), THREE_CERTS); } private void verifySsl(DBStack dbStack, Set<String> sslCertificatesExpected, String cloudProviderIdentifierExpected) { SslConfig sslConfig = dbStack.getSslConfig(); assertThat(sslConfig).isNotNull(); Set<String> sslCertificates = sslConfig.getSslCertificates(); assertThat(sslCertificates).isNotNull(); assertThat(sslCertificates).isEqualTo(sslCertificatesExpected); assertThat(sslConfig.getSslCertificateType()).isEqualTo(SslCertificateType.CLOUD_PROVIDER_OWNED); 
assertThat(sslConfig.getSslCertificateActiveVersion()).isEqualTo(MAX_VERSION); assertThat(sslConfig.getSslCertificateActiveCloudProviderIdentifier()).isEqualTo(cloudProviderIdentifierExpected); } private void setupAllocateRequest(boolean provideOptionalFields) { allocateRequest.setEnvironmentCrn(ENVIRONMENT_CRN); allocateRequest.setTags(Map.of("DistroXKey1", "DistroXValue1")); allocateRequest.setClusterCrn(CLUSTER_CRN); if (provideOptionalFields) { allocateRequest.setName("myallocation"); AwsNetworkV4Parameters awsNetworkV4Parameters = new AwsNetworkV4Parameters(); awsNetworkV4Parameters.setSubnetId("subnet-1,subnet-2"); allocateRequest.getNetwork().setAws(awsNetworkV4Parameters); setupProviderCalculatorResponse(networkRequest, SUBNET_ID_REQUEST_PARAMETERS); } else { allocateRequest.setNetwork(null); allocateRequest.getDatabaseServer().setSecurityGroup(null); } allocateRequest.setSslConfig(createSslConfigV4Request(SslMode.ENABLED)); databaseServerRequest.setInstanceType("db.m3.medium"); databaseServerRequest.setDatabaseVendor(DATABASE_VENDOR); databaseServerRequest.setConnectionDriver("org.postgresql.Driver"); databaseServerRequest.setStorageSize(50L); if (provideOptionalFields) { databaseServerRequest.setRootUserName("root"); databaseServerRequest.setRootUserPassword("cloudera"); } setupProviderCalculatorResponse(allocateRequest, ALLOCATE_REQUEST_PARAMETERS); setupProviderCalculatorResponse(databaseServerRequest, new HashMap<>(Map.of("dbkey", "dbvalue"))); securityGroupRequest.setSecurityGroupIds(Set.of("sg-1234")); } private static SslConfigV4Request createSslConfigV4Request(SslMode sslMode) { SslConfigV4Request sslConfigV4Request = new SslConfigV4Request(); sslConfigV4Request.setSslMode(sslMode); return sslConfigV4Request; } private void setupProviderCalculatorResponse(ProviderParametersBase request, Map<String, Object> response) { MappableBase providerCalculatorResponse = mock(MappableBase.class); when(providerCalculatorResponse.asMap()).thenReturn(response); when(providerParameterCalculator.get(request)).thenReturn(providerCalculatorResponse); } }<|fim▁end|>
        allocateRequest.setCloudPlatform(AWS_CLOUD_PLATFORM);
        allocateRequest.setTags(new HashMap<>());
<|file_name|>test_item_info.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*- # Copyright (c) 2015, Frappe Technologies Pvt. Ltd. and Contributors and Contributors # See license.txt from __future__ import unicode_literals import frappe import unittest # test_records = frappe.get_test_records('Item Info')<|fim▁hole|><|fim▁end|>
class TestItemInfo(unittest.TestCase):
	pass
<|file_name|>telepathy-web.js<|end_file_name|><|fim▁begin|>'use strict'; require('should'); describe('index.html', () => { beforeEach(() => { browser.url('/'); // browser.localStorage('DELETE') does not work in PhantomJS browser.execute(function() { delete window.localStorage; window.localStorage = {}; }); browser.url('/'); $('#default-username').waitForEnabled(); $('#default-username').setValue('test'); $('#shared-secret').setValue('test'); $('.save-settings').click(); $('#domain').waitForEnabled(); // wait for .modal-bg to be hidden browser.waitUntil(() => $('.modal-bg').getCssProperty('z-index').value == -1 ); }); it('should generate a password', () => { $('#domain').setValue('example.com'); browser.waitUntil(() => $('#password').getValue() == 'z<u9N_[c"R' ); }); it('should generate a password at a given index', () => { $('#index').selectByVisibleText('1'); $('#domain').setValue('example.com'); browser.waitUntil(() => $('#password').getValue() == 'g:3WGYj0}~' ); }); it('should generate a password of a given length', () => { $('#length').selectByVisibleText('8'); $('#domain').setValue('example.com'); browser.waitUntil(() => $('#password').getValue() == 'u9N_[c"R' ); }); it('should open settings menu', () => { $('#open-settings').click(); browser.waitUntil(() => $('#settings').getLocation('y') >= 0 ); }); it('should change the algorithm', () => { $('#open-settings').click(); $('#algorithm').waitForEnabled(); $('#algorithm').selectByVisibleText('SHA-512'); $('.save-settings').click(); $('#domain').waitForEnabled(); $('#domain').setValue('example.com'); browser.waitUntil(() =><|fim▁hole|> });<|fim▁end|>
      $('#password').getValue() == 'V{fvC^YRi('
    );
  });
<|file_name|>inspector.rs<|end_file_name|><|fim▁begin|>/* This Source Code Form is subject to the terms of the Mozilla Public * License, v. 2.0. If a copy of the MPL was not distributed with this * file, You can obtain one at http://mozilla.org/MPL/2.0/. */ //! Liberally derived from the [Firefox JS implementation] //! (http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/inspector.js). use devtools_traits::{DevtoolScriptControlMsg, NodeInfo}; use devtools_traits::DevtoolScriptControlMsg::{GetRootNode, GetDocumentElement, GetChildren}; use devtools_traits::DevtoolScriptControlMsg::{GetLayout, ModifyAttribute}; use actor::{Actor, ActorRegistry}; use protocol::JsonPacketStream; use collections::BTreeMap; use msg::constellation_msg::PipelineId; use rustc_serialize::json::{self, Json, ToJson}; use std::cell::RefCell; use std::net::TcpStream; use std::sync::mpsc::{channel, Sender}; pub struct InspectorActor { pub name: String, pub walker: RefCell<Option<String>>, pub pageStyle: RefCell<Option<String>>, pub highlighter: RefCell<Option<String>>, pub script_chan: Sender<DevtoolScriptControlMsg>, pub pipeline: PipelineId, } #[derive(RustcEncodable)] struct GetHighlighterReply { highligter: HighlighterMsg, // sic. from: String, } #[derive(RustcEncodable)] struct HighlighterMsg { actor: String, } struct HighlighterActor { name: String, } pub struct NodeActor { pub name: String, script_chan: Sender<DevtoolScriptControlMsg>, pipeline: PipelineId, } #[derive(RustcEncodable)] struct ShowBoxModelReply { from: String, } #[derive(RustcEncodable)] struct HideBoxModelReply { from: String, } impl Actor for HighlighterActor { fn name(&self) -> String { self.name.clone() } fn handle_message(&self, _registry: &ActorRegistry, msg_type: &str, _msg: &json::Object, stream: &mut TcpStream) -> Result<bool, ()> { Ok(match msg_type { "showBoxModel" => { let msg = ShowBoxModelReply { from: self.name(), }; stream.write_json_packet(&msg); true } "hideBoxModel" => { let msg = HideBoxModelReply { from: self.name(), }; stream.write_json_packet(&msg); true } _ => false, })<|fim▁hole|>} #[derive(RustcEncodable)] struct ModifyAttributeReply{ from: String, } impl Actor for NodeActor { fn name(&self) -> String { self.name.clone() } fn handle_message(&self, registry: &ActorRegistry, msg_type: &str, msg: &json::Object, stream: &mut TcpStream) -> Result<bool, ()> { Ok(match msg_type { "modifyAttributes" => { let target = msg.get(&"to".to_string()).unwrap().as_string().unwrap(); let mods = msg.get(&"modifications".to_string()).unwrap().as_array().unwrap(); let modifications = mods.iter().map(|json_mod| { json::decode(&json_mod.to_string()).unwrap() }).collect(); self.script_chan.send(ModifyAttribute(self.pipeline, registry.actor_to_script(target.to_string()), modifications)) .unwrap(); let reply = ModifyAttributeReply{ from: self.name(), }; stream.write_json_packet(&reply); true } _ => false, }) } } #[derive(RustcEncodable)] struct GetWalkerReply { from: String, walker: WalkerMsg, } #[derive(RustcEncodable)] struct WalkerMsg { actor: String, root: NodeActorMsg, } #[derive(RustcEncodable)] struct AttrMsg { namespace: String, name: String, value: String, } #[derive(RustcEncodable)] struct NodeActorMsg { actor: String, baseURI: String, parent: String, nodeType: u16, namespaceURI: String, nodeName: String, numChildren: usize, name: String, publicId: String, systemId: String, attrs: Vec<AttrMsg>, pseudoClassLocks: Vec<String>, isDisplayed: bool, hasEventListeners: bool, isDocumentElement: bool, shortValue: String, 
incompleteValue: bool, } trait NodeInfoToProtocol { fn encode(self, actors: &ActorRegistry, display: bool, script_chan: Sender<DevtoolScriptControlMsg>, pipeline: PipelineId) -> NodeActorMsg; } impl NodeInfoToProtocol for NodeInfo { fn encode(self, actors: &ActorRegistry, display: bool, script_chan: Sender<DevtoolScriptControlMsg>, pipeline: PipelineId) -> NodeActorMsg { let actor_name = if !actors.script_actor_registered(self.uniqueId.clone()) { let name = actors.new_name("node"); let node_actor = NodeActor { name: name.clone(), script_chan: script_chan, pipeline: pipeline.clone(), }; actors.register_script_actor(self.uniqueId, name.clone()); actors.register_later(box node_actor); name } else { actors.script_to_actor(self.uniqueId) }; NodeActorMsg { actor: actor_name, baseURI: self.baseURI, parent: actors.script_to_actor(self.parent.clone()), nodeType: self.nodeType, namespaceURI: self.namespaceURI, nodeName: self.nodeName, numChildren: self.numChildren, name: self.name, publicId: self.publicId, systemId: self.systemId, attrs: self.attrs.into_iter().map(|attr| { AttrMsg { namespace: attr.namespace, name: attr.name, value: attr.value, } }).collect(), pseudoClassLocks: vec!(), //TODO get this data from script isDisplayed: display, hasEventListeners: false, //TODO get this data from script isDocumentElement: self.isDocumentElement, shortValue: self.shortValue, incompleteValue: self.incompleteValue, } } } struct WalkerActor { name: String, script_chan: Sender<DevtoolScriptControlMsg>, pipeline: PipelineId, } #[derive(RustcEncodable)] struct QuerySelectorReply { from: String, } #[derive(RustcEncodable)] struct DocumentElementReply { from: String, node: NodeActorMsg, } #[derive(RustcEncodable)] struct ClearPseudoclassesReply { from: String, } #[derive(RustcEncodable)] struct ChildrenReply { hasFirst: bool, hasLast: bool, nodes: Vec<NodeActorMsg>, from: String, } impl Actor for WalkerActor { fn name(&self) -> String { self.name.clone() } fn handle_message(&self, registry: &ActorRegistry, msg_type: &str, msg: &json::Object, stream: &mut TcpStream) -> Result<bool, ()> { Ok(match msg_type { "querySelector" => { let msg = QuerySelectorReply { from: self.name(), }; stream.write_json_packet(&msg); true } "documentElement" => { let (tx, rx) = channel(); self.script_chan.send(GetDocumentElement(self.pipeline, tx)).unwrap(); let doc_elem_info = rx.recv().unwrap(); let node = doc_elem_info.encode(registry, true, self.script_chan.clone(), self.pipeline); let msg = DocumentElementReply { from: self.name(), node: node, }; stream.write_json_packet(&msg); true } "clearPseudoClassLocks" => { let msg = ClearPseudoclassesReply { from: self.name(), }; stream.write_json_packet(&msg); true } "children" => { let target = msg.get(&"node".to_string()).unwrap().as_string().unwrap(); let (tx, rx) = channel(); self.script_chan.send(GetChildren(self.pipeline, registry.actor_to_script(target.to_string()), tx)) .unwrap(); let children = rx.recv().unwrap(); let msg = ChildrenReply { hasFirst: true, hasLast: true, nodes: children.into_iter().map(|child| { child.encode(registry, true, self.script_chan.clone(), self.pipeline) }).collect(), from: self.name(), }; stream.write_json_packet(&msg); true } _ => false, }) } } #[derive(RustcEncodable)] struct GetPageStyleReply { from: String, pageStyle: PageStyleMsg, } #[derive(RustcEncodable)] struct PageStyleMsg { actor: String, } struct PageStyleActor { name: String, script_chan: Sender<DevtoolScriptControlMsg>, pipeline: PipelineId, } #[derive(RustcEncodable)] struct GetAppliedReply 
{ entries: Vec<AppliedEntry>, rules: Vec<AppliedRule>, sheets: Vec<AppliedSheet>, from: String, } #[derive(RustcEncodable)] struct GetComputedReply { computed: Vec<u32>, //XXX all css props from: String, } #[derive(RustcEncodable)] struct AppliedEntry { rule: String, pseudoElement: Json, isSystem: bool, matchedSelectors: Vec<String>, } #[derive(RustcEncodable)] struct AppliedRule { actor: String, __type__: u32, href: String, cssText: String, line: u32, column: u32, parentStyleSheet: String, } #[derive(RustcEncodable)] struct AppliedSheet { actor: String, href: String, nodeHref: String, disabled: bool, title: String, system: bool, styleSheetIndex: isize, ruleCount: usize, } #[derive(RustcEncodable)] struct GetLayoutReply { width: i32, height: i32, autoMargins: Json, from: String, } #[derive(RustcEncodable)] #[allow(dead_code)] struct AutoMargins { top: String, bottom: String, left: String, right: String, } impl Actor for PageStyleActor { fn name(&self) -> String { self.name.clone() } fn handle_message(&self, registry: &ActorRegistry, msg_type: &str, msg: &json::Object, stream: &mut TcpStream) -> Result<bool, ()> { Ok(match msg_type { "getApplied" => { //TODO: query script for relevant applied styles to node (msg.node) let msg = GetAppliedReply { entries: vec!(), rules: vec!(), sheets: vec!(), from: self.name(), }; stream.write_json_packet(&msg); true } "getComputed" => { //TODO: query script for relevant computed styles on node (msg.node) let msg = GetComputedReply { computed: vec!(), from: self.name(), }; stream.write_json_packet(&msg); true } //TODO: query script for box layout properties of node (msg.node) "getLayout" => { let target = msg.get(&"node".to_string()).unwrap().as_string().unwrap(); let (tx, rx) = channel(); self.script_chan.send(GetLayout(self.pipeline, registry.actor_to_script(target.to_string()), tx)) .unwrap(); let (width, height) = rx.recv().unwrap(); let auto_margins = msg.get(&"autoMargins".to_string()).unwrap().as_boolean().unwrap(); //TODO: the remaining layout properties (margin, border, padding, position) // as specified in getLayout in // http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/styles.js let msg = GetLayoutReply { width: width.round() as i32, height: height.round() as i32, autoMargins: if auto_margins { //TODO: real values like processMargins in // http://mxr.mozilla.org/mozilla-central/source/toolkit/devtools/server/actors/styles.js let mut m = BTreeMap::new(); m.insert("top".to_string(), "auto".to_string().to_json()); m.insert("bottom".to_string(), "auto".to_string().to_json()); m.insert("left".to_string(), "auto".to_string().to_json()); m.insert("right".to_string(), "auto".to_string().to_json()); Json::Object(m) } else { Json::Null }, from: self.name(), }; stream.write_json_packet(&msg); true } _ => false, }) } } impl Actor for InspectorActor { fn name(&self) -> String { self.name.clone() } fn handle_message(&self, registry: &ActorRegistry, msg_type: &str, _msg: &json::Object, stream: &mut TcpStream) -> Result<bool, ()> { Ok(match msg_type { "getWalker" => { if self.walker.borrow().is_none() { let walker = WalkerActor { name: registry.new_name("walker"), script_chan: self.script_chan.clone(), pipeline: self.pipeline, }; let mut walker_name = self.walker.borrow_mut(); *walker_name = Some(walker.name()); registry.register_later(box walker); } let (tx, rx) = channel(); self.script_chan.send(GetRootNode(self.pipeline, tx)).unwrap(); let root_info = rx.recv().unwrap(); let node = root_info.encode(registry, false, 
self.script_chan.clone(), self.pipeline); let msg = GetWalkerReply { from: self.name(), walker: WalkerMsg { actor: self.walker.borrow().clone().unwrap(), root: node, } }; stream.write_json_packet(&msg); true } "getPageStyle" => { if self.pageStyle.borrow().is_none() { let style = PageStyleActor { name: registry.new_name("pageStyle"), script_chan: self.script_chan.clone(), pipeline: self.pipeline, }; let mut pageStyle = self.pageStyle.borrow_mut(); *pageStyle = Some(style.name()); registry.register_later(box style); } let msg = GetPageStyleReply { from: self.name(), pageStyle: PageStyleMsg { actor: self.pageStyle.borrow().clone().unwrap(), }, }; stream.write_json_packet(&msg); true } //TODO: this is an old message; try adding highlightable to the root traits instead // and support getHighlighter instead //"highlight" => {} "getHighlighter" => { if self.highlighter.borrow().is_none() { let highlighter_actor = HighlighterActor { name: registry.new_name("highlighter"), }; let mut highlighter = self.highlighter.borrow_mut(); *highlighter = Some(highlighter_actor.name()); registry.register_later(box highlighter_actor); } let msg = GetHighlighterReply { from: self.name(), highligter: HighlighterMsg { actor: self.highlighter.borrow().clone().unwrap(), }, }; stream.write_json_packet(&msg); true } _ => false, }) } }<|fim▁end|>
}
<|file_name|>_instrumentedTheme.js<|end_file_name|><|fim▁begin|>import {normalize} from '../lib/themeUtils' const unused = new Set() const freeze = Object.freeze const createAccessors = (object, path = '') => { for (const key of Object.keys(object)) { const value = object[key] const keyPath = path ? `${path}.${key}` : key if (value && typeof value === 'object') { createAccessors(value, keyPath) } else if (typeof value === 'string') { unused.add(keyPath) Object.defineProperty(object, key, { get () { unused.delete(keyPath) return `%${keyPath}${value ? '#' + value : ''}%` } }) } } freeze.call(Object, object) } const theme = {} // normalize() caches the result, so this is just a cache key Object.freeze = obj => obj // Stub out so accessors can be created const normalized = normalize({theme}) createAccessors(normalized)<|fim▁hole|>export function checkThemeUsage (t) { t.deepEqual(unused, new Set(), 'All theme properties should be accessed at least once') }<|fim▁end|>
Object.freeze = freeze export default theme export {normalized as normalizedTheme}
<|file_name|>master.py<|end_file_name|><|fim▁begin|># coding=utf8 # Copyright © 2015-2017 Cask Data, Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); you may not # use this file except in compliance with the License. You may obtain a copy of # the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations under # the License. # import ambari_helpers as helpers from resource_management import * class Master(Script): def install(self, env): print('Install the CDAP Master') import params # Add repository file helpers.add_repo( params.files_dir + params.repo_file, params.os_repo_dir ) # Install any global packages self.install_packages(env) # Workaround for CDAP-3961 helpers.package('cdap-hbase-compat-1.1') # Install package helpers.package('cdap-master') self.configure(env) def start(self, env, upgrade_type=None): print('Start the CDAP Master') import params import status_params env.set_params(params) self.configure(env) helpers.create_hdfs_dir(params.hdfs_namespace, params.cdap_hdfs_user, 775) # Create user's HDFS home helpers.create_hdfs_dir('/user/' + params.cdap_user, params.cdap_user, 775) if params.cdap_hdfs_user != params.cdap_user: helpers.create_hdfs_dir('/user/' + params.cdap_hdfs_user, params.cdap_hdfs_user, 775) # Hack to work around CDAP-1967 self.remove_jackson(env) daemon_cmd = format('/opt/cdap/master/bin/cdap master start') no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1') Execute( daemon_cmd, user=params.cdap_user, not_if=no_op_test ) def stop(self, env, upgrade_type=None): print('Stop the CDAP Master') import status_params daemon_cmd = format('service cdap-master stop') no_op_test = format('ls {status_params.cdap_master_pid_file} >/dev/null 2>&1 && ps -p $(<{status_params.cdap_master_pid_file}) >/dev/null 2>&1') Execute( daemon_cmd, only_if=no_op_test ) def status(self, env): import status_params check_process_status(status_params.cdap_master_pid_file) def configure(self, env): print('Configure the CDAP Master') import params env.set_params(params) helpers.cdap_config('master') def upgrade(self, env): self.run_class( env, classname='io.cdap.cdap.data.tools.UpgradeTool', label='CDAP Upgrade Tool', arguments='upgrade force' ) def upgrade_hbase(self, env): self.run_class( env, classname='io.cdap.cdap.data.tools.UpgradeTool', label='CDAP HBase Coprocessor Upgrade Tool', arguments='upgrade_hbase force' ) def postupgrade(self, env): self.run_class( env, classname='io.cdap.cdap.data.tools.flow.FlowQueuePendingCorrector', label='CDAP Post-Upgrade Tool' ) def queue_debugger(self, env): self.run_class( env, classname='io.cdap.cdap.data.tools.SimpleHBaseQueueDebugger', label='CDAP Queue Debugger Tool' ) def jobqueue_debugger(self, env): self.run_class( env, classname='io.cdap.cdap.data.tools.JobQueueDebugger', label='CDAP Job Queue Debugger Tool' ) def run_class(self, env, classname, label=None, arguments=''): if label is None: label = classname<|fim▁hole|> import params cmd = format("/opt/cdap/master/bin/cdap run %s %s" % (classname, arguments)) Execute( cmd, user=params.cdap_user ) def remove_jackson(self, env): jackson_check = format('ls -1 
/opt/cdap/master/lib/org.codehaus.jackson* 2>/dev/null') Execute( 'rm -f /opt/cdap/master/lib/org.codehaus.jackson.jackson-*', not_if=jackson_check ) if __name__ == "__main__": Master().execute()<|fim▁end|>
print('Running: ' + label)
<|file_name|>index.js<|end_file_name|><|fim▁begin|>'use strict'; Object.defineProperty(exports, "__esModule", { value: true }); var _index = require('../../isSameWeek/index.js'); var _index2 = _interopRequireDefault(_index); var _index3 = require('../_lib/convertToFP/index.js'); var _index4 = _interopRequireDefault(_index3); function _interopRequireDefault(obj) { return obj && obj.__esModule ? obj : { default: obj }; } // This file is generated automatically by `scripts/build/fp.js`. Please, don't change it. var isSameWeekWithOptions = (0, _index4.default)(_index2.default, 3);<|fim▁hole|>exports.default = isSameWeekWithOptions; module.exports = exports['default'];<|fim▁end|>
<|file_name|>model.py<|end_file_name|><|fim▁begin|># Copyright (c) 2016 Nokia, Inc. # All Rights Reserved # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import collections class ChangeData(object): def __init__(self): self.new = dict() self.prev = dict() def __str__(self): '''returns simple dict representation of the mapping''' return "new = " + str(self.new) + ", prev = " + str(self.prev) class ObjBase(collections.MutableMapping): ''' Mapping that works like both a dict and a mutable object, i.e. d = ObjBase(foo='bar') and d.foo returns 'bar' ''' # ``__init__`` method required to create instance from class. def __init__(self, attributes=None): '''Use the object dict''' if (attributes is not None): self.__dict__.update(attributes) # The next five methods are requirements of the ABC. def __setitem__(self, key, value): self.__dict__[key] = value def __getitem__(self, key): return self.__dict__[key] def __delitem__(self, key): del self.__dict__[key] def __iter__(self): return iter(self.__dict__) def __len__(self): return len(self.__dict__) def __str__(self): '''returns simple dict representation of the mapping''' return str(self.__dict__) def __repr__(self):<|fim▁hole|> def update_attrs(self, new_attributes): changes = ChangeData() for key in new_attributes: if key in self.__dict__: if new_attributes[key] != self.__dict__[key]: changes.prev[key] = self.__dict__[key] self.__dict__[key] = new_attributes[key] changes.new[key] = new_attributes[key] else: self.__dict__[key] = new_attributes[key] changes.new[key] = new_attributes[key] return changes class Port(ObjBase): def __init__(self, id, attributes=None): super(self.__class__, self).__init__(attributes) self.__dict__["__id"] = id self.__dict__["__state"] = "Unbound" # "Unbound", "Bound", "InUse" class DataObj(ObjBase): def __init__(self, id, attributes=None): super(self.__class__, self).__init__(attributes) self.__dict__["__id"] = id class Model(object): def __init__(self): self.ports = dict() # Port objects<|fim▁end|>
'''echoes class, id, & reproducible representation in the REPL''' return '{}, {}'.format(super(ObjBase, self).__repr__(), self.__dict__)
<|file_name|>about_iteration.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # -*- coding: utf-8 -*- from runner.koan import * class AboutIteration(Koan): def test_iterators_are_a_type(self): it = iter(range(1,6)) total = 0 for num in it: total += num self.assertEqual(15 , total) def test_iterating_with_next(self): stages = iter(['alpha','beta','gamma']) try: self.assertEqual('alpha', next(stages)) next(stages) self.assertEqual('gamma', next(stages)) next(stages) except StopIteration as ex: err_msg = 'Ran out of iterations' self.assertRegex(err_msg, 'Ran out') # ------------------------------------------------------------------ def add_ten(self, item): return item + 10 def test_map_transforms_elements_of_a_list(self): seq = [1, 2, 3] mapped_seq = list() mapping = map(self.add_ten, seq) self.assertNotEqual(list, mapping.__class__) self.assertEqual(map, mapping.__class__) # In Python 3 built in iterator funcs return iterable view objects # instead of lists for item in mapping: mapped_seq.append(item) self.assertEqual([11, 12, 13], mapped_seq) # Note, iterator methods actually return objects of iter type in # python 3. In python 2 map() would give you a list. def test_filter_selects_certain_items_from_a_list(self): def is_even(item): return (item % 2) == 0 seq = [1, 2, 3, 4, 5, 6] even_numbers = list() for item in filter(is_even, seq): even_numbers.append(item) self.assertEqual([2,4,6], even_numbers) def test_just_return_first_item_found(self): def is_big_name(item): return len(item) > 4 names = ["Jim", "Bill", "Clarence", "Doug", "Eli"] name = None <|fim▁hole|> msg = 'Ran out of big names' self.assertEqual("Clarence", name) # ------------------------------------------------------------------ def add(self,accum,item): return accum + item def multiply(self,accum,item): return accum * item def test_reduce_will_blow_your_mind(self): import functools # As of Python 3 reduce() has been demoted from a builtin function # to the functools module. result = functools.reduce(self.add, [2, 3, 4]) self.assertEqual(int, result.__class__) # Reduce() syntax is same as Python 2 self.assertEqual(9, result) result2 = functools.reduce(self.multiply, [2, 3, 4], 1) self.assertEqual(24, result2) # Extra Credit: # Describe in your own words what reduce does. # ------------------------------------------------------------------ def test_use_pass_for_iterations_with_no_body(self): for num in range(1,5): pass self.assertEqual(4, num) # ------------------------------------------------------------------ def test_all_iteration_methods_work_on_any_sequence_not_just_lists(self): # Ranges are an iterable sequence result = map(self.add_ten, range(1,4)) self.assertEqual([11, 12, 13], list(result)) try: file = open("example_file.txt") try: def make_upcase(line): return line.strip().upper() upcase_lines = map(make_upcase, file.readlines()) self.assertEqual(["THIS", "IS", "A", "TEST"] , list(upcase_lines)) finally: # Arg, this is ugly. # We will figure out how to fix this later. file.close() except IOError: # should never happen self.fail()<|fim▁end|>
iterator = filter(is_big_name, names) try: name = next(iterator) except StopIteration:
<|file_name|>globals.py<|end_file_name|><|fim▁begin|>""" bamboo.globals ~~~~~~~~~~~~~ """ from peak.util.proxies import CallbackProxy from bamboo.context import context<|fim▁hole|><|fim▁end|>
db = CallbackProxy(lambda: context["db"])
<|file_name|>ps_parser.py<|end_file_name|><|fim▁begin|>from .utils import ShellParser class Parser(ShellParser): """Extract text from postscript files using ps2ascii command. """<|fim▁hole|><|fim▁end|>
def extract(self, filename, **kwargs): stdout, _ = self.run(['ps2ascii', filename]) return stdout
<|file_name|>3734300868bc_add_account_id.py<|end_file_name|><|fim▁begin|>"""add account id Revision ID: 3734300868bc Revises: 3772e5bcb34d Create Date: 2013-09-30 18:07:21.729288 """ # revision identifiers, used by Alembic. revision = '3734300868bc' down_revision = '3772e5bcb34d' from alembic import op import sqlalchemy as sa def upgrade(): op.add_column('account_profile', sa.Column('account_id', sa.Integer(11))) pass <|fim▁hole|>def downgrade(): pass<|fim▁end|>
<|file_name|>shell_plus.py<|end_file_name|><|fim▁begin|>import os import sys import time import traceback from optparse import make_option import six from django.conf import settings from django.core.management.base import NoArgsCommand from django_extensions.management.shells import import_objects from django_extensions.management.utils import signalcommand class Command(NoArgsCommand): def use_vi_mode(): editor = os.environ.get('EDITOR') if not editor: return False editor = os.path.basename(editor) return editor.startswith('vi') or editor.endswith('vim') option_list = NoArgsCommand.option_list + ( make_option('--plain', action='store_true', dest='plain', help='Tells Django to use plain Python, not BPython nor IPython.'), make_option('--bpython', action='store_true', dest='bpython', help='Tells Django to use BPython, not IPython.'), make_option('--ptpython', action='store_true', dest='ptpython', help='Tells Django to use PTPython, not IPython.'), make_option('--ptipython', action='store_true', dest='ptipython', help='Tells Django to use PT-IPython, not IPython.'), make_option('--ipython', action='store_true', dest='ipython', help='Tells Django to use IPython, not BPython.'), make_option('--notebook', action='store_true', dest='notebook', help='Tells Django to use IPython Notebook.'), make_option('--kernel', action='store_true', dest='kernel', help='Tells Django to start an IPython Kernel.'), make_option('--use-pythonrc', action='store_true', dest='use_pythonrc', help='Tells Django to execute PYTHONSTARTUP file (BE CAREFULL WITH THIS!)'), make_option('--print-sql', action='store_true', default=False, help="Print SQL queries as they're executed"), make_option('--dont-load', action='append', dest='dont_load', default=[], help='Ignore autoloading of some apps/models. Can be used several times.'), make_option('--quiet-load', action='store_true', default=False, dest='quiet_load', help='Do not display loaded models messages'), make_option('--vi', action='store_true', default=use_vi_mode(), dest='vi_mode', help='Load Vi key bindings (for --ptpython and --ptipython)'), make_option('--no-browser', action='store_true', default=False, dest='no_browser', help='Don\'t open the notebook in a browser after startup.'), ) help = "Like the 'shell' command but autoloads the models of all installed Django apps." 
@signalcommand def handle_noargs(self, **options): use_kernel = options.get('kernel', False) use_notebook = options.get('notebook', False) use_ipython = options.get('ipython', False) use_bpython = options.get('bpython', False) use_plain = options.get('plain', False) use_ptpython = options.get('ptpython', False) use_ptipython = options.get('ptipython', False) use_pythonrc = options.get('use_pythonrc', True) no_browser = options.get('no_browser', False) verbosity = int(options.get('verbosity', 1)) if options.get("print_sql", False): # Code from http://gist.github.com/118990 try: # Django 1.7 onwards from django.db.backends import utils except ImportError: # Django 1.6 and below from django.db.backends import util as utils sqlparse = None try: import sqlparse except ImportError: pass class PrintQueryWrapper(utils.CursorDebugWrapper): def execute(self, sql, params=()): starttime = time.time() try: return self.cursor.execute(sql, params) finally: execution_time = time.time() - starttime raw_sql = self.db.ops.last_executed_query(self.cursor, sql, params) if sqlparse: print(sqlparse.format(raw_sql, reindent=True)) else: print(raw_sql) print("") print('Execution time: %.6fs [Database: %s]' % (execution_time, self.db.alias)) print("") utils.CursorDebugWrapper = PrintQueryWrapper def get_kernel(): try: from IPython import release if release.version_info[0] < 2: print(self.style.ERROR("--kernel requires at least IPython version 2.0")) return from IPython import embed_kernel except ImportError: return traceback.format_exc() def run_kernel(): imported_objects = import_objects(options, self.style) embed_kernel(local_ns=imported_objects) return run_kernel def get_notebook(): from IPython import release try: from IPython.html.notebookapp import NotebookApp except ImportError: if release.version_info[0] >= 3: raise try: from IPython.frontend.html.notebook import notebookapp NotebookApp = notebookapp.NotebookApp except ImportError: return traceback.format_exc() def install_kernel_spec(app, display_name, ipython_arguments): """install an IPython >= 3.0 kernelspec that loads django extensions""" ksm = app.kernel_spec_manager ks = ksm.get_kernel_spec('python') ks.argv.extend(ipython_arguments) ks.display_name = display_name manage_py_dir, manage_py = os.path.split(os.path.realpath(sys.argv[0])) if manage_py == 'manage.py' and os.path.isdir(manage_py_dir) and manage_py_dir != os.getcwd(): pythonpath = ks.env.get('PYTHONPATH', os.environ.get('PYTHONPATH', '')) pythonpath = pythonpath.split(':') if manage_py_dir not in pythonpath: pythonpath.append(manage_py_dir) ks.env['PYTHONPATH'] = ':'.join(filter(None, pythonpath)) kernel_dir = os.path.join(ksm.user_kernel_dir, 'django_extensions') if not os.path.exists(kernel_dir): os.makedirs(kernel_dir) with open(os.path.join(kernel_dir, 'kernel.json'), 'w') as f: f.write(ks.to_json()) def run_notebook(): app = NotebookApp.instance() # Treat IPYTHON_ARGUMENTS from settings ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', []) if 'django_extensions.management.notebook_extension' not in ipython_arguments: ipython_arguments.extend(['--ext', 'django_extensions.management.notebook_extension']) # Treat NOTEBOOK_ARGUMENTS from settings notebook_arguments = getattr(settings, 'NOTEBOOK_ARGUMENTS', []) if no_browser and '--no-browser' not in notebook_arguments: notebook_arguments.append('--no-browser') if '--notebook-dir' not in notebook_arguments: notebook_arguments.extend(['--notebook-dir', '.']) # IPython < 3 passes through kernel args from notebook CLI if 
release.version_info[0] < 3: notebook_arguments.extend(ipython_arguments) app.initialize(notebook_arguments) # IPython >= 3 uses kernelspecs to specify kernel CLI args if release.version_info[0] >= 3: display_name = getattr(settings, 'IPYTHON_KERNEL_DISPLAY_NAME', "Django Shell-Plus") install_kernel_spec(app, display_name, ipython_arguments) app.start() return run_notebook def get_plain(): # Using normal Python shell import code imported_objects = import_objects(options, self.style) try: # Try activating rlcompleter, because it's handy. import readline except ImportError: pass else: # We don't have to wrap the following import in a 'try', because # we already know 'readline' was imported successfully. import rlcompleter readline.set_completer(rlcompleter.Completer(imported_objects).complete) readline.parse_and_bind("tab:complete") # We want to honor both $PYTHONSTARTUP and .pythonrc.py, so follow system # conventions and get $PYTHONSTARTUP first then import user. if use_pythonrc: pythonrc = os.environ.get("PYTHONSTARTUP") if pythonrc and os.path.isfile(pythonrc): global_ns = {} with open(pythonrc) as rcfile: try: six.exec_(compile(rcfile.read(), pythonrc, 'exec'), global_ns) imported_objects.update(global_ns) except NameError: pass # This will import .pythonrc.py as a side-effect try: import user # NOQA except ImportError: pass def run_plain(): code.interact(local=imported_objects) return run_plain def get_bpython(): try: from bpython import embed except ImportError: return traceback.format_exc() def run_bpython(): imported_objects = import_objects(options, self.style) embed(imported_objects) return run_bpython def get_ipython(): try: from IPython import start_ipython def run_ipython(): imported_objects = import_objects(options, self.style) ipython_arguments = getattr(settings, 'IPYTHON_ARGUMENTS', []) start_ipython(argv=ipython_arguments, user_ns=imported_objects) return run_ipython except ImportError: str_exc = traceback.format_exc() # IPython < 0.11 # Explicitly pass an empty list as arguments, because otherwise # IPython would use sys.argv from this script. # Notebook not supported for IPython < 0.11. 
try: from IPython.Shell import IPShell except ImportError: return str_exc + "\n" + traceback.format_exc() def run_ipython(): imported_objects = import_objects(options, self.style) shell = IPShell(argv=[], user_ns=imported_objects) shell.mainloop() return run_ipython def get_ptpython(): try: from ptpython.repl import embed, run_config except ImportError: tb = traceback.format_exc() try: # prompt_toolkit < v0.27 from prompt_toolkit.contrib.repl import embed, run_config except ImportError: return tb def run_ptpython(): imported_objects = import_objects(options, self.style) history_filename = os.path.expanduser('~/.ptpython_history') embed(globals=imported_objects, history_filename=history_filename, vi_mode=options.get('vi_mode', False), configure=run_config) return run_ptpython def get_ptipython(): try: from ptpython.repl import run_config from ptpython.ipython import embed except ImportError: tb = traceback.format_exc() try: # prompt_toolkit < v0.27 from prompt_toolkit.contrib.repl import run_config from prompt_toolkit.contrib.ipython import embed except ImportError: return tb def run_ptipython(): imported_objects = import_objects(options, self.style) history_filename = os.path.expanduser('~/.ptpython_history') embed(user_ns=imported_objects, history_filename=history_filename, vi_mode=options.get('vi_mode', False), configure=run_config) return run_ptipython def set_application_name(): """Set the application_name on PostgreSQL connection Use the fallback_application_name to let the user override it with PGAPPNAME env variable http://www.postgresql.org/docs/9.4/static/libpq-connect.html#LIBPQ-PARAMKEYWORDS # noqa """ supported_backends = ['django.db.backends.postgresql_psycopg2'] opt_name = 'fallback_application_name' default_app_name = 'django_shell' app_name = default_app_name dbs = getattr(settings, 'DATABASES', []) # lookup over all the databases entry for db in dbs.keys(): if dbs[db]['ENGINE'] in supported_backends: try: options = dbs[db]['OPTIONS'] except KeyError: options = {} # dot not override a defined value if opt_name in options.keys(): app_name = dbs[db]['OPTIONS'][opt_name] else: dbs[db]['OPTIONS'].update({opt_name: default_app_name}) app_name = default_app_name return app_name shells = ( ('ptipython', get_ptipython), ('ptpython', get_ptpython), ('bpython', get_bpython), ('ipython', get_ipython), ('plain', get_plain), ) SETTINGS_SHELL_PLUS = getattr(settings, 'SHELL_PLUS', None) shell = None shell_name = "any" set_application_name() if use_kernel: shell = get_kernel() shell_name = "IPython Kernel" elif use_notebook: shell = get_notebook() shell_name = "IPython Notebook" elif use_plain: shell = get_plain() shell_name = "plain" elif use_ipython: shell = get_ipython() shell_name = "IPython" elif use_bpython: shell = get_bpython() shell_name = "BPython" elif use_ptpython: shell = get_ptpython() shell_name = "ptpython" elif use_ptipython: shell = get_ptipython() shell_name = "ptipython" elif SETTINGS_SHELL_PLUS: shell_name = SETTINGS_SHELL_PLUS shell = dict(shells)[shell_name]() else: for shell_name, func in shells: shell = func() if callable(shell): if verbosity > 1: print(self.style.NOTICE("Using shell %s." % shell_name)) break if not callable(shell): if shell: print(shell) print(self.style.ERROR("Could not load %s interactive Python environment." % shell_name))<|fim▁hole|><|fim▁end|>
return shell()
<|file_name|>sched_score.cpp<|end_file_name|><|fim▁begin|>// This file is part of BOINC. // http://boinc.berkeley.edu // Copyright (C) 2008 University of California // // BOINC is free software; you can redistribute it and/or modify it // under the terms of the GNU Lesser General Public License // as published by the Free Software Foundation, // either version 3 of the License, or (at your option) any later version. // // BOINC is distributed in the hope that it will be useful, // but WITHOUT ANY WARRANTY; without even the implied warranty of // MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. // See the GNU Lesser General Public License for more details. // // You should have received a copy of the GNU Lesser General Public License // along with BOINC. If not, see <http://www.gnu.org/licenses/>. // job dispatch using a score-based approach: // - scan the job array, assigning a score to each job and building a list // (the score reflect a variety of factors). // - sort the list // - send jobs in order of decreasing score until request is satisfied // - do the above separately for each resource type #include <algorithm> #include "boinc_db.h" #include "error_numbers.h" #include "util.h" #include "sched_check.h" #include "sched_config.h" #include "sched_hr.h" #include "sched_main.h" #include "sched_msgs.h" #include "sched_send.h" #include "sched_shmem.h" #include "sched_types.h" #include "sched_version.h" #include "sched_score.h" // given the host's estimated speed, determine its size class // static int get_size_class(APP& app, double es) { for (int i=0; i<app.n_size_classes-1; i++) { if (es < app.size_class_quantiles[i]) return i; } return app.n_size_classes - 1; } // Assign a score to this job, // representing the value of sending the job to this host. 
// Also do some initial screening, // and return false if can't send the job to host // bool JOB::get_score(WU_RESULT& wu_result) { score = 0; if (!app->beta && wu_result.need_reliable) { if (!bavp->reliable) { return false; } } if (app->beta) { if (g_wreq->allow_beta_work) { score += 1; } else { if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job] can't send job %d for beta app to non-beta user\n", wu_result.workunit.id ); } return false; } } if (app_not_selected(app->id)) { if (g_wreq->allow_non_preferred_apps) { score -= 1; } else { if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job] app not selected for job %d\n", wu_result.workunit.id ); } return false; } } if (wu_result.infeasible_count) { score += 1; } if (app->locality_scheduling == LOCALITY_SCHED_LITE && g_request->file_infos.size() ) { int n = nfiles_on_host(wu_result.workunit); if (config.debug_locality_lite) { log_messages.printf(MSG_NORMAL, "[loc_lite] job %s has %d files on this host\n", wu_result.workunit.name, n ); } if (n > 0) { score += 10; } } if (app->n_size_classes > 1) { double effective_speed = bavp->host_usage.projected_flops * available_frac(*bavp); int target_size = get_size_class(*app, effective_speed); if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job] size: host %d job %d speed %f\n", target_size, wu_result.workunit.size_class, effective_speed ); } if (target_size == wu_result.workunit.size_class) { score += 5; } else if (target_size < wu_result.workunit.size_class) { score -= 2; } else { score -= 1; } } if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job]: score %f for result %d\n", score, wu_result.resultid ); } return true; } bool job_compare(JOB j1, JOB j2) { return (j1.score > j2.score); } static double req_sec_save[NPROC_TYPES]; static double req_inst_save[NPROC_TYPES]; static void clear_others(int rt) { for (int i=0; i<NPROC_TYPES; i++) { if (i == rt) continue; req_sec_save[i] = g_wreq->req_secs[i]; g_wreq->req_secs[i] = 0; req_inst_save[i] = g_wreq->req_instances[i]; g_wreq->req_instances[i] = 0; } } static void restore_others(int rt) { for (int i=0; i<NPROC_TYPES; i++) { if (i == rt) continue; g_wreq->req_secs[i] += req_sec_save[i]; g_wreq->req_instances[i] += req_inst_save[i]; } } // send work for a particular processor type // void send_work_score_type(int rt) { vector<JOB> jobs; if (config.debug_send_scan) { log_messages.printf(MSG_NORMAL, "[send_scan] scanning for %s jobs\n", proc_type_name(rt) ); } clear_others(rt); int nscan = ssp->max_wu_results; int rnd_off = rand() % ssp->max_wu_results; if (config.debug_send_scan) { log_messages.printf(MSG_NORMAL, "[send_scan] scanning %d slots starting at %d\n", nscan, rnd_off ); } for (int j=0; j<nscan; j++) { int i = (j+rnd_off) % ssp->max_wu_results; WU_RESULT& wu_result = ssp->wu_results[i]; if (wu_result.state != WR_STATE_PRESENT && wu_result.state != g_pid) { continue; } WORKUNIT wu = wu_result.workunit; JOB job; job.app = ssp->lookup_app(wu.appid); if (job.app->non_cpu_intensive) { if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job] [RESULT#%u] app is non compute intensive\n", wu_result.resultid ); } continue; } job.bavp = get_app_version(wu, true, false); if (!job.bavp) { if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job] [RESULT#%u] no app version available\n", wu_result.resultid ); } continue; } job.index = i; job.result_id = wu_result.resultid; if (!job.get_score(wu_result)) { if (config.debug_send_job) { 
log_messages.printf(MSG_NORMAL, "[send_job] [RESULT#%u] get_score() returned false\n", wu_result.resultid ); } continue; } if (config.debug_send_job) { log_messages.printf(MSG_NORMAL, "[send_job] [RESULT#%u] score: %f\n", wu_result.resultid, job.score ); } jobs.push_back(job); } std::sort(jobs.begin(), jobs.end(), job_compare); bool sema_locked = false; for (unsigned int i=0; i<jobs.size(); i++) { // check limit on total jobs // if (!work_needed(false)) { break; } // check limit on jobs for this processor type // if (!g_wreq->need_proc_type(rt)) { break; } JOB& job = jobs[i]; // check limits on jobs for this (app, processor type) // if (config.max_jobs_in_progress.exceeded(job.app, job.bavp->host_usage.proc_type)) { if (config.debug_quota) { log_messages.printf(MSG_NORMAL, "[quota] limit for app/proctype exceeded\n" ); } continue; } if (!sema_locked) { lock_sema(); sema_locked = true; } // make sure the job is still in the cache // array is locked at this point. // WU_RESULT& wu_result = ssp->wu_results[job.index]; if (wu_result.state != WR_STATE_PRESENT && wu_result.state != g_pid) { continue; } if (wu_result.resultid != job.result_id) { continue; } WORKUNIT wu = wu_result.workunit; int retval = wu_is_infeasible_fast( wu, wu_result.res_server_state, wu_result.res_priority, wu_result.res_report_deadline, *job.app, *job.bavp ); if (retval) { continue; } wu_result.state = g_pid; // It passed fast checks. // Release sema and do slow checks // unlock_sema(); sema_locked = false;<|fim▁hole|> switch (slow_check(wu_result, job.app, job.bavp)) { case 1: wu_result.state = WR_STATE_PRESENT; break; case 2: wu_result.state = WR_STATE_EMPTY; break; default: // slow_check() refreshes fields of wu_result.workunit; // update our copy too // wu.hr_class = wu_result.workunit.hr_class; wu.app_version_id = wu_result.workunit.app_version_id; // mark slot as empty AFTER we've copied out of it // (since otherwise feeder might overwrite it) // wu_result.state = WR_STATE_EMPTY; // reread result from DB, make sure it's still unsent // TODO: from here to end of add_result_to_reply() // (which updates the DB record) should be a transaction // SCHED_DB_RESULT result; result.id = wu_result.resultid; if (result_still_sendable(result, wu)) { add_result_to_reply(result, wu, job.bavp, false); // add_result_to_reply() fails only in pathological cases - // e.g. we couldn't update the DB record or modify XML fields. // If this happens, don't replace the record in the array // (we can't anyway, since we marked the entry as "empty"). // The feeder will eventually pick it up again, // and hopefully the problem won't happen twice. } break; } } if (sema_locked) { unlock_sema(); } restore_others(rt); g_wreq->best_app_versions.clear(); } void send_work_score() { for (int i=NPROC_TYPES-1; i>= 0; i--) { if (g_wreq->need_proc_type(i)) { send_work_score_type(i); } } }<|fim▁end|>
<|file_name|>io.py<|end_file_name|><|fim▁begin|>"""Some classes to support import of data files """ import os, glob import numpy import time try: import ConfigParser as configparser #gets rename to lowercase in python 3 except: import configparser class _BaseDataFile(object): """ """ def __init__(self, filepath): """ """ self.filepath = filepath self.info = self._loadHeader() self.data = self._loadData() def _findFile(self, ending='', orSimilar=False): """Finds files using the base filename and the optional `ending` param (used to separate data from header) If orSimilar==True then this function will first search for the exact file and then for any file of the appropriate format in that folder. (For a header file that can be useful, just to retrieve the <|fim▁hole|> """ #fetch all header/data files matching path searchPattern = self.filepath+'*'+ending filenames = glob.glob(searchPattern) #check if we have exactly one matching file filename=None if len(filenames)==0 and orSimilar: folder = os.path.split(os.path.abspath(self.filepath))[0] print('No exact match found for\n\t %s' %(searchPattern)) searchPattern = os.path.join(folder, '*'+ending) print('...searching instead for\n\t %s' %(searchPattern)) filenames = glob.glob(searchPattern) if len(filenames)==0: print('No file found: %s' %(searchPattern)) elif len(filenames)>1: print('Multiple files found') else: filename = filenames[0] return filename class DBPA_file(_BaseDataFile): """ DBPA amplifiers are made by Sensorium. Excellent signal to noise on the amp, with a very basic file format - a 5-line ASCII header file (config-style file) and a binary data file. Example usage: datFile = io.DBPA_file('jwp_2013_18_02') #don't include file extension print datFile.info #print the header info (samples, seconds etc) {'channels': 122, 'duration': 761, 'rate': 1000, 'samples': 761000} print datFile.data.shape (122, 761000) #channels, samples """ def _loadHeader(self): """Load info from a header file ('*.h.txt') """ filename = self._findFile(ending='h.txt', orSimilar=True) if not filename: print('No header file') #this header file looks like a config file with a single section cfg = configparser.ConfigParser() hdr = {} f = open(filename) cfg.readfp(f) #operates in place (doesn't return anything) f.close() hdr['channels'] = cfg.items('File Information') for name, val in cfg.items('File Information'): #reads entries in File Info section as a list of tuples if name.lower()=='number of channels': hdr['channels']=int(val.replace('"', '')) # convert '"200"' to 200 elif name.lower()=='samples per second': hdr['rate']=int(val.replace('"', '')) # convert '"200"' to 200 return hdr def _loadData(self): """ :param offset: the sample number to start reading from """ data = [] filename = self._findFile(ending='dat') fileSize = os.stat(filename).st_size self.info['duration'] = int(fileSize/self.info['rate']/self.info['channels']/4) #4 bytes per sample self.info['samples'] = self.info['duration']*self.info['rate'] if not filename: print('No data file') fileSize = os.stat(filename).st_size data = numpy.fromfile(filename, dtype='>f')# data are big-endian float32 data = data.reshape([self.info['samples'],self.info['channels']]) data = data.transpose() # to get (channels, time) return data<|fim▁end|>
<|file_name|>test_system_wrappers.py<|end_file_name|><|fim▁begin|>"""Проверки модуля system_wrappers.""" from logging import INFO from unittest import TestCase from unittest.mock import Mock, call, patch from codestyle import system_wrappers from codestyle.system_wrappers import ( ExitCodes, check_output, interrupt_program_flow, ) class Test(TestCase): """Проверка функций модуля.""" @patch('codestyle.system_wrappers.sys', new_callable=Mock) @patch.object(system_wrappers, '_logger', new_callable=Mock) def test_interrupt_program_flow( self, mocked_logger: Mock, mocked_sys: Mock ): """Проверка interrupt_program_flow.""" mock_log = Mock() mocked_logger.log = mock_log mock_exit = Mock() mocked_sys.exit = mock_exit interrupt_program_flow(log_message='Проверка вызова функции.') self.assertEqual(True, mock_log.called) self.assertEqual(1, mock_log.call_count) args, kwargs = mock_log.call_args self.assertTupleEqual((INFO, 'Проверка вызова функции.'), args) self.assertDictEqual({}, kwargs) self.assertEqual(True, mock_exit.called) self.assertEqual(1, mock_exit.call_count) args, kwargs = mock_exit.call_args self.assertTupleEqual((ExitCodes.SUCCESS,), args) self.assertDictEqual({}, kwargs) @patch('codestyle.system_wrappers.check_process_output', new_callable=Mock) @patch.object(system_wrappers, '_logger', new_callable=Mock) def test_check_output( self, mocked_logger: Mock, mocked_process_output_checker: Mock ): """Проверка check_output.""" mock_debug = Mock() mocked_logger.debug = mock_debug mock_rstrip = Mock() mock_decode = Mock(return_value=Mock(rstrip=mock_rstrip)) mocked_process_output_checker.return_value = Mock(decode=mock_decode) check_output(('application', 'run')) self.assertEqual(True, mock_debug.called) self.assertEqual(1, mock_debug.call_count) args, kwargs = mock_debug.call_args self.assertTupleEqual( ('Проверка наличия application в системе...',), args ) self.assertDictEqual({}, kwargs) self.assertEqual(True, mocked_process_output_checker.called) self.assertEqual(1, mocked_process_output_checker.call_count) args, kwargs = mocked_process_output_checker.call_args self.assertTupleEqual((('application', 'run'),), args) self.assertDictEqual({'timeout': 10}, kwargs) self.assertEqual(True, mock_decode.called) self.assertEqual(1, mock_decode.call_count) args, kwargs = mock_decode.call_args self.assertTupleEqual((), args) self.assertDictEqual({}, kwargs) self.assertEqual(True, mock_rstrip.called) self.assertEqual(1, mock_rstrip.call_count) args, kwargs = mock_rstrip.call_args<|fim▁hole|> 'codestyle.system_wrappers.interrupt_program_flow', new_callable=Mock ) @patch('codestyle.system_wrappers.check_process_output', new_callable=Mock) @patch.object(system_wrappers, '_logger', new_callable=Mock) def test_check_output_with_error( self, mocked_logger: Mock, mocked_process_output_checker: Mock, mocked_interrupt_program_flow: Mock, ): """Проверка check_output с ошибкой внутри.""" mock_debug = Mock() mock_warning = Mock() mocked_logger.debug = mock_debug mocked_logger.warning = mock_warning mocked_process_output_checker.side_effect = FileNotFoundError( 'Исполняемый файл application не найден.' 
) check_output(('application', 'run')) self.assertEqual(True, mock_debug.called) self.assertEqual(2, mock_debug.call_count) self.assertEqual(1, mock_warning.call_count) self.assertIn( call('Проверка наличия application в системе...'), mock_debug.mock_calls, ) self.assertIn( call('Инструмент application не найден.'), mock_warning.mock_calls ) self.assertIn( call('Исполняемый файл application не найден.'), mock_debug.mock_calls, ) self.assertEqual(True, mocked_interrupt_program_flow.called) self.assertEqual(1, mocked_interrupt_program_flow.call_count) args, kwargs = mocked_interrupt_program_flow.call_args self.assertTupleEqual((ExitCodes.UNSUCCESSFUL,), args) self.assertDictEqual({}, kwargs)<|fim▁end|>
self.assertTupleEqual((), args) self.assertDictEqual({}, kwargs) @patch(
<|file_name|>mod.rs<|end_file_name|><|fim▁begin|>use alloc::boxed::Box; use arch::intex::Intex; use collections::string::{String, ToString}; use collections::vec::Vec; use common::event::Event; use common::time::Duration; use arch::context::ContextManager; use fs::{KScheme, Resource, Scheme, VecResource, Url}; use sync::WaitQueue; use system::error::{Error, Result, ENOENT, EEXIST}; use system::syscall::O_CREAT; use self::console::Console; /// The Kernel Console pub mod console; /// The kernel environment pub struct Environment { /// Contexts pub contexts: Intex<ContextManager>, /// Clock realtime (default) pub clock_realtime: Intex<Duration>, /// Monotonic clock pub clock_monotonic: Intex<Duration>, /// Default console pub console: Intex<Console>, /// Pending events pub events: WaitQueue<Event>, /// Schemes pub schemes: Intex<Vec<Box<KScheme>>>, /// Interrupt stats pub interrupts: Intex<[u64; 256]>, } impl Environment { pub fn new() -> Box<Environment> { box Environment { contexts: Intex::new(ContextManager::new()), clock_realtime: Intex::new(Duration::new(0, 0)), clock_monotonic: Intex::new(Duration::new(0, 0)), console: Intex::new(Console::new()), events: WaitQueue::new(), schemes: Intex::new(Vec::new()), interrupts: Intex::new([0; 256]), } } pub fn on_irq(&self, irq: u8) { for mut scheme in self.schemes.lock().iter_mut() { scheme.on_irq(irq); } } /// Open a new resource pub fn open(&self, url: Url, flags: usize) -> Result<Box<Resource>> { let url_scheme = url.scheme(); if url_scheme.is_empty() { let url_path = url.reference(); if url_path.trim_matches('/').is_empty() { let mut list = String::new(); for scheme in self.schemes.lock().iter() { let scheme_str = scheme.scheme(); if !scheme_str.is_empty() { if !list.is_empty() { list = list + "\n" + scheme_str; } else { list = scheme_str.to_string(); } } } Ok(box VecResource::new(":".to_string(), list.into_bytes())) } else if flags & O_CREAT == O_CREAT { for scheme in self.schemes.lock().iter_mut() { if scheme.scheme() == url_path { return Err(Error::new(EEXIST)); } } match Scheme::new(url_path) { Ok((scheme, server)) => { self.schemes.lock().push(scheme); Ok(server) }, Err(err) => Err(err) } } else { Err(Error::new(ENOENT)) } } else { for mut scheme in self.schemes.lock().iter_mut() { if scheme.scheme() == url_scheme { return scheme.open(url, flags); } } Err(Error::new(ENOENT)) } } /// Makes a directory pub fn mkdir(&self, url: Url, flags: usize) -> Result<()> { let url_scheme = url.scheme(); if !url_scheme.is_empty() { for mut scheme in self.schemes.lock().iter_mut() { if scheme.scheme() == url_scheme { return scheme.mkdir(url, flags); } } } Err(Error::new(ENOENT)) } /// Remove a directory pub fn rmdir(&self, url: Url) -> Result<()> { let url_scheme = url.scheme(); if !url_scheme.is_empty() { for mut scheme in self.schemes.lock().iter_mut() { if scheme.scheme() == url_scheme { return scheme.rmdir(url); } } } Err(Error::new(ENOENT)) } /// Unlink a resource pub fn unlink(&self, url: Url) -> Result<()> { let url_scheme = url.scheme(); if !url_scheme.is_empty() {<|fim▁hole|> if scheme.scheme() == url_scheme { return scheme.unlink(url); } } } Err(Error::new(ENOENT)) } }<|fim▁end|>
for mut scheme in self.schemes.lock().iter_mut() {
<|file_name|>lib.rs<|end_file_name|><|fim▁begin|>//! A pmem-resident log file. //! This is useful for programs like databases that append frequently to a log file. //! //! > This is **not** an official port of the NVM Library. //! > //! > The official **libpmemlog** documentation can be found at: [http://pmem.io/nvml/libpmemlog/](http://pmem.io/nvml/libpmemlog/) extern crate pmemlog_sys; extern crate libc;<|fim▁hole|>pub use log::Log;<|fim▁end|>
pub mod log;
<|file_name|>swagger.go<|end_file_name|><|fim▁begin|>package rest import ( restful "github.com/emicklei/go-restful" swagger "github.com/emicklei/go-restful-swagger12" )<|fim▁hole|>// ConfigureSwagger configures the swagger documentation for all endpoints in the container func ConfigureSwagger(apiDocPath string, container *restful.Container) { if apiDocPath == "" { return } config := swagger.Config{ WebServices: container.RegisteredWebServices(), WebServicesUrl: ``, ApiPath: apiDocPath, } swagger.RegisterSwaggerService(config, container) }<|fim▁end|>
<|file_name|>api.go<|end_file_name|><|fim▁begin|>package telebot import ( "bytes" "encoding/json" "fmt" "io" "io/ioutil" "log" "mime/multipart" "net/http" "os" "strconv" "strings" "time" ) // Raw lets you call any method of Bot API manually. // It also handles API errors, so you only need to unwrap // result field from json data. func (b *Bot) Raw(method string, payload interface{}) ([]byte, error) { url := b.URL + "/bot" + b.Token + "/" + method var buf bytes.Buffer if err := json.NewEncoder(&buf).Encode(payload); err != nil { return nil, err } resp, err := b.client.Post(url, "application/json", &buf) if err != nil { return nil, wrapError(err) } resp.Close = true defer resp.Body.Close() data, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, wrapError(err) } if b.verbose { body, _ := json.Marshal(payload) body = bytes.ReplaceAll(body, []byte(`\"`), []byte(`"`)) body = bytes.ReplaceAll(body, []byte(`"{`), []byte(`{`)) body = bytes.ReplaceAll(body, []byte(`}"`), []byte(`}`)) indent := func(b []byte) string { buf.Reset() json.Indent(&buf, b, "", "\t") return buf.String() } log.Printf("[verbose] telebot: sent request\n"+ "Method: %v\nParams: %v\nResponse: %v", method, indent(body), indent(data)) } // returning data as well return data, extractOk(data) } func (b *Bot) sendFiles(method string, files map[string]File, params map[string]string) ([]byte, error) { rawFiles := make(map[string]interface{}) for name, f := range files { switch { case f.InCloud(): params[name] = f.FileID case f.FileURL != "": params[name] = f.FileURL case f.OnDisk(): rawFiles[name] = f.FileLocal case f.FileReader != nil: rawFiles[name] = f.FileReader default: return nil, fmt.Errorf("telebot: file for field %s doesn't exist", name) } } if len(rawFiles) == 0 { return b.Raw(method, params) } pipeReader, pipeWriter := io.Pipe() writer := multipart.NewWriter(pipeWriter) go func() { defer pipeWriter.Close() for field, file := range rawFiles { if err := addFileToWriter(writer, files[field].fileName, field, file); err != nil { pipeWriter.CloseWithError(err) return } } for field, value := range params { if err := writer.WriteField(field, value); err != nil { pipeWriter.CloseWithError(err) return } } if err := writer.Close(); err != nil { pipeWriter.CloseWithError(err) return } }()<|fim▁hole|> resp, err := b.client.Post(url, writer.FormDataContentType(), pipeReader) if err != nil { err = wrapError(err) pipeReader.CloseWithError(err) return nil, err } resp.Close = true defer resp.Body.Close() if resp.StatusCode == http.StatusInternalServerError { return nil, ErrInternal } data, err := ioutil.ReadAll(resp.Body) if err != nil { return nil, wrapError(err) } return data, extractOk(data) } func addFileToWriter(writer *multipart.Writer, filename, field string, file interface{}) error { var reader io.Reader if r, ok := file.(io.Reader); ok { reader = r } else if path, ok := file.(string); ok { f, err := os.Open(path) if err != nil { return err } defer f.Close() reader = f } else { return fmt.Errorf("telebot: file for field %v should be io.ReadCloser or string", field) } part, err := writer.CreateFormFile(field, filename) if err != nil { return err } _, err = io.Copy(part, reader) return err } func (b *Bot) sendText(to Recipient, text string, opt *SendOptions) (*Message, error) { params := map[string]string{ "chat_id": to.Recipient(), "text": text, } b.embedSendOptions(params, opt) data, err := b.Raw("sendMessage", params) if err != nil { return nil, err } return extractMessage(data) } func (b *Bot) sendMedia(media Media, params 
map[string]string, files map[string]File) (*Message, error) { kind := media.MediaType() what := "send" + strings.Title(kind) if kind == "videoNote" { kind = "video_note" } sendFiles := map[string]File{kind: *media.MediaFile()} for k, v := range files { sendFiles[k] = v } data, err := b.sendFiles(what, sendFiles, params) if err != nil { return nil, err } return extractMessage(data) } func (b *Bot) getMe() (*User, error) { data, err := b.Raw("getMe", nil) if err != nil { return nil, err } var resp struct { Result *User } if err := json.Unmarshal(data, &resp); err != nil { return nil, wrapError(err) } return resp.Result, nil } func (b *Bot) getUpdates(offset, limit int, timeout time.Duration, allowed []string) ([]Update, error) { params := map[string]string{ "offset": strconv.Itoa(offset), "timeout": strconv.Itoa(int(timeout / time.Second)), } if limit != 0 { params["limit"] = strconv.Itoa(limit) } if len(allowed) > 0 { data, _ := json.Marshal(allowed) params["allowed_updates"] = string(data) } data, err := b.Raw("getUpdates", params) if err != nil { return nil, err } var resp struct { Result []Update } if err := json.Unmarshal(data, &resp); err != nil { return nil, wrapError(err) } return resp.Result, nil }<|fim▁end|>
url := b.URL + "/bot" + b.Token + "/" + method
<|file_name|>tst_FieldGraphItem.cpp<|end_file_name|><|fim▁begin|>//----------------------------------------------------------------------------- // File: tst_FieldGraphItem.cpp //----------------------------------------------------------------------------- // Project: Kactus 2 // Author: Esko Pekkarinen // Date: 14.04.2015 // // Description: // Unit test for class FieldGraphItem. //----------------------------------------------------------------------------- #include <QtTest> #include <editors/ComponentEditor/memoryMaps/memoryMapsVisualizer/fieldgraphitem.h> #include <editors/ComponentEditor/visualization/memorygapitem.h> #include <editors/ComponentEditor/common/ExpressionParser.h> #include <editors/ComponentEditor/common/SystemVerilogExpressionParser.h> #include <editors/ComponentEditor/common/NullParser.h> #include <IPXACTmodels/Component/Field.h> class tst_FieldGraphItem : public QObject { Q_OBJECT public: tst_FieldGraphItem(); private slots: void testConstructor(); void testExpressions(); }; //----------------------------------------------------------------------------- // Function: tst_FieldGraphItem::tst_FieldGraphItem() //----------------------------------------------------------------------------- tst_FieldGraphItem::tst_FieldGraphItem() { } //----------------------------------------------------------------------------- <|fim▁hole|>// Function: tst_FieldGraphItem::testConstructor() //----------------------------------------------------------------------------- void tst_FieldGraphItem::testConstructor() { QSharedPointer<Field> testField(new Field()); testField->setName("testField"); testField->setBitOffset("0"); testField->setBitWidth("2"); QSharedPointer<ExpressionParser> noParser(new NullParser()); FieldGraphItem* FieldItem = new FieldGraphItem(testField, noParser, 0); QCOMPARE(FieldItem->name(), QString("testField")); QCOMPARE(FieldItem->getOffset(), quint64(0)); QCOMPARE(FieldItem->getLastAddress(), quint64(1)); QCOMPARE(FieldItem->getDisplayOffset(), quint64(1)); QCOMPARE(FieldItem->getDisplayLastAddress(), quint64(0)); QCOMPARE(FieldItem->pos().y(), qreal(0)); delete FieldItem; } //----------------------------------------------------------------------------- // Function: tst_FieldGraphItem::testExpressions() //----------------------------------------------------------------------------- void tst_FieldGraphItem::testExpressions() { QSharedPointer<Field> testField(new Field()); testField->setBitOffset("1+1"); testField->setBitWidth("2*2"); QSharedPointer<ExpressionParser> parser(new SystemVerilogExpressionParser()); FieldGraphItem* FieldItem = new FieldGraphItem(testField, parser, 0); QCOMPARE(FieldItem->getBitWidth(), 4); QCOMPARE(FieldItem->getOffset(), quint64(2)); QCOMPARE(FieldItem->getLastAddress(), quint64(5)); QCOMPARE(FieldItem->getDisplayOffset(), quint64(5)); QCOMPARE(FieldItem->getDisplayLastAddress(), quint64(2)); QCOMPARE(FieldItem->pos().y(), qreal(0)); delete FieldItem; } QTEST_MAIN(tst_FieldGraphItem) #include "tst_FieldGraphItem.moc"<|fim▁end|>
<|file_name|>find_epd.py<|end_file_name|><|fim▁begin|>#! /usr/bin/env python ''' Recursively looks for EPD Writes to STDOUT and STDERR the found library and the found include directory. In this way, this script can be executed within CMAKE and the Python Libraries and Includes can be set to the STDOUT and STDERR streams Checks for a minimum version of Python, default 2.7, but can be specified as a command-line argument The root paths it starts looking for EPD in are contained in the 'check_dirs' global variable ''' import os, sys, getpass check_dirs = ['/opt/local/lib', '/home/%s' % getpass.getuser(), '/usr/share/', '/home/ecuzzill', '/opt/local', '/opt'] def main(): if len(sys.argv) == 2: min_py_version = sys.argv[1] #make sure it's a version by casting to float try: min_py_version = 'libpython' + str(float(min_py_version)) except ValueError: min_py_version = 'libpython2.7' else: min_py_version = 'libpython2.7' <|fim▁hole|> for d in check_dirs: for (dname, dnames, fnames) in os.walk(d): for r in dnames: #found an 'epd'-ish directory if r.find('epd') >= 0: full_dir = '%s/%s' % (dname, r) lib_exists = False #find the library for lib in [x for x in os.listdir(full_dir + '/lib/') if x.find('libpython') >=0 ]: lib_version = '.'.join(lib.split('.')[:2]) if lib_version >= min_py_version: lib_exists = True break if not lib_exists: break lib = '%s/lib/%s.so' % (full_dir, lib_version) include_dir = '%s/include/%s' % (full_dir, min_py_version[3:]) #success if this passes if os.path.isfile(lib) and os.path.isdir(include_dir): bin_path = '%s/bin/python' % (full_dir) unicode_support = os.system("%s -c 'import sys; sys.exit(sys.maxunicode > 65535)'" % (bin_path)) if unicode_support == 0: sys.stdout.write(lib) sys.stderr.write(include_dir) found = True break if found: break if __name__ == '__main__': main()<|fim▁end|>
found = False
<|file_name|>bossanova.py<|end_file_name|><|fim▁begin|>from __future__ import division """ These functions are for BOSSANOVA (BOss Survey of Satellites Around Nearby Optically obserVable milky way Analogs) """ import numpy as np from matplotlib import pyplot as plt import targeting def count_targets(hsts, verbose=True, remove_cached=True, rvir=300, targetingkwargs={}): """ Generates a count of targets for each field. Parameters ---------- hsts A list of `NSAHost` objects verbose : bool Whether or not to print a message when each host is examined remove_cached : bool Whether or not to remove the cached sdss catalog for each host after counting. This may be necessary to prevent running out of memory, depending on the number of hosts involved. rvir : float "virial radius" in kpc for the arcmin transform targetingkwargs : dict or list of dicts passed into ` targeting.select_targets` if a single dictionary, otherwise the targeting will Returns ------- ntargs : astropy.Table a table object with the names of the hosts and the target counts. """ import sys import collections from astropy import table if isinstance(targetingkwargs, collections.Mapping): colnames = ['ntarg'] targetingkwargs = [targetingkwargs.copy()] else: colnames = [('ntarg_' + t.get('colname', str(i))) for i, t in enumerate(targetingkwargs)] targetingkwargs = [t.copy() for t in targetingkwargs] for t in targetingkwargs: t.setdefault('outercutrad', 300) t.setdefault('removegama', False) if 'colname' in t: del t['colname'] nms = [] dists = [] rvs = [] cnts = [[] for t in targetingkwargs] for i, h in enumerate(hsts): if verbose: print 'Generating target count for', h.name, '#', i + 1, 'of', len(hsts) sys.stdout.flush() nms.append(h.name) dists.append(h.distmpc) rvs.append(h.physical_to_projected(300)) for j, t in enumerate(targetingkwargs): if verbose: print 'Targeting parameters:', t sys.stdout.flush() tcat = targeting.select_targets(h, **t) cnts[j].append(len(tcat)) if remove_cached: h._cached_sdss = None t = table.Table() t.add_column(table.Column(name='name', data=nms)) t.add_column(table.Column(name='distmpc', data=dists, units='Mpc')) t.add_column(table.Column(name='rvirarcmin', data=rvs, units='arcmin')) <|fim▁hole|> for cnm, cnt in zip(colnames, cnts): t.add_column(table.Column(name=cnm, data=cnt)) return t _Vabs_mw_sats = {'Bootes I': -6.3099999999999987, 'Bootes II': -2.7000000000000011, 'Bootes III': -5.7500000000000018, 'Canes Venatici I': -8.5900000000000016, 'Canes Venatici II': -4.9199999999999982, 'Canis Major': -14.389999999999999, 'Carina': -9.1099999999999994, 'Coma Berenices': -4.0999999999999996, 'Draco': -8.7999999999999989, 'Fornax': -13.44, 'Hercules': -6.6000000000000014, 'LMC': -18.120000000000001, 'Leo I': -12.02, 'Leo II': -9.8399999999999999, 'Leo IV': -5.8400000000000016, 'Leo V': -5.25, 'Pisces II': -5.0, 'SMC': -16.830000000000002, 'Sagittarius dSph': -13.500000000000002, 'Sculptor': -11.070000000000002, 'Segue I': -1.5, 'Segue II': -2.5, 'Sextans I': -9.2700000000000014, 'Ursa Major I': -5.5299999999999994, 'Ursa Major II': -4.1999999999999993, 'Ursa Minor': -8.7999999999999989, 'Willman 1': -2.6999999999999993} #now just assume they are all g-r=0.5, ~right for Draco ... 
Apply Jester+ transforms _rabs_mw_sats = dict([(k, v + (-0.41 * (0.5) + 0.01)) for k, v in _Vabs_mw_sats.iteritems()]) _sorted_mw_rabs = np.sort(_rabs_mw_sats.values()) def count_mw_sats(h, maglim, mwsatsrmags=_sorted_mw_rabs): appmags = mwsatsrmags + h.distmod return np.sum(appmags < maglim) def generate_count_table(hsts, fnout=None, maglims=[21, 20.5, 20], outercutrad=-90,remove_cached=True): from astropy.io import ascii from astropy import table targetingkwargs = [] for m in maglims: targetingkwargs.append({'faintlimit': m, 'outercutrad': outercutrad, 'colname': str(m)}) tab = count_targets(hsts, targetingkwargs=targetingkwargs, remove_cached=remove_cached) for m in maglims: satcnt = [] for hs in hsts: satcnt.append(count_mw_sats(hs, m)) tab.add_column(table.Column(name='nsat_' + str(m), data=satcnt)) for m in maglims: nsatstr = 'nsat_' + str(m) ntargstr = 'ntarg_' + str(m) tab.add_column(table.Column(name='ntargpersat_' + str(m), data=tab[ntargstr] / tab[nsatstr])) if fnout: ascii.write(tab, fnout) return tab<|fim▁end|>
<|file_name|>IngredientDbHelper.java<|end_file_name|><|fim▁begin|>package com.example.profbola.bakingtime.provider; import android.database.sqlite.SQLiteDatabase; import com.example.profbola.bakingtime.provider.RecipeContract.IngredientEntry; import static com.example.profbola.bakingtime.utils.RecipeConstants.IngredientDbHelperConstants.INGREDIENT_RECIPE_ID_IDX; /** * Created by prof.BOLA on 6/23/2017. */ public class IngredientDbHelper { // private static final String DATABASE_NAME = "bakingtime.db"; // private static final int DATABASE_VERSION = 1; // public IngredientDbHelper(Context context) { // super(context, DATABASE_NAME, null, DATABASE_VERSION); // } public static void onCreate(SQLiteDatabase db) { final String SQL_CREATE_INGREDIENTS_TABLE = "CREATE TABLE " + IngredientEntry.TABLE_NAME + " ( " + IngredientEntry._ID + " INTEGER PRIMARY KEY AUTOINCREMENT, " + IngredientEntry.COLUMN_INGREDIENT + " STRING NOT NULL, " + IngredientEntry.COLUMN_MEASURE + " STRING NOT NULL, " + IngredientEntry.COLUMN_QUANTITY + " REAL NOT NULL, " + IngredientEntry.COLUMN_RECIPE_ID + " INTEGER, " + " FOREIGN KEY ( " + IngredientEntry.COLUMN_RECIPE_ID + " ) REFERENCES " + RecipeContract.RecipeEntry.TABLE_NAME + " ( " + RecipeContract.RecipeEntry.COLUMN_ID + " ) " + " UNIQUE ( " + IngredientEntry.COLUMN_INGREDIENT + " , " + IngredientEntry.COLUMN_RECIPE_ID + " ) ON CONFLICT REPLACE " + ");"; final String SQL_CREATE_INDEX<|fim▁hole|> } public static void onUpgrade(SQLiteDatabase db, int oldVersion, int newVersion) { db.execSQL("DROP TABLE IF EXISTS " + IngredientEntry.TABLE_NAME); onCreate(db); } }<|fim▁end|>
= "CREATE INDEX " + INGREDIENT_RECIPE_ID_IDX + " ON " + IngredientEntry.TABLE_NAME +
                " ( " + IngredientEntry.COLUMN_RECIPE_ID + " );";

        db.execSQL(SQL_CREATE_INGREDIENTS_TABLE);
        db.execSQL(SQL_CREATE_INDEX);
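With hypothetical literals substituted for the RecipeContract constants (the real table and column names are defined elsewhere and are not part of this sample), the DDL assembled above, including the comma that separates the FOREIGN KEY and UNIQUE table constraints, can be exercised against an in-memory SQLite database roughly like this sketch:

import sqlite3

# All identifiers below ("ingredients", "recipes", "recipe_id", ...) are stand-ins
# for the RecipeContract constants, which are not shown in the sample above.
ddl = """
CREATE TABLE ingredients (
    _id INTEGER PRIMARY KEY AUTOINCREMENT,
    ingredient STRING NOT NULL,
    measure STRING NOT NULL,
    quantity REAL NOT NULL,
    recipe_id INTEGER,
    FOREIGN KEY (recipe_id) REFERENCES recipes (id),
    UNIQUE (ingredient, recipe_id) ON CONFLICT REPLACE
);
"""

conn = sqlite3.connect(":memory:")
conn.execute("CREATE TABLE recipes (id INTEGER PRIMARY KEY);")  # minimal parent table
conn.execute(ddl)
conn.execute("CREATE INDEX ingredient_recipe_id_idx ON ingredients (recipe_id);")

# ON CONFLICT REPLACE: re-inserting the same (ingredient, recipe_id) pair replaces the row
conn.execute("INSERT INTO recipes (id) VALUES (1);")
conn.execute("INSERT INTO ingredients (ingredient, measure, quantity, recipe_id) VALUES ('flour', 'g', 100, 1);")
conn.execute("INSERT INTO ingredients (ingredient, measure, quantity, recipe_id) VALUES ('flour', 'g', 250, 1);")
print(conn.execute("SELECT quantity FROM ingredients;").fetchall())  # [(250.0,)]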
<|file_name|>compress.rs<|end_file_name|><|fim▁begin|>use std::fs::File;
use std::io::Write;
use std::path::{Path, PathBuf};

use ::image;
use ::image::GenericImage;

use dct;
use quantize;
use color_space;
use compressed_image;

use protobuf::Message;

use flate2::Compression;
use flate2::write::ZlibEncoder;

pub fn compress_file(input_filename: &Path) {
    let file_stem = match input_filename.file_stem() {
        Some(stem) => stem,
        None => panic!("Invalid input filename: Could not automatically determine output file"),
    };

    let file_container = match input_filename.parent() {
        Some(result) => result,
        None => {
            panic!("Invalid input filename: Could not automatically determine the output file \
                    directory")
        }
    };

    let mut output_filename = PathBuf::from(&file_container);<|fim▁hole|>}

pub fn compress_file_to_output(input_filename: &Path, output_filename: &Path) {
    if let Some(extension) = output_filename.extension() {
        assert!(extension == "msca", "Output file for compression must be 'msca'")
    } else {
        panic!("Output file for compression must be msca")
    }

    let input_image = image::open(input_filename).unwrap();
    let mut output_file = File::create(&Path::new(&output_filename)).unwrap();

    compress(&input_image, &mut output_file);
}

fn compress(input_image: &image::DynamicImage, output: &mut File) {
    let (width, height) = input_image.dimensions();

    let mut red_channel: Vec<f32> = Vec::with_capacity(width as usize * height as usize);
    let mut green_channel: Vec<f32> = Vec::with_capacity(width as usize * height as usize);
    let mut blue_channel: Vec<f32> = Vec::with_capacity(width as usize * height as usize);

    // split the color data into channels
    for y in 0..height {
        for x in 0..width {
            let pixel = input_image.get_pixel(x, y);
            let (y, cb, cr) = color_space::rgb_to_ycbcr(pixel[0], pixel[1], pixel[2]);
            red_channel.push(y);
            green_channel.push(cb);
            blue_channel.push(cr);
        }
    }

    let mut serializer = compressed_image::compressed_image::new();
    serializer.set_width(width);
    serializer.set_height(height);

    // compress the data and put it directly into the serializer
    serializer.set_red(compress_color_channel(width as usize, height as usize, red_channel));
    serializer.set_green(compress_color_channel(width as usize, height as usize, green_channel));
    serializer.set_blue(compress_color_channel(width as usize, height as usize, blue_channel));

    let serialized_bytes = serializer.write_to_bytes().unwrap();

    // losslessly compress the serialized data
    let mut enc = ZlibEncoder::new(output, Compression::Default);
    let mut written = 0;
    while written < serialized_bytes.len() {
        written += enc.write(&serialized_bytes[written..serialized_bytes.len()]).unwrap();
    }
    let _ = enc.finish();
}

fn compress_color_channel(width: usize, height: usize, mut uncompressed_channel_data: Vec<f32>) -> Vec<i32> {
    dct::dct2_2d(width, height, &mut uncompressed_channel_data);
    quantize::encode(width, height, &uncompressed_channel_data)
}<|fim▁end|>
    output_filename.push(file_stem);
    output_filename.set_extension("msca");

    compress_file_to_output(input_filename, output_filename.as_path());
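compress() above converts each RGB pixel to Y/Cb/Cr before the per-channel DCT, quantization, protobuf serialization and zlib pass. The crate's color_space module is not shown here, so its exact coefficients are an assumption; the short Python sketch below uses the common full-range BT.601 numbers purely as an illustration of that conversion step.

# Illustrative RGB -> YCbCr split, mirroring the pixel loop in compress() above.
# The real color_space::rgb_to_ycbcr may use different constants.
def rgb_to_ycbcr(r, g, b):
    y = 0.299 * r + 0.587 * g + 0.114 * b
    cb = -0.168736 * r - 0.331264 * g + 0.5 * b + 128.0
    cr = 0.5 * r - 0.418688 * g - 0.081312 * b + 128.0
    return y, cb, cr

print(rgb_to_ycbcr(255, 0, 0))   # pure red -> roughly (76.2, 85.0, 255.5)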
<|file_name|>gapcoin_lt.ts<|end_file_name|><|fim▁begin|><?xml version="1.0" ?><!DOCTYPE TS><TS language="lt" version="2.1"> <context> <name>AboutDialog</name> <message> <source>About Gapcoin Core</source> <translation type="unfinished"/> </message> <message> <source>&lt;b&gt;Gapcoin Core&lt;/b&gt; version</source> <translation type="unfinished"/> </message> <message> <source> This is experimental software. Distributed under the MIT/X11 software license, see the accompanying file COPYING or http://www.opensource.org/licenses/mit-license.php. This product includes software developed by the OpenSSL Project for use in the OpenSSL Toolkit (http://www.openssl.org/) and cryptographic software written by Eric Young ([email protected]) and UPnP software written by Thomas Bernard.</source> <translation>Tai eksperimentinė programa. Platinama pagal MIT/X11 licenciją, kurią rasite faile COPYING arba http://www.opensource.org/licenses/mit-license.php. Šiame produkte yra OpenSSL projekto kuriamas OpenSSL Toolkit (http://www.openssl.org/), Eric Young parašyta kriptografinė programinė įranga bei Thomas Bernard sukurta UPnP programinė įranga.</translation> </message> <message> <source>Copyright</source> <translation>Copyright</translation> </message> <message> <source>The Gapcoin Core developers</source> <translation type="unfinished"/> </message> <message> <source>(%1-bit)</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressBookPage</name> <message> <source>Double-click to edit address or label</source> <translation>Spragtelėkite, kad pakeistumėte adresą arba žymę</translation> </message> <message> <source>Create a new address</source> <translation>Sukurti naują adresą</translation> </message> <message> <source>&amp;New</source> <translation>&amp;Naujas</translation> </message> <message> <source>Copy the currently selected address to the system clipboard</source> <translation>Kopijuoti esamą adresą į mainų atmintį</translation> </message> <message> <source>&amp;Copy</source> <translation>&amp;Kopijuoti</translation> </message> <message> <source>C&amp;lose</source> <translation>&amp;Užverti</translation> </message> <message> <source>&amp;Copy Address</source> <translation>&amp;Kopijuoti adresą</translation> </message> <message> <source>Delete the currently selected address from the list</source> <translation type="unfinished"/> </message> <message> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <source>&amp;Export</source> <translation>&amp;Eksportuoti</translation> </message> <message> <source>&amp;Delete</source> <translation>&amp;Trinti</translation> </message> <message> <source>Choose the address to send coins to</source> <translation type="unfinished"/> </message> <message> <source>Choose the address to receive coins with</source> <translation type="unfinished"/> </message> <message> <source>C&amp;hoose</source> <translation type="unfinished"/> </message> <message> <source>Sending addresses</source> <translation type="unfinished"/> </message> <message> <source>Receiving addresses</source> <translation type="unfinished"/> </message> <message> <source>These are your Gapcoin addresses for sending payments. Always check the amount and the receiving address before sending coins.</source> <translation type="unfinished"/> </message> <message> <source>These are your Gapcoin addresses for receiving payments. 
It is recommended to use a new receiving address for each transaction.</source> <translation type="unfinished"/> </message> <message> <source>Copy &amp;Label</source> <translation>Kopijuoti ž&amp;ymę</translation> </message> <message> <source>&amp;Edit</source> <translation>&amp;Keisti</translation> </message> <message> <source>Export Address List</source> <translation type="unfinished"/> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Kableliais išskirtas failas (*.csv)</translation> </message> <message> <source>Exporting Failed</source> <translation type="unfinished"/> </message> <message> <source>There was an error trying to save the address list to %1.</source> <translation type="unfinished"/> </message> </context> <context> <name>AddressTableModel</name> <message> <source>Label</source> <translation>Žymė</translation> </message> <message> <source>Address</source> <translation>Adresas</translation> </message> <message> <source>(no label)</source> <translation>(nėra žymės)</translation> </message> </context> <context> <name>AskPassphraseDialog</name> <message> <source>Passphrase Dialog</source> <translation>Slaptafrazės dialogas</translation> </message> <message> <source>Enter passphrase</source> <translation>Įvesti slaptafrazę</translation> </message> <message> <source>New passphrase</source> <translation>Nauja slaptafrazė</translation> </message> <message> <source>Repeat new passphrase</source> <translation>Pakartokite naują slaptafrazę</translation> </message> <message> <source>Enter the new passphrase to the wallet.&lt;br/&gt;Please use a passphrase of &lt;b&gt;10 or more random characters&lt;/b&gt;, or &lt;b&gt;eight or more words&lt;/b&gt;.</source> <translation>Įveskite naują piniginės slaptafrazę.&lt;br/&gt;Prašome naudoti slaptafrazę iš &lt;b&gt; 10 ar daugiau atsitiktinių simbolių&lt;/b&gt; arba &lt;b&gt;aštuonių ar daugiau žodžių&lt;/b&gt;.</translation> </message> <message> <source>Encrypt wallet</source> <translation>Užšifruoti piniginę</translation> </message> <message> <source>This operation needs your wallet passphrase to unlock the wallet.</source> <translation>Ši operacija reikalauja jūsų piniginės slaptafrazės jai atrakinti.</translation> </message> <message> <source>Unlock wallet</source> <translation>Atrakinti piniginę</translation> </message> <message> <source>This operation needs your wallet passphrase to decrypt the wallet.</source> <translation>Ši operacija reikalauja jūsų piniginės slaptafrazės jai iššifruoti.</translation> </message> <message> <source>Decrypt wallet</source> <translation>Iššifruoti piniginę</translation> </message> <message> <source>Change passphrase</source> <translation>Pakeisti slaptafrazę</translation> </message> <message> <source>Enter the old and new passphrase to the wallet.</source> <translation>Įveskite seną ir naują piniginės slaptafrazes.</translation> </message> <message> <source>Confirm wallet encryption</source> <translation>Patvirtinkite piniginės užšifravimą</translation> </message> <message> <source>Warning: If you encrypt your wallet and lose your passphrase, you will &lt;b&gt;LOSE ALL OF YOUR GAPCOINS&lt;/b&gt;!</source> <translation>Dėmesio: jei užšifruosite savo piniginę ir pamesite slaptafrazę, jūs&lt;b&gt;PRARASITE VISUS SAVO GAPCOINUS&lt;/b&gt;! 
</translation> </message> <message> <source>Are you sure you wish to encrypt your wallet?</source> <translation>Ar tikrai norite šifruoti savo piniginę?</translation> </message> <message> <source>IMPORTANT: Any previous backups you have made of your wallet file should be replaced with the newly generated, encrypted wallet file. For security reasons, previous backups of the unencrypted wallet file will become useless as soon as you start using the new, encrypted wallet.</source> <translation type="unfinished"/> </message> <message> <source>Warning: The Caps Lock key is on!</source> <translation>Įspėjimas: įjungtas Caps Lock klavišas!</translation> </message> <message> <source>Wallet encrypted</source> <translation>Piniginė užšifruota</translation> </message> <message> <source>Gapcoin will close now to finish the encryption process. Remember that encrypting your wallet cannot fully protect your gapcoins from being stolen by malware infecting your computer.</source> <translation>Gapcoin dabar užsidarys šifravimo proceso pabaigai. Atminkite, kad piniginės šifravimas negali pilnai apsaugoti gapcoinų vagysčių kai tinkle esančios kenkėjiškos programos patenka į jūsų kompiuterį.</translation> </message> <message> <source>Wallet encryption failed</source> <translation>Nepavyko užšifruoti piniginę</translation> </message> <message> <source>Wallet encryption failed due to an internal error. Your wallet was not encrypted.</source> <translation>Dėl vidinės klaidos nepavyko užšifruoti piniginę.Piniginė neužšifruota.</translation> </message> <message> <source>The supplied passphrases do not match.</source> <translation>Įvestos slaptafrazės nesutampa.</translation> </message> <message> <source>Wallet unlock failed</source> <translation>Nepavyko atrakinti piniginę</translation> </message> <message> <source>The passphrase entered for the wallet decryption was incorrect.</source> <translation>Neteisingai įvestas slaptažodis piniginės iššifravimui.</translation> </message> <message> <source>Wallet decryption failed</source> <translation>Nepavyko iššifruoti piniginės</translation> </message> <message> <source>Wallet passphrase was successfully changed.</source> <translation>Piniginės slaptažodis sėkmingai pakeistas.</translation> </message> </context> <context> <name>GapcoinGUI</name> <message> <source>Sign &amp;message...</source> <translation>Pasirašyti ži&amp;nutę...</translation> </message> <message> <source>Synchronizing with network...</source> <translation>Sinchronizavimas su tinklu ...</translation> </message> <message> <source>&amp;Overview</source> <translation>&amp;Apžvalga</translation> </message> <message> <source>Node</source> <translation type="unfinished"/> </message> <message> <source>Show general overview of wallet</source> <translation>Rodyti piniginės bendrą apžvalgą</translation> </message> <message> <source>&amp;Transactions</source> <translation>&amp;Sandoriai</translation> </message> <message> <source>Browse transaction history</source> <translation>Apžvelgti sandorių istoriją</translation> </message> <message> <source>E&amp;xit</source> <translation>&amp;Išeiti</translation> </message> <message> <source>Quit application</source> <translation>Išjungti programą</translation> </message> <message> <source>Show information about Gapcoin</source> <translation>Rodyti informaciją apie Gapcoin</translation> </message> <message> <source>About &amp;Qt</source> <translation>Apie &amp;Qt</translation> </message> <message> <source>Show information about Qt</source> <translation>Rodyti informaciją 
apie Qt</translation> </message> <message> <source>&amp;Options...</source> <translation>&amp;Parinktys...</translation> </message> <message> <source>&amp;Encrypt Wallet...</source> <translation>&amp;Užšifruoti piniginę...</translation> </message> <message> <source>&amp;Backup Wallet...</source> <translation>&amp;Backup piniginę...</translation> </message> <message> <source>&amp;Change Passphrase...</source> <translation>&amp;Keisti slaptafrazę...</translation> </message> <message> <source>&amp;Sending addresses...</source> <translation type="unfinished"/> </message> <message> <source>&amp;Receiving addresses...</source> <translation type="unfinished"/> </message> <message> <source>Open &amp;URI...</source> <translation type="unfinished"/> </message> <message> <source>Importing blocks from disk...</source> <translation>Blokai importuojami iš disko...</translation> </message> <message> <source>Reindexing blocks on disk...</source> <translation>Blokai iš naujo indeksuojami...</translation> </message> <message> <source>Send coins to a Gapcoin address</source> <translation>Siųsti monetas Gapcoin adresui</translation> </message> <message> <source>Modify configuration options for Gapcoin</source> <translation>Keisti gapcoin konfigūracijos galimybes</translation> </message> <message> <source>Backup wallet to another location</source> <translation>Daryti piniginės atsarginę kopiją</translation> </message> <message> <source>Change the passphrase used for wallet encryption</source> <translation>Pakeisti slaptafrazę naudojamą piniginės užšifravimui</translation> </message> <message> <source>&amp;Debug window</source> <translation>&amp;Derinimo langas</translation> </message> <message> <source>Open debugging and diagnostic console</source> <translation>Atverti derinimo ir diagnostikos konsolę</translation> </message> <message> <source>&amp;Verify message...</source> <translation>&amp;Tikrinti žinutę...</translation> </message> <message> <source>Gapcoin</source> <translation>Gapcoin</translation> </message> <message> <source>Wallet</source> <translation>Piniginė</translation> </message> <message> <source>&amp;Send</source> <translation>&amp;Siųsti</translation> </message> <message> <source>&amp;Receive</source> <translation>&amp;Gauti</translation> </message> <message> <source>&amp;Show / Hide</source> <translation>&amp;Rodyti / Slėpti</translation> </message> <message> <source>Show or hide the main Window</source> <translation>Rodyti arba slėpti pagrindinį langą</translation> </message> <message> <source>Encrypt the private keys that belong to your wallet</source> <translation type="unfinished"/> </message> <message> <source>Sign messages with your Gapcoin addresses to prove you own them</source> <translation type="unfinished"/> </message> <message> <source>Verify messages to ensure they were signed with specified Gapcoin addresses</source> <translation type="unfinished"/> </message> <message> <source>&amp;File</source> <translation>&amp;Failas</translation> </message> <message> <source>&amp;Settings</source> <translation>&amp;Nustatymai</translation> </message> <message> <source>&amp;Help</source> <translation>&amp;Pagalba</translation> </message> <message> <source>Tabs toolbar</source> <translation>Kortelių įrankinė</translation> </message> <message> <source>[testnet]</source> <translation>[testavimotinklas]</translation> </message> <message> <source>Gapcoin Core</source> <translation>Gapcoin branduolys</translation> </message> <message> <source>Request payments (generates QR codes and gapcoin: 
URIs)</source> <translation type="unfinished"/> </message> <message> <source>&amp;About Gapcoin Core</source> <translation type="unfinished"/> </message> <message> <source>Show the list of used sending addresses and labels</source> <translation type="unfinished"/> </message> <message> <source>Show the list of used receiving addresses and labels</source> <translation type="unfinished"/> </message> <message> <source>Open a gapcoin: URI or payment request</source> <translation type="unfinished"/> </message> <message> <source>&amp;Command-line options</source> <translation type="unfinished"/> </message> <message> <source>Show the Gapcoin Core help message to get a list with possible Gapcoin command-line options</source> <translation type="unfinished"/> </message> <message> <source>Gapcoin client</source> <translation>Gapcoin klientas</translation> </message> <message numerus="yes"> <source>%n active connection(s) to Gapcoin network</source> <translation><numerusform>%n Gapcoin tinklo aktyvus ryšys</numerusform><numerusform>%n Gapcoin tinklo aktyvūs ryšiai</numerusform><numerusform>%n Gapcoin tinklo aktyvūs ryšiai</numerusform></translation> </message> <message> <source>No block source available...</source> <translation type="unfinished"/> </message> <message> <source>Processed %1 of %2 (estimated) blocks of transaction history.</source> <translation type="unfinished"/> </message> <message> <source>Processed %1 blocks of transaction history.</source> <translation type="unfinished"/> </message> <message numerus="yes"> <source>%n hour(s)</source> <translation><numerusform>%n valanda</numerusform><numerusform>%n valandos</numerusform><numerusform>%n valandų</numerusform></translation> </message> <message numerus="yes"> <source>%n day(s)</source> <translation><numerusform>%n diena</numerusform><numerusform>%n dienos</numerusform><numerusform>%n dienų</numerusform></translation> </message> <message numerus="yes"> <source>%n week(s)</source> <translation><numerusform>%n savaitė</numerusform><numerusform>%n savaitės</numerusform><numerusform>%n savaičių</numerusform></translation> </message> <message> <source>%1 and %2</source> <translation type="unfinished"/> </message> <message numerus="yes"> <source>%n year(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <source>%1 behind</source> <translation type="unfinished"/> </message> <message> <source>Last received block was generated %1 ago.</source> <translation type="unfinished"/> </message> <message> <source>Transactions after this will not yet be visible.</source> <translation type="unfinished"/> </message> <message> <source>Error</source> <translation>Klaida</translation> </message> <message> <source>Warning</source> <translation type="unfinished"/> </message> <message> <source>Information</source> <translation>Informacija</translation> </message> <message> <source>Up to date</source> <translation>Atnaujinta</translation> </message> <message> <source>Catching up...</source> <translation>Vejamasi...</translation> </message> <message> <source>Sent transaction</source> <translation>Sandoris nusiųstas</translation> </message> <message> <source>Incoming transaction</source> <translation>Ateinantis sandoris</translation> </message> <message> <source>Date: %1 Amount: %2 Type: %3 Address: %4 </source> <translation>Data: %1 Suma: %2 Tipas: %3 Adresas: %4</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently 
&lt;b&gt;unlocked&lt;/b&gt;</source> <translation>Piniginė &lt;b&gt;užšifruota&lt;/b&gt; ir šiuo metu &lt;b&gt;atrakinta&lt;/b&gt;</translation> </message> <message> <source>Wallet is &lt;b&gt;encrypted&lt;/b&gt; and currently &lt;b&gt;locked&lt;/b&gt;</source> <translation>Piniginė &lt;b&gt;užšifruota&lt;/b&gt; ir šiuo metu &lt;b&gt;užrakinta&lt;/b&gt;</translation> </message> <message> <source>A fatal error occurred. Gapcoin can no longer continue safely and will quit.</source> <translation type="unfinished"/> </message> </context> <context> <name>ClientModel</name> <message> <source>Network Alert</source> <translation>Tinklo įspėjimas</translation> </message> </context> <context> <name>CoinControlDialog</name> <message> <source>Coin Control Address Selection</source> <translation type="unfinished"/> </message> <message> <source>Quantity:</source> <translation type="unfinished"/> </message> <message> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <source>Amount:</source> <translation>Suma:</translation> </message> <message> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <source>Change:</source> <translation type="unfinished"/> </message> <message> <source>(un)select all</source> <translation type="unfinished"/> </message> <message> <source>Tree mode</source> <translation type="unfinished"/> </message> <message> <source>List mode</source> <translation type="unfinished"/> </message> <message> <source>Amount</source> <translation>Suma</translation> </message> <message> <source>Address</source> <translation>Adresas</translation> </message> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Confirmations</source> <translation type="unfinished"/> </message> <message> <source>Confirmed</source> <translation>Patvirtintas</translation> </message> <message> <source>Priority</source> <translation type="unfinished"/> </message> <message> <source>Copy address</source> <translation>Kopijuoti adresą</translation> </message> <message> <source>Copy label</source> <translation>Kopijuoti žymę</translation> </message> <message> <source>Copy amount</source> <translation>Kopijuoti sumą</translation> </message> <message> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <source>Lock unspent</source> <translation type="unfinished"/> </message> <message> <source>Unlock unspent</source> <translation type="unfinished"/> </message> <message> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <source>highest</source> <translation type="unfinished"/> </message> <message> <source>higher</source> <translation type="unfinished"/> </message> <message> <source>high</source> <translation type="unfinished"/> 
</message> <message> <source>medium-high</source> <translation type="unfinished"/> </message> <message> <source>medium</source> <translation type="unfinished"/> </message> <message> <source>low-medium</source> <translation type="unfinished"/> </message> <message> <source>low</source> <translation type="unfinished"/> </message> <message> <source>lower</source> <translation type="unfinished"/> </message> <message> <source>lowest</source> <translation type="unfinished"/> </message> <message> <source>(%1 locked)</source> <translation type="unfinished"/> </message> <message> <source>none</source> <translation type="unfinished"/> </message> <message> <source>Dust</source> <translation type="unfinished"/> </message> <message> <source>yes</source> <translation type="unfinished"/> </message> <message> <source>no</source> <translation type="unfinished"/> </message> <message> <source>This label turns red, if the transaction size is greater than 1000 bytes.</source> <translation type="unfinished"/> </message> <message> <source>This means a fee of at least %1 per kB is required.</source> <translation type="unfinished"/> </message> <message> <source>Can vary +/- 1 byte per input.</source> <translation type="unfinished"/> </message> <message> <source>Transactions with higher priority are more likely to get included into a block.</source> <translation type="unfinished"/> </message> <message> <source>This label turns red, if the priority is smaller than &quot;medium&quot;.</source> <translation type="unfinished"/> </message> <message> <source>This label turns red, if any recipient receives an amount smaller than %1.</source> <translation type="unfinished"/> </message> <message> <source>This means a fee of at least %1 is required.</source> <translation type="unfinished"/> </message> <message> <source>Amounts below 0.546 times the minimum relay fee are shown as dust.</source> <translation type="unfinished"/> </message> <message> <source>This label turns red, if the change is smaller than %1.</source> <translation type="unfinished"/> </message> <message> <source>(no label)</source> <translation>(nėra žymės)</translation> </message> <message> <source>change from %1 (%2)</source> <translation type="unfinished"/> </message> <message> <source>(change)</source> <translation type="unfinished"/> </message> </context> <context> <name>EditAddressDialog</name> <message> <source>Edit Address</source> <translation>Keisti adresą</translation> </message> <message> <source>&amp;Label</source> <translation>Ž&amp;ymė</translation> </message> <message> <source>The label associated with this address list entry</source> <translation type="unfinished"/> </message> <message> <source>The address associated with this address list entry. 
This can only be modified for sending addresses.</source> <translation type="unfinished"/> </message> <message> <source>&amp;Address</source> <translation>&amp;Adresas</translation> </message> <message> <source>New receiving address</source> <translation>Naujas gavimo adresas</translation> </message> <message> <source>New sending address</source> <translation>Naujas siuntimo adresas</translation> </message> <message> <source>Edit receiving address</source> <translation>Keisti gavimo adresą</translation> </message> <message> <source>Edit sending address</source> <translation>Keisti siuntimo adresą</translation> </message> <message> <source>The entered address &quot;%1&quot; is already in the address book.</source> <translation>Įvestas adresas „%1“ jau yra adresų knygelėje.</translation> </message> <message> <source>The entered address &quot;%1&quot; is not a valid Gapcoin address.</source> <translation>Įvestas adresas „%1“ nėra galiojantis Gapcoin adresas.</translation> </message> <message> <source>Could not unlock wallet.</source> <translation>Nepavyko atrakinti piniginės.</translation> </message> <message> <source>New key generation failed.</source> <translation>Naujo rakto generavimas nepavyko.</translation> </message> </context> <context> <name>FreespaceChecker</name> <message> <source>A new data directory will be created.</source> <translation type="unfinished"/> </message> <message> <source>name</source> <translation type="unfinished"/> </message> <message> <source>Directory already exists. Add %1 if you intend to create a new directory here.</source> <translation type="unfinished"/> </message> <message> <source>Path already exists, and is not a directory.</source> <translation type="unfinished"/> </message> <message> <source>Cannot create data directory here.</source> <translation type="unfinished"/> </message> </context> <context> <name>HelpMessageDialog</name> <message> <source>Gapcoin Core - Command-line options</source> <translation type="unfinished"/> </message> <message> <source>Gapcoin Core</source> <translation>Gapcoin branduolys</translation> </message> <message> <source>version</source> <translation>versija</translation> </message> <message> <source>Usage:</source> <translation>Naudojimas:</translation> </message> <message> <source>command-line options</source> <translation>komandinės eilutės parametrai</translation> </message> <message> <source>UI options</source> <translation>Naudotoji sąsajos parametrai</translation> </message> <message> <source>Set language, for example &quot;de_DE&quot; (default: system locale)</source> <translation>Nustatyti kalbą, pavyzdžiui &quot;lt_LT&quot; (numatyta: sistemos kalba)</translation> </message> <message> <source>Start minimized</source> <translation>Paleisti sumažintą</translation> </message> <message> <source>Set SSL root certificates for payment request (default: -system-)</source> <translation type="unfinished"/> </message> <message> <source>Show splash screen on startup (default: 1)</source> <translation type="unfinished"/> </message> <message> <source>Choose data directory on startup (default: 0)</source> <translation type="unfinished"/> </message> </context> <context> <name>Intro</name> <message> <source>Welcome</source> <translation>Sveiki</translation> </message> <message> <source>Welcome to Gapcoin Core.</source> <translation type="unfinished"/> </message> <message> <source>As this is the first time the program is launched, you can choose where Gapcoin Core will store its data.</source> <translation type="unfinished"/> 
</message> <message> <source>Gapcoin Core will download and store a copy of the Gapcoin block chain. At least %1GB of data will be stored in this directory, and it will grow over time. The wallet will also be stored in this directory.</source> <translation type="unfinished"/> </message> <message> <source>Use the default data directory</source> <translation type="unfinished"/> </message> <message> <source>Use a custom data directory:</source> <translation type="unfinished"/> </message> <message> <source>Gapcoin</source> <translation>Gapcoin</translation> </message> <message> <source>Error: Specified data directory &quot;%1&quot; can not be created.</source> <translation type="unfinished"/> </message> <message> <source>Error</source> <translation>Klaida</translation> </message> <message> <source>GB of free space available</source> <translation type="unfinished"/> </message> <message> <source>(of %1GB needed)</source> <translation type="unfinished"/> </message> </context> <context> <name>OpenURIDialog</name> <message> <source>Open URI</source> <translation type="unfinished"/> </message> <message> <source>Open payment request from URI or file</source> <translation type="unfinished"/> </message> <message> <source>URI:</source> <translation type="unfinished"/> </message> <message> <source>Select payment request file</source> <translation type="unfinished"/> </message> <message> <source>Select payment request file to open</source> <translation type="unfinished"/> </message> </context> <context> <name>OptionsDialog</name> <message> <source>Options</source> <translation>Parinktys</translation> </message> <message> <source>&amp;Main</source> <translation>&amp;Pagrindinės</translation> </message> <message> <source>Optional transaction fee per kB that helps make sure your transactions are processed quickly. Most transactions are 1 kB.</source> <translation type="unfinished"/> </message> <message> <source>Pay transaction &amp;fee</source> <translation>&amp;Mokėti sandorio mokestį</translation> </message> <message> <source>Automatically start Gapcoin after logging in to the system.</source> <translation>Automatiškai paleisti Bitkoin programą įjungus sistemą.</translation> </message> <message> <source>&amp;Start Gapcoin on system login</source> <translation>&amp;Paleisti Gapcoin programą su window sistemos paleidimu</translation> </message> <message> <source>Size of &amp;database cache</source> <translation type="unfinished"/> </message> <message> <source>MB</source> <translation type="unfinished"/> </message> <message> <source>Number of script &amp;verification threads</source> <translation type="unfinished"/> </message> <message> <source>Connect to the Gapcoin network through a SOCKS proxy.</source> <translation type="unfinished"/> </message> <message> <source>&amp;Connect through SOCKS proxy (default proxy):</source> <translation type="unfinished"/> </message> <message> <source>IP address of the proxy (e.g. IPv4: 127.0.0.1 / IPv6: ::1)</source> <translation type="unfinished"/> </message> <message> <source>Third party URLs (e.g. a block explorer) that appear in the transactions tab as context menu items. %s in the URL is replaced by transaction hash. 
Multiple URLs are separated by vertical bar |.</source> <translation type="unfinished"/> </message> <message> <source>Third party transaction URLs</source> <translation type="unfinished"/> </message> <message> <source>Active command-line options that override above options:</source> <translation type="unfinished"/> </message> <message> <source>Reset all client options to default.</source> <translation type="unfinished"/> </message> <message> <source>&amp;Reset Options</source> <translation type="unfinished"/> </message> <message> <source>&amp;Network</source> <translation>&amp;Tinklas</translation> </message> <message> <source>(0 = auto, &lt;0 = leave that many cores free)</source> <translation type="unfinished"/> </message> <message> <source>W&amp;allet</source> <translation type="unfinished"/> </message> <message> <source>Expert</source> <translation type="unfinished"/> </message> <message> <source>Enable coin &amp;control features</source> <translation type="unfinished"/> </message> <message> <source>If you disable the spending of unconfirmed change, the change from a transaction cannot be used until that transaction has at least one confirmation. This also affects how your balance is computed.</source> <translation type="unfinished"/> </message> <message> <source>&amp;Spend unconfirmed change</source> <translation type="unfinished"/> </message> <message> <source>Automatically open the Gapcoin client port on the router. This only works when your router supports UPnP and it is enabled.</source> <translation>Automatiškai atidaryti Gapcoin kliento prievadą maršrutizatoriuje. Tai veikia tik tada, kai jūsų maršrutizatorius palaiko UPnP ir ji įjungta.</translation> </message> <message> <source>Map port using &amp;UPnP</source> <translation>Persiųsti prievadą naudojant &amp;UPnP</translation> </message> <message> <source>Proxy &amp;IP:</source> <translation>Tarpinio serverio &amp;IP:</translation> </message> <message> <source>&amp;Port:</source> <translation>&amp;Prievadas:</translation> </message> <message> <source>Port of the proxy (e.g. 9050)</source> <translation>Tarpinio serverio preivadas (pvz, 9050)</translation> </message> <message> <source>SOCKS &amp;Version:</source> <translation>SOCKS &amp;versija:</translation> </message> <message> <source>SOCKS version of the proxy (e.g. 5)</source> <translation>Tarpinio serverio SOCKS versija (pvz., 5)</translation> </message> <message> <source>&amp;Window</source> <translation>&amp;Langas</translation> </message> <message> <source>Show only a tray icon after minimizing the window.</source> <translation>Po programos lango sumažinimo rodyti tik programos ikoną.</translation> </message> <message> <source>&amp;Minimize to the tray instead of the taskbar</source> <translation>&amp;M sumažinti langą bet ne užduočių juostą</translation> </message> <message> <source>Minimize instead of exit the application when the window is closed. When this option is enabled, the application will be closed only after selecting Quit in the menu.</source> <translation>Uždarant langą neuždaryti programos. 
Kai ši parinktis įjungta, programa bus uždaryta tik pasirinkus meniu komandą Baigti.</translation> </message> <message> <source>M&amp;inimize on close</source> <translation>&amp;Sumažinti uždarant</translation> </message> <message> <source>&amp;Display</source> <translation>&amp;Rodymas</translation> </message> <message> <source>User Interface &amp;language:</source> <translation>Naudotojo sąsajos &amp;kalba:</translation> </message> <message> <source>The user interface language can be set here. This setting will take effect after restarting Gapcoin.</source> <translation>Čia gali būti nustatyta naudotojo sąsajos kalba. Šis nustatymas įsigalios iš naujo paleidus Gapcoin.</translation> </message> <message> <source>&amp;Unit to show amounts in:</source> <translation>&amp;Vienetai, kuriais rodyti sumas:</translation> </message> <message> <source>Choose the default subdivision unit to show in the interface and when sending coins.</source> <translation>Rodomų ir siunčiamų monetų kiekio matavimo vienetai</translation> </message> <message> <source>Whether to show Gapcoin addresses in the transaction list or not.</source> <translation type="unfinished"/> </message> <message> <source>&amp;Display addresses in transaction list</source> <translation>&amp;Rodyti adresus sandorių sąraše</translation> </message> <message> <source>Whether to show coin control features or not.</source> <translation type="unfinished"/> </message> <message> <source>&amp;OK</source> <translation>&amp;Gerai</translation> </message> <message> <source>&amp;Cancel</source> <translation>&amp;Atšaukti</translation> </message> <message> <source>default</source> <translation>numatyta</translation> </message> <message> <source>none</source> <translation type="unfinished"/> </message> <message> <source>Confirm options reset</source> <translation type="unfinished"/> </message> <message> <source>Client restart required to activate changes.</source> <translation type="unfinished"/> </message> <message> <source>Client will be shutdown, do you want to proceed?</source> <translation type="unfinished"/> </message> <message> <source>This change would require a client restart.</source> <translation type="unfinished"/> </message> <message> <source>The supplied proxy address is invalid.</source> <translation>Nurodytas tarpinio serverio adresas negalioja.</translation> </message> </context> <context> <name>OverviewPage</name> <message> <source>Form</source> <translation>Forma</translation> </message> <message> <source>The displayed information may be out of date. 
Your wallet automatically synchronizes with the Gapcoin network after a connection is established, but this process has not completed yet.</source> <translation type="unfinished"/> </message> <message> <source>Wallet</source> <translation>Piniginė</translation> </message> <message> <source>Available:</source> <translation type="unfinished"/> </message> <message> <source>Your current spendable balance</source> <translation type="unfinished"/> </message> <message> <source>Pending:</source> <translation type="unfinished"/> </message> <message> <source>Total of transactions that have yet to be confirmed, and do not yet count toward the spendable balance</source> <translation type="unfinished"/> </message> <message> <source>Immature:</source> <translation>Nepribrendę:</translation> </message> <message> <source>Mined balance that has not yet matured</source> <translation type="unfinished"/> </message> <message> <source>Total:</source> <translation>Viso:</translation> </message> <message> <source>Your current total balance</source> <translation>Jūsų balansas</translation> </message> <message> <source>&lt;b&gt;Recent transactions&lt;/b&gt;</source> <translation>&lt;b&gt;Naujausi sandoriai&lt;/b&gt;</translation> </message> <message> <source>out of sync</source> <translation>nesinchronizuota</translation> </message> </context> <context> <name>PaymentServer</name> <message> <source>URI handling</source> <translation>URI apdorojimas</translation> </message> <message> <source>URI can not be parsed! This can be caused by an invalid Gapcoin address or malformed URI parameters.</source> <translation type="unfinished"/> </message> <message> <source>Requested payment amount of %1 is too small (considered dust).</source> <translation type="unfinished"/> </message> <message> <source>Payment request error</source> <translation type="unfinished"/> </message> <message> <source>Cannot start gapcoin: click-to-pay handler</source> <translation type="unfinished"/> </message> <message> <source>Net manager warning</source> <translation type="unfinished"/> </message> <message> <source>Your active proxy doesn&apos;t support SOCKS5, which is required for payment requests via proxy.</source> <translation type="unfinished"/> </message> <message> <source>Payment request fetch URL is invalid: %1</source> <translation type="unfinished"/> </message> <message> <source>Payment request file handling</source> <translation type="unfinished"/> </message> <message> <source>Payment request file can not be read or processed! 
This can be caused by an invalid payment request file.</source> <translation type="unfinished"/> </message> <message> <source>Unverified payment requests to custom payment scripts are unsupported.</source> <translation type="unfinished"/> </message> <message> <source>Refund from %1</source> <translation type="unfinished"/> </message> <message> <source>Error communicating with %1: %2</source> <translation type="unfinished"/> </message> <message> <source>Payment request can not be parsed or processed!</source> <translation type="unfinished"/> </message> <message> <source>Bad response from server %1</source> <translation type="unfinished"/> </message> <message> <source>Payment acknowledged</source> <translation type="unfinished"/> </message> <message> <source>Network request error</source> <translation>Tinklo užklausos klaida</translation> </message> </context> <context> <name>QObject</name> <message> <source>Gapcoin</source> <translation>Gapcoin</translation> </message> <message> <source>Error: Specified data directory &quot;%1&quot; does not exist.</source> <translation type="unfinished"/> </message> <message> <source>Error: Cannot parse configuration file: %1. Only use key=value syntax.</source> <translation type="unfinished"/> </message> <message> <source>Error: Invalid combination of -regtest and -testnet.</source> <translation type="unfinished"/> </message> <message> <source>Gapcoin Core didn&apos;t yet exit safely...</source> <translation type="unfinished"/> </message> <message> <source>Enter a Gapcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Įveskite bitkoinų adresą (pvz. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> </context> <context> <name>QRImageWidget</name> <message> <source>&amp;Save Image...</source> <translation type="unfinished"/> </message> <message> <source>&amp;Copy Image</source> <translation type="unfinished"/> </message> <message> <source>Save QR Code</source> <translation>Įrašyti QR kodą</translation> </message> <message> <source>PNG Image (*.png)</source> <translation type="unfinished"/> </message> </context> <context> <name>RPCConsole</name> <message> <source>Client name</source> <translation>Kliento pavadinimas</translation> </message> <message> <source>N/A</source> <translation>nėra</translation> </message> <message> <source>Client version</source> <translation>Kliento versija</translation> </message> <message> <source>&amp;Information</source> <translation>&amp;Informacija</translation> </message> <message> <source>Debug window</source> <translation type="unfinished"/> </message> <message> <source>General</source> <translation type="unfinished"/> </message> <message> <source>Using OpenSSL version</source> <translation>Naudojama OpenSSL versija</translation> </message> <message> <source>Startup time</source> <translation>Paleidimo laikas</translation> </message> <message> <source>Network</source> <translation>Tinklas</translation> </message> <message> <source>Name</source> <translation type="unfinished"/> </message> <message> <source>Number of connections</source> <translation>Prisijungimų kiekis</translation> </message> <message> <source>Block chain</source> <translation>Blokų grandinė</translation> </message> <message> <source>Current number of blocks</source> <translation>Dabartinis blokų skaičius</translation> </message> <message> <source>Estimated total blocks</source> <translation type="unfinished"/> </message> <message> <source>Last block time</source> <translation>Paskutinio bloko laikas</translation> 
</message> <message> <source>&amp;Open</source> <translation>&amp;Atverti</translation> </message> <message> <source>&amp;Console</source> <translation>&amp;Konsolė</translation> </message> <message> <source>&amp;Network Traffic</source> <translation type="unfinished"/> </message> <message> <source>&amp;Clear</source> <translation type="unfinished"/> </message> <message> <source>Totals</source> <translation type="unfinished"/> </message> <message> <source>In:</source> <translation type="unfinished"/> </message> <message> <source>Out:</source> <translation type="unfinished"/> </message> <message> <source>Build date</source> <translation>Kompiliavimo data</translation> </message> <message> <source>Debug log file</source> <translation>Derinimo žurnalo failas</translation> </message> <message> <source>Open the Gapcoin debug log file from the current data directory. This can take a few seconds for large log files.</source> <translation type="unfinished"/> </message> <message> <source>Clear console</source> <translation>Išvalyti konsolę</translation> </message> <message> <source>Welcome to the Gapcoin RPC console.</source> <translation>Sveiki atvykę į Gapcoin RPC konsolę.</translation> </message> <message> <source>Use up and down arrows to navigate history, and &lt;b&gt;Ctrl-L&lt;/b&gt; to clear screen.</source> <translation type="unfinished"/> </message> <message> <source>Type &lt;b&gt;help&lt;/b&gt; for an overview of available commands.</source> <translation type="unfinished"/> </message> <message> <source>%1 B</source> <translation>%1 B</translation> </message> <message> <source>%1 KB</source> <translation>%1 KB</translation> </message> <message> <source>%1 MB</source> <translation>%1 MB</translation> </message> <message> <source>%1 GB</source> <translation>%1 GB</translation> </message> <message> <source>%1 m</source> <translation>%1 m</translation> </message> <message> <source>%1 h</source> <translation>%1 h</translation> </message> <message> <source>%1 h %2 m</source> <translation>%1 h %2 m</translation> </message> </context> <context> <name>ReceiveCoinsDialog</name> <message> <source>&amp;Amount:</source> <translation type="unfinished"/> </message> <message> <source>&amp;Label:</source> <translation>Ž&amp;ymė:</translation> </message> <message> <source>&amp;Message:</source> <translation type="unfinished"/> </message> <message> <source>Reuse one of the previously used receiving addresses. Reusing addresses has security and privacy issues. Do not use this unless re-generating a payment request made before.</source> <translation type="unfinished"/> </message> <message> <source>R&amp;euse an existing receiving address (not recommended)</source> <translation type="unfinished"/> </message> <message> <source>An optional message to attach to the payment request, which will be displayed when the request is opened. Note: The message will not be sent with the payment over the Gapcoin network.</source> <translation type="unfinished"/> </message> <message> <source>An optional label to associate with the new receiving address.</source> <translation type="unfinished"/> </message> <message> <source>Use this form to request payments. All fields are &lt;b&gt;optional&lt;/b&gt;.</source> <translation type="unfinished"/> </message> <message> <source>An optional amount to request. 
Leave this empty or zero to not request a specific amount.</source> <translation type="unfinished"/> </message> <message> <source>Clear all fields of the form.</source> <translation type="unfinished"/> </message> <message> <source>Clear</source> <translation type="unfinished"/> </message> <message> <source>Requested payments history</source> <translation type="unfinished"/> </message> <message> <source>&amp;Request payment</source> <translation type="unfinished"/> </message> <message> <source>Show the selected request (does the same as double clicking an entry)</source> <translation type="unfinished"/> </message> <message> <source>Show</source> <translation type="unfinished"/> </message> <message> <source>Remove the selected entries from the list</source> <translation type="unfinished"/> </message> <message> <source>Remove</source> <translation type="unfinished"/> </message> <message> <source>Copy label</source> <translation>Kopijuoti žymę</translation> </message> <message> <source>Copy message</source> <translation type="unfinished"/> </message> <message> <source>Copy amount</source> <translation>Kopijuoti sumą</translation> </message> </context> <context> <name>ReceiveRequestDialog</name> <message> <source>QR Code</source> <translation>QR kodas</translation> </message> <message> <source>Copy &amp;URI</source> <translation type="unfinished"/> </message> <message> <source>Copy &amp;Address</source> <translation type="unfinished"/> </message> <message> <source>&amp;Save Image...</source> <translation type="unfinished"/> </message> <message> <source>Request payment to %1</source> <translation type="unfinished"/> </message> <message> <source>Payment information</source> <translation>Mokėjimo informacija</translation> </message> <message> <source>URI</source> <translation type="unfinished"/> </message> <message> <source>Address</source> <translation>Adresas</translation> </message> <message> <source>Amount</source> <translation>Suma</translation> </message> <message> <source>Label</source> <translation>Žymė</translation> </message> <message> <source>Message</source> <translation>Žinutė</translation> </message> <message> <source>Resulting URI too long, try to reduce the text for label / message.</source> <translation type="unfinished"/> </message> <message> <source>Error encoding URI into QR Code.</source> <translation>Klaida, koduojant URI į QR kodą.</translation> </message> </context> <context> <name>RecentRequestsTableModel</name> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Label</source> <translation>Žymė</translation> </message> <message> <source>Message</source> <translation>Žinutė</translation> </message> <message> <source>Amount</source> <translation>Suma</translation> </message> <message> <source>(no label)</source> <translation>(nėra žymės)</translation> </message> <message> <source>(no message)</source> <translation type="unfinished"/> </message> <message> <source>(no amount)</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsDialog</name> <message> <source>Send Coins</source> <translation>Siųsti monetas</translation> </message> <message> <source>Coin Control Features</source> <translation type="unfinished"/> </message> <message> <source>Inputs...</source> <translation type="unfinished"/> </message> <message> <source>automatically selected</source> <translation type="unfinished"/> </message> <message> <source>Insufficient funds!</source> <translation type="unfinished"/> </message> <message> 
<source>Quantity:</source> <translation type="unfinished"/> </message> <message> <source>Bytes:</source> <translation type="unfinished"/> </message> <message> <source>Amount:</source> <translation>Suma:</translation> </message> <message> <source>Priority:</source> <translation type="unfinished"/> </message> <message> <source>Fee:</source> <translation type="unfinished"/> </message> <message> <source>Low Output:</source> <translation type="unfinished"/> </message> <message> <source>After Fee:</source> <translation type="unfinished"/> </message> <message> <source>Change:</source> <translation type="unfinished"/> </message> <message> <source>If this is activated, but the change address is empty or invalid, change will be sent to a newly generated address.</source> <translation type="unfinished"/> </message> <message> <source>Custom change address</source> <translation type="unfinished"/> </message> <message> <source>Send to multiple recipients at once</source> <translation>Siųsti keliems gavėjams vienu metu</translation> </message> <message> <source>Add &amp;Recipient</source> <translation>&amp;A Pridėti gavėją</translation> </message> <message> <source>Clear all fields of the form.</source> <translation type="unfinished"/> </message> <message> <source>Clear &amp;All</source> <translation>Išvalyti &amp;viską</translation> </message> <message> <source>Balance:</source> <translation>Balansas:</translation> </message> <message> <source>Confirm the send action</source> <translation>Patvirtinti siuntimo veiksmą</translation> </message> <message> <source>S&amp;end</source> <translation>&amp;Siųsti</translation> </message> <message> <source>Confirm send coins</source> <translation>Patvirtinti monetų siuntimą</translation> </message> <message> <source>%1 to %2</source> <translation type="unfinished"/> </message> <message> <source>Copy quantity</source> <translation type="unfinished"/> </message> <message> <source>Copy amount</source> <translation>Kopijuoti sumą</translation> </message> <message> <source>Copy fee</source> <translation type="unfinished"/> </message> <message> <source>Copy after fee</source> <translation type="unfinished"/> </message> <message> <source>Copy bytes</source> <translation type="unfinished"/> </message> <message> <source>Copy priority</source> <translation type="unfinished"/> </message> <message> <source>Copy low output</source> <translation type="unfinished"/> </message> <message> <source>Copy change</source> <translation type="unfinished"/> </message> <message> <source>Total Amount %1 (= %2)</source> <translation type="unfinished"/> </message> <message> <source>or</source> <translation type="unfinished"/> </message> <message> <source>The recipient address is not valid, please recheck.</source> <translation>Negaliojantis gavėjo adresas. 
Patikrinkite.</translation> </message> <message> <source>The amount to pay must be larger than 0.</source> <translation>Apmokėjimo suma turi būti didesnė nei 0.</translation> </message> <message> <source>The amount exceeds your balance.</source> <translation>Suma viršija jūsų balansą.</translation> </message> <message> <source>The total exceeds your balance when the %1 transaction fee is included.</source> <translation>Jei pridedame sandorio mokestį %1 bendra suma viršija jūsų balansą.</translation> </message> <message> <source>Duplicate address found, can only send to each address once per send operation.</source> <translation>Rastas adreso dublikatas.</translation> </message> <message> <source>Transaction creation failed!</source> <translation type="unfinished"/> </message> <message> <source>The transaction was rejected! This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <source>Warning: Invalid Gapcoin address</source> <translation type="unfinished"/> </message> <message> <source>(no label)</source> <translation>(nėra žymės)</translation> </message> <message> <source>Warning: Unknown change address</source> <translation type="unfinished"/> </message> <message> <source>Are you sure you want to send?</source> <translation type="unfinished"/> </message> <message> <source>added as transaction fee</source> <translation type="unfinished"/> </message> <message> <source>Payment request expired</source> <translation type="unfinished"/> </message> <message> <source>Invalid payment address %1</source> <translation type="unfinished"/> </message> </context> <context> <name>SendCoinsEntry</name> <message> <source>A&amp;mount:</source> <translation>Su&amp;ma:</translation> </message> <message> <source>Pay &amp;To:</source> <translation>Mokėti &amp;gavėjui:</translation> </message> <message> <source>The address to send the payment to (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation type="unfinished"/> </message> <message> <source>Enter a label for this address to add it to your address book</source> <translation>Įveskite žymę šiam adresui kad galėtumėte įtraukti ją į adresų knygelę</translation> </message> <message> <source>&amp;Label:</source> <translation>Ž&amp;ymė:</translation> </message> <message> <source>Choose previously used address</source> <translation type="unfinished"/> </message> <message> <source>This is a normal payment.</source> <translation type="unfinished"/> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>Įvesti adresą iš mainų atminties</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Remove this entry</source> <translation type="unfinished"/> </message> <message> <source>Message:</source> <translation>Žinutė:</translation> </message> <message> <source>This is a verified payment request.</source> <translation type="unfinished"/> </message> <message> <source>Enter a label for this address to add it to the list of used addresses</source> <translation type="unfinished"/> </message> <message> <source>A message that was attached to the gapcoin: URI which will be stored with the transaction for your reference. 
Note: This message will not be sent over the Gapcoin network.</source> <translation type="unfinished"/> </message> <message> <source>This is an unverified payment request.</source> <translation type="unfinished"/> </message> <message> <source>Pay To:</source> <translation type="unfinished"/> </message> <message> <source>Memo:</source> <translation type="unfinished"/> </message> </context> <context> <name>ShutdownWindow</name> <message> <source>Gapcoin Core is shutting down...</source> <translation type="unfinished"/> </message> <message> <source>Do not shut down the computer until this window disappears.</source> <translation type="unfinished"/> </message> </context> <context> <name>SignVerifyMessageDialog</name> <message> <source>Signatures - Sign / Verify a Message</source> <translation type="unfinished"/> </message> <message> <source>&amp;Sign Message</source> <translation>&amp;Pasirašyti žinutę</translation> </message> <message> <source>You can sign messages with your addresses to prove you own them. Be careful not to sign anything vague, as phishing attacks may try to trick you into signing your identity over to them. Only sign fully-detailed statements you agree to.</source> <translation type="unfinished"/> </message> <message> <source>The address to sign the message with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Įveskite bitkoinų adresą (pvz. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <source>Choose previously used address</source> <translation type="unfinished"/> </message> <message> <source>Alt+A</source> <translation>Alt+A</translation> </message> <message> <source>Paste address from clipboard</source> <translation>Įvesti adresą iš mainų atminties</translation> </message> <message> <source>Alt+P</source> <translation>Alt+P</translation> </message> <message> <source>Enter the message you want to sign here</source> <translation>Įveskite pranešimą, kurį norite pasirašyti čia</translation> </message> <message> <source>Signature</source> <translation type="unfinished"/> </message> <message> <source>Copy the current signature to the system clipboard</source> <translation type="unfinished"/> </message> <message> <source>Sign the message to prove you own this Gapcoin address</source> <translation>Registruotis žinute įrodymuii, kad turite šį adresą</translation> </message> <message> <source>Sign &amp;Message</source> <translation type="unfinished"/> </message> <message> <source>Reset all sign message fields</source> <translation type="unfinished"/> </message> <message> <source>Clear &amp;All</source> <translation>Išvalyti &amp;viską</translation> </message> <message> <source>&amp;Verify Message</source> <translation>&amp;Patikrinti žinutę</translation> </message> <message> <source>Enter the signing address, message (ensure you copy line breaks, spaces, tabs, etc. exactly) and signature below to verify the message. Be careful not to read more into the signature than what is in the signed message itself, to avoid being tricked by a man-in-the-middle attack.</source> <translation type="unfinished"/> </message> <message> <source>The address the message was signed with (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Įveskite bitkoinų adresą (pvz. 
1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <source>Verify the message to ensure it was signed with the specified Gapcoin address</source> <translation>Patikrinkite žinutę, jog įsitikintumėte, kad ją pasirašė nurodytas Gapcoin adresas</translation> </message> <message> <source>Verify &amp;Message</source> <translation type="unfinished"/> </message> <message> <source>Reset all verify message fields</source> <translation type="unfinished"/> </message> <message> <source>Enter a Gapcoin address (e.g. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</source> <translation>Įveskite bitkoinų adresą (pvz. 1NS17iag9jJgTHD1VXjvLCEnZuQ3rJDE9L)</translation> </message> <message> <source>Click &quot;Sign Message&quot; to generate signature</source> <translation>Spragtelėkite &quot;Registruotis žinutę&quot; tam, kad gauti parašą</translation> </message> <message> <source>The entered address is invalid.</source> <translation>Įvestas adresas negalioja.</translation> </message> <message> <source>Please check the address and try again.</source> <translation>Prašom patikrinti adresą ir bandyti iš naujo.</translation> </message> <message> <source>The entered address does not refer to a key.</source> <translation type="unfinished"/> </message> <message> <source>Wallet unlock was cancelled.</source> <translation>Piniginės atrakinimas atšauktas.</translation> </message> <message> <source>Private key for the entered address is not available.</source> <translation type="unfinished"/> </message> <message> <source>Message signing failed.</source> <translation>Žinutės pasirašymas nepavyko.</translation> </message> <message> <source>Message signed.</source> <translation>Žinutė pasirašyta.</translation> </message> <message> <source>The signature could not be decoded.</source> <translation>Nepavyko iškoduoti parašo.</translation> </message> <message> <source>Please check the signature and try again.</source> <translation>Prašom patikrinti parašą ir bandyti iš naujo.</translation> </message> <message> <source>The signature did not match the message digest.</source> <translation>Parašas neatitinka žinutės.</translation> </message> <message> <source>Message verification failed.</source> <translation>Žinutės tikrinimas nepavyko.</translation> </message> <message> <source>Message verified.</source> <translation>Žinutė patikrinta.</translation> </message> </context> <context> <name>SplashScreen</name> <message> <source>Gapcoin Core</source> <translation>Gapcoin branduolys</translation> </message> <message> <source>The Gapcoin Core developers</source> <translation type="unfinished"/> </message> <message> <source>[testnet]</source> <translation>[testavimotinklas]</translation> </message> </context> <context> <name>TrafficGraphWidget</name> <message> <source>KB/s</source> <translation>KB/s</translation> </message> </context> <context> <name>TransactionDesc</name> <message> <source>Open until %1</source> <translation>Atidaryta iki %1</translation> </message> <message> <source>conflicted</source> <translation type="unfinished"/> </message> <message> <source>%1/offline</source> <translation>%1/neprisijungęs</translation> </message> <message> <source>%1/unconfirmed</source> <translation>%1/nepatvirtintas</translation> </message> <message> <source>%1 confirmations</source> <translation>%1 patvirtinimų</translation> </message> <message> <source>Status</source> <translation>Būsena</translation> </message> <message numerus="yes"> <source>, broadcast through %n node(s)</source> <translation 
type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Source</source> <translation>Šaltinis</translation> </message> <message> <source>Generated</source> <translation>Sugeneruotas</translation> </message> <message> <source>From</source> <translation>Nuo</translation> </message> <message> <source>To</source> <translation>Kam</translation> </message> <message> <source>own address</source> <translation>savo adresas</translation> </message> <message> <source>label</source> <translation>žymė</translation> </message> <message> <source>Credit</source> <translation>Kreditas</translation> </message> <message numerus="yes"> <source>matures in %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <source>not accepted</source> <translation>nepriimta</translation> </message> <message> <source>Debit</source> <translation>Debitas</translation> </message> <message> <source>Transaction fee</source> <translation>Sandorio mokestis</translation> </message> <message> <source>Net amount</source> <translation>Neto suma</translation> </message> <message> <source>Message</source> <translation>Žinutė</translation> </message> <message> <source>Comment</source> <translation>Komentaras</translation> </message> <message> <source>Transaction ID</source> <translation>Sandorio ID</translation> </message> <message> <source>Merchant</source> <translation type="unfinished"/> </message> <message> <source>Generated coins must mature %1 blocks before they can be spent. When you generated this block, it was broadcast to the network to be added to the block chain. If it fails to get into the chain, its state will change to &quot;not accepted&quot; and it won&apos;t be spendable. 
This may occasionally happen if another node generates a block within a few seconds of yours.</source> <translation type="unfinished"/> </message> <message> <source>Debug information</source> <translation>Derinimo informacija</translation> </message> <message> <source>Transaction</source> <translation>Sandoris</translation> </message> <message> <source>Inputs</source> <translation type="unfinished"/> </message> <message> <source>Amount</source> <translation>Suma</translation> </message> <message> <source>true</source> <translation>tiesa</translation> </message> <message> <source>false</source> <translation>netiesa</translation> </message> <message> <source>, has not been successfully broadcast yet</source> <translation>, transliavimas dar nebuvo sėkmingas</translation> </message> <message numerus="yes"> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <source>unknown</source> <translation>nežinomas</translation> </message> </context> <context> <name>TransactionDescDialog</name> <message> <source>Transaction details</source> <translation>Sandorio detelės</translation> </message> <message> <source>This pane shows a detailed description of the transaction</source> <translation>Šis langas sandorio detalų aprašymą</translation> </message> </context> <context> <name>TransactionTableModel</name> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Type</source> <translation>Tipas</translation> </message> <message> <source>Address</source> <translation>Adresas</translation> </message> <message> <source>Amount</source> <translation>Suma</translation> </message> <message> <source>Immature (%1 confirmations, will be available after %2)</source> <translation type="unfinished"/> </message> <message numerus="yes"> <source>Open for %n more block(s)</source> <translation type="unfinished"><numerusform></numerusform><numerusform></numerusform><numerusform></numerusform></translation> </message> <message> <source>Open until %1</source> <translation>Atidaryta iki %1</translation> </message> <message> <source>Confirmed (%1 confirmations)</source> <translation>Patvirtinta (%1 patvirtinimai)</translation> </message> <message> <source>This block was not received by any other nodes and will probably not be accepted!</source> <translation>Šis blokas negautas nė vienu iš mazgų ir matomai nepriimtas</translation> </message> <message> <source>Generated but not accepted</source> <translation>Išgauta bet nepriimta</translation> </message> <message> <source>Offline</source> <translation type="unfinished"/> </message> <message> <source>Unconfirmed</source> <translation type="unfinished"/> </message> <message> <source>Confirming (%1 of %2 recommended confirmations)</source> <translation type="unfinished"/> </message> <message> <source>Conflicted</source> <translation type="unfinished"/> </message> <message> <source>Received with</source> <translation>Gauta su</translation> </message> <message> <source>Received from</source> <translation>Gauta iš</translation> </message> <message> <source>Sent to</source> <translation>Siųsta </translation> </message> <message> <source>Payment to yourself</source> <translation>Mokėjimas sau</translation> </message> <message> <source>Mined</source> <translation>Išgauta</translation> </message> <message> <source>(n/a)</source> <translation>nepasiekiama</translation> </message> <message> <source>Transaction status. 
Hover over this field to show number of confirmations.</source> <translation>Sandorio būklė. Užvedus pelės žymeklį ant šios srities matysite patvirtinimų skaičių.</translation> </message> <message> <source>Date and time that the transaction was received.</source> <translation>Sandorio gavimo data ir laikas</translation> </message> <message> <source>Type of transaction.</source> <translation>Sandorio tipas.</translation> </message> <message> <source>Destination address of transaction.</source> <translation>Sandorio paskirties adresas</translation> </message> <message> <source>Amount removed from or added to balance.</source> <translation>Suma pridėta ar išskaičiuota iš balanso</translation> </message> </context> <context> <name>TransactionView</name> <message> <source>All</source> <translation>Visi</translation> </message> <message> <source>Today</source> <translation>Šiandien</translation> </message> <message> <source>This week</source> <translation>Šią savaitę</translation> </message> <message> <source>This month</source> <translation>Šį mėnesį</translation> </message> <message> <source>Last month</source> <translation>Paskutinį mėnesį</translation> </message> <message> <source>This year</source> <translation>Šiais metais</translation> </message> <message> <source>Range...</source> <translation>Intervalas...</translation> </message> <message> <source>Received with</source> <translation>Gauta su</translation> </message> <message> <source>Sent to</source> <translation>Išsiųsta</translation> </message> <message> <source>To yourself</source> <translation>Skirta sau</translation> </message> <message> <source>Mined</source> <translation>Išgauta</translation> </message> <message><|fim▁hole|> <message> <source>Enter address or label to search</source> <translation>Įveskite adresą ar žymę į paiešką</translation> </message> <message> <source>Min amount</source> <translation>Minimali suma</translation> </message> <message> <source>Copy address</source> <translation>Kopijuoti adresą</translation> </message> <message> <source>Copy label</source> <translation>Kopijuoti žymę</translation> </message> <message> <source>Copy amount</source> <translation>Kopijuoti sumą</translation> </message> <message> <source>Copy transaction ID</source> <translation type="unfinished"/> </message> <message> <source>Edit label</source> <translation>Taisyti žymę</translation> </message> <message> <source>Show transaction details</source> <translation>Rodyti sandėrio detales</translation> </message> <message> <source>Export Transaction History</source> <translation type="unfinished"/> </message> <message> <source>Exporting Failed</source> <translation type="unfinished"/> </message> <message> <source>There was an error trying to save the transaction history to %1.</source> <translation type="unfinished"/> </message> <message> <source>Exporting Successful</source> <translation type="unfinished"/> </message> <message> <source>The transaction history was successfully saved to %1.</source> <translation type="unfinished"/> </message> <message> <source>Comma separated file (*.csv)</source> <translation>Kableliais atskirtų duomenų failas (*.csv)</translation> </message> <message> <source>Confirmed</source> <translation>Patvirtintas</translation> </message> <message> <source>Date</source> <translation>Data</translation> </message> <message> <source>Type</source> <translation>Tipas</translation> </message> <message> <source>Label</source> <translation>Žymė</translation> </message> <message> <source>Address</source> 
<translation>Adresas</translation> </message> <message> <source>Amount</source> <translation>Suma</translation> </message> <message> <source>ID</source> <translation>ID</translation> </message> <message> <source>Range:</source> <translation>Grupė:</translation> </message> <message> <source>to</source> <translation>skirta</translation> </message> </context> <context> <name>WalletFrame</name> <message> <source>No wallet has been loaded.</source> <translation type="unfinished"/> </message> </context> <context> <name>WalletModel</name> <message> <source>Send Coins</source> <translation>Siųsti monetas</translation> </message> </context> <context> <name>WalletView</name> <message> <source>&amp;Export</source> <translation>&amp;Eksportuoti</translation> </message> <message> <source>Export the data in the current tab to a file</source> <translation type="unfinished"/> </message> <message> <source>Backup Wallet</source> <translation type="unfinished"/> </message> <message> <source>Wallet Data (*.dat)</source> <translation type="unfinished"/> </message> <message> <source>Backup Failed</source> <translation type="unfinished"/> </message> <message> <source>There was an error trying to save the wallet data to %1.</source> <translation type="unfinished"/> </message> <message> <source>The wallet data was successfully saved to %1.</source> <translation type="unfinished"/> </message> <message> <source>Backup Successful</source> <translation>Atsarginė kopija sėkmingai padaryta</translation> </message> </context> <context> <name>gapcoin-core</name> <message> <source>Usage:</source> <translation>Naudojimas:</translation> </message> <message> <source>List commands</source> <translation>Komandų sąrašas</translation> </message> <message> <source>Get help for a command</source> <translation>Suteikti pagalba komandai</translation> </message> <message> <source>Options:</source> <translation>Parinktys:</translation> </message> <message> <source>Specify configuration file (default: gapcoin.conf)</source> <translation>Nurodyti konfigūracijos failą (pagal nutylėjimąt: gapcoin.conf)</translation> </message> <message> <source>Specify pid file (default: gapcoind.pid)</source> <translation>Nurodyti pid failą (pagal nutylėjimą: gapcoind.pid)</translation> </message> <message> <source>Specify data directory</source> <translation>Nustatyti duomenų aplanką</translation> </message> <message> <source>Listen for connections on &lt;port&gt; (default: 31469 or testnet: 19661)</source> <translation>Sujungimo klausymas prijungčiai &lt;port&gt; (pagal nutylėjimą: 31469 arba testnet: 19661)</translation> </message> <message> <source>Maintain at most &lt;n&gt; connections to peers (default: 125)</source> <translation>Palaikyti ne daugiau &lt;n&gt; jungčių kolegoms (pagal nutylėjimą: 125)</translation> </message> <message> <source>Connect to a node to retrieve peer addresses, and disconnect</source> <translation type="unfinished"/> </message> <message> <source>Specify your own public address</source> <translation>Nurodykite savo nuosavą viešą adresą</translation> </message> <message> <source>Threshold for disconnecting misbehaving peers (default: 100)</source> <translation>Atjungimo dėl netinkamo kolegų elgesio riba (pagal nutylėjimą: 100)</translation> </message> <message> <source>Number of seconds to keep misbehaving peers from reconnecting (default: 86400)</source> <translation>Sekundžių kiekis eikiamas palaikyti ryšį dėl lygiarangių nestabilumo (pagal nutylėjimą: 86.400)</translation> </message> <message> <source>An error occurred 
while setting up the RPC port %u for listening on IPv4: %s</source> <translation type="unfinished"/> </message> <message> <source>Listen for JSON-RPC connections on &lt;port&gt; (default: 31397 or testnet: 19609)</source> <translation>Klausymas JSON-RPC sujungimui prijungčiai &lt;port&gt; (pagal nutylėjimą: 31397 or testnet: 19609)</translation> </message> <message> <source>Accept command line and JSON-RPC commands</source> <translation>Priimti komandinę eilutę ir JSON-RPC komandas</translation> </message> <message> <source>Gapcoin Core RPC client version</source> <translation type="unfinished"/> </message> <message> <source>Run in the background as a daemon and accept commands</source> <translation>Dirbti fone kaip šešėlyje ir priimti komandas</translation> </message> <message> <source>Use the test network</source> <translation>Naudoti testavimo tinklą</translation> </message> <message> <source>Accept connections from outside (default: 1 if no -proxy or -connect)</source> <translation type="unfinished"/> </message> <message> <source>%s, you must set a rpcpassword in the configuration file: %s It is recommended you use the following random password: rpcuser=gapcoinrpc rpcpassword=%s (you do not need to remember this password) The username and password MUST NOT be the same. If the file does not exist, create it with owner-readable-only file permissions. It is also recommended to set alertnotify so you are notified of problems; for example: alertnotify=echo %%s | mail -s &quot;Gapcoin Alert&quot; [email protected] </source> <translation type="unfinished"/> </message> <message> <source>Acceptable ciphers (default: TLSv1.2+HIGH:TLSv1+HIGH:!SSLv2:!aNULL:!eNULL:!3DES:@STRENGTH)</source> <translation type="unfinished"/> </message> <message> <source>An error occurred while setting up the RPC port %u for listening on IPv6, falling back to IPv4: %s</source> <translation type="unfinished"/> </message> <message> <source>Bind to given address and always listen on it. Use [host]:port notation for IPv6</source> <translation type="unfinished"/> </message> <message> <source>Continuously rate-limit free transactions to &lt;n&gt;*1000 bytes per minute (default:15)</source> <translation type="unfinished"/> </message> <message> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly. This is intended for regression testing tools and app development.</source> <translation type="unfinished"/> </message> <message> <source>Enter regression test mode, which uses a special chain in which blocks can be solved instantly.</source> <translation type="unfinished"/> </message> <message> <source>Error: Listening for incoming connections failed (listen returned error %d)</source> <translation type="unfinished"/> </message> <message> <source>Error: The transaction was rejected! 
This might happen if some of the coins in your wallet were already spent, such as if you used a copy of wallet.dat and coins were spent in the copy but not marked as spent here.</source> <translation type="unfinished"/> </message> <message> <source>Error: This transaction requires a transaction fee of at least %s because of its amount, complexity, or use of recently received funds!</source> <translation type="unfinished"/> </message> <message> <source>Execute command when a wallet transaction changes (%s in cmd is replaced by TxID)</source> <translation type="unfinished"/> </message> <message> <source>Fees smaller than this are considered zero fee (for transaction creation) (default:</source> <translation type="unfinished"/> </message> <message> <source>Flush database activity from memory pool to disk log every &lt;n&gt; megabytes (default: 100)</source> <translation type="unfinished"/> </message> <message> <source>How thorough the block verification of -checkblocks is (0-4, default: 3)</source> <translation type="unfinished"/> </message> <message> <source>In this mode -genproclimit controls how many blocks are generated immediately.</source> <translation type="unfinished"/> </message> <message> <source>Set the number of script verification threads (%u to %d, 0 = auto, &lt;0 = leave that many cores free, default: %d)</source> <translation type="unfinished"/> </message> <message> <source>Set the processor limit for when generation is on (-1 = unlimited, default: -1)</source> <translation type="unfinished"/> </message> <message> <source>This is a pre-release test build - use at your own risk - do not use for mining or merchant applications</source> <translation type="unfinished"/> </message> <message> <source>Unable to bind to %s on this computer. Gapcoin Core is probably already running.</source> <translation type="unfinished"/> </message> <message> <source>Use separate SOCKS5 proxy to reach peers via Tor hidden services (default: -proxy)</source> <translation type="unfinished"/> </message> <message> <source>Warning: -paytxfee is set very high! This is the transaction fee you will pay if you send a transaction.</source> <translation>Įspėjimas: -paytxfee yra nustatytas per didelis. Tai sandorio mokestis, kurį turėsite mokėti, jei siųsite sandorį.</translation> </message> <message> <source>Warning: Please check that your computer&apos;s date and time are correct! If your clock is wrong Gapcoin will not work properly.</source> <translation>Įspėjimas: Patikrinkite, kad kompiuterio data ir laikas yra teisingi.Jei Jūsų laikrodis neteisingai nustatytas Gapcoin, veiks netinkamai.</translation> </message> <message> <source>Warning: The network does not appear to fully agree! Some miners appear to be experiencing issues.</source> <translation type="unfinished"/> </message> <message> <source>Warning: We do not appear to fully agree with our peers! You may need to upgrade, or other nodes may need to upgrade.</source> <translation type="unfinished"/> </message> <message> <source>Warning: error reading wallet.dat! All keys read correctly, but transaction data or address book entries might be missing or incorrect.</source> <translation type="unfinished"/> </message> <message> <source>Warning: wallet.dat corrupt, data salvaged! 
Original wallet.dat saved as wallet.{timestamp}.bak in %s; if your balance or transactions are incorrect you should restore from a backup.</source> <translation type="unfinished"/> </message> <message> <source>(default: 1)</source> <translation type="unfinished"/> </message> <message> <source>(default: wallet.dat)</source> <translation type="unfinished"/> </message> <message> <source>&lt;category&gt; can be:</source> <translation type="unfinished"/> </message> <message> <source>Attempt to recover private keys from a corrupt wallet.dat</source> <translation type="unfinished"/> </message> <message> <source>Gapcoin Core Daemon</source> <translation type="unfinished"/> </message> <message> <source>Block creation options:</source> <translation type="unfinished"/> </message> <message> <source>Clear list of wallet transactions (diagnostic tool; implies -rescan)</source> <translation type="unfinished"/> </message> <message> <source>Connect only to the specified node(s)</source> <translation>Prisijungti tik prie nurodyto mazgo</translation> </message> <message> <source>Connect through SOCKS proxy</source> <translation type="unfinished"/> </message> <message> <source>Connect to JSON-RPC on &lt;port&gt; (default: 31397 or testnet: 19609)</source> <translation type="unfinished"/> </message> <message> <source>Connection options:</source> <translation type="unfinished"/> </message> <message> <source>Corrupted block database detected</source> <translation type="unfinished"/> </message> <message> <source>Debugging/Testing options:</source> <translation type="unfinished"/> </message> <message> <source>Disable safemode, override a real safe mode event (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>Discover own IP address (default: 1 when listening and no -externalip)</source> <translation type="unfinished"/> </message> <message> <source>Do not load the wallet and disable wallet RPC calls</source> <translation type="unfinished"/> </message> <message> <source>Do you want to rebuild the block database now?</source> <translation type="unfinished"/> </message> <message> <source>Error initializing block database</source> <translation type="unfinished"/> </message> <message> <source>Error initializing wallet database environment %s!</source> <translation type="unfinished"/> </message> <message> <source>Error loading block database</source> <translation type="unfinished"/> </message> <message> <source>Error opening block database</source> <translation>Klaida atveriant blokų duombazę</translation> </message> <message> <source>Error: Disk space is low!</source> <translation type="unfinished"/> </message> <message> <source>Error: Wallet locked, unable to create transaction!</source> <translation type="unfinished"/> </message> <message> <source>Error: system error: </source> <translation>Klaida: sistemos klaida:</translation> </message> <message> <source>Failed to listen on any port. 
Use -listen=0 if you want this.</source> <translation type="unfinished"/> </message> <message> <source>Failed to read block info</source> <translation>Nepavyko nuskaityti bloko informacijos</translation> </message> <message> <source>Failed to read block</source> <translation>Nepavyko nuskaityti bloko</translation> </message> <message> <source>Failed to sync block index</source> <translation type="unfinished"/> </message> <message> <source>Failed to write block index</source> <translation type="unfinished"/> </message> <message> <source>Failed to write block info</source> <translation type="unfinished"/> </message> <message> <source>Failed to write block</source> <translation>Nepavyko įrašyti bloko</translation> </message> <message> <source>Failed to write file info</source> <translation>Nepavyko įrašyti failo informacijos</translation> </message> <message> <source>Failed to write to coin database</source> <translation type="unfinished"/> </message> <message> <source>Failed to write transaction index</source> <translation type="unfinished"/> </message> <message> <source>Failed to write undo data</source> <translation type="unfinished"/> </message> <message> <source>Fee per kB to add to transactions you send</source> <translation> Įtraukti mokestį už kB siunčiamiems sandoriams</translation> </message> <message> <source>Fees smaller than this are considered zero fee (for relaying) (default:</source> <translation type="unfinished"/> </message> <message> <source>Find peers using DNS lookup (default: 1 unless -connect)</source> <translation type="unfinished"/> </message> <message> <source>Force safe mode (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>Generate coins (default: 0)</source> <translation>Generuoti monetas (numatyta: 0)</translation> </message> <message> <source>How many blocks to check at startup (default: 288, 0 = all)</source> <translation type="unfinished"/> </message> <message> <source>If &lt;category&gt; is not supplied, output all debugging information.</source> <translation type="unfinished"/> </message> <message> <source>Importing...</source> <translation type="unfinished"/> </message> <message> <source>Incorrect or no genesis block found. 
Wrong datadir for network?</source> <translation type="unfinished"/> </message> <message> <source>Invalid -onion address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <source>Not enough file descriptors available.</source> <translation type="unfinished"/> </message> <message> <source>Prepend debug output with timestamp (default: 1)</source> <translation type="unfinished"/> </message> <message> <source>RPC client options:</source> <translation type="unfinished"/> </message> <message> <source>Rebuild block chain index from current blk000??.dat files</source> <translation type="unfinished"/> </message> <message> <source>Select SOCKS version for -proxy (4 or 5, default: 5)</source> <translation type="unfinished"/> </message> <message> <source>Set database cache size in megabytes (%d to %d, default: %d)</source> <translation type="unfinished"/> </message> <message> <source>Set maximum block size in bytes (default: %d)</source> <translation type="unfinished"/> </message> <message> <source>Set the number of threads to service RPC calls (default: 4)</source> <translation type="unfinished"/> </message> <message> <source>Specify wallet file (within data directory)</source> <translation type="unfinished"/> </message> <message> <source>Spend unconfirmed change when sending transactions (default: 1)</source> <translation type="unfinished"/> </message> <message> <source>This is intended for regression testing tools and app development.</source> <translation type="unfinished"/> </message> <message> <source>Usage (deprecated, use gapcoin-cli):</source> <translation type="unfinished"/> </message> <message> <source>Verifying blocks...</source> <translation>Tikrinami blokai...</translation> </message> <message> <source>Verifying wallet...</source> <translation>Tikrinama piniginė...</translation> </message> <message> <source>Wait for RPC server to start</source> <translation type="unfinished"/> </message> <message> <source>Wallet %s resides outside data directory %s</source> <translation type="unfinished"/> </message> <message> <source>Wallet options:</source> <translation type="unfinished"/> </message> <message> <source>Warning: Deprecated argument -debugnet ignored, use -debug=net</source> <translation type="unfinished"/> </message> <message> <source>You need to rebuild the database using -reindex to change -txindex</source> <translation type="unfinished"/> </message> <message> <source>Imports blocks from external blk000??.dat file</source> <translation type="unfinished"/> </message> <message> <source>Cannot obtain a lock on data directory %s. 
Gapcoin Core is probably already running.</source> <translation type="unfinished"/> </message> <message> <source>Execute command when a relevant alert is received or we see a really long fork (%s in cmd is replaced by message)</source> <translation type="unfinished"/> </message> <message> <source>Output debugging information (default: 0, supplying &lt;category&gt; is optional)</source> <translation type="unfinished"/> </message> <message> <source>Set maximum size of high-priority/low-fee transactions in bytes (default: %d)</source> <translation type="unfinished"/> </message> <message> <source>Information</source> <translation>Informacija</translation> </message> <message> <source>Invalid amount for -minrelaytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <source>Invalid amount for -mintxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <source>Limit size of signature cache to &lt;n&gt; entries (default: 50000)</source> <translation type="unfinished"/> </message> <message> <source>Log transaction priority and fee per kB when mining blocks (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>Maintain a full transaction index (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>Maximum per-connection receive buffer, &lt;n&gt;*1000 bytes (default: 5000)</source> <translation>Maksimalus buferis priėmimo sujungimui &lt;n&gt;*1000 bitų (pagal nutylėjimą: 5000)</translation> </message> <message> <source>Maximum per-connection send buffer, &lt;n&gt;*1000 bytes (default: 1000)</source> <translation>Maksimalus buferis siuntimo sujungimui &lt;n&gt;*1000 bitų (pagal nutylėjimą: 1000)</translation> </message> <message> <source>Only accept block chain matching built-in checkpoints (default: 1)</source> <translation type="unfinished"/> </message> <message> <source>Only connect to nodes in network &lt;net&gt; (IPv4, IPv6 or Tor)</source> <translation type="unfinished"/> </message> <message> <source>Print block on startup, if found in block index</source> <translation type="unfinished"/> </message> <message> <source>Print block tree on startup (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>RPC SSL options: (see the Gapcoin Wiki for SSL setup instructions)</source> <translation type="unfinished"/> </message> <message> <source>RPC server options:</source> <translation type="unfinished"/> </message> <message> <source>Randomly drop 1 of every &lt;n&gt; network messages</source> <translation type="unfinished"/> </message> <message> <source>Randomly fuzz 1 of every &lt;n&gt; network messages</source> <translation type="unfinished"/> </message> <message> <source>Run a thread to flush wallet periodically (default: 1)</source> <translation type="unfinished"/> </message> <message> <source>SSL options: (see the Gapcoin Wiki for SSL setup instructions)</source> <translation>SSL opcijos (žr.e Gapcoin Wiki for SSL setup instructions)</translation> </message> <message> <source>Send command to Gapcoin Core</source> <translation type="unfinished"/> </message> <message> <source>Send trace/debug info to console instead of debug.log file</source> <translation>Siųsti atsekimo/derinimo info į konsolę vietoj debug.log failo</translation> </message> <message> <source>Set minimum block size in bytes (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>Sets the DB_PRIVATE flag in the wallet db environment 
(default: 1)</source> <translation type="unfinished"/> </message> <message> <source>Show all debugging options (usage: --help -help-debug)</source> <translation type="unfinished"/> </message> <message> <source>Show benchmark information (default: 0)</source> <translation type="unfinished"/> </message> <message> <source>Shrink debug.log file on client startup (default: 1 when no -debug)</source> <translation type="unfinished"/> </message> <message> <source>Signing transaction failed</source> <translation type="unfinished"/> </message> <message> <source>Specify connection timeout in milliseconds (default: 5000)</source> <translation>Nustatyti sujungimo trukmę milisekundėmis (pagal nutylėjimą: 5000)</translation> </message> <message> <source>Start Gapcoin Core Daemon</source> <translation type="unfinished"/> </message> <message> <source>System error: </source> <translation>Sistemos klaida:</translation> </message> <message> <source>Transaction amount too small</source> <translation type="unfinished"/> </message> <message> <source>Transaction amounts must be positive</source> <translation type="unfinished"/> </message> <message> <source>Transaction too large</source> <translation type="unfinished"/> </message> <message> <source>Use UPnP to map the listening port (default: 0)</source> <translation>Bandymas naudoti UPnP struktūra klausymosi prievadui (default: 0)</translation> </message> <message> <source>Use UPnP to map the listening port (default: 1 when listening)</source> <translation>Bandymas naudoti UPnP struktūra klausymosi prievadui (default: 1 when listening)</translation> </message> <message> <source>Username for JSON-RPC connections</source> <translation>Vartotojo vardas JSON-RPC jungimuisi</translation> </message> <message> <source>Warning</source> <translation type="unfinished"/> </message> <message> <source>Warning: This version is obsolete, upgrade required!</source> <translation type="unfinished"/> </message> <message> <source>Zapping all transactions from wallet...</source> <translation type="unfinished"/> </message> <message> <source>on startup</source> <translation type="unfinished"/> </message> <message> <source>version</source> <translation>versija</translation> </message> <message> <source>wallet.dat corrupt, salvage failed</source> <translation type="unfinished"/> </message> <message> <source>Password for JSON-RPC connections</source> <translation>Slaptažodis JSON-RPC sujungimams</translation> </message> <message> <source>Allow JSON-RPC connections from specified IP address</source> <translation>Leisti JSON-RPC tik iš nurodytų IP adresų</translation> </message> <message> <source>Send commands to node running on &lt;ip&gt; (default: 127.0.0.1)</source> <translation>Siųsti komandą mazgui dirbančiam &lt;ip&gt; (pagal nutylėjimą: 127.0.0.1)</translation> </message> <message> <source>Execute command when the best block changes (%s in cmd is replaced by block hash)</source> <translation type="unfinished"/> </message> <message> <source>Upgrade wallet to latest format</source> <translation>Atnaujinti piniginę į naujausią formatą</translation> </message> <message> <source>Set key pool size to &lt;n&gt; (default: 100)</source> <translation>Nustatyti rakto apimties dydį &lt;n&gt; (pagal nutylėjimą: 100)</translation> </message> <message> <source>Rescan the block chain for missing wallet transactions</source> <translation>Ieškoti prarastų piniginės sandorių blokų grandinėje</translation> </message> <message> <source>Use OpenSSL (https) for JSON-RPC connections</source> 
<translation>Naudoti OpenSSL (https) jungimuisi JSON-RPC </translation> </message> <message> <source>Server certificate file (default: server.cert)</source> <translation>Serverio sertifikato failas (pagal nutylėjimą: server.cert)</translation> </message> <message> <source>Server private key (default: server.pem)</source> <translation>Serverio privatus raktas (pagal nutylėjimą: server.pem)</translation> </message> <message> <source>This help message</source> <translation>Pagelbos žinutė</translation> </message> <message> <source>Unable to bind to %s on this computer (bind returned error %d, %s)</source> <translation>Nepavyko susieti šiame kompiuteryje prievado %s (bind returned error %d, %s)</translation> </message> <message> <source>Allow DNS lookups for -addnode, -seednode and -connect</source> <translation>Leisti DNS paiešką sujungimui ir mazgo pridėjimui</translation> </message> <message> <source>Loading addresses...</source> <translation>Užkraunami adresai...</translation> </message> <message> <source>Error loading wallet.dat: Wallet corrupted</source> <translation> wallet.dat pakrovimo klaida, wallet.dat sugadintas</translation> </message> <message> <source>Error loading wallet.dat: Wallet requires newer version of Gapcoin</source> <translation> wallet.dat pakrovimo klaida, wallet.dat reikalauja naujasnės Gapcoin versijos</translation> </message> <message> <source>Wallet needed to be rewritten: restart Gapcoin to complete</source> <translation>Piniginė turi būti prrašyta: įvykdymui perkraukite Gapcoin</translation> </message> <message> <source>Error loading wallet.dat</source> <translation> wallet.dat pakrovimo klaida</translation> </message> <message> <source>Invalid -proxy address: &apos;%s&apos;</source> <translation>Neteisingas proxy adresas: &apos;%s&apos;</translation> </message> <message> <source>Unknown network specified in -onlynet: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <source>Unknown -socks proxy version requested: %i</source> <translation type="unfinished"/> </message> <message> <source>Cannot resolve -bind address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <source>Cannot resolve -externalip address: &apos;%s&apos;</source> <translation type="unfinished"/> </message> <message> <source>Invalid amount for -paytxfee=&lt;amount&gt;: &apos;%s&apos;</source> <translation>Neteisinga suma -paytxfee=&lt;amount&gt;: &apos;%s&apos;</translation> </message> <message> <source>Invalid amount</source> <translation>Neteisinga suma</translation> </message> <message> <source>Insufficient funds</source> <translation>Nepakanka lėšų</translation> </message> <message> <source>Loading block index...</source> <translation>Įkeliamas blokų indeksas...</translation> </message> <message> <source>Add a node to connect to and attempt to keep the connection open</source> <translation>Pridėti mazgą prie sujungti su and attempt to keep the connection open</translation> </message> <message> <source>Loading wallet...</source> <translation>Užkraunama piniginė...</translation> </message> <message> <source>Cannot downgrade wallet</source> <translation type="unfinished"/> </message> <message> <source>Cannot write default address</source> <translation>Negalima parašyti įprasto adreso</translation> </message> <message> <source>Rescanning...</source> <translation>Peržiūra</translation> </message> <message> <source>Done loading</source> <translation>Įkėlimas baigtas</translation> </message> <message> <source>To use the %s option</source> 
<translation type="unfinished"/> </message> <message> <source>Error</source> <translation>Klaida</translation> </message> <message> <source>You must set rpcpassword=&lt;password&gt; in the configuration file: %s If the file does not exist, create it with owner-readable-only file permissions.</source> <translation type="unfinished"/> </message> </context> </TS><|fim▁end|>
<source>Other</source> <translation>Kita</translation> </message>
<|file_name|>indexsubscriber.go<|end_file_name|><|fim▁begin|>// Copyright (c) 2021 The Decred developers // Use of this source code is governed by an ISC // license that can be found in the LICENSE file. package indexers import ( "context" "fmt" "sync" "sync/atomic" "github.com/decred/dcrd/blockchain/v4/internal/progresslog" "github.com/decred/dcrd/database/v3" "github.com/decred/dcrd/dcrutil/v4" ) // IndexNtfnType represents an index notification type. type IndexNtfnType int const ( // ConnectNtfn indicates the index notification signals a block // connected to the main chain. ConnectNtfn IndexNtfnType = iota // DisconnectNtfn indicates the index notification signals a block // disconnected from the main chain. DisconnectNtfn ) var ( // bufferSize represents the index notification buffer size. bufferSize = 128 // noPrereqs indicates no index prerequisites. noPrereqs = "none" ) // IndexNtfn represents an index notification detailing a block connection // or disconnection. type IndexNtfn struct { NtfnType IndexNtfnType Block *dcrutil.Block Parent *dcrutil.Block PrevScripts PrevScripter IsTreasuryEnabled bool Done chan bool } // IndexSubscription represents a subscription for index updates. type IndexSubscription struct { id string idx Indexer subscriber *IndexSubscriber mtx sync.Mutex // prerequisite defines the notification processing hierarchy for this // subscription. It is expected that the subscriber associated with the // prerequisite provided processes notifications before they are // delivered by this subscription to its subscriber. An empty string // indicates the subscription has no prerequisite. prerequisite string // dependent defines the index subscription that requires the subscriber // associated with this subscription to have processed incoming // notifications before it does. A nil dependency indicates the subscription // has no dependencies. dependent *IndexSubscription } // newIndexSubscription initializes a new index subscription. func newIndexSubscription(subber *IndexSubscriber, indexer Indexer, prereq string) *IndexSubscription { return &IndexSubscription{ id: indexer.Name(), idx: indexer, prerequisite: prereq, subscriber: subber, } } // stop prevents any future index updates from being delivered and // unsubscribes the associated subscription. func (s *IndexSubscription) stop() error { <|fim▁hole|> // subscription as a dependency. if s.prerequisite != noPrereqs { s.mtx.Lock() prereq, ok := s.subscriber.subscriptions[s.prerequisite] s.mtx.Unlock() if !ok { return fmt.Errorf("no subscription found with id %s", s.prerequisite) } prereq.mtx.Lock() prereq.dependent = nil prereq.mtx.Unlock() return nil } // If the subscription has a dependent, stop it as well. if s.dependent != nil { err := s.dependent.stop() if err != nil { return err } } // If the subscription is independent, remove it from the // index subscriber's subscriptions. s.mtx.Lock() delete(s.subscriber.subscriptions, s.id) s.mtx.Unlock() return nil } // IndexSubscriber subscribes clients for index updates. type IndexSubscriber struct { subscribers uint32 // update atomically. c chan IndexNtfn subscriptions map[string]*IndexSubscription mtx sync.Mutex ctx context.Context cancel context.CancelFunc quit chan struct{} } // NewIndexSubscriber creates a new index subscriber. It also starts the // handler for incoming index update subscriptions. 
func NewIndexSubscriber(sCtx context.Context) *IndexSubscriber { ctx, cancel := context.WithCancel(sCtx) s := &IndexSubscriber{ c: make(chan IndexNtfn, bufferSize), subscriptions: make(map[string]*IndexSubscription), ctx: ctx, cancel: cancel, quit: make(chan struct{}), } return s } // Subscribe subscribes an index for updates. The returned index subscription // has functions to retrieve a channel that produces a stream of index updates // and to stop the stream when the caller no longer wishes to receive updates. func (s *IndexSubscriber) Subscribe(index Indexer, prerequisite string) (*IndexSubscription, error) { sub := newIndexSubscription(s, index, prerequisite) // If the subscription has a prequisite, find it and set the subscription // as a dependency. if prerequisite != noPrereqs { s.mtx.Lock() prereq, ok := s.subscriptions[prerequisite] s.mtx.Unlock() if !ok { return nil, fmt.Errorf("no subscription found with id %s", prerequisite) } prereq.mtx.Lock() defer prereq.mtx.Unlock() if prereq.dependent != nil { return nil, fmt.Errorf("%s already has a dependent set: %s", prereq.id, prereq.dependent.id) } prereq.dependent = sub atomic.AddUint32(&s.subscribers, 1) return sub, nil } // If the subscription does not have a prerequisite, add it to the index // subscriber's subscriptions. s.mtx.Lock() s.subscriptions[sub.id] = sub s.mtx.Unlock() atomic.AddUint32(&s.subscribers, 1) return sub, nil } // Notify relays an index notification to subscribed indexes for processing. func (s *IndexSubscriber) Notify(ntfn *IndexNtfn) { subscribers := atomic.LoadUint32(&s.subscribers) // Only relay notifications when there are subscribed indexes // to be notified. if subscribers > 0 { select { case <-s.quit: case s.c <- *ntfn: } } } // findLowestIndexTipHeight determines the lowest index tip height among // subscribed indexes and their dependencies. func (s *IndexSubscriber) findLowestIndexTipHeight(queryer ChainQueryer) (int64, int64, error) { // Find the lowest tip height to catch up among subscribed indexes. bestHeight, _ := queryer.Best() lowestHeight := bestHeight for _, sub := range s.subscriptions { tipHeight, tipHash, err := sub.idx.Tip() if err != nil { return 0, bestHeight, err } // Ensure the index tip is on the main chain. if !queryer.MainChainHasBlock(tipHash) { return 0, bestHeight, fmt.Errorf("%s: index tip (%s) is not on the "+ "main chain", sub.idx.Name(), tipHash) } if tipHeight < lowestHeight { lowestHeight = tipHeight } // Update the lowest tip height if a dependent has a lower tip height. dependent := sub.dependent for dependent != nil { tipHeight, _, err := sub.dependent.idx.Tip() if err != nil { return 0, bestHeight, err } if tipHeight < lowestHeight { lowestHeight = tipHeight } dependent = dependent.dependent } } return lowestHeight, bestHeight, nil } // CatchUp syncs all subscribed indexes to the the main chain by connecting // blocks from after the lowest index tip to the current main chain tip. // // This should be called after all indexes have subscribed for updates. func (s *IndexSubscriber) CatchUp(ctx context.Context, db database.DB, queryer ChainQueryer) error { lowestHeight, bestHeight, err := s.findLowestIndexTipHeight(queryer) if err != nil { return err } // Nothing to do if all indexes are synced. if bestHeight == lowestHeight { return nil } // Create a progress logger for the indexing process below. 
progressLogger := progresslog.NewBlockProgressLogger("Indexed", log) // tip and need to be caught up, so log the details and loop through // each block that needs to be indexed. log.Infof("Catching up from height %d to %d", lowestHeight, bestHeight) var cachedParent *dcrutil.Block for height := lowestHeight + 1; height <= bestHeight; height++ { if interruptRequested(ctx) { return indexerError(ErrInterruptRequested, interruptMsg) } hash, err := queryer.BlockHashByHeight(height) if err != nil { return err } // Ensure the next tip hash is on the main chain. if !queryer.MainChainHasBlock(hash) { msg := fmt.Sprintf("the next block being synced to (%s) "+ "at height %d is not on the main chain", hash, height) return indexerError(ErrBlockNotOnMainChain, msg) } var parent *dcrutil.Block if cachedParent == nil && height > 0 { parentHash, err := queryer.BlockHashByHeight(height - 1) if err != nil { return err } parent, err = queryer.BlockByHash(parentHash) if err != nil { return err } } else { parent = cachedParent } child, err := queryer.BlockByHash(hash) if err != nil { return err } // Construct and send the index notification. var prevScripts PrevScripter err = db.View(func(dbTx database.Tx) error { if interruptRequested(ctx) { return indexerError(ErrInterruptRequested, interruptMsg) } prevScripts, err = queryer.PrevScripts(dbTx, child) if err != nil { return err } return nil }) if err != nil { return err } isTreasuryEnabled, err := queryer.IsTreasuryAgendaActive(parent.Hash()) if err != nil { return err } ntfn := &IndexNtfn{ NtfnType: ConnectNtfn, Block: child, Parent: parent, PrevScripts: prevScripts, IsTreasuryEnabled: isTreasuryEnabled, } // Relay the index update to subscribed indexes. for _, sub := range s.subscriptions { err := updateIndex(ctx, sub.idx, ntfn) if err != nil { s.cancel() return err } } cachedParent = child progressLogger.LogBlockHeight(child.MsgBlock(), parent.MsgBlock()) } log.Infof("Caught up to height %d", bestHeight) return nil } // Run relays index notifications to subscribed indexes. // // This should be run as a goroutine. func (s *IndexSubscriber) Run(ctx context.Context) { for { select { case ntfn := <-s.c: // Relay the index update to subscribed indexes. for _, sub := range s.subscriptions { err := updateIndex(ctx, sub.idx, &ntfn) if err != nil { log.Error(err) s.cancel() break } } if ntfn.Done != nil { close(ntfn.Done) } case <-ctx.Done(): log.Infof("Index subscriber shutting down") close(s.quit) // Stop all updates to subscribed indexes and terminate their // processes. for _, sub := range s.subscriptions { err := sub.stop() if err != nil { log.Error("unable to stop index subscription: %v", err) } } s.cancel() return } } }<|fim▁end|>
// If the subscription has a prerequisite, find it and remove the
<|file_name|>iterator_ops_test.py<|end_file_name|><|fim▁begin|># Copyright 2018 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for experimental iterator_ops."""

from __future__ import absolute_import
from __future__ import division
from __future__ import print_function

from tensorflow.contrib.data.python.ops import iterator_ops
from tensorflow.python.data.ops import dataset_ops
from tensorflow.python.estimator import estimator
from tensorflow.python.estimator import model_fn
from tensorflow.python.framework import constant_op
from tensorflow.python.framework import dtypes
from tensorflow.python.framework import ops
from tensorflow.python.ops import control_flow_ops
from tensorflow.python.ops import variables
from tensorflow.python.platform import test
from tensorflow.python.training import saver as saver_lib
from tensorflow.python.training import training_util

<|fim▁hole|>
class CheckpointInputPipelineHookTest(test.TestCase):

  @staticmethod
  def _model_fn(features, labels, mode, config):
    del labels
    del mode
    del config
    global_step = training_util.get_or_create_global_step()
    update_global_step_op = global_step.assign_add(1)
    latest_feature = variables.Variable(
        0, name='latest_feature', dtype=dtypes.int64)
    store_latest_feature_op = latest_feature.assign(features)
    ops.add_to_collection('my_vars', global_step)
    ops.add_to_collection('my_vars', latest_feature)
    return model_fn.EstimatorSpec(
        mode='train',
        train_op=control_flow_ops.group(
            [update_global_step_op, store_latest_feature_op]),
        loss=constant_op.constant(2.0))

  def _read_vars(self, model_dir):
    """Returns (global_step, latest_feature)."""
    with ops.Graph().as_default() as g:
      ckpt_path = saver_lib.latest_checkpoint(model_dir)
      meta_filename = ckpt_path + '.meta'
      saver_lib.import_meta_graph(meta_filename)
      saver = saver_lib.Saver()
      with self.test_session(graph=g) as sess:
        saver.restore(sess, ckpt_path)
        return sess.run(ops.get_collection('my_vars'))

  def _build_iterator_saver_hook(self, est):
    return iterator_ops.CheckpointInputPipelineHook(est)

  def testReturnDatasetFromInputFn(self):

    def _input_fn():
      return dataset_ops.Dataset.range(10)

    est = estimator.Estimator(model_fn=self._model_fn)

    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))

  def testBuildIteratorInInputFn(self):

    def _input_fn():
      ds = dataset_ops.Dataset.range(10)
      iterator = ds.make_one_shot_iterator()
      return iterator.get_next()

    est = estimator.Estimator(model_fn=self._model_fn)

    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))

  def testDoNotRestore(self):

    def _input_fn():
      return dataset_ops.Dataset.range(10)

    est = estimator.Estimator(model_fn=self._model_fn)

    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (2, 1))
    est.train(_input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])
    self.assertSequenceEqual(self._read_vars(est.model_dir), (4, 3))
    # Hook not provided, input pipeline was not restored.
    est.train(_input_fn, steps=2)
    self.assertSequenceEqual(self._read_vars(est.model_dir), (6, 1))

  def testRaiseErrorIfNoIterator(self):

    def _input_fn():
      return constant_op.constant(1, dtype=dtypes.int64)

    est = estimator.Estimator(model_fn=self._model_fn)

    with self.assertRaises(ValueError):
      est.train(
          _input_fn, steps=2, hooks=[self._build_iterator_saver_hook(est)])


if __name__ == '__main__':
  test.main()<|fim▁end|>
<|file_name|>content.js<|end_file_name|><|fim▁begin|>jQuery( document ).ready(function() {
  jQuery( '.pane-bundle-widget-sketch span' ).click(function() {
    jQuery( '.pane-bundle-widget-sketch .container' ).toggle( 'slide' );
  });
<|fim▁hole|>});<|fim▁end|>
<|file_name|>VideoSyncDRM.cpp<|end_file_name|><|fim▁begin|>/* * Copyright (C) 2005-2014 Team XBMC * http://xbmc.org * * This Program is free software; you can redistribute it and/or modify * it under the terms of the GNU General Public License as published by * the Free Software Foundation; either version 2, or (at your option) * any later version. * * This Program is distributed in the hope that it will be useful, * but WITHOUT ANY WARRANTY; without even the implied warranty of * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the * GNU General Public License for more details. * * You should have received a copy of the GNU General Public License * along with XBMC; see the file COPYING. If not, see * <http://www.gnu.org/licenses/>. * */ #include "system.h" #if defined(HAVE_X11) #include "video/videosync/VideoSyncDRM.h" #include "xf86drm.h" #include <sys/poll.h> #include <sys/time.h> #include "utils/TimeUtils.h" #include "utils/MathUtils.h" #include "windowing/WindowingFactory.h" #include "guilib/GraphicContext.h" #include "utils/log.h" bool CVideoSyncDRM::Setup(PUPDATECLOCK func) { CLog::Log(LOGDEBUG, "CVideoSyncDRM::%s - setting up DRM", __FUNCTION__); UpdateClock = func; m_fd = open("/dev/dri/card0", O_RDWR, 0); if (m_fd < 0) { CLog::Log(LOGERROR, "CVideoSyncDRM::%s - can't open /dev/dri/card0", __FUNCTION__); return false; } drmVBlank vbl; int ret; vbl.request.type = DRM_VBLANK_RELATIVE; vbl.request.sequence = 0; ret = drmWaitVBlank(m_fd, &vbl); if (ret != 0) { CLog::Log(LOGERROR, "CVideoSyncDRM::%s - drmWaitVBlank returned error", __FUNCTION__); return false; } m_abort = false; g_Windowing.Register(this); return true; } void CVideoSyncDRM::Run(volatile bool& stop) { drmVBlank vbl; VblInfo info; int ret; int crtc = g_Windowing.GetCrtc(); vbl.request.type = DRM_VBLANK_RELATIVE; if (crtc == 1) { vbl.request.type = (drmVBlankSeqType)(vbl.request.type | DRM_VBLANK_SECONDARY); } else if (crtc > 1) { vbl.request.type = (drmVBlankSeqType)(vbl.request.type | ((crtc << DRM_VBLANK_HIGH_CRTC_SHIFT) & DRM_VBLANK_HIGH_CRTC_MASK)); } vbl.request.sequence = 0; ret = drmWaitVBlank(m_fd, &vbl); if (ret != 0) { CLog::Log(LOGERROR, "CVideoSyncDRM::%s - drmWaitVBlank returned error", __FUNCTION__); return; } info.start = CurrentHostCounter(); info.videoSync = this; vbl.request.type = (drmVBlankSeqType)(DRM_VBLANK_RELATIVE | DRM_VBLANK_EVENT); if (crtc == 1) { vbl.request.type = (drmVBlankSeqType)(vbl.request.type | DRM_VBLANK_SECONDARY); } else if (crtc > 1) { vbl.request.type = (drmVBlankSeqType)(vbl.request.type | ((crtc << DRM_VBLANK_HIGH_CRTC_SHIFT) & DRM_VBLANK_HIGH_CRTC_MASK)); } vbl.request.sequence = 1; vbl.request.signal = (unsigned long)&info; ret = drmWaitVBlank(m_fd, &vbl); if (ret != 0) { CLog::Log(LOGERROR, "CVideoSyncDRM::%s - drmWaitVBlank returned error", __FUNCTION__); return; } drmEventContext evctx; memset(&evctx, 0, sizeof evctx); evctx.version = DRM_EVENT_CONTEXT_VERSION; evctx.vblank_handler = EventHandler; evctx.page_flip_handler = NULL; timeval timeout; fd_set fds; FD_ZERO(&fds); FD_SET(m_fd, &fds); while (!stop && !m_abort) { timeout.tv_sec = 1; timeout.tv_usec = 0; ret = select(m_fd + 1, &fds, NULL, NULL, &timeout); if (ret <= 0) { continue; } ret = drmHandleEvent(m_fd, &evctx); if (ret != 0) { CLog::Log(LOGERROR, "CVideoSyncDRM::%s - drmHandleEvent returned error", __FUNCTION__); break; } } } void CVideoSyncDRM::Cleanup() { close(m_fd); g_Windowing.Unregister(this); } void CVideoSyncDRM::EventHandler(int fd, unsigned int frame, unsigned int sec, unsigned int usec, void 
*data) { drmVBlank vbl; struct timeval end; VblInfo *info = (VblInfo*)data; int crtc = g_Windowing.GetCrtc(); vbl.request.type = (drmVBlankSeqType)(DRM_VBLANK_RELATIVE | DRM_VBLANK_EVENT); if (crtc == 1) { vbl.request.type = (drmVBlankSeqType)(vbl.request.type | DRM_VBLANK_SECONDARY); } else if (crtc > 1) { vbl.request.type = (drmVBlankSeqType)(vbl.request.type | ((crtc << DRM_VBLANK_HIGH_CRTC_SHIFT) & DRM_VBLANK_HIGH_CRTC_MASK)); } vbl.request.sequence = 1; vbl.request.signal = (unsigned long)data; drmWaitVBlank(info->videoSync->m_fd, &vbl); uint64_t now = CurrentHostCounter(); float diff = (float)(now - info->start)/CurrentHostFrequency(); int vblanks = MathUtils::round_int(diff * info->videoSync->m_fps); info->start = now; info->videoSync->UpdateClock(vblanks, now); } void CVideoSyncDRM::OnResetDevice() { m_abort = true; } <|fim▁hole|>float CVideoSyncDRM::GetFps() { m_fps = g_graphicsContext.GetFPS(); return m_fps; } #endif<|fim▁end|>
<|file_name|>MultiReader.java<|end_file_name|><|fim▁begin|>package org.targettest.org.apache.lucene.index; /** * Licensed to the Apache Software Foundation (ASF) under one or more * contributor license agreements. See the NOTICE file distributed with * this work for additional information regarding copyright ownership. * The ASF licenses this file to You under the Apache License, Version 2.0 * (the "License"); you may not use this file except in compliance with * the License. You may obtain a copy of the License at * * http://www.apache.org/licenses/LICENSE-2.0 * * Unless required by applicable law or agreed to in writing, software * distributed under the License is distributed on an "AS IS" BASIS, * WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. * See the License for the specific language governing permissions and * limitations under the License. */ import java.io.IOException; import java.util.Arrays; import java.util.Collection; import java.util.HashMap; import java.util.Map; import org.targettest.org.apache.lucene.document.Document; import org.targettest.org.apache.lucene.document.FieldSelector; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermDocs; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermEnum; import org.targettest.org.apache.lucene.index.DirectoryReader.MultiTermPositions; import org.targettest.org.apache.lucene.search.DefaultSimilarity; import org.targettest.org.apache.lucene.search.FieldCache; /** An IndexReader which reads multiple indexes, appending * their content. */ public class MultiReader extends IndexReader implements Cloneable { protected IndexReader[] subReaders; private int[] starts; // 1st docno for each segment private boolean[] decrefOnClose; // remember which subreaders to decRef on close private Map<String,byte[]> normsCache = new HashMap<String,byte[]>(); private int maxDoc = 0; private int numDocs = -1; private boolean hasDeletions = false; /** * <p>Construct a MultiReader aggregating the named set of (sub)readers. * Directory locking for delete, undeleteAll, and setNorm operations is * left to the subreaders. </p> * <p>Note that all subreaders are closed if this Multireader is closed.</p> * @param subReaders set of (sub)readers * @throws IOException */ public MultiReader(IndexReader... subReaders) { initialize(subReaders, true); } /** * <p>Construct a MultiReader aggregating the named set of (sub)readers. * Directory locking for delete, undeleteAll, and setNorm operations is * left to the subreaders. </p> * @param closeSubReaders indicates whether the subreaders should be closed * when this MultiReader is closed * @param subReaders set of (sub)readers * @throws IOException */ public MultiReader(IndexReader[] subReaders, boolean closeSubReaders) { initialize(subReaders, closeSubReaders); } private void initialize(IndexReader[] subReaders, boolean closeSubReaders) { this.subReaders = subReaders.clone(); starts = new int[subReaders.length + 1]; // build starts array decrefOnClose = new boolean[subReaders.length]; for (int i = 0; i < subReaders.length; i++) { starts[i] = maxDoc; maxDoc += subReaders[i].maxDoc(); // compute maxDocs if (!closeSubReaders) { subReaders[i].incRef(); decrefOnClose[i] = true; } else { decrefOnClose[i] = false; } if (subReaders[i].hasDeletions()) hasDeletions = true; } starts[subReaders.length] = maxDoc; } /** * Tries to reopen the subreaders. * <br> * If one or more subreaders could be re-opened (i. e. 
subReader.reopen() * returned a new instance != subReader), then a new MultiReader instance * is returned, otherwise this instance is returned. * <p> * A re-opened instance might share one or more subreaders with the old * instance. Index modification operations result in undefined behavior * when performed before the old instance is closed. * (see {@link IndexReader#reopen()}). * <p> * If subreaders are shared, then the reference count of those * readers is increased to ensure that the subreaders remain open * until the last referring reader is closed. * * @throws CorruptIndexException if the index is corrupt * @throws IOException if there is a low-level IO error */ @Override public synchronized IndexReader reopen() throws CorruptIndexException, IOException { return doReopen(false); } /** * Clones the subreaders. * (see {@link IndexReader#clone()}). * <br> * <p> * If subreaders are shared, then the reference count of those * readers is increased to ensure that the subreaders remain open * until the last referring reader is closed. */ @Override public synchronized Object clone() { try { return doReopen(true); } catch (Exception ex) { throw new RuntimeException(ex); } } /** * If clone is true then we clone each of the subreaders * @param doClone * @return New IndexReader, or same one (this) if * reopen/clone is not necessary * @throws CorruptIndexException * @throws IOException */ protected IndexReader doReopen(boolean doClone) throws CorruptIndexException, IOException { ensureOpen(); boolean reopened = false; IndexReader[] newSubReaders = new IndexReader[subReaders.length]; boolean success = false; try { for (int i = 0; i < subReaders.length; i++) { if (doClone) newSubReaders[i] = (IndexReader) subReaders[i].clone(); else newSubReaders[i] = subReaders[i].reopen(); // if at least one of the subreaders was updated we remember that // and return a new MultiReader if (newSubReaders[i] != subReaders[i]) { reopened = true; } } success = true; } finally { if (!success && reopened) { for (int i = 0; i < newSubReaders.length; i++) { if (newSubReaders[i] != subReaders[i]) { try { newSubReaders[i].close(); } catch (IOException ignore) { // keep going - we want to clean up as much as possible } } } } } if (reopened) { boolean[] newDecrefOnClose = new boolean[subReaders.length]; for (int i = 0; i < subReaders.length; i++) { if (newSubReaders[i] == subReaders[i]) { newSubReaders[i].incRef(); newDecrefOnClose[i] = true; } } MultiReader mr = new MultiReader(newSubReaders); mr.decrefOnClose = newDecrefOnClose; return mr; } else { return this; } } @Override public TermFreqVector[] getTermFreqVectors(int n) throws IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVectors(n - starts[i]); // dispatch to segment } @Override public TermFreqVector getTermFreqVector(int n, String field) throws IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].getTermFreqVector(n - starts[i], field); } @Override public void getTermFreqVector(int docNumber, String field, TermVectorMapper mapper) throws IOException { ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], field, mapper); } @Override public void getTermFreqVector(int docNumber, TermVectorMapper mapper) throws IOException { ensureOpen(); int i = readerIndex(docNumber); // find segment num subReaders[i].getTermFreqVector(docNumber - starts[i], mapper); } @Override public boolean isOptimized() { return false; } 
@Override public int numDocs() { // Don't call ensureOpen() here (it could affect performance) // NOTE: multiple threads may wind up init'ing // numDocs... but that's harmless if (numDocs == -1) { // check cache int n = 0; // cache miss--recompute for (int i = 0; i < subReaders.length; i++) n += subReaders[i].numDocs(); // sum from readers numDocs = n; } return numDocs; } @Override public int maxDoc() { // Don't call ensureOpen() here (it could affect performance) return maxDoc; } // inherit javadoc @Override public Document document(int n, FieldSelector fieldSelector) throws CorruptIndexException, IOException { ensureOpen(); int i = readerIndex(n); // find segment num return subReaders[i].document(n - starts[i], fieldSelector); // dispatch to segment reader } @Override public boolean isDeleted(int n) { // Don't call ensureOpen() here (it could affect performance) int i = readerIndex(n); // find segment num return subReaders[i].isDeleted(n - starts[i]); // dispatch to segment reader } @Override public boolean hasDeletions() { // Don't call ensureOpen() here (it could affect performance) return hasDeletions; } @Override protected void doDelete(int n) throws CorruptIndexException, IOException { numDocs = -1; // invalidate cache int i = readerIndex(n); // find segment num subReaders[i].deleteDocument(n - starts[i]); // dispatch to segment reader hasDeletions = true; } @Override protected void doUndeleteAll() throws CorruptIndexException, IOException { for (int i = 0; i < subReaders.length; i++) subReaders[i].undeleteAll(); hasDeletions = false; numDocs = -1; // invalidate cache } private int readerIndex(int n) { // find reader for doc n: return DirectoryReader.readerIndex(n, this.starts, this.subReaders.length); } @Override public boolean hasNorms(String field) throws IOException { ensureOpen(); for (int i = 0; i < subReaders.length; i++) { if (subReaders[i].hasNorms(field)) return true; } return false; } @Override public synchronized byte[] norms(String field) throws IOException { ensureOpen(); byte[] bytes = normsCache.get(field); if (bytes != null) return bytes; // cache hit if (!hasNorms(field)) return null; bytes = new byte[maxDoc()]; for (int i = 0; i < subReaders.length; i++) subReaders[i].norms(field, bytes, starts[i]); normsCache.put(field, bytes); // update cache return bytes; } @Override public synchronized void norms(String field, byte[] result, int offset) throws IOException { ensureOpen(); byte[] bytes = normsCache.get(field); for (int i = 0; i < subReaders.length; i++) // read from segments subReaders[i].norms(field, result, offset + starts[i]); if (bytes==null && !hasNorms(field)) { Arrays.fill(result, offset, result.length, DefaultSimilarity.encodeNorm(1.0f)); } else if (bytes != null) { // cache hit System.arraycopy(bytes, 0, result, offset, maxDoc()); } else { for (int i = 0; i < subReaders.length; i++) { // read from segments subReaders[i].norms(field, result, offset + starts[i]); } } } @Override protected void doSetNorm(int n, String field, byte value) throws CorruptIndexException, IOException { synchronized (normsCache) { normsCache.remove(field); // clear cache } int i = readerIndex(n); // find segment num subReaders[i].setNorm(n-starts[i], field, value); // dispatch } @Override public TermEnum terms() throws IOException { ensureOpen(); return new MultiTermEnum(this, subReaders, starts, null); } @Override public TermEnum terms(Term term) throws IOException { ensureOpen(); return new MultiTermEnum(this, subReaders, starts, term); } @Override public int docFreq(Term t) 
throws IOException { ensureOpen(); int total = 0; // sum freqs in segments for (int i = 0; i < subReaders.length; i++) total += subReaders[i].docFreq(t); return total; } @Override public TermDocs termDocs() throws IOException { ensureOpen(); return new MultiTermDocs(this, subReaders, starts); } @Override public TermPositions termPositions() throws IOException { ensureOpen(); return new MultiTermPositions(this, subReaders, starts); } @Override protected void doCommit(Map<String,String> commitUserData) throws IOException { for (int i = 0; i < subReaders.length; i++) subReaders[i].commit(commitUserData); } @Override protected synchronized void doClose() throws IOException { for (int i = 0; i < subReaders.length; i++) { if (decrefOnClose[i]) { subReaders[i].decRef(); } else { subReaders[i].close();<|fim▁hole|> // NOTE: only needed in case someone had asked for // FieldCache for top-level reader (which is generally // not a good idea): FieldCache.DEFAULT.purge(this); } @Override public Collection<String> getFieldNames (IndexReader.FieldOption fieldNames) { ensureOpen(); return DirectoryReader.getFieldNames(fieldNames, this.subReaders); } /** * Checks recursively if all subreaders are up to date. */ @Override public boolean isCurrent() throws CorruptIndexException, IOException { for (int i = 0; i < subReaders.length; i++) { if (!subReaders[i].isCurrent()) { return false; } } // all subreaders are up to date return true; } /** Not implemented. * @throws UnsupportedOperationException */ @Override public long getVersion() { throw new UnsupportedOperationException("MultiReader does not support this method."); } @Override public IndexReader[] getSequentialSubReaders() { return subReaders; } }<|fim▁end|>
} }
<|file_name|>0004_auto_20160306_1424.py<|end_file_name|><|fim▁begin|># -*- coding: utf-8 -*-<|fim▁hole|>from __future__ import unicode_literals

import django.core.validators
from django.db import migrations, models


class Migration(migrations.Migration):

    dependencies = [
        ('badges', '0003_badgedesign_bg_color'),
    ]

    operations = [
        migrations.AlterField(
            model_name='badgedesign',
            name='bg_color',
            field=models.CharField(default='#FFFFFF', help_text='E.g. #00ff00', max_length=7, validators=[django.core.validators.RegexValidator('^#[a-fA-F0-9]{6}$')], verbose_name='Background color'),
        ),
    ]<|fim▁end|>
# Generated by Django 1.9.4 on 2016-03-06 13:24
<|file_name|>peewee.py<|end_file_name|><|fim▁begin|># May you do good and not evil # May you find forgiveness for yourself and forgive others # May you share freely, never taking more than you give. -- SQLite source code # # As we enjoy great advantages from the inventions of others, we should be glad # of an opportunity to serve others by an invention of ours, and this we should # do freely and generously. -- Ben Franklin # # (\ # ( \ /(o)\ caw! # ( \/ ()/ /) # ( `;.))'".) # `(/////.-' # =====))=))===() # ///' # // # ' import datetime import decimal import hashlib import logging import operator import re import sys import threading import uuid from collections import deque from collections import namedtuple try: from collections import OrderedDict except ImportError: OrderedDict = dict from copy import deepcopy from functools import wraps from inspect import isclass __version__ = '2.4.7' __all__ = [ 'BareField', 'BigIntegerField', 'BlobField', 'BooleanField', 'CharField', 'Check', 'Clause', 'CompositeKey', 'DatabaseError', 'DataError', 'DateField', 'DateTimeField', 'DecimalField', 'DoesNotExist', 'DoubleField', 'DQ', 'Field', 'FloatField', 'fn', 'ForeignKeyField', 'ImproperlyConfigured', 'IntegerField', 'IntegrityError', 'InterfaceError', 'InternalError', 'JOIN_FULL', 'JOIN_INNER', 'JOIN_LEFT_OUTER', 'Model', 'MySQLDatabase', 'NotSupportedError', 'OperationalError', 'Param', 'PostgresqlDatabase', 'prefetch', 'PrimaryKeyField', 'ProgrammingError', 'Proxy', 'R', 'SqliteDatabase', 'SQL', 'TextField', 'TimeField', 'UUIDField', 'Window', ] # Set default logging handler to avoid "No handlers could be found for logger # "peewee"" warnings. try: # Python 2.7+ from logging import NullHandler except ImportError: class NullHandler(logging.Handler): def emit(self, record): pass # All peewee-generated logs are logged to this namespace. logger = logging.getLogger('peewee') logger.addHandler(NullHandler()) # Python 2/3 compatibility helpers. These helpers are used internally and are # not exported. def with_metaclass(meta, base=object): return meta("NewBase", (base,), {}) PY2 = sys.version_info[0] == 2 PY3 = sys.version_info[0] == 3 if PY3: import builtins from collections import Callable from functools import reduce callable = lambda c: isinstance(c, Callable) unicode_type = str string_type = bytes basestring = str print_ = getattr(builtins, 'print') binary_construct = lambda s: bytes(s.encode('raw_unicode_escape')) def reraise(tp, value, tb=None): if value.__traceback__ is not tb: raise value.with_traceback(tb) raise value elif PY2: unicode_type = unicode string_type = basestring binary_construct = buffer def print_(s): sys.stdout.write(s) sys.stdout.write('\n') exec('def reraise(tp, value, tb=None): raise tp, value, tb') else: raise RuntimeError('Unsupported python version.') # By default, peewee supports Sqlite, MySQL and Postgresql. try: import sqlite3 except ImportError: try: from pysqlite2 import dbapi2 as sqlite3 except ImportError: sqlite3 = None try: from psycopg2cffi import compat compat.register() except ImportError: pass try: import psycopg2 from psycopg2 import extensions as pg_extensions except ImportError: psycopg2 = None try: import MySQLdb as mysql # prefer the C module. 
except ImportError: try: import pymysql as mysql except ImportError: mysql = None if sqlite3: sqlite3.register_adapter(decimal.Decimal, str) sqlite3.register_adapter(datetime.date, str) sqlite3.register_adapter(datetime.time, str) DATETIME_PARTS = ['year', 'month', 'day', 'hour', 'minute', 'second'] DATETIME_LOOKUPS = set(DATETIME_PARTS) # Sqlite does not support the `date_part` SQL function, so we will define an # implementation in python. SQLITE_DATETIME_FORMATS = ( '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d', '%H:%M:%S', '%H:%M:%S.%f', '%H:%M') def _sqlite_date_part(lookup_type, datetime_string): assert lookup_type in DATETIME_LOOKUPS if not datetime_string: return dt = format_date_time(datetime_string, SQLITE_DATETIME_FORMATS) return getattr(dt, lookup_type) SQLITE_DATE_TRUNC_MAPPING = { 'year': '%Y', 'month': '%Y-%m', 'day': '%Y-%m-%d', 'hour': '%Y-%m-%d %H', 'minute': '%Y-%m-%d %H:%M', 'second': '%Y-%m-%d %H:%M:%S'} MYSQL_DATE_TRUNC_MAPPING = SQLITE_DATE_TRUNC_MAPPING.copy() MYSQL_DATE_TRUNC_MAPPING['minute'] = '%Y-%m-%d %H:%i' MYSQL_DATE_TRUNC_MAPPING['second'] = '%Y-%m-%d %H:%i:%S' def _sqlite_date_trunc(lookup_type, datetime_string): assert lookup_type in SQLITE_DATE_TRUNC_MAPPING if not datetime_string: return dt = format_date_time(datetime_string, SQLITE_DATETIME_FORMATS) return dt.strftime(SQLITE_DATE_TRUNC_MAPPING[lookup_type]) def _sqlite_regexp(regex, value): return re.search(regex, value, re.I) is not None # Operators used in binary expressions. OP_AND = 'and' OP_OR = 'or' OP_ADD = '+' OP_SUB = '-' OP_MUL = '*' OP_DIV = '/' OP_BIN_AND = '&' OP_BIN_OR = '|' OP_XOR = '^' OP_MOD = '%' OP_EQ = '=' OP_LT = '<' OP_LTE = '<=' OP_GT = '>' OP_GTE = '>=' OP_NE = '!=' OP_IN = 'in' OP_NOT_IN = 'not in' OP_IS = 'is' OP_IS_NOT = 'is not' OP_LIKE = 'like' OP_ILIKE = 'ilike' OP_BETWEEN = 'between' OP_REGEXP = 'regexp' OP_CONCAT = '||' # To support "django-style" double-underscore filters, create a mapping between # operation name and operation code, e.g. "__eq" == OP_EQ. DJANGO_MAP = { 'eq': OP_EQ, 'lt': OP_LT, 'lte': OP_LTE, 'gt': OP_GT, 'gte': OP_GTE, 'ne': OP_NE, 'in': OP_IN, 'is': OP_IS, 'like': OP_LIKE, 'ilike': OP_ILIKE, 'regexp': OP_REGEXP, } JOIN_INNER = 'inner' JOIN_LEFT_OUTER = 'left outer' JOIN_RIGHT_OUTER = 'right outer' JOIN_FULL = 'full' # Helper functions that are used in various parts of the codebase. def merge_dict(source, overrides): merged = source.copy() merged.update(overrides) return merged def pythonify_name(name): name = re.sub('([a-z_])([A-Z][_a-z])', '\\1 \\2', name) return re.sub('[^\w+]', '_', name.lower()) def returns_clone(func): """ Method decorator that will "clone" the object before applying the given method. This ensures that state is mutated in a more predictable fashion, and promotes the use of method-chaining. """ def inner(self, *args, **kwargs): clone = self.clone() # Assumes object implements `clone`. func(clone, *args, **kwargs) return clone inner.call_local = func # Provide a way to call without cloning. return inner def not_allowed(func): """ Method decorator to indicate a method is not allowed to be called. Will raise a `NotImplementedError`. """ def inner(self, *args, **kwargs): raise NotImplementedError('%s is not allowed on %s instances' % ( func, type(self).__name__)) return inner class Proxy(object): """ Proxy class useful for situations when you wish to defer the initialization of an object. 
""" __slots__ = ['obj', '_callbacks'] def __init__(self): self._callbacks = [] self.initialize(None) def initialize(self, obj): self.obj = obj for callback in self._callbacks: callback(obj) def attach_callback(self, callback): self._callbacks.append(callback) return callback def __getattr__(self, attr): if self.obj is None: raise AttributeError('Cannot use uninitialized Proxy.') return getattr(self.obj, attr) def __setattr__(self, attr, value): if attr not in self.__slots__: raise AttributeError('Cannot set attribute on proxy.') return super(Proxy, self).__setattr__(attr, value) class _CDescriptor(object): def __get__(self, instance, instance_type=None): if instance is not None: return Entity(instance._alias) return self # Classes representing the query tree. class Node(object): """Base-class for any part of a query which shall be composable.""" c = _CDescriptor() _node_type = 'node' def __init__(self): self._negated = False self._alias = None self._ordering = None # ASC or DESC. def clone_base(self): return type(self)() def clone(self): inst = self.clone_base() inst._negated = self._negated inst._alias = self._alias inst._ordering = self._ordering return inst @returns_clone def __invert__(self): self._negated = not self._negated @returns_clone def alias(self, a=None): self._alias = a @returns_clone def asc(self): self._ordering = 'ASC' @returns_clone def desc(self): self._ordering = 'DESC' def _e(op, inv=False): """ Lightweight factory which returns a method that builds an Expression consisting of the left-hand and right-hand operands, using `op`. """ def inner(self, rhs): if inv: return Expression(rhs, op, self) return Expression(self, op, rhs) return inner __and__ = _e(OP_AND) __or__ = _e(OP_OR) __add__ = _e(OP_ADD) __sub__ = _e(OP_SUB) __mul__ = _e(OP_MUL) __div__ = __truediv__ = _e(OP_DIV) __xor__ = _e(OP_XOR) __radd__ = _e(OP_ADD, inv=True) __rsub__ = _e(OP_SUB, inv=True) __rmul__ = _e(OP_MUL, inv=True) __rdiv__ = __rtruediv__ = _e(OP_DIV, inv=True) __rand__ = _e(OP_AND, inv=True) __ror__ = _e(OP_OR, inv=True) __rxor__ = _e(OP_XOR, inv=True) def __eq__(self, rhs): if rhs is None: return Expression(self, OP_IS, None) return Expression(self, OP_EQ, rhs) def __ne__(self, rhs): if rhs is None: return Expression(self, OP_IS_NOT, None) return Expression(self, OP_NE, rhs) __lt__ = _e(OP_LT) __le__ = _e(OP_LTE) __gt__ = _e(OP_GT) __ge__ = _e(OP_GTE) __lshift__ = _e(OP_IN) __rshift__ = _e(OP_IS) __mod__ = _e(OP_LIKE) __pow__ = _e(OP_ILIKE) bin_and = _e(OP_BIN_AND) bin_or = _e(OP_BIN_OR) # Special expressions. 
def in_(self, *rhs): return Expression(self, OP_IN, rhs) def not_in(self, *rhs): return Expression(self, OP_NOT_IN, rhs) def is_null(self, is_null=True): if is_null: return Expression(self, OP_IS, None) return Expression(self, OP_IS_NOT, None) def contains(self, rhs): return Expression(self, OP_ILIKE, '%%%s%%' % rhs) def startswith(self, rhs): return Expression(self, OP_ILIKE, '%s%%' % rhs) def endswith(self, rhs): return Expression(self, OP_ILIKE, '%%%s' % rhs) def between(self, low, high): return Expression(self, OP_BETWEEN, Clause(low, R('AND'), high)) def regexp(self, expression): return Expression(self, OP_REGEXP, expression) def concat(self, rhs): return Expression(self, OP_CONCAT, rhs) class Expression(Node): """A binary expression, e.g `foo + 1` or `bar < 7`.""" _node_type = 'expression' def __init__(self, lhs, op, rhs, flat=False): super(Expression, self).__init__() self.lhs = lhs self.op = op self.rhs = rhs self.flat = flat def clone_base(self): return Expression(self.lhs, self.op, self.rhs, self.flat) class DQ(Node): """A "django-style" filter expression, e.g. {'foo__eq': 'x'}.""" def __init__(self, **query): super(DQ, self).__init__() self.query = query def clone_base(self): return DQ(**self.query) class Param(Node): """ Arbitrary parameter passed into a query. Instructs the query compiler to specifically treat this value as a parameter, useful for `list` which is special-cased for `IN` lookups. """ _node_type = 'param' def __init__(self, value, conv=None): self.value = value self.conv = conv super(Param, self).__init__() def clone_base(self): return Param(self.value, self.conv) class Passthrough(Param): _node_type = 'passthrough' class SQL(Node): """An unescaped SQL string, with optional parameters.""" _node_type = 'sql' def __init__(self, value, *params): self.value = value self.params = params super(SQL, self).__init__() def clone_base(self): return SQL(self.value, *self.params) R = SQL # backwards-compat. class Func(Node): """An arbitrary SQL function call.""" _node_type = 'func' def __init__(self, name, *arguments): self.name = name self.arguments = arguments self._coerce = True super(Func, self).__init__() @returns_clone def coerce(self, coerce=True): self._coerce = coerce def clone_base(self): res = Func(self.name, *self.arguments) res._coerce = self._coerce return res def over(self, partition_by=None, order_by=None, window=None): if isinstance(partition_by, Window) and window is None: window = partition_by if window is None: sql = Window( partition_by=partition_by, order_by=order_by).__sql__() else: sql = SQL(window._alias) return Clause(self, SQL('OVER'), sql) def __getattr__(self, attr): def dec(*args, **kwargs): return Func(attr, *args, **kwargs) return dec # fn is a factory for creating `Func` objects and supports a more friendly # API. So instead of `Func("LOWER", param)`, `fn.LOWER(param)`. 
fn = Func(None) class Window(Node): def __init__(self, partition_by=None, order_by=None): super(Window, self).__init__() self.partition_by = partition_by self.order_by = order_by self._alias = self._alias or 'w' def __sql__(self): over_clauses = [] if self.partition_by: over_clauses.append(Clause( SQL('PARTITION BY'), CommaClause(*self.partition_by))) if self.order_by: over_clauses.append(Clause( SQL('ORDER BY'), CommaClause(*self.order_by))) return EnclosedClause(Clause(*over_clauses)) def clone_base(self): return Window(self.partition_by, self.order_by) class Clause(Node): """A SQL clause, one or more Node objects joined by spaces.""" _node_type = 'clause' glue = ' ' parens = False def __init__(self, *nodes): super(Clause, self).__init__() self.nodes = list(nodes) def clone_base(self): clone = Clause(*self.nodes) clone.glue = self.glue clone.parens = self.parens return clone class CommaClause(Clause): """One or more Node objects joined by commas, no parens.""" glue = ', ' class EnclosedClause(CommaClause): """One or more Node objects joined by commas and enclosed in parens.""" parens = True class Entity(Node): """A quoted-name or entity, e.g. "table"."column".""" _node_type = 'entity' def __init__(self, *path): super(Entity, self).__init__() self.path = path def clone_base(self): return Entity(*self.path) def __getattr__(self, attr): return Entity(*filter(None, self.path + (attr,))) class Check(SQL): """Check constraint, usage: `Check('price > 10')`.""" def __init__(self, value): super(Check, self).__init__('CHECK (%s)' % value) class _StripParens(Node): _node_type = 'strip_parens' def __init__(self, node): super(_StripParens, self).__init__() self.node = node JoinMetadata = namedtuple('JoinMetadata', ( 'source', 'target_attr', 'dest', 'to_field', 'related_name')) class Join(namedtuple('_Join', ('dest', 'join_type', 'on'))): def get_foreign_key(self, source, dest): fk_field = source._meta.rel_for_model(dest) if fk_field is not None: return fk_field, False reverse_rel = source._meta.reverse_rel_for_model(dest) if reverse_rel is not None: return reverse_rel, True return None, None def join_metadata(self, source): is_model_alias = isinstance(self.dest, ModelAlias) if is_model_alias: dest = self.dest.model_class else: dest = self.dest is_expr = isinstance(self.on, Expression) join_alias = is_expr and self.on._alias or None target_attr = to_field = related_name = None fk_field, is_backref = self.get_foreign_key(source, dest) if fk_field is not None: if is_backref: target_attr = dest._meta.db_table related_name = fk_field.related_name else: target_attr = fk_field.name to_field = fk_field.to_field.name elif is_expr and hasattr(self.on.lhs, 'name'): target_attr = self.on.lhs.name else: target_attr = dest._meta.db_table return JoinMetadata( source, join_alias or target_attr, self.dest, to_field, related_name) class FieldDescriptor(object): # Fields are exposed as descriptors in order to control access to the # underlying "raw" data. 
def __init__(self, field): self.field = field self.att_name = self.field.name def __get__(self, instance, instance_type=None): if instance is not None: return instance._data.get(self.att_name) return self.field def __set__(self, instance, value): instance._data[self.att_name] = value instance._dirty.add(self.att_name) class Field(Node): """A column on a table.""" _field_counter = 0 _order = 0 _node_type = 'field' db_field = 'unknown' def __init__(self, null=False, index=False, unique=False, verbose_name=None, help_text=None, db_column=None, default=None, choices=None, primary_key=False, sequence=None, constraints=None, schema=None): self.null = null self.index = index self.unique = unique self.verbose_name = verbose_name self.help_text = help_text self.db_column = db_column self.default = default self.choices = choices # Used for metadata purposes, not enforced. self.primary_key = primary_key self.sequence = sequence # Name of sequence, e.g. foo_id_seq. self.constraints = constraints # List of column constraints. self.schema = schema # Name of schema, e.g. 'public'. # Used internally for recovering the order in which Fields were defined # on the Model class. Field._field_counter += 1 self._order = Field._field_counter self._sort_key = (self.primary_key and 1 or 2), self._order self._is_bound = False # Whether the Field is "bound" to a Model. super(Field, self).__init__() def clone_base(self, **kwargs): inst = type(self)( null=self.null, index=self.index, unique=self.unique, verbose_name=self.verbose_name, help_text=self.help_text, db_column=self.db_column, default=self.default, choices=self.choices, primary_key=self.primary_key, sequence=self.sequence, constraints=self.constraints, schema=self.schema, **kwargs) if self._is_bound: inst.name = self.name inst.model_class = self.model_class inst._is_bound = self._is_bound return inst def add_to_class(self, model_class, name): """ Hook that replaces the `Field` attribute on a class with a named `FieldDescriptor`. Called by the metaclass during construction of the `Model`. """ self.name = name self.model_class = model_class self.db_column = self.db_column or self.name if not self.verbose_name: self.verbose_name = re.sub('_+', ' ', name).title() model_class._meta.fields[self.name] = self model_class._meta.columns[self.db_column] = self setattr(model_class, name, FieldDescriptor(self)) self._is_bound = True def get_database(self): return self.model_class._meta.database def get_column_type(self): field_type = self.get_db_field() return self.get_database().compiler().get_column_type(field_type) def get_db_field(self): return self.db_field def get_modifiers(self): return None def coerce(self, value): return value def db_value(self, value): """Convert the python value for storage in the database.""" return value if value is None else self.coerce(value) def python_value(self, value): """Convert the database value to a pythonic value.""" return value if value is None else self.coerce(value) def _as_entity(self, with_table=False): if with_table: return Entity(self.model_class._meta.db_table, self.db_column) return Entity(self.db_column) def __ddl_column__(self, column_type): """Return the column type, e.g. 
VARCHAR(255) or REAL.""" modifiers = self.get_modifiers() if modifiers: return SQL( '%s(%s)' % (column_type, ', '.join(map(str, modifiers)))) return SQL(column_type) def __ddl__(self, column_type): """Return a list of Node instances that defines the column.""" ddl = [self._as_entity(), self.__ddl_column__(column_type)] if not self.null: ddl.append(SQL('NOT NULL')) if self.primary_key: ddl.append(SQL('PRIMARY KEY')) if self.sequence: ddl.append(SQL("DEFAULT NEXTVAL('%s')" % self.sequence)) if self.constraints: ddl.extend(self.constraints) return ddl def __hash__(self): return hash(self.name + '.' + self.model_class.__name__) class BareField(Field): db_field = 'bare' class IntegerField(Field): db_field = 'int' coerce = int class BigIntegerField(IntegerField): db_field = 'bigint' class PrimaryKeyField(IntegerField): db_field = 'primary_key' def __init__(self, *args, **kwargs): kwargs['primary_key'] = True super(PrimaryKeyField, self).__init__(*args, **kwargs) class FloatField(Field): db_field = 'float' coerce = float class DoubleField(FloatField): db_field = 'double' class DecimalField(Field): db_field = 'decimal' def __init__(self, max_digits=10, decimal_places=5, auto_round=False, rounding=None, *args, **kwargs): self.max_digits = max_digits self.decimal_places = decimal_places self.auto_round = auto_round self.rounding = rounding or decimal.DefaultContext.rounding super(DecimalField, self).__init__(*args, **kwargs) def clone_base(self, **kwargs): return super(DecimalField, self).clone_base( max_digits=self.max_digits, decimal_places=self.decimal_places, auto_round=self.auto_round, rounding=self.rounding, **kwargs) def get_modifiers(self): return [self.max_digits, self.decimal_places] def db_value(self, value): D = decimal.Decimal if not value: return value if value is None else D(0) if self.auto_round: exp = D(10) ** (-self.decimal_places) rounding = self.rounding return D(str(value)).quantize(exp, rounding=rounding) return value def python_value(self, value): if value is not None: if isinstance(value, decimal.Decimal): return value return decimal.Decimal(str(value)) def coerce_to_unicode(s, encoding='utf-8'): if isinstance(s, unicode_type): return s elif isinstance(s, string_type): return s.decode(encoding) return unicode_type(s) class CharField(Field): db_field = 'string' def __init__(self, max_length=255, *args, **kwargs): self.max_length = max_length super(CharField, self).__init__(*args, **kwargs) def clone_base(self, **kwargs): return super(CharField, self).clone_base( max_length=self.max_length, **kwargs) def get_modifiers(self): return self.max_length and [self.max_length] or None def coerce(self, value): return coerce_to_unicode(value or '') class TextField(Field): db_field = 'text' def coerce(self, value): return coerce_to_unicode(value or '') class BlobField(Field): db_field = 'blob' def db_value(self, value): if isinstance(value, basestring): return binary_construct(value) return value class UUIDField(Field): db_field = 'uuid' def db_value(self, value): return None if value is None else str(value) def python_value(self, value): return None if value is None else uuid.UUID(value) def format_date_time(value, formats, post_process=None): post_process = post_process or (lambda x: x) for fmt in formats: try: return post_process(datetime.datetime.strptime(value, fmt)) except ValueError: pass return value def _date_part(date_part): def dec(self): return self.model_class._meta.database.extract_date(date_part, self) return dec class _BaseFormattedField(Field): formats = None def 
__init__(self, formats=None, *args, **kwargs): if formats is not None: self.formats = formats super(_BaseFormattedField, self).__init__(*args, **kwargs) def clone_base(self, **kwargs): return super(_BaseFormattedField, self).clone_base( formats=self.formats, **kwargs) class DateTimeField(_BaseFormattedField): db_field = 'datetime' formats = [ '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d', ] def python_value(self, value): if value and isinstance(value, basestring): return format_date_time(value, self.formats) return value year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) class DateField(_BaseFormattedField): db_field = 'date' formats = [ '%Y-%m-%d', '%Y-%m-%d %H:%M:%S', '%Y-%m-%d %H:%M:%S.%f', ] def python_value(self, value): if value and isinstance(value, basestring): pp = lambda x: x.date() return format_date_time(value, self.formats, pp) elif value and isinstance(value, datetime.datetime): return value.date() return value year = property(_date_part('year')) month = property(_date_part('month')) day = property(_date_part('day')) class TimeField(_BaseFormattedField): db_field = 'time' formats = [ '%H:%M:%S.%f', '%H:%M:%S', '%H:%M', '%Y-%m-%d %H:%M:%S.%f', '%Y-%m-%d %H:%M:%S', ] def python_value(self, value): if value and isinstance(value, basestring): pp = lambda x: x.time() return format_date_time(value, self.formats, pp) elif value and isinstance(value, datetime.datetime): return value.time() return value hour = property(_date_part('hour')) minute = property(_date_part('minute')) second = property(_date_part('second')) class BooleanField(Field): db_field = 'bool' coerce = bool class RelationDescriptor(FieldDescriptor): """Foreign-key abstraction to replace a related PK with a related model.""" def __init__(self, field, rel_model): self.rel_model = rel_model super(RelationDescriptor, self).__init__(field) def get_object_or_id(self, instance): rel_id = instance._data.get(self.att_name) if rel_id is not None or self.att_name in instance._obj_cache: if self.att_name not in instance._obj_cache: obj = self.rel_model.get(self.field.to_field == rel_id) instance._obj_cache[self.att_name] = obj return instance._obj_cache[self.att_name] elif not self.field.null: raise self.rel_model.DoesNotExist return rel_id def __get__(self, instance, instance_type=None): if instance is not None: return self.get_object_or_id(instance) return self.field def __set__(self, instance, value): if isinstance(value, self.rel_model): instance._data[self.att_name] = getattr( value, self.field.to_field.name) instance._obj_cache[self.att_name] = value else: orig_value = instance._data.get(self.att_name) instance._data[self.att_name] = value if orig_value != value and self.att_name in instance._obj_cache: del instance._obj_cache[self.att_name] instance._dirty.add(self.att_name) class ReverseRelationDescriptor(object): """Back-reference to expose related objects as a `SelectQuery`.""" def __init__(self, field): self.field = field self.rel_model = field.model_class def __get__(self, instance, instance_type=None): if instance is not None: return self.rel_model.select().where( self.field == getattr(instance, self.field.to_field.name)) return self class ForeignKeyField(IntegerField): def __init__(self, rel_model, related_name=None, on_delete=None, on_update=None, extra=None, to_field=None, *args, **kwargs): if rel_model != 'self' and not 
isinstance(rel_model, Proxy) and not \ issubclass(rel_model, Model): raise TypeError('Unexpected value for `rel_model`. Expected ' '`Model`, `Proxy` or "self"') self.rel_model = rel_model self._related_name = related_name self.deferred = isinstance(rel_model, Proxy) self.on_delete = on_delete self.on_update = on_update self.extra = extra self.to_field = to_field super(ForeignKeyField, self).__init__(*args, **kwargs) def clone_base(self, **kwargs): return super(ForeignKeyField, self).clone_base( rel_model=self.rel_model, related_name=self.related_name, on_delete=self.on_delete, on_update=self.on_update, extra=self.extra, to_field=self.to_field, **kwargs) def _get_descriptor(self): return RelationDescriptor(self, self.rel_model) def _get_backref_descriptor(self): return ReverseRelationDescriptor(self) def _get_related_name(self): return self._related_name or ('%s_set' % self.model_class._meta.name) def add_to_class(self, model_class, name): if isinstance(self.rel_model, Proxy): def callback(rel_model): self.rel_model = rel_model self.add_to_class(model_class, name) self.rel_model.attach_callback(callback) return self.name = name self.model_class = model_class self.db_column = self.db_column or '%s_id' % self.name if not self.verbose_name: self.verbose_name = re.sub('_+', ' ', name).title() model_class._meta.fields[self.name] = self model_class._meta.columns[self.db_column] = self self.related_name = self._get_related_name() if self.rel_model == 'self': self.rel_model = self.model_class if self.to_field is not None: if not isinstance(self.to_field, Field): self.to_field = getattr(self.rel_model, self.to_field) else: self.to_field = self.rel_model._meta.primary_key if model_class._meta.validate_backrefs: if self.related_name in self.rel_model._meta.fields: error = ('Foreign key: %s.%s related name "%s" collision with ' 'model field of the same name.') raise AttributeError(error % ( self.model_class._meta.name, self.name, self.related_name)) if self.related_name in self.rel_model._meta.reverse_rel: error = ('Foreign key: %s.%s related name "%s" collision with ' 'foreign key using same related_name.') raise AttributeError(error % ( self.model_class._meta.name, self.name, self.related_name)) setattr(model_class, name, self._get_descriptor()) setattr(self.rel_model, self.related_name, self._get_backref_descriptor()) self._is_bound = True model_class._meta.rel[self.name] = self self.rel_model._meta.reverse_rel[self.related_name] = self def get_db_field(self): """ Overridden to ensure Foreign Keys use same column type as the primary key they point to. 
""" if not isinstance(self.to_field, PrimaryKeyField): return self.to_field.get_db_field() return super(ForeignKeyField, self).get_db_field() def get_modifiers(self): if not isinstance(self.to_field, PrimaryKeyField): return self.to_field.get_modifiers() return super(ForeignKeyField, self).get_modifiers() def coerce(self, value): return self.to_field.coerce(value) def db_value(self, value): if isinstance(value, self.rel_model): value = value._get_pk_value() return self.to_field.db_value(value) class CompositeKey(object): """A primary key composed of multiple columns.""" sequence = None def __init__(self, *field_names): self.field_names = field_names def add_to_class(self, model_class, name): self.name = name self.model_class = model_class setattr(model_class, name, self) def __get__(self, instance, instance_type=None): if instance is not None: return tuple([getattr(instance, field_name) for field_name in self.field_names]) return self def __set__(self, instance, value): pass def __eq__(self, other): expressions = [(self.model_class._meta.fields[field] == value) for field, value in zip(self.field_names, other)] return reduce(operator.and_, expressions) class AliasMap(object): prefix = 't' def __init__(self): self._alias_map = {} self._counter = 0 def __repr__(self): return '<AliasMap: %s>' % self._alias_map def add(self, obj, alias=None): if obj in self._alias_map: return self._counter += 1 self._alias_map[obj] = alias or '%s%s' % (self.prefix, self._counter) def __getitem__(self, obj): if obj not in self._alias_map: self.add(obj) return self._alias_map[obj] def __contains__(self, obj): return obj in self._alias_map def update(self, alias_map): if alias_map: for obj, alias in alias_map._alias_map.items(): if obj not in self: self._alias_map[obj] = alias return self class QueryCompiler(object): # Mapping of `db_type` to actual column type used by database driver. # Database classes may provide additional column types or overrides. field_map = { 'bare': '', 'bigint': 'BIGINT', 'blob': 'BLOB', 'bool': 'SMALLINT', 'date': 'DATE', 'datetime': 'DATETIME', 'decimal': 'DECIMAL', 'double': 'REAL', 'float': 'REAL', 'int': 'INTEGER', 'primary_key': 'INTEGER', 'string': 'VARCHAR', 'text': 'TEXT', 'time': 'TIME', } # Mapping of OP_ to actual SQL operation. For most databases this will be # the same, but some column types or databases may support additional ops. # Like `field_map`, Database classes may extend or override these. op_map = { OP_EQ: '=', OP_LT: '<', OP_LTE: '<=', OP_GT: '>', OP_GTE: '>=', OP_NE: '!=', OP_IN: 'IN', OP_NOT_IN: 'NOT IN', OP_IS: 'IS', OP_IS_NOT: 'IS NOT', OP_BIN_AND: '&', OP_BIN_OR: '|', OP_LIKE: 'LIKE', OP_ILIKE: 'ILIKE', OP_BETWEEN: 'BETWEEN', OP_ADD: '+', OP_SUB: '-', OP_MUL: '*', OP_DIV: '/', OP_XOR: '#', OP_AND: 'AND', OP_OR: 'OR', OP_MOD: '%', OP_REGEXP: 'REGEXP', OP_CONCAT: '||', } join_map = { JOIN_INNER: 'INNER', JOIN_LEFT_OUTER: 'LEFT OUTER', JOIN_RIGHT_OUTER: 'RIGHT OUTER', JOIN_FULL: 'FULL', } alias_map_class = AliasMap def __init__(self, quote_char='"', interpolation='?', field_overrides=None, op_overrides=None): self.quote_char = quote_char self.interpolation = interpolation self._field_map = merge_dict(self.field_map, field_overrides or {}) self._op_map = merge_dict(self.op_map, op_overrides or {}) self._parse_map = self.get_parse_map() self._unknown_types = set(['param']) def get_parse_map(self): # To avoid O(n) lookups when parsing nodes, use a lookup table for # common node types O(1). 
return { 'expression': self._parse_expression, 'param': self._parse_param, 'passthrough': self._parse_param, 'func': self._parse_func, 'clause': self._parse_clause, 'entity': self._parse_entity, 'field': self._parse_field, 'sql': self._parse_sql, 'select_query': self._parse_select_query, 'compound_select_query': self._parse_compound_select_query, 'strip_parens': self._parse_strip_parens, } def quote(self, s): return '%s%s%s' % (self.quote_char, s, self.quote_char) def get_column_type(self, f): return self._field_map[f] def get_op(self, q): return self._op_map[q] def _sorted_fields(self, field_dict): return sorted(field_dict.items(), key=lambda i: i[0]._sort_key) def _clean_extra_parens(self, s): # Quick sanity check. if not s or s[0] != '(': return s ct = i = 0 l = len(s) while i < l: if s[i] == '(' and s[l - 1] == ')': ct += 1 i += 1 l -= 1 else: break if ct: # If we ever end up with negatively-balanced parentheses, then we # know that one of the outer parentheses was required. unbalanced_ct = 0 required = 0 for i in range(ct, l - ct): if s[i] == '(': unbalanced_ct += 1 elif s[i] == ')': unbalanced_ct -= 1 if unbalanced_ct < 0: required += 1 unbalanced_ct = 0 if required == ct: break ct -= required if ct > 0: return s[ct:-ct] return s def _parse_default(self, node, alias_map, conv): return self.interpolation, [node] def _parse_expression(self, node, alias_map, conv): if isinstance(node.lhs, Field): conv = node.lhs lhs, lparams = self.parse_node(node.lhs, alias_map, conv) rhs, rparams = self.parse_node(node.rhs, alias_map, conv) template = '%s %s %s' if node.flat else '(%s %s %s)' sql = template % (lhs, self.get_op(node.op), rhs) return sql, lparams + rparams def _parse_param(self, node, alias_map, conv): if node.conv: params = [node.conv(node.value)] else: params = [node.value] return self.interpolation, params def _parse_func(self, node, alias_map, conv): conv = node._coerce and conv or None sql, params = self.parse_node_list(node.arguments, alias_map, conv) return '%s(%s)' % (node.name, self._clean_extra_parens(sql)), params def _parse_clause(self, node, alias_map, conv): sql, params = self.parse_node_list( node.nodes, alias_map, conv, node.glue) if node.parens: sql = '(%s)' % self._clean_extra_parens(sql) return sql, params def _parse_entity(self, node, alias_map, conv): return '.'.join(map(self.quote, node.path)), [] def _parse_sql(self, node, alias_map, conv): return node.value, list(node.params) def _parse_field(self, node, alias_map, conv): if alias_map: sql = '.'.join(( self.quote(alias_map[node.model_class]), self.quote(node.db_column))) else: sql = self.quote(node.db_column) return sql, [] def _parse_compound_select_query(self, node, alias_map, conv): l, lp = self.generate_select(node.lhs, alias_map) r, rp = self.generate_select(node.rhs, alias_map) sql = '(%s %s %s)' % (l, node.operator, r) return sql, lp + rp def _parse_select_query(self, node, alias_map, conv): clone = node.clone() if not node._explicit_selection: if conv and isinstance(conv, ForeignKeyField): select_field = conv.to_field else: select_field = clone.model_class._meta.primary_key clone._select = (select_field,) sub, params = self.generate_select(clone, alias_map) return '(%s)' % self._clean_extra_parens(sub), params def _parse_strip_parens(self, node, alias_map, conv): sql, params = self.parse_node(node.node, alias_map, conv) return self._clean_extra_parens(sql), params def _parse(self, node, alias_map, conv): # By default treat the incoming node as a raw value that should be # parameterized. 
node_type = getattr(node, '_node_type', None) unknown = False if node_type in self._parse_map: sql, params = self._parse_map[node_type](node, alias_map, conv) unknown = node_type in self._unknown_types elif isinstance(node, (list, tuple)): # If you're wondering how to pass a list into your query, simply # wrap it in Param(). sql, params = self.parse_node_list(node, alias_map, conv) sql = '(%s)' % sql elif isinstance(node, Model): sql = self.interpolation if conv and isinstance(conv, ForeignKeyField): params = [ conv.to_field.db_value(getattr(node, conv.to_field.name))] else: params = [node._get_pk_value()] elif (isclass(node) and issubclass(node, Model)) or \ isinstance(node, ModelAlias): entity = node._as_entity().alias(alias_map[node]) sql, params = self.parse_node(entity, alias_map, conv) else: sql, params = self._parse_default(node, alias_map, conv) unknown = True return sql, params, unknown def parse_node(self, node, alias_map=None, conv=None): sql, params, unknown = self._parse(node, alias_map, conv) if unknown and conv and params: params = [conv.db_value(i) for i in params] if isinstance(node, Node): if node._negated: sql = 'NOT %s' % sql if node._alias: sql = ' '.join((sql, 'AS', node._alias)) if node._ordering: sql = ' '.join((sql, node._ordering)) return sql, params def parse_node_list(self, nodes, alias_map, conv=None, glue=', '): sql = [] params = [] for node in nodes: node_sql, node_params = self.parse_node(node, alias_map, conv) sql.append(node_sql) params.extend(node_params) return glue.join(sql), params def calculate_alias_map(self, query, alias_map=None): new_map = self.alias_map_class() if alias_map is not None: new_map._counter = alias_map._counter new_map.add(query.model_class, query.model_class._meta.table_alias) for src_model, joined_models in query._joins.items(): new_map.add(src_model, src_model._meta.table_alias) for join_obj in joined_models: if isinstance(join_obj.dest, Node): new_map.add(join_obj.dest, join_obj.dest.alias) else: new_map.add(join_obj.dest, join_obj.dest._meta.table_alias) return new_map.update(alias_map) def build_query(self, clauses, alias_map=None): return self.parse_node(Clause(*clauses), alias_map) def generate_joins(self, joins, model_class, alias_map): # Joins are implemented as an adjancency-list graph. Perform a # depth-first search of the graph to generate all the necessary JOINs. clauses = [] seen = set() q = [model_class] while q: curr = q.pop() if curr not in joins or curr in seen: continue seen.add(curr) for join in joins[curr]: src = curr dest = join.dest if isinstance(join.on, Expression): # Clear any alias on the join expression. constraint = join.on.clone().alias() else: field = src._meta.rel_for_model(dest, join.on) if field: left_field = field right_field = field.to_field else: field = dest._meta.rel_for_model(src, join.on) left_field = field.to_field right_field = field constraint = (left_field == right_field) if isinstance(dest, Node): # TODO: ensure alias? 
dest_n = dest else: q.append(dest) dest_n = dest._as_entity().alias(alias_map[dest]) join_type = self.join_map[join.join_type or JOIN_INNER] join_stmt = SQL('%s JOIN' % (join_type)) clauses.append( Clause(join_stmt, dest_n, SQL('ON'), constraint)) return clauses def generate_select(self, query, alias_map=None): model = query.model_class db = model._meta.database alias_map = self.calculate_alias_map(query, alias_map) if isinstance(query, CompoundSelect): clauses = [_StripParens(query)] else: if not query._distinct: clauses = [SQL('SELECT')] else: clauses = [SQL('SELECT DISTINCT')] if query._distinct not in (True, False): clauses += [SQL('ON'), EnclosedClause(*query._distinct)] select_clause = Clause(*query._select) select_clause.glue = ', ' clauses.extend((select_clause, SQL('FROM'))) if query._from is None: clauses.append(model._as_entity().alias(alias_map[model])) else: clauses.append(CommaClause(*query._from)) if query._windows is not None: clauses.append(SQL('WINDOW')) clauses.append(CommaClause(*[ Clause( SQL(window._alias), SQL('AS'), window.__sql__()) for window in query._windows])) join_clauses = self.generate_joins(query._joins, model, alias_map) if join_clauses: clauses.extend(join_clauses) if query._where is not None: clauses.extend([SQL('WHERE'), query._where]) if query._group_by: clauses.extend([SQL('GROUP BY'), CommaClause(*query._group_by)]) if query._having: clauses.extend([SQL('HAVING'), query._having]) if query._order_by: clauses.extend([SQL('ORDER BY'), CommaClause(*query._order_by)]) if query._limit or (query._offset and db.limit_max): limit = query._limit or db.limit_max clauses.append(SQL('LIMIT %s' % limit)) if query._offset: clauses.append(SQL('OFFSET %s' % query._offset)) for_update, no_wait = query._for_update if for_update: stmt = 'FOR UPDATE NOWAIT' if no_wait else 'FOR UPDATE' clauses.append(SQL(stmt)) return self.build_query(clauses, alias_map) def generate_update(self, query): model = query.model_class alias_map = self.alias_map_class() alias_map.add(model, model._meta.db_table) clauses = [SQL('UPDATE'), model._as_entity(), SQL('SET')] update = [] for field, value in self._sorted_fields(query._update): if not isinstance(value, (Node, Model)): value = Param(value, conv=field.db_value) update.append(Expression( field._as_entity(with_table=False), OP_EQ, value, flat=True)) # No outer parens, no table alias. clauses.append(CommaClause(*update)) if query._where: clauses.extend([SQL('WHERE'), query._where]) return self.build_query(clauses, alias_map) def _get_field_clause(self, fields): return EnclosedClause(*[ field._as_entity(with_table=False) for field in fields]) def generate_insert(self, query): model = query.model_class alias_map = self.alias_map_class() alias_map.add(model, model._meta.db_table) statement = query._upsert and 'INSERT OR REPLACE INTO' or 'INSERT INTO' clauses = [SQL(statement), model._as_entity()] if query._query is not None: # This INSERT query is of the form INSERT INTO ... SELECT FROM. 
if query._fields: clauses.append(self._get_field_clause(query._fields)) clauses.append(_StripParens(query._query)) elif query._rows is not None: fields, value_clauses = [], [] have_fields = False for row_dict in query._iter_rows(): if not have_fields: fields = sorted( row_dict.keys(), key=operator.attrgetter('_sort_key')) have_fields = True values = [] for field in fields: value = row_dict[field] if not isinstance(value, (Node, Model)): value = Param(value, conv=field.db_value) values.append(value) value_clauses.append(EnclosedClause(*values)) if fields: clauses.extend([ self._get_field_clause(fields), SQL('VALUES'), CommaClause(*value_clauses)]) return self.build_query(clauses, alias_map) def generate_delete(self, query): model = query.model_class clauses = [SQL('DELETE FROM'), model._as_entity()] if query._where: clauses.extend([SQL('WHERE'), query._where]) return self.build_query(clauses) def field_definition(self, field): column_type = self.get_column_type(field.get_db_field()) ddl = field.__ddl__(column_type) return Clause(*ddl) def foreign_key_constraint(self, field): ddl = [ SQL('FOREIGN KEY'), EnclosedClause(field._as_entity()), SQL('REFERENCES'), field.rel_model._as_entity(), EnclosedClause(field.to_field._as_entity())] if field.on_delete: ddl.append(SQL('ON DELETE %s' % field.on_delete)) if field.on_update: ddl.append(SQL('ON UPDATE %s' % field.on_update)) return Clause(*ddl) def return_parsed_node(function_name): # TODO: treat all `generate_` functions as returning clauses, instead # of SQL/params. def inner(self, *args, **kwargs): fn = getattr(self, function_name) return self.parse_node(fn(*args, **kwargs)) return inner def _create_foreign_key(self, model_class, field, constraint=None): constraint = constraint or 'fk_%s_%s_refs_%s' % ( model_class._meta.db_table, field.db_column, field.rel_model._meta.db_table) fk_clause = self.foreign_key_constraint(field) return Clause( SQL('ALTER TABLE'), model_class._as_entity(), SQL('ADD CONSTRAINT'), Entity(constraint), *fk_clause.nodes) create_foreign_key = return_parsed_node('_create_foreign_key') def _create_table(self, model_class, safe=False): statement = 'CREATE TABLE IF NOT EXISTS' if safe else 'CREATE TABLE' meta = model_class._meta columns, constraints = [], [] if isinstance(meta.primary_key, CompositeKey): pk_cols = [meta.fields[f]._as_entity() for f in meta.primary_key.field_names] constraints.append(Clause( SQL('PRIMARY KEY'), EnclosedClause(*pk_cols))) for field in meta.get_fields(): columns.append(self.field_definition(field)) if isinstance(field, ForeignKeyField) and not field.deferred: constraints.append(self.foreign_key_constraint(field)) return Clause( SQL(statement), model_class._as_entity(), EnclosedClause(*(columns + constraints))) create_table = return_parsed_node('_create_table') def _drop_table(self, model_class, fail_silently=False, cascade=False): statement = 'DROP TABLE IF EXISTS' if fail_silently else 'DROP TABLE' ddl = [SQL(statement), model_class._as_entity()] if cascade: ddl.append(SQL('CASCADE')) return Clause(*ddl) drop_table = return_parsed_node('_drop_table') def index_name(self, table, columns): index = '%s_%s' % (table, '_'.join(columns)) if len(index) > 64: index_hash = hashlib.md5(index.encode('utf-8')).hexdigest() index = '%s_%s' % (table, index_hash) return index def _create_index(self, model_class, fields, unique, *extra): tbl_name = model_class._meta.db_table statement = 'CREATE UNIQUE INDEX' if unique else 'CREATE INDEX' index_name = self.index_name(tbl_name, [f.db_column for f in fields]) 
        return Clause(
            SQL(statement),
            Entity(index_name),
            SQL('ON'),
            model_class._as_entity(),
            EnclosedClause(*[field._as_entity() for field in fields]),
            *extra)
    create_index = return_parsed_node('_create_index')

    def _create_sequence(self, sequence_name):
        return Clause(SQL('CREATE SEQUENCE'), Entity(sequence_name))
    create_sequence = return_parsed_node('_create_sequence')

    def _drop_sequence(self, sequence_name):
        return Clause(SQL('DROP SEQUENCE'), Entity(sequence_name))
    drop_sequence = return_parsed_node('_drop_sequence')


class QueryResultWrapper(object):
    """
    Provides an iterator over the results of a raw Query, additionally doing
    two things:
    - converts rows from the database into python representations
    - ensures that multiple iterations do not result in multiple queries
    """
    def __init__(self, model, cursor, meta=None):
        self.model = model
        self.cursor = cursor
        self.__ct = 0
        self.__idx = 0
        self._result_cache = []
        self._populated = False
        self._initialized = False
        if meta is not None:
            self.column_meta, self.join_meta = meta
        else:
            self.column_meta = self.join_meta = None

    def __iter__(self):
        self.__idx = 0
        if not self._populated:
            return self
        else:
            return iter(self._result_cache)

    def process_row(self, row):
        return row

    def iterate(self):
        row = self.cursor.fetchone()
        if not row:
            self._populated = True
            if not getattr(self.cursor, 'name', None):
                self.cursor.close()
            raise StopIteration
        elif not self._initialized:
            self.initialize(self.cursor.description)
            self._initialized = True
        return self.process_row(row)

    def iterator(self):
        while True:
            yield self.iterate()

    def next(self):
        if self.__idx < self.__ct:
            inst = self._result_cache[self.__idx]
            self.__idx += 1
            return inst
        obj = self.iterate()
        self._result_cache.append(obj)
        self.__ct += 1
        self.__idx += 1
        return obj
    __next__ = next

    def fill_cache(self, n=None):
        n = n or float('Inf')
        if n < 0:
            raise ValueError('Negative values are not supported.')
        self.__idx = self.__ct
        while not self._populated and (n > self.__ct):
            try:
                self.next()
            except StopIteration:
                break


class ExtQueryResultWrapper(QueryResultWrapper):
    def initialize(self, description):
        model = self.model
        conv = []
        identity = lambda x: x
        for i in range(len(description)):
            func = identity
            column = description[i][0]
            found = False
            if self.column_meta is not None:
                try:
                    select_column = self.column_meta[i]
                except IndexError:
                    pass
                else:
                    if isinstance(select_column, Field):
                        func = select_column.python_value
                        column = select_column._alias or select_column.name
                        found = True
                    elif (isinstance(select_column, Func) and
                            len(select_column.arguments) and
                            isinstance(select_column.arguments[0], Field)):
                        if select_column._coerce:
                            # Special-case handling aggregations.
func = select_column.arguments[0].python_value found = True if not found and column in model._meta.columns: field_obj = model._meta.columns[column] column = field_obj.name func = field_obj.python_value conv.append((i, column, func)) self.conv = conv class TuplesQueryResultWrapper(ExtQueryResultWrapper): def process_row(self, row): return tuple([self.conv[i][2](col) for i, col in enumerate(row)]) class NaiveQueryResultWrapper(ExtQueryResultWrapper): def process_row(self, row): instance = self.model() for i, column, func in self.conv: setattr(instance, column, func(row[i])) instance._prepare_instance() return instance class DictQueryResultWrapper(ExtQueryResultWrapper): def process_row(self, row): res = {} for i, column, func in self.conv: res[column] = func(row[i]) return res class ModelQueryResultWrapper(QueryResultWrapper): def initialize(self, description): self.column_map, model_set = self.generate_column_map() self.join_list = self.generate_join_list(model_set) def generate_column_map(self): column_map = [] models = set([self.model]) for i, node in enumerate(self.column_meta): attr = conv = None if isinstance(node, Field): if isinstance(node, FieldProxy): key = node._model_alias constructor = node.model else: key = constructor = node.model_class attr = node.name conv = node.python_value else: key = constructor = self.model if isinstance(node, Expression) and node._alias: attr = node._alias column_map.append((key, constructor, attr, conv)) models.add(key) return column_map, models def generate_join_list(self, models): join_list = [] joins = self.join_meta stack = [self.model] while stack: current = stack.pop() if current not in joins: continue for join in joins[current]: if join.dest in models: join_list.append(join.join_metadata(current)) stack.append(join.dest) return join_list def process_row(self, row): collected = self.construct_instances(row) instances = self.follow_joins(collected) for i in instances: i._prepare_instance() return instances[0] def construct_instances(self, row, keys=None): collected_models = {} for i, (key, constructor, attr, conv) in enumerate(self.column_map): if keys is not None and key not in keys: continue value = row[i] if key not in collected_models: collected_models[key] = constructor() instance = collected_models[key] if attr is None: attr = self.cursor.description[i][0] if conv is not None: value = conv(value) setattr(instance, attr, value) return collected_models def follow_joins(self, collected): prepared = [collected[self.model]] for (lhs, attr, rhs, to_field, related_name) in self.join_list: inst = collected[lhs] joined_inst = collected[rhs] # Can we populate a value on the joined instance using the current? if to_field is not None and attr in inst._data: if getattr(joined_inst, to_field) is None: setattr(joined_inst, to_field, inst._data[attr]) setattr(inst, attr, joined_inst) prepared.append(joined_inst) return prepared class AggregateQueryResultWrapper(ModelQueryResultWrapper): def __init__(self, *args, **kwargs): self._row = [] super(AggregateQueryResultWrapper, self).__init__(*args, **kwargs) def initialize(self, description): super(AggregateQueryResultWrapper, self).initialize(description) # Collect the set of all models queried. self.all_models = set() for key, _, _, _ in self.column_map: self.all_models.add(key) # Prepare data structure for analyzing unique rows. 
self.models_with_aggregate = set() self.back_references = {} for (src_model, _, dest_model, _, related_name) in self.join_list: if related_name: self.models_with_aggregate.add(src_model) self.back_references[dest_model] = (src_model, related_name) self.columns_to_compare = {} for idx, (_, model_class, col_name, _) in enumerate(self.column_map): if model_class in self.models_with_aggregate: self.columns_to_compare.setdefault(model_class, []) self.columns_to_compare[model_class].append((idx, col_name)) def read_model_data(self, row): models = {} for model_class, column_data in self.columns_to_compare.items(): models[model_class] = [] for idx, col_name in column_data: models[model_class].append(row[idx]) return models def iterate(self): if self._row: row = self._row.pop() else: row = self.cursor.fetchone() if not row: self._populated = True if not getattr(self.cursor, 'name', None): self.cursor.close() raise StopIteration elif not self._initialized: self.initialize(self.cursor.description) self._initialized = True def _get_pk(instance): if isinstance(instance._meta.primary_key, CompositeKey): return tuple([ instance._data[field_name] for field_name in instance._meta.primary_key.field_names]) return instance._get_pk_value() identity_map = {} _constructed = self.construct_instances(row) primary_instance = _constructed[self.model] for model_class, instance in _constructed.items(): identity_map[model_class] = OrderedDict() identity_map[model_class][_get_pk(instance)] = instance model_data = self.read_model_data(row) while True: cur_row = self.cursor.fetchone() if cur_row is None: break duplicate_models = set() cur_row_data = self.read_model_data(cur_row) for model_class, data in cur_row_data.items(): if model_data[model_class] == data: duplicate_models.add(model_class) if not duplicate_models: self._row.append(cur_row) break different_models = self.all_models - duplicate_models new_instances = self.construct_instances(cur_row, different_models) for model_class, instance in new_instances.items(): # Do not include any instances which are comprised solely of # NULL values. 
pk_value = _get_pk(instance) if [val for val in instance._data.values() if val is not None]: identity_map[model_class][pk_value] = instance stack = [self.model] instances = [primary_instance] while stack: current = stack.pop() if current not in self.join_meta: continue for join in self.join_meta[current]: foreign_key = current._meta.rel_for_model(join.dest, join.on) if foreign_key: if join.dest not in identity_map: continue for pk, instance in identity_map[current].items(): joined_inst = identity_map[join.dest][ instance._data[foreign_key.name]] setattr(instance, foreign_key.name, joined_inst) instances.append(joined_inst) else: if not isinstance(join.dest, Node): backref = current._meta.reverse_rel_for_model( join.dest, join.on) if not backref: continue else: continue attr_name = backref.related_name for instance in identity_map[current].values(): setattr(instance, attr_name, []) if join.dest not in identity_map: continue for pk, instance in identity_map[join.dest].items(): if pk is None: continue try: joined_inst = identity_map[current][ instance._data[backref.name]] except KeyError: continue getattr(joined_inst, attr_name).append(instance) instances.append(instance) stack.append(join.dest) for instance in instances: instance._prepare_instance() return primary_instance class Query(Node): """Base class representing a database query on one or more tables.""" require_commit = True def __init__(self, model_class): super(Query, self).__init__() self.model_class = model_class self.database = model_class._meta.database self._dirty = True self._query_ctx = model_class self._joins = {self.model_class: []} # Join graph as adjacency list. self._where = None def __repr__(self): sql, params = self.sql() return '%s %s %s' % (self.model_class, sql, params) def clone(self): query = type(self)(self.model_class) query.database = self.database return self._clone_attributes(query) def _clone_attributes(self, query): if self._where is not None: query._where = self._where.clone() query._joins = self._clone_joins() query._query_ctx = self._query_ctx return query def _clone_joins(self): return dict( (mc, list(j)) for mc, j in self._joins.items()) def _add_query_clauses(self, initial, expressions, conjunction=None): reduced = reduce(operator.and_, expressions) if initial is None: return reduced conjunction = conjunction or operator.and_ return conjunction(initial, reduced) @returns_clone def where(self, *expressions): self._where = self._add_query_clauses(self._where, expressions) @returns_clone def orwhere(self, *expressions): self._where = self._add_query_clauses( self._where, expressions, operator.or_) @returns_clone def join(self, dest, join_type=None, on=None): if not on: require_join_condition = [ isinstance(dest, SelectQuery), (isclass(dest) and not self._query_ctx._meta.rel_exists(dest))] if any(require_join_condition): raise ValueError('A join condition must be specified.') elif isinstance(on, basestring): on = self._query_ctx._meta.fields[on] self._joins.setdefault(self._query_ctx, []) self._joins[self._query_ctx].append(Join(dest, join_type, on)) if not isinstance(dest, SelectQuery): self._query_ctx = dest @returns_clone def switch(self, model_class=None): """Change or reset the query context.""" self._query_ctx = model_class or self.model_class def ensure_join(self, lm, rm, on=None): ctx = self._query_ctx for join in self._joins.get(lm, []): if join.dest == rm: return self return self.switch(lm).join(rm, on=on).switch(ctx) def convert_dict_to_node(self, qdict): accum = [] joins = [] relationship = 
(ForeignKeyField, ReverseRelationDescriptor) for key, value in sorted(qdict.items()): curr = self.model_class if '__' in key and key.rsplit('__', 1)[1] in DJANGO_MAP: key, op = key.rsplit('__', 1) op = DJANGO_MAP[op] else: op = OP_EQ for piece in key.split('__'): model_attr = getattr(curr, piece) if isinstance(model_attr, relationship): curr = model_attr.rel_model joins.append(model_attr) accum.append(Expression(model_attr, op, value)) return accum, joins def filter(self, *args, **kwargs): # normalize args and kwargs into a new expression dq_node = Node() if args: dq_node &= reduce(operator.and_, [a.clone() for a in args]) if kwargs: dq_node &= DQ(**kwargs) # dq_node should now be an Expression, lhs = Node(), rhs = ... q = deque([dq_node]) dq_joins = set() while q: curr = q.popleft() if not isinstance(curr, Expression): continue for side, piece in (('lhs', curr.lhs), ('rhs', curr.rhs)): if isinstance(piece, DQ): query, joins = self.convert_dict_to_node(piece.query) dq_joins.update(joins) expression = reduce(operator.and_, query) # Apply values from the DQ object. expression._negated = piece._negated expression._alias = piece._alias setattr(curr, side, expression) else: q.append(piece) dq_node = dq_node.rhs query = self.clone() for field in dq_joins: if isinstance(field, ForeignKeyField): lm, rm = field.model_class, field.rel_model field_obj = field elif isinstance(field, ReverseRelationDescriptor): lm, rm = field.field.rel_model, field.rel_model field_obj = field.field query = query.ensure_join(lm, rm, field_obj) return query.where(dq_node) def compiler(self): return self.database.compiler() def sql(self): raise NotImplementedError def _execute(self): sql, params = self.sql() return self.database.execute_sql(sql, params, self.require_commit) def execute(self): raise NotImplementedError def scalar(self, as_tuple=False, convert=False): if convert: row = self.tuples().first() else: row = self._execute().fetchone() if row and not as_tuple: return row[0] else: return row class RawQuery(Query): """ Execute a SQL query, returning a standard iterable interface that returns model instances. 
""" def __init__(self, model, query, *params): self._sql = query self._params = list(params) self._qr = None self._tuples = False self._dicts = False super(RawQuery, self).__init__(model) def clone(self): query = RawQuery(self.model_class, self._sql, *self._params) query._tuples = self._tuples query._dicts = self._dicts return query join = not_allowed('joining') where = not_allowed('where') switch = not_allowed('switch') @returns_clone def tuples(self, tuples=True): self._tuples = tuples @returns_clone def dicts(self, dicts=True): self._dicts = dicts def sql(self): return self._sql, self._params def execute(self): if self._qr is None: if self._tuples: ResultWrapper = TuplesQueryResultWrapper elif self._dicts: ResultWrapper = DictQueryResultWrapper else: ResultWrapper = NaiveQueryResultWrapper self._qr = ResultWrapper(self.model_class, self._execute(), None) return self._qr def __iter__(self): return iter(self.execute()) class SelectQuery(Query): _node_type = 'select_query' def __init__(self, model_class, *selection): super(SelectQuery, self).__init__(model_class) self.require_commit = self.database.commit_select self.__select(*selection) self._from = None self._group_by = None self._having = None self._order_by = None self._windows = None self._limit = None self._offset = None self._distinct = False self._for_update = (False, False) self._naive = False self._tuples = False self._dicts = False self._aggregate_rows = False self._alias = None self._qr = None def _clone_attributes(self, query): query = super(SelectQuery, self)._clone_attributes(query) query._explicit_selection = self._explicit_selection query._select = list(self._select) if self._from is not None: query._from = [] for f in self._from: if isinstance(f, Node): query._from.append(f.clone()) else: query._from.append(f) if self._group_by is not None: query._group_by = list(self._group_by) if self._having: query._having = self._having.clone() if self._order_by is not None: query._order_by = list(self._order_by) if self._windows is not None: query._windows = list(self._windows) query._limit = self._limit query._offset = self._offset query._distinct = self._distinct query._for_update = self._for_update query._naive = self._naive query._tuples = self._tuples query._dicts = self._dicts query._aggregate_rows = self._aggregate_rows query._alias = self._alias return query def _model_shorthand(self, args): accum = [] for arg in args: if isinstance(arg, Node): accum.append(arg) elif isinstance(arg, Query): accum.append(arg) elif isinstance(arg, ModelAlias): accum.extend(arg.get_proxy_fields()) elif isclass(arg) and issubclass(arg, Model): accum.extend(arg._meta.get_fields()) return accum def compound_op(operator): def inner(self, other): supported_ops = self.model_class._meta.database.compound_operations if operator not in supported_ops: raise ValueError( 'Your database does not support %s' % operator) return CompoundSelect(self.model_class, self, operator, other) return inner _compound_op_static = staticmethod(compound_op) __or__ = compound_op('UNION') __and__ = compound_op('INTERSECT') __sub__ = compound_op('EXCEPT') def __xor__(self, rhs): # Symmetric difference, should just be (self | rhs) - (self & rhs)... 
wrapped_rhs = self.model_class.select(SQL('*')).from_( EnclosedClause((self & rhs)).alias('_')).order_by() return (self | rhs) - wrapped_rhs def union_all(self, rhs): return SelectQuery._compound_op_static('UNION ALL')(self, rhs) def __select(self, *selection): self._explicit_selection = len(selection) > 0 selection = selection or self.model_class._meta.get_fields() self._select = self._model_shorthand(selection) select = returns_clone(__select) @returns_clone def from_(self, *args): self._from = None if args: self._from = list(args) @returns_clone def group_by(self, *args): self._group_by = self._model_shorthand(args) @returns_clone def having(self, *expressions): self._having = self._add_query_clauses(self._having, expressions) @returns_clone def order_by(self, *args): self._order_by = list(args) @returns_clone def window(self, *windows): self._windows = list(windows) @returns_clone def limit(self, lim): self._limit = lim @returns_clone def offset(self, off): self._offset = off @returns_clone def paginate(self, page, paginate_by=20): if page > 0: page -= 1 self._limit = paginate_by self._offset = page * paginate_by @returns_clone def distinct(self, is_distinct=True): self._distinct = is_distinct @returns_clone def for_update(self, for_update=True, nowait=False): self._for_update = (for_update, nowait) @returns_clone def naive(self, naive=True): self._naive = naive @returns_clone def tuples(self, tuples=True): self._tuples = tuples @returns_clone def dicts(self, dicts=True): self._dicts = dicts @returns_clone def aggregate_rows(self, aggregate_rows=True): self._aggregate_rows = aggregate_rows @returns_clone def alias(self, alias=None): self._alias = alias def annotate(self, rel_model, annotation=None): if annotation is None: annotation = fn.Count(rel_model._meta.primary_key).alias('count') query = self.clone() query = query.ensure_join(query._query_ctx, rel_model) if not query._group_by: query._group_by = [x.alias() for x in query._select] query._select = tuple(query._select) + (annotation,) return query def _aggregate(self, aggregation=None): if aggregation is None: aggregation = fn.Count(SQL('*')) query = self.order_by() query._select = [aggregation] return query def aggregate(self, aggregation=None, convert=True): return self._aggregate(aggregation).scalar(convert=convert) def count(self, clear_limit=False): if self._distinct or self._group_by or self._limit or self._offset: return self.wrapped_count(clear_limit=clear_limit) # defaults to a count() of the primary key return self.aggregate(convert=False) or 0 def wrapped_count(self, clear_limit=False): clone = self.order_by() if clear_limit: clone._limit = clone._offset = None sql, params = clone.sql() wrapped = 'SELECT COUNT(1) FROM (%s) AS wrapped_select' % sql rq = self.model_class.raw(wrapped, *params) return rq.scalar() or 0 def exists(self): clone = self.paginate(1, 1) clone._select = [SQL('1')] return bool(clone.scalar()) def get(self): clone = self.paginate(1, 1) try: return clone.execute().next() except StopIteration: raise self.model_class.DoesNotExist( 'Instance matching query does not exist:\nSQL: %s\nPARAMS: %s' % self.sql()) def first(self): res = self.execute() res.fill_cache(1) try: return res._result_cache[0] except IndexError: pass def sql(self): return self.compiler().generate_select(self) def verify_naive(self): model_class = self.model_class for node in self._select: if isinstance(node, Field) and node.model_class != model_class: return False return True def get_query_meta(self): return (self._select, self._joins) 
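    # Illustrative usage sketch (not part of the original source). Assuming a
    # hypothetical `User` model bound to a database, a SelectQuery is composed
    # lazily and only hits the database when execute() runs (directly or via
    # iteration); repeated iteration is served from the cached result wrapper:
    #
    #     query = (User
    #              .select()
    #              .where(User.is_active == True)
    #              .order_by(User.username)
    #              .paginate(2, 20))    # page 2, 20 rows per page
    #     for user in query:           # first iteration issues the SELECT
    #         print(user.username)
    #     total = query.count()        # issues a separate wrapped COUNT query
    #
    # `User`, `is_active`, and `username` are assumptions for this example.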
def execute(self): if self._dirty or not self._qr: model_class = self.model_class query_meta = self.get_query_meta() if self._tuples: ResultWrapper = TuplesQueryResultWrapper elif self._dicts: ResultWrapper = DictQueryResultWrapper elif self._naive or not self._joins or self.verify_naive(): ResultWrapper = NaiveQueryResultWrapper elif self._aggregate_rows: ResultWrapper = AggregateQueryResultWrapper else: ResultWrapper = ModelQueryResultWrapper self._qr = ResultWrapper(model_class, self._execute(), query_meta) self._dirty = False return self._qr else: return self._qr def __iter__(self): return iter(self.execute()) def iterator(self): return iter(self.execute().iterator()) def __getitem__(self, value): res = self.execute() if isinstance(value, slice): index = value.stop else: index = value if index is not None and index >= 0: index += 1 res.fill_cache(index) return res._result_cache[value] if PY3: def __hash__(self): return id(self) class CompoundSelect(SelectQuery): _node_type = 'compound_select_query' def __init__(self, model_class, lhs=None, operator=None, rhs=None): self.lhs = lhs self.operator = operator self.rhs = rhs super(CompoundSelect, self).__init__(model_class, []) def _clone_attributes(self, query): query = super(CompoundSelect, self)._clone_attributes(query) query.lhs = self.lhs query.operator = self.operator query.rhs = self.rhs return query def get_query_meta(self): return self.lhs.get_query_meta() class UpdateQuery(Query): def __init__(self, model_class, update=None): self._update = update super(UpdateQuery, self).__init__(model_class) def _clone_attributes(self, query): query = super(UpdateQuery, self)._clone_attributes(query) query._update = dict(self._update) return query join = not_allowed('joining') def sql(self): return self.compiler().generate_update(self) def execute(self): return self.database.rows_affected(self._execute()) class InsertQuery(Query): def __init__(self, model_class, field_dict=None, rows=None, fields=None, query=None): super(InsertQuery, self).__init__(model_class) self._upsert = False self._is_multi_row_insert = rows is not None or query is not None if rows is not None: self._rows = rows else: self._rows = [field_dict or {}] self._fields = fields self._query = query def _iter_rows(self): model_meta = self.model_class._meta valid_fields = (set(model_meta.fields.keys()) | set(model_meta.fields.values())) def validate_field(field): if field not in valid_fields: raise KeyError('"%s" is not a recognized field.' 
% field) defaults = model_meta._default_dict callables = model_meta._default_callables for row_dict in self._rows: field_row = defaults.copy() seen = set() for key in row_dict: validate_field(key) if key in model_meta.fields: field = model_meta.fields[key] else: field = key field_row[field] = row_dict[key] seen.add(field) if callables: for field in callables: if field not in seen: field_row[field] = callables[field]() yield field_row def _clone_attributes(self, query): query = super(InsertQuery, self)._clone_attributes(query) query._rows = self._rows query._upsert = self._upsert query._is_multi_row_insert = self._is_multi_row_insert query._fields = self._fields query._query = self._query return query join = not_allowed('joining') where = not_allowed('where clause') @returns_clone def upsert(self, upsert=True): self._upsert = upsert def sql(self): return self.compiler().generate_insert(self) def execute(self): if self._is_multi_row_insert and self._query is None: if not self.database.insert_many: last_id = None for row in self._rows: last_id = InsertQuery(self.model_class, row).execute() return last_id return self.database.last_insert_id(self._execute(), self.model_class) class DeleteQuery(Query): join = not_allowed('joining') def sql(self): return self.compiler().generate_delete(self) def execute(self): return self.database.rows_affected(self._execute()) IndexMetadata = namedtuple( 'IndexMetadata', ('name', 'sql', 'columns', 'unique', 'table')) ColumnMetadata = namedtuple( 'ColumnMetadata', ('name', 'data_type', 'null', 'primary_key', 'table')) ForeignKeyMetadata = namedtuple( 'ForeignKeyMetadata', ('column', 'dest_table', 'dest_column', 'table')) class PeeweeException(Exception): pass class ImproperlyConfigured(PeeweeException): pass class DatabaseError(PeeweeException): pass class DataError(DatabaseError): pass class IntegrityError(DatabaseError): pass class InterfaceError(PeeweeException): pass class InternalError(DatabaseError): pass class NotSupportedError(DatabaseError): pass class OperationalError(DatabaseError): pass class ProgrammingError(DatabaseError): pass class ExceptionWrapper(object): __slots__ = ['exceptions'] def __init__(self, exceptions): self.exceptions = exceptions def __enter__(self): pass def __exit__(self, exc_type, exc_value, traceback): if exc_type is None: return if exc_type.__name__ in self.exceptions: new_type = self.exceptions[exc_type.__name__] reraise(new_type, new_type(*exc_value.args), traceback) class _BaseConnectionLocal(object): def __init__(self, **kwargs): super(_BaseConnectionLocal, self).__init__(**kwargs) self.autocommit = None self.closed = True self.conn = None self.context_stack = [] self.transactions = [] class _ConnectionLocal(_BaseConnectionLocal, threading.local): pass class Database(object): commit_select = False compiler_class = QueryCompiler compound_operations = ['UNION', 'INTERSECT', 'EXCEPT', 'UNION ALL'] distinct_on = False drop_cascade = False field_overrides = {} foreign_keys = True for_update = False for_update_nowait = False insert_many = True interpolation = '?' 
limit_max = None op_overrides = {} quote_char = '"' reserved_tables = [] savepoints = True sequences = False subquery_delete_same_table = True window_functions = False exceptions = { 'ConstraintError': IntegrityError, 'DatabaseError': DatabaseError, 'DataError': DataError, 'IntegrityError': IntegrityError, 'InterfaceError': InterfaceError, 'InternalError': InternalError, 'NotSupportedError': NotSupportedError, 'OperationalError': OperationalError, 'ProgrammingError': ProgrammingError} def __init__(self, database, threadlocals=True, autocommit=True, fields=None, ops=None, autorollback=False, **connect_kwargs): self.init(database, **connect_kwargs) if threadlocals: self.__local = _ConnectionLocal() else: self.__local = _BaseConnectionLocal() self._conn_lock = threading.Lock() self.autocommit = autocommit self.autorollback = autorollback self.field_overrides = merge_dict(self.field_overrides, fields or {}) self.op_overrides = merge_dict(self.op_overrides, ops or {}) def init(self, database, **connect_kwargs): self.deferred = database is None self.database = database self.connect_kwargs = connect_kwargs def exception_wrapper(self): return ExceptionWrapper(self.exceptions) def connect(self): with self._conn_lock: if self.deferred: raise Exception('Error, database not properly initialized ' 'before opening connection') with self.exception_wrapper(): self.__local.conn = self._connect( self.database, **self.connect_kwargs) self.__local.closed = False def close(self): with self._conn_lock: if self.deferred: raise Exception('Error, database not properly initialized ' 'before closing connection') with self.exception_wrapper(): self._close(self.__local.conn) self.__local.closed = True def get_conn(self): if self.__local.context_stack: return self.__local.context_stack[-1].connection if self.__local.closed: self.connect() return self.__local.conn def is_closed(self): return self.__local.closed def get_cursor(self): return self.get_conn().cursor() def _close(self, conn): conn.close() def _connect(self, database, **kwargs): raise NotImplementedError @classmethod def register_fields(cls, fields): cls.field_overrides = merge_dict(cls.field_overrides, fields) @classmethod def register_ops(cls, ops): cls.op_overrides = merge_dict(cls.op_overrides, ops) def last_insert_id(self, cursor, model): if model._meta.auto_increment: return cursor.lastrowid def rows_affected(self, cursor): return cursor.rowcount def sql_error_handler(self, exception, sql, params, require_commit): return True def compiler(self): return self.compiler_class( self.quote_char, self.interpolation, self.field_overrides, self.op_overrides) def execute_sql(self, sql, params=None, require_commit=True): logger.debug((sql, params)) with self.exception_wrapper(): cursor = self.get_cursor() try: cursor.execute(sql, params or ()) except Exception as exc: if self.get_autocommit() and self.autorollback: self.rollback() if self.sql_error_handler(exc, sql, params, require_commit): raise else: if require_commit and self.get_autocommit(): self.commit() return cursor def begin(self): pass def commit(self): self.get_conn().commit() def rollback(self): self.get_conn().rollback() def set_autocommit(self, autocommit): self.__local.autocommit = autocommit def get_autocommit(self): if self.__local.autocommit is None: self.set_autocommit(self.autocommit) return self.__local.autocommit def push_execution_context(self, transaction): self.__local.context_stack.append(transaction) def pop_execution_context(self): self.__local.context_stack.pop() def 
execution_context_depth(self): return len(self.__local.context_stack) def execution_context(self, with_transaction=True): return ExecutionContext(self, with_transaction=with_transaction) def push_transaction(self, transaction): self.__local.transactions.append(transaction) def pop_transaction(self): self.__local.transactions.pop() def transaction_depth(self): return len(self.__local.transactions) def transaction(self): return transaction(self) def commit_on_success(self, func): @wraps(func) def inner(*args, **kwargs): with self.transaction(): return func(*args, **kwargs) return inner def savepoint(self, sid=None): if not self.savepoints: raise NotImplementedError return savepoint(self, sid) def atomic(self): return _atomic(self) def get_tables(self, schema=None): raise NotImplementedError def get_indexes(self, table, schema=None): raise NotImplementedError def get_columns(self, table, schema=None): raise NotImplementedError def get_primary_keys(self, table, schema=None): raise NotImplementedError def get_foreign_keys(self, table, schema=None): raise NotImplementedError def sequence_exists(self, seq): raise NotImplementedError def create_table(self, model_class, safe=False): qc = self.compiler() return self.execute_sql(*qc.create_table(model_class, safe)) def create_tables(self, models, safe=False): create_model_tables(models, fail_silently=safe) def create_index(self, model_class, fields, unique=False): qc = self.compiler() if not isinstance(fields, (list, tuple)): raise ValueError('Fields passed to "create_index" must be a list ' 'or tuple: "%s"' % fields) fobjs = [ model_class._meta.fields[f] if isinstance(f, basestring) else f for f in fields] return self.execute_sql(*qc.create_index(model_class, fobjs, unique)) def create_foreign_key(self, model_class, field, constraint=None): qc = self.compiler() return self.execute_sql(*qc.create_foreign_key( model_class, field, constraint)) def create_sequence(self, seq): if self.sequences: qc = self.compiler() return self.execute_sql(*qc.create_sequence(seq)) def drop_table(self, model_class, fail_silently=False, cascade=False): qc = self.compiler() return self.execute_sql(*qc.drop_table( model_class, fail_silently, cascade)) def drop_tables(self, models, safe=False, cascade=False): drop_model_tables(models, fail_silently=safe, cascade=cascade) def drop_sequence(self, seq): if self.sequences: qc = self.compiler() return self.execute_sql(*qc.drop_sequence(seq)) def extract_date(self, date_part, date_field): return fn.EXTRACT(Clause(date_part, R('FROM'), date_field)) def truncate_date(self, date_part, date_field): return fn.DATE_TRUNC(SQL(date_part), date_field) class SqliteDatabase(Database): foreign_keys = False insert_many = sqlite3 and sqlite3.sqlite_version_info >= (3, 7, 11, 0) limit_max = -1 op_overrides = { OP_LIKE: 'GLOB', OP_ILIKE: 'LIKE', } def __init__(self, *args, **kwargs): self._journal_mode = kwargs.pop('journal_mode', None) super(SqliteDatabase, self).__init__(*args, **kwargs) if not self.database: self.database = ':memory:' def _connect(self, database, **kwargs): conn = sqlite3.connect(database, **kwargs) conn.isolation_level = None self._add_conn_hooks(conn) return conn def _add_conn_hooks(self, conn): conn.create_function('date_part', 2, _sqlite_date_part) conn.create_function('date_trunc', 2, _sqlite_date_trunc) conn.create_function('regexp', 2, _sqlite_regexp) if self._journal_mode: self.execute_sql('PRAGMA journal_mode=%s;' % self._journal_mode) def begin(self, lock_type='DEFERRED'): self.execute_sql('BEGIN %s' % lock_type, 
require_commit=False) def get_tables(self, schema=None): cursor = self.execute_sql('SELECT name FROM sqlite_master WHERE ' 'type = ? ORDER BY name;', ('table',)) return [row[0] for row in cursor.fetchall()] def get_indexes(self, table, schema=None): query = ('SELECT name, sql FROM sqlite_master ' 'WHERE tbl_name = ? AND type = ? ORDER BY name') cursor = self.execute_sql(query, (table, 'index')) index_to_sql = dict(cursor.fetchall()) # Determine which indexes have a unique constraint. unique_indexes = set() cursor = self.execute_sql('PRAGMA index_list("%s")' % table) for _, name, is_unique in cursor.fetchall(): if is_unique: unique_indexes.add(name) # Retrieve the indexed columns. index_columns = {} for index_name in sorted(index_to_sql): cursor = self.execute_sql('PRAGMA index_info("%s")' % index_name) index_columns[index_name] = [row[2] for row in cursor.fetchall()] return [ IndexMetadata( name, index_to_sql[name], index_columns[name], name in unique_indexes, table) for name in sorted(index_to_sql)] def get_columns(self, table, schema=None): cursor = self.execute_sql('PRAGMA table_info("%s")' % table) return [ColumnMetadata(row[1], row[2], not row[3], bool(row[5]), table) for row in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA table_info("%s")' % table) return [row[1] for row in cursor.fetchall() if row[-1]] def get_foreign_keys(self, table, schema=None): cursor = self.execute_sql('PRAGMA foreign_key_list("%s")' % table) return [ForeignKeyMetadata(row[3], row[2], row[4], table) for row in cursor.fetchall()] def savepoint(self, sid=None): return savepoint_sqlite(self, sid) def extract_date(self, date_part, date_field): return fn.date_part(date_part, date_field) def truncate_date(self, date_part, date_field): return fn.strftime(SQLITE_DATE_TRUNC_MAPPING[date_part], date_field) class PostgresqlDatabase(Database): commit_select = True distinct_on = True drop_cascade = True field_overrides = { 'blob': 'BYTEA', 'bool': 'BOOLEAN', 'datetime': 'TIMESTAMP', 'decimal': 'NUMERIC', 'double': 'DOUBLE PRECISION', 'primary_key': 'SERIAL', 'uuid': 'UUID', } for_update = True for_update_nowait = True interpolation = '%s' op_overrides = { OP_REGEXP: '~', } reserved_tables = ['user'] sequences = True window_functions = True register_unicode = True def _connect(self, database, **kwargs): if not psycopg2: raise ImproperlyConfigured('psycopg2 must be installed.') conn = psycopg2.connect(database=database, **kwargs) if self.register_unicode: pg_extensions.register_type(pg_extensions.UNICODE, conn) pg_extensions.register_type(pg_extensions.UNICODEARRAY, conn) return conn def last_insert_id(self, cursor, model): meta = model._meta schema = '' if meta.schema: schema = '%s.' 
% meta.schema if meta.primary_key.sequence: seq = meta.primary_key.sequence elif meta.auto_increment: seq = '%s_%s_seq' % (meta.db_table, meta.primary_key.db_column) else: seq = None if seq: cursor.execute("SELECT CURRVAL('%s\"%s\"')" % (schema, seq)) result = cursor.fetchone()[0] if self.get_autocommit(): self.commit() return result def get_tables(self, schema='public'): query = ('SELECT tablename FROM pg_catalog.pg_tables ' 'WHERE schemaname = %s ORDER BY tablename') return [r for r, in self.execute_sql(query, (schema,)).fetchall()] def get_indexes(self, table, schema='public'): query = """ SELECT i.relname, idxs.indexdef, idx.indisunique, array_to_string(array_agg(cols.attname), ',') FROM pg_catalog.pg_class AS t INNER JOIN pg_catalog.pg_index AS idx ON t.oid = idx.indrelid INNER JOIN pg_catalog.pg_class AS i ON idx.indexrelid = i.oid INNER JOIN pg_catalog.pg_indexes AS idxs ON (idxs.tablename = t.relname AND idxs.indexname = i.relname) LEFT OUTER JOIN pg_catalog.pg_attribute AS cols ON (cols.attrelid = t.oid AND cols.attnum = ANY(idx.indkey)) WHERE t.relname = %s AND t.relkind = %s AND idxs.schemaname = %s GROUP BY i.relname, idxs.indexdef, idx.indisunique ORDER BY idx.indisunique DESC, i.relname;""" cursor = self.execute_sql(query, (table, 'r', schema)) return [IndexMetadata(row[0], row[1], row[3].split(','), row[2], table) for row in cursor.fetchall()] def get_columns(self, table, schema='public'): query = """ SELECT column_name, is_nullable, data_type FROM information_schema.columns WHERE table_name = %s AND table_schema = %s""" cursor = self.execute_sql(query, (table, schema)) pks = set(self.get_primary_keys(table, schema)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table) for name, null, dt in cursor.fetchall()] def get_primary_keys(self, table, schema='public'): query = """ SELECT kc.column_name FROM information_schema.table_constraints AS tc INNER JOIN information_schema.key_column_usage AS kc ON ( tc.table_name = kc.table_name AND tc.table_schema = kc.table_schema AND tc.constraint_name = kc.constraint_name) WHERE tc.constraint_type = %s AND tc.table_name = %s AND tc.table_schema = %s""" cursor = self.execute_sql(query, ('PRIMARY KEY', table, schema)) return [row for row, in cursor.fetchall()] def get_foreign_keys(self, table, schema='public'): sql = """ SELECT kcu.column_name, ccu.table_name, ccu.column_name FROM information_schema.table_constraints AS tc JOIN information_schema.key_column_usage AS kcu ON (tc.constraint_name = kcu.constraint_name AND tc.constraint_schema = kcu.constraint_schema) JOIN information_schema.constraint_column_usage AS ccu ON (ccu.constraint_name = tc.constraint_name AND ccu.constraint_schema = tc.constraint_schema) WHERE tc.constraint_type = 'FOREIGN KEY' AND tc.table_name = %s AND tc.table_schema = %s""" cursor = self.execute_sql(sql, (table, schema)) return [ForeignKeyMetadata(row[0], row[1], row[2], table) for row in cursor.fetchall()] def sequence_exists(self, sequence): res = self.execute_sql(""" SELECT COUNT(*) FROM pg_class, pg_namespace WHERE relkind='S' AND pg_class.relnamespace = pg_namespace.oid AND relname=%s""", (sequence,)) return bool(res.fetchone()[0]) def set_search_path(self, *search_path): path_params = ','.join(['%s'] * len(search_path)) self.execute_sql('SET search_path TO %s' % path_params, search_path) class MySQLDatabase(Database): commit_select = True compound_operations = ['UNION', 'UNION ALL'] field_overrides = { 'bool': 'BOOL', 'decimal': 'NUMERIC', 'double': 'DOUBLE PRECISION', 'float': 'FLOAT', 
'primary_key': 'INTEGER AUTO_INCREMENT', 'text': 'LONGTEXT', } for_update = True interpolation = '%s' limit_max = 2 ** 64 - 1 # MySQL quirk op_overrides = { OP_LIKE: 'LIKE BINARY', OP_ILIKE: 'LIKE', OP_XOR: 'XOR', } quote_char = '`' subquery_delete_same_table = False def _connect(self, database, **kwargs): if not mysql: raise ImproperlyConfigured('MySQLdb or PyMySQL must be installed.') conn_kwargs = { 'charset': 'utf8', 'use_unicode': True, } conn_kwargs.update(kwargs) if 'password' in conn_kwargs: conn_kwargs['passwd'] = conn_kwargs.pop('password') return mysql.connect(db=database, **conn_kwargs) def get_tables(self, schema=None): return [row for row, in self.execute_sql('SHOW TABLES')] def get_indexes(self, table, schema=None): cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table) unique = set() indexes = {} for row in cursor.fetchall(): if not row[1]: unique.add(row[2]) indexes.setdefault(row[2], []) indexes[row[2]].append(row[4]) return [IndexMetadata(name, None, indexes[name], name in unique, table) for name in indexes] def get_columns(self, table, schema=None): sql = """ SELECT column_name, is_nullable, data_type FROM information_schema.columns WHERE table_name = %s AND table_schema = DATABASE()""" cursor = self.execute_sql(sql, (table,)) pks = set(self.get_primary_keys(table)) return [ColumnMetadata(name, dt, null == 'YES', name in pks, table) for name, null, dt in cursor.fetchall()] def get_primary_keys(self, table, schema=None): cursor = self.execute_sql('SHOW INDEX FROM `%s`' % table) return [row[4] for row in cursor.fetchall() if row[2] == 'PRIMARY'] def get_foreign_keys(self, table, schema=None): query = """ SELECT column_name, referenced_table_name, referenced_column_name FROM information_schema.key_column_usage WHERE table_name = %s AND table_schema = DATABASE() AND referenced_table_name IS NOT NULL AND referenced_column_name IS NOT NULL""" cursor = self.execute_sql(query, (table,)) return [ ForeignKeyMetadata(column, dest_table, dest_column, table) for column, dest_table, dest_column in cursor.fetchall()] def extract_date(self, date_part, date_field): return fn.EXTRACT(Clause(R(date_part), R('FROM'), date_field)) def truncate_date(self, date_part, date_field): return fn.DATE_FORMAT(date_field, MYSQL_DATE_TRUNC_MAPPING[date_part]) class _callable_context_manager(object): def __call__(self, fn): @wraps(fn) def inner(*args, **kwargs): with self: return fn(*args, **kwargs) return inner class ExecutionContext(_callable_context_manager): def __init__(self, database, with_transaction=True): self.database = database self.with_transaction = with_transaction def __enter__(self): with self.database._conn_lock: self.database.push_execution_context(self) self.connection = self.database._connect( self.database.database, **self.database.connect_kwargs) if self.with_transaction: self.txn = self.database.transaction() self.txn.__enter__() return self def __exit__(self, exc_type, exc_val, exc_tb): with self.database._conn_lock: try: if self.with_transaction: if not exc_type: self.txn.commit(False) self.txn.__exit__(exc_type, exc_val, exc_tb) finally: self.database.pop_execution_context() self.database._close(self.connection) class _atomic(_callable_context_manager): def __init__(self, db): self.db = db def __enter__(self): if self.db.transaction_depth() == 0: self._helper = self.db.transaction() else: self._helper = self.db.savepoint() return self._helper.__enter__() def __exit__(self, exc_type, exc_val, exc_tb): return self._helper.__exit__(exc_type, exc_val, exc_tb) class 
transaction(_callable_context_manager): def __init__(self, db): self.db = db def _begin(self): self.db.begin() def commit(self, begin=True): self.db.commit() if begin: self._begin() def rollback(self, begin=True): self.db.rollback() if begin: self._begin() def __enter__(self): self._orig = self.db.get_autocommit() self.db.set_autocommit(False) if self.db.transaction_depth() == 0: self._begin() self.db.push_transaction(self) return self def __exit__(self, exc_type, exc_val, exc_tb): try: if exc_type: self.rollback(False) elif self.db.transaction_depth() == 1: try: self.commit(False) except: self.rollback(False) raise finally: self.db.set_autocommit(self._orig) self.db.pop_transaction() class savepoint(_callable_context_manager): def __init__(self, db, sid=None): self.db = db _compiler = db.compiler() self.sid = sid or 's' + uuid.uuid4().hex self.quoted_sid = _compiler.quote(self.sid) def _execute(self, query): self.db.execute_sql(query, require_commit=False) def commit(self): self._execute('RELEASE SAVEPOINT %s;' % self.quoted_sid) def rollback(self): self._execute('ROLLBACK TO SAVEPOINT %s;' % self.quoted_sid) def __enter__(self): self._orig_autocommit = self.db.get_autocommit() self.db.set_autocommit(False) self._execute('SAVEPOINT %s;' % self.quoted_sid) return self def __exit__(self, exc_type, exc_val, exc_tb): try: if exc_type: self.rollback() else: try: self.commit() except: self.rollback() raise finally: self.db.set_autocommit(self._orig_autocommit) class savepoint_sqlite(savepoint): def __enter__(self): conn = self.db.get_conn() # For sqlite, the connection's isolation_level *must* be set to None. # The act of setting it, though, will break any existing savepoints, # so only write to it if necessary. if conn.isolation_level is not None: self._orig_isolation_level = conn.isolation_level conn.isolation_level = None else: self._orig_isolation_level = None return super(savepoint_sqlite, self).__enter__() def __exit__(self, exc_type, exc_val, exc_tb): try: return super(savepoint_sqlite, self).__exit__( exc_type, exc_val, exc_tb) finally: if self._orig_isolation_level is not None: self.db.get_conn().isolation_level = self._orig_isolation_level class FieldProxy(Field): def __init__(self, alias, field_instance): self._model_alias = alias self.model = self._model_alias.model_class self.field_instance = field_instance def clone_base(self): return FieldProxy(self._model_alias, self.field_instance) def coerce(self, value): return self.field_instance.coerce(value) def python_value(self, value): return self.field_instance.python_value(value) def db_value(self, value): return self.field_instance.db_value(value) def __getattr__(self, attr): if attr == 'model_class': return self._model_alias return getattr(self.field_instance, attr) class ModelAlias(object): def __init__(self, model_class): self.__dict__['model_class'] = model_class def __getattr__(self, attr): model_attr = getattr(self.model_class, attr) if isinstance(model_attr, Field): return FieldProxy(self, model_attr) return model_attr def __setattr__(self, attr, value): raise AttributeError('Cannot set attributes on ModelAlias instances') def get_proxy_fields(self): return [ FieldProxy(self, f) for f in self.model_class._meta.get_fields()] def select(self, *selection): query = SelectQuery(self, *selection) if self._meta.order_by: query = query.order_by(*self._meta.order_by) return query class DoesNotExist(Exception): pass if sqlite3: default_database = SqliteDatabase('peewee.db') else: default_database = None class ModelOptions(object): def 
__init__(self, cls, database=None, db_table=None, indexes=None, order_by=None, primary_key=None, table_alias=None, constraints=None, schema=None, validate_backrefs=True, **kwargs): self.model_class = cls self.name = cls.__name__.lower() self.fields = {} self.columns = {} self.defaults = {} self._default_by_name = {} self._default_dict = {} self._default_callables = {} self.database = database or default_database self.db_table = db_table self.indexes = list(indexes or []) self.order_by = order_by self.primary_key = primary_key self.table_alias = table_alias self.constraints = constraints self.schema = schema self.validate_backrefs = validate_backrefs self.auto_increment = None self.rel = {} self.reverse_rel = {} for key, value in kwargs.items(): setattr(self, key, value) self._additional_keys = set(kwargs.keys()) def prepared(self): for field in self.fields.values(): if field.default is not None: self.defaults[field] = field.default if callable(field.default): self._default_callables[field] = field.default else: self._default_dict[field] = field.default self._default_by_name[field.name] = field.default if self.order_by: norm_order_by = [] for item in self.order_by: if isinstance(item, Field): prefix = '-' if item._ordering == 'DESC' else '' item = prefix + item.name field = self.fields[item.lstrip('-')] if item.startswith('-'): norm_order_by.append(field.desc()) else: norm_order_by.append(field.asc()) self.order_by = norm_order_by def get_default_dict(self): dd = self._default_by_name.copy() if self._default_callables: for field, default in self._default_callables.items(): dd[field.name] = default() return dd def get_sorted_fields(self): key = lambda i: i[1]._sort_key return sorted(self.fields.items(), key=key) def get_field_names(self): return [f[0] for f in self.get_sorted_fields()] def get_fields(self): return [f[1] for f in self.get_sorted_fields()] def get_field_index(self, field): for i, (field_name, field_obj) in enumerate(self.get_sorted_fields()): if field_name == field.name: return i return -1 def rel_for_model(self, model, field_obj=None): is_field = isinstance(field_obj, Field) is_node = not is_field and isinstance(field_obj, Node) for field in self.get_fields(): if isinstance(field, ForeignKeyField) and field.rel_model == model: is_match = any(( field_obj is None, is_field and field_obj.name == field.name, is_node and field_obj._alias == field.name)) if is_match: return field def reverse_rel_for_model(self, model, field_obj=None): return model._meta.rel_for_model(self.model_class, field_obj) def rel_exists(self, model): return self.rel_for_model(model) or self.reverse_rel_for_model(model) def related_models(self, backrefs=False): models = [] stack = [self.model_class] while stack: model = stack.pop() if model in models: continue models.append(model) for fk in model._meta.rel.values(): stack.append(fk.rel_model) if backrefs: for fk in model._meta.reverse_rel.values(): stack.append(fk.model_class) return models class BaseModel(type): inheritable = set(['constraints', 'database', 'indexes', 'order_by', 'primary_key', 'schema', 'validate_backrefs']) def __new__(cls, name, bases, attrs): if not bases: return super(BaseModel, cls).__new__(cls, name, bases, attrs) meta_options = {} meta = attrs.pop('Meta', None) if meta: for k, v in meta.__dict__.items(): if not k.startswith('_'): meta_options[k] = v model_pk = getattr(meta, 'primary_key', None) parent_pk = None # inherit any field descriptors by deep copying the underlying field # into the attrs of the new model, additionally see if 
the bases define # inheritable model options and swipe them for b in bases: if not hasattr(b, '_meta'): continue base_meta = getattr(b, '_meta') if parent_pk is None: parent_pk = deepcopy(base_meta.primary_key) all_inheritable = cls.inheritable | base_meta._additional_keys for (k, v) in base_meta.__dict__.items(): if k in all_inheritable and k not in meta_options: meta_options[k] = v for (k, v) in b.__dict__.items(): if k in attrs: continue if isinstance(v, FieldDescriptor): if not v.field.primary_key: attrs[k] = deepcopy(v.field) # initialize the new class and set the magic attributes cls = super(BaseModel, cls).__new__(cls, name, bases, attrs) cls._meta = ModelOptions(cls, **meta_options) cls._data = None cls._meta.indexes = list(cls._meta.indexes) if not cls._meta.db_table: cls._meta.db_table = re.sub('[^\w]+', '_', cls.__name__.lower()) # replace fields with field descriptors, calling the add_to_class hook fields = [] for name, attr in cls.__dict__.items(): if isinstance(attr, Field): if attr.primary_key and model_pk: raise ValueError('primary key is overdetermined.') elif attr.primary_key: model_pk, pk_name = attr, name else: fields.append((attr, name)) if model_pk is None: if parent_pk: model_pk, pk_name = parent_pk, parent_pk.name else: model_pk, pk_name = PrimaryKeyField(primary_key=True), 'id' elif isinstance(model_pk, CompositeKey): pk_name = '_composite_key' if model_pk is not False: model_pk.add_to_class(cls, pk_name) cls._meta.primary_key = model_pk cls._meta.auto_increment = ( isinstance(model_pk, PrimaryKeyField) or bool(model_pk.sequence)) for field, name in fields: field.add_to_class(cls, name) # create a repr and error class before finalizing if hasattr(cls, '__unicode__'): setattr(cls, '__repr__', lambda self: '<%s: %r>' % ( cls.__name__, self.__unicode__())) exc_name = '%sDoesNotExist' % cls.__name__ exception_class = type(exc_name, (DoesNotExist,), {}) cls.DoesNotExist = exception_class cls._meta.prepared() return cls def __iter__(self): return iter(self.select()) class Model(with_metaclass(BaseModel)): def __init__(self, *args, **kwargs): self._data = self._meta.get_default_dict() self._dirty = set() self._obj_cache = {} for k, v in kwargs.items(): setattr(self, k, v) @classmethod def alias(cls): return ModelAlias(cls) @classmethod def select(cls, *selection): query = SelectQuery(cls, *selection) if cls._meta.order_by: query = query.order_by(*cls._meta.order_by) return query @classmethod def update(cls, **update): fdict = dict((cls._meta.fields[f], v) for f, v in update.items()) return UpdateQuery(cls, fdict) @classmethod def insert(cls, **insert): return InsertQuery(cls, insert) @classmethod def insert_many(cls, rows): return InsertQuery(cls, rows=rows) @classmethod def insert_from(cls, fields, query): return InsertQuery(cls, fields=fields, query=query) @classmethod def delete(cls): return DeleteQuery(cls) @classmethod def raw(cls, sql, *params): return RawQuery(cls, sql, *params) @classmethod def create(cls, **query): inst = cls(**query) inst.save(force_insert=True) inst._prepare_instance() return inst @classmethod def get(cls, *query, **kwargs): sq = cls.select().naive() if query: sq = sq.where(*query) if kwargs: sq = sq.filter(**kwargs) return sq.get() @classmethod def get_or_create(cls, **kwargs): sq = cls.select().filter(**kwargs) try: return sq.get() except cls.DoesNotExist: return cls.create(**kwargs) @classmethod def filter(cls, *dq, **query): return cls.select().filter(*dq, **query) @classmethod def table_exists(cls): kwargs = {} if cls._meta.schema: 
kwargs['schema'] = cls._meta.schema return cls._meta.db_table in cls._meta.database.get_tables(**kwargs) @classmethod def create_table(cls, fail_silently=False): if fail_silently and cls.table_exists(): return db = cls._meta.database pk = cls._meta.primary_key if db.sequences and pk.sequence: if not db.sequence_exists(pk.sequence): db.create_sequence(pk.sequence) db.create_table(cls) cls._create_indexes() @classmethod def _fields_to_index(cls): fields = [] for field in cls._meta.fields.values(): if field.primary_key: continue requires_index = any(( field.index, field.unique, isinstance(field, ForeignKeyField))) if requires_index: fields.append(field) return fields @classmethod def _create_indexes(cls): db = cls._meta.database for field in cls._fields_to_index(): db.create_index(cls, [field], field.unique) if cls._meta.indexes: for fields, unique in cls._meta.indexes: db.create_index(cls, fields, unique) @classmethod def sqlall(cls): queries = [] compiler = cls._meta.database.compiler() pk = cls._meta.primary_key if cls._meta.database.sequences and pk.sequence: queries.append(compiler.create_sequence(pk.sequence)) queries.append(compiler.create_table(cls)) for field in cls._fields_to_index(): queries.append(compiler.create_index(cls, [field], field.unique)) if cls._meta.indexes: for field_names, unique in cls._meta.indexes: fields = [cls._meta.fields[f] for f in field_names] queries.append(compiler.create_index(cls, fields, unique)) return [sql for sql, _ in queries] @classmethod def drop_table(cls, fail_silently=False, cascade=False): cls._meta.database.drop_table(cls, fail_silently, cascade) @classmethod def _as_entity(cls): if cls._meta.schema: return Entity(cls._meta.schema, cls._meta.db_table) return Entity(cls._meta.db_table) def _get_pk_value(self): return getattr(self, self._meta.primary_key.name) get_id = _get_pk_value # Backwards-compatibility. def _set_pk_value(self, value): setattr(self, self._meta.primary_key.name, value) set_id = _set_pk_value # Backwards-compatibility. def _pk_expr(self): return self._meta.primary_key == self._get_pk_value() def _prepare_instance(self): self._dirty.clear() self.prepared() def prepared(self): pass def _prune_fields(self, field_dict, only): new_data = {} for field in only: if field.name in field_dict: new_data[field.name] = field_dict[field.name] return new_data def save(self, force_insert=False, only=None): field_dict = dict(self._data) pk_field = self._meta.primary_key if only: field_dict = self._prune_fields(field_dict, only) if self._get_pk_value() is not None and not force_insert: if isinstance(pk_field, CompositeKey): for pk_part_name in pk_field.field_names: field_dict.pop(pk_part_name, None) else: field_dict.pop(pk_field.name, None) rows = self.update(**field_dict).where(self._pk_expr()).execute() else: pk = self._get_pk_value() pk_from_cursor = self.insert(**field_dict).execute() if pk_from_cursor is not None: pk = pk_from_cursor self._set_pk_value(pk) # Do not overwrite current ID with None. 
rows = 1 self._dirty.clear() return rows def is_dirty(self): return bool(self._dirty) @property def dirty_fields(self): return [f for f in self._meta.get_fields() if f.name in self._dirty] def dependencies(self, search_nullable=False): model_class = type(self) query = self.select().where(self._pk_expr()) stack = [(type(self), query)] seen = set() while stack: klass, query = stack.pop() if klass in seen: continue seen.add(klass) for rel_name, fk in klass._meta.reverse_rel.items(): rel_model = fk.model_class if fk.rel_model is model_class: node = (fk == self._data[fk.to_field.name]) subquery = rel_model.select().where(node) else: node = fk << query subquery = rel_model.select().where(node) if not fk.null or search_nullable: stack.append((rel_model, subquery)) yield (node, fk) def delete_instance(self, recursive=False, delete_nullable=False): if recursive: dependencies = self.dependencies(delete_nullable) for query, fk in reversed(list(dependencies)): model = fk.model_class if fk.null and not delete_nullable: model.update(**{fk.name: None}).where(query).execute() else: model.delete().where(query).execute() return self.delete().where(self._pk_expr()).execute() def __eq__(self, other): return ( other.__class__ == self.__class__ and self._get_pk_value() is not None and other._get_pk_value() == self._get_pk_value()) def __ne__(self, other): return not self == other def prefetch_add_subquery(sq, subqueries): fixed_queries = [PrefetchResult(sq)] for i, subquery in enumerate(subqueries): if not isinstance(subquery, Query) and issubclass(subquery, Model): subquery = subquery.select() subquery_model = subquery.model_class fkf = backref = None for j in reversed(range(i + 1)): last_query = fixed_queries[j][0] last_model = last_query.model_class fkf = subquery_model._meta.rel_for_model(last_model) backref = last_model._meta.rel_for_model(subquery_model) if fkf or backref: break if not (fkf or backref): raise AttributeError('Error: unable to find foreign key for ' 'query: %s' % subquery) if fkf: inner_query = last_query.select(fkf.to_field) fixed_queries.append( PrefetchResult(subquery.where(fkf << inner_query), fkf, False)) elif backref: q = subquery.where(backref.to_field << last_query.select(backref)) fixed_queries.append(PrefetchResult(q, backref, True)) return fixed_queries __prefetched = namedtuple('__prefetched', ( 'query', 'field', 'backref', 'rel_model', 'foreign_key_attr', 'model')) class PrefetchResult(__prefetched): def __new__(cls, query, field=None, backref=None, rel_model=None, foreign_key_attr=None, model=None): if field: if backref: rel_model = field.model_class foreign_key_attr = field.to_field.name else: rel_model = field.rel_model foreign_key_attr = field.name model = query.model_class return super(PrefetchResult, cls).__new__( cls, query, field, backref, rel_model, foreign_key_attr, model) def populate_instance(self, instance, id_map): if self.backref: identifier = instance._data[self.field.name] if identifier in id_map: setattr(instance, self.field.name, id_map[identifier]) else: identifier = instance._data[self.field.to_field.name] rel_instances = id_map.get(identifier, []) attname = self.foreign_key_attr dest = '%s_prefetch' % self.field.related_name for inst in rel_instances: setattr(inst, attname, instance) setattr(instance, dest, rel_instances) def store_instance(self, instance, id_map): identity = self.field.to_field.python_value( instance._data[self.foreign_key_attr]) if self.backref: id_map[identity] = instance else: id_map.setdefault(identity, []) 
id_map[identity].append(instance) def prefetch(sq, *subqueries): if not subqueries: return sq fixed_queries = prefetch_add_subquery(sq, subqueries) deps = {} rel_map = {} for prefetch_result in reversed(fixed_queries): query_model = prefetch_result.model if prefetch_result.field: rel_map.setdefault(prefetch_result.rel_model, []) rel_map[prefetch_result.rel_model].append(prefetch_result) deps[query_model] = {} id_map = deps[query_model] has_relations = bool(rel_map.get(query_model)) for instance in prefetch_result.query: if prefetch_result.field: prefetch_result.store_instance(instance, id_map) if has_relations: for rel in rel_map[query_model]: rel.populate_instance(instance, deps[rel.model]) return prefetch_result.query def create_model_tables(models, **create_table_kwargs): """Create tables for all given models (in the right order).""" for m in sort_models_topologically(models): m.create_table(**create_table_kwargs) def drop_model_tables(models, **drop_table_kwargs): """Drop tables for all given models (in the right order).""" for m in reversed(sort_models_topologically(models)): m.drop_table(**drop_table_kwargs) def sort_models_topologically(models): """Sort models topologically so that parents will precede children.""" models = set(models) seen = set() ordering = [] def dfs(model): if model in models and model not in seen: seen.add(model) for foreign_key in model._meta.reverse_rel.values(): dfs(foreign_key.model_class) ordering.append(model) # parent will follow descendants # order models by name and table initially to guarantee a total ordering names = lambda m: (m._meta.name, m._meta.db_table) for m in sorted(models, key=names, reverse=True): dfs(m) return list(reversed(ordering)) # want parents first in output ordering<|fim▁end|>
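The sort_models_topologically helper defined in the record above orders models so that foreign-key parents precede their children, which is what create_model_tables/drop_model_tables rely on. A minimal sketch of that usage, assuming the peewee 2.x API shown in the record; the Author/Book models are invented purely for illustration:

from peewee import Model, CharField, ForeignKeyField, SqliteDatabase, create_model_tables

db = SqliteDatabase(':memory:')

class Author(Model):
    name = CharField()
    class Meta:
        database = db

class Book(Model):
    title = CharField()
    author = ForeignKeyField(Author, related_name='books')
    class Meta:
        database = db

# Author sorts before Book because Book carries the foreign key, so tables are
# created parent-first (and dropped in reverse by drop_model_tables).
create_model_tables([Book, Author], fail_silently=True)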
<|file_name|>setup.py<|end_file_name|><|fim▁begin|>from setuptools import setup version = '1.4' testing_extras = ['nose', 'coverage'] docs_extras = ['Sphinx'] setup( name='WebOb', version=version, description="WSGI request and response object", long_description="""\ WebOb provides wrappers around the WSGI request environment, and an object to help create WSGI responses. The objects map much of the specified behavior of HTTP, including header parsing and accessors for other standard parts of the environment. You may install the `in-development version of WebOb <https://github.com/Pylons/webob/zipball/master#egg=WebOb-dev>`_ with ``pip install WebOb==dev`` (or ``easy_install WebOb==dev``). * `WebOb reference <http://docs.webob.org/en/latest/reference.html>`_ * `Bug tracker <https://github.com/Pylons/webob/issues>`_ * `Browse source code <https://github.com/Pylons/webob>`_ * `Mailing list <http://bit.ly/paste-users>`_ * `Release news <http://docs.webob.org/en/latest/news.html>`_ * `Detailed changelog <https://github.com/Pylons/webob/commits/master>`_ """, classifiers=[ "Development Status :: 6 - Mature", "Intended Audience :: Developers", "License :: OSI Approved :: MIT License", "Topic :: Internet :: WWW/HTTP :: WSGI", "Topic :: Internet :: WWW/HTTP :: WSGI :: Application", "Topic :: Internet :: WWW/HTTP :: WSGI :: Middleware", "Programming Language :: Python :: 2.6", "Programming Language :: Python :: 2.7", "Programming Language :: Python :: 3.2", "Programming Language :: Python :: 3.3", "Programming Language :: Python :: 3.4", "Programming Language :: Python :: Implementation :: CPython", "Programming Language :: Python :: Implementation :: PyPy", ], keywords='wsgi request web http', author='Ian Bicking', author_email='[email protected]', maintainer='Pylons Project', url='http://webob.org/', license='MIT',<|fim▁hole|> tests_require=['nose'], extras_require = { 'testing':testing_extras, 'docs':docs_extras, }, )<|fim▁end|>
      packages=['webob'],
      zip_safe=True,
      test_suite='nose.collector',
<|file_name|>forward_messagebuffer.cpp<|end_file_name|><|fim▁begin|>/* This source file is part of KBEngine For the latest info, see http://www.kbengine.org/ Copyright (c) 2008-2016 KBEngine. KBEngine is free software: you can redistribute it and/or modify it under the terms of the GNU Lesser General Public License as published by the Free Software Foundation, either version 3 of the License, or (at your option) any later version. KBEngine is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU Lesser General Public License for more details. You should have received a copy of the GNU Lesser General Public License along with KBEngine. If not, see <http://www.gnu.org/licenses/>. */ #include "forward_messagebuffer.h" #include "network/bundle.h" #include "network/channel.h" #include "network/event_dispatcher.h" #include "network/network_interface.h" namespace KBEngine { KBE_SINGLETON_INIT(ForwardComponent_MessageBuffer); KBE_SINGLETON_INIT(ForwardAnywhere_MessageBuffer); //------------------------------------------------------------------------------------- ForwardComponent_MessageBuffer::ForwardComponent_MessageBuffer(Network::NetworkInterface & networkInterface) : Task(), networkInterface_(networkInterface), start_(false) { // dispatcher().addTask(this); } //------------------------------------------------------------------------------------- ForwardComponent_MessageBuffer::~ForwardComponent_MessageBuffer() { //dispatcher().cancelTask(this); clear(); } //------------------------------------------------------------------------------------- void ForwardComponent_MessageBuffer::clear() { MSGMAP::iterator iter = pMap_.begin(); for(; iter != pMap_.end(); ++iter) { std::vector<ForwardItem*>::iterator itervec = iter->second.begin(); for(; itervec != iter->second.end(); ++itervec) { SAFE_RELEASE((*itervec)->pBundle); SAFE_RELEASE((*itervec)->pHandler); SAFE_RELEASE((*itervec)); } } pMap_.clear(); } //------------------------------------------------------------------------------------- Network::EventDispatcher & ForwardComponent_MessageBuffer::dispatcher() { return networkInterface_.dispatcher(); } //------------------------------------------------------------------------------------- void ForwardComponent_MessageBuffer::push(COMPONENT_ID componentID, ForwardItem* pHandler) { if(!start_) { dispatcher().addTask(this); start_ = true; } pMap_[componentID].push_back(pHandler); } //------------------------------------------------------------------------------------- bool ForwardComponent_MessageBuffer::process() { if(pMap_.size() <= 0) { start_ = false; return false; } MSGMAP::iterator iter = pMap_.begin(); for(; iter != pMap_.end(); ) { Components::ComponentInfos* cinfos = Components::getSingleton().findComponent(iter->first); if(cinfos == NULL || cinfos->pChannel == NULL) return true; // 如果是mgr类组件需要判断是否已经初始化完成 if(g_componentType == CELLAPPMGR_TYPE || g_componentType == BASEAPPMGR_TYPE) { if(cinfos->state != COMPONENT_STATE_RUN) return true; } if(iter->second.size() == 0) { pMap_.erase(iter++); } else { int icount = 5; std::vector<ForwardItem*>::iterator itervec = iter->second.begin(); for(; itervec != iter->second.end(); ) { if (!(*itervec)->isOK()) return true; cinfos->pChannel->send((*itervec)->pBundle); (*itervec)->pBundle = NULL; if((*itervec)->pHandler != NULL) { (*itervec)->pHandler->process(); SAFE_RELEASE((*itervec)->pHandler); } SAFE_RELEASE((*itervec)); itervec = 
iter->second.erase(itervec); if(--icount <= 0) return true; } DEBUG_MSG(fmt::format("ForwardComponent_MessageBuffer::process(): size:{}.\n", iter->second.size())); iter->second.clear(); ++iter; } } return true; } //------------------------------------------------------------------------------------- ForwardAnywhere_MessageBuffer::ForwardAnywhere_MessageBuffer(Network::NetworkInterface & networkInterface, COMPONENT_TYPE forwardComponentType) : Task(), networkInterface_(networkInterface), forwardComponentType_(forwardComponentType), start_(false) { // dispatcher().addTask(this); } //------------------------------------------------------------------------------------- ForwardAnywhere_MessageBuffer::~ForwardAnywhere_MessageBuffer() { //dispatcher().cancelTask(this); clear(); } //------------------------------------------------------------------------------------- void ForwardAnywhere_MessageBuffer::clear() { std::vector<ForwardItem*>::iterator iter = pBundles_.begin(); for(; iter != pBundles_.end(); ) { SAFE_RELEASE((*iter)->pBundle); SAFE_RELEASE((*iter)->pHandler); } pBundles_.clear(); } //------------------------------------------------------------------------------------- Network::EventDispatcher & ForwardAnywhere_MessageBuffer::dispatcher() { return networkInterface_.dispatcher(); } //------------------------------------------------------------------------------------- void ForwardAnywhere_MessageBuffer::push(ForwardItem* pHandler) { if(!start_) { dispatcher().addTask(this); start_ = true; } pBundles_.push_back(pHandler); } //------------------------------------------------------------------------------------- bool ForwardAnywhere_MessageBuffer::process() { if(pBundles_.size() <= 0) { start_ = false; return false; } Components::COMPONENTS& cts = Components::getSingleton().getComponents(forwardComponentType_); size_t idx = 0; if(cts.size() > 0) { bool hasEnabled = (g_componentType != CELLAPPMGR_TYPE && g_componentType != BASEAPPMGR_TYPE); Components::COMPONENTS::iterator ctiter = cts.begin(); for(; ctiter != cts.end(); ++ctiter) { // 必须所有的组件频道都被设置,如果不是则等待。 if((*ctiter).pChannel == NULL) return true; if((*ctiter).state == COMPONENT_STATE_RUN) hasEnabled = true; } // 必须有可用的进程 if(!hasEnabled) return true; // 最多每个tick处理5个 int icount = 5; std::vector<ForwardItem*>::iterator iter = pBundles_.begin(); for (; iter != pBundles_.end(); ++iter) { if ((*iter)->isOK()) break; } // 必须所有的ForwardItem都处于ok状态 // 何时不处于ok状态?例如:cellappmgr中的ForwardItem需要等待cellapp初始化完毕之后才ok if (iter == pBundles_.end()) return true; for(; iter != pBundles_.end(); ) { Network::Channel* pChannel = NULL; if(g_componentType != CELLAPPMGR_TYPE && g_componentType != BASEAPPMGR_TYPE) { pChannel = cts[idx++].pChannel; if(idx >= cts.size()) idx = 0; } else { while(pChannel == NULL) { if(cts[idx].state != COMPONENT_STATE_RUN) { if(++idx >= cts.size()) idx = 0; continue; } pChannel = cts[idx++].pChannel; <|fim▁hole|> if(idx >= cts.size()) idx = 0; } } pChannel->send((*iter)->pBundle); (*iter)->pBundle = NULL; if((*iter)->pHandler != NULL) { (*iter)->pHandler->process(); SAFE_RELEASE((*iter)->pHandler); } SAFE_RELEASE((*iter)); iter = pBundles_.erase(iter); if(--icount <= 0) return true; } DEBUG_MSG(fmt::format("ForwardAnywhere_MessageBuffer::process(): size:{}.\n", pBundles_.size())); start_ = false; return false; } return true; } //------------------------------------------------------------------------------------- }<|fim▁end|>
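The ForwardAnywhere_MessageBuffer::process() loop above hands each buffered bundle to the next available component channel, skipping components that are not yet in the RUN state. A compact sketch of that selection step, written in Python for brevity; the component/channel attribute names are illustrative, not KBEngine API:

def pick_channel(components, idx):
    # Round-robin over components, skipping ones that are not running yet.
    # Assumes the caller has already verified that at least one component is
    # running, mirroring the hasEnabled check in the C++ code above.
    channel = None
    while channel is None:
        comp = components[idx]
        idx = (idx + 1) % len(components)
        if comp.state != 'RUN':
            continue
        channel = comp.channel
    return channel, idx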
<|file_name|>tree-test.js<|end_file_name|><|fim▁begin|>var vows = require("vows"), load = require("../load"), assert = require("../assert"); var suite = vows.describe("d3.layout.tree"); suite.addBatch({ "tree": {<|fim▁hole|> assert.deepEqual(t.nodes({ name: "1", children: [ {name: "1-1"}, {name: "1-2"}, {name: "1-3"} ] }).map(layout), [ {name: "1", depth: 0, x: 0.5, y: 0}, {name: "1-1", depth: 1, x: 0.16666666666666666, y: 1}, {name: "1-2", depth: 1, x: 0.5, y: 1}, {name: "1-3", depth: 1, x: 0.8333333333333333, y: 1} ]); }, "can handle an empty children array": function(tree) { var t = tree(); assert.deepEqual(t.nodes({children: []}).map(layout), [ {depth: 0, x: 0.5, y: 0} ]); assert.deepEqual(t.nodes({children: [ {children: []}, {children: [{}]}, {children: [{}]} ]}).map(layout), [ {depth: 0, x: 0.5, y: 0}, {depth: 1, x: 0.125, y: 0.5}, {depth: 1, x: 0.375, y: 0.5}, {depth: 2, x: 0.375, y: 1}, {depth: 1, x: 0.875, y: 0.5}, {depth: 2, x: 0.875, y: 1} ]); }, "can handle a single node": function(tree) { var t = tree(); assert.deepEqual(t.nodes({}).map(layout), [ {depth: 0, x: 0.5, y: 0} ]); } } }); function layout(node) { delete node.children; delete node.parent; return node; } suite.export(module);<|fim▁end|>
    topic: load("layout/tree").expression("d3.layout.tree"),
    "computes a simple tree layout": function(tree) {
      var t = tree();
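The expected x values in the first test above (0.1666…, 0.5, 0.8333…) are simply the centres of three equal slots in the unit interval, which is how the default d3 tree layout spreads sibling leaves. A quick arithmetic check:

n = 3
xs = [(i + 0.5) / n for i in range(n)]
# -> [0.16666666666666666, 0.5, 0.8333333333333334], i.e. 1/6, 1/2, 5/6
# (equal to the literals in the test up to the last floating-point digit)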
<|file_name|>gyptest-bare.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # Copyright (c) 2009 Google Inc. All rights reserved. # Use of this source code is governed by a BSD-style license that can be # found in the LICENSE file. """ Verifies actions which are not depended on by other targets get executed. """ import TestGyp test = TestGyp.TestGyp()<|fim▁hole|>test.build('bare.gyp', chdir='relocate/src') file_content = 'Hello from bare.py\n' test.built_file_must_match('out.txt', file_content, chdir='relocate/src') test.pass_test()<|fim▁end|>
test.run_gyp('bare.gyp', chdir='src')

test.relocate('src', 'relocate/src')
<|file_name|>window.rs<|end_file_name|><|fim▁begin|>use ::game::*; use ::ncurses::*; use std::ascii::AsciiExt; use std::char; use std::env; use std::fs::File; use std::io::BufReader; use std::io::prelude::*; use std::mem; use std::iter::repeat; use ::itertools::Itertools; use ::consts::*; pub struct NCursesWindow; pub fn create() -> Box<Window> { Box::new(NCursesWindow::new()) } pub trait Window { fn render(&self, &GameState); fn handle_input(&self, &mut GameState) -> Option<InputEvent>; } impl NCursesWindow { fn new() -> NCursesWindow {<|fim▁hole|> // Enable all mouse events for the current terminal. env::set_var("TERM", "xterm-1003"); setlocale(LcCategory::all, ""); initscr(); raw(); // Extended keyboard and mouse events. keypad(stdscr(), true); nodelay(stdscr(), true); noecho(); let mouse_events = ALL_MOUSE_EVENTS | REPORT_MOUSE_POSITION; mousemask(mouse_events as u32, None); mouseinterval(0); if has_mouse() { info!("Mouse driver initialized.") } else { info!("Error initializing mouse driver."); } NCursesWindow } } impl Drop for NCursesWindow { fn drop(&mut self) { refresh(); endwin(); } } impl Window for NCursesWindow { fn handle_input(&self, game_state: &mut GameState) -> Option<InputEvent> { let ch: i32 = getch(); // Allow WASD and HJKL controls const KEY_W: i32 = 'w' as i32; const KEY_A: i32 = 'a' as i32; const KEY_S: i32 = 's' as i32; const KEY_D: i32 = 'd' as i32; const KEY_H: i32 = 'h' as i32; const KEY_J: i32 = 'j' as i32; const KEY_K: i32 = 'k' as i32; const KEY_L: i32 = 'l' as i32; const KEY_ESC: i32 = 27; const KEY_ENTER: i32 = '\n' as i32; match ch as i32 { KEY_LEFT | KEY_A | KEY_H => Some(InputEvent::Left), KEY_RIGHT | KEY_D | KEY_L => Some(InputEvent::Right), KEY_UP | KEY_W | KEY_K => Some(InputEvent::Up), KEY_DOWN | KEY_S | KEY_J => Some(InputEvent::Down), KEY_MOUSE => { let mut event: MEVENT = unsafe { mem::uninitialized() }; assert!(getmouse(&mut event) == OK); game_state.cursor_position = (event.x, event.y); if event.bstate & (BUTTON1_PRESSED as u32) != 0 { Some(InputEvent::Action) } else { None } } KEY_ENTER => Some(InputEvent::Action), KEY_ESC => Some(InputEvent::Quit), _ => None, } } fn render(&self, game_state: &GameState) { refresh(); erase(); let starting_line = MARGIN + 5; // If the game is over, render the ending state and return early. 
if let Some(ref ending) = game_state.status { match *ending { GameEnding::Won => { curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE); let reader = BufReader::new(File::open("resources/vault_boy.txt").unwrap()); let mut line_counter = 0; for line in reader.lines().map(|l| l.unwrap()) { mvprintw(line_counter as i32, 0, &format!("{:^1$}", line, WINDOW_WIDTH as usize)); line_counter += 1; } mvprintw(line_counter as i32, 0, &format!("{:^1$}", "ACCESS GRANTED", WINDOW_WIDTH as usize)); } GameEnding::Lost => { curs_set(CURSOR_VISIBILITY::CURSOR_INVISIBLE); mvprintw((starting_line + ROWS) / 2, 0, &format!("{:^1$}", "TERMINAL LOCKED", WINDOW_WIDTH as usize)); mvprintw((starting_line + ROWS + 1) / 2, 0, &format!("{:^1$}", "PLEASE CONTACT AN ADMINISTRATOR", WINDOW_WIDTH as usize)); } } return; } // Print information at top mvprintw(MARGIN, MARGIN, "ROBCO INDUSTRIES (TM) TERMLINK PROTOCOL"); mvprintw(MARGIN + 1, MARGIN, "ENTER PASSWORD NOW"); mvprintw(LINES() - 1, 0, "Press Esc to exit"); // Print attempts remaining let visual_attempts = repeat("█") .take(game_state.attempts as usize) .join(" "); mvprintw(MARGIN + 3, MARGIN, &format!("{} ATTEMPT(S) LEFT: {}", game_state.attempts, visual_attempts)); // Draw random addresses and word columns let highlight_positions = match game_state.get_entity_at_cursor() { Some(cursor_entity) => { if cursor_entity.highlighted() { let (start, end) = cursor_entity.indices(); let start_x = start % WORD_COLUMN_WIDTH as usize; let start_y = start / WORD_COLUMN_WIDTH as usize; let end_x = end % WORD_COLUMN_WIDTH as usize; let end_y = end / WORD_COLUMN_WIDTH as usize; Some(((start_x, start_y), (end_x, end_y))) } else { None } } None => None, }; // Draw both columns for (column_index, column) in game_state.columns.iter().enumerate() { let word_data: Vec<char> = column.render_word_data().chars().collect::<Vec<char>>(); let word_chunks = word_data.chunks(WORD_COLUMN_WIDTH as usize); for (line, (address, word_chunk)) in column.addresses .iter() .zip(word_chunks.into_iter()) .enumerate() { let row = starting_line + line as i32; let col = MARGIN + column_index as i32 * (COLUMN_WIDTH + COLUMN_PADDING); let hex_address: String = format!("{:#01$X}", address, ADDRESS_COLUMN_WIDTH as usize); let word_row: String = word_chunk.iter().map(|&c| c).collect::<String>(); mvprintw(row, col, &(hex_address + " ")); if let Some(((start_x, start_y), (end_x, end_y))) = highlight_positions { if game_state.get_cursor_column_index().unwrap() != column_index { // We're not in the correct column, so just write out the line and // continue. addstr(&word_row); continue; } // If the highlight ends on the same line, we just iterate over the chunk and // turn on and off the highlight at the start and the end. if start_y == line && start_y == end_y { for (i, c) in word_row.chars().enumerate() { if i == start_x { attron(A_STANDOUT()); } if i == end_x { attroff(A_STANDOUT()); } addch(c as u32); } } else if start_y == line { for (i, c) in word_row.chars().enumerate() { if i == start_x { attron(A_STANDOUT()); } addch(c as u32); } attroff(A_STANDOUT()); } else if end_y == line { attron(A_STANDOUT()); for (i, c) in word_row.chars().enumerate() { if i == end_x { attroff(A_STANDOUT()); } addch(c as u32); } } else { addstr(&word_row); } } else { addstr(&word_row); } } } // Draw the console. let console_entry = if let Some(entity) = game_state.get_entity_at_cursor() { match *entity { CursorEntity::Word { ref word, .. } => word.to_ascii_uppercase(), CursorEntity::Brackets { ref pair, .. 
} => pair.0.to_string(), } } else { // If we're in a column, display the character at the cursor. Otherwise, display an empty // string. match game_state.get_cursor_column_index() { Some(..) => { let (x, y) = game_state.cursor_position; char::from_u32(mvinch(y, x) as u32).unwrap().to_string() } None => "".to_string(), } }; mvprintw(starting_line + ROWS - 1, MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN, &format!(">{}", console_entry)); // Draw the console entries, starting from the bottom. let mut entries_row = starting_line + ROWS - 3; for entry in game_state.entries.iter().rev() { let col = MARGIN + 2 * COLUMN_WIDTH + COLUMN_PADDING + MARGIN; // Only prints the lines if the entry would be within the address columns. let mvprintw_checked = |row, col, lines: &[&str]| { for (i, line) in lines.iter().rev().enumerate() { if row >= starting_line { mvprintw(row - i as i32, col, line); } } }; match *entry { Entry::Incorrect { num_correct, ref word } => { mvprintw_checked(entries_row, col, &[&format!(">{}", word.to_ascii_uppercase()), ">Entry denied", &format!(">{}/{} correct.", num_correct, 7)]); } Entry::Correct { ref word } => { mvprintw_checked(entries_row, col, &[&format!(">{}", word.to_ascii_uppercase()), ">Exact match!", ">Please wait", ">while system", ">is accessed."]); } Entry::DudRemoval => { mvprintw_checked(entries_row, col, &[">", ">Dud removed."]); } Entry::AllowanceReplenish => { mvprintw_checked(entries_row, col, &[">", ">Allowance", ">replenished."]); } } entries_row -= entry.display_rows() as i32; } // Move the cursor to the current position let (x, y) = game_state.cursor_position; mv(y, x); } }<|fim▁end|>
<|file_name|>inlinequeryresultcachedmpeg4gif.py<|end_file_name|><|fim▁begin|>#!/usr/bin/env python # # A library that provides a Python interface to the Telegram Bot API # Copyright (C) 2015-2018 # Leandro Toledo de Souza <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser Public License for more details. # # You should have received a copy of the GNU Lesser Public License # along with this program. If not, see [http://www.gnu.org/licenses/]. """This module contains the classes that represent Telegram InlineQueryResultMpeg4Gif.""" from telegram import InlineQueryResult class InlineQueryResultCachedMpeg4Gif(InlineQueryResult): """ Represents a link to a video animation (H.264/MPEG-4 AVC video without sound) stored on the Telegram servers. By default, this animated MPEG-4 file will be sent by the user with an optional caption. Alternatively, you can use :attr:`input_message_content` to send a message with the specified content instead of the animation. Attributes: type (:obj:`str`): 'mpeg4_gif'. id (:obj:`str`): Unique identifier for this result, 1-64 bytes. mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file. title (:obj:`str`): Optional. Title for the result. caption (:obj:`str`): Optional. Caption, 0-200 characters parse_mode (:obj:`str`): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. reply_markup (:class:`telegram.InlineKeyboardMarkup`): Optional. Inline keyboard attached to the message. input_message_content (:class:`telegram.InputMessageContent`): Optional. Content of the message to be sent instead of the MPEG-4 file. Args: id (:obj:`str`): Unique identifier for this result, 1-64 bytes. mpeg4_file_id (:obj:`str`): A valid file identifier for the MP4 file. title (:obj:`str`, optional): Title for the result. caption (:obj:`str`, optional): Caption, 0-200 characters parse_mode (:obj:`str`, optional): Send Markdown or HTML, if you want Telegram apps to show bold, italic, fixed-width text or inline URLs in the media caption. See the constants in :class:`telegram.ParseMode` for the available modes. reply_markup (:class:`telegram.InlineKeyboardMarkup`, optional): Inline keyboard attached to the message. input_message_content (:class:`telegram.InputMessageContent`, optional): Content of the message to be sent instead of the MPEG-4 file. **kwargs (:obj:`dict`): Arbitrary keyword arguments. """ <|fim▁hole|> def __init__(self, id, mpeg4_file_id, title=None, caption=None, reply_markup=None, input_message_content=None, parse_mode=None, **kwargs): # Required super(InlineQueryResultCachedMpeg4Gif, self).__init__('mpeg4_gif', id) self.mpeg4_file_id = mpeg4_file_id # Optionals if title: self.title = title if caption: self.caption = caption if parse_mode: self.parse_mode = parse_mode if reply_markup: self.reply_markup = reply_markup if input_message_content: self.input_message_content = input_message_content<|fim▁end|>
<|file_name|>scanresources.py<|end_file_name|><|fim▁begin|># -- coding: utf-8 -- # =========================================================================== # eXe # Copyright 2010-2011, Pedro Peña Pérez # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 59 Temple Place, Suite 330, Boston, MA 02111-1307 USA # =========================================================================== import os import mimetypes from chardet import universaldetector from chardet import latin1prober import re import sys from bs4 import BeautifulSoup, UnicodeDammit from urllib import quote, unquote from exe.engine.freetextidevice import FreeTextIdevice from exe.engine.resource import Resource from exe.engine.path import Path import logging log = logging.getLogger(__name__) class FixedLatin1Prober(latin1prober.Latin1Prober): """La clase Latin1Prober baja a la mitad la confidencia para mejorar los resultados de otros probers. Dejamos la confidencia en su valor real.""" def get_confidence(self): return latin1prober.Latin1Prober.get_confidence(self)*2 class FixedUniversalDetector(universaldetector.UniversalDetector): """Para usar FixedLatin1Prober""" def __init__(self): from chardet.mbcsgroupprober import MBCSGroupProber # multi-byte character sets from chardet.sbcsgroupprober import SBCSGroupProber # single-byte character sets universaldetector.UniversalDetector.__init__(self) self._mCharSetProbers = [MBCSGroupProber(), SBCSGroupProber(), FixedLatin1Prober()] def detect(aBuf): """Autodetecta la codificacion de una cadena usando FixedUniversalDetector""" u = FixedUniversalDetector() u.reset() u.feed(aBuf) u.close() return u.result def relpath(path, start): """Implementa os.path.relpath independientemente del sistema y arregla un fallo cuando start una unidad en windows""" if sys.platform[:3] == 'win': if not hasattr(os.path,'relpath'): r = nt_relpath(path,start).replace(os.sep,os.altsep) else: r = os.path.relpath(path, start).replace(os.sep,os.altsep) if os.path.splitdrive(start)[1] in ['',os.curdir,os.sep,os.sep + os.curdir]: r = r.replace(os.pardir + os.altsep,'') return r else: return os.path.relpath(path, start) curdir = '.' def nt_relpath(path, start=curdir): """Implementa os.path.relpath para Windows ya que en python 2.5 no esta implementada""" from ntpath import abspath, splitunc, sep, pardir, join if not path: raise ValueError("no path specified") start_list = abspath(start).split(sep) path_list = abspath(path).split(sep) if start_list[0].lower() != path_list[0].lower(): unc_path, rest = splitunc(path) unc_start, rest = splitunc(start) if bool(unc_path) ^ bool(unc_start): raise ValueError("Cannot mix UNC and non-UNC paths (%s and %s)" % (path, start)) else: raise ValueError("path is on drive %s, start on drive %s" % (path_list[0], start_list[0])) # Work out how much of the filepath is shared by start and path. 
for i in range(min(len(start_list), len(path_list))): if start_list[i].lower() != path_list[i].lower(): break else: i += 1 rel_list = [pardir] * (len(start_list)-i) + path_list[i:] if not rel_list: return curdir return join(*rel_list) class Link: def __init__(self, url, relative, referrer, tag = None, key = None, match = None): self.url = url self.referrer = referrer self.tag = tag self.key = key self.relative = relative self.match = match def __repr__(self): if self.tag and self.tag.name: return "<%s %s=%s>" % (self.tag.name,self.key,str(self.url)) else: return "<%s>" % (str(self.url)) class Url: def __init__(self, path, start='.'): self.path = path self.start = start self.relpath = relpath(self.path,self.start) parent = os.path.split(self.relpath)[0] self.parentpath = u"." if parent == u"" else parent self.basename = os.path.basename(self.relpath) if os.path.isdir(self.path): self.type = 'dir' elif os.path.isfile(self.path): self.type = 'file' self.mime, self.dataencoding = mimetypes.guess_type(self.path) self.links = [] self.plinks = [] self.rlinks = set() self.soup = None self.content = None self.contentUpdated = [] self.l = unquote(self.relpath) self.absl = self.start + os.path.sep + self.l def setSoup(self,soup): if self.mime == 'text/html': self.soup = soup def getSoup(self): return self.soup def setContent(self,content,encoding): self.content = content self.contentEncoding = encoding def getContent(self): return self.content def createNode(self,parent, name = None): self.node = parent.createChild() self.node.setTitle(name if name else self.basename) def createIdevice(self): self.idevice = FreeTextIdevice() self.idevice.edit = False self.node.addIdevice(self.idevice) return self.idevice def __str__(self): return self.relpath def __repr__(self): return self.relpath def addLink(self,link): self.links.append(link) def addPLink(self,link): self.plinks.append(link) def addRLink(self,link): self.rlinks.add(link) class Resources: cancel = False @classmethod def cancelImport(cls): cls.cancel = True #TODO Deshacer todo lo que se lleve hecho def __init__(self, baseurl, node, client=None): self.baseurl = baseurl.decode(sys.getfilesystemencoding()) self.node = node self.client = client self.numdirs = 0 resources = {} resources['mimes'] = {} resources['urls'] = {} url = Url(self.baseurl, self.baseurl) url.createNode(node, _('Contents of directory')) resources['urls'][url.relpath] = url try: for root, dirs, files in self._safewalk(self.baseurl): if self.cancel: return self.numdirs += 1 except UnicodeDecodeError: raise i = 1 for root, dirs, files in self._safewalk(self.baseurl): html = u"" idevice = None if self.client: self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Analizing directory %d of %d: %s') % (i, self.numdirs,root.encode(sys.getfilesystemencoding()))) for dir in dirs: if self.cancel: return path = root + os.path.sep + dir url = Url(path, self.baseurl) url.createNode(resources['urls'][url.parentpath].node) resources['urls'][url.relpath] = url for file in files: if self.cancel: return path = root + os.path.sep + file url = Url(path, self.baseurl) parent = resources['urls'][url.parentpath] if not idevice: idevice = parent.createIdevice() try: p = Path(path) p.setSalt(str(url)) r = Resource(idevice,p) except: continue url.href = 'resources/%s' % (quote(r.storageName)) html += u"<p><a href=%s>%s</p>\n" % (url.href,url.basename) resources['urls'][url.relpath] = url if url.mime in resources['mimes'].keys(): resources['mimes'][url.mime].append(url) else: 
resources['mimes'][url.mime] = [ url ] if idevice: idevice.setContent(html) i += 1 self.resources = resources def _safewalk(self, top): try: names = os.listdir(top) except error, err: return dirs, nondirs = [], [] for name in names: try: name.encode(sys.getfilesystemencoding()) except: return if os.path.isdir(os.path.join(top, name)): dirs.append(name) else: nondirs.append(name) yield top, dirs, nondirs for name in dirs: path = os.path.join(top, name) if not os.path.islink(path): for x in self._safewalk(path): yield x def _computeRelpaths(self): i = 1 for url in self.resources['urls'].values(): if url.type == 'dir': if self.client: self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Calculating relative paths to directory %d of %d: %s') % (i, self.numdirs, url.relpath.encode(sys.getfilesystemencoding()))) url.relpaths = [] absd = ''.join([self.baseurl, os.path.sep, url.relpath]) for link in self.resources['urls'].values(): if self.cancel: return if link.relpath.encode(sys.getfilesystemencoding()) == '.': continue rl = relpath(link.absl,absd) url.relpaths.append((link.l,rl)) i += 1 def _computeLinks(self): self._computeRelpaths() htmls = self.resources['mimes']['text/html'] total = len(htmls) i = 1 for url in htmls: if self.cancel: return if self.client: self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Analyzing HTML file labels %d of %d: %s') % (i, total, str(url))) content = open(url.path).read() encoding = detect(content)['encoding'] #ucontent = unicode(content,encoding) soup = BeautifulSoup(content,from_encoding=encoding) declaredHTMLEncoding = getattr(soup, 'declared_html_encoding') if declaredHTMLEncoding: content = UnicodeDammit(content,[declaredHTMLEncoding]).unicode_markup encoding = declaredHTMLEncoding else: pass url.setContent(content,encoding) url.setSoup(soup) for tag in soup.find_all(): if self.cancel: return if not tag.attrs: continue matches = [] for key, value in tag.attrs.iteritems(): if value == "": continue for val in value: unq_value = unquote(val) unq_low_value = unquote(val.lower()) for l, rl in self.resources['urls'][url.parentpath].relpaths: low_rl = rl.lower() if rl in unq_value: L = Link(self.resources['urls'][l],rl,url,tag,key,rl) matches.append(L) elif low_rl in unq_value: L = Link(self.resources['urls'][l],rl,url,tag,key,low_rl) matches.append(L) elif l in unq_value: L = Link(self.resources['urls'][l],rl,url,tag,key,l) matches.append(L) matches_final = [] for l1 in matches: matches_ = [ m for m in matches if m != l1 ] found = False for l2 in matches_: if re.search(re.escape(l1.relative),l2.relative): found = True if not found: matches_final.append(l1) if matches_final:<|fim▁hole|> i += 1 csss = self.resources['mimes']['text/css'] if 'text/css' in self.resources['mimes'].keys() else None csss_and_htmls = csss + htmls if csss else htmls total = len(csss_and_htmls) i = 1 for url in csss_and_htmls: if self.cancel: return if url.mime == 'text/css': tipo = 'CSS' else: tipo = 'HTML' content = url.getContent() if not content: content = open(url.path).read() encoding = detect(content)['encoding'] content = unicode(content,encoding) url.setContent(content,encoding) if self.client: self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Exhaustively analyzed file %s %d of %d: %s') % (tipo, i, total, str(url))) matches = [] for l, rl in self.resources['urls'][url.parentpath].relpaths: low_rl = rl.lower() if rl in content: L = Link(self.resources['urls'][l],rl,url,match=rl) 
matches.append(L) elif low_rl in content: L = Link(self.resources['urls'][l],rl,url,match=low_rl) matches.append(L) matches_final = [] for l1 in matches: matches_ = [ m for m in matches if m != l1 ] found = False for l2 in matches_: if re.search(re.escape(l1.relative),l2.relative): found = True if not found: matches_final.append(l1) if matches_final: for match in matches_final: if not [ link for link in url.links if link.relative == match.relative ]: url.addLink( match ) url.addRLink( str(match.url) ) i += 1 def _computeDepths(self,url): from collections import deque q = deque() q.append(([url],0)) while q: if self.cancel: return links, depth = q.pop() for link in links: if link in self.depths.keys(): self.depths[link] = depth if self.depths[link] > depth else self.depths[link] else: self.depths[link] = depth if self.depths[link] < depth: continue q.appendleft((self.resources['urls'][link].rlinks,depth + 1)) def insertNode(self,urls=['index.html','index.htm']): if self.cancel: return for url in urls: if url not in self.resources['urls'].keys(): continue else: self.depths = {} self._computeLinks() if self.cancel: return if self.client: self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Calculating depth of links')) self._computeDepths(url) if self.cancel: return self._insertNode(None,url) def _guessName(self,url): return str(url) soup = url.getSoup() if soup.title: return str(soup.title.string) names = {} for link in url.plinks: if link.tag.contents and isinstance(link.tag.contents[0],unicode) and link.tag.contents[0].lstrip() != u"": if link.tag.contents[0] in names.keys(): names[link.tag.contents[0]] += 1 else: names[link.tag.contents[0]] = 1 max = 0 max_name_ocurr = str(url) for name in names.keys(): if names[name] > max: max_name_ocurr = name max = names[name] return unquote(max_name_ocurr) def _insertNode(self, node, url, depth=0, idevice=None): if self.cancel: return if isinstance(url,str): link = None url = self.resources['urls'][url] elif isinstance(url,Link): link = url url = link.url if url.mime == 'text/html' and self.depths[str(url)] >= depth: if self.client: self.client.call('eXe.app.getController("Toolbar").updateImportProgressWindow',_(u'Inserting %s') % (str(url))) type = link.tag.name if link and link.tag else 'a' if type not in ['frame','iframe'] and node: node = node.createChild() node.setTitle(self._guessName(url)) if depth == 1: node.up() if not node: node = self.node parent = idevice if type in ['frame','iframe'] else None idevice = FreeTextIdevice(type=type,parent=parent) idevice.edit = False node.addIdevice(idevice) if url.type == "file": p = Path(self.baseurl + os.path.sep + str(url)) p.setSalt(str(url)) r = Resource(idevice,p) url.storageName = quote(r.storageName) if link and link.relative not in link.referrer.contentUpdated: if link.match: link.referrer.content = link.referrer.content.replace(link.match,'###resources###/%s' % (url.storageName)) else: link.referrer.content = link.referrer.content.replace(link.relative,'###resources###/%s' % (url.storageName)) link.referrer.contentUpdated.append(link.relative) if self.depths[str(url)] < depth: return for l in url.links: if self.cancel: return self._insertNode(node, l, depth+1, idevice) content = url.getContent() if content: content_w_resourcePaths = re.sub('###resources###/','resources/',content) content_wo_resourcePaths = re.sub('###resources###/','',content) if url.mime == "text/html" and idevice: soup = url.getSoup() if soup and soup.declaredHTMLEncoding: content_w_resourcePaths = 
re.sub(soup.declaredHTMLEncoding,'utf-8',content_w_resourcePaths,re.IGNORECASE) content_wo_resourcePaths = re.sub(soup.declaredHTMLEncoding,'utf-8',content_wo_resourcePaths,re.IGNORECASE) if soup and soup.find('frameset'): idevice.type = 'frameset' idevice.setContent(content_w_resourcePaths,content_wo_resourcePaths) f = open(r.path,"w") f.write(content_wo_resourcePaths.encode('utf-8')) f.close()<|fim▁end|>
                    for match in matches_final:
                        url.addLink( match )
                        url.addRLink( str(match.url) )
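The detect() helper in the scanresources.py record above wraps chardet's UniversalDetector feed/close cycle; the same one-shot result can be obtained with plain chardet, which may help when reading that code (the filename below is a placeholder):

import chardet

raw = open('page.html', 'rb').read()          # bytes read from disk
result = chardet.detect(raw)                  # {'encoding': ..., 'confidence': ...}
text = raw.decode(result['encoding'] or 'utf-8', errors='replace')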
<|file_name|>train.py<|end_file_name|><|fim▁begin|># Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. import mxnet as mx from mxnet.test_utils import * from data import get_avazu_data from linear_model import * import argparse import os parser = argparse.ArgumentParser(description="Run sparse linear classification " \ "with distributed kvstore", formatter_class=argparse.ArgumentDefaultsHelpFormatter) parser.add_argument('--num-epoch', type=int, default=5, help='number of epochs to train') parser.add_argument('--batch-size', type=int, default=8192, help='number of examples per batch') parser.add_argument('--kvstore', type=str, default=None, help='what kvstore to use', choices=["dist_sync", "dist_async", "local"]) parser.add_argument('--optimizer', type=str, default='sgd', help='what optimizer to use', choices=["adagrad", "sgd", "adam"]) AVAZU = { 'train': 'avazu-app', 'test': 'avazu-app.t', 'url': "https://www.csie.ntu.edu.tw/~cjlin/libsvmtools/datasets/binary/", # 1000000 + 1 since LibSVMIter uses zero-based indexing 'num_features': 1000001, } def batch_row_ids(data_batch): """ Generate row ids based on the current mini-batch """ return {'weight': data_batch.data[0].indices} def all_row_ids(data_batch): """ Generate row ids for all rows """ all_rows = mx.nd.arange(0, AVAZU['num_features'], dtype='int64') return {'weight': all_rows} if __name__ == '__main__': import logging head = '%(asctime)-15s %(message)s' logging.basicConfig(level=logging.INFO, format=head) # arg parser args = parser.parse_args() logging.info(args) num_epoch = args.num_epoch kvstore = args.kvstore batch_size = args.batch_size optimizer = args.optimizer # create kvstore kv = mx.kvstore.create(kvstore) if kvstore else None rank = kv.rank if kv else 0 num_worker = kv.num_workers if kv else 1 # dataset num_features = AVAZU['num_features'] data_dir = os.path.join(os.getcwd(), 'data') train_data = os.path.join(data_dir, AVAZU['train'])<|fim▁hole|> # data iterator train_data = mx.io.LibSVMIter(data_libsvm=train_data, data_shape=(num_features,), batch_size=batch_size, num_parts=num_worker, part_index=rank) eval_data = mx.io.LibSVMIter(data_libsvm=val_data, data_shape=(num_features,), batch_size=batch_size) # model # The positive class weight, says how much more we should upweight the importance of # positive instances in the objective function. # This is used to combat the extreme class imbalance. 
positive_class_weight = 2 model = linear_model(num_features, positive_class_weight) # module mod = mx.mod.Module(symbol=model, data_names=['data'], label_names=['softmax_label']) mod.bind(data_shapes=train_data.provide_data, label_shapes=train_data.provide_label) mod.init_params() optim = mx.optimizer.create(optimizer, learning_rate=0.01, rescale_grad=1.0/batch_size/num_worker) mod.init_optimizer(optimizer=optim, kvstore=kv) # use accuracy as the metric metric = mx.metric.create(['nll_loss']) # get the sparse weight parameter speedometer = mx.callback.Speedometer(batch_size, 100) logging.info('Training started ...') for epoch in range(num_epoch): nbatch = 0 metric.reset() for batch in train_data: nbatch += 1 # for distributed training, we need to manually pull sparse weights from kvstore mod.prepare(batch, sparse_row_id_fn=batch_row_ids) mod.forward_backward(batch) # update all parameters (including the weight parameter) mod.update() # update training metric mod.update_metric(metric, batch.label) speedometer_param = mx.model.BatchEndParam(epoch=epoch, nbatch=nbatch, eval_metric=metric, locals=locals()) speedometer(speedometer_param) # prepare the module weight with all row ids for inference. Alternatively, one could call # score = mod.score(val_iter, ['MSE'], sparse_row_id_fn=batch_row_ids) # to fetch the weight per mini-batch mod.prepare(None, all_row_ids) # evaluate metric on validation dataset score = mod.score(eval_data, ['nll_loss']) logging.info('epoch %d, eval nll = %s ' % (epoch, score[0][1])) # prepare the module weight with all row ids before making a checkpoint. mod.prepare(None, all_row_ids) mod.save_checkpoint("checkpoint", epoch) # reset the iterator for next pass of data train_data.reset() eval_data.reset() logging.info('Training completed.')<|fim▁end|>
    val_data = os.path.join(data_dir, AVAZU['test'])
    get_avazu_data(data_dir, AVAZU['train'], AVAZU['url'])
    get_avazu_data(data_dir, AVAZU['test'], AVAZU['url'])
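The batch_row_ids/all_row_ids callbacks in the training script above exist because the 'weight' parameter is row_sparse: with a kvstore, mod.prepare() pulls only the requested rows before each forward/backward pass. A condensed view of that pattern, restating the calls the script already makes:

def batch_row_ids(data_batch):
    # The CSR batch's indices are exactly the feature ids touched by this batch.
    return {'weight': data_batch.data[0].indices}

for batch in train_data:
    mod.prepare(batch, sparse_row_id_fn=batch_row_ids)  # pull just those weight rows
    mod.forward_backward(batch)
    mod.update()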
<|file_name|>PearsonsCorrelationCoefficeint.ts<|end_file_name|><|fim▁begin|>import * as indicators from "../"; export class PearsonsCorrelationCoefficeint extends indicators.AbstractIndicator<number> { static INDICATOR_NAME: string = "CORREL"; static INDICATOR_DESCR: string = "Pearson's Correlation Coefficient (r)"; static TIMEPERIOD_DEFAULT: number = 30; static TIMEPERIOD_MIN: number = 1; public timePeriod: number; private sumXX: number = 0.0; /* sum of x * x */ private sumYY: number = 0.0; /* sum of y */ private sumXY: number = 0.0; /* sum of x * y */ private sumX: number = 0.0; /* sum of x */ private sumY: number = 0.0; /* sum of y */ private sumXXHistory: indicators.Queue<number>; private sumYYHistory: indicators.Queue<number>; private sumXYHistory: indicators.Queue<number>; private sumXHistory: indicators.Queue<number>; private sumYHistory: indicators.Queue<number>; private trailingSumXX: number = 0.0; private trailingSumYY: number = 0.0;<|fim▁hole|> private trailingSumXY: number = 0.0; private trailingSumX: number = 0.0; private trailingSumY: number = 0.0; /* same as last_price_y except used to remove elements from the trailing summation */ private tmpReal: number = 0.0; private x: number; private lastPriceX: number; private y: number; private lastPriceY: number; private periodCounter: number; constructor(timePeriod: number = CORREL.TIMEPERIOD_DEFAULT) { super(CORREL.INDICATOR_NAME, CORREL.INDICATOR_DESCR); if (timePeriod < CORREL.TIMEPERIOD_MIN) { throw (new Error(indicators.generateMinTimePeriodError(this.name, CORREL.TIMEPERIOD_MIN, timePeriod))); } this.timePeriod = timePeriod; this.periodCounter = 0; this.lastPriceX = 0; this.lastPriceY = 0; this.sumXXHistory = new indicators.Queue<number>(); this.sumXXHistory.enqueue(0); this.sumXYHistory = new indicators.Queue<number>(); this.sumXYHistory.enqueue(0); this.sumXHistory = new indicators.Queue<number>(); this.sumXHistory.enqueue(0); this.sumYHistory = new indicators.Queue<number>(); this.sumYHistory.enqueue(0); this.sumYYHistory = new indicators.Queue<number>(); this.sumYYHistory.enqueue(0); this.setLookBack(this.timePeriod - 1); } receiveData(inputData1: number, inputData2: number): boolean { this.periodCounter++; if (this.periodCounter < this.timePeriod) { this.x = inputData1; this.y = inputData2; this.sumX += this.x; this.sumY += this.y; this.sumXY += this.x * this.y; this.sumXX += this.x * this.x; this.sumYY += this.y * this.y; this.sumXHistory.enqueue(this.sumX); this.sumXXHistory.enqueue(this.sumXX); this.sumXYHistory.enqueue(this.sumXY); this.sumYHistory.enqueue(this.sumY); this.sumYYHistory.enqueue(this.sumYY); } else if (this.periodCounter >= this.timePeriod) { this.x = inputData1; this.y = inputData2; this.sumX += this.x; this.sumY += this.y; this.sumXY += this.x * this.y; this.sumXX += this.x * this.x; this.sumYY += this.y * this.y; this.sumXHistory.enqueue(this.sumX); this.sumXXHistory.enqueue(this.sumXX); this.sumXYHistory.enqueue(this.sumXY); this.sumYHistory.enqueue(this.sumY); this.sumYYHistory.enqueue(this.sumYY); this.trailingSumX = this.sumXHistory.dequeue(); this.sumX -= this.trailingSumX; this.trailingSumXX = this.sumXXHistory.dequeue(); this.sumXX -= this.trailingSumXX; this.trailingSumXY = this.sumXYHistory.dequeue(); this.sumXY -= this.trailingSumXY; this.trailingSumY = this.sumYHistory.dequeue(); this.sumY -= this.trailingSumY; this.trailingSumYY = this.sumYYHistory.dequeue(); this.sumYY -= this.trailingSumYY; this.tmpReal = (this.sumXX - ((this.sumX * this.sumX) / this.timePeriod)) * (this.sumYY - 
((this.sumY * this.sumY) / this.timePeriod)); if (this.tmpReal !== 0) { this.setCurrentValue((this.sumXY - ((this.sumX * this.sumY) / this.timePeriod)) / Math.sqrt(this.tmpReal)); } else { this.setCurrentValue(0); } this.sumXX += this.trailingSumXX; this.sumXY += this.trailingSumXY; this.sumX += this.trailingSumX; this.sumY += this.trailingSumY; this.sumYY += this.trailingSumYY; } return this.isReady; } } export class CORREL extends PearsonsCorrelationCoefficeint { }<|fim▁end|>
<|file_name|>CommentsMenu.tsx<|end_file_name|><|fim▁begin|>import React, { useState } from 'react'; import { registerComponent, Components } from '../../../lib/vulcan-lib'; import MoreVertIcon from '@material-ui/icons/MoreVert'; import Menu from '@material-ui/core/Menu'; import { useCurrentUser } from '../../common/withUser'; import { useTracking } from "../../../lib/analyticsEvents"; const styles = (theme: ThemeType): JssStyles => ({ icon: { cursor: "pointer", fontSize:"1.4rem" }, menu: { position:"absolute", right:0, top:0, zIndex: theme.zIndexes.commentsMenu,<|fim▁hole|>const CommentsMenu = ({classes, className, comment, post, showEdit, icon}: { classes: ClassesType, className?: string, comment: CommentsList, post?: PostsMinimumInfo, showEdit: ()=>void, icon?: any, }) => { const [anchorEl, setAnchorEl] = useState<any>(null); // Render menu-contents if the menu has ever been opened (keep rendering // contents when closed after open, because of closing animation). const [everOpened, setEverOpened] = useState(false); const currentUser = useCurrentUser(); const { captureEvent } = useTracking({eventType: "commentMenuClicked", eventProps: {commentId: comment._id, itemType: "comment"}}) if (!currentUser) return null return ( <span className={className}> <span onClick={event => { captureEvent("commentMenuClicked", {open: true}) setAnchorEl(event.currentTarget) setEverOpened(true); }}> {icon ? icon : <MoreVertIcon className={classes.icon}/>} </span> <Menu onClick={event => { captureEvent("commentMenuClicked", {open: false}) setAnchorEl(null) }} open={Boolean(anchorEl)} anchorEl={anchorEl} > {everOpened && <Components.CommentActions currentUser={currentUser} comment={comment} post={post} showEdit={showEdit} />} </Menu> </span> ) } const CommentsMenuComponent = registerComponent('CommentsMenu', CommentsMenu, {styles}); declare global { interface ComponentTypes { CommentsMenu: typeof CommentsMenuComponent, } }<|fim▁end|>
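In the notation of the TypeScript indicator above (sumX, sumY, sumXY, sumXX, sumYY accumulated over a window of length n = timePeriod), the value it computes is the standard single-pass form of Pearson's r:

r = \frac{\sum xy \;-\; \tfrac{1}{n}\sum x \sum y}
         {\sqrt{\Bigl(\sum x^{2} - \tfrac{1}{n}\bigl(\sum x\bigr)^{2}\Bigr)
                \Bigl(\sum y^{2} - \tfrac{1}{n}\bigl(\sum y\bigr)^{2}\Bigr)}}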
} })
<|file_name|>space3d.py<|end_file_name|><|fim▁begin|>import abc from typing import Optional, Callable from math import fabs import itertools from copy import copy import numbers from array import array from .space2d import Point2D, Segment2D from ...orientations.orientations import * from ...mathematics.statistics import * from ...mathematics.quaternions import * from ...utils.types import check_type class Shape3D(object, metaclass=abc.ABCMeta): @abc.abstractmethod def area(self): """Calculate shape area""" @abc.abstractmethod def length(self): """Calculate shape area""" ''' @abc.abstractmethod def clone(self): """Create a clone of the shape""" ''' class Point3D: """ Cartesian point. Dimensions: 3D """ def __init__( self, x: numbers.Real, y: numbers.Real, z: numbers.Real = 0.0 ): """ Construct a Point instance. :param x: point x coordinate. :type x: numbers.Real. :param y: point y coordinate. :type y: numbers.Real. :param z: point z coordinate. :type z: numbers.Real. """ vals = [x, y] if any(map(lambda val: not isinstance(val, numbers.Real), vals)): raise Exception("X and y input values must be integer or float type") if not all(map(math.isfinite, vals)): raise Exception("X and y input values must be finite (#03)") self._x = float(x) self._y = float(y) self._z = float(z) @classmethod def fromVect(cls, vect: Vect3D) -> 'Point3D': """ :param vect: :return: """ return cls( x=vect.x, y=vect.y, z=vect.z ) @property def x(self) -> numbers.Real: """ Return the x coordinate of the current point. :return: x coordinate. :rtype: numbers.Real Examples: >>> Point3D(4, 3, 7).x 4.0 >>> Point3D(-0.39, 3, 7).x -0.39 """ return self._x @property def y(self) -> numbers.Real: """ Return the y coordinate of the current point. :return: y coordinate. :rtype: numbers.Real Examples: >>> Point3D(4, 3, 7).y 3.0 >>> Point3D(-0.39, 17.42, 7).y 17.42 """ return self._y @property def z(self) -> numbers.Real: """ Return the z coordinate of the current point. :return: z coordinate. :rtype: numbers.Real Examples: >>> Point3D(4, 3, 7).z 7.0 >>> Point3D(-0.39, 17.42, 8.9).z 8.9 """ return self._z def __iter__(self): """ Return the elements of a Point. :return: Examples; >>> x, y, z = Point3D(1,1) >>> x == 1 True >>> y == 1 True """ return (i for i in self.a()) def __repr__(self) -> str: return "Point3D({:.4f}, {:.4f}, {:.4f})".format(self.x, self.y, self.z) def __eq__(self, another: 'Point3D' ) -> bool: """ Return True if objects are equal. :param another: another point. :type another: Point. :raise: Exception. Example: >>> Point3D(1., 1., 1.) == Point3D(1, 1, 1) True >>> Point3D(1., 1., 1.) == Point3D(1, 1, 1) True >>> Point3D(1., 1., 1.) == Point3D(1, 1, -1) False """ if not isinstance(another, Point3D): raise Exception("Another instance must be a Point") return all([ self.x == another.x, self.y == another.y, self.z == another.z ] ) def __ne__(self, another: 'Point3D' ) -> bool: """ Return False if objects are equal. Example: >>> Point3D(1., 1., 1.) != Point3D(0., 0., 0.) True >>> Point3D(1., 1., 1.) != Point3D(1, 1, 1) False """ return not (self == another) def a(self) -> Tuple[numbers.Real, numbers.Real, numbers.Real]: """ Return the individual values of the point. :return: double array of x, y, z values Examples: >>> Point3D(4, 3, 7).a() (4.0, 3.0, 7.0) """ return self.x, self.y, self.z def __add__(self, another: 'Point3D') -> 'Point3D': """ Sum of two points. 
:param another: the point to add :type another: Point3D :return: the sum of the two points :rtype: Point3D :raise: Exception Example: >>> Point3D(1, 0, 0) + Point3D(0, 1, 1) Point3D(1.0000, 1.0000, 1.0000) >>> Point3D(1, 1, 1) + Point3D(-1, -1, -1) Point3D(0.0000, 0.0000, 0.0000) """ check_type(another, "Second point", Point3D) x0, y0, z0 = self x1, y1, z1 = another return Point3D( x=x0+x1, y=y0+y1, z=z0+z1 ) def __sub__(self, another: 'Point3D' ) -> 'Point3D': """Subtract two points. :param another: the point to subtract :type another: Point3D :return: the difference between the two points :rtype: Point3D :raise: Exception Example: >>> Point3D(1., 1., 1.) - Point3D(1., 1., 1.) Point3D(0.0000, 0.0000, 0.0000) >>> Point3D(1., 1., 3.) - Point3D(1., 1., 2.2) Point3D(0.0000, 0.0000, 0.8000) """ check_type(another, "Second point", Point3D) x0, y0, z0 = self x1, y1, z1 = another return Point3D( x=x0 - x1, y=y0 - y1, z=z0 - z1 ) def clone(self) -> 'Point3D': """ Clone a point. :return: a new point. :rtype: Point. """ return Point3D(*self.a()) def toXYZ(self) -> Tuple[numbers.Real, numbers.Real, numbers.Real]: """ Returns the spatial components as a tuple of three values. :return: the spatial components (x, y, z). :rtype: a tuple of three floats. Examples: >>> Point3D(1, 0, 3).toXYZ() (1.0, 0.0, 3.0) """ return self.x, self.y, self.z def toArray(self) -> np.ndarray: """ Return a Numpy array representing the point values. :return: Numpy array Examples: >>> np.allclose(Point3D(1, 2, 3).toArray(), np.array([ 1., 2., 3.])) True """ return np.asarray(self.toXYZ()) def to2d(self) -> Point2D: """ Projection on the x-y plane as a 2D point. Examples: >>> Point3D(2, 3, 4).to2d() Point2D(2.0000, 3.0000) """ return Point2D( x=self.x, y=self.y ) def pXY(self) -> 'Point3D': """ Projection on the x-y plane :return: projected object instance Examples: >>> Point3D(2, 3, 4).pXY() Point3D(2.0000, 3.0000, 0.0000) """ return Point3D(self.x, self.y, 0.0) def pXZ(self) -> 'Point3D': """ Projection on the x-z plane :return: projected object instance Examples: >>> Point3D(2, 3, 4).pXZ() Point3D(2.0000, 0.0000, 4.0000) """ return Point3D(self.x, 0.0, self.z) def pYZ(self) -> 'Point3D': """ Projection on the y-z plane :return: projected object instance Examples: >>> Point3D(2, 3, 4).pYZ() Point3D(0.0000, 3.0000, 4.0000) """ return Point3D(0.0, self.y, self.z) def deltaX(self, another: 'Point3D' ) -> Optional[numbers.Real]: """ Delta between x components of two Point Instances. :return: x coordinates difference value. :rtype: optional numbers.Real. :raise: Exception Examples: >>> Point3D(1, 2, 3).deltaX(Point3D(4, 7, 1)) 3.0 """ return another.x - self.x def deltaY(self, another: 'Point3D' ) -> Optional[numbers.Real]: """ Delta between y components of two Point Instances. :return: y coordinates difference value. :rtype: optional numbers.Real. Examples: >>> Point3D(1, 2, 3).deltaY(Point3D(4, 7, 1)) 5.0 """ return another.y - self.y def deltaZ(self, another: 'Point3D' ) -> Optional[numbers.Real]: """ Delta between z components of two Point Instances. :return: z coordinates difference value. :rtype: optional numbers.Real. Examples: >>> Point3D(1, 2, 3).deltaZ(Point3D(4, 7, 1)) -2.0 """ return another.z - self.z def distance(self, another: 'Point3D' ) -> numbers.Real: """ Calculate Euclidean spatial distance between two points. TODO: consider case of polar CRS :param another: another Point instance. :type another: Point. :return: the distance (when the two points have the same CRS). :rtype: numbers.Real. :raise: Exception. 
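The returned value is the Euclidean norm of the coordinate differences, i.e. sqrt(dx**2 + dy**2 + dz**2).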
Examples: >>> Point3D(1., 1., 1.).distance(Point3D(4., 5., 1)) 5.0 >>> Point3D(1, 1, 1).distance(Point3D(4, 5, 1)) 5.0 >>> Point3D(1, 1, 1).distance(Point3D(4, 5, 1)) 5.0 """ check_type(another, "Point", Point3D) return sqrt((self.x - another.x) ** 2 + (self.y - another.y) ** 2 + (self.z - another.z) ** 2) def dist_2d(self, another: 'Point3D' ) -> numbers.Real: """ Calculate horizontal (2D) distance between two points. TODO: consider case of polar CRS :param another: another Point instance. :type another: Point. :return: the 2D distance (when the two points have the same CRS). :rtype: numbers.Real. :raise: Exception. Examples: >>> Point3D(1., 1., 1.).dist_2d(Point3D(4., 5., 7.)) 5.0 """ check_type(another, "Second point", Point3D) return sqrt((self.x - another.x) ** 2 + (self.y - another.y) ** 2) def scale(self, scale_factor: numbers.Real ) -> 'Point3D': """ Create a scaled object. Note: it does not make sense for polar coordinates. TODO: manage polar coordinates cases OR deprecate and remove - after dependency check. Example; >>> Point3D(1, 0, 1).scale(2.5) Point3D(2.5000, 0.0000, 2.5000) >>> Point3D(1, 0, 1).scale(2.5) Point3D(2.5000, 0.0000, 2.5000) """ x, y, z = self.x * scale_factor, self.y * scale_factor, self.z * scale_factor return Point3D(x, y, z) def invert(self) -> 'Point3D': """ Create a new object with inverted direction. Note: it depends on scale method, that could be deprecated/removed. Examples: >>> Point3D(1, 1, 1).invert() Point3D(-1.0000, -1.0000, -1.0000) >>> Point3D(2, -1, 4).invert() Point3D(-2.0000, 1.0000, -4.0000) """ return self.scale(-1) def reflect_vertical(self) -> 'Point3D': """ Reflect a point along a vertical axis. :return: reflected point. :rtype: Point3D Examples: >>> Point3D(1,1,1).reflect_vertical() Point3D(-1.0000, -1.0000, 1.0000) """ x, y, z = self return Point3D( x=-x, y=-y, z=z ) def is_coincident(self, another: 'Point3D', tolerance: numbers.Real = MIN_SEPARATION_THRESHOLD ) -> bool: """ Check spatial coincidence of two points :param another: the point to compare. :type another: Point. :param tolerance: the maximum allowed distance between the two points. :type tolerance: numbers.Real. :return: whether the two points are coincident. :rtype: bool. :raise: Exception. Example: >>> Point3D(1., 0., -1.).is_coincident(Point3D(1., 1.5, -1.)) False >>> Point3D(1., 0., 0.).is_coincident(Point3D(1., 0., 0.)) True """ check_type(another, "Second point", Point3D) return self.distance(another) <= tolerance def already_present(self, pt_list: List['Point3D'], tolerance: numbers.Real = MIN_SEPARATION_THRESHOLD ) -> Optional[bool]: """ Determines if a point is already in a given point list, using an optional distance separation, :param pt_list: list of points. May be empty. :type pt_list: List of Points. :param tolerance: optional maximum distance between near-coincident point pair. :type tolerance: numbers.Real. :return: True if already present, False otherwise. :rtype: optional boolean. """ for pt in pt_list: if self.is_coincident(pt, tolerance=tolerance): return True return False def shift(self, sx: numbers.Real, sy: numbers.Real, sz: numbers.Real ) -> Optional['Point3D']: """ Create a new object shifted by given amount from the self instance. 
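:param sx: the shift along the x axis.
:type sx: numbers.Real.
:param sy: the shift along the y axis.
:type sy: numbers.Real.
:param sz: the shift along the z axis.
:type sz: numbers.Real.
:return: the shifted point.
:rtype: Point3D.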
Example: >>> Point3D(1, 1, 1).shift(0.5, 1., 1.5) Point3D(1.5000, 2.0000, 2.5000) >>> Point3D(1, 2, -1).shift(0.5, 1., 1.5) Point3D(1.5000, 3.0000, 0.5000) """ return Point3D(self.x + sx, self.y + sy, self.z + sz) def shiftByVect(self, v: Vect3D ) -> 'Point3D': """ Create a new point shifted from the self instance by given vector. :param v: the shift vector. :type v: Vect. :return: the shifted point. :rtype: Point. :raise: Exception Example: >>> Point3D(1, 1, 1).shiftByVect(Vect3D(0.5, 1., 1.5)) Point3D(1.5000, 2.0000, 2.5000) >>> Point3D(1, 2, -1).shiftByVect(Vect3D(0.5, 1., 1.5)) Point3D(1.5000, 3.0000, 0.5000) """ x, y, z = self sx, sy, sz = v.toXYZ() return Point3D(x + sx, y + sy, z + sz) def asVect(self) -> 'Vect3D': """ Create a vector based on the point coordinates Example: >>> Point3D(1, 1, 0).asVect() Vect3D(1.0000, 1.0000, 0.0000) >>> Point3D(0.2, 1, 6).asVect() Vect3D(0.2000, 1.0000, 6.0000) """ return Vect3D(self.x, self.y, self.z) def rotate(self, rotation_axis: RotationAxis, center_point: 'Point3D' = None ) -> 'Point3D': """ Rotates a point. :param rotation_axis: :param center_point: :return: the rotated point :rtype: Point3D Examples: >>> pt = Point3D(0,0,1) >>> rot_axis = RotationAxis(0,0,90) >>> center_pt = Point3D(0,0,0.5) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(0.5000, 0.0000, 0.5000) >>> center_pt = Point3D(0,0,1) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(0.0000, 0.0000, 1.0000) >>> center_pt = Point3D(0, 0, 2) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(-1.0000, 0.0000, 2.0000) >>> rot_axis = RotationAxis(0,0,180) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(-0.0000, 0.0000, 3.0000) >>> pt.rotate(rotation_axis=rot_axis) Point3D(0.0000, 0.0000, -1.0000) >>> pt = Point3D(1,1,1) >>> rot_axis = RotationAxis(0,90,90) >>> pt.rotate(rotation_axis=rot_axis) Point3D(1.0000, -1.0000, 1.0000) >>> rot_axis = RotationAxis(0,90,180) >>> pt.rotate(rotation_axis=rot_axis) Point3D(-1.0000, -1.0000, 1.0000) >>> center_pt = Point3D(1,1,1) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(1.0000, 1.0000, 1.0000) >>> center_pt = Point3D(2,2,10) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(3.0000, 3.0000, 1.0000) >>> pt = Point3D(1, 1, 2) >>> rot_axis = RotationAxis(135, 0, 180) >>> center_pt = Point3D(0,0,1) >>> pt.rotate(rotation_axis=rot_axis, center_point=center_pt) Point3D(-1.0000, -1.0000, 0.0000) """ if not center_point: center_point = Point3D( x=0.0, y=0.0, z=0.0 ) check_type(center_point, "Center point", Point3D) p_diff = self - center_point p_vect = p_diff.asVect() rot_vect = rotVectByAxis( v=p_vect, rot_axis=rotation_axis ) x, y, z = rot_vect rot_pt = Point3D( x=x, y=y, z=z ) transl_pt = center_point + rot_pt return transl_pt @classmethod def random(cls, lower_boundary: float = -MAX_SCALAR_VALUE, upper_boundary: float = MAX_SCALAR_VALUE ): """ Creates a random point. :return: random point :rtype: Point3D """ vals = [random.uniform(lower_boundary, upper_boundary) for _ in range(3)] return cls(*vals) def pack_to_points( xs: array, ys: array, zs: Optional[array] = None, ) -> List[Point3D]: # Side effects: None """ Create a list of points given a set of input arrays. 
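When zs is not provided, the z values default to 0.0.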
:param xs: array of x values :param ys: array of y values :param zs: optional array of z values :return: a list of Point3D instances """ if zs is None: zs = [0.0] * len(xs) pts = [] for x, y, z, t in zip(xs, ys, zs): pts.append( Point3D( x, y, z ) ) return pts class Segment3D: """ Segment is a geometric object defined by the straight line between two vertices. """ def __init__(self, start_pt: Point3D, end_pt: Point3D): """ Creates a segment instance provided the two points have the same CRS code. :param start_pt: the start point. :type: Point. :param end_pt: the end point. :type end_pt: Point. :return: the new segment instance if both points have the same georeferenced. :raises: CRSCodeException. """ check_type(start_pt, "Start point", Point3D) check_type(end_pt, "End point", Point3D) if start_pt.distance(end_pt) == 0.0: raise Exception("Source points cannot be coincident") self._start_pt = start_pt.clone() self._end_pt = end_pt.clone() @classmethod def fromVector(cls, point: Point3D, dir_vector: Vect3D): check_type(point, "Input point", Point3D) check_type(dir_vector, "Directional vector", Vect3D) start_pt = point end_pt = start_pt.shiftByVect(dir_vector) return cls( start_pt=start_pt, end_pt=end_pt ) @classmethod def from2D(cls, segment: Segment2D): check_type(segment, "Input segment", Segment2D) start_pt = Point3D( x=segment.start_pt.x, y=segment.start_pt.y, z=0.0 ) end_pt = Point3D( x=segment.end_pt.x, y=segment.end_pt.y, z=0.0 ) return cls( start_pt=start_pt, end_pt=end_pt ) def __repr__(self) -> str: """ Represents a Segment instance. :return: the Segment representation. :rtype: str. """ return "Segment3D(start_pt={}, end_pt={})".format( self.start_pt, self.end_pt ) @property def start_pt(self) -> Point3D: return self._start_pt @property def end_pt(self) -> Point3D: return self._end_pt def __iter__(self): """ Return the elements of a Segment, i.e., start and end point. """ return (i for i in [self.start_pt, self.end_pt]) def clone(self) -> 'Segment3D': return Segment3D(self._start_pt, self._end_pt) def increasing_x(self) -> 'Segment3D': <|fim▁hole|> if self.end_pt.x < self.start_pt.x: return Segment3D(self.end_pt, self.start_pt) else: return self.clone() def x_range(self) -> Tuple[numbers.Real, numbers.Real]: if self.start_pt.x < self.end_pt.x: return self.start_pt.x, self.end_pt.x else: return self.end_pt.x, self.start_pt.x def y_range(self) -> Tuple[numbers.Real, numbers.Real]: if self.start_pt.y < self.end_pt.y: return self.start_pt.y, self.end_pt.y else: return self.end_pt.y, self.start_pt.y def z_range(self) -> Tuple[numbers.Real, numbers.Real]: if self.start_pt.z < self.end_pt.z: return self.start_pt.z, self.end_pt.z else: return self.end_pt.z, self.start_pt.z def delta_x(self) -> numbers.Real: """ X delta between segment end point and start point. :return: the horizontal, x-parallel distance between segment end point and start point. """ return self.end_pt.x - self.start_pt.x def delta_y(self) -> numbers.Real: """ Y delta between segment end point and start point. :return: the horizontal, y-parallel distance between segment end point and start point. """ return self.end_pt.y - self.start_pt.y def delta_z(self) -> numbers.Real: """ Z delta between segment end point and start point. :return: the vertical distance between segment end point and start point. """ return self.end_pt.z - self.start_pt.z def as_vector(self) -> Vect3D: """ Convert a segment to a vector. 
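:return: the vector pointing from the segment start point to its end point.
:rtype: Vect3D.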
""" return Vect3D( x=self.delta_x(), y=self.delta_y(), z=self.delta_z() ) def length_horizontal(self) -> numbers.Real: return self.start_pt.dist_2d(self.end_pt) def length(self) -> numbers.Real: return self.start_pt.distance(self.end_pt) def ratio_delta_zs(self) -> Optional[numbers.Real]: """ Calculates the delta z - delta s ratio of a segment. :return: optional numbers.Real. """ len2d = self.length_horizontal() if len2d == 0.0: return None return self.delta_z() / len2d def slope_rad(self) -> Optional[numbers.Real]: """ Calculates the slope in radians of the segment. Positive is downward point, negative upward pointing. :return: optional numbers.Real. """ delta_zs = self.ratio_delta_zs() if delta_zs is None: return None else: return - math.atan(delta_zs) def vector(self) -> Vect3D: return Vect3D(self.delta_x(), self.delta_y(), self.delta_z() ) def antivector(self) -> Vect3D: """ Returns the vector pointing from the segment end to the segment start. :return: the vector pointing from the segment end to the segment start. :rtype: Vect. """ return self.vector().invert() def contains_pt(self, pt: Point3D ) -> bool: """ Checks whether a point is contained in a segment. :param pt: the point for which to check containement. :return: bool. :raise: Exception. Examples: >>> segment = Segment3D(Point3D(0, 0, 0), Point3D(1, 0, 0)) >>> segment.contains_pt(Point3D(0, 0, 0)) True >>> segment.contains_pt(Point3D(1, 0, 0)) True >>> segment.contains_pt(Point3D(0.5, 0, 0)) True >>> segment.contains_pt(Point3D(0.5, 0.00001, 0)) False >>> segment.contains_pt(Point3D(0.5, 0, 0.00001)) False >>> segment.contains_pt(Point3D(1.00001, 0, 0)) False >>> segment.contains_pt(Point3D(0.000001, 0, 0)) True >>> segment.contains_pt(Point3D(-0.000001, 0, 0)) False >>> segment.contains_pt(Point3D(0.5, 1000, 1000)) False >>> segment = Segment3D(Point3D(0, 0, 0), Point3D(0, 1, 0)) >>> segment.contains_pt(Point3D(0, 0, 0)) True >>> segment.contains_pt(Point3D(0, 0.5, 0)) True >>> segment.contains_pt(Point3D(0, 1, 0)) True >>> segment.contains_pt(Point3D(0, 1.5, 0)) False >>> segment = Segment3D(Point3D(0, 0, 0), Point3D(1, 1, 1)) >>> segment.contains_pt(Point3D(0.5, 0.5, 0.5)) True >>> segment.contains_pt(Point3D(1, 1, 1)) True >>> segment = Segment3D(Point3D(1,2,3), Point3D(9,8,2)) >>> segment.contains_pt(segment.pointAt(0.745)) True >>> segment.contains_pt(segment.pointAt(1.745)) False >>> segment.contains_pt(segment.pointAt(-0.745)) False >>> segment.contains_pt(segment.pointAt(0)) True """ check_type(pt, "Point", Point3D) segment_length = self.length() length_startpt_pt = self.start_pt.distance(pt) length_endpt_pt = self.end_pt.distance(pt) return areClose( a=segment_length, b=length_startpt_pt + length_endpt_pt ) def pointAt(self, scale_factor: numbers.Real ) -> Point3D: """ Returns a point aligned with the segment and lying at given scale factor, where 1 is segment length ans 0 is segment start. :param scale_factor: the scale factor, where 1 is the segment length. 
:type scale_factor: numbers.Real :return: Point at scale factor :rtype: Point3D Examples: >>> s = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s.pointAt(0) Point3D(0.0000, 0.0000, 0.0000) >>> s.pointAt(0.5) Point3D(0.5000, 0.0000, 0.0000) >>> s.pointAt(1) Point3D(1.0000, 0.0000, 0.0000) >>> s.pointAt(-1) Point3D(-1.0000, 0.0000, 0.0000) >>> s.pointAt(-2) Point3D(-2.0000, 0.0000, 0.0000) >>> s.pointAt(2) Point3D(2.0000, 0.0000, 0.0000) >>> s = Segment3D(Point3D(0,0,0), Point3D(0,0,1)) >>> s.pointAt(0) Point3D(0.0000, 0.0000, 0.0000) >>> s.pointAt(0.5) Point3D(0.0000, 0.0000, 0.5000) >>> s.pointAt(1) Point3D(0.0000, 0.0000, 1.0000) >>> s.pointAt(-1) Point3D(0.0000, 0.0000, -1.0000) >>> s.pointAt(-2) Point3D(0.0000, 0.0000, -2.0000) >>> s.pointAt(2) Point3D(0.0000, 0.0000, 2.0000) >>> s = Segment3D(Point3D(0,0,0), Point3D(1,1,1)) >>> s.pointAt(0.5) Point3D(0.5000, 0.5000, 0.5000) >>> s = Segment3D(Point3D(0,0,0), Point3D(4,0,0)) >>> s.pointAt(7.5) Point3D(30.0000, 0.0000, 0.0000) """ dx = self.delta_x() * scale_factor dy = self.delta_y() * scale_factor dz = self.delta_z() * scale_factor return Point3D( x=self.start_pt.x + dx, y=self.start_pt.y + dy, z=self.start_pt.z + dz ) def pointProjection(self, point: Point3D ) -> Point3D: """ Return the point projection on the segment. Examples: >>> s = Segment3D(start_pt=Point3D(0,0,0), end_pt=Point3D(1,0,0)) >>> p = Point3D(0.5, 1, 4) >>> s.pointProjection(p) Point3D(0.5000, 0.0000, 0.0000) >>> s = Segment3D(start_pt=Point3D(0,0,0), end_pt=Point3D(4,0,0)) >>> p = Point3D(7.5, 19.2, -14.72) >>> s.pointProjection(p) Point3D(7.5000, 0.0000, 0.0000) """ check_type(point, "Input point", Point3D) other_segment = Segment3D( self.start_pt, point ) scale_factor = self.vector().scalar_projection(other_segment.vector()) / self.length() return self.pointAt(scale_factor) def pointDistance(self, point: Point3D ) -> numbers.Real: """ Returns the point distance to the segment. :param point: the point to calculate the distance with :type point: Point3D :return: the distance of the point to the segment :rtype: numbers.Real Examples: >>> s = Segment3D(Point3D(0,0,0), Point3D(0,0,4)) >>> s.pointDistance(Point3D(-17.2, 0.0, -49)) 17.2 >>> s.pointDistance(Point3D(-17.2, 1.22, -49)) 17.24321315764553 """ check_type(point, "Input point", Point3D) #check_crs(self, point) point_projection = self.pointProjection(point) return point.distance(point_projection) def point_s(self, point: Point3D ) -> Optional[numbers.Real]: """ Calculates the optional distance of the point along the segment. A zero value is for a point coinciding with the start point. Returns None if the point is not contained in the segment. :param point: the point to calculate the optional distance in the segment. :type point: Point3D :return: the the optional distance of the point along the segment. """ check_type(point, "Input point", Point3D) #check_crs(self, point) if not self.contains_pt(point): return None return self.start_pt.distance(point) def scale(self, scale_factor ) -> 'Segment3D': """ Scale a segment by the given scale_factor. Start point does not change. :param scale_factor: the scale factor, where 1 is the segment length. :type scale_factor: numbers.Real :return: Point at scale factor :rtype: Point3D """ end_pt = self.pointAt(scale_factor) return Segment3D( self.start_pt, end_pt) def vertical_plane(self) -> Optional['CPlane3D']: """ Returns the vertical Cartesian plane containing the segment. :return: the vertical Cartesian plane containing the segment. :rtype: Optional[CPlane3D]. 
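Returns None when the segment has no horizontal extent.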
""" if self.length_horizontal() == 0.0: # collapsed segment return None elif self.length_horizontal() == 0.0: # vertical segment return None # arbitrary point on the same vertical as end point section_final_pt_up = self.end_pt.shift( sx=0.0, sy=0.0, sz=1000.0) return CPlane3D.fromPoints( pt1=self.start_pt, pt2=self.end_pt, pt3=section_final_pt_up) def same_start(self, another: 'Segment3D', tol: numbers.Real = 1e-12 ) -> bool: """ Check whether the two segments have the same start point. :param another: a segment to check for. :type another: Segment. :param tol: tolerance for distance between points. :type tol: numbers.Real. :return: whether the two segments have the same start point. :rtype: bool. Examples: >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s2 = Segment3D(Point3D(0,0,0), Point3D(0,1,0)) >>> s1.same_start(s2) True """ return self.start_pt.is_coincident( another=another.start_pt, tolerance=tol ) def same_end(self, another: 'Segment3D', tol: numbers.Real = 1e-12 ) -> bool: """ Check whether the two segments have the same end point. :param another: a segment to check for. :type another: Segment. :param tol: tolerance for distance between points. :type tol: numbers.Real. :return: whether the two segments have the same end point. :rtype: bool. Examples: >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s2 = Segment3D(Point3D(2,0,0), Point3D(1,0,0)) >>> s1.same_end(s2) True """ return self.end_pt.is_coincident( another=another.end_pt, tolerance=tol) def conn_to_other(self, another: 'Segment3D', tol: numbers.Real = 1e-12 ) -> bool: """ Check whether the first segment is sequentially connected to the second one. :param another: a segment to check for. :type another: Segment. :param tol: tolerance for distance between points. :type tol: numbers.Real. :return: whether the first segment is sequentially connected to the second one. :rtype: bool. Examples: >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s2 = Segment3D(Point3D(1,0,0), Point3D(2,0,0)) >>> s1.conn_to_other(s2) True """ return self.end_pt.is_coincident( another=another.start_pt, tolerance=tol) def other_connected(self, another: 'Segment3D', tol: numbers.Real = 1e-12 ) -> bool: """ Check whether the second segment is sequentially connected to the first one. :param another: a segment to check for. :type another: Segment. :param tol: tolerance for distance between points. :type tol: numbers.Real. :return: whether the second segment is sequentially connected to the first one. :rtype: bool. Examples: >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s2 = Segment3D(Point3D(-1,0,0), Point3D(0,0,0)) >>> s1.other_connected(s2) True """ return another.end_pt.is_coincident( another=self.start_pt, tolerance=tol) def segment_start_in(self, another: 'Segment3D' ) -> bool: """ Check whether the second segment contains the first segment start point. :param another: a segment to check for. :type another: Segment. :return: whether the second segment contains the first segment start point. :rtype: bool. 
Examples: >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s2 = Segment3D(Point3D(-0.5,0,0), Point3D(0.5,0,0)) >>> s1.segment_start_in(s2) True >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,1,1)) >>> s1.segment_start_in(s2) True >>> s1 = Segment3D(Point3D(0,1,0), Point3D(1,1,1)) >>> s1.segment_start_in(s2) False >>> s1 = Segment3D(Point3D(-1,-1,-1), Point3D(1,1,1)) >>> s1.segment_start_in(s2) False """ return another.contains_pt(self.start_pt) def segment_end_in(self, another: 'Segment3D' ) -> bool: """ Check whether the second segment contains the first segment end point. :param another: a segment to check for. :type another: Segment. :return: whether the second segment contains the first segment end point. :rtype: bool. Examples: >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s2 = Segment3D(Point3D(-0.5,0,0), Point3D(0.5,0,0)) >>> s1.segment_end_in(s2) False >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,1,1)) >>> s1.segment_end_in(s2) False >>> s1 = Segment3D(Point3D(0,1,0), Point3D(1,1,1)) >>> s2 = Segment3D(Point3D(1,1,1), Point3D(0.5,0,0)) >>> s1.segment_end_in(s2) True >>> s1 = Segment3D(Point3D(-1,-1,3), Point3D(1,1,3)) >>> s2 = Segment3D(Point3D(0,2,3), Point3D(2,0,3)) >>> s1.segment_end_in(s2) True """ return another.contains_pt(self.end_pt) def rotate(self, rotation_axis: 'RotationAxis', center_point: 'Point3D' = None ) -> 'Segment3D': """ Rotates a segment. :param rotation_axis: :param center_point: :return: the rotated segment :rtype: Segment3D Examples: >>> seg = Segment3D(Point3D(0,0,0), Point3D(0,0,1)) >>> rot_ax = RotationAxis(0, 0, 90) >>> seg.rotate(rot_ax) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> rot_ax = RotationAxis(0, 0, 180) >>> centr_pt = Point3D(0,0,0.5) >>> seg.rotate(rotation_axis=rot_ax, center_point=centr_pt) Segment3D(start_pt=Point3D(-0.0000, 0.0000, 1.0000), end_pt=Point3D(0.0000, 0.0000, 0.0000)) >>> seg = Segment3D(Point3D(0,0,0), Point3D(1,1,0)) >>> centr_pt = Point3D(1,0,0) >>> rot_ax = RotationAxis(0, 90, 90) >>> seg.rotate(rotation_axis=rot_ax, center_point=centr_pt) Segment3D(start_pt=Point3D(1.0000, 1.0000, 0.0000), end_pt=Point3D(2.0000, 0.0000, -0.0000)) >>> seg = Segment3D(Point3D(1,1,1), Point3D(0,0,0)) >>> rot_ax = RotationAxis(135, 0, 180) >>> centr_pt = Point3D(0.5,0.5,0.5) >>> seg.rotate(rotation_axis=rot_ax, center_point=centr_pt) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(1.0000, 1.0000, 1.0000)) """ start_pt, end_pt = self rotated_start_pt = start_pt.rotate( rotation_axis=rotation_axis, center_point=center_point ) rotated_end_pt = end_pt.rotate( rotation_axis=rotation_axis, center_point=center_point ) return Segment3D( start_pt=rotated_start_pt, end_pt=rotated_end_pt ) @classmethod def random(cls, lower_boundary: float = -MAX_SCALAR_VALUE, upper_boundary: float = MAX_SCALAR_VALUE): """ Creates a random segment. :return: random segment :rtype: Segment3D """ return cls( start_pt=Point3D.random(lower_boundary, upper_boundary), end_pt=Point3D.random(lower_boundary, upper_boundary) ) def densify_as_line3d(self, densify_distance ) -> 'Line3D': """ Densify a segment by adding additional points separated a distance equal to densify_distance. The result is no longer a Segment instance, instead it is a Line instance. 
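The segment start and end points are preserved; the intermediate points are placed at multiples of densify_distance from the start point.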
:param densify_distance: float :return: a Line3D """ length3d = self.length() segment_versor = self.as_vector().versor() generator_vector = segment_versor.scale(densify_distance) interpolated_line = Line3D( pts=[self.start_pt]) n = 0 while True: n += 1 shift_vector = generator_vector.scale(n) new_pt = self.start_pt.shift( shift_vector.x, shift_vector.y, shift_vector.z ) distance = self.start_pt.distance(new_pt) if distance >= length3d: break interpolated_line.add_pt(new_pt) interpolated_line.add_pt(self.end_pt) return interpolated_line def densify_as_pts3d(self, densify_distance ) -> List[Point3D]: return self.densify_as_line3d(densify_distance=densify_distance).pts() def densify_as_steps3d(self, densify_distance: numbers.Real ) -> array: """ Defines the array storing the incremental lengths according to the provided densify distance. :param densify_distance: the step distance. :type densify_distance: numbers.Real. :return: array storing incremental steps, with the last step being equal to the segment length. :rtype: array. """ if not isinstance(densify_distance, numbers.Real): raise Exception("Densify distance must be float or int") if not math.isfinite(densify_distance): raise Exception("Densify distance must be finite") if densify_distance <= 0.0: raise Exception("Densify distance must be positive") segment_length = self.length() s_list = [] n = 0 length = n * densify_distance while length < segment_length: s_list.append(length) n += 1 length = n * densify_distance s_list.append(segment_length) return array('d', s_list) def point_or_segment3d( point1: Point3D, point2: Point3D, tol: numbers.Real = PRACTICAL_MIN_DIST ) -> Union[Point3D, Segment3D]: """ Creates a point or segment based on the points distance. :param point1: first input point. :type point1: Point. :param point2: second input point. :type point2: Point. :param tol: distance tolerance between the two points. :type tol: numbers.Real. :return: point or segment based on their distance. :rtype: PointOrSegment. :raise: Exception. """ check_type(point1, "First point", Point3D) check_type(point2, "Second point", Point3D) if point1.distance(point2) <= tol: return Point3D( x=(point1.x + point2.x) / 2, y=(point1.y + point2.y) / 2, z=(point1.z + point2.z) / 2 ) else: return Segment3D( start_pt=point1, end_pt=point2 ) def intersect_segments3d( segment1: Segment3D, segment2: Segment3D, tol: numbers.Real = PRACTICAL_MIN_DIST ) -> Optional[Union[Point3D, Segment3D]]: """ Determines the optional point or segment intersection between the segment pair. :param segment1: the first segment :param segment2: the second segment :param tol: the distance tolerance for collapsing a intersection segment into a point :return: the optional point or segment intersection between the segment pair. 
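Returns None when the two segments do not intersect within the given tolerance.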
Examples: >>> s2 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(-2,0,0), Point3D(-1,0,0)) >>> intersect_segments3d(s1, s2) is None True >>> s1 = Segment3D(Point3D(-2,0,0), Point3D(0,0,0)) >>> intersect_segments3d(s1, s2) Point3D(0.0000, 0.0000, 0.0000) >>> s1 = Segment3D(Point3D(-2,0,0), Point3D(0.5,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(0.5000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(-2,0,0), Point3D(1,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(-2,0,0), Point3D(2,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(0,0,0), Point3D(0.5,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(0.5000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(0.25,0,0), Point3D(0.75,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.2500, 0.0000, 0.0000), end_pt=Point3D(0.7500, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(0.25,0,0), Point3D(1,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.2500, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(0.25,0,0), Point3D(1.25,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.2500, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(0,0,0), Point3D(1.25,0,0)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.0000, 0.0000, 0.0000), end_pt=Point3D(1.0000, 0.0000, 0.0000)) >>> s1 = Segment3D(Point3D(1,0,0), Point3D(1.25,0,0)) >>> intersect_segments3d(s1, s2) Point3D(1.0000, 0.0000, 0.0000) >>> s2 = Segment3D(Point3D(0,0,0), Point3D(1,1,1)) >>> s1 = Segment3D(Point3D(0.25,0.25,0.25), Point3D(0.75,0.75,0.75)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.2500, 0.2500, 0.2500), end_pt=Point3D(0.7500, 0.7500, 0.7500)) >>> s1 = Segment3D(Point3D(0.25,0.25,0.25), Point3D(1.75,1.75,1.75)) >>> intersect_segments3d(s1, s2) Segment3D(start_pt=Point3D(0.2500, 0.2500, 0.2500), end_pt=Point3D(1.0000, 1.0000, 1.0000)) >>> s1 = Segment3D(Point3D(0.25,0.25,0.25), Point3D(1.75,0,1.75)) >>> intersect_segments3d(s1, s2) Point3D(0.2500, 0.2500, 0.2500) >>> s1 = Segment3D(Point3D(0.25,1,0.25), Point3D(0.75,0.75,0.75)) >>> intersect_segments3d(s1, s2) Point3D(0.7500, 0.7500, 0.7500) >>> s2 = Segment3D(Point3D(-1,-1,-1), Point3D(1,1,1)) >>> s1 = Segment3D(Point3D(-1,1,1), Point3D(1,-1,-1)) >>> intersect_segments3d(s1, s2) Point3D(0.0000, 0.0000, 0.0000) """ check_type(segment1, "First segment", Segment3D) check_type(segment2, "Second segment", Segment3D) #check_crs(segment1, segment2) s1_startpt_inside = segment1.segment_start_in(segment2) s2_startpt_inside = segment2.segment_start_in(segment1) s1_endpt_inside = segment1.segment_end_in(segment2) s2_endpt_inside = segment2.segment_end_in(segment1) elements = [s1_startpt_inside, s2_startpt_inside, s1_endpt_inside, s2_endpt_inside] if all(elements): return segment1.clone() if s1_startpt_inside and s1_endpt_inside: return segment1.clone() if s2_startpt_inside and s2_endpt_inside: return segment2.clone() if s1_startpt_inside and s2_startpt_inside: return 
point_or_segment3d( segment1.start_pt, segment2.start_pt, tol=tol ) if s1_startpt_inside and s2_endpt_inside: return point_or_segment3d( segment1.start_pt, segment2.end_pt, tol=tol ) if s1_endpt_inside and s2_startpt_inside: return point_or_segment3d( segment2.start_pt, segment1.end_pt, tol=tol ) if s1_endpt_inside and s2_endpt_inside: return point_or_segment3d( segment1.end_pt, segment2.end_pt, tol=tol ) if s1_startpt_inside: return segment1.start_pt.clone() if s1_endpt_inside: return segment1.end_pt.clone() if s2_startpt_inside: return segment2.start_pt.clone() if s2_endpt_inside: return segment2.end_pt.clone() shortest_segm_or_pt = shortest_segment_or_point3d( segment1, segment2, tol=tol ) if not shortest_segm_or_pt: return None if not isinstance(shortest_segm_or_pt, Point3D): return None inters_pt = shortest_segm_or_pt if not segment1.contains_pt(inters_pt): return None if not segment2.contains_pt(inters_pt): return None return inters_pt class PointSegmentCollection3D(list): """ Collection of point or segment elements. """ def __init__( self, geoms: Optional[List[Union[Point3D, Segment3D]]] = None, # epsg_code: Optional[numbers.Integral] = None ): if geoms is not None: for geom in geoms: check_type(geom, "Spatial element", (Point3D, Segment3D)) """ if epsg_code is not None: check_type( var=epsg_code, name="EPSG code", expected_types=numbers.Integral ) if geoms is not None and epsg_code is not None: for geom in geoms: check_epsg( spatial_element=geom, epsg_code=epsg_code ) elif geoms is not None and len(geoms) > 0: epsg_code = geoms[0].epsg_code() """ if geoms is not None and len(geoms) > 0: super(PointSegmentCollection3D, self).__init__(geoms) else: super(PointSegmentCollection3D, self).__init__() # self.epsg_code = epsg_code def append(self, spatial_element: Union[Point3D, Segment3D] ) -> None: check_type( var=spatial_element, name="Spatial element", expected_types=(Point3D, Segment3D) ) """ if self.epsg_code is not None: check_epsg( spatial_element=spatial_element, epsg_code=self.epsg_code ) else: self.epsg_code = spatial_element.epsg_code() """ self.append(spatial_element) class Line3D: """ A line. """ def __init__(self, pts: Optional[List[Point3D]] = None): """ """ if pts is not None: check_type(pts, "List", list) for el in pts: check_type(el, "Point3D", Point3D) self._pts = pts else: self._pts = [] def __repr__(self) -> str: """ Represents a Line instance as a shortened text. :return: a textual shortened representation of a Line instance. :rtype: str. """ num_points = self.num_pts() if num_points == 0: txt = "Empty Line3D" else: x1, y1, z1 = self.start_pt() if num_points == 1: txt = f"Line3D with unique point: {x1:.4f}, {y1:.4f}, {z1:.4f}" else: x2, y2, z2 = self.end_pt() txt = f"Line3D with {self.num_pts()} points: ({x1:.4f}, {y1:.4f}, {z1:.4f}) ... ({x2:.4f}, {y2:.4f}, {z2:.4f})" return txt def pts(self): return self._pts def pt(self, ndx: numbers.Integral): """ """ return self._pts[ndx] def start_pt(self) -> Optional[Point3D]: """ Return the first point of a Line or None when no points. :return: the first point or None. """ return self.pt(0) if self.num_pts() > 0 else None def end_pt(self) -> Optional[Point3D]: """ Return the last point of a Line or None when no points. :return: the last point or None. """ return self.pt(-1) if self.num_pts() > 0 else None def add_pt(self, pt: Point3D): self._pts.append(pt) def num_pts(self): return len(self._pts) def segment(self, ndx: numbers.Integral ) -> Optional[Segment3D]: """ Returns the optional segment at index ndx. 
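Returns None when the points at index ndx and ndx + 1 are coincident.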
:param ndx: the segment index. :type ndx: numbers.Integral :return: the optional segment :rtype: Optional[Segment] """ start_pt = self.pt(ndx) end_pt = self.pt(ndx + 1) if start_pt.is_coincident(end_pt): return None else: return Segment3D( start_pt=self.pt(ndx), end_pt=self.pt(ndx + 1) ) def __iter__(self): """ Return each element of a Line, i.e., its segments. """ return (self.segment(i) for i in range(self.num_pts()-1)) def x_list(self) -> List[numbers.Real]: return list(map(lambda pt: pt.x, self._pts)) def y_list(self) -> List[numbers.Real]: return list(map(lambda pt: pt.y, self._pts)) def x_array(self): return np.asarray([pt.x for pt in self.pts()]) def y_array(self): return np.asarray([pt.y for pt in self.pts()]) def z_array(self): return np.asarray([pt.z for pt in self.pts()]) def xy_arrays(self): return self.x_array, self.y_array def x_min(self): return np.nanmin(list(map(lambda pt: pt.x, self._pts))) def x_max(self): return np.nanmax(list(map(lambda pt: pt.x, self._pts))) def y_min(self): return np.nanmin(list(map(lambda pt: pt.y, self._pts))) def y_max(self): return np.nanmax(list(map(lambda pt: pt.y, self._pts))) def z_min(self): return np.nanmin(list(map(lambda pt: pt.z, self._pts))) def z_max(self): return np.nanmax(list(map(lambda pt: pt.z, self._pts))) def as_segments(self): """ Convert to a list of segments. :return: list of Segment objects """ pts_pairs = zip(self.pts()[:-1], self.pts()[1:]) segments = [Segment3D(pt_a, pt_b) for (pt_a, pt_b) in pts_pairs] return segments ''' def densify_2d_line(self, sample_distance) -> 'Points': """ Densify a line into a new line instance, using the provided sample distance. Returned Line instance has coincident successive points removed. :param sample_distance: numbers.Real :return: Line instance """ if sample_distance <= 0.0: raise Exception(f"Sample distance must be positive. {sample_distance} received") segments = self.as_segments() densified_line_list = [segment.densify2d_asLine(sample_distance) for segment in segments] densifyied_multiline = MultiLine(densified_line_list) densifyied_line = densifyied_multiline.to_line() densifyied_line_wo_coinc_pts = densifyied_line.remove_coincident_points() return densifyied_line_wo_coinc_pts ''' def join(self, another) -> 'Line3D': """ Joins together two lines and returns the join as a new line without point changes, with possible overlapping points and orientation mismatches between the two original lines """ return Line3D(self.pts() + another.pts()) def length(self) -> numbers.Real: length = 0.0 for ndx in range(self.num_pts() - 1): length += self.pt(ndx).distance(self.pt(ndx + 1)) return length def length_2d(self) -> numbers.Real: length = 0.0 for ndx in range(self.num_pts() - 1): length += self.pt(ndx).to2d().distance(self.pt(ndx + 1).to2d()) return length def step_delta_z(self) -> List[numbers.Real]: """ Return the difference in elevation between consecutive points: z[ndx+1] - z[ndx] :return: a list of height differences. :rtype: list of floats. """ delta_z = [0.0] for ndx in range(1, self.num_pts()): delta_z.append(self.pt(ndx).z - self.pt(ndx - 1).z) return delta_z def step_lengths_3d(self) -> List[numbers.Real]: """ Returns the point-to-point 3D distances. It is the distance between a point and its previous one. The list has the same lenght as the source point list. :return: the individual 3D segment lengths. :rtype: list of floats. 
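The first element of the returned list is always 0.0.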
Examples: """ step_length_list = [0.0] for ndx in range(1, self.num_pts()): length = self.pt(ndx).distance(self.pt(ndx - 1)) step_length_list.append(length) return step_length_list ''' def step_lengths_2d(self) -> List[numbers.Real]: """ Returns the point-to-point 2D distances. It is the distance between a point and its previous one. The list has the same length as the source point list. :return: the individual 2D segment lengths. :rtype: list of floats. Examples: """ step_length_list = [0.0] for ndx in range(1, self.num_pts()): length = self.pt(ndx).dist2DWith(self.pt(ndx - 1)) step_length_list.append(length) return step_length_list ''' def incremental_length_2d(self): lIncrementalLengths = [] length = 0.0 lIncrementalLengths.append(length) for ndx in range(self.num_pts() - 1): length += self.pts()[ndx].dist_2d(self.pts()[ndx + 1]) lIncrementalLengths.append(length) return np.asarray(lIncrementalLengths) def incremental_length_3d(self) -> List[numbers.Real]: """ Returns the accumulated 3D segment lengths. :return: accumulated 3D segment lenghts :rtype: list of floats. """ return list(itertools.accumulate(self.step_lengths_3d())) ''' def incremental_length_2d(self) -> List[numbers.Real]: """ Returns the accumulated 2D segment lengths. :return: accumulated 2D segment lenghts :rtype: list of floats. """ return list(itertools.accumulate(self.step_lengths_2d())) ''' def reversed(self) -> 'Line3D': """ Return a Line instance with reversed point list. :return: a new Line instance. :rtype: Line. """ pts = [pt.clone() for pt in self.pts()] pts.reverse() return Line3D( pts=pts ) def slopes_degr(self) -> List[Optional[numbers.Real]]: """ Calculates the slopes (in degrees) of each Line segment. The first value is the slope of the first segment. The last value, always None, is the slope of the segment starting at the last point. The number of elements is equal to the number of points in the Line. :return: list of slopes (degrees). :rtype: List[Optional[numbers.Real]]. """ lSlopes = [] segments = self.as_segments() for segment in segments: vector = segment.vector() lSlopes.append(-vector.slope_degr()) # minus because vector convention is positive downward lSlopes.append(None) # None refers to the slope of the Segment starting with the last point return lSlopes def slopes_stats(self) -> Dict: """ Returns the line directional slope statistics. :return: the statistics parameters: min, max, mean, var, std. """ return get_statistics(self.slopes_degr()) def abs_slopes_degr(self) -> List[Optional[numbers.Real]]: return [abs(val) for val in self.slopes_degr()] def dir_slopes(self) -> np.ndarray: lSlopes = [] for ndx in range(self.num_pts() - 1): segment_start_pt = self.pts()[ndx] segment_end_pt = self.pts()[ndx + 1] if np.isnan(segment_start_pt.z) or np.isnan(segment_end_pt.z): lSlopes.append(np.nan) else: vector = Segment3D(self.pts()[ndx], self.pts()[ndx + 1]).vector() lSlopes.append(-vector.slope_degr()) # minus because vector convention is positive downward lSlopes.append(np.nan) # slope value for last point is unknown return np.asarray(lSlopes) def absolute_slopes(self) -> np.ndarray: return np.asarray(list(map(abs, self.dir_slopes()))) def abs_slopes_stats(self) -> Dict: """ Returns the line absolute slopes statistics. :return: the statistics parameters: min, max, mean, var, std. :rtype: Dictionary. """ return get_statistics(self.abs_slopes_degr()) def extremes_distance_3d(self) -> numbers.Real: """ Calculate the 3D distance between start and end points. 
:return: the 3D distance between start and end points :rtype: numbers.Real """ return self.pt(-1).distance(self.pt(0)) ''' def extremes_distance_2d(self) -> numbers.Real: """ Calculate the 2D distance between start and end points. :return: the 2D distance between start and end points """ return self.end_pt().dist2DWith(self.start_pt()) ''' def is_closed(self, tolerance: numbers.Real = MIN_SEPARATION_THRESHOLD ) -> bool: """ Determine if the line is 3D-closed. :param tolerance: the tolerance for considering the line closed :type tolerance: numbers.Real :return: whether the line is to be considered 3D-closed :rtype: bool """ return self.pt(-1).is_coincident(self.pt(0), tolerance=tolerance) ''' def isClosed_2d(self, tolerance: numbers.Real = MIN_SEPARATION_THRESHOLD ) -> bool: """ Determine if the line is 2D-closed. :param tolerance: the tolerance for considering the line closed :return: whether the line is to be considered 2D-closed """ return self.end_pt().isCoinc2D(self.start_pt(), tolerance=tolerance) ''' def walk_backward(self) -> 'Line3D': """ Create a new line by walking the line backward from the last point up to the first and thus closing it. :return: a closed line with zero area :rtype: 'Line' """ return Line3D(self.pts() + self.reversed()[1:]) def clone(self) -> 'Line3D': """ Clone a line. :return: the cloned line :rtype: Line3D """ return Line3D(self.pts()) ''' def close_2d(self) -> 'Points': """ Return a line that is 2D-closed. :return: a 2D-closed line :rtype: Points """ line = self.clone() if not line.isClosed_2d(): line.add_pt(line.start_pt()) return line ''' def close_3d(self) -> 'Line3D': """ Return a line that is 3D-closed. :return: a 3D-closed line :rtype: Line3D """ line = self.clone() if not line.is_closed(): line.add_pt(line.start_pt()) return line def remove_coincident_points(self) -> Optional['Line3D']: """ Remove coincident successive points :return: Line instance :rtype: Optional[Line3D] """ if self.num_pts() == 0: return new_line = Line3D( pts=[self.pt(0)] ) for ndx in range(1, self.num_pts()): if not self.pt(ndx).is_coincident(new_line.pt(-1)): new_line.add_pt(self.pt(ndx)) return new_line def intersectSegment(self, segment: Segment3D ) -> Optional[PointSegmentCollection3D]: """ Calculates the possible intersection between the line and a provided segment. 
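Returns None when the line has fewer than two points.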
:param segment: the input segment :return: the optional intersections, points or segments :raise: Exception """ if self.num_pts() <= 1: return check_type(segment, "Input segment", Segment3D) intersections = [intersect_segments3d(curr_segment, segment) for curr_segment in self if curr_segment is not None] intersections = list(filter(lambda val: val is not None, intersections)) intersections = PointSegmentCollection3D(intersections) return intersections class MultiLine3D: """ MultiLine is a list of Line objects """ def __init__(self, lines_list=None): if lines_list is None: lines_list = [] self._lines = lines_list @property def lines(self): return self._lines def add(self, line): return MultiLine3D(self.lines + [line]) def clone(self): return MultiLine3D(self.lines) @property def num_parts(self): return len(self.lines) @property def num_points(self): num_points = 0 for line in self.lines: num_points += line.num_pts return num_points @property def x_min(self): return np.nanmin([line.x_min for line in self.lines]) @property def x_max(self): return np.nanmax([line.x_max for line in self.lines]) @property def y_min(self): return np.nanmin([line.y_min for line in self.lines]) @property def y_max(self): return np.nanmax([line.y_max for line in self.lines]) @property def z_min(self): return np.nanmin([line.z_min for line in self.lines]) @property def z_max(self): return np.nanmax([line.z_max for line in self.lines]) def is_continuous(self): for line_ndx in range(len(self._lines) - 1): if not self.lines[line_ndx].pts[-1].coincident(self.lines[line_ndx + 1].pts[0]) or \ not self.lines[line_ndx].pts[-1].coincident(self.lines[line_ndx + 1].pts[-1]): return False return True def is_unidirectional(self): for line_ndx in range(len(self.lines) - 1): if not self.lines[line_ndx].pts[-1].coincident(self.lines[line_ndx + 1].pts[0]): return False return True def to_line(self): return Line3D([point for line in self.lines for point in line.pts]) ''' def crs_project(self, srcCrs, destCrs): lines = [] for line in self.lines: lines.append(line.crs_project(srcCrs, destCrs)) return MultiLine4D(lines) ''' ''' def densify_2d_multiline(self, sample_distance): lDensifiedLines = [] for line in self.lines: lDensifiedLines.append(line.densify_2d_line(sample_distance)) return MultiLine4D(lDensifiedLines) ''' def remove_coincident_points(self): cleaned_lines = [] for line in self.lines: cleaned_lines.append(line.remove_coincident_points()) return MultiLine3D(cleaned_lines) def shortest_segment_or_point3d( first_segment: Segment3D, second_segment: Segment3D, tol: numbers.Real = PRACTICAL_MIN_DIST ) -> Optional[Union[Segment3D, Point3D]]: """ Calculates the optional shortest segment - or the intersection point - between two lines represented by two segments. Adapted from: http://paulbourke.net/geometry/pointlineplane/ C code from: http://paulbourke.net/geometry/pointlineplane/lineline.c [ typedef struct { double x,y,z; } XYZ; /* Calculate the line segment PaPb that is the shortest route between two lines P1P2 and P3P4. Calculate also the values of mua and mub where Pa = P1 + mua (P2 - P1) Pb = P3 + mub (P4 - P3) Return FALSE if no solution exists. 
*/ int LineLineIntersect( XYZ p1,XYZ p2,XYZ p3,XYZ p4,XYZ *pa,XYZ *pb, double *mua, double *mub) { XYZ p13,p43,p21; double d1343,d4321,d1321,d4343,d2121; double numer,denom; p13.x = p1.x - p3.x; p13.y = p1.y - p3.y; p13.z = p1.z - p3.z; p43.x = p4.x - p3.x; p43.y = p4.y - p3.y; p43.z = p4.z - p3.z; if (ABS(p43.x) < EPS && ABS(p43.y) < EPS && ABS(p43.z) < EPS) return(FALSE); p21.x = p2.x - p1.x; p21.y = p2.y - p1.y; p21.z = p2.z - p1.z; if (ABS(p21.x) < EPS && ABS(p21.y) < EPS && ABS(p21.z) < EPS) return(FALSE); d1343 = p13.x * p43.x + p13.y * p43.y + p13.z * p43.z; d4321 = p43.x * p21.x + p43.y * p21.y + p43.z * p21.z; d1321 = p13.x * p21.x + p13.y * p21.y + p13.z * p21.z; d4343 = p43.x * p43.x + p43.y * p43.y + p43.z * p43.z; d2121 = p21.x * p21.x + p21.y * p21.y + p21.z * p21.z; denom = d2121 * d4343 - d4321 * d4321; if (ABS(denom) < EPS) return(FALSE); numer = d1343 * d4321 - d1321 * d4343; *mua = numer / denom; *mub = (d1343 + d4321 * (*mua)) / d4343; pa->x = p1.x + *mua * p21.x; pa->y = p1.y + *mua * p21.y; pa->z = p1.z + *mua * p21.z; pb->x = p3.x + *mub * p43.x; pb->y = p3.y + *mub * p43.y; pb->z = p3.z + *mub * p43.z; return(TRUE); } :param first_segment: the first segment :param second_segment: the second segment :param tol: tolerance value for collapsing a segment into the midpoint. :return: the optional shortest segment or an intersection point. """ check_type(second_segment, "Second Cartesian line", Segment3D) p1 = first_segment.start_pt p2 = first_segment.end_pt p3 = second_segment.start_pt p4 = second_segment.end_pt p13 = Point3D( x=p1.x - p3.x, y=p1.y - p3.y, z=p1.z - p3.z ) p43 = Point3D( x=p4.x - p3.x, y=p4.y - p3.y, z=p4.z - p3.z ) if p43.asVect().is_close_to_zero: return None p21 = Point3D( x=p2.x - p1.x, y=p2.y - p1.y, z=p2.z - p1.z, ) if p21.asVect().is_close_to_zero: return None d1343 = p13.x * p43.x + p13.y * p43.y + p13.z * p43.z d4321 = p43.x * p21.x + p43.y * p21.y + p43.z * p21.z d1321 = p13.x * p21.x + p13.y * p21.y + p13.z * p21.z d4343 = p43.x * p43.x + p43.y * p43.y + p43.z * p43.z d2121 = p21.x * p21.x + p21.y * p21.y + p21.z * p21.z denom = d2121 * d4343 - d4321 * d4321 if fabs(denom) < MIN_SCALAR_VALUE: return None numer = d1343 * d4321 - d1321 * d4343 mua = numer / denom mub = (d1343 + d4321 * mua) / d4343 pa = Point3D( x=p1.x + mua * p21.x, y=p1.y + mua * p21.y, z=p1.z + mua * p21.z ) pb = Point3D( x=p3.x + mub * p43.x, y=p3.y + mub * p43.y, z=p3.z + mub * p43.z ) intersection = point_or_segment3d( point1=pa, point2=pb, tol=tol ) return intersection ''' class ParamLine3D(object): """ parametric line srcPt: source Point l, m, n: ..... 
""" def __init__(self, srcPt, l, m, n): assert -1.0 <= l <= 1.0 assert -1.0 <= m <= 1.0 assert -1.0 <= n <= 1.0 self._srcPt = srcPt self._l = l self._m = m self._n = n def intersect_cartes_plane(self, cartes_plane): """ Return intersection point between parametric line and Cartesian plane """ # line parameters x1, y1, z1 = self._srcPt.x, self._srcPt.y, self._srcPt.z l, m, n = self._l, self._m, self._n # Cartesian plane parameters a, b, c, d = cartes_plane.a, cartes_plane.b, cartes_plane.c, cartes_plane.d try: k = (a * x1 + b * y1 + c * z1 + d) / (a * l + b * m + c * n) except ZeroDivisionError: return None return Point3D(x1 - l * k, y1 - m * k, z1 - n * k) ''' def eq_xy_pair(xy_pair_1, xy_pair_2): if xy_pair_1[0] == xy_pair_2[0] and xy_pair_1[1] == xy_pair_2[1]: return True return False ''' def remove_equal_consecutive_xypairs(xy_list): out_xy_list = [xy_list[0]] for n in range(1, len(xy_list)): if not eq_xy_pair(xy_list[n], out_xy_list[-1]): out_xy_list.append(xy_list[n]) return out_xy_list ''' class CPlane3D: """ Cartesian plane. Expressed by equation: ax + by + cz + d = 0 Note: CPlane3D is locational - its position in space is defined. This contrast with Plane, defined just by its attitude, but with undefined position """ def __init__(self, a: numbers.Real, b: numbers.Real, c: numbers.Real, d: numbers.Real ): if not isinstance(a, numbers.Real): raise Exception("Input value a must be float or int but is {}".format(type(a))) if not isinstance(b, numbers.Real): raise Exception("Input value b must be float or int but is {}".format(type(b))) if not isinstance(c, numbers.Real): raise Exception("Input value c must be float or int but is {}".format(type(c))) if not isinstance(d, numbers.Real): raise Exception("Input value d must be float or int but is {}".format(type(d))) norm = sqrt(a*a + b*b + c*c) self._a = float(a) / norm self._b = float(b) / norm self._c = float(c) / norm self._d = float(d) / norm def a(self) -> numbers.Real: """ Return a coefficient of a CPlane3D instance. Example: >>> CPlane3D(1, 0, 0, 2).a() 1.0 """ return self._a def b(self) -> numbers.Real: """ Return b coefficient of a CPlane3D instance. Example: >>> CPlane3D(1, 4, 0, 2).b() 0.9701425001453319 """ return self._b def c(self) -> numbers.Real: """ Return a coefficient of a CPlane3D instance. Example: >>> CPlane3D(1, 0, 5.4, 2).c() 0.9832820049844602 """ return self._c def d(self) -> numbers.Real: """ Return a coefficient of a CPlane3D instance. Example: >>> CPlane3D(1, 0, 0, 2).d() 2.0 """ return self._d def v(self) -> Tuple[numbers.Real, numbers.Real, numbers.Real, numbers.Real]: """ Return coefficients of a CPlane3D instance. Example: >>> CPlane3D(1, 1, 7, -4).v() (0.14002800840280097, 0.14002800840280097, 0.9801960588196068, -0.5601120336112039) """ return self.a(), self.b(), self.c(), self.d() @classmethod def fromPoints(cls, pt1, pt2, pt3) -> 'CPlane3D': """ Create a CPlane3D from three given Point instances. 
Example: >>> CPlane3D.fromPoints(Point3D(0, 0, 0), Point3D(1, 0, 0), Point3D(0, 1, 0)) CPlane3D(0.0000, 0.0000, 1.0000, 0.0000) >>> CPlane3D.fromPoints(Point3D(0, 0, 0), Point3D(1, 0, 0), Point3D(0, 1, 0)) CPlane3D(0.0000, 0.0000, 1.0000, 0.0000) >>> CPlane3D.fromPoints(Point3D(0, 0, 0), Point3D(0, 1, 0), Point3D(0, 0, 1)) CPlane3D(1.0000, 0.0000, 0.0000, 0.0000) >>> CPlane3D.fromPoints(Point3D(1,2,3), Point3D(2,3,4), Point3D(-1,7,-2)) CPlane3D(-0.7956, 0.2387, 0.5569, -1.3524) """ if not (isinstance(pt1, Point3D)): raise Exception("First input point should be Point but is {}".format(type(pt1))) if not (isinstance(pt2, Point3D)): raise Exception("Second input point should be Point but is {}".format(type(pt2))) if not (isinstance(pt3, Point3D)): raise Exception("Third input point should be Point but is {}".format(type(pt3))) matr_a = np.array( [[pt1.y, pt1.z, 1], [pt2.y, pt2.z, 1], [pt3.y, pt3.z, 1]]) matr_b = - np.array( [[pt1.x, pt1.z, 1], [pt2.x, pt2.z, 1], [pt3.x, pt3.z, 1]]) matr_c = np.array( [[pt1.x, pt1.y, 1], [pt2.x, pt2.y, 1], [pt3.x, pt3.y, 1]]) matr_d = - np.array( [[pt1.x, pt1.y, pt1.z], [pt2.x, pt2.y, pt2.z], [pt3.x, pt3.y, pt3.z]]) return cls( np.linalg.det(matr_a), np.linalg.det(matr_b), np.linalg.det(matr_c), np.linalg.det(matr_d) ) @classmethod def from_geological_plane(cls, geol_plane: Plane, pt: Point3D): """ Given a Plane instance and a provided Point instance, calculate the corresponding Plane instance. Example: >>> CPlane3D.from_geological_plane(Plane(0, 0), Point3D(0, 0, 0)) CPlane3D(0.0000, 0.0000, 1.0000, -0.0000) >>> CPlane3D.from_geological_plane(Plane(90, 45), Point3D(0, 0, 0)) CPlane3D(0.7071, 0.0000, 0.7071, -0.0000) >>> CPlane3D.from_geological_plane(Plane(0, 90), Point3D(0, 0, 0)) CPlane3D(0.0000, 1.0000, -0.0000, -0.0000) """ normal_versor = geol_plane.normDirectFrwrd().as_versor() a, b, c = normal_versor.x, normal_versor.y, normal_versor.z d = - (a * pt.x + b * pt.y + c * pt.z) return CPlane3D(a, b, c, d) def __repr__(self): return "CPlane3D({:.4f}, {:.4f}, {:.4f}, {:.4f})".format(*self.v()) def normVersor(self) -> Optional[Vect3D]: """ Return the versor normal to the cartesian plane. Examples: >>> CPlane3D(0, 0, 5, -2).normVersor() Vect3D(0.0000, 0.0000, 1.0000) >>> CPlane3D(0, 7, 0, 5).normVersor() Vect3D(0.0000, 1.0000, 0.0000) """ return Vect3D(self.a(), self.b(), self.c()).versor() def toPoint(self) -> Point3D: """ Returns a point lying in the plane (non-unique solution). Examples: >>> CPlane3D(0, 0, 1, -1).toPoint() Point3D(0.0000, 0.0000, 1.0000) """ point = Point3D( *pointSolution( np.array([[self.a(), self.b(), self.c()]]), np.array([-self.d()])) ) return point """ def gplane_point(self): ''' Converts a cartesian plane into a geological plane and a point lying in the plane (non-unique solution). Examples: >>> gpl, pt = CPlane3D(0, 0, 1, -1).gplane_point() >>> gpl GPlane(000.00, +00.00) >>> pt Point(0.0000, 0.0000, 1.0000, nan) ''' geol_plane = self.normVersor().gvect.normal_gplane point = Point4D(*point_solution(np.array([[self.a, self.b, self.c]]), np.array([-self.d]))) return geol_plane, point """ def intersVersor(self, another) -> Optional[Vect3D]: """ Return intersection versor for two intersecting planes. Return None for not intersecting planes. :param another: another Cartesian plane. :type another: CPlane3D. :return: the intersection line as a vector. :rtype: Optional[Vect]. :raise: Exception. 
Examples: >>> a = CPlane3D(1, 0, 0, 0) >>> b = CPlane3D(0, 0, 1, 0) >>> a.intersVersor(b) Vect3D(0.0000, -1.0000, 0.0000) >>> b = CPlane3D(-1, 0, 0, 0) # parallel plane, no intersection >>> a.intersVersor(b) is None True """ check_type(another, "Input Cartesian plane", CPlane3D) return self.normVersor().cross_product(another.normVersor()).versor() def intersPoint(self, another) -> Optional[Point3D]: """ Return point on intersection line (non-unique solution) for two planes. :param another: the second cartesian plane :type another: CPlane3D :return: the optional instersection point :rtype: Optional[Point] :raise: Exception Examples: >>> p_a = CPlane3D(1, 0, 0, 0) >>> p_b = CPlane3D(0, 0, 1, 0) >>> p_a.intersPoint(p_b) Point3D(0.0000, 0.0000, 0.0000) >>> p_b = CPlane3D(-1, 0, 0, 0) # parallel plane, no intersection >>> p_a.intersPoint(p_b) is None """ check_type(another, "Second plane", CPlane3D) # find a point lying on the intersection line (this is a non-unique solution) a = np.array([[self.a(), self.b(), self.c()], [another.a(), another.b(), another.c()]]) b = np.array([-self.d(), -another.d()]) x, y, z = pointSolution(a, b) if x is not None and y is not None and z is not None: return Point3D(x, y, z) else: return None def pointDistance(self, pt: Point3D ) -> numbers.Real: """ Calculate the distance between a point and the cartesian plane. Distance expression: distance = a * x1 + b * y1 + c * z1 + d where a, b, c and d are plane parameters of the plane equation: a * x + b * y + c * z + d = 0 and x1, y1, and z1 are the point coordinates. :param pt: the point to calculate distance with. :type pt: Point. :return: the distance value. :rtype: numbers.Real. :raise: Exception. Examples: >>> cpl = CPlane3D(0, 0, 1, 0) >>> pt = Point3D(0, 0, 1) >>> cpl.pointDistance(pt) 1.0 >>> pt = Point3D(0, 0, 0.5) >>> cpl.pointDistance(pt) 0.5 >>> pt = Point3D(0, 0, -0.5) >>> cpl.pointDistance(pt) -0.5 >>> pt = Point3D(10, 20, 0.0) >>> cpl.pointDistance(pt) 0.0 """ check_type(pt, "Input point", Point3D) return self.a() * pt.x + self.b() * pt.y + self.c() * pt.z + self.d() def isPointInPlane(self, pt: Union[Point3D, Point2D] ) -> bool: """ Check whether a point lies in the current plane. :param pt: the point to check. :return: whether the point lies in the current plane. :raise: Exception. Examples: >>> pl = CPlane3D(0, 0, 1, 0) >>> pt = Point3D(0, 1, 0) >>> pl.isPointInPlane(pt) True >>> pl = CPlane3D(0, 0, 1, 0) >>> pt = Point3D(0, 1, 0) >>> pl.isPointInPlane(pt) True """ check_type(pt, "Input point", (Point2D, Point3D)) if isinstance(pt, Point2D): pt = Point3D( pt.x, pt.y, 0.0 ) if abs(self.pointDistance(pt)) < MIN_SEPARATION_THRESHOLD: return True else: return False def angle_as_degrees(self, another: 'CPlane3D' ) -> numbers.Real: """ Calculate angle (in degrees) between two planes. :param another: the CPlane3D instance to calculate angle with. :type another: CPlane3D. :return: the angle (in degrees) between the two planes. :rtype: numbers.Real. :raise: Exception. 
Examples: >>> CPlane3D(1,0,0,0).angle_as_degrees(CPlane3D(0,1,0,0)) 90.0 >>> CPlane3D(1,0,0,0).angle_as_degrees(CPlane3D(0,1,0,0)) 90.0 >>> CPlane3D(1,0,0,0).angle_as_degrees(CPlane3D(1,0,1,0)) 45.0 >>> CPlane3D(1,0,0,0).angle_as_degrees(CPlane3D(1,0,0,0)) 0.0 """ check_type(another, "Second Cartesian plane", CPlane3D) angle_degr = self.normVersor().angle_as_degrees(another.normVersor()) if angle_degr > 90.0: angle_degr = 180.0 - angle_degr return angle_degr class ParamLine3D(object): """ parametric line srcPt: source Point l, m, n: line coefficients """ def __init__(self, srcPt, l, m, n): for v in (l, m, n): if not (-1.0 <= v <= 1.0): raise Exception("Parametric line values must be in -1 to 1 range") self._srcPt = srcPt.clone() self._l = l self._m = m self._n = n ''' def epsg(self) -> numbers.Integral: """ Return the EPSG code of the parametric line. """ return self._srcPt.epsg_code ''' def intersect_cartes_plane(self, cartes_plane) -> Optional[Point3D]: """ Return intersection point between parametric line and Cartesian plane. :param cartes_plane: a Cartesian plane: :type cartes_plane: CPlane3D. :return: the intersection point between parametric line and Cartesian plane. :rtype: Point. :raise: Exception. """ if not isinstance(cartes_plane, CPlane3D): raise Exception("Method argument should be a Cartesian plane but is {}".format(type(cartes_plane))) ''' if cartes_plane.epsg_code != self.epsg_code: raise Exception("Parametric line has EPSG {} while Cartesian plane has {}".format(self.epsg_code, cartes_plane.epsg_code)) ''' # line parameters x1, y1, z1 = self._srcPt.x, self._srcPt.y, self._srcPt.z l, m, n = self._l, self._m, self._n # Cartesian plane parameters a, b, c, d = cartes_plane.a(), cartes_plane.b(), cartes_plane.c(), cartes_plane.d() try: k = (a * x1 + b * y1 + c * z1 + d) / (a * l + b * m + c * n) except ZeroDivisionError: return None return Point3D( x=x1 - l * k, y=y1 - m * k, z=z1 - n * k ) def closure_plane_from_geo( plane: Plane, src_pt: Point3D ) -> Callable: """ Closure that embodies the analytical formula for a given, non-vertical plane. This closure is used to calculate the z value from given horizontal coordinates (x, y). :param plane: the geological plane :param src_pt: the 3D point expressing a location point contained by the plane. :return: lambda (closure) expressing an analytical formula for deriving z given x and y values. """ x0 = src_pt.x y0 = src_pt.y z0 = src_pt.z # slope of the line parallel to the x axis and contained by the plane a = plane.slope_x_dir() # slope of the line parallel to the y axis and contained by the plane b = plane.slope_y_dir() return lambda x, y: a * (x - x0) + b * (y - y0) + z0 class Points3D: """ Collection of points. """ def __init__(self, x_array: array, y_array: array, z_array: array ): """ Construct a point list from a set of array values. 
:param x_array: the array storing the x values :param y_array: the array storing the y values :param z_array: the optional array storing the z values """ check_type( var=x_array, name="X array", expected_types=array ) check_type( var=y_array, name="Y array", expected_types=array ) array_length = len(x_array) if len(y_array) != array_length: raise Exception(f"Y array has length {len(y_array)} while X array has length {len(x_array)}") check_type( var=z_array, name="Z array", expected_types=array ) if len(z_array) != array_length: raise Exception(f"Z array has length {len(z_array)} while X array has length {len(x_array)}") self._x_array = x_array self._y_array = y_array self._z_array = z_array def num_pts(self ) -> int: """ Numbers of points. """ return len(self._x_array) @classmethod def fromPoints(cls, points: List[Point3D] ): """ :param points: list of points """ for ndx, point in enumerate(points): check_type(point, "Input point {}".format(ndx), Point3D) return Points3D( x_array=array('d', [p.x for p in points]), y_array=array('d', [p.y for p in points]), z_array=array('d', [p.z for p in points]) ) @property def xs(self ) -> array: """ Returns a copy of the points x values. :return: points x values """ return copy(self._x_array) @property def ys(self ) -> array: """ Returns a copy of the points y values. :return: points y values """ return copy(self._y_array) @property def zs(self ) -> array: """ Returns a copy of the points z values. :return: points z values """ return copy(self._z_array) def pt(self, pt_ndx: numbers.Integral) -> Point3D: """ Extract the point at index pt_ndx. :param pt_ndx: point index. :type pt_ndx: numbers.Integral. :return: the extracted Point instance. :rtype: Point. Examples: """ return Point3D( x=self._x_array[pt_ndx], y=self._y_array[pt_ndx], z=self._z_array[pt_ndx] ) def values_at(self, ndx: numbers.Integral ) -> Tuple[float, float, float]: """ Return the values at given index. :param ndx: the index of the point values to extract :type ndx: numbers.Integral :return: the x, y and z values """ return ( self._x_array[ndx], self._y_array[ndx], self._z_array[ndx] ) def pts(self): return [Point3D(*self.values_at(ndx)) for ndx in range(self.num_pts())] def __repr__(self) -> str: """ Represents a Points instance as a shortened text. :return: a textual shortened representation of a Points instance. """ num_points = self.num_pts() if num_points == 0: txt = "Empty Points3D" else: x1, y1, z1 = self.values_at(0) if num_points == 1: txt = "Points3D with unique point: {.4f}.{.4f},{.4f}".format(x1, y1, z1) else: x2, y2, z2 = self.values_at(self.num_pts()-1) txt = "Points3D with {} points: ({:.4f}, {:.4f}, {:.4f}) ... ({:.4f}, {:.4f}, {:.4f})".format( num_points, x1, y1, z1, x2, y2, z2) return txt def __iter__(self): """ Return each point. """ return (self.pt(ndx) for ndx in range(self.num_pts())) def asXyzArray(self): """ Convert to a Numpy x-y-z array """ return np.vstack( ( self.xs, self.ys, self.zs ) ).transpose() def add_pt(self, pt) -> None: """ In-place transformation of the original Points3D instance by adding a new point at the end. :param pt: the point to add :return: nothing """ self._x_array.append(pt.x) self._y_array.append(pt.y) self._z_array.append(pt.z) def add_pts(self, pts: 'Points3D' ): """ In-place transformation of the original Points instance by adding a new set of points at the end. :param pts: list of Points. 
""" check_type(pts, "Points", Points3D) self._x_array.extend(pts.xs) self._y_array.extend(pts.ys) self._z_array.extend(pts.zs) def x_min(self) -> Optional[numbers.Real]: """ Optional minimum of x values. :return: the optional minimum of x values. :rtype: Optional[numbers.Real] """ return np.nanmin(self._x_array) if self.num_pts() > 0 else None def x_max(self) -> Optional[numbers.Real]: """ Optional maximum x value. """ return np.nanmax(self._x_array) if self.num_pts() > 0 else None def x_mean(self) -> Optional[numbers.Real]: """ Optional mean x value. """ return np.nanmean(self._x_array) if self.num_pts() > 0 else None def y_min(self) -> Optional[numbers.Real]: """ Optional minimum y value. """ return np.nanmin(self._y_array) if self.num_pts() > 0 else None def y_max(self) -> Optional[numbers.Real]: """ Optional maximum y value. """ return np.nanmax(self._y_array) if self.num_pts() > 0 else None def y_mean(self) -> Optional[numbers.Real]: """ Optional mean y value. """ return np.nanmean(self._y_array) if self.num_pts() > 0 else None def z_min(self) -> Optional[numbers.Real]: """ Optional minimum z value. """ return np.nanmin(self._z_array) if self.num_pts() > 0 else None def z_max(self) -> Optional[numbers.Real]: """ Optional maximum z value. """ return np.nanmax(self._z_array) if self.num_pts() > 0 else None def z_mean(self) -> Optional[numbers.Real]: """ Optional mean z value. """ return np.nanmean(self._z_array) if self.num_pts() > 0 else None def z_var(self) -> Optional[numbers.Real]: """ Optional variance of z values. :return: the optional variance of z values. :rtype: Optional[numbers.Real] Examples: >>> l = Points3D.fromPoints([Point3D(0, 0, 2), Point3D(1, 0, 2), Point3D(0, 1, 2)]) >>> l.z_var() 0.0 """ return np.nanvar(self._z_array) if self.num_pts() > 0 else None def z_std(self) -> Optional[numbers.Real]: """ Optional standard deviation of z values. :return: the optional standard deviation of z values. :rtype: Optional[numbers.Real] Examples: >>> l = Points3D.fromPoints([Point3D(0, 0, 2), Point3D(1, 0, 2), Point3D(0, 1, 2)]) >>> l.z_std() 0.0 """ return np.nanstd(self._z_array) if self.num_pts() > 0 else None def nanmean_point(self) -> Point3D: """ Returns the nan- excluded mean point of the collection. It is the mean point for a collection of point in a x-y-z frame (i.e., not lat-lon). :return: the nan- excluded mean point of the collection. """ return Point3D( x=np.nanmean(self._x_array), y=np.nanmean(self._y_array), z=np.nanmean(self._z_array) ) def segment(self, ndx: int ) -> Optional[Segment3D]: """ Returns the optional segment starting at index ndx. :param ndx: the segment index. :return: the optional segment """ if ndx < 0 or ndx >= self.num_pts() - 1: return None return Segment3D( start_pt=self.pt(ndx), end_pt=self.pt(ndx + 1) ) def reversed(self) -> 'Points3D': """ Return a Points3D instance with reversed point list. :return: a new Points3D instance. """ xs = self._x_array.reversed() ys = self._y_array.reversed() zs = self._z_array.reversed() return Points3D( x_array=xs, y_array=ys, z_array=zs )<|fim▁end|>
<|file_name|>boxed_queries_require_selectable_expression_for_order.rs<|end_file_name|><|fim▁begin|>extern crate diesel; use diesel::pg::Pg; use diesel::*; table! { users { id -> Integer, name -> VarChar, } } table! { posts { id -> Integer, title -> VarChar, }<|fim▁hole|> fn main() { users::table.into_boxed::<Pg>().order(posts::title.desc()); }<|fim▁end|>
}
<|file_name|>cross-crate-name-hiding-2.rs<|end_file_name|><|fim▁begin|>// Check that an identifier from a 2.0 macro in another crate cannot be // resolved with an identifier that's not from a macro expansion. // aux-build:use_by_macro.rs extern crate use_by_macro;<|fim▁hole|> use use_by_macro::*; my_struct!(define); fn main() { let x = MyStruct {}; //~^ ERROR cannot find struct, variant or union type `MyStruct` in this scope }<|fim▁end|>
<|file_name|>AbstractParallelSlave.java<|end_file_name|><|fim▁begin|>/** * Copyright (c) 2014, * Charles Prud'homme (TASC, INRIA Rennes, LINA CNRS UMR 6241), * Jean-Guillaume Fages (COSLING S.A.S.). * All rights reserved. * * Redistribution and use in source and binary forms, with or without * modification, are permitted provided that the following conditions are met: * * Redistributions of source code must retain the above copyright * notice, this list of conditions and the following disclaimer. * * Redistributions in binary form must reproduce the above copyright * notice, this list of conditions and the following disclaimer in the * documentation and/or other materials provided with the distribution. * * Neither the name of the <organization> nor the * names of its contributors may be used to endorse or promote products * derived from this software without specific prior written permission. * * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED * WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE * DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS * SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. */ package org.chocosolver.solver.thread; /** * Slave born to be mastered and work in parallel * * @author Jean-Guillaume Fages */ public abstract class AbstractParallelSlave<P extends AbstractParallelMaster> { //*********************************************************************************** // VARIABLES //*********************************************************************************** public P master; public final int id; //*********************************************************************************** // CONSTRUCTORS //*********************************************************************************** /** * Create a slave born to be mastered and work in parallel * * @param master master solver * @param id slave unique name */ public AbstractParallelSlave(P master, int id) { this.master = master; this.id = id; } //*********************************************************************************** // SUB-PROBLEM SOLVING //*********************************************************************************** /** * Creates a new thread to work in parallel */ public void workInParallel() { Thread t = new Thread() { @Override<|fim▁hole|> work(); master.wishGranted(); } }; t.start(); } /** * do something */ public abstract void work(); }<|fim▁end|>
public void run() {
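AbstractParallelSlave above captures a small pattern: workInParallel() starts a thread whose run() calls the subclass's work() and then tells the master it is done via wishGranted(). A rough Python rendering of the same hand-off; the Master/Slave names, wish_granted() and the Event-based wait are illustrative stand-ins, not part of the original API:

import threading

class Master:
    # stand-in for AbstractParallelMaster: it only needs a completion callback
    def __init__(self):
        self.done = threading.Event()
    def wish_granted(self):
        self.done.set()

class Slave:
    # mirrors AbstractParallelSlave: hold a master and an id, run work() on a thread
    def __init__(self, master, slave_id):
        self.master = master
        self.id = slave_id
    def work(self):
        print("slave %d working" % self.id)
    def work_in_parallel(self):
        def run():                           # counterpart of the anonymous Thread's run()
            self.work()
            self.master.wish_granted()
        threading.Thread(target=run).start()

master = Master()
Slave(master, 1).work_in_parallel()
master.done.wait(timeout=1.0)                # the master blocks until the slave reports back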
<|file_name|>urls.py<|end_file_name|><|fim▁begin|><|fim▁hole|># Copyright (c) 2010 by Yaco Sistemas <[email protected]> # # This program is free software: you can redistribute it and/or modify # it under the terms of the GNU Lesser General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU Lesser General Public License for more details. # # You should have received a copy of the GNU Lesser General Public License # along with this programe. If not, see <http://www.gnu.org/licenses/>. from django.conf.urls.defaults import patterns, url urlpatterns = patterns('autoreports.views', url(r'^ajax/fields/tree/$', 'reports_ajax_fields', name='reports_ajax_fields'), url(r'^ajax/fields/options/$', 'reports_ajax_fields_options', name='reports_ajax_fields_options'), url(r'^(category/(?P<category_key>[\w-]+)/)?$', 'reports_list', name='reports_list'), url(r'^(?P<registry_key>[\w-]+)/$', 'reports_api', name='reports_api'), url(r'^(?P<registry_key>[\w-]+)/(?P<report_id>\d+)/$', 'reports_api', name='reports_api'), url(r'^(?P<registry_key>[\w-]+)/reports/$', 'reports_api_list', name='reports_api_list'), url(r'^(?P<registry_key>[\w-]+)/wizard/$', 'reports_api_wizard', name='reports_api_wizard'), url(r'^(?P<registry_key>[\w-]+)/wizard/(?P<report_id>\d+)/$', 'reports_api_wizard', name='reports_api_wizard'), url(r'^(?P<app_name>[\w-]+)/(?P<model_name>[\w-]+)/$', 'reports_view', name='reports_view'), )<|fim▁end|>
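The urlpatterns above route the autoreports views through named regex captures (category_key, registry_key, report_id, app_name/model_name). One way to see what a route such as reports_api actually captures is to run its regex on a sample path; the path 'mymodel/42/' below is only an example:

import re

# the reports_api route from the urlpatterns above, with a registry key and a numeric report id
reports_api = re.compile(r'^(?P<registry_key>[\w-]+)/(?P<report_id>\d+)/$')

match = reports_api.match('mymodel/42/')
print(match.groupdict())    # {'registry_key': 'mymodel', 'report_id': '42'}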
<|file_name|>km.d.ts<|end_file_name|><|fim▁begin|>import { CustomLocale } from "../types/locale";<|fim▁hole|> th?: CustomLocale | undefined; tr?: CustomLocale | undefined; ar?: CustomLocale | undefined; at?: CustomLocale | undefined; az?: CustomLocale | undefined; be?: CustomLocale | undefined; bg?: CustomLocale | undefined; bn?: CustomLocale | undefined; bs?: CustomLocale | undefined; cat?: CustomLocale | undefined; cs?: CustomLocale | undefined; cy?: CustomLocale | undefined; da?: CustomLocale | undefined; de?: CustomLocale | undefined; en?: CustomLocale | undefined; eo?: CustomLocale | undefined; es?: CustomLocale | undefined; et?: CustomLocale | undefined; fa?: CustomLocale | undefined; fi?: CustomLocale | undefined; fo?: CustomLocale | undefined; fr?: CustomLocale | undefined; gr?: CustomLocale | undefined; he?: CustomLocale | undefined; hi?: CustomLocale | undefined; hu?: CustomLocale | undefined; id?: CustomLocale | undefined; is?: CustomLocale | undefined; it?: CustomLocale | undefined; ja?: CustomLocale | undefined; ka?: CustomLocale | undefined; ko?: CustomLocale | undefined; km?: CustomLocale | undefined; kz?: CustomLocale | undefined; lt?: CustomLocale | undefined; lv?: CustomLocale | undefined; mk?: CustomLocale | undefined; mn?: CustomLocale | undefined; ms?: CustomLocale | undefined; my?: CustomLocale | undefined; nl?: CustomLocale | undefined; no?: CustomLocale | undefined; pa?: CustomLocale | undefined; pl?: CustomLocale | undefined; pt?: CustomLocale | undefined; ro?: CustomLocale | undefined; ru?: CustomLocale | undefined; si?: CustomLocale | undefined; sk?: CustomLocale | undefined; sl?: CustomLocale | undefined; sq?: CustomLocale | undefined; sr?: CustomLocale | undefined; sv?: CustomLocale | undefined; uk?: CustomLocale | undefined; vn?: CustomLocale | undefined; zh?: CustomLocale | undefined; zh_tw?: CustomLocale | undefined; } & { default: import("../types/locale").Locale; }; export default _default;<|fim▁end|>
export declare const Khmer: CustomLocale; declare const _default: { default?: CustomLocale | undefined; hr?: CustomLocale | undefined;
<|file_name|>test_classes.py<|end_file_name|><|fim▁begin|>"""Test inter-conversion of different polynomial classes. This tests the convert and cast methods of all the polynomial classes. """ from __future__ import division, absolute_import, print_function import operator as op from numbers import Number import pytest import numpy as np from numpy.polynomial import ( Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE) from numpy.testing import ( assert_almost_equal, assert_raises, assert_equal, assert_, ) from numpy.compat import long # # fixtures # classes = ( Polynomial, Legendre, Chebyshev, Laguerre, Hermite, HermiteE ) classids = tuple(cls.__name__ for cls in classes) @pytest.fixture(params=classes, ids=classids) def Poly(request): return request.param # # helper functions # random = np.random.random def assert_poly_almost_equal(p1, p2, msg=""): try: assert_(np.all(p1.domain == p2.domain)) assert_(np.all(p1.window == p2.window)) assert_almost_equal(p1.coef, p2.coef) except AssertionError: msg = "Result: %s\nTarget: %s", (p1, p2) raise AssertionError(msg) # # Test conversion methods that depend on combinations of two classes. # Poly1 = Poly Poly2 = Poly def test_conversion(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) d1 = Poly1.domain + random((2,))*.25 w1 = Poly1.window + random((2,))*.25 p1 = Poly1(coef, domain=d1, window=w1) d2 = Poly2.domain + random((2,))*.25 w2 = Poly2.window + random((2,))*.25 p2 = p1.convert(kind=Poly2, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) assert_almost_equal(p2.window, w2) assert_almost_equal(p2(x), p1(x)) def test_cast(Poly1, Poly2): x = np.linspace(0, 1, 10) coef = random((3,)) d1 = Poly1.domain + random((2,))*.25 w1 = Poly1.window + random((2,))*.25 p1 = Poly1(coef, domain=d1, window=w1) d2 = Poly2.domain + random((2,))*.25 w2 = Poly2.window + random((2,))*.25 p2 = Poly2.cast(p1, domain=d2, window=w2) assert_almost_equal(p2.domain, d2) assert_almost_equal(p2.window, w2) assert_almost_equal(p2(x), p1(x)) # # test methods that depend on one class # def test_identity(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 x = np.linspace(d[0], d[1], 11) p = Poly.identity(domain=d, window=w) assert_equal(p.domain, d) assert_equal(p.window, w) assert_almost_equal(p(x), x) def test_basis(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 p = Poly.basis(5, domain=d, window=w) assert_equal(p.domain, d) assert_equal(p.window, w) assert_equal(p.coef, [0]*5 + [1]) def test_fromroots(Poly): # check that requested roots are zeros of a polynomial # of correct degree, domain, and window. 
d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 r = random((5,)) p1 = Poly.fromroots(r, domain=d, window=w) assert_equal(p1.degree(), len(r)) assert_equal(p1.domain, d) assert_equal(p1.window, w) assert_almost_equal(p1(r), 0) # check that polynomial is monic pdom = Polynomial.domain pwin = Polynomial.window p2 = Polynomial.cast(p1, domain=pdom, window=pwin) assert_almost_equal(p2.coef[-1], 1) def test_fit(Poly): def f(x): return x*(x - 1)*(x - 2) x = np.linspace(0, 3) y = f(x) # check default value of domain and window p = Poly.fit(x, y, 3) assert_almost_equal(p.domain, [0, 3]) assert_almost_equal(p(x), y) assert_equal(p.degree(), 3) # check with given domains and window d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 p = Poly.fit(x, y, 3, domain=d, window=w) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) assert_almost_equal(p.window, w) p = Poly.fit(x, y, [0, 1, 2, 3], domain=d, window=w) assert_almost_equal(p(x), y) assert_almost_equal(p.domain, d) assert_almost_equal(p.window, w) # check with class domain default p = Poly.fit(x, y, 3, []) assert_equal(p.domain, Poly.domain) assert_equal(p.window, Poly.window) p = Poly.fit(x, y, [0, 1, 2, 3], []) assert_equal(p.domain, Poly.domain) assert_equal(p.window, Poly.window) # check that fit accepts weights. w = np.zeros_like(x) z = y + random(y.shape)*.25 w[::2] = 1 p1 = Poly.fit(x[::2], z[::2], 3) p2 = Poly.fit(x, z, 3, w=w) p3 = Poly.fit(x, z, [0, 1, 2, 3], w=w) assert_almost_equal(p1(x), p2(x)) assert_almost_equal(p2(x), p3(x)) def test_equal(Poly): p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) assert_(p1 == p1) assert_(not p1 == p2) assert_(not p1 == p3) assert_(not p1 == p4) def test_not_equal(Poly): p1 = Poly([1, 2, 3], domain=[0, 1], window=[2, 3]) p2 = Poly([1, 1, 1], domain=[0, 1], window=[2, 3]) p3 = Poly([1, 2, 3], domain=[1, 2], window=[2, 3]) p4 = Poly([1, 2, 3], domain=[0, 1], window=[1, 2]) assert_(not p1 != p1) assert_(p1 != p2) assert_(p1 != p3) assert_(p1 != p4) def test_add(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = p1 + p2 assert_poly_almost_equal(p2 + p1, p3) assert_poly_almost_equal(p1 + c2, p3) assert_poly_almost_equal(c2 + p1, p3) assert_poly_almost_equal(p1 + tuple(c2), p3) assert_poly_almost_equal(tuple(c2) + p1, p3) assert_poly_almost_equal(p1 + np.array(c2), p3) assert_poly_almost_equal(np.array(c2) + p1, p3) assert_raises(TypeError, op.add, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.add, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.add, p1, Chebyshev([0])) else: assert_raises(TypeError, op.add, p1, Polynomial([0])) def test_sub(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = p1 - p2 assert_poly_almost_equal(p2 - p1, -p3) assert_poly_almost_equal(p1 - c2, p3) assert_poly_almost_equal(c2 - p1, -p3) assert_poly_almost_equal(p1 - tuple(c2), p3) assert_poly_almost_equal(tuple(c2) - p1, -p3) assert_poly_almost_equal(p1 - np.array(c2), p3) assert_poly_almost_equal(np.array(c2) - p1, -p3) assert_raises(TypeError, op.sub, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.sub, p1, Poly([0], window=Poly.window + 1)) if 
Poly is Polynomial: assert_raises(TypeError, op.sub, p1, Chebyshev([0])) else: assert_raises(TypeError, op.sub, p1, Polynomial([0])) def test_mul(Poly): c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = p1 * p2 assert_poly_almost_equal(p2 * p1, p3) assert_poly_almost_equal(p1 * c2, p3) assert_poly_almost_equal(c2 * p1, p3) assert_poly_almost_equal(p1 * tuple(c2), p3) assert_poly_almost_equal(tuple(c2) * p1, p3) assert_poly_almost_equal(p1 * np.array(c2), p3) assert_poly_almost_equal(np.array(c2) * p1, p3) assert_poly_almost_equal(p1 * 2, p1 * Poly([2])) assert_poly_almost_equal(2 * p1, p1 * Poly([2])) assert_raises(TypeError, op.mul, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.mul, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.mul, p1, Chebyshev([0])) else: assert_raises(TypeError, op.mul, p1, Polynomial([0])) def test_floordiv(Poly): c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) c3 = list(random((2,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = Poly(c3) p4 = p1 * p2 + p3 c4 = list(p4.coef) assert_poly_almost_equal(p4 // p2, p1) assert_poly_almost_equal(p4 // c2, p1) assert_poly_almost_equal(c4 // p2, p1) assert_poly_almost_equal(p4 // tuple(c2), p1) assert_poly_almost_equal(tuple(c4) // p2, p1) assert_poly_almost_equal(p4 // np.array(c2), p1) assert_poly_almost_equal(np.array(c4) // p2, p1) assert_poly_almost_equal(2 // p2, Poly([0])) assert_poly_almost_equal(p2 // 2, 0.5*p2) assert_raises( TypeError, op.floordiv, p1, Poly([0], domain=Poly.domain + 1)) assert_raises( TypeError, op.floordiv, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.floordiv, p1, Chebyshev([0])) else: assert_raises(TypeError, op.floordiv, p1, Polynomial([0])) def test_truediv(Poly): # true division is valid only if the denominator is a Number and # not a python bool. 
p1 = Poly([1,2,3]) p2 = p1 * 5 for stype in np.ScalarType: if not issubclass(stype, Number) or issubclass(stype, bool): continue s = stype(5) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) for stype in (int, long, float): s = stype(5) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) for stype in [complex]: s = stype(5, 0) assert_poly_almost_equal(op.truediv(p2, s), p1) assert_raises(TypeError, op.truediv, s, p2) for s in [tuple(), list(), dict(), bool(), np.array([1])]: assert_raises(TypeError, op.truediv, p2, s) assert_raises(TypeError, op.truediv, s, p2) for ptype in classes: assert_raises(TypeError, op.truediv, p2, ptype(1)) def test_mod(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) c3 = list(random((2,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = Poly(c3) p4 = p1 * p2 + p3 c4 = list(p4.coef) assert_poly_almost_equal(p4 % p2, p3) assert_poly_almost_equal(p4 % c2, p3) assert_poly_almost_equal(c4 % p2, p3) assert_poly_almost_equal(p4 % tuple(c2), p3) assert_poly_almost_equal(tuple(c4) % p2, p3) assert_poly_almost_equal(p4 % np.array(c2), p3) assert_poly_almost_equal(np.array(c4) % p2, p3) assert_poly_almost_equal(2 % p2, Poly([2])) assert_poly_almost_equal(p2 % 2, Poly([0])) assert_raises(TypeError, op.mod, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, op.mod, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, op.mod, p1, Chebyshev([0])) else: assert_raises(TypeError, op.mod, p1, Polynomial([0])) def test_divmod(Poly): # This checks commutation, not numerical correctness c1 = list(random((4,)) + .5) c2 = list(random((3,)) + .5) c3 = list(random((2,)) + .5) p1 = Poly(c1) p2 = Poly(c2) p3 = Poly(c3) p4 = p1 * p2 + p3 c4 = list(p4.coef) quo, rem = divmod(p4, p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p4, c2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(c4, p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p4, tuple(c2)) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(tuple(c4), p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p4, np.array(c2)) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(np.array(c4), p2) assert_poly_almost_equal(quo, p1) assert_poly_almost_equal(rem, p3) quo, rem = divmod(p2, 2) assert_poly_almost_equal(quo, 0.5*p2) assert_poly_almost_equal(rem, Poly([0])) quo, rem = divmod(2, p2) assert_poly_almost_equal(quo, Poly([0])) assert_poly_almost_equal(rem, Poly([2])) assert_raises(TypeError, divmod, p1, Poly([0], domain=Poly.domain + 1)) assert_raises(TypeError, divmod, p1, Poly([0], window=Poly.window + 1)) if Poly is Polynomial: assert_raises(TypeError, divmod, p1, Chebyshev([0])) else: assert_raises(TypeError, divmod, p1, Polynomial([0])) def test_roots(Poly): d = Poly.domain * 1.25 + .25 w = Poly.window tgt = np.linspace(d[0], d[1], 5) res = np.sort(Poly.fromroots(tgt, domain=d, window=w).roots()) assert_almost_equal(res, tgt) # default domain and window res = np.sort(Poly.fromroots(tgt).roots()) assert_almost_equal(res, tgt) <|fim▁hole|> def test_copy(Poly): p1 = Poly.basis(5) p2 = p1.copy() assert_(p1 == p2) assert_(p1 is not p2) assert_(p1.coef is not p2.coef) assert_(p1.domain is not p2.domain) assert_(p1.window is not 
p2.window) def test_integ(Poly): P = Polynomial # Check defaults p0 = Poly.cast(P([1*2, 2*3, 3*4])) p1 = P.cast(p0.integ()) p2 = P.cast(p0.integ(2)) assert_poly_almost_equal(p1, P([0, 2, 3, 4])) assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) # Check with k p0 = Poly.cast(P([1*2, 2*3, 3*4])) p1 = P.cast(p0.integ(k=1)) p2 = P.cast(p0.integ(2, k=[1, 1])) assert_poly_almost_equal(p1, P([1, 2, 3, 4])) assert_poly_almost_equal(p2, P([1, 1, 1, 1, 1])) # Check with lbnd p0 = Poly.cast(P([1*2, 2*3, 3*4])) p1 = P.cast(p0.integ(lbnd=1)) p2 = P.cast(p0.integ(2, lbnd=1)) assert_poly_almost_equal(p1, P([-9, 2, 3, 4])) assert_poly_almost_equal(p2, P([6, -9, 1, 1, 1])) # Check scaling d = 2*Poly.domain p0 = Poly.cast(P([1*2, 2*3, 3*4]), domain=d) p1 = P.cast(p0.integ()) p2 = P.cast(p0.integ(2)) assert_poly_almost_equal(p1, P([0, 2, 3, 4])) assert_poly_almost_equal(p2, P([0, 0, 1, 1, 1])) def test_deriv(Poly): # Check that the derivative is the inverse of integration. It is # assumes that the integration has been checked elsewhere. d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 p1 = Poly([1, 2, 3], domain=d, window=w) p2 = p1.integ(2, k=[1, 2]) p3 = p1.integ(1, k=[1]) assert_almost_equal(p2.deriv(1).coef, p3.coef) assert_almost_equal(p2.deriv(2).coef, p1.coef) # default domain and window p1 = Poly([1, 2, 3]) p2 = p1.integ(2, k=[1, 2]) p3 = p1.integ(1, k=[1]) assert_almost_equal(p2.deriv(1).coef, p3.coef) assert_almost_equal(p2.deriv(2).coef, p1.coef) def test_linspace(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 p = Poly([1, 2, 3], domain=d, window=w) # check default domain xtgt = np.linspace(d[0], d[1], 20) ytgt = p(xtgt) xres, yres = p.linspace(20) assert_almost_equal(xres, xtgt) assert_almost_equal(yres, ytgt) # check specified domain xtgt = np.linspace(0, 2, 20) ytgt = p(xtgt) xres, yres = p.linspace(20, domain=[0, 2]) assert_almost_equal(xres, xtgt) assert_almost_equal(yres, ytgt) def test_pow(Poly): d = Poly.domain + random((2,))*.25 w = Poly.window + random((2,))*.25 tgt = Poly([1], domain=d, window=w) tst = Poly([1, 2, 3], domain=d, window=w) for i in range(5): assert_poly_almost_equal(tst**i, tgt) tgt = tgt * tst # default domain and window tgt = Poly([1]) tst = Poly([1, 2, 3]) for i in range(5): assert_poly_almost_equal(tst**i, tgt) tgt = tgt * tst # check error for invalid powers assert_raises(ValueError, op.pow, tgt, 1.5) assert_raises(ValueError, op.pow, tgt, -1) def test_call(Poly): P = Polynomial d = Poly.domain x = np.linspace(d[0], d[1], 11) # Check defaults p = Poly.cast(P([1, 2, 3])) tgt = 1 + x*(2 + 3*x) res = p(x) assert_almost_equal(res, tgt) def test_cutdeg(Poly): p = Poly([1, 2, 3]) assert_raises(ValueError, p.cutdeg, .5) assert_raises(ValueError, p.cutdeg, -1) assert_equal(len(p.cutdeg(3)), 3) assert_equal(len(p.cutdeg(2)), 3) assert_equal(len(p.cutdeg(1)), 2) assert_equal(len(p.cutdeg(0)), 1) def test_truncate(Poly): p = Poly([1, 2, 3]) assert_raises(ValueError, p.truncate, .5) assert_raises(ValueError, p.truncate, 0) assert_equal(len(p.truncate(4)), 3) assert_equal(len(p.truncate(3)), 3) assert_equal(len(p.truncate(2)), 2) assert_equal(len(p.truncate(1)), 1) def test_trim(Poly): c = [1, 1e-6, 1e-12, 0] p = Poly(c) assert_equal(p.trim().coef, c[:3]) assert_equal(p.trim(1e-10).coef, c[:2]) assert_equal(p.trim(1e-5).coef, c[:1]) def test_mapparms(Poly): # check with defaults. Should be identity. 
d = Poly.domain w = Poly.window p = Poly([1], domain=d, window=w) assert_almost_equal([0, 1], p.mapparms()) # w = 2*d + 1 p = Poly([1], domain=d, window=w) assert_almost_equal([1, 2], p.mapparms()) def test_ufunc_override(Poly): p = Poly([1, 2, 3]) x = np.ones(3) assert_raises(TypeError, np.add, p, x) assert_raises(TypeError, np.add, x, p) class TestLatexRepr(object): """Test the latex repr used by ipython """ def as_latex(self, obj): # right now we ignore the formatting of scalars in our tests, since # it makes them too verbose. Ideally, the formatting of scalars will # be fixed such that tests below continue to pass obj._repr_latex_scalar = lambda x: str(x) try: return obj._repr_latex_() finally: del obj._repr_latex_scalar def test_simple_polynomial(self): # default input p = Polynomial([1, 2, 3]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0 + 2.0\,x + 3.0\,x^{2}$') # translated input p = Polynomial([1, 2, 3], domain=[-2, 0]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0 + 2.0\,\left(1.0 + x\right) + 3.0\,\left(1.0 + x\right)^{2}$') # scaled input p = Polynomial([1, 2, 3], domain=[-0.5, 0.5]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0 + 2.0\,\left(2.0x\right) + 3.0\,\left(2.0x\right)^{2}$') # affine input p = Polynomial([1, 2, 3], domain=[-1, 0]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0 + 2.0\,\left(1.0 + 2.0x\right) + 3.0\,\left(1.0 + 2.0x\right)^{2}$') def test_basis_func(self): p = Chebyshev([1, 2, 3]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0\,{T}_{0}(x) + 2.0\,{T}_{1}(x) + 3.0\,{T}_{2}(x)$') # affine input - check no surplus parens are added p = Chebyshev([1, 2, 3], domain=[-1, 0]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0\,{T}_{0}(1.0 + 2.0x) + 2.0\,{T}_{1}(1.0 + 2.0x) + 3.0\,{T}_{2}(1.0 + 2.0x)$') def test_multichar_basis_func(self): p = HermiteE([1, 2, 3]) assert_equal(self.as_latex(p), r'$x \mapsto 1.0\,{He}_{0}(x) + 2.0\,{He}_{1}(x) + 3.0\,{He}_{2}(x)$') # # Test class method that only exists for some classes # class TestInterpolate(object): def f(self, x): return x * (x - 1) * (x - 2) def test_raises(self): assert_raises(ValueError, Chebyshev.interpolate, self.f, -1) assert_raises(TypeError, Chebyshev.interpolate, self.f, 10.) def test_dimensions(self): for deg in range(1, 5): assert_(Chebyshev.interpolate(self.f, deg).degree() == deg) def test_approximation(self): def powx(x, p): return x**p x = np.linspace(0, 2, 10) for deg in range(0, 10): for t in range(0, deg + 1): p = Chebyshev.interpolate(powx, deg, domain=[0, 2], args=(t,)) assert_almost_equal(p(x), powx(x, t), decimal=12)<|fim▁end|>
def test_degree(Poly): p = Poly.basis(5) assert_equal(p.degree(), 5)
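test_conversion and test_cast above check the core contract of the numpy.polynomial convenience classes: re-expressing a series in another basis must leave its values unchanged. A minimal standalone version of that round-trip, with plain numpy and none of the test fixtures:

import numpy as np
from numpy.polynomial import Polynomial, Chebyshev

x = np.linspace(0, 1, 10)

p = Polynomial([1.0, 2.0, 3.0])        # 1 + 2x + 3x^2 in the power basis
c = p.convert(kind=Chebyshev)          # the same function expressed in the Chebyshev basis

print(np.allclose(p(x), c(x)))                        # True: conversion preserves the values
print(np.allclose(Polynomial.cast(c).coef, p.coef))   # True: cast maps it back to [1, 2, 3]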
<|file_name|>Badge.d.ts<|end_file_name|><|fim▁begin|>/** * The `Badge` component represents a user/discussion badge, indicating some * status (e.g. a discussion is stickied, a user is an admin). * * A badge may have the following special attrs: * * - `type` The type of badge this is. This will be used to give the badge a * class name of `Badge--{type}`. * - `icon` The name of an icon to show inside the badge. * - `label`<|fim▁hole|> * All other attrs will be assigned as attributes on the badge element. */ export default class Badge extends Component<import("../Component").ComponentAttrs, undefined> { constructor(); } import Component from "../Component";<|fim▁end|>
*
<|file_name|>hintrc.js<|end_file_name|><|fim▁begin|>{ // environment "browser": true, "node": true, "globals": { "L": true, "define": true, "map":true, "jQuery":true // "drawnItems":true }, "strict": false, // code style "bitwise": true, "camelcase": true, "curly": true, "eqeqeq": true,<|fim▁hole|> "immed": true, "latedef": true, "newcap": true, "noarg": true, "noempty": true, "nonew": true, "undef": true, "unused": true, "quotmark": "single", // whitespace "indent": 4, "trailing": true, "white": true, "smarttabs": true, "maxlen": 150 // code simplicity - not enforced but nice to check from time to time // "maxstatements": 20, // "maxcomplexity": 5 // "maxparams": 4, // "maxdepth": 4 }<|fim▁end|>
"forin": false,
<|file_name|>__init__.py<|end_file_name|><|fim▁begin|>__author__ = 'shahbaz' # ############################################################################### # Utility functions # # ############################################################################### import sys from functools import wraps from logging import StreamHandler from bitstring import BitArray def singleton(f): """ :param f: :return: """ return f() def cached(f): """ :param f: :return: """ @wraps(f) def wrapper(*args): """ :param args: :return: """ try: return wrapper.cache[args] except KeyError: wrapper.cache[args] = v = f(*args) return v wrapper.cache = {} return wrapper class frozendict(object): __slots__ = ["_dict", "_cached_hash"] def __init__(self, new_dict=None, **kwargs): """ :param new_dict: :param kwargs: :return: """ self._dict = dict() if new_dict is not None: self._dict.update(new_dict) self._dict.update(kwargs) def update(self, new_dict=None, **kwargs): """ :param new_dict: :param kwargs: :return: """ d = self._dict.copy() if new_dict is not None: d.update(new_dict) d.update(kwargs) return self.__class__(d) def remove(self, ks): """ :param ks: :return: """ d = self._dict.copy() for k in ks: if k in d: del d[k] return self.__class__(d) def pop(self, *ks): """ :param ks: :return: """ result = [] for k in ks: result.append(self[k]) result.append(self.remove(*ks)) return result def __repr__(self): """ :return: """ return repr(self._dict) def __iter__(self): """ :return: """ return iter(self._dict) def __contains__(self, key): """ :param key: :return: """ return key in self._dict def keys(self): """ :return: """ return self._dict.keys() def values(self): """ :return: """ return self._dict.values() def items(self): """ :return: """ return self._dict.items() def iterkeys(self): """ :return: """ return self._dict.iterkeys() def itervalues(self): """ :return: """ return self._dict.itervalues() def iteritems(self): """ :return: """ return self._dict.iteritems() def get(self, key, default=None): """ :param key: :param default: :return: """ return self._dict.get(key, default) def __getitem__(self, item): """ :param item: :return: """ return self._dict[item] def __hash__(self): """ :return: """ try: return self._cached_hash except AttributeError: h = self._cached_hash = hash(frozenset(self._dict.items())) return h def __eq__(self, other): """ <|fim▁hole|> :return: """ return self._dict == other._dict def __ne__(self, other): """ :param other: :return: """ return self._dict != other._dict def __len__(self): """ :return: """ return len(self._dict) def indent_str(s, indent=4): """ :param s: :param indent: :return: """ return "\n".join(indent * " " + i for i in s.splitlines()) def repr_plus(ss, indent=4, sep="\n", prefix=""): """ :param ss: :param indent: :param sep: :param prefix: :return: """ if isinstance(ss, basestring): ss = [ss] return indent_str(sep.join(prefix + repr(s) for s in ss), indent) class LockStreamHandler(StreamHandler): '''Relies on a multiprocessing.Lock to serialize multiprocess writes to a stream.''' def __init__(self, lock, stream=sys.stderr): """ :param lock: :param stream: :return: """ self.lock = lock super(MultiprocessStreamHandler, self).__init__(stream) def emit(self, record): """ Acquire the lock before emitting the record. :param record: :return: """ self.lock.acquire() super(LockStreamHandler, self).emit(record) self.lock.release() class QueueStreamHandler(StreamHandler): """ Relies on a multiprocessing.Lock to serialize multiprocess writes to a stream. 
""" def __init__(self, queue, stream=sys.stderr): """ :param queue: :param stream: :return: """ self.queue = queue super(QueueStreamHandler, self).__init__(stream) def emit(self, record): """ Acquire the lock before emitting the record. :param record: :return: """ self.queue.put(record) def get_bitarray(packet, fields): """ :param packet: :param fields: :return: """ o = 0 a = BitArray() for h in fields: l = packet[h]['length'] a[o:(o + l)] = packet[h]['value'] o += l return a<|fim▁end|>
:param other:
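The cached decorator above memoizes a function on its positional-argument tuple and exposes the memo as wrapper.cache. A small runnable sketch of the same idea, rewritten for Python 3; the slow_square function is only a demo:

from functools import wraps

def cached(f):
    # same idea as the decorator above: key results on the positional-args tuple
    @wraps(f)
    def wrapper(*args):
        try:
            return wrapper.cache[args]
        except KeyError:
            wrapper.cache[args] = value = f(*args)
            return value
    wrapper.cache = {}
    return wrapper

@cached
def slow_square(n):
    print("computing", n)
    return n * n

slow_square(4)               # prints "computing 4", returns 16
slow_square(4)               # served from wrapper.cache, nothing printed
print(slow_square.cache)     # {(4,): 16}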
<|file_name|>project_manager.py<|end_file_name|><|fim▁begin|># =============================================================================== # Copyright 2016 ross # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # =============================================================================== # ============= standard library imports ======================== from __future__ import absolute_import from traits.api import List, Str, Dict, Button, Int, String, Event from pychron.core.fuzzyfinder import fuzzyfinder from pychron.envisage.browser.record_views import ProjectRecordView from pychron.loggable import Loggable class ProjectManager(Loggable): oitems = List items = List filter_str = Str(enter_set=True, auto_set=False) filter_attrs = Dict( { "name": "Name", "unique_id": "UniqueID", "principal_investigator": "Principal Investigator", "lab_contact": "Lab Contact", } ) filter_attr = Str # add_button = Button # ir = Str # institution = Str # comment = String # pi = Str # lab_contact = Str # pis = List # lab_contacts = List scroll_to_row = Int project_name = String comment = String selected = List save_button = Button refresh = Event def activated(self): with self.dvc.session_ctx(use_parent_session=False): self.items = self.oitems = [ ProjectRecordView(pr) for pr in self.dvc.get_projects() ] # self._filter() # self.pis = self.dvc.get_principal_investigator_names() # self.lab_contacts = self.dvc.get_usernames() # def prepare_destroy(self): # self.dvc.close_session() # private # def _add(self): # self.dvc.add_ir(self.pi, self.lab_contact, # ir=self.ir, # comment=self.comment, # institution=self.institution) # # self.oitems = self.dvc.get_irs() # self._filter() def _project_name_changed(self, new): if self.selected: if len(self.selected) == 1: p = self.selected[0] p.name = new if new != p.db_name: p.dirty = True else: p.dirty = False self.refresh = True else: self.warning_dialog("Can only edit the name of one project at a time") def _comment_changed(self, new): if self.selected: for i in self.selected: i.comment = new if new != i.db_comment: i.comment = new i.dirty = True else: i.dirty = False self.refresh = True def _save_button_fired(self): self.debug("Apply changes") dvc = self.dvc with dvc.session_ctx(use_parent_session=False): commit = False for item in self.oitems: if item.dirty: pr = dvc.get_project_by_id(item.unique_id) pr.name = item.name pr.comment = item.comment<|fim▁hole|> item.db_comment = item.comment item.db_name = item.name item.dirty = False commit = True if commit: dvc.commit() def _filter(self): if self.filter_str: self.items = fuzzyfinder(self.filter_str, self.oitems, self.filter_attr) else: self.items = self.oitems self.scroll_to_row = len(self.items) - 1 def _filter_str_changed(self): self._filter() def _filter_attr_changed(self): self._filter() # def _add_button_fired(self): # self._add() # ============= EOF =============================================<|fim▁end|>
<|file_name|>sorting_impl.rs<|end_file_name|><|fim▁begin|>use std::cmp::Ordering; use sorting::*; impl<T: Ord + Clone> Sort for [T] { fn adv_sort_mut(&mut self, algo: SortingAlgorithmn) { match algo { SortingAlgorithmn::Bubble => bubble_sort_mut(self), SortingAlgorithmn::Quick => quick_sort_mut(self), } } } impl<T: PartialOrd, F: FnMut(&T, &T) -> Ordering> SortBy<T, F> for [T] { fn adv_sort_by_mut(&mut self, compare: &mut F, algo: SortingAlgorithmn) { match algo { SortingAlgorithmn::Bubble => bubble_sort_by_mut(compare, self), SortingAlgorithmn::Quick => quick_sort_by_mut(compare, self),<|fim▁hole|><|fim▁end|>
} } }
<|file_name|>console.ts<|end_file_name|><|fim▁begin|>const consoleFilters = [ /^The above error occurred in the <.*?> component:/, // error boundary output /^Error: Uncaught .+/ // jsdom output ] function suppressErrorOutput() { const originalError = console.error const error = (...args: Parameters<typeof originalError>) => { const message = typeof args[0] === 'string' ? args[0] : null if (!message || !consoleFilters.some((filter) => filter.test(message))) { originalError(...args) } } console.error = error return () => { console.error = originalError } } function errorFilteringDisabled() { try { return !!process.env.RHTL_DISABLE_ERROR_FILTERING<|fim▁hole|> return false } } function enableErrorOutputSuppression() { // Automatically registers console error suppression and restoration in supported testing frameworks if ( typeof beforeEach === 'function' && typeof afterEach === 'function' && !errorFilteringDisabled() ) { let restoreConsole!: () => void beforeEach(() => { restoreConsole = suppressErrorOutput() }) afterEach(() => restoreConsole()) } } export { enableErrorOutputSuppression, suppressErrorOutput }<|fim▁end|>
} catch { // falling back in the case that process.env.RHTL_DISABLE_ERROR_FILTERING cannot be accessed (e.g. browser environment)
<|file_name|>documents.test.js<|end_file_name|><|fim▁begin|>const { assign, set } = require('lodash') const { renderDocuments } = require('../documents') describe('Contacts documents controller', () => { beforeEach(() => { this.breadcrumbStub = sinon.stub().returnsThis() this.resMock = assign({}, globalRes, { breadcrumb: this.breadcrumbStub, render: sinon.spy(), }) this.reqMock = assign({}, globalReq) this.nextSpy = sinon.spy() }) describe('#renderDocuments', () => { context('when documents path is an empty string', () => { beforeEach(() => {<|fim▁hole|> }) it('should call breadcrumb', () => { expect(this.resMock.breadcrumb).to.be.calledOnce }) it('should call breadcrumb with', () => { expect(this.resMock.breadcrumb).to.be.calledWith('Documents') }) it('should call render', () => { expect(this.resMock.render).to.be.calledOnce }) it('should call render with', () => { expect(this.resMock.render).to.be.calledWith( 'contacts/views/documents', { archivedDocumentPath: '' } ) }) }) context('when documents path contains a url', () => { const mockDocumentUrl = 'mock-document-url' beforeEach(() => { set( this.resMock, 'locals.contact.archived_documents_url_path', mockDocumentUrl ) renderDocuments(this.reqMock, this.resMock, this.nextSpy) }) it('should call breadcrumb', () => { expect(this.resMock.breadcrumb).to.be.calledOnce }) it('should call breadcrumb with', () => { expect(this.resMock.breadcrumb).to.be.calledWith('Documents') }) it('should call render', () => { expect(this.resMock.render).to.be.calledOnce }) it('should call render with', () => { expect(this.resMock.render).to.be.calledWith( 'contacts/views/documents', { archivedDocumentPath: mockDocumentUrl, } ) }) }) }) })<|fim▁end|>
set(this.resMock, 'locals.contact.archived_documents_url_path', '') renderDocuments(this.reqMock, this.resMock, this.nextSpy)
<|file_name|>session.py<|end_file_name|><|fim▁begin|># Copyright (C) 2008, Red Hat, Inc. # # This program is free software; you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation; either version 2 of the License, or # (at your option) any later version. # # This program is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with this program; if not, write to the Free Software # Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA from gi.repository import Gtk import dbus import os import signal import sys import logging from sugar3 import session from sugar3 import env _session_manager = None def have_systemd(): return os.access("/run/systemd/seats", 0) >= 0 class SessionManager(session.SessionManager): MODE_LOGOUT = 0 MODE_SHUTDOWN = 1 MODE_REBOOT = 2 def __init__(self): session.SessionManager.__init__(self) self._logout_mode = None def logout(self): self._logout_mode = self.MODE_LOGOUT self.initiate_shutdown() def shutdown(self): self._logout_mode = self.MODE_SHUTDOWN self.initiate_shutdown() def reboot(self): self._logout_mode = self.MODE_REBOOT self.initiate_shutdown() def shutdown_completed(self): if env.is_emulator(): self._close_emulator() elif self._logout_mode != self.MODE_LOGOUT: bus = dbus.SystemBus() if have_systemd(): try: proxy = bus.get_object('org.freedesktop.login1', '/org/freedesktop/login1') pm = dbus.Interface(proxy, 'org.freedesktop.login1.Manager') if self._logout_mode == self.MODE_SHUTDOWN: pm.PowerOff(False)<|fim▁hole|> logging.exception('Can not stop sugar') self.session.cancel_shutdown() return else: CONSOLEKIT_DBUS_PATH = '/org/freedesktop/ConsoleKit/Manager' try: proxy = bus.get_object('org.freedesktop.ConsoleKit', CONSOLEKIT_DBUS_PATH) pm = dbus.Interface(proxy, 'org.freedesktop.ConsoleKit.Manager') if self._logout_mode == self.MODE_SHUTDOWN: pm.Stop() elif self._logout_mode == self.MODE_REBOOT: pm.Restart() except: logging.exception('Can not stop sugar') self.session.cancel_shutdown() return session.SessionManager.shutdown_completed(self) Gtk.main_quit() def _close_emulator(self): Gtk.main_quit() if 'SUGAR_EMULATOR_PID' in os.environ: pid = int(os.environ['SUGAR_EMULATOR_PID']) os.kill(pid, signal.SIGTERM) # Need to call this ASAP so the atexit handlers get called before we # get killed by the X (dis)connection sys.exit() def get_session_manager(): global _session_manager if _session_manager == None: _session_manager = SessionManager() return _session_manager<|fim▁end|>
elif self._logout_mode == self.MODE_REBOOT: pm.Reboot(True) except:
<|file_name|>osm.py<|end_file_name|><|fim▁begin|>"""Handles downloading and importing OSM Data""" import os import subprocess import tempfile import requests from celery.utils.log import get_task_logger from django.conf import settings from django.db import connection from datasources.models import OSMData, OSMDataProblem from datasources.tasks.shapefile import ErrorFactory # Note: The download is done using the overpass API # (see:http://wiki.openstreetmap.org/wiki/Overpass_API) because # we may be downloading large files and these endpoints are optimized # for downloads/reads unlike the main openstreetmap API endpoint OSM_API_URL = 'http://www.overpass-api.de/api/xapi?way[bbox=%s,%s,%s,%s][highway=*]' # set up shared task logger logger = get_task_logger(__name__) def run_osm_import(osmdata_id): """Download and run import step for OSM data Downloads and stores raw OSM data within a bounding box defined by imported GTFS data. Uses the SRID defined on the gtfs_stops table to determine correct UTM projection to import data as. Uses Raw SQL to - get extent from GTFS data since we do not have models that keeps track of GTFS Data - get UTM projection to import OSM data as correct projection """ logger.debug('Starting OSM import') osm_data = OSMData.objects.get(pk=osmdata_id) osm_data.status = OSMData.Statuses.PROCESSING error_factory = ErrorFactory(OSMDataProblem, osm_data, 'osmdata') def handle_error(title, description): """Helper method to handle shapefile errors.""" error_factory.error(title, description) osm_data.status = OSMData.Statuses.ERROR osm_data.save() return with connection.cursor() as c: try: # Get the bounding box for gtfs data # split components to make it easier to parse the sql response bbox_query = """ SELECT MIN(ST_Xmin(the_geom)), MIN(ST_Ymin(the_geom)), MAX(ST_Xmax(the_geom)), MAX(ST_Ymax(the_geom)) FROM gtfs_stops;""" logger.debug('Making query for bounding box from gtfs stops') c.execute(bbox_query) bbox = c.fetchone() except Exception as e: err_msg = 'Error obtaining bounding box from gtfs_stops table' handle_error(err_msg, e.message) try: logger.debug('Making query for UTM projection srid from gtfs_stops table (geom field)') utm_projection_query = "SELECT FIND_SRID('', 'gtfs_stops', 'geom');" c.execute(utm_projection_query)<|fim▁hole|> except Exception as e: err_msg = 'Error obtaining SRID from gtfs_stops table' logger.exception(err_msg) handle_error(err_msg, e.message) _, temp_filename = tempfile.mkstemp() logger.debug('Generated tempfile %s to download osm data into', temp_filename) osm_data.source_file = temp_filename osm_data.status = OSMData.Statuses.DOWNLOADING osm_data.save() try: response = requests.get(OSM_API_URL % bbox, stream=True) logger.debug('Downloading OSM data from overpass/OSM api') # Download OSM data with open(temp_filename, 'wb') as fh: for chunk in response.iter_content(chunk_size=1024): if chunk: fh.write(chunk) fh.flush() logger.debug('Finished downloading OSM data') osm_data.status = OSMData.Statuses.IMPORTING osm_data.save() except Exception as e: err_msg = 'Error downloading data' logger.exception('Error downloading data') handle_error(err_msg, e.message) # Get Database settings db_host = settings.DATABASES['default']['HOST'] db_password = settings.DATABASES['default']['PASSWORD'] db_user = settings.DATABASES['default']['USER'] db_name = settings.DATABASES['default']['NAME'] env = os.environ.copy() env['PGPASSWORD'] = db_password # Insert OSM Data into Database with osm2pgsql command osm2pgsql_command = ['osm2pgsql', '-U', db_user, '-H', 
db_host, '-d', db_name, '-s', # use slim mode to cache to DB rather than in-memory '-E', str(utm_projection), temp_filename] try: logger.debug('Running OSM import command %s', ' '.join(osm2pgsql_command)) subprocess.check_call(osm2pgsql_command, env=env) osm_data.status = OSMData.Statuses.COMPLETE except subprocess.CalledProcessError as e: osm_data.status = OSMData.Statuses.ERROR err_msg = 'Error running osm2pgsql command' logger.exception('Error running osm2pgsql command') error_factory.error(err_msg, e.message) finally: osm_data.save() os.remove(temp_filename)<|fim▁end|>
utm_projection = c.fetchone()[0]
<|file_name|>bioc_collection.py<|end_file_name|><|fim▁begin|>__all__ = ['BioCCollection'] from meta import _MetaInfons, _MetaIter from compat import _Py2Next class BioCCollection(_Py2Next, _MetaInfons, _MetaIter): def __init__(self, collection=None): self.infons = dict()<|fim▁hole|> self.source = '' self.date = '' self.key = '' self.documents = list() if collection is not None: self.infons = collection.infons self.source = collection.source self.date = collection.date self.key = collection.key self.documents = collection.documents def __str__(self): s = 'source: ' + self.source + '\n' s += 'date: ' + self.date + '\n' s += 'key: ' + self.key + '\n' s += str(self.infons) + '\n' s += str(self.documents) + '\n' return s def _iterdata(self): return self.documents def clear_documents(self): self.documents = list() def get_document(self, doc_idx): return self.documents[doc_idx] def add_document(self, document): self.documents.append(document) def remove_document(self, document): if type(document) is int: self.dcouments.remove(self.documents[document]) else: self.documents.remove(document) # TBC<|fim▁end|>
<|file_name|>serf.go<|end_file_name|><|fim▁begin|>package lib import ( "github.com/hashicorp/serf/serf" ) // SerfDefaultConfig returns a Consul-flavored Serf default configuration,<|fim▁hole|>func SerfDefaultConfig() *serf.Config { base := serf.DefaultConfig() // This effectively disables the annoying queue depth warnings. base.QueueDepthWarning = 1000000 // This enables dynamic sizing of the message queue depth based on the // cluster size. base.MinQueueDepth = 4096 return base }<|fim▁end|>
// suitable as a basis for a LAN, WAN, segment, or area.
<|file_name|>static_images.py<|end_file_name|><|fim▁begin|>import json import requests import key API_key = key.getAPIkey() #load all champion pictures def load_champion_pictures(champion_json): print len(champion_json['data']) version = champion_json['version'] print "version: " + version for champion in champion_json['data']: print champion r = requests.get('http://ddragon.leagueoflegends.com/cdn/' + version + '/img/champion/' + champion + '.png') if r.status_code == 200: img = r.content with open('static/images/champions/' + champion_json['data'][champion]['name'] + '.png', 'w') as f: f.write(img) print "img created" else: print "pictures: something went wrong" #load champion json #converts to python dict using json() and json.dump() for error checking def load_champion_json(): try: r = requests.get('https://global.api.pvp.net/api/lol/static-data/na/v1.2/champion?&api_key=' + API_key) champion_json = r.json() if 'status' in champion_json: print champion_json['status']['message'] return load_champion_pictures(champion_json) # quick fix to change MonkeyKing to Wukong so that sort_keys sorts it properly champion_json['data']['Wukong'] = champion_json['data']['MonkeyKing']<|fim▁hole|> print e.message return with open('static/json/champion.json', 'w') as f: json.dump(champion_json, f, sort_keys=True) load_champion_json()<|fim▁end|>
del champion_json['data']['MonkeyKing'] except ValueError as e:
<|file_name|>issue-13853.rs<|end_file_name|><|fim▁begin|>// Copyright 2014 The Rust Project Developers. See the COPYRIGHT // file at the top-level directory of this distribution and at // http://rust-lang.org/COPYRIGHT. // // Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or // http://www.apache.org/licenses/LICENSE-2.0> or the MIT license // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your // option. This file may not be copied, modified, or distributed // except according to those terms.<|fim▁hole|> fn zomg(); } trait Graph<N: Node> { fn nodes<'a, I: Iterator<Item=&'a N>>(&'a self) -> I; } impl<N: Node> Graph<N> for Vec<N> { fn nodes<'a, I: Iterator<Item=&'a N>>(&self) -> I { self.iter() //~ ERROR mismatched types } } struct Stuff; impl Node for Stuff { fn zomg() { println!("zomg"); } } fn iterate<N: Node, G: Graph<N>>(graph: &G) { for node in graph.iter() { //~ ERROR does not implement any method in scope named node.zomg(); //~ error: the type of this value must be known in this context } } pub fn main() { let graph = Vec::new(); graph.push(Stuff); iterate(graph); //~ ERROR mismatched types }<|fim▁end|>
use std::marker::MarkerTrait; trait Node : MarkerTrait {
<|file_name|>feature-detection.ts<|end_file_name|><|fim▁begin|>export interface DetectedFeatures { draggable:boolean; dragEvents:boolean; userAgentSupportingNativeDnD:boolean; } export function detectFeatures():DetectedFeatures { let features:DetectedFeatures = { dragEvents: ("ondragstart" in document.documentElement), draggable: ("draggable" in document.documentElement), userAgentSupportingNativeDnD: undefined }; const isBlinkEngine = !!((<any>window).chrome) || /chrome/i.test( navigator.userAgent ); features.userAgentSupportingNativeDnD = !( // if is mobile safari or android browser -> no native dnd (/iPad|iPhone|iPod|Android/.test( navigator.userAgent )) || // OR //if is blink(chrome/opera) with touch events enabled -> no native dnd (isBlinkEngine && ("ontouchstart" in document.documentElement)) ); return features; } export function supportsPassiveEventListener():boolean { let supportsPassiveEventListeners = false; // reference https://github.com/WICG/EventListenerOptions/blob/gh-pages/explainer.md try { let opts = Object.defineProperty( {}, "passive", { get: function() { supportsPassiveEventListeners = true; } } ); window.addEventListener( "test", null, opts );<|fim▁hole|> return supportsPassiveEventListeners; }<|fim▁end|>
} // tslint:disable-next-line:no-empty catch( e ) { }
<|file_name|>DbaccMain.cpp<|end_file_name|><|fim▁begin|>/* Copyright (c) 2003, 2016, Oracle and/or its affiliates. All rights reserved. This program is free software; you can redistribute it and/or modify it under the terms of the GNU General Public License as published by the Free Software Foundation; version 2 of the License. This program is distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the GNU General Public License for more details. You should have received a copy of the GNU General Public License along with this program; if not, write to the Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA */ #define DBACC_C #include "Dbacc.hpp" #include <AttributeHeader.hpp> #include <Bitmask.hpp> #include <signaldata/AccFrag.hpp> #include <signaldata/AccScan.hpp> #include <signaldata/NextScan.hpp> #include <signaldata/AccLock.hpp> #include <signaldata/EventReport.hpp> #include <signaldata/FsConf.hpp> #include <signaldata/FsRef.hpp> #include <signaldata/FsRemoveReq.hpp> #include <signaldata/DropTab.hpp> #include <signaldata/DumpStateOrd.hpp> #include <signaldata/TuxMaint.hpp> #include <signaldata/DbinfoScan.hpp> #include <signaldata/TransIdAI.hpp> #include <KeyDescriptor.hpp> #include <signaldata/NodeStateSignalData.hpp> #include <md5_hash.hpp> #ifdef VM_TRACE #define DEBUG(x) ndbout << "DBACC: "<< x << endl; #else #define DEBUG(x) #endif #ifdef ACC_SAFE_QUEUE #define vlqrequire(x) do { if (unlikely(!(x))) {\ dump_lock_queue(loPtr); \ ndbrequire(false); } } while(0) #else #define vlqrequire(x) ndbrequire(x) #define dump_lock_queue(x) #endif // primary key is stored in TUP #include "../dbtup/Dbtup.hpp" #include "../dblqh/Dblqh.hpp" #define JAM_FILE_ID 345 // Index pages used by ACC instances, used by CMVMI to report index memory usage extern Uint32 g_acc_pages_used[MAX_NDBMT_LQH_WORKERS]; // Signal entries and statement blocks /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* COMMON SIGNAL RECEPTION MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* ******************--------------------------------------------------------------- */ /* CONTINUEB CONTINUE SIGNAL */ /* ******************------------------------------+ */ /* SENDER: ACC, LEVEL B */ void Dbacc::execCONTINUEB(Signal* signal) { Uint32 tcase; jamEntry(); tcase = signal->theData[0]; tdata0 = signal->theData[1]; tresult = 0; switch (tcase) { case ZINITIALISE_RECORDS: jam(); initialiseRecordsLab(signal, signal->theData[3], signal->theData[4]); return; break; case ZREL_ROOT_FRAG: { jam(); Uint32 tableId = signal->theData[1]; releaseRootFragResources(signal, tableId); break; } case ZREL_FRAG: { jam(); Uint32 fragIndex = signal->theData[1]; releaseFragResources(signal, fragIndex); break; } case ZREL_DIR: { jam(); releaseDirResources(signal); break; } default: ndbrequire(false); break; }//switch return; }//Dbacc::execCONTINUEB() /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* 
------------------------------------------------------------------------- */ /* */ /* END OF COMMON SIGNAL RECEPTION MODULE */ /* */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* */ /* SYSTEM RESTART MODULE */ /* */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ void Dbacc::execNDB_STTOR(Signal* signal) { Uint32 tstartphase; Uint32 tStartType; jamEntry(); cndbcntrRef = signal->theData[0]; cmynodeid = signal->theData[1]; tstartphase = signal->theData[2]; tStartType = signal->theData[3]; switch (tstartphase) { case ZSPH1: jam(); ndbsttorryLab(signal); return; break; case ZSPH2: ndbsttorryLab(signal); return; break; case ZSPH3: break; case ZSPH6: jam(); break; default: jam(); /*empty*/; break; }//switch ndbsttorryLab(signal); return; }//Dbacc::execNDB_STTOR() /* ******************--------------------------------------------------------------- */ /* STTOR START / RESTART */ /* ******************------------------------------+ */ /* SENDER: ANY, LEVEL B */ void Dbacc::execSTTOR(Signal* signal) { jamEntry(); Uint32 tstartphase = signal->theData[1]; switch (tstartphase) { case 1: jam(); ndbrequire((c_tup = (Dbtup*)globalData.getBlock(DBTUP, instance())) != 0); ndbrequire((c_lqh = (Dblqh*)globalData.getBlock(DBLQH, instance())) != 0); break; } tuserblockref = signal->theData[3]; csignalkey = signal->theData[6]; sttorrysignalLab(signal); return; }//Dbacc::execSTTOR() /* --------------------------------------------------------------------------------- */ /* ZSPH1 */ /* --------------------------------------------------------------------------------- */ void Dbacc::ndbrestart1Lab() { cmynodeid = globalData.ownId; cownBlockref = calcInstanceBlockRef(DBACC); czero = 0; cminusOne = czero - 1; return; }//Dbacc::ndbrestart1Lab() void Dbacc::initialiseRecordsLab(Signal* signal, Uint32 ref, Uint32 data) { switch (tdata0) { case 0: jam(); initialiseTableRec(); break; case 1: case 2: break; case 3: jam(); break; case 4: jam(); break; case 5: jam(); break; case 6: jam(); initialiseFragRec(); break; case 7: jam(); break; case 8: jam(); initialiseOperationRec(); break; case 9: jam(); initialisePageRec(); break; case 10: jam(); break; case 11: jam(); initialiseScanRec(); break; case 12: jam(); { ReadConfigConf * conf = (ReadConfigConf*)signal->getDataPtrSend(); conf->senderRef = reference(); conf->senderData = data; sendSignal(ref, GSN_READ_CONFIG_CONF, signal, ReadConfigConf::SignalLength, JBB); } return; break; default: ndbrequire(false); break; }//switch signal->theData[0] = ZINITIALISE_RECORDS; signal->theData[1] = tdata0 + 1; signal->theData[2] = 0; signal->theData[3] = ref; signal->theData[4] = data; sendSignal(reference(), GSN_CONTINUEB, signal, 5, JBB); return; }//Dbacc::initialiseRecordsLab() /* *********************************<< */ /* NDB_STTORRY */ /* *********************************<< */ void Dbacc::ndbsttorryLab(Signal* signal) const { signal->theData[0] = cownBlockref; sendSignal(cndbcntrRef, GSN_NDB_STTORRY, signal, 1, JBB); return; 
}//Dbacc::ndbsttorryLab() /* *********************************<< */ /* SIZEALT_REP SIZE ALTERATION */ /* *********************************<< */ void Dbacc::execREAD_CONFIG_REQ(Signal* signal) { const ReadConfigReq * req = (ReadConfigReq*)signal->getDataPtr(); Uint32 ref = req->senderRef; Uint32 senderData = req->senderData; ndbrequire(req->noOfParameters == 0); jamEntry(); const ndb_mgm_configuration_iterator * p = m_ctx.m_config.getOwnConfigIterator(); ndbrequire(p != 0); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_FRAGMENT, &cfragmentsize)); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_OP_RECS, &coprecsize)); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_PAGE8, &cpagesize)); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_TABLE, &ctablesize)); ndbrequire(!ndb_mgm_get_int_parameter(p, CFG_ACC_SCAN, &cscanRecSize)); initRecords(); ndbrestart1Lab(); c_memusage_report_frequency = 0; ndb_mgm_get_int_parameter(p, CFG_DB_MEMREPORT_FREQUENCY, &c_memusage_report_frequency); tdata0 = 0; initialiseRecordsLab(signal, ref, senderData); return; }//Dbacc::execSIZEALT_REP() /* *********************************<< */ /* STTORRY */ /* *********************************<< */ void Dbacc::sttorrysignalLab(Signal* signal) const { signal->theData[0] = csignalkey; signal->theData[1] = 3; /* BLOCK CATEGORY */ signal->theData[2] = 2; /* SIGNAL VERSION NUMBER */ signal->theData[3] = ZSPH1; signal->theData[4] = 255; BlockReference cntrRef = !isNdbMtLqh() ? NDBCNTR_REF : DBACC_REF; sendSignal(cntrRef, GSN_STTORRY, signal, 5, JBB); /* END OF START PHASES */ return; }//Dbacc::sttorrysignalLab() /* --------------------------------------------------------------------------------- */ /* INITIALISE_FRAG_REC */ /* INITIALATES THE FRAGMENT RECORDS. */ /* --------------------------------------------------------------------------------- */ void Dbacc::initialiseFragRec() { FragmentrecPtr regFragPtr; ndbrequire(cfragmentsize > 0); for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) { jam(); refresh_watch_dog(); ptrAss(regFragPtr, fragmentrec); initFragGeneral(regFragPtr); regFragPtr.p->nextfreefrag = regFragPtr.i + 1; }//for regFragPtr.i = cfragmentsize - 1; ptrAss(regFragPtr, fragmentrec); regFragPtr.p->nextfreefrag = RNIL; cfirstfreefrag = 0; }//Dbacc::initialiseFragRec() /* --------------------------------------------------------------------------------- */ /* INITIALISE_OPERATION_REC */ /* INITIALATES THE OPERATION RECORDS. */ /* --------------------------------------------------------------------------------- */ void Dbacc::initialiseOperationRec() { ndbrequire(coprecsize > 0); for (operationRecPtr.i = 0; operationRecPtr.i < coprecsize; operationRecPtr.i++) { refresh_watch_dog(); ptrAss(operationRecPtr, operationrec); operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; operationRecPtr.p->nextOp = operationRecPtr.i + 1; }//for operationRecPtr.i = coprecsize - 1; ptrAss(operationRecPtr, operationrec); operationRecPtr.p->nextOp = RNIL; cfreeopRec = 0; }//Dbacc::initialiseOperationRec() /* --------------------------------------------------------------------------------- */ /* INITIALISE_PAGE_REC */ /* INITIALATES THE PAGE RECORDS. 
*/ /* --------------------------------------------------------------------------------- */ void Dbacc::initialisePageRec() { ndbrequire(cpagesize > 0); cnoOfAllocatedPages = 0; cnoOfAllocatedPagesMax = 0; }//Dbacc::initialisePageRec() /* --------------------------------------------------------------------------------- */ /* INITIALISE_ROOTFRAG_REC */ /* INITIALATES THE ROOTFRAG RECORDS. */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* INITIALISE_SCAN_REC */ /* INITIALATES THE QUE_SCAN RECORDS. */ /* --------------------------------------------------------------------------------- */ void Dbacc::initialiseScanRec() { ndbrequire(cscanRecSize > 0); for (scanPtr.i = 0; scanPtr.i < cscanRecSize; scanPtr.i++) { ptrAss(scanPtr, scanRec); scanPtr.p->scanNextfreerec = scanPtr.i + 1; scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT; scanPtr.p->activeLocalFrag = RNIL; scanPtr.p->initContainer(); }//for scanPtr.i = cscanRecSize - 1; ptrAss(scanPtr, scanRec); scanPtr.p->scanNextfreerec = RNIL; cfirstFreeScanRec = 0; }//Dbacc::initialiseScanRec() /* --------------------------------------------------------------------------------- */ /* INITIALISE_TABLE_REC */ /* INITIALATES THE TABLE RECORDS. */ /* --------------------------------------------------------------------------------- */ void Dbacc::initialiseTableRec() { ndbrequire(ctablesize > 0); for (tabptr.i = 0; tabptr.i < ctablesize; tabptr.i++) { refresh_watch_dog(); ptrAss(tabptr, tabrec); for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) { tabptr.p->fragholder[i] = RNIL; tabptr.p->fragptrholder[i] = RNIL; }//for }//for }//Dbacc::initialiseTableRec() void Dbacc::set_tup_fragptr(Uint32 fragptr, Uint32 tup_fragptr) { fragrecptr.i = fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); fragrecptr.p->tupFragptr = tup_fragptr; } /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF SYSTEM RESTART MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* ADD/DELETE FRAGMENT MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ // JONAS This methods "aer ett saall" void Dbacc::execACCFRAGREQ(Signal* signal) { const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; jamEntry(); if (ERROR_INSERTED(3001)) { jam(); addFragRefuse(signal, 1); CLEAR_ERROR_INSERT_VALUE; return; } tabptr.i = req->tableId; #ifndef VM_TRACE // config mismatch - do not crash if release compiled if (tabptr.i >= ctablesize) { jam(); addFragRefuse(signal, 640); return; } #endif ptrCheckGuard(tabptr, ctablesize, tabrec); ndbrequire((req->reqInfo & 0xF) == ZADDFRAG); ndbrequire(!getfragmentrec(fragrecptr, req->fragId)); if (cfirstfreefrag == RNIL) { jam(); 
addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR); return; }//if ndbassert(req->localKeyLen == 1); if (req->localKeyLen != 1) { jam(); addFragRefuse(signal, ZLOCAL_KEY_LENGTH_ERROR); return; } seizeFragrec(); initFragGeneral(fragrecptr); initFragAdd(signal, fragrecptr); if (!addfragtotab(fragrecptr.i, req->fragId)) { jam(); releaseFragRecord(fragrecptr); addFragRefuse(signal, ZFULL_FRAGRECORD_ERROR); return; }//if Page8Ptr spPageptr; seizePage(spPageptr); if (tresult > ZLIMIT_OF_ERROR) { jam(); addFragRefuse(signal, tresult); return; }//if if (!setPagePtr(fragrecptr.p->directory, 0, spPageptr.i)) { jam(); addFragRefuse(signal, ZDIR_RANGE_FULL_ERROR); return; } tipPageId = 0; initPage(spPageptr); Uint32 userPtr = req->userPtr; BlockReference retRef = req->userRef; fragrecptr.p->rootState = ACTIVEROOT; AccFragConf * const conf = (AccFragConf*)&signal->theData[0]; conf->userPtr = userPtr; conf->rootFragPtr = RNIL; conf->fragId[0] = fragrecptr.p->fragmentid; conf->fragId[1] = RNIL; conf->fragPtr[0] = fragrecptr.i; conf->fragPtr[1] = RNIL; conf->rootHashCheck = fragrecptr.p->roothashcheck; sendSignal(retRef, GSN_ACCFRAGCONF, signal, AccFragConf::SignalLength, JBB); }//Dbacc::execACCFRAGREQ() void Dbacc::addFragRefuse(Signal* signal, Uint32 errorCode) const { const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; AccFragRef * const ref = (AccFragRef*)&signal->theData[0]; Uint32 userPtr = req->userPtr; BlockReference retRef = req->userRef; ref->userPtr = userPtr; ref->errorCode = errorCode; sendSignal(retRef, GSN_ACCFRAGREF, signal, AccFragRef::SignalLength, JBB); return; }//Dbacc::addFragRefuseEarly() void Dbacc::execDROP_TAB_REQ(Signal* signal){ jamEntry(); DropTabReq* req = (DropTabReq*)signal->getDataPtr(); TabrecPtr tabPtr; tabPtr.i = req->tableId; ptrCheckGuard(tabPtr, ctablesize, tabrec); tabPtr.p->tabUserRef = req->senderRef; tabPtr.p->tabUserPtr = req->senderData; tabPtr.p->tabUserGsn = GSN_DROP_TAB_REQ; signal->theData[0] = ZREL_ROOT_FRAG; signal->theData[1] = tabPtr.i; sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); } void Dbacc::execDROP_FRAG_REQ(Signal* signal){ jamEntry(); DropFragReq* req = (DropFragReq*)signal->getDataPtr(); TabrecPtr tabPtr; tabPtr.i = req->tableId; ptrCheckGuard(tabPtr, ctablesize, tabrec); tabPtr.p->tabUserRef = req->senderRef; tabPtr.p->tabUserPtr = req->senderData; tabPtr.p->tabUserGsn = GSN_DROP_FRAG_REQ; for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabPtr.p->fragholder); i++) { jam(); if (tabPtr.p->fragholder[i] == req->fragId) { jam(); tabPtr.p->fragholder[i] = RNIL; releaseFragResources(signal, tabPtr.p->fragptrholder[i]); return; }//if }//for releaseRootFragResources(signal, req->tableId); } void Dbacc::releaseRootFragResources(Signal* signal, Uint32 tableId) { TabrecPtr tabPtr; tabPtr.i = tableId; ptrCheckGuard(tabPtr, ctablesize, tabrec); if (tabPtr.p->tabUserGsn == GSN_DROP_TAB_REQ) { jam(); for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabPtr.p->fragholder); i++) { jam(); if (tabPtr.p->fragholder[i] != RNIL) { jam(); tabPtr.p->fragholder[i] = RNIL; releaseFragResources(signal, tabPtr.p->fragptrholder[i]); return; } } /** * Finished... 
*/ DropTabConf * const dropConf = (DropTabConf *)signal->getDataPtrSend(); dropConf->senderRef = reference(); dropConf->senderData = tabPtr.p->tabUserPtr; dropConf->tableId = tabPtr.i; sendSignal(tabPtr.p->tabUserRef, GSN_DROP_TAB_CONF, signal, DropTabConf::SignalLength, JBB); } else { ndbrequire(tabPtr.p->tabUserGsn == GSN_DROP_FRAG_REQ); DropFragConf * conf = (DropFragConf *)signal->getDataPtrSend(); conf->senderRef = reference(); conf->senderData = tabPtr.p->tabUserPtr; conf->tableId = tabPtr.i; sendSignal(tabPtr.p->tabUserRef, GSN_DROP_FRAG_CONF, signal, DropFragConf::SignalLength, JBB); } tabPtr.p->tabUserPtr = RNIL; tabPtr.p->tabUserRef = 0; tabPtr.p->tabUserGsn = 0; }//Dbacc::releaseRootFragResources() void Dbacc::releaseFragResources(Signal* signal, Uint32 fragIndex) { jam(); ndbassert(g_acc_pages_used[instance()] == cnoOfAllocatedPages); FragmentrecPtr regFragPtr; regFragPtr.i = fragIndex; ptrCheckGuard(regFragPtr, cfragmentsize, fragmentrec); verifyFragCorrect(regFragPtr); if (!regFragPtr.p->directory.isEmpty()) { jam(); DynArr256::ReleaseIterator iter; DynArr256 dir(directoryPool, regFragPtr.p->directory); dir.init(iter); signal->theData[0] = ZREL_DIR; signal->theData[1] = regFragPtr.i; memcpy(&signal->theData[2], &iter, sizeof(iter)); sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2 + sizeof(iter) / 4, JBB); } else { jam(); { ndbassert(static_cast<Uint32>(regFragPtr.p->m_noOfAllocatedPages) == regFragPtr.p->sparsepages.getCount() + regFragPtr.p->fullpages.getCount()); regFragPtr.p->m_noOfAllocatedPages = 0; LocalPage8List freelist(*this, cfreepages); cnoOfAllocatedPages -= regFragPtr.p->sparsepages.getCount(); freelist.appendList(regFragPtr.p->sparsepages); cnoOfAllocatedPages -= regFragPtr.p->fullpages.getCount(); freelist.appendList(regFragPtr.p->fullpages); ndbassert(freelist.getCount() + cnoOfAllocatedPages == cpageCount); g_acc_pages_used[instance()] = cnoOfAllocatedPages; if (cnoOfAllocatedPages < m_maxAllocPages) m_oom = false; } jam(); Uint32 tab = regFragPtr.p->mytabptr; releaseFragRecord(regFragPtr); signal->theData[0] = ZREL_ROOT_FRAG; signal->theData[1] = tab; sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); }//if ndbassert(validatePageCount()); }//Dbacc::releaseFragResources() void Dbacc::verifyFragCorrect(FragmentrecPtr regFragPtr)const { ndbrequire(regFragPtr.p->lockOwnersList == RNIL); }//Dbacc::verifyFragCorrect() void Dbacc::releaseDirResources(Signal* signal) { jam(); Uint32 fragIndex = signal->theData[1]; DynArr256::ReleaseIterator iter; memcpy(&iter, &signal->theData[2], sizeof(iter)); FragmentrecPtr regFragPtr; regFragPtr.i = fragIndex; ptrCheckGuard(regFragPtr, cfragmentsize, fragmentrec); verifyFragCorrect(regFragPtr); DynArr256::Head* directory; ndbrequire(signal->theData[0] == ZREL_DIR); directory = &regFragPtr.p->directory; DynArr256 dir(directoryPool, *directory); Uint32 ret; Uint32 pagei; /* TODO: find a good value for count * bigger value means quicker release of big index, * but longer time slice so less concurrent */ int count = 10; while (count > 0 && (ret = dir.release(iter, &pagei)) != 0) { jam(); count--; if (ret == 1 && pagei != RNIL) { jam(); Page8Ptr rpPageptr; rpPageptr.i = pagei; ptrCheckGuard(rpPageptr, cpagesize, page8); fragrecptr = regFragPtr; releasePage(rpPageptr); } } if (ret != 0) { jam(); memcpy(&signal->theData[2], &iter, sizeof(iter)); sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2 + sizeof(iter) / 4, JBB); } else { jam(); signal->theData[0] = ZREL_FRAG; sendSignal(cownBlockref, GSN_CONTINUEB, signal, 2, JBB); } 
}//Dbacc::releaseDirResources() void Dbacc::releaseFragRecord(FragmentrecPtr regFragPtr) { regFragPtr.p->nextfreefrag = cfirstfreefrag; cfirstfreefrag = regFragPtr.i; initFragGeneral(regFragPtr); RSS_OP_FREE(cnoOfFreeFragrec); }//Dbacc::releaseFragRecord() /* -------------------------------------------------------------------------- */ /* ADDFRAGTOTAB */ /* DESCRIPTION: PUTS A FRAGMENT ID AND A POINTER TO ITS RECORD INTO */ /* TABLE ARRRAY OF THE TABLE RECORD. */ /* -------------------------------------------------------------------------- */ bool Dbacc::addfragtotab(Uint32 rootIndex, Uint32 fid) const { for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) { jam(); if (tabptr.p->fragholder[i] == RNIL) { jam(); tabptr.p->fragholder[i] = fid; tabptr.p->fragptrholder[i] = rootIndex; return true; }//if }//for return false; }//Dbacc::addfragtotab() /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF ADD/DELETE FRAGMENT MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* CONNECTION MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* ******************--------------------------------------------------------------- */ /* ACCSEIZEREQ SEIZE REQ */ /* SENDER: LQH, LEVEL B */ /* ENTER ACCSEIZEREQ WITH */ /* TUSERPTR , CONECTION PTR OF LQH */ /* TUSERBLOCKREF BLOCK REFERENCE OF LQH */ /* ******************--------------------------------------------------------------- */ /* ******************--------------------------------------------------------------- */ /* ACCSEIZEREQ SEIZE REQ */ /* ******************------------------------------+ */ /* SENDER: LQH, LEVEL B */ void Dbacc::execACCSEIZEREQ(Signal* signal) { jamEntry(); tuserptr = signal->theData[0]; /* CONECTION PTR OF LQH */ tuserblockref = signal->theData[1]; /* BLOCK REFERENCE OF LQH */ tresult = 0; if (cfreeopRec == RNIL) { jam(); refaccConnectLab(signal); return; }//if seizeOpRec(); ptrGuard(operationRecPtr); operationRecPtr.p->userptr = tuserptr; operationRecPtr.p->userblockref = tuserblockref; operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; /* ******************************< */ /* ACCSEIZECONF */ /* ******************************< */ signal->theData[0] = tuserptr; signal->theData[1] = operationRecPtr.i; sendSignal(tuserblockref, GSN_ACCSEIZECONF, signal, 2, JBB); return; }//Dbacc::execACCSEIZEREQ() void Dbacc::refaccConnectLab(Signal* signal) { tresult = ZCONNECT_SIZE_ERROR; /* ******************************< */ /* ACCSEIZEREF */ /* ******************************< */ signal->theData[0] = tuserptr; signal->theData[1] = tresult; sendSignal(tuserblockref, GSN_ACCSEIZEREF, signal, 2, JBB); return; }//Dbacc::refaccConnectLab() /* --------------------------------------------------------------------------------- */ /* 
--------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF CONNECTION MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* EXECUTE OPERATION MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* INIT_OP_REC */ /* INFORMATION WHICH IS RECIEVED BY ACCKEYREQ WILL BE SAVED */ /* IN THE OPERATION RECORD. */ /* --------------------------------------------------------------------------------- */ void Dbacc::initOpRec(const AccKeyReq* signal, Uint32 siglen) const { register Uint32 Treqinfo; Treqinfo = signal->requestInfo; operationRecPtr.p->hashValue = LHBits32(signal->hashValue); operationRecPtr.p->tupkeylen = signal->keyLen; operationRecPtr.p->xfrmtupkeylen = signal->keyLen; operationRecPtr.p->transId1 = signal->transId1; operationRecPtr.p->transId2 = signal->transId2; const bool readOp = AccKeyReq::getLockType(Treqinfo) == ZREAD; const bool dirtyOp = AccKeyReq::getDirtyOp(Treqinfo); const bool dirtyReadOp = readOp & dirtyOp; Uint32 operation = AccKeyReq::getOperation(Treqinfo); if (operation == ZREFRESH) operation = ZWRITE; /* Insert if !exist, otherwise lock */ Uint32 opbits = 0; opbits |= operation; opbits |= readOp ? 0 : (Uint32) Operationrec::OP_LOCK_MODE; opbits |= readOp ? 0 : (Uint32) Operationrec::OP_ACC_LOCK_MODE; opbits |= dirtyReadOp ? (Uint32) Operationrec::OP_DIRTY_READ : 0; if (AccKeyReq::getLockReq(Treqinfo)) { opbits |= Operationrec::OP_LOCK_REQ; // TUX LOCK_REQ /** * A lock req has SCAN_OP, it can't delete a row, * so OP_COMMIT_DELETE_CHECK is set like for SCAN * see initScanOpRec */ opbits |= Operationrec::OP_COMMIT_DELETE_CHECK; /** * TODO: Looking at it now, I think it would be more natural * to treat it as a ZREAD... 
*/ } //operationRecPtr.p->nodeType = AccKeyReq::getReplicaType(Treqinfo); operationRecPtr.p->fid = fragrecptr.p->myfid; operationRecPtr.p->fragptr = fragrecptr.i; operationRecPtr.p->nextParallelQue = RNIL; operationRecPtr.p->prevParallelQue = RNIL; operationRecPtr.p->nextSerialQue = RNIL; operationRecPtr.p->prevSerialQue = RNIL; operationRecPtr.p->elementPage = RNIL; operationRecPtr.p->scanRecPtr = RNIL; operationRecPtr.p->m_op_bits = opbits; NdbTick_Invalidate(&operationRecPtr.p->m_lockTime); // bit to mark lock operation // undo log is not run via ACCKEYREQ if (operationRecPtr.p->tupkeylen == 0) { NDB_STATIC_ASSERT(AccKeyReq::SignalLength_localKey == 10); ndbassert(siglen == AccKeyReq::SignalLength_localKey); } else { NDB_STATIC_ASSERT(AccKeyReq::SignalLength_keyInfo == 8); ndbassert(siglen == AccKeyReq::SignalLength_keyInfo + operationRecPtr.p->tupkeylen); } }//Dbacc::initOpRec() /* --------------------------------------------------------------------------------- */ /* SEND_ACCKEYCONF */ /* --------------------------------------------------------------------------------- */ void Dbacc::sendAcckeyconf(Signal* signal) const { signal->theData[0] = operationRecPtr.p->userptr; signal->theData[1] = operationRecPtr.p->m_op_bits & Operationrec::OP_MASK; signal->theData[2] = operationRecPtr.p->fid; signal->theData[3] = operationRecPtr.p->localdata.m_page_no; signal->theData[4] = operationRecPtr.p->localdata.m_page_idx; }//Dbacc::sendAcckeyconf() /* ******************--------------------------------------------------------------- */ /* ACCKEYREQ REQUEST FOR INSERT, DELETE, */ /* RERAD AND UPDATE, A TUPLE. */ /* SENDER: LQH, LEVEL B */ /* SIGNAL DATA: OPERATION_REC_PTR, CONNECTION PTR */ /* TABPTR, TABLE ID = TABLE RECORD POINTER */ /* TREQINFO, */ /* THASHVALUE, HASH VALUE OF THE TUP */ /* TKEYLEN, LENGTH OF THE PRIMARY KEYS */ /* TKEY1, PRIMARY KEY 1 */ /* TKEY2, PRIMARY KEY 2 */ /* TKEY3, PRIMARY KEY 3 */ /* TKEY4, PRIMARY KEY 4 */ /* ******************--------------------------------------------------------------- */ void Dbacc::execACCKEYREQ(Signal* signal) { jamEntry(); AccKeyReq* const req = reinterpret_cast<AccKeyReq*>(&signal->theData[0]); operationRecPtr.i = req->connectPtr; /* CONNECTION PTR */ fragrecptr.i = req->fragmentPtr; /* FRAGMENT RECORD POINTER */ ndbrequire((operationRecPtr.i < coprecsize) || (fragrecptr.i < cfragmentsize)); ptrAss(operationRecPtr, operationrec); ptrAss(fragrecptr, fragmentrec); ndbrequire(operationRecPtr.p->m_op_bits == Operationrec::OP_INITIAL); initOpRec(req, signal->getLength()); // normalize key if any char attr if (operationRecPtr.p->tupkeylen && fragrecptr.p->hasCharAttr) xfrmKeyData(req); /*---------------------------------------------------------------*/ /* */ /* WE WILL USE THE HASH VALUE TO LOOK UP THE PROPER MEMORY */ /* PAGE AND MEMORY PAGE INDEX TO START THE SEARCH WITHIN. */ /* WE REMEMBER THESE ADDRESS IF WE LATER NEED TO INSERT */ /* THE ITEM AFTER NOT FINDING THE ITEM. */ /*---------------------------------------------------------------*/ OperationrecPtr lockOwnerPtr; Page8Ptr bucketPageptr; Uint32 bucketConidx; Page8Ptr elemPageptr; Uint32 elemConptr; Uint32 elemptr; const Uint32 found = getElement(req, lockOwnerPtr, bucketPageptr, bucketConidx, elemPageptr, elemConptr, elemptr); Uint32 opbits = operationRecPtr.p->m_op_bits; if (AccKeyReq::getTakeOver(req->requestInfo)) { /* Verify that lock taken over and operation are on same * element by checking that lockOwner match. 
*/ OperationrecPtr lockOpPtr; lockOpPtr.i = req->lockConnectPtr; ptrAss(lockOpPtr, operationrec); if (lockOwnerPtr.i == RNIL || !(lockOwnerPtr.i == lockOpPtr.i || lockOwnerPtr.i == lockOpPtr.p->m_lock_owner_ptr_i)) { signal->theData[0] = cminusOne; signal->theData[1] = ZTO_OP_STATE_ERROR; operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; return; /* Take over failed */ } signal->theData[1] = req->lockConnectPtr; signal->theData[2] = operationRecPtr.p->transId1; signal->theData[3] = operationRecPtr.p->transId2; execACC_TO_REQ(signal); if (signal->theData[0] == cminusOne) { operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; ndbassert(signal->theData[1] == ZTO_OP_STATE_ERROR); return; /* Take over failed */ } } Uint32 op = opbits & Operationrec::OP_MASK; if (found == ZTRUE) { switch (op) { case ZREAD: case ZUPDATE: case ZDELETE: case ZWRITE: case ZSCAN_OP: if (!lockOwnerPtr.p) { if(op == ZWRITE) { jam(); opbits &= ~(Uint32)Operationrec::OP_MASK; opbits |= (op = ZUPDATE); operationRecPtr.p->m_op_bits = opbits; // store to get correct ACCKEYCONF } opbits |= Operationrec::OP_STATE_RUNNING; opbits |= Operationrec::OP_RUN_QUEUE; c_tup->prepareTUPKEYREQ(operationRecPtr.p->localdata.m_page_no, operationRecPtr.p->localdata.m_page_idx, fragrecptr.p->tupFragptr); sendAcckeyconf(signal); if (! (opbits & Operationrec::OP_DIRTY_READ)) { /*---------------------------------------------------------------*/ // It is not a dirty read. We proceed by locking and continue with // the operation. /*---------------------------------------------------------------*/ Uint32 eh = elemPageptr.p->word32[elemptr]; operationRecPtr.p->reducedHashValue = ElementHeader::getReducedHashValue(eh); operationRecPtr.p->elementPage = elemPageptr.i; operationRecPtr.p->elementContainer = elemConptr; operationRecPtr.p->elementPointer = elemptr; eh = ElementHeader::setLocked(operationRecPtr.i); dbgWord32(elemPageptr, elemptr, eh); elemPageptr.p->word32[elemptr] = eh; opbits |= Operationrec::OP_LOCK_OWNER; insertLockOwnersList(operationRecPtr); fragrecptr.p-> m_lockStats.req_start_imm_ok((opbits & Operationrec::OP_LOCK_MODE) != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); } else { jam(); /*---------------------------------------------------------------*/ // It is a dirty read. We do not lock anything. Set state to // IDLE since no COMMIT call will come. 
/*---------------------------------------------------------------*/ opbits = Operationrec::OP_EXECUTED_DIRTY_READ; }//if operationRecPtr.p->m_op_bits = opbits; return; } else { jam(); accIsLockedLab(signal, lockOwnerPtr); return; }//if break; case ZINSERT: jam(); insertExistElemLab(signal, lockOwnerPtr); return; break; default: ndbrequire(false); break; }//switch } else if (found == ZFALSE) { switch (op){ case ZWRITE: opbits &= ~(Uint32)Operationrec::OP_MASK; opbits |= (op = ZINSERT); case ZINSERT: jam(); opbits |= Operationrec::OP_INSERT_IS_DONE; opbits |= Operationrec::OP_STATE_RUNNING; opbits |= Operationrec::OP_RUN_QUEUE; operationRecPtr.p->m_op_bits = opbits; insertelementLab(signal, bucketPageptr, bucketConidx); return; break; case ZREAD: case ZUPDATE: case ZDELETE: case ZSCAN_OP: jam(); acckeyref1Lab(signal, ZREAD_ERROR); return; break; default: ndbrequire(false); break; }//switch } else { jam(); acckeyref1Lab(signal, found); return; }//if return; }//Dbacc::execACCKEYREQ() void Dbacc::execACCKEY_ORD(Signal* signal, Uint32 opPtrI) { jamEntry(); OperationrecPtr lastOp; lastOp.i = opPtrI; ptrCheckGuard(lastOp, coprecsize, operationrec); Uint32 opbits = lastOp.p->m_op_bits; Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; if (likely(opbits == Operationrec::OP_EXECUTED_DIRTY_READ)) { jam(); lastOp.p->m_op_bits = Operationrec::OP_INITIAL; return; } else if (likely(opstate == Operationrec::OP_STATE_RUNNING)) { opbits |= Operationrec::OP_STATE_EXECUTED; lastOp.p->m_op_bits = opbits; startNext(signal, lastOp); return; } else { } ndbout_c("bits: %.8x state: %.8x", opbits, opstate); ndbrequire(false); } void Dbacc::startNext(Signal* signal, OperationrecPtr lastOp) { jam(); OperationrecPtr nextOp; OperationrecPtr loPtr; OperationrecPtr tmp; nextOp.i = lastOp.p->nextParallelQue; loPtr.i = lastOp.p->m_lock_owner_ptr_i; Uint32 opbits = lastOp.p->m_op_bits; if ((opbits & Operationrec::OP_STATE_MASK)!= Operationrec::OP_STATE_EXECUTED) { jam(); return; } Uint32 nextbits; if (nextOp.i != RNIL) { jam(); ptrCheckGuard(nextOp, coprecsize, operationrec); nextbits = nextOp.p->m_op_bits; goto checkop; } if ((opbits & Operationrec::OP_LOCK_OWNER) == 0) { jam(); ptrCheckGuard(loPtr, coprecsize, operationrec); } else { jam(); loPtr = lastOp; } nextOp.i = loPtr.p->nextSerialQue; ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); if (nextOp.i == RNIL) { jam(); return; } /** * There is an op in serie queue... * Check if it can run */ ptrCheckGuard(nextOp, coprecsize, operationrec); nextbits = nextOp.p->m_op_bits; { const bool same = nextOp.p->is_same_trans(lastOp.p); if (!same && ((opbits & Operationrec::OP_ACC_LOCK_MODE) || (nextbits & Operationrec::OP_LOCK_MODE))) { jam(); /** * Not same transaction * and either last had exclusive lock * or next had exclusive lock */ return; } /** * same trans and X-lock */ if (same && (opbits & Operationrec::OP_ACC_LOCK_MODE)) { jam(); goto upgrade; } } /** * all shared lock... */ if ((opbits & Operationrec::OP_ACC_LOCK_MODE) == 0 && (nextbits & Operationrec::OP_LOCK_MODE) == 0) { jam(); goto upgrade; } /** * There is a shared parallell queue & and exclusive op is first in queue */ ndbassert((opbits & Operationrec::OP_ACC_LOCK_MODE) == 0 && (nextbits & Operationrec::OP_LOCK_MODE)); /** * We must check if there are many transactions in parallel queue... 
*/ tmp= loPtr; while (tmp.i != RNIL) { ptrCheckGuard(tmp, coprecsize, operationrec); if (!nextOp.p->is_same_trans(tmp.p)) { jam(); /** * parallel queue contained another transaction, dont let it run */ return; } tmp.i = tmp.p->nextParallelQue; } upgrade: /** * Move first op in serie queue to end of parallell queue */ tmp.i = loPtr.p->nextSerialQue = nextOp.p->nextSerialQue; loPtr.p->m_lo_last_parallel_op_ptr_i = nextOp.i; nextOp.p->nextSerialQue = RNIL; nextOp.p->prevSerialQue = RNIL; nextOp.p->m_lock_owner_ptr_i = loPtr.i; nextOp.p->prevParallelQue = lastOp.i; lastOp.p->nextParallelQue = nextOp.i; if (tmp.i != RNIL) { jam(); ptrCheckGuard(tmp, coprecsize, operationrec); tmp.p->prevSerialQue = loPtr.i; } else { jam(); loPtr.p->m_lo_last_serial_op_ptr_i = RNIL; } nextbits |= Operationrec::OP_RUN_QUEUE; /** * Currently no grouping of ops in serie queue */ ndbrequire(nextOp.p->nextParallelQue == RNIL); /** * Track end-of-wait */ { FragmentrecPtr frp; frp.i = nextOp.p->fragptr; ptrCheckGuard(frp, cfragmentsize, fragmentrec); frp.p->m_lockStats.wait_ok(((nextbits & Operationrec::OP_LOCK_MODE) != ZREADLOCK), nextOp.p->m_lockTime, getHighResTimer()); } checkop: Uint32 errCode = 0; OperationrecPtr save = operationRecPtr; operationRecPtr = nextOp; Uint32 lastop = opbits & Operationrec::OP_MASK; Uint32 nextop = nextbits & Operationrec::OP_MASK; nextbits &= nextbits & ~(Uint32)Operationrec::OP_STATE_MASK; nextbits |= Operationrec::OP_STATE_RUNNING; /* * bug#19031389 * Consider transactions such as read-0,read-1,read-2,delete-3. * Read-N commits come from TC while delete-3 commit comes from * backup replica. In MT kernel delete-3 commit can come first. * Then at read-0 commit there is no ZDELETE left. But all * ops in parallel queue have been marked OP_ELEMENT_DISAPPEARED. * So also check for that bit. 
*/ if (lastop == ZDELETE || (lastOp.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED)) { jam(); if (nextop != ZINSERT && nextop != ZWRITE) { errCode = ZREAD_ERROR; goto ref; } nextbits &= ~(Uint32)Operationrec::OP_MASK; nextbits &= ~(Uint32)Operationrec::OP_ELEMENT_DISAPPEARED; nextbits |= (nextop = ZINSERT); goto conf; } else if (nextop == ZINSERT) { jam(); errCode = ZWRITE_ERROR; goto ref; } else if (nextop == ZWRITE) { jam(); nextbits &= ~(Uint32)Operationrec::OP_MASK; nextbits |= (nextop = ZUPDATE); goto conf; } else { jam(); } conf: nextOp.p->m_op_bits = nextbits; nextOp.p->localdata = lastOp.p->localdata; if (nextop == ZSCAN_OP && (nextbits & Operationrec::OP_LOCK_REQ) == 0) { jam(); takeOutScanLockQueue(nextOp.p->scanRecPtr); putReadyScanQueue(nextOp.p->scanRecPtr); } else { jam(); fragrecptr.i = nextOp.p->fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); sendAcckeyconf(signal); sendSignal(nextOp.p->userblockref, GSN_ACCKEYCONF, signal, 6, JBB); } operationRecPtr = save; return; ref: nextOp.p->m_op_bits = nextbits; if (nextop == ZSCAN_OP && (nextbits & Operationrec::OP_LOCK_REQ) == 0) { jam(); nextOp.p->m_op_bits |= Operationrec::OP_ELEMENT_DISAPPEARED; takeOutScanLockQueue(nextOp.p->scanRecPtr); putReadyScanQueue(nextOp.p->scanRecPtr); } else { jam(); signal->theData[0] = nextOp.p->userptr; signal->theData[1] = errCode; sendSignal(nextOp.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB); } operationRecPtr = save; return; } void Dbacc::xfrmKeyData(AccKeyReq* signal)const { Uint32 table = fragrecptr.p->myTableId; Uint32 dst[MAX_KEY_SIZE_IN_WORDS * MAX_XFRM_MULTIPLY]; Uint32 keyPartLen[MAX_ATTRIBUTES_IN_INDEX]; Uint32* const src = signal->keyInfo; Uint32 len = xfrm_key(table, src, dst, sizeof(dst) >> 2, keyPartLen); ndbrequire(len); // 0 means error memcpy(src, dst, len << 2); operationRecPtr.p->xfrmtupkeylen = len; } void Dbacc::accIsLockedLab(Signal* signal, OperationrecPtr lockOwnerPtr) const { Uint32 bits = operationRecPtr.p->m_op_bits; validate_lock_queue(lockOwnerPtr); if ((bits & Operationrec::OP_DIRTY_READ) == 0){ Uint32 return_result; if ((bits & Operationrec::OP_LOCK_MODE) == ZREADLOCK) { jam(); return_result = placeReadInLockQueue(lockOwnerPtr); } else { jam(); return_result = placeWriteInLockQueue(lockOwnerPtr); }//if if (return_result == ZPARALLEL_QUEUE) { jam(); c_tup->prepareTUPKEYREQ(operationRecPtr.p->localdata.m_page_no, operationRecPtr.p->localdata.m_page_idx, fragrecptr.p->tupFragptr); fragrecptr.p->m_lockStats.req_start_imm_ok((bits & Operationrec::OP_LOCK_MODE) != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); sendAcckeyconf(signal); return; } else if (return_result == ZSERIAL_QUEUE) { jam(); fragrecptr.p->m_lockStats.req_start((bits & Operationrec::OP_LOCK_MODE) != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); signal->theData[0] = RNIL; return; } else { jam(); acckeyref1Lab(signal, return_result); return; }//if ndbrequire(false); } else { if (! (lockOwnerPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED) && ! lockOwnerPtr.p->localdata.isInvalid()) { jam(); /* --------------------------------------------------------------- * It is a dirty read. We do not lock anything. Set state to *IDLE since no COMMIT call will arrive. 
* ---------------------------------------------------------------*/ c_tup->prepareTUPKEYREQ(operationRecPtr.p->localdata.m_page_no, operationRecPtr.p->localdata.m_page_idx, fragrecptr.p->tupFragptr); sendAcckeyconf(signal); operationRecPtr.p->m_op_bits = Operationrec::OP_EXECUTED_DIRTY_READ; return; } else { jam(); /*---------------------------------------------------------------*/ // The tuple does not exist in the committed world currently. // Report read error. /*---------------------------------------------------------------*/ acckeyref1Lab(signal, ZREAD_ERROR); return; }//if }//if }//Dbacc::accIsLockedLab() /* ------------------------------------------------------------------------ */ /* I N S E R T E X I S T E L E M E N T */ /* ------------------------------------------------------------------------ */ void Dbacc::insertExistElemLab(Signal* signal, OperationrecPtr lockOwnerPtr) const { if (!lockOwnerPtr.p) { jam(); acckeyref1Lab(signal, ZWRITE_ERROR);/* THE ELEMENT ALREADY EXIST */ return; }//if accIsLockedLab(signal, lockOwnerPtr); }//Dbacc::insertExistElemLab() /* --------------------------------------------------------------------------------- */ /* INSERTELEMENT */ /* --------------------------------------------------------------------------------- */ void Dbacc::insertelementLab(Signal* signal, Page8Ptr bucketPageptr, Uint32 bucketConidx) { if (unlikely(m_oom)) { jam(); acckeyref1Lab(signal, ZPAGESIZE_ERROR); return; } if (unlikely(fragrecptr.p->dirRangeFull)) { jam(); acckeyref1Lab(signal, ZDIR_RANGE_FULL_ERROR); return; } if (fragrecptr.p->sparsepages.isEmpty()) { jam(); allocOverflowPage(); if (tresult > ZLIMIT_OF_ERROR) { jam(); acckeyref1Lab(signal, tresult); return; }//if }//if ndbrequire(operationRecPtr.p->tupkeylen <= fragrecptr.p->keyLength); ndbassert(!(operationRecPtr.p->m_op_bits & Operationrec::OP_LOCK_REQ)); insertLockOwnersList(operationRecPtr); operationRecPtr.p->reducedHashValue = fragrecptr.p->level.reduce(operationRecPtr.p->hashValue); const Uint32 tidrElemhead = ElementHeader::setLocked(operationRecPtr.i); Page8Ptr idrPageptr; idrPageptr = bucketPageptr; Uint32 tidrPageindex = bucketConidx; bool isforward = true; ndbrequire(fragrecptr.p->localkeylen == 1); /* ----------------------------------------------------------------------- */ /* WE SET THE LOCAL KEY TO MINUS ONE TO INDICATE IT IS NOT YET VALID. 
*/ /* ----------------------------------------------------------------------- */ Local_key localKey; localKey.setInvalid(); operationRecPtr.p->localdata = localKey; Uint32 conptr; insertElement(Element(tidrElemhead, localKey.m_page_no), operationRecPtr, idrPageptr, tidrPageindex, isforward, conptr, Operationrec::ANY_SCANBITS, false); fragrecptr.p->m_lockStats.req_start_imm_ok(true /* Exclusive */, operationRecPtr.p->m_lockTime, getHighResTimer()); c_tup->prepareTUPKEYREQ(localKey.m_page_no, localKey.m_page_idx, fragrecptr.p->tupFragptr); sendAcckeyconf(signal); return; }//Dbacc::insertelementLab() /* ------------------------------------------------------------------------ */ /* GET_NO_PARALLEL_TRANSACTION */ /* ------------------------------------------------------------------------ */ Uint32 Dbacc::getNoParallelTransaction(const Operationrec * op) const { OperationrecPtr tmp; tmp.i= op->nextParallelQue; Uint32 transId[2] = { op->transId1, op->transId2 }; while (tmp.i != RNIL) { jam(); ptrCheckGuard(tmp, coprecsize, operationrec); if (tmp.p->transId1 == transId[0] && tmp.p->transId2 == transId[1]) tmp.i = tmp.p->nextParallelQue; else return 2; } return 1; }//Dbacc::getNoParallelTransaction() #ifdef VM_TRACE Uint32 Dbacc::getNoParallelTransactionFull(const Operationrec * op) const { ConstPtr<Operationrec> tmp; tmp.p = op; while ((tmp.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) { tmp.i = tmp.p->prevParallelQue; if (tmp.i != RNIL) { ptrCheckGuard(tmp, coprecsize, operationrec); } else { break; } } return getNoParallelTransaction(tmp.p); } #endif #ifdef ACC_SAFE_QUEUE Uint32 Dbacc::get_parallel_head(OperationrecPtr opPtr) const { while ((opPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && opPtr.p->prevParallelQue != RNIL) { opPtr.i = opPtr.p->prevParallelQue; ptrCheckGuard(opPtr, coprecsize, operationrec); } return opPtr.i; } bool Dbacc::validate_lock_queue(OperationrecPtr opPtr)const { OperationrecPtr loPtr; loPtr.i = get_parallel_head(opPtr); ptrCheckGuard(loPtr, coprecsize, operationrec); while((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && loPtr.p->prevSerialQue != RNIL) { loPtr.i = loPtr.p->prevSerialQue; ptrCheckGuard(loPtr, coprecsize, operationrec); } // Now we have lock owner... vlqrequire(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); vlqrequire(loPtr.p->m_op_bits & Operationrec::OP_RUN_QUEUE); // 1 Validate page pointer { Page8Ptr pagePtr; pagePtr.i = loPtr.p->elementPage; ptrCheckGuard(pagePtr, cpagesize, page8); arrGuard(loPtr.p->elementPointer, 2048); Uint32 eh = pagePtr.p->word32[loPtr.p->elementPointer]; vlqrequire(ElementHeader::getLocked(eh)); vlqrequire(ElementHeader::getOpPtrI(eh) == loPtr.i); } // 2 Lock owner should always have same LOCK_MODE and ACC_LOCK_MODE if (loPtr.p->m_op_bits & Operationrec::OP_LOCK_MODE) { vlqrequire(loPtr.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE); } else { vlqrequire((loPtr.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE) == 0); } // 3 Lock owner should never be waiting... 
bool running = false; { Uint32 opstate = loPtr.p->m_op_bits & Operationrec::OP_STATE_MASK; if (opstate == Operationrec::OP_STATE_RUNNING) running = true; else { vlqrequire(opstate == Operationrec::OP_STATE_EXECUTED); } } // Validate parallel queue { bool many = false; bool orlockmode = loPtr.p->m_op_bits & Operationrec::OP_LOCK_MODE; OperationrecPtr lastP = loPtr; while (lastP.p->nextParallelQue != RNIL) { Uint32 prev = lastP.i; lastP.i = lastP.p->nextParallelQue; ptrCheckGuard(lastP, coprecsize, operationrec); vlqrequire(lastP.p->prevParallelQue == prev); Uint32 opbits = lastP.p->m_op_bits; many |= loPtr.p->is_same_trans(lastP.p) ? 0 : 1; orlockmode |= !!(opbits & Operationrec::OP_LOCK_MODE); vlqrequire(opbits & Operationrec::OP_RUN_QUEUE); vlqrequire((opbits & Operationrec::OP_LOCK_OWNER) == 0); Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; if (running) { // If I found a running operation, // all following should be waiting vlqrequire(opstate == Operationrec::OP_STATE_WAITING); } else { if (opstate == Operationrec::OP_STATE_RUNNING) running = true; else vlqrequire(opstate == Operationrec::OP_STATE_EXECUTED); } if (lastP.p->m_op_bits & Operationrec::OP_LOCK_MODE) { vlqrequire(lastP.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE); } else { vlqrequire((lastP.p->m_op_bits && orlockmode) == orlockmode); vlqrequire((lastP.p->m_op_bits & Operationrec::OP_MASK) == ZREAD || (lastP.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP); } if (many) { vlqrequire(orlockmode == 0); } } if (lastP.i != loPtr.i) { vlqrequire(loPtr.p->m_lo_last_parallel_op_ptr_i == lastP.i); vlqrequire(lastP.p->m_lock_owner_ptr_i == loPtr.i); } else { vlqrequire(loPtr.p->m_lo_last_parallel_op_ptr_i == RNIL); } } // Validate serie queue if (loPtr.p->nextSerialQue != RNIL) { Uint32 prev = loPtr.i; OperationrecPtr lastS; lastS.i = loPtr.p->nextSerialQue; while (true) { ptrCheckGuard(lastS, coprecsize, operationrec); vlqrequire(lastS.p->prevSerialQue == prev); vlqrequire(getNoParallelTransaction(lastS.p) == 1); vlqrequire((lastS.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0); vlqrequire((lastS.p->m_op_bits & Operationrec::OP_RUN_QUEUE) == 0); vlqrequire((lastS.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_WAITING); if (lastS.p->nextSerialQue == RNIL) break; prev = lastS.i; lastS.i = lastS.p->nextSerialQue; } vlqrequire(loPtr.p->m_lo_last_serial_op_ptr_i == lastS.i); } else { vlqrequire(loPtr.p->m_lo_last_serial_op_ptr_i == RNIL); } return true; } NdbOut& operator<<(NdbOut & out, Dbacc::OperationrecPtr ptr) { Uint32 opbits = ptr.p->m_op_bits; out << "[ " << dec << ptr.i << " [ " << hex << ptr.p->transId1 << " " << hex << ptr.p->transId2 << "] " << " bits: H'" << hex << opbits << " "; bool read = false; switch(opbits & Dbacc::Operationrec::OP_MASK){ case ZREAD: out << "READ "; read = true; break; case ZINSERT: out << "INSERT "; break; case ZUPDATE: out << "UPDATE "; break; case ZDELETE: out << "DELETE "; break; case ZWRITE: out << "WRITE "; break; case ZSCAN_OP: out << "SCAN "; read = true; break; default: out << "<Unknown: H'" << hex << (opbits & Dbacc::Operationrec::OP_MASK) << "> "; } if (read) { if (opbits & Dbacc::Operationrec::OP_LOCK_MODE) out << "(X)"; else out << "(S)"; if (opbits & Dbacc::Operationrec::OP_ACC_LOCK_MODE) out << "(X)"; else out << "(S)"; } if (opbits) { out << "(RQ)"; } switch(opbits & Dbacc::Operationrec::OP_STATE_MASK){ case Dbacc::Operationrec::OP_STATE_WAITING: out << " WAITING "; break; case Dbacc::Operationrec::OP_STATE_RUNNING: out << " RUNNING "; break; case 
Dbacc::Operationrec::OP_STATE_EXECUTED: out << " EXECUTED "; break; case Dbacc::Operationrec::OP_STATE_IDLE: out << " IDLE "; break; default: out << " <Unknown: H'" << hex << (opbits & Dbacc::Operationrec::OP_STATE_MASK) << "> "; } /* OP_MASK = 0x000F // 4 bits for operation type ,OP_LOCK_MODE = 0x0010 // 0 - shared lock, 1 = exclusive lock ,OP_ACC_LOCK_MODE = 0x0020 // Or:de lock mode of all operation // before me ,OP_LOCK_OWNER = 0x0040 ,OP_DIRTY_READ = 0x0080 ,OP_LOCK_REQ = 0x0100 // isAccLockReq ,OP_COMMIT_DELETE_CHECK = 0x0200 ,OP_INSERT_IS_DONE = 0x0400 ,OP_ELEMENT_DISAPPEARED = 0x0800 ,OP_STATE_MASK = 0xF000 ,OP_STATE_IDLE = 0xF000 ,OP_STATE_WAITING = 0x0000 ,OP_STATE_RUNNING = 0x1000 ,OP_STATE_EXECUTED = 0x3000 }; */ if (opbits & Dbacc::Operationrec::OP_LOCK_OWNER) out << "LO "; if (opbits & Dbacc::Operationrec::OP_DIRTY_READ) out << "DR "; if (opbits & Dbacc::Operationrec::OP_LOCK_REQ) out << "LOCK_REQ "; if (opbits & Dbacc::Operationrec::OP_COMMIT_DELETE_CHECK) out << "COMMIT_DELETE_CHECK "; if (opbits & Dbacc::Operationrec::OP_INSERT_IS_DONE) out << "INSERT_IS_DONE "; if (opbits & Dbacc::Operationrec::OP_ELEMENT_DISAPPEARED) out << "ELEMENT_DISAPPEARED "; if (opbits & Dbacc::Operationrec::OP_LOCK_OWNER) { out << "last_parallel: " << dec << ptr.p->m_lo_last_parallel_op_ptr_i << " "; out << "last_serial: " << dec << ptr.p->m_lo_last_serial_op_ptr_i << " "; } out << "]"; return out; } void Dbacc::dump_lock_queue(OperationrecPtr loPtr)const { if ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) { while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && loPtr.p->prevParallelQue != RNIL) { loPtr.i = loPtr.p->prevParallelQue; ptrCheckGuard(loPtr, coprecsize, operationrec); } while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0 && loPtr.p->prevSerialQue != RNIL) { loPtr.i = loPtr.p->prevSerialQue; ptrCheckGuard(loPtr, coprecsize, operationrec); } ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); } ndbout << "-- HEAD --" << endl; OperationrecPtr tmp = loPtr; while (tmp.i != RNIL) { ptrCheckGuard(tmp, coprecsize, operationrec); ndbout << tmp << " "; tmp.i = tmp.p->nextParallelQue; if (tmp.i == loPtr.i) { ndbout << " <LOOP>"; break; } } ndbout << endl; tmp.i = loPtr.p->nextSerialQue; while (tmp.i != RNIL) { ptrCheckGuard(tmp, coprecsize, operationrec); OperationrecPtr tmp2 = tmp; if (tmp.i == loPtr.i) { ndbout << "<LOOP S>" << endl; break; } while (tmp2.i != RNIL) { ptrCheckGuard(tmp2, coprecsize, operationrec); ndbout << tmp2 << " "; tmp2.i = tmp2.p->nextParallelQue; if (tmp2.i == tmp.i) { ndbout << "<LOOP 3>"; break; } } ndbout << endl; tmp.i = tmp.p->nextSerialQue; } } #endif /* ------------------------------------------------------------------------- * PLACE_WRITE_IN_LOCK_QUEUE * INPUT: OPERATION_REC_PTR OUR OPERATION POINTER * QUE_OPER_PTR LOCK QUEUE OWNER OPERATION POINTER * PWI_PAGEPTR PAGE POINTER OF ELEMENT * TPWI_ELEMENTPTR ELEMENT POINTER OF ELEMENT * OUTPUT TRESULT = * ZPARALLEL_QUEUE OPERATION PLACED IN PARALLEL QUEUE * OPERATION CAN PROCEED NOW. 
* ZSERIAL_QUEUE OPERATION PLACED IN SERIAL QUEUE * ERROR CODE OPERATION NEEDS ABORTING * ------------------------------------------------------------------------- */ Uint32 Dbacc::placeWriteInLockQueue(OperationrecPtr lockOwnerPtr) const { OperationrecPtr lastOpPtr; lastOpPtr.i = lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i; Uint32 opbits = operationRecPtr.p->m_op_bits; if (lastOpPtr.i == RNIL) { lastOpPtr = lockOwnerPtr; } else { ptrCheckGuard(lastOpPtr, coprecsize, operationrec); } ndbassert(get_parallel_head(lastOpPtr) == lockOwnerPtr.i); Uint32 lastbits = lastOpPtr.p->m_op_bits; if (lastbits & Operationrec::OP_ACC_LOCK_MODE) { if(operationRecPtr.p->is_same_trans(lastOpPtr.p)) { goto checkop; } } else { /** * We dont have an exclusive lock on operation and * */ jam(); /** * Scan parallell queue to see if we are the only one */ OperationrecPtr loopPtr = lockOwnerPtr; do { ptrCheckGuard(loopPtr, coprecsize, operationrec); if (!loopPtr.p->is_same_trans(operationRecPtr.p)) { goto serial; } loopPtr.i = loopPtr.p->nextParallelQue; } while (loopPtr.i != RNIL); goto checkop; } serial: jam(); placeSerialQueue(lockOwnerPtr, operationRecPtr); validate_lock_queue(lockOwnerPtr); return ZSERIAL_QUEUE; checkop: /* WE ARE PERFORMING AN READ EXCLUSIVE, INSERT, UPDATE OR DELETE IN THE SAME TRANSACTION WHERE WE PREVIOUSLY HAVE EXECUTED AN OPERATION. Read-All, Update-All, Insert-All and Delete-Insert are allowed combinations. Delete-Read, Delete-Update and Delete-Delete are not an allowed combination and will result in tuple not found error. */ const Uint32 lstate = lastbits & Operationrec::OP_STATE_MASK; Uint32 retValue = ZSERIAL_QUEUE; // So that it gets blocked... if (lstate == Operationrec::OP_STATE_EXECUTED) { jam(); /** * Since last operation has executed...we can now check operation types * if not, we have to wait until it has executed */ const Uint32 op = opbits & Operationrec::OP_MASK; const Uint32 lop = lastbits & Operationrec::OP_MASK; if (op == ZINSERT && lop != ZDELETE) { jam(); return ZWRITE_ERROR; }//if /** * NOTE. No checking op operation types, as one can read different save * points... */ if(op == ZWRITE) { opbits &= ~(Uint32)Operationrec::OP_MASK; opbits |= (lop == ZDELETE) ? 
ZINSERT : ZUPDATE; } opbits |= Operationrec::OP_STATE_RUNNING; operationRecPtr.p->localdata = lastOpPtr.p->localdata; retValue = ZPARALLEL_QUEUE; } opbits |= Operationrec::OP_RUN_QUEUE; operationRecPtr.p->m_op_bits = opbits; operationRecPtr.p->prevParallelQue = lastOpPtr.i; operationRecPtr.p->m_lock_owner_ptr_i = lockOwnerPtr.i; lastOpPtr.p->nextParallelQue = operationRecPtr.i; lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i = operationRecPtr.i; validate_lock_queue(lockOwnerPtr); return retValue; }//Dbacc::placeWriteInLockQueue() Uint32 Dbacc::placeReadInLockQueue(OperationrecPtr lockOwnerPtr) const { OperationrecPtr lastOpPtr; OperationrecPtr loopPtr = lockOwnerPtr; lastOpPtr.i = lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i; Uint32 opbits = operationRecPtr.p->m_op_bits; if (lastOpPtr.i == RNIL) { lastOpPtr = lockOwnerPtr; } else { ptrCheckGuard(lastOpPtr, coprecsize, operationrec); } ndbassert(get_parallel_head(lastOpPtr) == lockOwnerPtr.i); /** * Last operation in parallell queue of lock owner is same trans * and ACC_LOCK_MODE is exlusive, then we can proceed */ Uint32 lastbits = lastOpPtr.p->m_op_bits; bool same = operationRecPtr.p->is_same_trans(lastOpPtr.p); if (same && (lastbits & Operationrec::OP_ACC_LOCK_MODE)) { jam(); opbits |= Operationrec::OP_LOCK_MODE; // Upgrade to X-lock goto checkop; } if ((lastbits & Operationrec::OP_ACC_LOCK_MODE) && !same) { jam(); /** * Last op in serial queue had X-lock and was not our transaction... */ goto serial; } if (lockOwnerPtr.p->nextSerialQue == RNIL) { jam(); goto checkop; } /** * Scan parallell queue to see if we are already there... */ do { ptrCheckGuard(loopPtr, coprecsize, operationrec); if (loopPtr.p->is_same_trans(operationRecPtr.p)) goto checkop; loopPtr.i = loopPtr.p->nextParallelQue; } while (loopPtr.i != RNIL); serial: placeSerialQueue(lockOwnerPtr, operationRecPtr); validate_lock_queue(lockOwnerPtr); return ZSERIAL_QUEUE; checkop: Uint32 lstate = lastbits & Operationrec::OP_STATE_MASK; Uint32 retValue = ZSERIAL_QUEUE; // So that it gets blocked... if (lstate == Operationrec::OP_STATE_EXECUTED) { jam(); /** * NOTE. No checking op operation types, as one can read different save * points... */ #if 0 /** * Since last operation has executed...we can now check operation types * if not, we have to wait until it has executed */ if (lop == ZDELETE) { jam(); return ZREAD_ERROR; } #endif opbits |= Operationrec::OP_STATE_RUNNING; operationRecPtr.p->localdata = lastOpPtr.p->localdata; retValue = ZPARALLEL_QUEUE; } opbits |= (lastbits & Operationrec::OP_ACC_LOCK_MODE); opbits |= Operationrec::OP_RUN_QUEUE; operationRecPtr.p->m_op_bits = opbits; operationRecPtr.p->prevParallelQue = lastOpPtr.i; operationRecPtr.p->m_lock_owner_ptr_i = lockOwnerPtr.i; lastOpPtr.p->nextParallelQue = operationRecPtr.i; lockOwnerPtr.p->m_lo_last_parallel_op_ptr_i = operationRecPtr.i; validate_lock_queue(lockOwnerPtr); return retValue; }//Dbacc::placeReadInLockQueue void Dbacc::placeSerialQueue(OperationrecPtr lockOwnerPtr, OperationrecPtr opPtr)const { OperationrecPtr lastOpPtr; lastOpPtr.i = lockOwnerPtr.p->m_lo_last_serial_op_ptr_i; if (lastOpPtr.i == RNIL) { // Lock owner is last... 
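  // The serial queue is empty (the lock owner has no serial successors), so
  // the new waiting operation is linked in directly after the lock owner.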
ndbrequire(lockOwnerPtr.p->nextSerialQue == RNIL); lastOpPtr = lockOwnerPtr; } else { ptrCheckGuard(lastOpPtr, coprecsize, operationrec); } operationRecPtr.p->prevSerialQue = lastOpPtr.i; lastOpPtr.p->nextSerialQue = opPtr.i; lockOwnerPtr.p->m_lo_last_serial_op_ptr_i = opPtr.i; } /* ------------------------------------------------------------------------- */ /* ACC KEYREQ END */ /* ------------------------------------------------------------------------- */ void Dbacc::acckeyref1Lab(Signal* signal, Uint32 result_code) const { operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; /* ************************<< */ /* ACCKEYREF */ /* ************************<< */ signal->theData[0] = cminusOne; signal->theData[1] = result_code; return; }//Dbacc::acckeyref1Lab() /* ******************----------------------------------------------------- */ /* ACCMINUPDATE UPDATE LOCAL KEY REQ */ /* DESCRIPTION: UPDATES LOCAL KEY OF AN ELEMENTS IN THE HASH TABLE */ /* THIS SIGNAL IS WAITED AFTER ANY INSERT REQ */ /* ENTER ACCMINUPDATE WITH SENDER: LQH, LEVEL B */ /* OPERATION_REC_PTR, OPERATION RECORD PTR */ /* CLOCALKEY(0), LOCAL KEY 1 */ /* CLOCALKEY(1) LOCAL KEY 2 */ /* ******************----------------------------------------------------- */ void Dbacc::execACCMINUPDATE(Signal* signal) { Page8Ptr ulkPageidptr; Uint32 tulkLocalPtr; Local_key localkey; jamEntry(); operationRecPtr.i = signal->theData[0]; localkey.m_page_no = signal->theData[1]; localkey.m_page_idx = signal->theData[2]; ptrCheckGuard(operationRecPtr, coprecsize, operationrec); Uint32 opbits = operationRecPtr.p->m_op_bits; fragrecptr.i = operationRecPtr.p->fragptr; ulkPageidptr.i = operationRecPtr.p->elementPage; tulkLocalPtr = operationRecPtr.p->elementPointer + 1; if ((opbits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_RUNNING) { ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); ptrCheckGuard(ulkPageidptr, cpagesize, page8); dbgWord32(ulkPageidptr, tulkLocalPtr, tlocalkey1); arrGuard(tulkLocalPtr, 2048); operationRecPtr.p->localdata = localkey; ndbrequire(fragrecptr.p->localkeylen == 1); ulkPageidptr.p->word32[tulkLocalPtr] = localkey.m_page_no; return; }//if ndbrequire(false); }//Dbacc::execACCMINUPDATE() void Dbacc::removerow(Uint32 opPtrI, const Local_key* key) { jamEntry(); operationRecPtr.i = opPtrI; ptrCheckGuard(operationRecPtr, coprecsize, operationrec); Uint32 opbits = operationRecPtr.p->m_op_bits; fragrecptr.i = operationRecPtr.p->fragptr; /* Mark element disappeared */ opbits |= Operationrec::OP_ELEMENT_DISAPPEARED; opbits &= ~Uint32(Operationrec::OP_COMMIT_DELETE_CHECK); /** * This function is (currently?) only used when refreshTuple() * inserts a record...and later wants to remove it * * Since this should not affect row-count...we change the optype to UPDATE * execACC_COMMITREQ will be called in same timeslice as this change... 
*/ opbits &= ~Uint32(Operationrec::OP_MASK); opbits |= ZUPDATE; operationRecPtr.p->m_op_bits = opbits; #ifdef VM_TRACE ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); ndbrequire(operationRecPtr.p->localdata.m_page_no == key->m_page_no); ndbrequire(operationRecPtr.p->localdata.m_page_idx == key->m_page_idx); #endif }//Dbacc::execACCMINUPDATE() /* ******************--------------------------------------------------------------- */ /* ACC_COMMITREQ COMMIT TRANSACTION */ /* SENDER: LQH, LEVEL B */ /* INPUT: OPERATION_REC_PTR , */ /* ******************--------------------------------------------------------------- */ void Dbacc::execACC_COMMITREQ(Signal* signal) { Uint8 Toperation; jamEntry(); operationRecPtr.i = signal->theData[0]; ptrCheckGuard(operationRecPtr, coprecsize, operationrec); #ifdef VM_TRACE Uint32 tmp = operationRecPtr.i; void* ptr = operationRecPtr.p; #endif Uint32 opbits = operationRecPtr.p->m_op_bits; fragrecptr.i = operationRecPtr.p->fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); Toperation = opbits & Operationrec::OP_MASK; commitOperation(signal); ndbassert(operationRecPtr.i == tmp); ndbassert(operationRecPtr.p == ptr); operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; if((Toperation != ZREAD) && (Toperation != ZSCAN_OP)) { fragrecptr.p->m_commit_count++; #ifdef ERROR_INSERT bool force_expand_shrink = false; if (ERROR_INSERTED(3004) && fragrecptr.p->fragmentid == 0 && fragrecptr.p->level.getSize() != ERROR_INSERT_EXTRA) { force_expand_shrink = true; } #endif if (Toperation != ZINSERT) { if (Toperation != ZDELETE) { return; } else { jam(); #ifdef ERROR_INSERT ndbrequire(fragrecptr.p->noOfElements > 0); #else ndbassert(fragrecptr.p->noOfElements > 0); #endif fragrecptr.p->noOfElements--; fragrecptr.p->slack += fragrecptr.p->elementLength; #ifdef ERROR_INSERT if (force_expand_shrink || fragrecptr.p->slack > fragrecptr.p->slackCheck) #else if (fragrecptr.p->slack > fragrecptr.p->slackCheck) #endif { /* TIME FOR JOIN BUCKETS PROCESS */ if (fragrecptr.p->expandCounter > 0) { if (!fragrecptr.p->expandOrShrinkQueued) { jam(); signal->theData[0] = fragrecptr.i; fragrecptr.p->expandOrShrinkQueued = true; sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 1, JBB); }//if }//if }//if }//if } else { jam(); /* EXPAND PROCESS HANDLING */ fragrecptr.p->noOfElements++; fragrecptr.p->slack -= fragrecptr.p->elementLength; #ifdef ERROR_INSERT if ((force_expand_shrink || fragrecptr.p->slack < 0) && !fragrecptr.p->level.isFull()) #else if (fragrecptr.p->slack < 0 && !fragrecptr.p->level.isFull()) #endif { /* IT MEANS THAT IF SLACK < ZERO */ if (!fragrecptr.p->expandOrShrinkQueued) { jam(); signal->theData[0] = fragrecptr.i; fragrecptr.p->expandOrShrinkQueued = true; sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 1, JBB); }//if }//if } } return; }//Dbacc::execACC_COMMITREQ() /* ******************------------------------------------------------------- */ /* ACC ABORT REQ ABORT ALL OPERATION OF THE TRANSACTION */ /* ******************------------------------------+ */ /* SENDER: LQH, LEVEL B */ /* ******************------------------------------------------------------- */ /* ACC ABORT REQ ABORT TRANSACTION */ /* ******************------------------------------+ */ /* SENDER: LQH, LEVEL B */ void Dbacc::execACC_ABORTREQ(Signal* signal) { jamEntry(); operationRecPtr.i = signal->theData[0]; Uint32 sendConf = signal->theData[1]; ptrCheckGuard(operationRecPtr, coprecsize, operationrec); fragrecptr.i = operationRecPtr.p->fragptr; Uint32 opbits = 
operationRecPtr.p->m_op_bits; Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; tresult = 0; /* ZFALSE */ if (opbits == Operationrec::OP_EXECUTED_DIRTY_READ) { jam(); } else if (opstate == Operationrec::OP_STATE_EXECUTED || opstate == Operationrec::OP_STATE_WAITING || opstate == Operationrec::OP_STATE_RUNNING) { jam(); ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); abortOperation(signal); } operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; signal->theData[0] = operationRecPtr.p->userptr; signal->theData[1] = 0; switch(sendConf){ case 0: return; case 2: if (opstate != Operationrec::OP_STATE_RUNNING) { return; } case 1: sendSignal(operationRecPtr.p->userblockref, GSN_ACC_ABORTCONF, signal, 1, JBB); } signal->theData[1] = RNIL; } /* * Lock or unlock tuple. */ void Dbacc::execACC_LOCKREQ(Signal* signal) { jamEntry(); AccLockReq* sig = (AccLockReq*)signal->getDataPtrSend(); AccLockReq reqCopy = *sig; AccLockReq* const req = &reqCopy; Uint32 lockOp = (req->requestInfo & 0xFF); if (lockOp == AccLockReq::LockShared || lockOp == AccLockReq::LockExclusive) { jam(); // find table tabptr.i = req->tableId; ptrCheckGuard(tabptr, ctablesize, tabrec); // find fragment (TUX will know it) if (req->fragPtrI == RNIL) { for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) { jam(); if (tabptr.p->fragholder[i] == req->fragId){ jam(); req->fragPtrI = tabptr.p->fragptrholder[i]; break; } } } fragrecptr.i = req->fragPtrI; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); ndbrequire(req->fragId == fragrecptr.p->myfid); // caller must be explicit here ndbrequire(req->accOpPtr == RNIL); // seize operation to hold the lock if (cfreeopRec != RNIL) { jam(); seizeOpRec(); // init as in ACCSEIZEREQ operationRecPtr.p->userptr = req->userPtr; operationRecPtr.p->userblockref = req->userRef; operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; operationRecPtr.p->scanRecPtr = RNIL; // do read with lock via ACCKEYREQ Uint32 lockMode = (lockOp == AccLockReq::LockShared) ? 0 : 1; Uint32 opCode = ZSCAN_OP; { Uint32 accreq = 0; accreq = AccKeyReq::setOperation(accreq, opCode); accreq = AccKeyReq::setLockType(accreq, lockMode); accreq = AccKeyReq::setDirtyOp(accreq, false); accreq = AccKeyReq::setReplicaType(accreq, 0); // ? 
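      // Remaining request flags: no take-over, and setLockReq marks this as an
      // internal lock request (isAccLockReq) issued on behalf of ACC_LOCKREQ,
      // so the ACCKEYREQ executed below performs a locking lookup by local key
      // rather than by primary key.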
accreq = AccKeyReq::setTakeOver(accreq, false); accreq = AccKeyReq::setLockReq(accreq, true); AccKeyReq* keyreq = reinterpret_cast<AccKeyReq*>(&signal->theData[0]); keyreq->connectPtr = operationRecPtr.i; keyreq->fragmentPtr = fragrecptr.i; keyreq->requestInfo = accreq; keyreq->hashValue = req->hashValue; keyreq->keyLen = 0; // search local key keyreq->transId1 = req->transId1; keyreq->transId2 = req->transId2; keyreq->lockConnectPtr = RNIL; // enter local key in place of PK keyreq->localKey[0] = req->page_id; keyreq->localKey[1] = req->page_idx; NDB_STATIC_ASSERT(AccKeyReq::SignalLength_localKey == 10); } EXECUTE_DIRECT(DBACC, GSN_ACCKEYREQ, signal, AccKeyReq::SignalLength_localKey); /* keyreq invalid, signal now contains return value */ // translate the result if (signal->theData[0] < RNIL) { jam(); req->returnCode = AccLockReq::Success; req->accOpPtr = operationRecPtr.i; } else if (signal->theData[0] == RNIL) { jam(); req->returnCode = AccLockReq::IsBlocked; req->accOpPtr = operationRecPtr.i; } else { ndbrequire(signal->theData[0] == (UintR)-1); releaseOpRec(); req->returnCode = AccLockReq::Refused; req->accOpPtr = RNIL; } } else { jam(); req->returnCode = AccLockReq::NoFreeOp; } *sig = *req; return; } if (lockOp == AccLockReq::Unlock) { jam(); // do unlock via ACC_COMMITREQ (immediate) signal->theData[0] = req->accOpPtr; EXECUTE_DIRECT(DBACC, GSN_ACC_COMMITREQ, signal, 1); releaseOpRec(); req->returnCode = AccLockReq::Success; *sig = *req; return; } if (lockOp == AccLockReq::Abort) { jam(); // do abort via ACC_ABORTREQ (immediate) signal->theData[0] = req->accOpPtr; signal->theData[1] = 0; // Dont send abort execACC_ABORTREQ(signal); releaseOpRec(); req->returnCode = AccLockReq::Success; *sig = *req; return; } if (lockOp == AccLockReq::AbortWithConf) { jam(); // do abort via ACC_ABORTREQ (with conf signal) signal->theData[0] = req->accOpPtr; signal->theData[1] = 1; // send abort execACC_ABORTREQ(signal); releaseOpRec(); req->returnCode = AccLockReq::Success; *sig = *req; return; } ndbrequire(false); } /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF EXECUTE OPERATION MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /** * HASH TABLE MODULE * * Each partition (fragment) consist of a linear hash table in Dbacc. * The linear hash table can expand and shrink by one bucket at a time, * moving data from only one bucket. * * The operations supported are: * * [] insert one new element * [] delete one element * [] lookup one element * [] expand by splitting one bucket creating a new top bucket * [] shrink by merge top bucket data into a merge bucket * [] scan * * SCANS INTERACTION WITH EXPAND AND SHRINK * * Since expanding and shrinking can occur during the scan, and elements * move around one need to take extra care so that elements are scanned * exactly once. Elements deleted or inserted during scan should be * scanned at most once, there reinserted data always counts as a different * element. * * Scans are done in one or two laps. The first lap scans buckets from * bottom (bucket 0) to top. During this lap expanding and shrinking may * occur. 
 * In the second lap, buckets that were merged after they were scanned in
 * lap one are rescanned; during this lap expanding and shrinking are not
 * allowed.
 *
 * Nor is an expand or shrink involving the currently scanned bucket
 * allowed.
 *
 * During lap one the table can be seen as consisting of five kinds of
 * buckets:
 *
 * [] unscanned; note that these have no defined scan bits, since the scan
 *    bits are left-overs from earlier scans.
 * [] current, exactly one bucket.
 * [] scanned, all buckets below current.
 * [] expanded; these buckets have not been scanned in lap one, but may
 *    contain scanned elements.  They always have well-defined scan bits,
 *    also for unscanned elements.
 * [] merged and scanned; these are buckets scanned in lap one that have
 *    been merged after they were scanned, and may contain unscanned
 *    elements.  These buckets must be rescanned during lap two of the scan.
 *    Note that we only keep track of a first and a last bucket to rescan,
 *    even if some buckets in between have not been merged.
 *
 * The diagram below shows the possible regions of buckets.  The names to
 * the right are the data members that describe the limits of the regions.
 *
 * +--------------------------+
 * | Expanded buckets. May    | Fragmentrec::level.getTop()
 * | contain both scanned and |
 * | unscanned data.          |
 * |                          |
 * +--------------------------+
 * | Unscanned data with      | ScanRec::startNoOfBuckets
 * | undefined scan bits.     |
 * |                          | ScanRec::nextBucketIndex + 1
 * +--------------------------+
 * | Currently scanned data.  | ScanRec::nextBucketIndex
 * +--------------------------+
 * | Scanned buckets.         |
 * |                          |
 * +--------------------------+
 * | Merged buckets after     | ScanRec::maxBucketIndexToRescan
 * | scan start - need rescan.|
 * |                          | ScanRec::minBucketIndexToRescan
 * +--------------------------+
 * |                          |
 * | Scanned buckets.         | 0
 * +--------------------------+
 *
 * When the scan starts, all buckets are unscanned and have undefined scan
 * bits.  When scanning of an unscanned bucket with undefined scan bits
 * starts, all scan bits for the bucket are cleared.
 * ScanRec::startNoOfBuckets keeps track of the last bucket with undefined
 * scan bits; note that startNoOfBuckets may decrease if the table shrinks
 * below it.
 *
 * During the second lap the buckets from minBucketIndexToRescan to
 * maxBucketIndexToRescan, inclusive, are scanned, and no bucket needs to
 * have its scan bits cleared prior to the scan.
 *
 * SCAN AND EXPAND
 *
 * After an expand, the new top bucket always has defined scan bits.
 *
 * If the split bucket has undefined scan bits, the bucket's scan bits are
 * cleared before the split.
 *
 * The expanded bucket may only contain scanned elements if the split
 * bucket was a scanned bucket below the current bucket.  This follows from
 * noting that once the split bucket is below the current bucket, the
 * following expand cannot have a split bucket above the current bucket,
 * since the next split bucket is either the next bucket or the bottom
 * bucket, due to how the linear hash table grows.  And since an expand is
 * not allowed when the split bucket would be the current bucket, all
 * expanded buckets with scanned elements must come from buckets below the
 * current bucket.
 *
 * SCAN AND SHRINK
 *
 * A shrink merges the top bucket back into the bucket it was split from in
 * the corresponding expand.  This implies that we will never merge back a
 * bucket with scanned elements into an unscanned bucket, with or without
 * defined scan bits.
 *
 * If the top bucket has undefined scan bits, they are cleared before the
 * merge, even if it is merged into another bucket with undefined scan
 * bits.  This is to ensure that an element is not inserted into a bucket
 * with scan bits set that are not allowed in that bucket; for details on
 * why, see under BUCKET INVARIANTS.
 *
 * Whenever the top bucket has undefined scan bits, one needs to decrease
 * startNoOfBuckets, which indicates the last bucket with undefined scan
 * bits.  If the top bucket reappears through an expand, it will have
 * defined scan bits which possibly indicate scanned elements; these must
 * not be cleared prior to the scan.
 *
 * If the merge destination is below the current bucket, it must be added
 * for rescan.  Note that we only keep track of the lowest and highest
 * bucket numbers to rescan, even if some buckets in between are not merged
 * and do not need a rescan.
 *
 * CONTAINERS
 *
 * Each bucket is a linked list of containers.  Only the first (head)
 * container may be empty.
 *
 * Containers are located in 8KiB pages.  Each page has 72 buffers of
 * 28 words.  Each buffer may host up to two containers: one headed at the
 * buffer's lowest address, called the left end, and one headed at the
 * buffer's highest words, the right end.  The left end container grows
 * forward towards higher addresses, and the right end container grows
 * backwards.
 *
 * Each bucket has its first container at a unique logical address: the
 * logical page number is the bucket number divided by 64, and the
 * remainder indexes one of the first 64 left end containers on the page.
 * A dynamic array is used to map the logical page number to the physical
 * page number.
 *
 * The pages which host the head containers of buckets are called normal
 * pages.  When a container is full, a new container is allocated: first
 * one of the eight left end containers on the same page is looked for; if
 * none is free, a free right end container on the same page is looked for;
 * otherwise an overflow container on an overflow page is used.  New
 * overflow pages are allocated if needed.
 *
 * SCAN BITS
 *
 * Several means are used to keep track of which elements have been
 * scanned.  Every container header has scan bits; if a scan bit is set it
 * means that all elements in that container have been scanned by the
 * corresponding scan.
 *
 * If a container is currently being scanned, that is, some elements are
 * scanned and some are not, each element in the container has a scan bit
 * in the scan record (ScanRec::elemScanned).  The next element to scan is
 * looked for in the current container; if none is found, the next
 * container is used, and then the next bucket.
 *
 * A scan may only scan one container at a time.
 *
 * BUCKET INVARIANTS
 *
 * To be able to guarantee that only one container at a time is currently
 * being scanned, there is an important invariant:
 *
 * [] No container may have a scan bit set that the preceding container
 *    does not have set.  That is, containers are scanned in order within a
 *    bucket, and no inserted element may be placed such that the invariant
 *    breaks.
 *
 * Also, a condition that all operations on buckets must satisfy is:
 *
 * [] It is not allowed to insert an element with more scan bits set than
 *    the bucket's head container has (unless it is for a new top bucket).
 *
 *    This is to avoid the extra complexity that would arise if such an
 *    element were inserted.  A new container cannot be inserted preceding
 *    the bucket head container since that has a fixed logical address.
 *    The alternative would be to create a new container after the bucket
 *    head container and move every element from the head container to the
 *    new container.
 *
 * How the condition is fulfilled:
 *
 * [] Shrink, where the top bucket has undefined scan bits.
 *
 *    The top bucket's scan bits are first cleared prior to the merge.
* * [] Shrink, where destination bucket have undefined scan bits. * * In this case top bucket must also have undefined scan bits (see SCAN * AND SHRINK above) and both top and destination bucket have their scan * bits cleared before merge. * * [] Shrink, where destination bucket is scanned, below current. * * The only way the top bucket can have scanned elements is that it is * expanded from a scanned bucket, below current. Since that must be the * shrink destination bucket, no element can have more scan bits set than * the destination buckets head container. * * [] Expand. * * The new top bucket is always a new bucket and head containers scan bits * are taken from split source bucket. * * [] Insert. * * A new element may be inserted in any container with free space, and it * inherits the containers scan bits. If a new container is needed it is * put last with container scan bits copied from preceding container. * * [] Delete. * * Deleting an element, replaces the deleted element with the last * element with same scan bits as the deleted element. If a container * becomes empty it is unlinked, unless it is the head container which * always must remain. * * Since the first containers in a bucket are more likely to be on the * same (normal) page, it is better to unlink a container towards the * end of bucket. If the deleted element is the last one in its * container, but not the head container, and there are no other element * in bucket with same scan bits that can replace the deleted element. * It is allowed to use another element with fewer bits as replacement * and clear scan bits of the container accordingly. * * The reason the bucket head container may not have some of its scan * bits cleared, is that it could later result in a need to insert back * an element with more scan bits set. The scenario for that is: * * 1) Split a merged bucket, A, into a new bucket B, moving some * elements with some scan bits set. * * 2) Delete some elements in bucket A, leaving only elements with no * scan bits set. * * 3) Shrink table and merge back bucket B into bucket A, if we have * cleared the head container of bucket A, this would result in * inserting elements with more scan bits set then bucket A head * container. * */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* MODULE: INSERT */ /* THE FOLLOWING SUBROUTINES ARE ONLY USED BY INSERT_ELEMENT. THIS */ /* ROUTINE IS THE SOLE INTERFACE TO INSERT ELEMENTS INTO THE INDEX. */ /* CURRENT USERS ARE INSERT REQUESTS, EXPAND CONTAINER AND SHRINK */ /* CONTAINER. */ /* */ /* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */ /* INSERT_ELEMENT */ /* INSERT_CONTAINER */ /* ADDNEWCONTAINER */ /* GETFREELIST */ /* INCREASELISTCONT */ /* SEIZE_LEFTLIST */ /* SEIZE_RIGHTLIST */ /* */ /* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */ /* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */ /* TAKE_REC_OUT_OF_FREE_OVERPAGE AND RELEASE_OVERFLOW_REC ARE */ /* EXCEPTIONS TO THIS RULE. */ /* */ /* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */ /* THOSE DEFINED AS INPUT AND OUTPUT IN INSERT_ELEMENT */ /* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */ /* AND POINTER VARIABLES. */ /* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */ /* FRAGMENT RECORD. THIS IS MORE LESS STATIC ALWAYS DURING A SIGNAL */ /* EXECUTION. 
*/ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* INSERT_ELEMENT */ /* INPUT: */ /* IDR_PAGEPTR (POINTER TO THE ACTIVE PAGE REC) */ /* TIDR_PAGEINDEX (INDEX OF THE CONTAINER) */ /* TIDR_FORWARD (DIRECTION FORWARD OR BACKWARD) */ /* TIDR_ELEMHEAD (HEADER OF ELEMENT TO BE INSERTED */ /* CIDR_KEYS(ARRAY OF TUPLE KEYS) */ /* CLOCALKEY(ARRAY OF LOCAL KEYS). */ /* FRAGRECPTR */ /* IDR_OPERATION_REC_PTR */ /* TIDR_KEY_LEN */ /* conScanMask - ANY_SCANBITS or scan bits container must */ /* have. Note elements inserted are never more scanned than */ /* container. */ /* */ /* OUTPUT: */ /* TIDR_PAGEINDEX (PAGE INDEX OF INSERTED ELEMENT) */ /* IDR_PAGEPTR (PAGE POINTER OF INSERTED ELEMENT) */ /* TIDR_FORWARD (CONTAINER DIRECTION OF INSERTED ELEMENT) */ /* NONE */ /* --------------------------------------------------------------------------------- */ void Dbacc::insertElement(const Element elem, OperationrecPtr oprecptr, Page8Ptr& pageptr, Uint32& conidx, bool& isforward, Uint32& conptr, Uint16 conScanMask, const bool newBucket) { Page8Ptr inrNewPageptr; Uint32 tidrResult; Uint16 scanmask; bool newContainer = newBucket; ContainerHeader containerhead; do { insertContainer(elem, oprecptr, pageptr, conidx, isforward, conptr, containerhead, conScanMask, newContainer, tidrResult); if (tidrResult != ZFALSE) { jam(); return; /* INSERTION IS DONE, OR */ /* AN ERROR IS DETECTED */ }//if if (containerhead.getNextEnd() != 0) { /* THE NEXT CONTAINER IS IN THE SAME PAGE */ conidx = containerhead.getNextIndexNumber(); if (containerhead.getNextEnd() == ZLEFT) { jam(); isforward = true; } else if (containerhead.getNextEnd() == ZRIGHT) { jam(); isforward = false; } else { ndbrequire(false); return; }//if if (!containerhead.isNextOnSamePage()) { jam(); /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */ pageptr.i = pageptr.p->word32[conptr + 1]; ptrCheckGuard(pageptr, cpagesize, page8); }//if ndbrequire(conidx <= Container::MAX_CONTAINER_INDEX); } else { scanmask = containerhead.getScanBits(); break; }//if // Only first container can be a new container newContainer = false; } while (1); Uint32 newPageindex;; Uint32 newBuftype; getfreelist(pageptr, newPageindex, newBuftype); bool nextOnSamePage; if (newPageindex == Container::NO_CONTAINER_INDEX) { jam(); /* NO FREE BUFFER IS FOUND */ if (fragrecptr.p->sparsepages.isEmpty()) { jam(); allocOverflowPage(); ndbrequire(tresult <= ZLIMIT_OF_ERROR); }//if { LocalContainerPageList sparselist(*this, fragrecptr.p->sparsepages); sparselist.first(inrNewPageptr); } getfreelist(inrNewPageptr, newPageindex, newBuftype); ndbrequire(newPageindex != Container::NO_CONTAINER_INDEX); nextOnSamePage = false; } else { jam(); inrNewPageptr = pageptr; nextOnSamePage = true; }//if if (newBuftype == ZLEFT) { seizeLeftlist(inrNewPageptr, newPageindex); isforward = true; } else if (newBuftype == ZRIGHT) { seizeRightlist(inrNewPageptr, newPageindex); isforward = false; } else { ndbrequire(newBuftype == ZLEFT || newBuftype == ZRIGHT); } Uint32 containerptr = getContainerPtr(newPageindex, isforward); ContainerHeader newcontainerhead; newcontainerhead.initInUse(); Uint32 nextPtrI; if (containerhead.haveNext()) { nextPtrI = pageptr.p->word32[conptr+1]; newcontainerhead.setNext(containerhead.getNextEnd(), containerhead.getNextIndexNumber(), inrNewPageptr.i == nextPtrI); } else { nextPtrI = 
RNIL; newcontainerhead.clearNext(); } inrNewPageptr.p->word32[containerptr] = newcontainerhead; inrNewPageptr.p->word32[containerptr + 1] = nextPtrI; addnewcontainer(pageptr, conptr, newPageindex, newBuftype, nextOnSamePage, inrNewPageptr.i); pageptr = inrNewPageptr; conidx = newPageindex; if (conScanMask == Operationrec::ANY_SCANBITS) { /** * ANY_SCANBITS indicates that this is an insert of a new element, not * an insert from expand or shrink. In that case the inserted element * and the new container will inherit scan bits from previous container. * This makes the element look as scanned as possible still preserving * the invariant that containers and element towards the end of bucket * has less scan bits set than those towards the beginning. */ conScanMask = scanmask; } insertContainer(elem, oprecptr, pageptr, conidx, isforward, conptr, containerhead, conScanMask, true, tidrResult); ndbrequire(tidrResult == ZTRUE); }//Dbacc::insertElement() /** * insertContainer puts an element into a container if it has free space and * the requested scan bits match. * * If it is a new element inserted the requested scan bits given by * conScanMask can be ANY_SCANBITS or a valid set of bits. If it is * ANY_SCANBITS the containers scan bits are not checked. If it is set to * valid scan bits the container is a newly created empty container. * * The buckets header container may never be removed. Nor should any scan * bit of it be cleared, unless for expand there the first inserted element * determines the bucket header containers scan bits. newContainer indicates * that that current insert is part of populating a new bucket with expand. * * In case the container is empty it is either the bucket header container * or a new container created by caller (insertElement). * * @param[in] elem * @param[in] oprecptr * @param[in] pageptr * @param[in] conidx * @param[in] isforward * @param[out] conptr * @param[out] containerhead * @param[in] conScanMask * @param[in] newContainer * @param[out] result */ void Dbacc::insertContainer(const Element elem, const OperationrecPtr oprecptr, const Page8Ptr pageptr, const Uint32 conidx, const bool isforward, Uint32& conptr, ContainerHeader& containerhead, Uint16 conScanMask, const bool newContainer, Uint32& result) { Uint32 tidrContainerlen; Uint32 tidrConfreelen; Uint32 tidrNextSide; Uint32 tidrNextConLen; Uint32 tidrIndex; result = ZFALSE; /* --------------------------------------------------------------------------------- */ /* CALCULATE THE POINTER TO THE ELEMENT TO BE INSERTED AND THE POINTER TO THE */ /* CONTAINER HEADER OF THE OTHER SIDE OF THE BUFFER. 
*/ /* --------------------------------------------------------------------------------- */ conptr = getForwardContainerPtr(conidx); if (isforward) { jam(); tidrNextSide = conptr + (ZBUF_SIZE - Container::HEADER_SIZE); arrGuard(tidrNextSide + 1, 2048); containerhead = pageptr.p->word32[conptr]; tidrContainerlen = containerhead.getLength(); tidrIndex = conptr + tidrContainerlen; } else { jam(); tidrNextSide = conptr; conptr = conptr + (ZBUF_SIZE - Container::HEADER_SIZE); arrGuard(conptr + 1, 2048); containerhead = pageptr.p->word32[conptr]; tidrContainerlen = containerhead.getLength(); tidrIndex = (conptr - tidrContainerlen) + (Container::HEADER_SIZE - fragrecptr.p->elementLength); }//if const Uint16 activeScanMask = fragrecptr.p->activeScanMask; const Uint16 conscanmask = containerhead.getScanBits(); if(tidrContainerlen > Container::HEADER_SIZE || !newContainer) { if (conScanMask != Operationrec::ANY_SCANBITS && ((conscanmask & ~conScanMask) & activeScanMask) != 0) { /* Container have more scan bits set than requested */ /* Continue to next container. */ return; } } if (tidrContainerlen == Container::HEADER_SIZE && newContainer) { /** * Only the first header container in a bucket or a newly created bucket * in insertElement can be empty. * * Set container scan bits as requested. */ ndbrequire(conScanMask != Operationrec::ANY_SCANBITS); containerhead.copyScanBits(conScanMask & activeScanMask); pageptr.p->word32[conptr] = containerhead; } if (tidrContainerlen >= (ZBUF_SIZE - fragrecptr.p->elementLength)) { return; }//if tidrConfreelen = ZBUF_SIZE - tidrContainerlen; /* --------------------------------------------------------------------------------- */ /* WE CALCULATE THE TOTAL LENGTH THE CONTAINER CAN EXPAND TO */ /* THIS INCLUDES THE OTHER SIDE OF THE BUFFER IF POSSIBLE TO EXPAND THERE. */ /* --------------------------------------------------------------------------------- */ if (!containerhead.isUsingBothEnds()) { jam(); /* --------------------------------------------------------------------------------- */ /* WE HAVE NOT EXPANDED TO THE ENTIRE BUFFER YET. WE CAN THUS READ THE OTHER */ /* SIDE'S CONTAINER HEADER TO READ HIS LENGTH. */ /* --------------------------------------------------------------------------------- */ ContainerHeader conhead(pageptr.p->word32[tidrNextSide]); tidrNextConLen = conhead.getLength(); tidrConfreelen = tidrConfreelen - tidrNextConLen; if (tidrConfreelen > ZBUF_SIZE) { ndbrequire(false); /* --------------------------------------------------------------------------------- */ /* THE BUFFERS ARE PLACED ON TOP OF EACH OTHER. THIS SHOULD NEVER OCCUR. */ /* --------------------------------------------------------------------------------- */ return; }//if } else { jam(); tidrNextConLen = 1; /* INDICATE OTHER SIDE IS NOT PART OF FREE LIST */ }//if if (tidrConfreelen < fragrecptr.p->elementLength) { jam(); /* --------------------------------------------------------------------------------- */ /* THE CONTAINER COULD NOT BE EXPANDED TO FIT THE NEW ELEMENT. WE HAVE TO */ /* RETURN AND FIND A NEW CONTAINER TO INSERT IT INTO. */ /* --------------------------------------------------------------------------------- */ return; }//if tidrContainerlen = tidrContainerlen + fragrecptr.p->elementLength; if (tidrNextConLen == 0) { /* EACH SIDE OF THE BUFFER WHICH BELONG TO A FREE */ /* LIST, HAS ZERO AS LENGTH. 
*/ if (tidrContainerlen > Container::UP_LIMIT) { ContainerHeader conthead = pageptr.p->word32[conptr]; conthead.setUsingBothEnds(); dbgWord32(pageptr, conptr, conthead); pageptr.p->word32[conptr] = conthead; if (isforward) { jam(); /* REMOVE THE RIGHT SIDE OF THE BUFFER FROM THE FREE LIST */ seizeRightlist(pageptr, conidx); } else { jam(); /* REMOVE THE LEFT SIDE OF THE BUFFER FROM THE FREE LIST */ seizeLeftlist(pageptr, conidx); }//if }//if }//if /* OF THE FREE CONTAINERS */ /* --------------------------------------------------------------------------------- */ /* WE HAVE NOW FOUND A FREE SPOT IN THE CURRENT CONTAINER. WE INSERT THE */ /* ELEMENT HERE. THE ELEMENT CONTAINS A HEADER, A LOCAL KEY AND A TUPLE KEY. */ /* BEFORE INSERTING THE ELEMENT WE WILL UPDATE THE OPERATION RECORD WITH THE */ /* DATA CONCERNING WHERE WE INSERTED THE ELEMENT. THIS MAKES IT EASY TO FIND */ /* THIS INFORMATION WHEN WE RETURN TO UPDATE THE LOCAL KEY OR RETURN TO COMMIT */ /* OR ABORT THE INSERT. IF NO OPERATION RECORD EXIST IT MEANS THAT WE ARE */ /* PERFORMING THIS AS A PART OF THE EXPAND OR SHRINK PROCESS. */ /* --------------------------------------------------------------------------------- */ const Uint32 elemhead = elem.getHeader(); ContainerHeader conthead = pageptr.p->word32[conptr]; if (oprecptr.i != RNIL) { jam(); ndbrequire(ElementHeader::getLocked(elemhead)); oprecptr.p->elementPage = pageptr.i; oprecptr.p->elementContainer = conptr; oprecptr.p->elementPointer = tidrIndex; } else { ndbassert(!ElementHeader::getLocked(elemhead)); } /* --------------------------------------------------------------------------------- */ /* WE CHOOSE TO UNDO LOG INSERTS BY WRITING THE BEFORE VALUE TO THE UNDO LOG. */ /* WE COULD ALSO HAVE DONE THIS BY WRITING THIS BEFORE VALUE WHEN DELETING */ /* ELEMENTS. WE CHOOSE TO PUT IT HERE SINCE WE THEREBY ENSURE THAT WE ALWAYS */ /* UNDO LOG ALL WRITES TO PAGE MEMORY. IT SHOULD BE EASIER TO MAINTAIN SUCH A */ /* STRUCTURE. IT IS RATHER DIFFICULT TO MAINTAIN A LOGICAL STRUCTURE WHERE */ /* DELETES ARE INSERTS AND INSERTS ARE PURELY DELETES. */ /* --------------------------------------------------------------------------------- */ ndbrequire(fragrecptr.p->localkeylen == 1); arrGuard(tidrIndex + 1, 2048); pageptr.p->word32[tidrIndex] = elem.getHeader(); pageptr.p->word32[tidrIndex + 1] = elem.getData(); /* INSERTS LOCALKEY */ conthead.setLength(tidrContainerlen); dbgWord32(pageptr, conptr, conthead); pageptr.p->word32[conptr] = conthead; result = ZTRUE; }//Dbacc::insertContainer() /** --------------------------------------------------------------------------- * Set next link of a container to reference to next container. * * @param[in] pageptr Pointer to page of container to modify. * @param[in] conptr Pointer within page of container to modify. * @param[in] nextConidx Index within page of next container. * @param[in] nextContype Type of next container, left or right end. * @param[in] nextSamepage True if next container is on same page as modified * container * @param[in] nextPagei Overflow page number of next container. 
* ------------------------------------------------------------------------- */ void Dbacc::addnewcontainer(Page8Ptr pageptr, Uint32 conptr, Uint32 nextConidx, Uint32 nextContype, bool nextSamepage, Uint32 nextPagei) const { ContainerHeader containerhead(pageptr.p->word32[conptr]); containerhead.setNext(nextContype, nextConidx, nextSamepage); dbgWord32(pageptr, conptr, containerhead); pageptr.p->word32[conptr] = containerhead; dbgWord32(pageptr, conptr + 1, nextPagei); pageptr.p->word32[conptr + 1] = nextPagei; }//Dbacc::addnewcontainer() /* --------------------------------------------------------------------------------- */ /* GETFREELIST */ /* INPUT: */ /* GFL_PAGEPTR (POINTER TO A PAGE RECORD). */ /* OUTPUT: */ /* TGFL_PAGEINDEX(POINTER TO A FREE BUFFER IN THE FREEPAGE), AND */ /* TGFL_BUF_TYPE( TYPE OF THE FREE BUFFER). */ /* DESCRIPTION: SEARCHS IN THE FREE LIST OF THE FREE BUFFER IN THE PAGE HEAD */ /* (WORD32(1)),AND RETURN ADDRESS OF A FREE BUFFER OR NIL. */ /* THE FREE BUFFER CAN BE A RIGHT CONTAINER OR A LEFT ONE */ /* THE KIND OF THE CONTAINER IS NOTED BY TGFL_BUF_TYPE. */ /* --------------------------------------------------------------------------------- */ void Dbacc::getfreelist(Page8Ptr pageptr, Uint32& pageindex, Uint32& buftype) { const Uint32 emptylist = pageptr.p->word32[ZPOS_EMPTY_LIST]; pageindex = (emptylist >> 7) & 0x7f; /* LEFT FREE LIST */ buftype = ZLEFT; if (pageindex == Container::NO_CONTAINER_INDEX) { jam(); pageindex = emptylist & 0x7f; /* RIGHT FREE LIST */ buftype = ZRIGHT; }//if ndbrequire((pageindex <= Container::MAX_CONTAINER_INDEX) || (pageindex == Container::NO_CONTAINER_INDEX)); }//Dbacc::getfreelist() /* --------------------------------------------------------------------------------- */ /* INCREASELISTCONT */ /* INPUT: */ /* ILC_PAGEPTR PAGE POINTER TO INCREASE NUMBER OF CONTAINERS IN */ /* A CONTAINER OF AN OVERFLOW PAGE (FREEPAGEPTR) IS ALLOCATED, NR OF */ /* ALLOCATED CONTAINER HAVE TO BE INCRESE BY ONE . */ /* IF THE NUMBER OF ALLOCATED CONTAINERS IS ABOVE THE FREE LIMIT WE WILL */ /* REMOVE THE PAGE FROM THE FREE LIST. */ /* --------------------------------------------------------------------------------- */ void Dbacc::increaselistcont(Page8Ptr ilcPageptr) { dbgWord32(ilcPageptr, ZPOS_ALLOC_CONTAINERS, ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1); ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] + 1; // A sparse page just got full if (ilcPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] == ZFREE_LIMIT + 1) { // Check that it is an overflow page if (((ilcPageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) { jam(); LocalContainerPageList sparselist(*this, fragrecptr.p->sparsepages); LocalContainerPageList fulllist(*this, fragrecptr.p->fullpages); sparselist.remove(ilcPageptr); fulllist.addLast(ilcPageptr); }//if }//if }//Dbacc::increaselistcont() /* --------------------------------------------------------------------------------- */ /* SEIZE_LEFTLIST */ /* INPUT: */ /* TSL_PAGEINDEX PAGE INDEX OF CONTAINER TO SEIZE */ /* SL_PAGEPTR PAGE POINTER OF CONTAINER TO SEIZE */ /* TSL_UPDATE_HEADER SHOULD WE UPDATE THE CONTAINER HEADER */ /* */ /* OUTPUT: */ /* NONE */ /* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */ /* LIST OF LEFT FREE CONTAINER, IN THE HEADER OF THE PAGE */ /* (FREEPAGEPTR). PREVIOUS AND NEXT BUFFER OF REMOVED BUFFER */ /* WILL BE UPDATED. 
*/ /* --------------------------------------------------------------------------------- */ void Dbacc::seizeLeftlist(Page8Ptr slPageptr, Uint32 tslPageindex) { Uint32 tsllTmp1; Uint32 tsllHeadIndex; Uint32 tsllTmp; tsllHeadIndex = getForwardContainerPtr(tslPageindex); arrGuard(tsllHeadIndex + 1, 2048); Uint32 tslNextfree = slPageptr.p->word32[tsllHeadIndex]; Uint32 tslPrevfree = slPageptr.p->word32[tsllHeadIndex + 1]; if (tslPrevfree == Container::NO_CONTAINER_INDEX) { jam(); /* UPDATE FREE LIST OF LEFT CONTAINER IN PAGE HEAD */ tsllTmp1 = slPageptr.p->word32[ZPOS_EMPTY_LIST]; tsllTmp = tsllTmp1 & 0x7f; tsllTmp1 = (tsllTmp1 >> 14) << 14; tsllTmp1 = (tsllTmp1 | (tslNextfree << 7)) | tsllTmp; dbgWord32(slPageptr, ZPOS_EMPTY_LIST, tsllTmp1); slPageptr.p->word32[ZPOS_EMPTY_LIST] = tsllTmp1; } else { ndbrequire(tslPrevfree <= Container::MAX_CONTAINER_INDEX); jam(); tsllTmp = getForwardContainerPtr(tslPrevfree); dbgWord32(slPageptr, tsllTmp, tslNextfree); slPageptr.p->word32[tsllTmp] = tslNextfree; }//if if (tslNextfree <= Container::MAX_CONTAINER_INDEX) { jam(); tsllTmp = getForwardContainerPtr(tslNextfree) + 1; dbgWord32(slPageptr, tsllTmp, tslPrevfree); slPageptr.p->word32[tsllTmp] = tslPrevfree; } else { ndbrequire(tslNextfree == Container::NO_CONTAINER_INDEX); jam(); }//if increaselistcont(slPageptr); }//Dbacc::seizeLeftlist() /* --------------------------------------------------------------------------------- */ /* SEIZE_RIGHTLIST */ /* DESCRIPTION: THE BUFFER NOTED BY TSL_PAGEINDEX WILL BE REMOVED FROM THE */ /* LIST OF RIGHT FREE CONTAINER, IN THE HEADER OF THE PAGE */ /* (SL_PAGEPTR). PREVIOUS AND NEXT BUFFER OF REMOVED BUFFER */ /* WILL BE UPDATED. */ /* --------------------------------------------------------------------------------- */ void Dbacc::seizeRightlist(Page8Ptr slPageptr, Uint32 tslPageindex) { Uint32 tsrlHeadIndex; Uint32 tsrlTmp; tsrlHeadIndex = getBackwardContainerPtr(tslPageindex); arrGuard(tsrlHeadIndex + 1, 2048); Uint32 tslNextfree = slPageptr.p->word32[tsrlHeadIndex]; Uint32 tslPrevfree = slPageptr.p->word32[tsrlHeadIndex + 1]; if (tslPrevfree == Container::NO_CONTAINER_INDEX) { jam(); tsrlTmp = slPageptr.p->word32[ZPOS_EMPTY_LIST]; dbgWord32(slPageptr, ZPOS_EMPTY_LIST, ((tsrlTmp >> 7) << 7) | tslNextfree); slPageptr.p->word32[ZPOS_EMPTY_LIST] = ((tsrlTmp >> 7) << 7) | tslNextfree; } else { ndbrequire(tslPrevfree <= Container::MAX_CONTAINER_INDEX); jam(); tsrlTmp = getBackwardContainerPtr(tslPrevfree); dbgWord32(slPageptr, tsrlTmp, tslNextfree); slPageptr.p->word32[tsrlTmp] = tslNextfree; }//if if (tslNextfree <= Container::MAX_CONTAINER_INDEX) { jam(); tsrlTmp = getBackwardContainerPtr(tslNextfree) + 1; dbgWord32(slPageptr, tsrlTmp, tslPrevfree); slPageptr.p->word32[tsrlTmp] = tslPrevfree; } else { ndbrequire(tslNextfree == Container::NO_CONTAINER_INDEX); jam(); }//if increaselistcont(slPageptr); }//Dbacc::seizeRightlist() /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF INSERT_ELEMENT MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* 
--------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* MODULE: GET_ELEMENT */ /* THE FOLLOWING SUBROUTINES ARE ONLY USED BY GET_ELEMENT AND */ /* GETDIRINDEX. THIS ROUTINE IS THE SOLE INTERFACE TO GET ELEMENTS */ /* FROM THE INDEX. CURRENT USERS ARE ALL REQUESTS AND EXECUTE UNDO LOG */ /* */ /* THE FOLLOWING SUBROUTINES ARE INCLUDED IN THIS MODULE: */ /* GET_ELEMENT */ /* GET_DIRINDEX */ /* SEARCH_LONG_KEY */ /* */ /* THESE ROUTINES ARE ONLY USED BY THIS MODULE AND BY NO ONE ELSE. */ /* ALSO THE ROUTINES MAKE NO USE OF ROUTINES IN OTHER MODULES. */ /* THE ONLY SHORT-LIVED VARIABLES USED IN OTHER PARTS OF THE BLOCK ARE */ /* THOSE DEFINED AS INPUT AND OUTPUT IN GET_ELEMENT AND GETDIRINDEX */ /* SHORT-LIVED VARIABLES INCLUDE TEMPORARY VARIABLES, COMMON VARIABLES */ /* AND POINTER VARIABLES. */ /* THE ONLY EXCEPTION TO THIS RULE IS FRAGRECPTR WHICH POINTS TO THE */ /* FRAGMENT RECORD. THIS IS MORE LESS STATIC ALWAYS DURING A SIGNAL */ /* EXECUTION. */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* GETDIRINDEX */ /* SUPPORT ROUTINE FOR INSERT ELEMENT, GET ELEMENT AND COMMITDELETE */ /* INPUT:FRAGRECPTR ( POINTER TO THE ACTIVE FRAGMENT REC) */ /* OPERATION_REC_PTR (POINTER TO THE OPERATION REC). */ /* */ /* OUTPUT:GDI_PAGEPTR ( POINTER TO THE PAGE OF THE ELEMENT) */ /* TGDI_PAGEINDEX ( INDEX OF THE ELEMENT IN THE PAGE). */ /* */ /* DESCRIPTION: CHECK THE HASH VALUE OF THE OPERATION REC AND CALCULATE THE */ /* THE ADDRESS OF THE ELEMENT IN THE HASH TABLE,(GDI_PAGEPTR, */ /* TGDI_PAGEINDEX) ACCORDING TO LH3. */ /* --------------------------------------------------------------------------------- */ Uint32 Dbacc::getPagePtr(DynArr256::Head& directory, Uint32 index) { DynArr256 dir(directoryPool, directory); Uint32* ptr = dir.get(index); return *ptr; } bool Dbacc::setPagePtr(DynArr256::Head& directory, Uint32 index, Uint32 ptri) { DynArr256 dir(directoryPool, directory); Uint32* ptr = dir.set(index); if (ptr == NULL) return false; *ptr = ptri; return true; } Uint32 Dbacc::unsetPagePtr(DynArr256::Head& directory, Uint32 index) { DynArr256 dir(directoryPool, directory); Uint32* ptr = dir.get(index); Uint32 ptri = *ptr; *ptr = RNIL; return ptri; } void Dbacc::getdirindex(Page8Ptr& pageptr, Uint32& conidx) { const LHBits32 hashValue = operationRecPtr.p->hashValue; const Uint32 address = fragrecptr.p->level.getBucketNumber(hashValue); conidx = fragrecptr.p->getPageIndex(address); pageptr.i = getPagePtr(fragrecptr.p->directory, fragrecptr.p->getPageNumber(address)); ptrCheckGuard(pageptr, cpagesize, page8); }//Dbacc::getdirindex() Uint32 Dbacc::readTablePk(Uint32 localkey1, Uint32 localkey2, Uint32 eh, Ptr<Operationrec> opPtr) { int ret; Uint32 tableId = fragrecptr.p->myTableId; Uint32 fragId = fragrecptr.p->myfid; bool xfrm = fragrecptr.p->hasCharAttr; #ifdef VM_TRACE memset(ckeys, 0x1f, (fragrecptr.p->keyLength * MAX_XFRM_MULTIPLY) << 2); #endif if (likely(! 
Local_key::isInvalid(localkey1, localkey2))) { ret = c_tup->accReadPk(tableId, fragId, localkey1, localkey2, ckeys, true); } else { ndbrequire(ElementHeader::getLocked(eh)); if (unlikely((opPtr.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP)) { dump_lock_queue(opPtr); ndbrequire(opPtr.p->nextParallelQue == RNIL); ndbrequire(opPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED); ndbrequire(opPtr.p->m_op_bits & Operationrec::OP_COMMIT_DELETE_CHECK); ndbrequire((opPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_RUNNING); return 0; } ret = c_lqh->readPrimaryKeys(opPtr.p->userptr, ckeys, xfrm); } jamEntry(); ndbrequire(ret >= 0); return ret; } /** --------------------------------------------------------------------------- * Find element. * * Method scan the bucket given by hashValue from operationRecPtr and look for * the element with primary key given in signal. If element found return * pointer to element, if not found return only bucket information. * * @param[in] signal Signal containing primary key to look for. * @param[out] lockOwnerPtr Lock owner if any of found element. * @param[out] bucketPageptr Page of first container of bucket there element should be. * @param[out] bucketConidx Index within page of first container of bucket there element should be. * @param[out] elemPageptr Page of found element. * @param[out] elemConptr Pointer within page to container of found element. * @param[out] elemptr Pointer within page to found element. * @return Returns ZTRUE if element was found. * ------------------------------------------------------------------------- */ Uint32 Dbacc::getElement(const AccKeyReq* signal, OperationrecPtr& lockOwnerPtr, Page8Ptr& bucketPageptr, Uint32& bucketConidx, Page8Ptr& elemPageptr, Uint32& elemConptr, Uint32& elemptr) { Uint32 tgeElementHeader; Uint32 tgeElemStep; Uint32 tgePageindex; Uint32 tgeNextptrtype; register Uint32 tgeRemLen; const Uint32 TelemLen = fragrecptr.p->elementLength; register const Uint32* Tkeydata = signal->keyInfo; /* or localKey if keyLen == 0 */ const Uint32 localkeylen = fragrecptr.p->localkeylen; Uint32 bucket_number = fragrecptr.p->level.getBucketNumber(operationRecPtr.p->hashValue); getdirindex(bucketPageptr, bucketConidx); elemPageptr = bucketPageptr; tgePageindex = bucketConidx; /* * The value seached is * - table key for ACCKEYREQ, stored in TUP * - local key (1 word) for ACC_LOCKREQ and UNDO, stored in ACC */ const bool searchLocalKey = operationRecPtr.p->tupkeylen == 0; ndbrequire(TelemLen == ZELEM_HEAD_SIZE + localkeylen); tgeNextptrtype = ZLEFT; do { if (tgeNextptrtype == ZLEFT) { jam(); elemConptr = getForwardContainerPtr(tgePageindex); elemptr = elemConptr + Container::HEADER_SIZE; tgeElemStep = TelemLen; ndbrequire(elemConptr < 2048); ContainerHeader conhead(elemPageptr.p->word32[elemConptr]); tgeRemLen = conhead.getLength(); ndbrequire((elemConptr + tgeRemLen - 1) < 2048); } else if (tgeNextptrtype == ZRIGHT) { jam(); elemConptr = getBackwardContainerPtr(tgePageindex); tgeElemStep = 0 - TelemLen; elemptr = elemConptr - TelemLen; ndbrequire(elemConptr < 2048); ContainerHeader conhead(elemPageptr.p->word32[elemConptr]); tgeRemLen = conhead.getLength(); ndbrequire((elemConptr - tgeRemLen) < 2048); } else { ndbrequire((tgeNextptrtype == ZLEFT) || (tgeNextptrtype == ZRIGHT)); }//if if (tgeRemLen >= Container::HEADER_SIZE + TelemLen) { ndbrequire(tgeRemLen <= ZBUF_SIZE); /* ------------------------------------------------------------------- */ // There is at least one element in this container. 
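      // Elements are matched on the hash value first (the reduced hash stored
      // in the element header, or the full hash from the lock owner's
      // operation record for locked elements); only on a hash match is the
      // full primary key read from TUP, or the local key compared when
      // keyLen == 0.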
// Check if it is the element searched for. /* ------------------------------------------------------------------- */ do { bool possible_match; tgeElementHeader = elemPageptr.p->word32[elemptr]; tgeRemLen = tgeRemLen - TelemLen; Local_key localkey; lockOwnerPtr.i = RNIL; lockOwnerPtr.p = NULL; LHBits16 reducedHashValue; if (ElementHeader::getLocked(tgeElementHeader)) { jam(); lockOwnerPtr.i = ElementHeader::getOpPtrI(tgeElementHeader); ptrCheckGuard(lockOwnerPtr, coprecsize, operationrec); possible_match = lockOwnerPtr.p->hashValue.match(operationRecPtr.p->hashValue); reducedHashValue = lockOwnerPtr.p->reducedHashValue; localkey = lockOwnerPtr.p->localdata; } else { jam(); reducedHashValue = ElementHeader::getReducedHashValue(tgeElementHeader); const Uint32 pos = elemptr + 1; ndbrequire(localkeylen == 1); localkey.m_page_no = elemPageptr.p->word32[pos]; localkey.m_page_idx = ElementHeader::getPageIdx(tgeElementHeader); possible_match = true; } if (possible_match && operationRecPtr.p->hashValue.match(fragrecptr.p->level.enlarge(reducedHashValue, bucket_number))) { jam(); bool found; if (! searchLocalKey) { Uint32 len = readTablePk(localkey.m_page_no, localkey.m_page_idx, tgeElementHeader, lockOwnerPtr); found = (len == operationRecPtr.p->xfrmtupkeylen) && (memcmp(Tkeydata, ckeys, len << 2) == 0); } else { jam(); found = (localkey.m_page_no == Tkeydata[0] && Uint32(localkey.m_page_idx) == Tkeydata[1]); } if (found) { jam(); operationRecPtr.p->localdata = localkey; return ZTRUE; } } if (tgeRemLen <= Container::HEADER_SIZE) { break; } elemptr = elemptr + tgeElemStep; } while (true); }//if ndbrequire(tgeRemLen == Container::HEADER_SIZE); ContainerHeader containerhead = elemPageptr.p->word32[elemConptr]; tgeNextptrtype = containerhead.getNextEnd(); if (tgeNextptrtype == 0) { jam(); return ZFALSE; /* NO MORE CONTAINER */ }//if tgePageindex = containerhead.getNextIndexNumber(); /* NEXT CONTAINER PAGE INDEX 7 BITS */ ndbrequire(tgePageindex <= Container::NO_CONTAINER_INDEX); if (!containerhead.isNextOnSamePage()) { jam(); elemPageptr.i = elemPageptr.p->word32[elemConptr + 1]; /* NEXT PAGE ID */ ptrCheckGuard(elemPageptr, cpagesize, page8); }//if } while (1); return ZFALSE; }//Dbacc::getElement() /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* */ /* END OF GET_ELEMENT MODULE */ /* */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* */ /* MODULE: DELETE */ /* */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* ------------------------------------------------------------------------- */ /* COMMITDELETE */ /* INPUT: OPERATION_REC_PTR, PTR TO AN OPERATION RECORD. */ /* FRAGRECPTR, PTR TO A FRAGMENT RECORD */ /* */ /* OUTPUT: */ /* NONE */ /* DESCRIPTION: DELETE OPERATIONS WILL BE COMPLETED AT THE * COMMIT OF TRANSACTION. THIS SUBROUTINE SEARCHS FOR ELEMENT AND * DELETES IT. IT DOES SO BY REPLACING IT WITH THE LAST * ELEMENT IN THE BUCKET. 
IF THE DELETED ELEMENT IS ALSO THE LAST * ELEMENT THEN IT IS ONLY NECESSARY TO REMOVE THE ELEMENT * ------------------------------------------------------------------------- */ void Dbacc::report_dealloc(Signal* signal, const Operationrec* opPtrP) { Local_key localKey = opPtrP->localdata; Uint32 opbits = opPtrP->m_op_bits; Uint32 userptr= opPtrP->userptr; Uint32 scanInd = ((opbits & Operationrec::OP_MASK) == ZSCAN_OP) || (opbits & Operationrec::OP_LOCK_REQ); if (! localKey.isInvalid()) { signal->theData[0] = fragrecptr.p->myfid; signal->theData[1] = fragrecptr.p->myTableId; signal->theData[2] = localKey.m_page_no; signal->theData[3] = localKey.m_page_idx; signal->theData[4] = userptr; signal->theData[5] = scanInd; EXECUTE_DIRECT(DBLQH, GSN_TUP_DEALLOCREQ, signal, 6); jamEntry(); } } void Dbacc::commitdelete(Signal* signal) { Page8Ptr lastPageptr; Page8Ptr lastPrevpageptr; bool lastIsforward; Uint32 tlastPageindex; Uint32 tlastElementptr; Uint32 tlastContainerptr; Uint32 tlastPrevconptr; Page8Ptr lastBucketPageptr; Uint32 lastBucketConidx; jam(); report_dealloc(signal, operationRecPtr.p); getdirindex(lastBucketPageptr, lastBucketConidx); lastPageptr = lastBucketPageptr; tlastPageindex = lastBucketConidx; lastIsforward = true; tlastContainerptr = getForwardContainerPtr(tlastPageindex); arrGuard(tlastContainerptr, 2048); lastPrevpageptr.i = RNIL; ptrNull(lastPrevpageptr); tlastPrevconptr = 0; /** * Position last on delete container before call to getLastAndRemove. */ Page8Ptr delPageptr; delPageptr.i = operationRecPtr.p->elementPage; ptrCheckGuard(delPageptr, cpagesize, page8); const Uint32 delConptr = operationRecPtr.p->elementContainer; while (lastPageptr.i != delPageptr.i || tlastContainerptr != delConptr) { lastPrevpageptr = lastPageptr; tlastPrevconptr = tlastContainerptr; ContainerHeader lasthead(lastPageptr.p->word32[tlastContainerptr]); ndbrequire(lasthead.haveNext()); if (!lasthead.isNextOnSamePage()) { lastPageptr.i = lastPageptr.p->word32[tlastContainerptr + 1]; ptrCheckGuard(lastPageptr, cpagesize, page8); } tlastPageindex = lasthead.getNextIndexNumber(); lastIsforward = lasthead.getNextEnd() == ZLEFT; tlastContainerptr = getContainerPtr(tlastPageindex, lastIsforward); } getLastAndRemove(lastPrevpageptr, tlastPrevconptr, lastPageptr, tlastPageindex, tlastContainerptr, lastIsforward, tlastElementptr); const Uint32 delElemptr = operationRecPtr.p->elementPointer; /* * If last element is in same container as delete element, and that container * have scans in progress, one must make sure the last element still have the * same scan state, or clear if it is the one deleted. * If last element is not in same container as delete element, that element * can not have any scans in progress, in that case the container scanbits * should have been fewer than delete containers which is not allowed for last. */ if ((lastPageptr.i == delPageptr.i) && (tlastContainerptr == delConptr)) { ContainerHeader conhead(delPageptr.p->word32[delConptr]); /** * If the deleted element was the only element in container * getLastAndRemove may have released the container already. * In that case header is still valid to read but it will * not be in use, but free. */ if (conhead.isInUse() && conhead.isScanInProgress()) { /** * Initialize scanInProgress with the active scans which have not * completly scanned the container. Then check which scan actually * currently scan the container. 
*/ Uint16 scansInProgress = fragrecptr.p->activeScanMask & ~conhead.getScanBits(); scansInProgress = delPageptr.p->checkScans(scansInProgress, delConptr); for(int i = 0; scansInProgress != 0; i++, scansInProgress >>= 1) { /** * For each scan in progress in container, move the scan bit for * last element to the delete elements place. If it is the last * element that is deleted, the scan bit will be cleared by * moveScanBit. */ if ((scansInProgress & 1) != 0) { ScanRecPtr scanPtr; scanPtr.i = fragrecptr.p->scan[i]; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); scanPtr.p->moveScanBit(delElemptr, tlastElementptr); } } } } else { /** * The last element which is to be moved into deleted elements place * are in different containers. * * Since both containers have the same scan bits that implies that there * are no scans in progress in the last elements container, otherwise * the delete container should have an extra scan bit set. */ #ifdef VM_TRACE ContainerHeader conhead(lastPageptr.p->word32[tlastContainerptr]); ndbassert(!conhead.isInUse() || !conhead.isScanInProgress()); conhead = ContainerHeader(delPageptr.p->word32[delConptr]); #else ContainerHeader conhead(delPageptr.p->word32[delConptr]); #endif if (conhead.isScanInProgress()) { /** * Initialize scanInProgress with the active scans which have not * completly scanned the container. Then check which scan actually * currently scan the container. */ Uint16 scansInProgress = fragrecptr.p->activeScanMask & ~conhead.getScanBits(); scansInProgress = delPageptr.p->checkScans(scansInProgress, delConptr); for(int i = 0; scansInProgress != 0; i++, scansInProgress >>= 1) { if ((scansInProgress & 1) != 0) { ScanRecPtr scanPtr; scanPtr.i = fragrecptr.p->scan[i]; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); if(scanPtr.p->isScanned(delElemptr)) { scanPtr.p->clearScanned(delElemptr); } } } } } if (operationRecPtr.p->elementPage == lastPageptr.i) { if (operationRecPtr.p->elementPointer == tlastElementptr) { jam(); /* --------------------------------------------------------------------------------- */ /* THE LAST ELEMENT WAS THE ELEMENT TO BE DELETED. WE NEED NOT COPY IT. */ /* Setting it to an invalid value only for sanity, the value should never be read. */ /* --------------------------------------------------------------------------------- */ delPageptr.p->word32[delElemptr] = ElementHeader::setInvalid(); return; }//if }//if /* --------------------------------------------------------------------------------- */ /* THE DELETED ELEMENT IS NOT THE LAST. WE READ THE LAST ELEMENT AND OVERWRITE THE */ /* DELETED ELEMENT. */ /* --------------------------------------------------------------------------------- */ #if defined(VM_TRACE) || !defined(NDEBUG) delPageptr.p->word32[delElemptr] = ElementHeader::setInvalid(); #endif deleteElement(delPageptr, delConptr, delElemptr, lastPageptr, tlastElementptr); }//Dbacc::commitdelete() /** -------------------------------------------------------------------------- * Move last element over deleted element. * * And if moved element has an operation record update that with new element * location. * * @param[in] delPageptr Pointer to page of deleted element. * @param[in] delConptr Pointer within page to container of deleted element * @param[in] delElemptr Pointer within page to deleted element. * @param[in] lastPageptr Pointer to page of last element. * @param[in] lastElemptr Pointer within page to last element. 
 * ------------------------------------------------------------------------- */
void Dbacc::deleteElement(Page8Ptr delPageptr, Uint32 delConptr, Uint32 delElemptr, Page8Ptr lastPageptr, Uint32 lastElemptr) const { OperationrecPtr deOperationRecPtr; if (lastElemptr >= 2048) goto deleteElement_index_error1; { const Uint32 tdeElemhead = lastPageptr.p->word32[lastElemptr]; ndbrequire(fragrecptr.p->elementLength == 2); ndbassert(!ElementHeader::isValid(delPageptr.p->word32[delElemptr])); delPageptr.p->word32[delElemptr] = lastPageptr.p->word32[lastElemptr]; delPageptr.p->word32[delElemptr + 1] = lastPageptr.p->word32[lastElemptr + 1]; if (ElementHeader::getLocked(tdeElemhead)) { /* --------------------------------------------------------------------------------- */ /* THE LAST ELEMENT IS LOCKED AND IS THUS REFERENCED BY AN OPERATION RECORD. WE NEED */ /* TO UPDATE THE OPERATION RECORD WITH THE NEW REFERENCE TO THE ELEMENT. */ /* --------------------------------------------------------------------------------- */ deOperationRecPtr.i = ElementHeader::getOpPtrI(tdeElemhead); ptrCheckGuard(deOperationRecPtr, coprecsize, operationrec); deOperationRecPtr.p->elementPage = delPageptr.i; deOperationRecPtr.p->elementContainer = delConptr; deOperationRecPtr.p->elementPointer = delElemptr; /* Writing an invalid value only for sanity, the value should never be read. */ lastPageptr.p->word32[lastElemptr] = ElementHeader::setInvalid(); }//if
return; } deleteElement_index_error1: arrGuard(lastElemptr, 2048); return; }//Dbacc::deleteElement()
/** ---------------------------------------------------------------------------
 * Find last element in bucket.
 *
 * Shrink the container of the last element, but keep the element words intact.
 * If the container becomes empty and is not the first container in the bucket,
 * unlink it from the previous container.
 *
 * @param[in] lastPrevpageptr Page of previous container, if any.
 * @param[in] tlastPrevconptr Pointer within page of previous container.
 * @param[in,out] lastPageptr Page of first container to search, and on
 *                return the last container.
 * @param[in,out] tlastPageindex Index of container within first page to
 *                search, and on return the last container.
 * @param[in,out] tlastContainerptr Pointer within page to first container to
 *                search, and on return the last container.
 * @param[in,out] lastIsforward Direction of first container to search,
 *                and on return the last container.
 * @param[out] tlastElementptr On return the pointer within page to last
 *             element.
 * ------------------------------------------------------------------------ */
void Dbacc::getLastAndRemove(Page8Ptr lastPrevpageptr, Uint32 tlastPrevconptr, Page8Ptr& lastPageptr, Uint32& tlastPageindex, Uint32& tlastContainerptr, bool& lastIsforward, Uint32& tlastElementptr) {
/**
 * Should find the last container with the same scanbits as the first.
 */
ContainerHeader containerhead(lastPageptr.p->word32[tlastContainerptr]); Uint32 tlastContainerlen = containerhead.getLength();
/**
 * getLastAndRemove is always called prior to deleting an element in the first
 * container, so that container cannot be empty.
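 *
 * The search loop below follows the container next-links from the first
 * container, but stops at the first container whose active scan bits differ
 * from the starting container's, so the removed element always comes from a
 * container with the same scan state.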
*/ ndbassert(tlastContainerlen != Container::HEADER_SIZE); const Uint16 activeScanMask = fragrecptr.p->activeScanMask; const Uint16 conScanMask = containerhead.getScanBits(); while (containerhead.getNextEnd() != 0) { jam(); Uint32 nextIndex = containerhead.getNextIndexNumber(); Uint32 nextEnd = containerhead.getNextEnd(); bool nextOnSamePage = containerhead.isNextOnSamePage(); Page8Ptr nextPage; if (nextOnSamePage) { nextPage = lastPageptr; } else { jam(); nextPage.i = lastPageptr.p->word32[tlastContainerptr + 1]; ptrCheckGuard(nextPage, cpagesize, page8); } const bool nextIsforward = nextEnd == ZLEFT; const Uint32 nextConptr = getContainerPtr(nextIndex, nextIsforward); const ContainerHeader nextHead(nextPage.p->word32[nextConptr]); const Uint16 nextScanMask = nextHead.getScanBits(); if (((conScanMask ^ nextScanMask) & activeScanMask) != 0) { /** * Next container have different active scan bits, * current container is the last one with wanted scan bits. * Stop searching! */ ndbassert(((nextScanMask & ~conScanMask) & activeScanMask) == 0); break; } lastPrevpageptr.i = lastPageptr.i; lastPrevpageptr.p = lastPageptr.p; tlastPrevconptr = tlastContainerptr; tlastPageindex = nextIndex; if (!nextOnSamePage) { lastPageptr = nextPage; } lastIsforward = nextIsforward; tlastContainerptr = nextConptr; containerhead = lastPageptr.p->word32[tlastContainerptr]; tlastContainerlen = containerhead.getLength(); ndbassert(tlastContainerlen >= ((Uint32)Container::HEADER_SIZE + fragrecptr.p->elementLength)); } /** * Last container found. */ tlastContainerlen = tlastContainerlen - fragrecptr.p->elementLength; if (lastIsforward) { jam(); tlastElementptr = tlastContainerptr + tlastContainerlen; } else { jam(); tlastElementptr = (tlastContainerptr + (Container::HEADER_SIZE - fragrecptr.p->elementLength)) - tlastContainerlen; }//if if (containerhead.isUsingBothEnds()) { /* --------------------------------------------------------------------------------- */ /* WE HAVE OWNERSHIP OF BOTH PARTS OF THE CONTAINER ENDS. */ /* --------------------------------------------------------------------------------- */ if (tlastContainerlen < Container::DOWN_LIMIT) { /* --------------------------------------------------------------------------------- */ /* WE HAVE DECREASED THE SIZE BELOW THE DOWN LIMIT, WE MUST GIVE UP THE OTHER */ /* SIDE OF THE BUFFER. */ /* --------------------------------------------------------------------------------- */ containerhead.clearUsingBothEnds(); if (lastIsforward) { jam(); Uint32 relconptr = tlastContainerptr + (ZBUF_SIZE - Container::HEADER_SIZE); releaseRightlist(lastPageptr, tlastPageindex, relconptr); } else { jam(); Uint32 relconptr = tlastContainerptr - (ZBUF_SIZE - Container::HEADER_SIZE); releaseLeftlist(lastPageptr, tlastPageindex, relconptr); }//if }//if }//if if (tlastContainerlen <= Container::HEADER_SIZE) { ndbrequire(tlastContainerlen == Container::HEADER_SIZE); if (lastPrevpageptr.i != RNIL) { jam(); /* --------------------------------------------------------------------------------- */ /* THE LAST CONTAINER IS EMPTY AND IS NOT THE FIRST CONTAINER WHICH IS NOT REMOVED. */ /* DELETE THE LAST CONTAINER AND UPDATE THE PREVIOUS CONTAINER. ALSO PUT THIS */ /* CONTAINER IN FREE CONTAINER LIST OF THE PAGE. 
*/ /* --------------------------------------------------------------------------------- */ ndbrequire(tlastPrevconptr < 2048); ContainerHeader prevConhead(lastPrevpageptr.p->word32[tlastPrevconptr]); ndbrequire(containerhead.isInUse()); if (!containerhead.haveNext()) { Uint32 tglrTmp = prevConhead.clearNext(); dbgWord32(lastPrevpageptr, tlastPrevconptr, tglrTmp); lastPrevpageptr.p->word32[tlastPrevconptr] = tglrTmp; } else { Uint32 nextPagei = (containerhead.isNextOnSamePage() ? lastPageptr.i : lastPageptr.p->word32[tlastContainerptr+1]); Uint32 tglrTmp = prevConhead.setNext(containerhead.getNextEnd(), containerhead.getNextIndexNumber(), (nextPagei == lastPrevpageptr.i)); dbgWord32(lastPrevpageptr, tlastPrevconptr, tglrTmp); lastPrevpageptr.p->word32[tlastPrevconptr] = tglrTmp; lastPrevpageptr.p->word32[tlastPrevconptr+1] = nextPagei; } /** * Any scans currently scanning the last container must be evicted from * container since it is about to be deleted. Scans will look for next * unscanned container at next call to getScanElement. */ if (containerhead.isScanInProgress()) { Uint16 scansInProgress = fragrecptr.p->activeScanMask & ~containerhead.getScanBits(); scansInProgress = lastPageptr.p->checkScans(scansInProgress, tlastContainerptr); Uint16 scanbit = 1; for(int i = 0 ; scansInProgress != 0 ; i++, scansInProgress>>=1, scanbit<<=1) { if ((scansInProgress & 1) != 0) { ScanRecPtr scanPtr; scanPtr.i = fragrecptr.p->scan[i]; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); scanPtr.p->leaveContainer(lastPageptr.i, tlastContainerptr); lastPageptr.p->clearScanContainer(scanbit, tlastContainerptr); } } /** * All scans in progress for container are now canceled. * No need to call clearScanInProgress for container header since * container is about to be released anyway. */ } if (lastIsforward) { jam(); releaseLeftlist(lastPageptr, tlastPageindex, tlastContainerptr); } else { jam(); releaseRightlist(lastPageptr, tlastPageindex, tlastContainerptr); }//if return; }//if }//if containerhead.setLength(tlastContainerlen); dbgWord32(lastPageptr, tlastContainerptr, containerhead); arrGuard(tlastContainerptr, 2048); lastPageptr.p->word32[tlastContainerptr] = containerhead; }//Dbacc::getLastAndRemove() /* --------------------------------------------------------------------------------- */ /* RELEASE_LEFTLIST */ /* INPUT: */ /* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */ /* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */ /* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */ /* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */ /* A PART IS RELEASED. 
*/ /* */ /* OUTPUT: */ /* NONE */ /* */
/* THE FREE LIST OF LEFT FREE BUFFER IN THE PAGE WILL BE UPDATED */
/* TULL_INDEX IS INDEX TO THE FIRST WORD IN THE LEFT SIDE OF THE BUFFER */
/* --------------------------------------------------------------------------------- */
void Dbacc::releaseLeftlist(Page8Ptr pageptr, Uint32 conidx, Uint32 conptr) { Uint32 tullTmp; Uint32 tullTmp1; dbgWord32(pageptr, conptr + 1, Container::NO_CONTAINER_INDEX); arrGuard(conptr + 1, 2048); pageptr.p->word32[conptr + 1] = Container::NO_CONTAINER_INDEX; tullTmp1 = (pageptr.p->word32[ZPOS_EMPTY_LIST] >> 7) & 0x7f; dbgWord32(pageptr, conptr, tullTmp1); arrGuard(conptr, 2048); pageptr.p->word32[conptr] = tullTmp1; if (tullTmp1 <= Container::MAX_CONTAINER_INDEX) { jam(); tullTmp1 = getForwardContainerPtr(tullTmp1) + 1; dbgWord32(pageptr, tullTmp1, conidx); /* UPDATES PREV POINTER IN THE NEXT FREE */ pageptr.p->word32[tullTmp1] = conidx; } else { ndbrequire(tullTmp1 == Container::NO_CONTAINER_INDEX); }//if
tullTmp = pageptr.p->word32[ZPOS_EMPTY_LIST]; tullTmp = (((tullTmp >> 14) << 14) | (conidx << 7)) | (tullTmp & 0x7f); dbgWord32(pageptr, ZPOS_EMPTY_LIST, tullTmp); pageptr.p->word32[ZPOS_EMPTY_LIST] = tullTmp; dbgWord32(pageptr, ZPOS_ALLOC_CONTAINERS, pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1); pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1; ndbrequire(pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL); if (((pageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) { jam(); ptrCheck(pageptr, cpagesize, page8); checkoverfreelist(pageptr); }//if }//Dbacc::releaseLeftlist()
/* --------------------------------------------------------------------------------- */
/* RELEASE_RIGHTLIST */
/* INPUT: */
/* RL_PAGEPTR PAGE POINTER OF CONTAINER TO BE RELEASED */
/* TRL_PAGEINDEX PAGE INDEX OF CONTAINER TO BE RELEASED */
/* TURL_INDEX INDEX OF CONTAINER TO BE RELEASED */
/* TRL_REL_CON TRUE IF CONTAINER RELEASED OTHERWISE ONLY */
/* A PART IS RELEASED. */
/* */
/* OUTPUT: */
/* NONE */
/* */
/* THE FREE LIST OF RIGHT FREE BUFFER IN THE PAGE WILL BE UPDATED. */
/* TURL_INDEX IS INDEX TO THE FIRST WORD IN THE RIGHT SIDE OF */
/* THE BUFFER, WHICH IS THE LAST WORD IN THE BUFFER.
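   Both release routines update the page's free-list word ZPOS_EMPTY_LIST:
   the left free-list head is kept in bits 7..13 and the right free-list head
   in bits 0..6, which is why the left list is read with ">> 7" and the right
   list with "& 0x7f".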
*/
/* --------------------------------------------------------------------------------- */
void Dbacc::releaseRightlist(Page8Ptr pageptr, Uint32 conidx, Uint32 conptr) { Uint32 turlTmp1; Uint32 turlTmp; dbgWord32(pageptr, conptr + 1, Container::NO_CONTAINER_INDEX); arrGuard(conptr + 1, 2048); pageptr.p->word32[conptr + 1] = Container::NO_CONTAINER_INDEX; turlTmp1 = pageptr.p->word32[ZPOS_EMPTY_LIST] & 0x7f; dbgWord32(pageptr, conptr, turlTmp1); arrGuard(conptr, 2048); pageptr.p->word32[conptr] = turlTmp1; if (turlTmp1 <= Container::MAX_CONTAINER_INDEX) { jam(); turlTmp = getBackwardContainerPtr(turlTmp1) + 1; dbgWord32(pageptr, turlTmp, conidx); /* UPDATES PREV POINTER IN THE NEXT FREE */ pageptr.p->word32[turlTmp] = conidx; } else { ndbrequire(turlTmp1 == Container::NO_CONTAINER_INDEX); }//if
turlTmp = pageptr.p->word32[ZPOS_EMPTY_LIST]; dbgWord32(pageptr, ZPOS_EMPTY_LIST, ((turlTmp >> 7) << 7) | conidx); pageptr.p->word32[ZPOS_EMPTY_LIST] = ((turlTmp >> 7) << 7) | conidx; dbgWord32(pageptr, ZPOS_ALLOC_CONTAINERS, pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1); pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] - 1; ndbrequire(pageptr.p->word32[ZPOS_ALLOC_CONTAINERS] <= ZNIL); if (((pageptr.p->word32[ZPOS_EMPTY_LIST] >> ZPOS_PAGE_TYPE_BIT) & 3) == 1) { jam(); checkoverfreelist(pageptr); }//if }//Dbacc::releaseRightlist()
/* --------------------------------------------------------------------------------- */
/* CHECKOVERFREELIST */
/* INPUT: COL_PAGEPTR, POINTER OF AN OVERFLOW PAGE RECORD. */
/* DESCRIPTION: CHECKS IF THE PAGE HAS TO BE PUT IN THE FREE LIST OF OVERFLOW */
/* PAGES. WHEN IT HAS TO, AN OVERFLOW REC PTR WILL BE ALLOCATED */
/* TO KEEP INFORMATION ABOUT THE PAGE. */
/* --------------------------------------------------------------------------------- */
void Dbacc::checkoverfreelist(Page8Ptr colPageptr) { Uint32 tcolTmp;
// always an overflow page
tcolTmp = colPageptr.p->word32[ZPOS_ALLOC_CONTAINERS];
if (tcolTmp == 0) // Just got empty
{ jam(); releaseOverpage(colPageptr); }
else if (tcolTmp == ZFREE_LIMIT) // Just got sparse
{ jam(); LocalContainerPageList fulllist(*this, fragrecptr.p->fullpages); LocalContainerPageList sparselist(*this, fragrecptr.p->sparsepages); fulllist.remove(colPageptr); sparselist.addFirst(colPageptr); }//if
}//Dbacc::checkoverfreelist()
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* */
/* END OF DELETE MODULE */
/* */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* */
/* COMMIT AND ABORT MODULE */
/* */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ------------------------------------------------------------------------- */
/* ABORT_OPERATION */
/*DESCRIPTION: AN OPERATION RECORD CAN BE IN A LOCK QUEUE OF AN
ELEMENT OR */ /*OWNS THE LOCK. BY THIS SUBROUTINE THE LOCK STATE OF THE OPERATION WILL */ /*BE CHECKED. THE OPERATION RECORD WILL BE REMOVED FROM THE QUEUE IF IT */ /*BELONGED TO ANY ONE, OTHERWISE THE ELEMENT HEAD WILL BE UPDATED. */ /* ------------------------------------------------------------------------- */ /** * * P0 - P1 - P2 - P3 * S0 * S1 * S2 */ void Dbacc::abortParallelQueueOperation(Signal* signal, OperationrecPtr opPtr) { jam(); OperationrecPtr nextP; OperationrecPtr prevP; OperationrecPtr loPtr; Uint32 opbits = opPtr.p->m_op_bits; Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; nextP.i = opPtr.p->nextParallelQue; prevP.i = opPtr.p->prevParallelQue; loPtr.i = opPtr.p->m_lock_owner_ptr_i; ndbassert(! (opbits & Operationrec::OP_LOCK_OWNER)); ndbassert(opbits & Operationrec::OP_RUN_QUEUE); ptrCheckGuard(prevP, coprecsize, operationrec); ndbassert(prevP.p->nextParallelQue == opPtr.i); prevP.p->nextParallelQue = nextP.i; if (nextP.i != RNIL) { ptrCheckGuard(nextP, coprecsize, operationrec); ndbassert(nextP.p->prevParallelQue == opPtr.i); nextP.p->prevParallelQue = prevP.i; } else if (prevP.i != loPtr.i) { jam(); ptrCheckGuard(loPtr, coprecsize, operationrec); ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); ndbassert(loPtr.p->m_lo_last_parallel_op_ptr_i == opPtr.i); loPtr.p->m_lo_last_parallel_op_ptr_i = prevP.i; prevP.p->m_lock_owner_ptr_i = loPtr.i; /** * Abort P3...check start next */ startNext(signal, prevP); validate_lock_queue(prevP); return; } else { jam(); /** * P0 - P1 * * Abort P1, check start next */ ndbassert(prevP.p->m_op_bits & Operationrec::OP_LOCK_OWNER); prevP.p->m_lo_last_parallel_op_ptr_i = RNIL; startNext(signal, prevP); validate_lock_queue(prevP); return; } /** * Abort P1/P2 */ if (opbits & Operationrec::OP_LOCK_MODE) { Uint32 nextbits = nextP.p->m_op_bits; while ((nextbits & Operationrec::OP_LOCK_MODE) == 0) { ndbassert(nextbits & Operationrec::OP_ACC_LOCK_MODE); nextbits &= ~(Uint32)Operationrec::OP_ACC_LOCK_MODE; nextP.p->m_op_bits = nextbits; if (nextP.p->nextParallelQue != RNIL) { nextP.i = nextP.p->nextParallelQue; ptrCheckGuard(nextP, coprecsize, operationrec); nextbits = nextP.p->m_op_bits; } else { break; } } } /** * Abort P1, P2 */ if (opstate == Operationrec::OP_STATE_RUNNING) { jam(); startNext(signal, prevP); validate_lock_queue(prevP); return; } ndbassert(opstate == Operationrec::OP_STATE_EXECUTED || opstate == Operationrec::OP_STATE_WAITING); /** * Scan to last of run queue */ while (nextP.p->nextParallelQue != RNIL) { jam(); nextP.i = nextP.p->nextParallelQue; ptrCheckGuard(nextP, coprecsize, operationrec); } #ifdef VM_TRACE loPtr.i = nextP.p->m_lock_owner_ptr_i; ptrCheckGuard(loPtr, coprecsize, operationrec); ndbassert(loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); ndbassert(loPtr.p->m_lo_last_parallel_op_ptr_i == nextP.i); #endif startNext(signal, nextP); validate_lock_queue(nextP); return; } void Dbacc::abortSerieQueueOperation(Signal* signal, OperationrecPtr opPtr) { jam(); OperationrecPtr prevS, nextS; OperationrecPtr prevP, nextP; OperationrecPtr loPtr; Uint32 opbits = opPtr.p->m_op_bits; prevS.i = opPtr.p->prevSerialQue; nextS.i = opPtr.p->nextSerialQue; prevP.i = opPtr.p->prevParallelQue; nextP.i = opPtr.p->nextParallelQue; ndbassert((opbits & Operationrec::OP_LOCK_OWNER) == 0); ndbassert((opbits & Operationrec::OP_RUN_QUEUE) == 0); { FragmentrecPtr frp; frp.i = opPtr.p->fragptr; ptrCheckGuard(frp, cfragmentsize, fragmentrec); frp.p->m_lockStats.wait_fail((opbits & Operationrec::OP_LOCK_MODE) != ZREADLOCK, 
opPtr.p->m_lockTime, getHighResTimer()); } if (prevP.i != RNIL) { /** * We're not list head... */ ptrCheckGuard(prevP, coprecsize, operationrec); ndbassert(prevP.p->nextParallelQue == opPtr.i); prevP.p->nextParallelQue = nextP.i; if (nextP.i != RNIL) { ptrCheckGuard(nextP, coprecsize, operationrec); ndbassert(nextP.p->prevParallelQue == opPtr.i); ndbassert((nextP.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_WAITING); nextP.p->prevParallelQue = prevP.i; if ((prevP.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE) == 0 && opbits & Operationrec::OP_LOCK_MODE) { /** * Scan right in parallel queue to fix OP_ACC_LOCK_MODE */ while ((nextP.p->m_op_bits & Operationrec::OP_LOCK_MODE) == 0) { ndbassert(nextP.p->m_op_bits & Operationrec::OP_ACC_LOCK_MODE); nextP.p->m_op_bits &= ~(Uint32)Operationrec::OP_ACC_LOCK_MODE; nextP.i = nextP.p->nextParallelQue; if (nextP.i == RNIL) break; ptrCheckGuard(nextP, coprecsize, operationrec); } } } validate_lock_queue(prevP); return; } else { /** * We're a list head */ ptrCheckGuard(prevS, coprecsize, operationrec); ndbassert(prevS.p->nextSerialQue == opPtr.i); if (nextP.i != RNIL) { /** * Promote nextP to list head */ ptrCheckGuard(nextP, coprecsize, operationrec); ndbassert(nextP.p->prevParallelQue == opPtr.i); prevS.p->nextSerialQue = nextP.i; nextP.p->prevParallelQue = RNIL; nextP.p->nextSerialQue = nextS.i; if (nextS.i != RNIL) { jam(); ptrCheckGuard(nextS, coprecsize, operationrec); ndbassert(nextS.p->prevSerialQue == opPtr.i); nextS.p->prevSerialQue = nextP.i; validate_lock_queue(prevS); return; } else { // nextS is RNIL, i.e we're last in serie queue... // we must update lockOwner.m_lo_last_serial_op_ptr_i loPtr = prevS; while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) { loPtr.i = loPtr.p->prevSerialQue; ptrCheckGuard(loPtr, coprecsize, operationrec); } ndbassert(loPtr.p->m_lo_last_serial_op_ptr_i == opPtr.i); loPtr.p->m_lo_last_serial_op_ptr_i = nextP.i; validate_lock_queue(loPtr); return; } } if (nextS.i == RNIL) { /** * Abort S2 */ // nextS is RNIL, i.e we're last in serie queue... 
// and we have no parallel queue, // we must update lockOwner.m_lo_last_serial_op_ptr_i prevS.p->nextSerialQue = RNIL; loPtr = prevS; while ((loPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) == 0) { loPtr.i = loPtr.p->prevSerialQue; ptrCheckGuard(loPtr, coprecsize, operationrec); } ndbassert(loPtr.p->m_lo_last_serial_op_ptr_i == opPtr.i); if (prevS.i != loPtr.i) { jam(); loPtr.p->m_lo_last_serial_op_ptr_i = prevS.i; } else { loPtr.p->m_lo_last_serial_op_ptr_i = RNIL; } validate_lock_queue(loPtr); } else if (nextP.i == RNIL) { ptrCheckGuard(nextS, coprecsize, operationrec); ndbassert(nextS.p->prevSerialQue == opPtr.i); prevS.p->nextSerialQue = nextS.i; nextS.p->prevSerialQue = prevS.i; if (prevS.p->m_op_bits & Operationrec::OP_LOCK_OWNER) { /** * Abort S0 */ OperationrecPtr lastOp; lastOp.i = prevS.p->m_lo_last_parallel_op_ptr_i; if (lastOp.i != RNIL) { jam(); ptrCheckGuard(lastOp, coprecsize, operationrec); ndbassert(lastOp.p->m_lock_owner_ptr_i == prevS.i); } else { jam(); lastOp = prevS; } startNext(signal, lastOp); validate_lock_queue(lastOp); } else { validate_lock_queue(prevS); } } } } void Dbacc::abortOperation(Signal* signal) { Uint32 opbits = operationRecPtr.p->m_op_bits; validate_lock_queue(operationRecPtr); if (opbits & Operationrec::OP_LOCK_OWNER) { takeOutLockOwnersList(operationRecPtr); opbits &= ~(Uint32)Operationrec::OP_LOCK_OWNER; if (opbits & Operationrec::OP_INSERT_IS_DONE) { jam(); opbits |= Operationrec::OP_ELEMENT_DISAPPEARED; }//if operationRecPtr.p->m_op_bits = opbits; const bool queue = (operationRecPtr.p->nextParallelQue != RNIL || operationRecPtr.p->nextSerialQue != RNIL); if (queue) { jam(); release_lockowner(signal, operationRecPtr, false); } else { /* ------------------------------------------------------------------- * WE ARE OWNER OF THE LOCK AND NO OTHER OPERATIONS ARE QUEUED. * IF INSERT OR STANDBY WE DELETE THE ELEMENT OTHERWISE WE REMOVE * THE LOCK FROM THE ELEMENT. 
* ------------------------------------------------------------------ */ if ((opbits & Operationrec::OP_ELEMENT_DISAPPEARED) == 0) { jam(); Page8Ptr aboPageidptr; Uint32 taboElementptr; Uint32 tmp2Olq; taboElementptr = operationRecPtr.p->elementPointer; aboPageidptr.i = operationRecPtr.p->elementPage; ndbassert(!operationRecPtr.p->localdata.isInvalid()); tmp2Olq = ElementHeader::setUnlocked( operationRecPtr.p->localdata.m_page_idx, operationRecPtr.p->reducedHashValue); ptrCheckGuard(aboPageidptr, cpagesize, page8); dbgWord32(aboPageidptr, taboElementptr, tmp2Olq); arrGuard(taboElementptr, 2048); aboPageidptr.p->word32[taboElementptr] = tmp2Olq; return; } else { jam(); commitdelete(signal); }//if }//if } else if (opbits & Operationrec::OP_RUN_QUEUE) { abortParallelQueueOperation(signal, operationRecPtr); } else { abortSerieQueueOperation(signal, operationRecPtr); } } void Dbacc::commitDeleteCheck()const { OperationrecPtr opPtr; OperationrecPtr lastOpPtr; OperationrecPtr deleteOpPtr; Uint32 elementDeleted = 0; bool deleteCheckOngoing = true; LHBits32 hashValue; lastOpPtr = operationRecPtr; opPtr.i = operationRecPtr.p->nextParallelQue; while (opPtr.i != RNIL) { jam(); ptrCheckGuard(opPtr, coprecsize, operationrec); lastOpPtr = opPtr; opPtr.i = opPtr.p->nextParallelQue; }//while deleteOpPtr = lastOpPtr; do { Uint32 opbits = deleteOpPtr.p->m_op_bits; Uint32 op = opbits & Operationrec::OP_MASK; if (op == ZDELETE) { jam(); /* ------------------------------------------------------------------- * IF THE CURRENT OPERATION TO BE COMMITTED IS A DELETE OPERATION DUE TO * A SCAN-TAKEOVER THE ACTUAL DELETE WILL BE PERFORMED BY THE PREVIOUS * OPERATION (SCAN) IN THE PARALLEL QUEUE WHICH OWNS THE LOCK. * THE PROBLEM IS THAT THE SCAN OPERATION DOES NOT HAVE A HASH VALUE * ASSIGNED TO IT SO WE COPY IT FROM THIS OPERATION. * * WE ASSUME THAT THIS SOLUTION WILL WORK BECAUSE THE ONLY WAY A * SCAN CAN PERFORM A DELETE IS BY BEING FOLLOWED BY A NORMAL * DELETE-OPERATION THAT HAS A HASH VALUE. * ----------------------------------------------------------------- */ hashValue = deleteOpPtr.p->hashValue; elementDeleted = Operationrec::OP_ELEMENT_DISAPPEARED; deleteCheckOngoing = false; } else if (op == ZREAD || op == ZSCAN_OP) { /* ------------------------------------------------------------------- * We are trying to find out whether the commit will in the end delete * the tuple. Normally the delete will be the last operation in the * list of operations on this. It is however possible to issue reads * and scans in the same savepoint as the delete operation was issued * and these can end up after the delete in the list of operations * in the parallel queue. Thus if we discover a read or a scan * we have to continue scanning the list looking for a delete operation. */ deleteOpPtr.i = deleteOpPtr.p->prevParallelQue; if (opbits & Operationrec::OP_LOCK_OWNER) { jam(); deleteCheckOngoing = false; } else { jam(); ptrCheckGuard(deleteOpPtr, coprecsize, operationrec); }//if } else { jam(); /* ------------------------------------------------------------------ */ /* Finding an UPDATE or INSERT before finding a DELETE * means we cannot be deleting as the end result of this transaction. 
*/ deleteCheckOngoing = false; }//if } while (deleteCheckOngoing); opPtr = lastOpPtr; do { jam(); opPtr.p->m_op_bits |= Operationrec::OP_COMMIT_DELETE_CHECK; if (elementDeleted) { jam(); opPtr.p->m_op_bits |= elementDeleted; opPtr.p->hashValue = hashValue; }//if opPtr.i = opPtr.p->prevParallelQue; if (opPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER) { jam(); break; }//if ptrCheckGuard(opPtr, coprecsize, operationrec); } while (true); }//Dbacc::commitDeleteCheck() /* ------------------------------------------------------------------------- */ /* COMMIT_OPERATION */ /* INPUT: OPERATION_REC_PTR, POINTER TO AN OPERATION RECORD */ /* DESCRIPTION: THE OPERATION RECORD WILL BE TAKE OUT OF ANY LOCK QUEUE. */ /* IF IT OWNS THE ELEMENT LOCK. HEAD OF THE ELEMENT WILL BE UPDATED. */ /* ------------------------------------------------------------------------- */ void Dbacc::commitOperation(Signal* signal) { validate_lock_queue(operationRecPtr); Uint32 opbits = operationRecPtr.p->m_op_bits; Uint32 op = opbits & Operationrec::OP_MASK; ndbrequire((opbits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_EXECUTED); if ((opbits & Operationrec::OP_COMMIT_DELETE_CHECK) == 0 && (op != ZREAD && op != ZSCAN_OP)) { jam(); /* This method is used to check whether the end result of the transaction will be to delete the tuple. In this case all operation will be marked with elementIsDisappeared = true to ensure that the last operation committed will remove the tuple. We only run this once per transaction (commitDeleteCheckFlag = true if performed earlier) and we don't execute this code when committing a scan operation since committing a scan operation only means that the scan is continuing and the scan lock is released. */ commitDeleteCheck(); opbits = operationRecPtr.p->m_op_bits; }//if ndbassert(opbits & Operationrec::OP_RUN_QUEUE); if (opbits & Operationrec::OP_LOCK_OWNER) { takeOutLockOwnersList(operationRecPtr); opbits &= ~(Uint32)Operationrec::OP_LOCK_OWNER; operationRecPtr.p->m_op_bits = opbits; const bool queue = (operationRecPtr.p->nextParallelQue != RNIL || operationRecPtr.p->nextSerialQue != RNIL); if (!queue && (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) == 0) { /* * This is the normal path through the commit for operations owning the * lock without any queues and not a delete operation. */ Page8Ptr coPageidptr; Uint32 tcoElementptr; Uint32 tmp2Olq; coPageidptr.i = operationRecPtr.p->elementPage; tcoElementptr = operationRecPtr.p->elementPointer; ndbassert(!operationRecPtr.p->localdata.isInvalid()); tmp2Olq = ElementHeader::setUnlocked( operationRecPtr.p->localdata.m_page_idx, operationRecPtr.p->reducedHashValue); ptrCheckGuard(coPageidptr, cpagesize, page8); dbgWord32(coPageidptr, tcoElementptr, tmp2Olq); arrGuard(tcoElementptr, 2048); coPageidptr.p->word32[tcoElementptr] = tmp2Olq; return; } else if (queue) { jam(); /* * The case when there is a queue lined up. * Release the lock and pass it to the next operation lined up. */ release_lockowner(signal, operationRecPtr, true); return; } else { jam(); /* * No queue and elementIsDisappeared is true. * We perform the actual delete operation. */ commitdelete(signal); return; }//if } else { /** * THE OPERATION DOES NOT OWN THE LOCK. IT MUST BE IN A LOCK QUEUE OF THE * ELEMENT. 
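 *
 * Below, the operation is first unlinked from the parallel queue. If it was
 * committed while holding exclusive access, nothing more is started here;
 * if it held a shared lock, the remainder of the parallel queue is checked
 * and startNext is called once all operations in it have executed.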
*/ jam(); OperationrecPtr prev, next, lockOwner; prev.i = operationRecPtr.p->prevParallelQue; next.i = operationRecPtr.p->nextParallelQue; lockOwner.i = operationRecPtr.p->m_lock_owner_ptr_i; ptrCheckGuard(prev, coprecsize, operationrec); prev.p->nextParallelQue = next.i; if (next.i != RNIL) { jam(); ptrCheckGuard(next, coprecsize, operationrec); next.p->prevParallelQue = prev.i; } else if (prev.p->m_op_bits & Operationrec::OP_LOCK_OWNER) { jam(); ndbassert(lockOwner.i == prev.i); prev.p->m_lo_last_parallel_op_ptr_i = RNIL; next = prev; } else { jam(); /** * Last operation in parallell queue */ ndbassert(prev.i != lockOwner.i); ptrCheckGuard(lockOwner, coprecsize, operationrec); ndbassert(lockOwner.p->m_op_bits & Operationrec::OP_LOCK_OWNER); lockOwner.p->m_lo_last_parallel_op_ptr_i = prev.i; prev.p->m_lock_owner_ptr_i = lockOwner.i; next = prev; } /** * Check possible lock upgrade */ if(opbits & Operationrec::OP_ACC_LOCK_MODE) { jam(); /** * Not lock owner...committing a exclusive operation... * * e.g * T1(R) T1(X) * T2(R/X) * * If T1(X) commits T2(R/X) is not supposed to run * as T1(R) should also commit * * e.g * T1(R) T1(X) T1*(R) * T2(R/X) * * If T1*(R) commits T2(R/X) is not supposed to run * as T1(R),T2(x) should also commit */ validate_lock_queue(prev); return; } /** * We committed a shared lock * Check if we can start next... */ while(next.p->nextParallelQue != RNIL) { jam(); next.i = next.p->nextParallelQue; ptrCheckGuard(next, coprecsize, operationrec); if ((next.p->m_op_bits & Operationrec::OP_STATE_MASK) != Operationrec::OP_STATE_EXECUTED) { jam(); return; } } startNext(signal, next); validate_lock_queue(prev); } }//Dbacc::commitOperation() void Dbacc::release_lockowner(Signal* signal, OperationrecPtr opPtr, bool commit) { OperationrecPtr nextP; OperationrecPtr nextS; OperationrecPtr newOwner; OperationrecPtr lastP; Uint32 opbits = opPtr.p->m_op_bits; nextP.i = opPtr.p->nextParallelQue; nextS.i = opPtr.p->nextSerialQue; lastP.i = opPtr.p->m_lo_last_parallel_op_ptr_i; Uint32 lastS = opPtr.p->m_lo_last_serial_op_ptr_i; ndbassert(lastP.i != RNIL || lastS != RNIL); ndbassert(nextP.i != RNIL || nextS.i != RNIL); enum { NOTHING, CHECK_LOCK_UPGRADE, START_NEW } action = NOTHING; if (nextP.i != RNIL) { jam(); ptrCheckGuard(nextP, coprecsize, operationrec); newOwner = nextP; if (lastP.i == newOwner.i) { newOwner.p->m_lo_last_parallel_op_ptr_i = RNIL; lastP = nextP; } else { ptrCheckGuard(lastP, coprecsize, operationrec); newOwner.p->m_lo_last_parallel_op_ptr_i = lastP.i; lastP.p->m_lock_owner_ptr_i = newOwner.i; } newOwner.p->m_lo_last_serial_op_ptr_i = lastS; newOwner.p->nextSerialQue = nextS.i; if (nextS.i != RNIL) { jam(); ptrCheckGuard(nextS, coprecsize, operationrec); ndbassert(nextS.p->prevSerialQue == opPtr.i); nextS.p->prevSerialQue = newOwner.i; } if (commit) { if ((opbits & Operationrec::OP_ACC_LOCK_MODE) == ZREADLOCK) { jam(); /** * Lock owner...committing a shared operation... 
* this can be a lock upgrade * * e.g * T1(R) T2(R) * T2(X) * * If T1(R) commits T2(X) is supposed to run * * e.g * T1(X) T1(R) * T2(R) * * If T1(X) commits, then T1(R) _should_ commit before T2(R) is * allowed to proceed */ action = CHECK_LOCK_UPGRADE; } else { jam(); newOwner.p->m_op_bits |= Operationrec::OP_LOCK_MODE; } } else { /** * Aborting an operation can *always* lead to lock upgrade */ action = CHECK_LOCK_UPGRADE; Uint32 opstate = opbits & Operationrec::OP_STATE_MASK; if (opstate != Operationrec::OP_STATE_EXECUTED) { ndbassert(opstate == Operationrec::OP_STATE_RUNNING); if (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) { jam(); report_dealloc(signal, opPtr.p); newOwner.p->localdata.setInvalid(); } else { jam(); newOwner.p->localdata = opPtr.p->localdata; } action = START_NEW; } /** * Update ACC_LOCK_MODE */ if (opbits & Operationrec::OP_LOCK_MODE) { Uint32 nextbits = nextP.p->m_op_bits; while ((nextbits & Operationrec::OP_LOCK_MODE) == 0) { ndbassert(nextbits & Operationrec::OP_ACC_LOCK_MODE); nextbits &= ~(Uint32)Operationrec::OP_ACC_LOCK_MODE; nextP.p->m_op_bits = nextbits; if (nextP.p->nextParallelQue != RNIL) { nextP.i = nextP.p->nextParallelQue; ptrCheckGuard(nextP, coprecsize, operationrec); nextbits = nextP.p->m_op_bits; } else { break; } } } } } else { jam(); ptrCheckGuard(nextS, coprecsize, operationrec); newOwner = nextS; newOwner.p->m_op_bits |= Operationrec::OP_RUN_QUEUE; if (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) { report_dealloc(signal, opPtr.p); newOwner.p->localdata.setInvalid(); } else { jam(); newOwner.p->localdata = opPtr.p->localdata; } lastP = newOwner; while (lastP.p->nextParallelQue != RNIL) { lastP.i = lastP.p->nextParallelQue; ptrCheckGuard(lastP, coprecsize, operationrec); lastP.p->m_op_bits |= Operationrec::OP_RUN_QUEUE; } if (newOwner.i != lastP.i) { jam(); newOwner.p->m_lo_last_parallel_op_ptr_i = lastP.i; } else { jam(); newOwner.p->m_lo_last_parallel_op_ptr_i = RNIL; } if (newOwner.i != lastS) { jam(); newOwner.p->m_lo_last_serial_op_ptr_i = lastS; } else { jam(); newOwner.p->m_lo_last_serial_op_ptr_i = RNIL; } action = START_NEW; } insertLockOwnersList(newOwner); /** * Copy op info, and store op in element * */ { newOwner.p->elementPage = opPtr.p->elementPage; newOwner.p->elementPointer = opPtr.p->elementPointer; newOwner.p->elementContainer = opPtr.p->elementContainer; newOwner.p->reducedHashValue = opPtr.p->reducedHashValue; newOwner.p->m_op_bits |= (opbits & Operationrec::OP_ELEMENT_DISAPPEARED); if (opbits & Operationrec::OP_ELEMENT_DISAPPEARED) { /* ------------------------------------------------------------------- */ // If the elementIsDisappeared is set then we know that the // hashValue is also set since it always originates from a // committing abort or a aborting insert. // Scans do not initialise the hashValue and must have this // value initialised if they are // to successfully commit the delete. 
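// Note that further below the element header word is rewritten with
// ElementHeader::setLocked(newOwner.i), so the element then references its
// new lock owner.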
/* ------------------------------------------------------------------- */ jam(); newOwner.p->hashValue = opPtr.p->hashValue; }//if Page8Ptr pagePtr; pagePtr.i = newOwner.p->elementPage; ptrCheckGuard(pagePtr, cpagesize, page8); const Uint32 tmp = ElementHeader::setLocked(newOwner.i); arrGuard(newOwner.p->elementPointer, 2048); pagePtr.p->word32[newOwner.p->elementPointer] = tmp; #if defined(VM_TRACE) || defined(ERROR_INSERT) /** * Invalidate page number in elements second word for test in initScanOp */ if (newOwner.p->localdata.isInvalid()) { pagePtr.p->word32[newOwner.p->elementPointer + 1] = newOwner.p->localdata.m_page_no; } else { ndbrequire(newOwner.p->localdata.m_page_no == pagePtr.p->word32[newOwner.p->elementPointer+1]); } #endif } switch(action){ case NOTHING: validate_lock_queue(newOwner); return; case START_NEW: startNew(signal, newOwner); validate_lock_queue(newOwner); return; case CHECK_LOCK_UPGRADE: startNext(signal, lastP); validate_lock_queue(lastP); break; } } void Dbacc::startNew(Signal* signal, OperationrecPtr newOwner) { OperationrecPtr save = operationRecPtr; operationRecPtr = newOwner; Uint32 opbits = newOwner.p->m_op_bits; Uint32 op = opbits & Operationrec::OP_MASK; ndbassert((opbits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_WAITING); ndbassert(opbits & Operationrec::OP_LOCK_OWNER); const bool deleted = opbits & Operationrec::OP_ELEMENT_DISAPPEARED; Uint32 errCode = 0; opbits &= opbits & ~(Uint32)Operationrec::OP_STATE_MASK; opbits |= Operationrec::OP_STATE_RUNNING; if (op == ZSCAN_OP && (opbits & Operationrec::OP_LOCK_REQ) == 0) goto scan; /* Waiting op now runnable... */ { FragmentrecPtr frp; frp.i = newOwner.p->fragptr; ptrCheckGuard(frp, cfragmentsize, fragmentrec); frp.p->m_lockStats.wait_ok((opbits & Operationrec::OP_LOCK_MODE) != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); } if (deleted) { jam(); if (op != ZINSERT && op != ZWRITE) { errCode = ZREAD_ERROR; goto ref; } opbits &= ~(Uint32)Operationrec::OP_MASK; opbits &= ~(Uint32)Operationrec::OP_ELEMENT_DISAPPEARED; opbits |= (op = ZINSERT); opbits |= Operationrec::OP_INSERT_IS_DONE; goto conf; } else if (op == ZINSERT) { jam(); errCode = ZWRITE_ERROR; goto ref; } else if (op == ZWRITE) { jam(); opbits &= ~(Uint32)Operationrec::OP_MASK; opbits |= (op = ZUPDATE);<|fim▁hole|> conf: newOwner.p->m_op_bits = opbits; sendAcckeyconf(signal); sendSignal(newOwner.p->userblockref, GSN_ACCKEYCONF, signal, 6, JBB); operationRecPtr = save; return; scan: jam(); newOwner.p->m_op_bits = opbits; takeOutScanLockQueue(newOwner.p->scanRecPtr); putReadyScanQueue(newOwner.p->scanRecPtr); operationRecPtr = save; return; ref: newOwner.p->m_op_bits = opbits; signal->theData[0] = newOwner.p->userptr; signal->theData[1] = errCode; sendSignal(newOwner.p->userblockref, GSN_ACCKEYREF, signal, 2, JBB); operationRecPtr = save; return; } /** * takeOutLockOwnersList * * Description: Take out an operation from the doubly linked * lock owners list on the fragment. 
* */ void Dbacc::takeOutLockOwnersList(const OperationrecPtr& outOperPtr) const { const Uint32 Tprev = outOperPtr.p->prevLockOwnerOp; const Uint32 Tnext = outOperPtr.p->nextLockOwnerOp; #ifdef VM_TRACE // Check that operation is already in the list OperationrecPtr tmpOperPtr; bool inList = false; tmpOperPtr.i = fragrecptr.p->lockOwnersList; while (tmpOperPtr.i != RNIL){ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec); if (tmpOperPtr.i == outOperPtr.i) inList = true; tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp; } ndbrequire(inList == true); #endif ndbassert(outOperPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER); // Fast path through the code for the common case. if ((Tprev == RNIL) && (Tnext == RNIL)) { ndbrequire(fragrecptr.p->lockOwnersList == outOperPtr.i); fragrecptr.p->lockOwnersList = RNIL; return; } // Check previous operation if (Tprev != RNIL) { jam(); arrGuard(Tprev, coprecsize); operationrec[Tprev].nextLockOwnerOp = Tnext; } else { fragrecptr.p->lockOwnersList = Tnext; }//if // Check next operation if (Tnext == RNIL) { return; } else { jam(); arrGuard(Tnext, coprecsize); operationrec[Tnext].prevLockOwnerOp = Tprev; }//if return; }//Dbacc::takeOutLockOwnersList() /** * insertLockOwnersList * * Description: Insert an operation first in the dubly linked lock owners * list on the fragment. * */ void Dbacc::insertLockOwnersList(const OperationrecPtr& insOperPtr) const { OperationrecPtr tmpOperPtr; #ifdef VM_TRACE // Check that operation is not already in list tmpOperPtr.i = fragrecptr.p->lockOwnersList; while(tmpOperPtr.i != RNIL){ ptrCheckGuard(tmpOperPtr, coprecsize, operationrec); ndbrequire(tmpOperPtr.i != insOperPtr.i); tmpOperPtr.i = tmpOperPtr.p->nextLockOwnerOp; } #endif tmpOperPtr.i = fragrecptr.p->lockOwnersList; ndbrequire(! (insOperPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER)); insOperPtr.p->m_op_bits |= Operationrec::OP_LOCK_OWNER; insOperPtr.p->prevLockOwnerOp = RNIL; insOperPtr.p->nextLockOwnerOp = tmpOperPtr.i; fragrecptr.p->lockOwnersList = insOperPtr.i; if (tmpOperPtr.i == RNIL) { return; } else { jam(); ptrCheckGuard(tmpOperPtr, coprecsize, operationrec); tmpOperPtr.p->prevLockOwnerOp = insOperPtr.i; }//if }//Dbacc::insertLockOwnersList() /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF COMMIT AND ABORT MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* ALLOC_OVERFLOW_PAGE */ /* DESCRIPTION: */ /* --------------------------------------------------------------------------------- */ void Dbacc::allocOverflowPage() { tresult = 0; Page8Ptr spPageptr; seizePage(spPageptr); if (tresult > ZLIMIT_OF_ERROR) { return; } { LocalContainerPageList sparselist(*this, fragrecptr.p->sparsepages); sparselist.addLast(spPageptr); } initOverpage(spPageptr); }//Dbacc::allocOverflowPage() /* --------------------------------------------------------------------------------- */ /* 
--------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* */
/* EXPAND/SHRINK MODULE */
/* */
/* --------------------------------------------------------------------------------- */
/* --------------------------------------------------------------------------------- */
/* ******************--------------------------------------------------------------- */
/*EXPANDCHECK EXPAND BUCKET ORD */
/* SENDER: ACC, LEVEL B */
/* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */
/* DESCRIPTION: A BUCKET OF A FRAGMENT PAGE WILL BE EXPANDED INTO TWO BUCKETS */
/* ACCORDING TO LH3. */
/* ******************--------------------------------------------------------------- */
/* ******************--------------------------------------------------------------- */
/* EXPANDCHECK EXPAND BUCKET ORD */
/* ******************------------------------------+ */
/* SENDER: ACC, LEVEL B */
/* A BUCKET OF THE FRAGMENT WILL */
/* BE EXPANDED ACCORDING TO LH3, */
/* AND COMMIT TRANSACTION PROCESS */
/* WILL BE CONTINUED */
Uint32 Dbacc::checkScanExpand(Uint32 splitBucket) { Uint32 Ti; Uint32 TreturnCode = 0; Uint32 TPageIndex; Uint32 TDirInd; Uint32 TSplit; Uint32 TreleaseScanBucket; Page8Ptr TPageptr; ScanRecPtr TscanPtr; Uint16 releaseScanMask = 0; TSplit = splitBucket; for (Ti = 0; Ti < MAX_PARALLEL_SCANS_PER_FRAG; Ti++) { if (fragrecptr.p->scan[Ti] != RNIL) {
//-------------------------------------------------------------
// A scan is ongoing on this particular local fragment. We have
// to check its current state.
//-------------------------------------------------------------
TscanPtr.i = fragrecptr.p->scan[Ti]; ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); if (TscanPtr.p->activeLocalFrag == fragrecptr.i) { if (TscanPtr.p->scanBucketState == ScanRec::FIRST_LAP) { if (TSplit == TscanPtr.p->nextBucketIndex) { jam();
//-------------------------------------------------------------
// We are currently scanning this bucket. We cannot split it
// simultaneously with the scan. We have to pass on this
// opportunity to split the bucket.
//-------------------------------------------------------------
TreturnCode = 1; return TreturnCode; } else if (TSplit > TscanPtr.p->nextBucketIndex) { jam(); ndbassert(TSplit <= TscanPtr.p->startNoOfBuckets); if (TSplit <= TscanPtr.p->startNoOfBuckets) {
//-------------------------------------------------------------
// This bucket has not yet been scanned. We must reset the scanned
// bit indicator for this scan on this bucket.
//-------------------------------------------------------------
releaseScanMask |= TscanPtr.p->scanMask; } } else { jam(); }//if
} else if (TscanPtr.p->scanBucketState == ScanRec::SECOND_LAP) { jam();
//-------------------------------------------------------------
// We are performing a second lap to handle buckets that were
// merged during the first lap of scanning. During this second
// lap we do not allow any splits or merges.
//-------------------------------------------------------------
TreturnCode = 1; return TreturnCode; } else { ndbrequire(TscanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED); jam();
//-------------------------------------------------------------
// The scan is completed and we can thus go ahead and perform
// the split.
//------------------------------------------------------------- }//if }//if }//if }//for TreleaseScanBucket = TSplit; TPageIndex = fragrecptr.p->getPageIndex(TreleaseScanBucket); TDirInd = fragrecptr.p->getPageNumber(TreleaseScanBucket); TPageptr.i = getPagePtr(fragrecptr.p->directory, TDirInd); ptrCheckGuard(TPageptr, cpagesize, page8); releaseScanBucket(TPageptr, TPageIndex, releaseScanMask); return TreturnCode; }//Dbacc::checkScanExpand() void Dbacc::execEXPANDCHECK2(Signal* signal) { jamEntry(); if(refToBlock(signal->getSendersBlockRef()) == DBLQH) { jam(); return; } fragrecptr.i = signal->theData[0]; tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */ ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); fragrecptr.p->expandOrShrinkQueued = false; #ifdef ERROR_INSERT bool force_expand_shrink = false; if (ERROR_INSERTED(3004) && fragrecptr.p->fragmentid == 0) { if (fragrecptr.p->level.getSize() > ERROR_INSERT_EXTRA) { return execSHRINKCHECK2(signal); } else if (fragrecptr.p->level.getSize() == ERROR_INSERT_EXTRA) { return; } force_expand_shrink = true; } if (!force_expand_shrink && fragrecptr.p->slack > 0) #else if (fragrecptr.p->slack > 0) #endif { jam(); /* IT MEANS THAT IF SLACK > ZERO */ /*--------------------------------------------------------------*/ /* THE SLACK HAS IMPROVED AND IS NOW ACCEPTABLE AND WE */ /* CAN FORGET ABOUT THE EXPAND PROCESS. */ /*--------------------------------------------------------------*/ if (ERROR_INSERTED(3002)) debug_lh_vars("SLK"); if (fragrecptr.p->dirRangeFull == ZTRUE) { jam(); fragrecptr.p->dirRangeFull = ZFALSE; } return; }//if if (fragrecptr.p->sparsepages.isEmpty()) { jam(); allocOverflowPage(); if (tresult > ZLIMIT_OF_ERROR) { jam(); /*--------------------------------------------------------------*/ /* WE COULD NOT ALLOCATE ANY OVERFLOW PAGE. THUS WE HAVE TO STOP*/ /* THE EXPAND SINCE WE CANNOT GUARANTEE ITS COMPLETION. */ /*--------------------------------------------------------------*/ return; }//if }//if if (cfreepages.isEmpty()) { jam(); /*--------------------------------------------------------------*/ /* WE HAVE TO STOP THE EXPAND PROCESS SINCE THERE ARE NO FREE */ /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */ /* CANNOT COMPLETE THE EXPAND. TO AVOID THE CRASH WE EXIT HERE. */ /*--------------------------------------------------------------*/ return; }//if if (fragrecptr.p->level.isFull()) { jam(); /* * The level structure does not allow more buckets. * Do not expand. */ return; } Uint32 splitBucket; Uint32 receiveBucket; bool doSplit = fragrecptr.p->level.getSplitBucket(splitBucket, receiveBucket); // Check that splitted bucket is not currently scanned if (doSplit && checkScanExpand(splitBucket) == 1) { jam(); /*--------------------------------------------------------------*/ // A scan state was inconsistent with performing an expand // operation. /*--------------------------------------------------------------*/ return; }//if /*--------------------------------------------------------------------------*/ /* WE START BY FINDING THE PAGE, THE PAGE INDEX AND THE PAGE DIRECTORY*/ /* OF THE NEW BUCKET WHICH SHALL RECEIVE THE ELEMENT WHICH HAVE A 1 IN*/ /* THE NEXT HASH BIT. THIS BIT IS USED IN THE SPLIT MECHANISM TO */ /* DECIDE WHICH ELEMENT GOES WHERE. 
*/ /*--------------------------------------------------------------------------*/ texpDirInd = fragrecptr.p->getPageNumber(receiveBucket); if (fragrecptr.p->getPageIndex(receiveBucket) == 0) { // Need new bucket expPageptr.i = RNIL; } else { expPageptr.i = getPagePtr(fragrecptr.p->directory, texpDirInd); #ifdef VM_TRACE require(expPageptr.i != RNIL); #endif } if (expPageptr.i == RNIL) { jam(); seizePage(expPageptr); if (tresult > ZLIMIT_OF_ERROR) { jam(); return; }//if if (!setPagePtr(fragrecptr.p->directory, texpDirInd, expPageptr.i)) { jam(); // TODO: should release seized page tresult = ZDIR_RANGE_FULL_ERROR; return; } tipPageId = texpDirInd; initPage(expPageptr); } else { ptrCheckGuard(expPageptr, cpagesize, page8); }//if fragrecptr.p->expReceivePageptr = expPageptr.i; fragrecptr.p->expReceiveIndex = fragrecptr.p->getPageIndex(receiveBucket); /*--------------------------------------------------------------------------*/ /* THE NEXT ACTION IS TO FIND THE PAGE, THE PAGE INDEX AND THE PAGE */ /* DIRECTORY OF THE BUCKET TO BE SPLIT. */ /*--------------------------------------------------------------------------*/ Page8Ptr pageptr; Uint32 conidx = fragrecptr.p->getPageIndex(splitBucket); texpDirInd = fragrecptr.p->getPageNumber(splitBucket); pageptr.i = getPagePtr(fragrecptr.p->directory, texpDirInd); #ifdef VM_TRACE require(pageptr.i != RNIL); #endif fragrecptr.p->expSenderIndex = conidx; fragrecptr.p->expSenderPageptr = pageptr.i; if (pageptr.i == RNIL) { jam(); endofexpLab(signal); /* EMPTY BUCKET */ return; }//if fragrecptr.p->expReceiveIsforward = true; ptrCheckGuard(pageptr, cpagesize, page8); expandcontainer(pageptr, conidx); endofexpLab(signal); return; }//Dbacc::execEXPANDCHECK2() void Dbacc::endofexpLab(Signal* signal) const { fragrecptr.p->slack += fragrecptr.p->maxloadfactor; fragrecptr.p->expandCounter++; fragrecptr.p->level.expand(); Uint32 noOfBuckets = fragrecptr.p->level.getSize(); Uint32 Thysteres = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor; fragrecptr.p->slackCheck = Int64(noOfBuckets) * Thysteres; #ifdef ERROR_INSERT bool force_expand_shrink = false; if (ERROR_INSERTED(3004) && fragrecptr.p->fragmentid == 0 && fragrecptr.p->level.getSize() != ERROR_INSERT_EXTRA) { force_expand_shrink = true; } if ((force_expand_shrink || fragrecptr.p->slack < 0) && !fragrecptr.p->level.isFull()) #else if (fragrecptr.p->slack < 0 && !fragrecptr.p->level.isFull()) #endif { jam(); /* IT MEANS THAT IF SLACK < ZERO */ /* --------------------------------------------------------------------------------- */ /* IT IS STILL NECESSARY TO EXPAND THE FRAGMENT EVEN MORE. START IT FROM HERE */ /* WITHOUT WAITING FOR NEXT COMMIT ON THE FRAGMENT. 
*/ /* --------------------------------------------------------------------------------- */ signal->theData[0] = fragrecptr.i; fragrecptr.p->expandOrShrinkQueued = true; sendSignal(cownBlockref, GSN_EXPANDCHECK2, signal, 1, JBB); }//if return; }//Dbacc::endofexpLab() void Dbacc::execDEBUG_SIG(Signal* signal) { jamEntry(); expPageptr.i = signal->theData[0]; progError(__LINE__, NDBD_EXIT_SR_UNDOLOG); return; }//Dbacc::execDEBUG_SIG() LHBits32 Dbacc::getElementHash(OperationrecPtr& oprec) { jam(); ndbassert(!oprec.isNull()); // Only calculate hash value if operation does not already have a complete hash value if (oprec.p->hashValue.valid_bits() < fragrecptr.p->MAX_HASH_VALUE_BITS) { jam(); Local_key localkey; localkey = oprec.p->localdata; Uint32 len = readTablePk(localkey.m_page_no, localkey.m_page_idx, ElementHeader::setLocked(oprec.i), oprec); if (len > 0) oprec.p->hashValue = LHBits32(md5_hash((Uint64*)ckeys, len)); } return oprec.p->hashValue; } LHBits32 Dbacc::getElementHash(Uint32 const* elemptr) { jam(); assert(ElementHeader::getUnlocked(*elemptr)); Uint32 elemhead = *elemptr; Local_key localkey; elemptr += 1; ndbrequire(fragrecptr.p->localkeylen == 1); localkey.m_page_no = *elemptr; localkey.m_page_idx = ElementHeader::getPageIdx(elemhead); OperationrecPtr oprec; oprec.i = RNIL; Uint32 len = readTablePk(localkey.m_page_no, localkey.m_page_idx, elemhead, oprec); if (len > 0) { jam(); return LHBits32(md5_hash((Uint64*)ckeys, len)); } else { // Return an invalid hash value if no data jam(); return LHBits32(); } } LHBits32 Dbacc::getElementHash(Uint32 const* elemptr, OperationrecPtr& oprec) { jam(); if (!oprec.isNull()) { jam(); return getElementHash(oprec); } Uint32 elemhead = *elemptr; if (ElementHeader::getUnlocked(elemhead)) { jam(); return getElementHash(elemptr); } else { jam(); oprec.i = ElementHeader::getOpPtrI(elemhead); ptrCheckGuard(oprec, coprecsize, operationrec); return getElementHash(oprec); } } /* --------------------------------------------------------------------------------- */ /* EXPANDCONTAINER */ /* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */ /* CEXC_PAGEINDEX (INDEX OF THE BUCKET). */ /* */ /* DESCRIPTION: THE HASH VALUE OF ALL ELEMENTS IN THE CONTAINER WILL BE */ /* CHECKED. 
SOME OF THIS ELEMENTS HAVE TO MOVE TO THE NEW CONTAINER */ /* --------------------------------------------------------------------------------- */ void Dbacc::expandcontainer(Page8Ptr pageptr, Uint32 conidx) { ContainerHeader containerhead; LHBits32 texcHashvalue; Uint32 tidrContainerptr; Uint32 tidrElemhead; Page8Ptr lastPageptr; Page8Ptr lastPrevpageptr; bool lastIsforward; Uint32 tlastPageindex; Uint32 tlastElementptr; Uint32 tlastContainerptr; Uint32 tlastPrevconptr; Uint32 elemptr; Uint32 prevPageptr = RNIL; Uint32 prevConptr = 0; bool isforward = true; Uint32 elemStep; const Uint32 elemLen = fragrecptr.p->elementLength; OperationrecPtr oprecptr; bool newBucket = true; EXP_CONTAINER_LOOP: Uint32 conptr = getContainerPtr(conidx, isforward); if (isforward) { jam(); elemptr = conptr + Container::HEADER_SIZE; elemStep = elemLen; } else { jam(); elemStep = -elemLen; elemptr = conptr + elemStep; } arrGuard(conptr, 2048); containerhead = pageptr.p->word32[conptr]; const Uint32 conlen = containerhead.getLength(); Uint32 cexcMovedLen = Container::HEADER_SIZE; if (conlen <= Container::HEADER_SIZE) { ndbrequire(conlen >= Container::HEADER_SIZE); jam(); goto NEXT_ELEMENT; }//if NEXT_ELEMENT_LOOP: oprecptr.i = RNIL; ptrNull(oprecptr); /* --------------------------------------------------------------------------------- */ /* CEXC_PAGEINDEX PAGE INDEX OF CURRENT CONTAINER BEING EXAMINED. */ /* CEXC_CONTAINERPTR INDEX OF CURRENT CONTAINER BEING EXAMINED. */ /* CEXC_ELEMENTPTR INDEX OF CURRENT ELEMENT BEING EXAMINED. */ /* EXC_PAGEPTR PAGE WHERE CURRENT ELEMENT RESIDES. */ /* CEXC_PREVPAGEPTR PAGE OF PREVIOUS CONTAINER. */ /* CEXC_PREVCONPTR INDEX OF PREVIOUS CONTAINER */ /* CEXC_FORWARD DIRECTION OF CURRENT CONTAINER */ /* --------------------------------------------------------------------------------- */ arrGuard(elemptr, 2048); tidrElemhead = pageptr.p->word32[elemptr]; bool move; if (ElementHeader::getLocked(tidrElemhead)) { jam(); oprecptr.i = ElementHeader::getOpPtrI(tidrElemhead); ptrCheckGuard(oprecptr, coprecsize, operationrec); ndbassert(oprecptr.p->reducedHashValue.valid_bits() >= 1); move = oprecptr.p->reducedHashValue.get_bit(1); oprecptr.p->reducedHashValue.shift_out(); const LHBits16 reducedHashValue = oprecptr.p->reducedHashValue; if (!fragrecptr.p->enough_valid_bits(reducedHashValue)) { jam(); oprecptr.p->reducedHashValue = fragrecptr.p->level.reduceForSplit(getElementHash(oprecptr)); } } else { jam(); LHBits16 reducedHashValue = ElementHeader::getReducedHashValue(tidrElemhead); ndbassert(reducedHashValue.valid_bits() >= 1); move = reducedHashValue.get_bit(1); reducedHashValue.shift_out(); if (!fragrecptr.p->enough_valid_bits(reducedHashValue)) { jam(); const Uint32* elemwordptr = &pageptr.p->word32[elemptr]; const LHBits32 hashValue = getElementHash(elemwordptr); reducedHashValue = fragrecptr.p->level.reduceForSplit(hashValue); } tidrElemhead = ElementHeader::setReducedHashValue(tidrElemhead, reducedHashValue); } if (!move) { jam(); if (ElementHeader::getUnlocked(tidrElemhead)) pageptr.p->word32[elemptr] = tidrElemhead; /* --------------------------------------------------------------------------------- */ /* THIS ELEMENT IS NOT TO BE MOVED. WE CALCULATE THE WHEREABOUTS OF THE NEXT */ /* ELEMENT AND PROCEED WITH THAT OR END THE SEARCH IF THERE ARE NO MORE */ /* ELEMENTS IN THIS CONTAINER. 
*/ /* --------------------------------------------------------------------------------- */ goto NEXT_ELEMENT; }//if /* --------------------------------------------------------------------------------- */ /* THE HASH BIT WAS SET AND WE SHALL MOVE THIS ELEMENT TO THE NEW BUCKET. */ /* WE START BY READING THE ELEMENT TO BE ABLE TO INSERT IT INTO THE NEW BUCKET.*/ /* THEN WE INSERT THE ELEMENT INTO THE NEW BUCKET. THE NEXT STEP IS TO DELETE */ /* THE ELEMENT FROM THIS BUCKET. THIS IS PERFORMED BY REPLACING IT WITH THE */ /* LAST ELEMENT IN THE BUCKET. IF THIS ELEMENT IS TO BE MOVED WE MOVE IT AND */ /* GET THE LAST ELEMENT AGAIN UNTIL WE EITHER FIND ONE THAT STAYS OR THIS */ /* ELEMENT IS THE LAST ELEMENT. */ /* --------------------------------------------------------------------------------- */ { ndbrequire(fragrecptr.p->localkeylen == 1); const Uint32 localkey = pageptr.p->word32[elemptr + 1]; #if defined(VM_TRACE) || !defined(NDEBUG) pageptr.p->word32[elemptr] = ElementHeader::setInvalid(); #endif Uint32 tidrPageindex = fragrecptr.p->expReceiveIndex; Page8Ptr idrPageptr; idrPageptr.i = fragrecptr.p->expReceivePageptr; ptrCheckGuard(idrPageptr, cpagesize, page8); bool tidrIsforward = fragrecptr.p->expReceiveIsforward; insertElement(Element(tidrElemhead, localkey), oprecptr, idrPageptr, tidrPageindex, tidrIsforward, tidrContainerptr, containerhead.getScanBits(), newBucket); fragrecptr.p->expReceiveIndex = tidrPageindex; fragrecptr.p->expReceivePageptr = idrPageptr.i; fragrecptr.p->expReceiveIsforward = tidrIsforward; newBucket = false; } REMOVE_LAST_LOOP: jam(); lastPageptr.i = pageptr.i; lastPageptr.p = pageptr.p; tlastContainerptr = conptr; lastPrevpageptr.i = prevPageptr; ptrCheck(lastPrevpageptr, cpagesize, page8); tlastPrevconptr = prevConptr; arrGuard(tlastContainerptr, 2048); lastIsforward = isforward; tlastPageindex = conidx; getLastAndRemove(lastPrevpageptr, tlastPrevconptr, lastPageptr, tlastPageindex, tlastContainerptr, lastIsforward, tlastElementptr); if (pageptr.i == lastPageptr.i) { if (elemptr == tlastElementptr) { jam(); /* --------------------------------------------------------------------------------- */ /* THE CURRENT ELEMENT WAS ALSO THE LAST ELEMENT. */ /* --------------------------------------------------------------------------------- */ return; }//if }//if /* --------------------------------------------------------------------------------- */ /* THE CURRENT ELEMENT WAS NOT THE LAST ELEMENT. IF THE LAST ELEMENT SHOULD */ /* STAY WE COPY IT TO THE POSITION OF THE CURRENT ELEMENT, OTHERWISE WE INSERT */ /* INTO THE NEW BUCKET, REMOVE IT AND TRY WITH THE NEW LAST ELEMENT. 
*/ /* --------------------------------------------------------------------------------- */ oprecptr.i = RNIL; ptrNull(oprecptr); arrGuard(tlastElementptr, 2048); tidrElemhead = lastPageptr.p->word32[tlastElementptr]; if (ElementHeader::getLocked(tidrElemhead)) { jam(); oprecptr.i = ElementHeader::getOpPtrI(tidrElemhead); ptrCheckGuard(oprecptr, coprecsize, operationrec); ndbassert(oprecptr.p->reducedHashValue.valid_bits() >= 1); move = oprecptr.p->reducedHashValue.get_bit(1); oprecptr.p->reducedHashValue.shift_out(); if (!fragrecptr.p->enough_valid_bits(oprecptr.p->reducedHashValue)) { jam(); oprecptr.p->reducedHashValue = fragrecptr.p->level.reduceForSplit(getElementHash(oprecptr)); } } else { jam(); LHBits16 reducedHashValue = ElementHeader::getReducedHashValue(tidrElemhead); ndbassert(reducedHashValue.valid_bits() > 0); move = reducedHashValue.get_bit(1); reducedHashValue.shift_out(); if (!fragrecptr.p->enough_valid_bits(reducedHashValue)) { jam(); const Uint32* elemwordptr = &lastPageptr.p->word32[tlastElementptr]; const LHBits32 hashValue = getElementHash(elemwordptr); reducedHashValue = fragrecptr.p->level.reduceForSplit(hashValue); } tidrElemhead = ElementHeader::setReducedHashValue(tidrElemhead, reducedHashValue); } if (!move) { jam(); if (ElementHeader::getUnlocked(tidrElemhead)) lastPageptr.p->word32[tlastElementptr] = tidrElemhead; /* --------------------------------------------------------------------------------- */ /* THE LAST ELEMENT IS NOT TO BE MOVED. WE COPY IT TO THE CURRENT ELEMENT. */ /* --------------------------------------------------------------------------------- */ const Page8Ptr delPageptr = pageptr; const Uint32 delConptr = conptr; const Uint32 delElemptr = elemptr; deleteElement(delPageptr, delConptr, delElemptr, lastPageptr, tlastElementptr); } else { jam(); /* --------------------------------------------------------------------------------- */ /* THE LAST ELEMENT IS ALSO TO BE MOVED. */ /* --------------------------------------------------------------------------------- */ { ndbrequire(fragrecptr.p->localkeylen == 1); const Uint32 localkey = lastPageptr.p->word32[tlastElementptr + 1]; Uint32 tidrPageindex = fragrecptr.p->expReceiveIndex; Page8Ptr idrPageptr; idrPageptr.i = fragrecptr.p->expReceivePageptr; ptrCheckGuard(idrPageptr, cpagesize, page8); bool tidrIsforward = fragrecptr.p->expReceiveIsforward; insertElement(Element(tidrElemhead, localkey), oprecptr, idrPageptr, tidrPageindex, tidrIsforward, tidrContainerptr, containerhead.getScanBits(), newBucket); fragrecptr.p->expReceiveIndex = tidrPageindex; fragrecptr.p->expReceivePageptr = idrPageptr.i; fragrecptr.p->expReceiveIsforward = tidrIsforward; newBucket = false; } goto REMOVE_LAST_LOOP; }//if NEXT_ELEMENT: arrGuard(conptr, 2048); containerhead = pageptr.p->word32[conptr]; cexcMovedLen = cexcMovedLen + fragrecptr.p->elementLength; if (containerhead.getLength() > cexcMovedLen) { jam(); /* --------------------------------------------------------------------------------- */ /* WE HAVE NOT YET MOVED THE COMPLETE CONTAINER. WE PROCEED WITH THE NEXT */ /* ELEMENT IN THE CONTAINER. IT IS IMPORTANT TO READ THE CONTAINER LENGTH */ /* FROM THE CONTAINER HEADER SINCE IT MIGHT CHANGE BY REMOVING THE LAST */ /* ELEMENT IN THE BUCKET. 
*/ /* --------------------------------------------------------------------------------- */ elemptr = elemptr + elemStep; goto NEXT_ELEMENT_LOOP; }//if if (containerhead.getNextEnd() != 0) { jam(); /* --------------------------------------------------------------------------------- */ /* WE PROCEED TO THE NEXT CONTAINER IN THE BUCKET. */ /* --------------------------------------------------------------------------------- */ prevPageptr = pageptr.i; prevConptr = conptr; nextcontainerinfo(pageptr, conptr, containerhead, conidx, isforward); goto EXP_CONTAINER_LOOP; }//if }//Dbacc::expandcontainer() /* ******************--------------------------------------------------------------- */ /* SHRINKCHECK JOIN BUCKET ORD */ /* SENDER: ACC, LEVEL B */ /* INPUT: FRAGRECPTR, POINTS TO A FRAGMENT RECORD. */ /* DESCRIPTION: TWO BUCKET OF A FRAGMENT PAGE WILL BE JOINED TOGETHER */ /* ACCORDING TO LH3. */ /* ******************--------------------------------------------------------------- */ /* ******************--------------------------------------------------------------- */ /* SHRINKCHECK JOIN BUCKET ORD */ /* ******************------------------------------+ */ /* SENDER: ACC, LEVEL B */ /* TWO BUCKETS OF THE FRAGMENT */ /* WILL BE JOINED ACORDING TO LH3 */ /* AND COMMIT TRANSACTION PROCESS */ /* WILL BE CONTINUED */ Uint32 Dbacc::checkScanShrink(Uint32 sourceBucket, Uint32 destBucket) { Uint32 Ti; Uint32 TreturnCode = 0; Uint32 TPageIndex; Uint32 TDirInd; Uint32 TmergeDest; Uint32 TmergeSource; Uint32 TreleaseScanBucket; Uint32 TreleaseInd = 0; enum Actions { ExtendRescan, ReduceUndefined }; Bitmask<1> actions[MAX_PARALLEL_SCANS_PER_FRAG]; Uint16 releaseDestScanMask = 0; Uint16 releaseSourceScanMask = 0; Page8Ptr TPageptr; ScanRecPtr scanPtr; TmergeDest = destBucket; TmergeSource = sourceBucket; for (Ti = 0; Ti < MAX_PARALLEL_SCANS_PER_FRAG; Ti++) { actions[Ti].clear(); if (fragrecptr.p->scan[Ti] != RNIL) { scanPtr.i = fragrecptr.p->scan[Ti]; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); if (scanPtr.p->activeLocalFrag == fragrecptr.i) { //------------------------------------------------------------- // A scan is ongoing on this particular local fragment. We have // to check its current state. //------------------------------------------------------------- if (scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) { jam(); if ((TmergeDest == scanPtr.p->nextBucketIndex) || (TmergeSource == scanPtr.p->nextBucketIndex)) { jam(); //------------------------------------------------------------- // We are currently scanning one of the buckets involved in the // merge. We cannot merge while simultaneously performing a scan. // We have to pass this offer for merging the buckets. //------------------------------------------------------------- TreturnCode = 1; return TreturnCode; } else if (TmergeDest < scanPtr.p->nextBucketIndex) { jam(); /** * Merge bucket into scanned bucket. Mark for rescan. */ actions[Ti].set(ExtendRescan); if (TmergeSource == scanPtr.p->startNoOfBuckets) { /** * Merge unscanned bucket with undefined scan bits into scanned * bucket. Source buckets scan bits must be cleared. */ actions[Ti].set(ReduceUndefined); releaseSourceScanMask |= scanPtr.p->scanMask; } TreleaseInd = 1; }//if else { /** * Merge unscanned bucket with undefined scan bits into unscanned * bucket with undefined scan bits. 
*/ if (TmergeSource == scanPtr.p->startNoOfBuckets) { actions[Ti].set(ReduceUndefined); releaseSourceScanMask |= scanPtr.p->scanMask; TreleaseInd = 1; } if (TmergeDest <= scanPtr.p->startNoOfBuckets) { jam(); // Destination bucket is not scanned by scan releaseDestScanMask |= scanPtr.p->scanMask; } } } else if (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP) { jam(); //------------------------------------------------------------- // We are performing a second lap to handle buckets that was // merged during the first lap of scanning. During this second // lap we do not allow any splits or merges. //------------------------------------------------------------- TreturnCode = 1; return TreturnCode; } else if (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) { jam(); //------------------------------------------------------------- // The scan is completed and we can thus go ahead and perform // the split. //------------------------------------------------------------- releaseDestScanMask |= scanPtr.p->scanMask; releaseSourceScanMask |= scanPtr.p->scanMask; } else { jam(); sendSystemerror(__LINE__); return TreturnCode; }//if }//if }//if }//for TreleaseScanBucket = TmergeSource; TPageIndex = fragrecptr.p->getPageIndex(TreleaseScanBucket); TDirInd = fragrecptr.p->getPageNumber(TreleaseScanBucket); TPageptr.i = getPagePtr(fragrecptr.p->directory, TDirInd); ptrCheckGuard(TPageptr, cpagesize, page8); releaseScanBucket(TPageptr, TPageIndex, releaseSourceScanMask); TreleaseScanBucket = TmergeDest; TPageIndex = fragrecptr.p->getPageIndex(TreleaseScanBucket); TDirInd = fragrecptr.p->getPageNumber(TreleaseScanBucket); TPageptr.i = getPagePtr(fragrecptr.p->directory, TDirInd); ptrCheckGuard(TPageptr, cpagesize, page8); releaseScanBucket(TPageptr, TPageIndex, releaseDestScanMask); if (TreleaseInd == 1) { jam(); for (Ti = 0; Ti < MAX_PARALLEL_SCANS_PER_FRAG; Ti++) { if (!actions[Ti].isclear()) { jam(); scanPtr.i = fragrecptr.p->scan[Ti]; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); if (actions[Ti].get(ReduceUndefined)) { scanPtr.p->startNoOfBuckets --; } if (actions[Ti].get(ExtendRescan)) { if (TmergeDest < scanPtr.p->minBucketIndexToRescan) { jam(); //------------------------------------------------------------- // We have to keep track of the starting bucket to Rescan in the // second lap. //------------------------------------------------------------- scanPtr.p->minBucketIndexToRescan = TmergeDest; }//if if (TmergeDest > scanPtr.p->maxBucketIndexToRescan) { jam(); //------------------------------------------------------------- // We have to keep track of the ending bucket to Rescan in the // second lap. 
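          // The second lap will revisit the buckets in the range
          // [minBucketIndexToRescan, maxBucketIndexToRescan].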
//------------------------------------------------------------- scanPtr.p->maxBucketIndexToRescan = TmergeDest; }//if } }//if }//for }//if return TreturnCode; }//Dbacc::checkScanShrink() void Dbacc::execSHRINKCHECK2(Signal* signal) { jamEntry(); fragrecptr.i = signal->theData[0]; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); fragrecptr.p->expandOrShrinkQueued = false; tresult = 0; /* 0= FALSE,1= TRUE,> ZLIMIT_OF_ERROR =ERRORCODE */ #ifdef ERROR_INSERT bool force_expand_shrink = false; if (ERROR_INSERTED(3004) && fragrecptr.p->fragmentid == 0) { if (fragrecptr.p->level.getSize() < ERROR_INSERT_EXTRA) { return execEXPANDCHECK2(signal); } else if (fragrecptr.p->level.getSize() == ERROR_INSERT_EXTRA) { return; } force_expand_shrink = true; } if (!force_expand_shrink && fragrecptr.p->slack <= fragrecptr.p->slackCheck) #else if (fragrecptr.p->slack <= fragrecptr.p->slackCheck) #endif { jam(); /* TIME FOR JOIN BUCKETS PROCESS */ /*--------------------------------------------------------------*/ /* NO LONGER NECESSARY TO SHRINK THE FRAGMENT. */ /*--------------------------------------------------------------*/ return; }//if #ifdef ERROR_INSERT if (!force_expand_shrink && fragrecptr.p->slack < 0) #else if (fragrecptr.p->slack < 0) #endif { jam(); /*--------------------------------------------------------------*/ /* THE SLACK IS NEGATIVE, IN THIS CASE WE WILL NOT NEED ANY */ /* SHRINK. */ /*--------------------------------------------------------------*/ return; }//if if (fragrecptr.p->sparsepages.isEmpty()) { jam(); allocOverflowPage(); if (tresult > ZLIMIT_OF_ERROR) { jam(); return; }//if }//if if (cfreepages.isEmpty()) { jam(); /*--------------------------------------------------------------*/ /* WE HAVE TO STOP THE SHRINK PROCESS SINCE THERE ARE NO FREE */ /* PAGES. THIS MEANS THAT WE COULD BE FORCED TO CRASH SINCE WE */ /* CANNOT COMPLETE THE SHRINK. TO AVOID THE CRASH WE EXIT HERE. */ /*--------------------------------------------------------------*/ return; }//if if (fragrecptr.p->level.isEmpty()) { jam(); /* no need to shrink empty hash table */ return; } // Since expandCounter guards more shrinks than expands and // all fragments starts with a full page of buckets ndbassert(fragrecptr.p->getPageNumber(fragrecptr.p->level.getTop()) > 0); Uint32 mergeSourceBucket; Uint32 mergeDestBucket; bool doMerge = fragrecptr.p->level.getMergeBuckets(mergeSourceBucket, mergeDestBucket); ndbassert(doMerge); // Merge always needed since we never shrink below one page of buckets /* check that neither of source or destination bucket are currently scanned */ if (doMerge && checkScanShrink(mergeSourceBucket, mergeDestBucket) == 1) { jam(); /*--------------------------------------------------------------*/ // A scan state was inconsistent with performing a shrink // operation. /*--------------------------------------------------------------*/ return; }//if if (ERROR_INSERTED(3002)) debug_lh_vars("SHR"); if (fragrecptr.p->dirRangeFull == ZTRUE) { jam(); fragrecptr.p->dirRangeFull = ZFALSE; } shrink_adjust_reduced_hash_value(mergeDestBucket); /*--------------------------------------------------------------------------*/ /* WE START BY FINDING THE NECESSARY INFORMATION OF THE BUCKET TO BE */ /* REMOVED WHICH WILL SEND ITS ELEMENTS TO THE RECEIVING BUCKET. 
*/ /*--------------------------------------------------------------------------*/ Uint32 cexcPageindex = fragrecptr.p->getPageIndex(mergeSourceBucket); texpDirInd = fragrecptr.p->getPageNumber(mergeSourceBucket); Page8Ptr pageptr; pageptr.i = getPagePtr(fragrecptr.p->directory, texpDirInd); fragrecptr.p->expSenderIndex = cexcPageindex; fragrecptr.p->expSenderPageptr = pageptr.i; fragrecptr.p->expSenderDirIndex = texpDirInd; /*--------------------------------------------------------------------------*/ /* WE NOW PROCEED BY FINDING THE NECESSARY INFORMATION ABOUT THE */ /* RECEIVING BUCKET. */ /*--------------------------------------------------------------------------*/ texpDirInd = fragrecptr.p->getPageNumber(mergeDestBucket); fragrecptr.p->expReceivePageptr = getPagePtr(fragrecptr.p->directory, texpDirInd); fragrecptr.p->expReceiveIndex = fragrecptr.p->getPageIndex(mergeDestBucket); fragrecptr.p->expReceiveIsforward = true; if (pageptr.i == RNIL) { jam(); endofshrinkbucketLab(signal); /* EMPTY BUCKET */ return; }//if /*--------------------------------------------------------------------------*/ /* INITIALISE THE VARIABLES FOR THE SHRINK PROCESS. */ /*--------------------------------------------------------------------------*/ ptrCheckGuard(pageptr, cpagesize, page8); bool isforward = true; Uint32 conptr = getForwardContainerPtr(cexcPageindex); arrGuard(conptr, 2048); ContainerHeader containerhead = pageptr.p->word32[conptr]; Uint32 conlen = containerhead.getLength(); if (conlen <= Container::HEADER_SIZE) { ndbrequire(conlen == Container::HEADER_SIZE); } else { jam(); shrinkcontainer(pageptr, conptr, isforward, conlen); }//if /*--------------------------------------------------------------------------*/ /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */ /*--------------------------------------------------------------------------*/ if (containerhead.isUsingBothEnds()) { jam(); Uint32 relconptr = conptr + (ZBUF_SIZE - Container::HEADER_SIZE); releaseRightlist(pageptr, cexcPageindex, relconptr); }//if ContainerHeader conthead; conthead.initInUse(); dbgWord32(pageptr, conptr, conthead); arrGuard(conptr, 2048); pageptr.p->word32[conptr] = conthead; if (containerhead.getNextEnd() == 0) { jam(); endofshrinkbucketLab(signal); return; }//if nextcontainerinfo(pageptr, conptr, containerhead, cexcPageindex, isforward); do { conptr = getContainerPtr(cexcPageindex, isforward); arrGuard(conptr, 2048); containerhead = pageptr.p->word32[conptr]; conlen = containerhead.getLength(); ndbrequire(conlen > Container::HEADER_SIZE); /*--------------------------------------------------------------------------*/ /* THIS CONTAINER IS NOT YET EMPTY AND WE REMOVE ALL THE ELEMENTS. */ /*--------------------------------------------------------------------------*/ shrinkcontainer(pageptr, conptr, isforward, conlen); const Uint32 prevPageptr = pageptr.i; const Uint32 cexcPrevpageindex = cexcPageindex; const Uint32 cexcPrevisforward = isforward; if (containerhead.getNextEnd() != 0) { jam(); /*--------------------------------------------------------------------------*/ /* WE MUST CALL THE NEXT CONTAINER INFO ROUTINE BEFORE WE RELEASE THE */ /* CONTAINER SINCE THE RELEASE WILL OVERWRITE THE NEXT POINTER. 
*/ /*--------------------------------------------------------------------------*/ nextcontainerinfo(pageptr, conptr, containerhead, cexcPageindex, isforward); }//if Page8Ptr rlPageptr; rlPageptr.i = prevPageptr; ptrCheckGuard(rlPageptr, cpagesize, page8); ndbassert(!containerhead.isScanInProgress()); if (cexcPrevisforward) { jam(); if (containerhead.isUsingBothEnds()) { jam(); Uint32 relconptr = conptr + (ZBUF_SIZE - Container::HEADER_SIZE); releaseRightlist(rlPageptr, cexcPrevpageindex, relconptr); }//if ndbrequire(ContainerHeader(rlPageptr.p->word32[conptr]).isInUse()); releaseLeftlist(rlPageptr, cexcPrevpageindex, conptr); } else { jam(); if (containerhead.isUsingBothEnds()) { jam(); Uint32 relconptr = conptr - (ZBUF_SIZE - Container::HEADER_SIZE); releaseLeftlist(rlPageptr, cexcPrevpageindex, relconptr); }//if ndbrequire(ContainerHeader(rlPageptr.p->word32[conptr]).isInUse()); releaseRightlist(rlPageptr, cexcPrevpageindex, conptr); }//if } while (containerhead.getNextEnd() != 0); endofshrinkbucketLab(signal); return; }//Dbacc::execSHRINKCHECK2() void Dbacc::endofshrinkbucketLab(Signal* signal) { fragrecptr.p->level.shrink(); fragrecptr.p->expandCounter--; fragrecptr.p->slack -= fragrecptr.p->maxloadfactor; if (fragrecptr.p->expSenderIndex == 0) { jam(); if (fragrecptr.p->expSenderPageptr != RNIL) { jam(); Page8Ptr rpPageptr; rpPageptr.i = fragrecptr.p->expSenderPageptr; ptrCheckGuard(rpPageptr, cpagesize, page8); releasePage(rpPageptr); unsetPagePtr(fragrecptr.p->directory, fragrecptr.p->expSenderDirIndex); }//if if ((fragrecptr.p->getPageNumber(fragrecptr.p->level.getSize()) & 0xff) == 0) { jam(); DynArr256 dir(directoryPool, fragrecptr.p->directory); DynArr256::ReleaseIterator iter; Uint32 relcode; #ifdef VM_TRACE Uint32 count = 0; #endif dir.init(iter); while ((relcode = dir.trim(fragrecptr.p->expSenderDirIndex, iter)) != 0) { #ifdef VM_TRACE count++; ndbrequire(count <= 256); #endif } }//if }//if #ifdef ERROR_INSERT bool force_expand_shrink = false; if (ERROR_INSERTED(3004) && fragrecptr.p->fragmentid == 0 && fragrecptr.p->level.getSize() != ERROR_INSERT_EXTRA) { force_expand_shrink = true; } if (force_expand_shrink || fragrecptr.p->slack > 0) #else if (fragrecptr.p->slack > 0) #endif { jam(); /*--------------------------------------------------------------*/ /* THE SLACK IS POSITIVE, IN THIS CASE WE WILL CHECK WHETHER */ /* WE WILL CONTINUE PERFORM ANOTHER SHRINK. */ /*--------------------------------------------------------------*/ Uint32 noOfBuckets = fragrecptr.p->level.getSize(); Uint32 Thysteresis = fragrecptr.p->maxloadfactor - fragrecptr.p->minloadfactor; fragrecptr.p->slackCheck = Int64(noOfBuckets) * Thysteresis; #ifdef ERROR_INSERT if (force_expand_shrink || fragrecptr.p->slack > Thysteresis) #else if (fragrecptr.p->slack > Thysteresis) #endif { /*--------------------------------------------------------------*/ /* IT IS STILL NECESSARY TO SHRINK THE FRAGMENT MORE. THIS*/ /* CAN HAPPEN WHEN A NUMBER OF SHRINKS GET REJECTED */ /* DURING A LOCAL CHECKPOINT. WE START A NEW SHRINK */ /* IMMEDIATELY FROM HERE WITHOUT WAITING FOR A COMMIT TO */ /* START IT. */ /*--------------------------------------------------------------*/ if (fragrecptr.p->expandCounter > 0) { jam(); /*--------------------------------------------------------------*/ /* IT IS VERY IMPORTANT TO NOT TRY TO SHRINK MORE THAN */ /* WAS EXPANDED. IF MAXP IS SET TO A VALUE BELOW 63 THEN */ /* WE WILL LOSE RECORDS SINCE GETDIRINDEX CANNOT HANDLE */ /* SHRINKING BELOW 2^K - 1 (NOW 63). 
THIS WAS A BUG THAT */ /* WAS REMOVED 2000-05-12. */ /*--------------------------------------------------------------*/ signal->theData[0] = fragrecptr.i; ndbrequire(!fragrecptr.p->expandOrShrinkQueued); fragrecptr.p->expandOrShrinkQueued = true; sendSignal(cownBlockref, GSN_SHRINKCHECK2, signal, 1, JBB); }//if }//if }//if ndbrequire(fragrecptr.p->getPageNumber(fragrecptr.p->level.getSize()) > 0); return; }//Dbacc::endofshrinkbucketLab() /* --------------------------------------------------------------------------------- */ /* SHRINKCONTAINER */ /* INPUT: EXC_PAGEPTR (POINTER TO THE ACTIVE PAGE RECORD) */ /* CEXC_CONTAINERLEN (LENGTH OF THE CONTAINER). */ /* CEXC_CONTAINERPTR (ARRAY INDEX OF THE CONTAINER). */ /* CEXC_FORWARD (CONTAINER FORWARD (+1) OR BACKWARD (-1)) */ /* */ /* DESCRIPTION: SCAN ALL ELEMENTS IN DESTINATION BUCKET BEFORE MERGE */ /* AND ADJUST THE STORED REDUCED HASH VALUE (SHIFT IN ZERO). */ /* --------------------------------------------------------------------------------- */ void Dbacc::shrink_adjust_reduced_hash_value(Uint32 bucket_number) { /* * Note: function are a copy paste from getElement() with modified inner loop * instead of finding a specific element, scan through all and modify. */ Uint32 tgeElementHeader; Uint32 tgeElemStep; Uint32 tgePageindex; Uint32 tgeNextptrtype; Uint32 tgeContainerptr; Uint32 tgeElementptr; register Uint32 tgeRemLen; const Uint32 TelemLen = fragrecptr.p->elementLength; const Uint32 localkeylen = fragrecptr.p->localkeylen; tgePageindex = fragrecptr.p->getPageIndex(bucket_number); Page8Ptr gePageptr; gePageptr.i = getPagePtr(fragrecptr.p->directory, fragrecptr.p->getPageNumber(bucket_number)); ptrCheckGuard(gePageptr, cpagesize, page8); ndbrequire(TelemLen == ZELEM_HEAD_SIZE + localkeylen); tgeNextptrtype = ZLEFT; /* Loop through all containers in a bucket */ do { if (tgeNextptrtype == ZLEFT) { jam(); tgeContainerptr = getForwardContainerPtr(tgePageindex); tgeElementptr = tgeContainerptr + Container::HEADER_SIZE; tgeElemStep = TelemLen; ndbrequire(tgeContainerptr < 2048); tgeRemLen = ContainerHeader(gePageptr.p->word32[tgeContainerptr]).getLength(); ndbrequire((tgeContainerptr + tgeRemLen - 1) < 2048); } else if (tgeNextptrtype == ZRIGHT) { jam(); tgeContainerptr = getBackwardContainerPtr(tgePageindex); tgeElementptr = tgeContainerptr - TelemLen; tgeElemStep = 0 - TelemLen; ndbrequire(tgeContainerptr < 2048); tgeRemLen = ContainerHeader(gePageptr.p->word32[tgeContainerptr]).getLength(); ndbrequire((tgeContainerptr - tgeRemLen) < 2048); } else { jam(); jamLine(tgeNextptrtype); ndbrequire(false); }//if if (tgeRemLen >= Container::HEADER_SIZE + TelemLen) { ndbrequire(tgeRemLen <= ZBUF_SIZE); /* ------------------------------------------------------------------- */ /* Loop through all elements in a container */ do { tgeElementHeader = gePageptr.p->word32[tgeElementptr]; tgeRemLen = tgeRemLen - TelemLen; /* * Adjust the stored reduced hash value for element, shifting in a zero */ if (ElementHeader::getLocked(tgeElementHeader)) { jam(); OperationrecPtr oprec; oprec.i = ElementHeader::getOpPtrI(tgeElementHeader); ptrCheckGuard(oprec, coprecsize, operationrec); oprec.p->reducedHashValue.shift_in(false); } else { jam(); LHBits16 reducedHashValue = ElementHeader::getReducedHashValue(tgeElementHeader); reducedHashValue.shift_in(false); tgeElementHeader = ElementHeader::setReducedHashValue(tgeElementHeader, reducedHashValue); gePageptr.p->word32[tgeElementptr] = tgeElementHeader; } if (tgeRemLen <= Container::HEADER_SIZE) { break; } tgeElementptr = 
tgeElementptr + tgeElemStep; } while (true); }//if ndbrequire(tgeRemLen == Container::HEADER_SIZE); ContainerHeader containerhead = gePageptr.p->word32[tgeContainerptr]; ndbassert((containerhead.getScanBits() & ~fragrecptr.p->activeScanMask) == 0); tgeNextptrtype = containerhead.getNextEnd(); if (tgeNextptrtype == 0) { jam(); return; /* NO MORE CONTAINER */ }//if tgePageindex = containerhead.getNextIndexNumber(); /* NEXT CONTAINER PAGE INDEX 7 BITS */ ndbrequire((tgePageindex <= Container::MAX_CONTAINER_INDEX) || (tgePageindex == Container::NO_CONTAINER_INDEX)); if (!containerhead.isNextOnSamePage()) { jam(); gePageptr.i = gePageptr.p->word32[tgeContainerptr + 1]; /* NEXT PAGE I */ ptrCheckGuard(gePageptr, cpagesize, page8); }//if } while (1); return; }//Dbacc::shrink_adjust_reduced_hash_value() void Dbacc::shrinkcontainer(Page8Ptr pageptr, Uint32 conptr, bool isforward, Uint32 conlen) { Uint32 tshrElementptr; Uint32 tshrRemLen; Uint32 tidrContainerptr; Uint32 tidrElemhead; const Uint32 elemLen = fragrecptr.p->elementLength; Uint32 elemStep; OperationrecPtr oprecptr; tshrRemLen = conlen - Container::HEADER_SIZE; if (isforward) { jam(); tshrElementptr = conptr + Container::HEADER_SIZE; elemStep = elemLen; } else { jam(); elemStep = -elemLen; tshrElementptr = conptr + elemStep; }//if SHR_LOOP: oprecptr.i = RNIL; ptrNull(oprecptr); /* --------------------------------------------------------------------------------- */ /* THE CODE BELOW IS ALL USED TO PREPARE FOR THE CALL TO INSERT_ELEMENT AND */ /* HANDLE THE RESULT FROM INSERT_ELEMENT. INSERT_ELEMENT INSERTS THE ELEMENT */ /* INTO ANOTHER BUCKET. */ /* --------------------------------------------------------------------------------- */ arrGuard(tshrElementptr, 2048); tidrElemhead = pageptr.p->word32[tshrElementptr]; if (ElementHeader::getLocked(tidrElemhead)) { jam(); /* --------------------------------------------------------------------------------- */ /* IF THE ELEMENT IS LOCKED WE MUST UPDATE THE ELEMENT INFO IN THE OPERATION */ /* RECORD OWNING THE LOCK. WE DO THIS BY READING THE OPERATION RECORD POINTER */ /* FROM THE ELEMENT HEADER. */ /* --------------------------------------------------------------------------------- */ oprecptr.i = ElementHeader::getOpPtrI(tidrElemhead); ptrCheckGuard(oprecptr, coprecsize, operationrec); oprecptr.p->reducedHashValue.shift_in(true); }//if else { LHBits16 reducedHashValue = ElementHeader::getReducedHashValue(tidrElemhead); reducedHashValue.shift_in(true); tidrElemhead = ElementHeader::setReducedHashValue(tidrElemhead, reducedHashValue); } { ndbrequire(fragrecptr.p->localkeylen == 1); const Uint32 localkey = pageptr.p->word32[tshrElementptr + 1]; Uint32 tidrPageindex = fragrecptr.p->expReceiveIndex; Page8Ptr idrPageptr; idrPageptr.i = fragrecptr.p->expReceivePageptr; ptrCheckGuard(idrPageptr, cpagesize, page8); bool tidrIsforward = fragrecptr.p->expReceiveIsforward; insertElement(Element(tidrElemhead, localkey), oprecptr, idrPageptr, tidrPageindex, tidrIsforward, tidrContainerptr, ContainerHeader(pageptr.p->word32[conptr]).getScanBits(), false); /* --------------------------------------------------------------- */ /* TAKE CARE OF RESULT FROM INSERT_ELEMENT. 
*/ /* --------------------------------------------------------------- */ fragrecptr.p->expReceiveIndex = tidrPageindex; fragrecptr.p->expReceivePageptr = idrPageptr.i; fragrecptr.p->expReceiveIsforward = tidrIsforward; } if (tshrRemLen < elemLen) { jam(); sendSystemerror(__LINE__); }//if tshrRemLen = tshrRemLen - elemLen; if (tshrRemLen != 0) { jam(); tshrElementptr += elemStep; goto SHR_LOOP; }//if }//Dbacc::shrinkcontainer() void Dbacc::initFragAdd(Signal* signal, FragmentrecPtr regFragPtr) const { const AccFragReq * const req = (AccFragReq*)&signal->theData[0]; Uint32 minLoadFactor = (req->minLoadFactor * ZBUF_SIZE) / 100; Uint32 maxLoadFactor = (req->maxLoadFactor * ZBUF_SIZE) / 100; if (ERROR_INSERTED(3003)) // use small LoadFactors to force sparse hash table { jam(); minLoadFactor = 1; maxLoadFactor = 2; } if (minLoadFactor >= maxLoadFactor) { jam(); minLoadFactor = maxLoadFactor - 1; }//if regFragPtr.p->fragState = ACTIVEFRAG; // NOTE: next line must match calculation in Dblqh::execLQHFRAGREQ regFragPtr.p->myfid = req->fragId; regFragPtr.p->myTableId = req->tableId; ndbrequire(req->kValue == 6); ndbrequire(req->kValue == regFragPtr.p->k); regFragPtr.p->expandCounter = 0; /** * Only allow shrink during SR * - to make sure we don't run out of pages during REDO log execution * * Is later restored to 0 by LQH at end of REDO log execution */ regFragPtr.p->expandOrShrinkQueued = false; regFragPtr.p->level.setSize(1 << req->kValue); regFragPtr.p->minloadfactor = minLoadFactor; regFragPtr.p->maxloadfactor = maxLoadFactor; regFragPtr.p->slack = Int64(regFragPtr.p->level.getSize()) * maxLoadFactor; regFragPtr.p->localkeylen = req->localKeyLen; regFragPtr.p->nodetype = (req->reqInfo >> 4) & 0x3; regFragPtr.p->keyLength = req->keyLength; ndbrequire(req->keyLength != 0); ndbrequire(regFragPtr.p->elementLength == ZELEM_HEAD_SIZE + regFragPtr.p->localkeylen); Uint32 Tmp1 = regFragPtr.p->level.getSize(); Uint32 Tmp2 = regFragPtr.p->maxloadfactor - regFragPtr.p->minloadfactor; regFragPtr.p->slackCheck = Int64(Tmp1) * Tmp2; regFragPtr.p->mytabptr = req->tableId; regFragPtr.p->roothashcheck = req->kValue + req->lhFragBits; regFragPtr.p->noOfElements = 0; regFragPtr.p->m_commit_count = 0; // stable results for (Uint32 i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) { regFragPtr.p->scan[i] = RNIL; }//for Uint32 hasCharAttr = g_key_descriptor_pool.getPtr(req->tableId)->hasCharAttr; regFragPtr.p->hasCharAttr = hasCharAttr; }//Dbacc::initFragAdd() void Dbacc::initFragGeneral(FragmentrecPtr regFragPtr)const { new (&regFragPtr.p->directory) DynArr256::Head(); regFragPtr.p->lockOwnersList = RNIL; regFragPtr.p->hasCharAttr = ZFALSE; regFragPtr.p->dirRangeFull = ZFALSE; regFragPtr.p->fragState = FREEFRAG; regFragPtr.p->sparsepages.init(); regFragPtr.p->fullpages.init(); regFragPtr.p->m_noOfAllocatedPages = 0; regFragPtr.p->activeScanMask = 0; regFragPtr.p->m_lockStats.init(); }//Dbacc::initFragGeneral() void Dbacc::execACC_SCANREQ(Signal* signal) //Direct Executed { jamEntry(); AccScanReq * req = (AccScanReq*)&signal->theData[0]; tuserptr = req->senderData; tuserblockref = req->senderRef; tabptr.i = req->tableId; tfid = req->fragmentNo; tscanFlag = req->requestInfo; tscanTrid1 = req->transId1; tscanTrid2 = req->transId2; tresult = 0; ptrCheckGuard(tabptr, ctablesize, tabrec); ndbrequire(getfragmentrec(fragrecptr, tfid)); Uint32 i; for (i = 0; i < MAX_PARALLEL_SCANS_PER_FRAG; i++) { jam(); if (fragrecptr.p->scan[i] == RNIL) { jam(); break; } } ndbrequire(i != MAX_PARALLEL_SCANS_PER_FRAG); 
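  /* A FREE SCAN SLOT WAS FOUND IN THE FRAGMENT RECORD. THE SLOT NUMBER */
  /* I DECIDES WHICH SCAN BIT THIS SCAN WILL USE (SCANMASK = 1 << I) IN */
  /* THE CONTAINER AND ELEMENT SCAN MASKS. */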
ndbrequire(cfirstFreeScanRec != RNIL); seizeScanRec(); fragrecptr.p->scan[i] = scanPtr.i; scanPtr.p->scanBucketState = ScanRec::FIRST_LAP; scanPtr.p->scanLockMode = AccScanReq::getLockMode(tscanFlag); scanPtr.p->scanReadCommittedFlag = AccScanReq::getReadCommittedFlag(tscanFlag); /* TWELVE BITS OF THE ELEMENT HEAD ARE SCAN */ /* CHECK BITS. THE MASK NOTES WHICH BIT IS */ /* ALLOCATED FOR THE ACTIVE SCAN */ scanPtr.p->scanMask = 1 << i; scanPtr.p->scanUserptr = tuserptr; scanPtr.p->scanUserblockref = tuserblockref; scanPtr.p->scanTrid1 = tscanTrid1; scanPtr.p->scanTrid2 = tscanTrid2; scanPtr.p->scanLockHeld = 0; scanPtr.p->scanOpsAllocated = 0; scanPtr.p->scanFirstActiveOp = RNIL; scanPtr.p->scanFirstQueuedOp = RNIL; scanPtr.p->scanLastQueuedOp = RNIL; scanPtr.p->scanFirstLockedOp = RNIL; scanPtr.p->scanLastLockedOp = RNIL; scanPtr.p->scanState = ScanRec::WAIT_NEXT; initScanFragmentPart(); /* ************************ */ /* ACC_SCANCONF */ /* ************************ */ signal->theData[0] = scanPtr.p->scanUserptr; signal->theData[1] = scanPtr.i; signal->theData[2] = 1; /* NR OF LOCAL FRAGMENT */ signal->theData[3] = fragrecptr.p->fragmentid; signal->theData[4] = RNIL; signal->theData[7] = AccScanConf::ZNOT_EMPTY_FRAGMENT; signal->theData[8] = 0; /* Success */ /** * Return with signal->theData[8] == 0 indicates ACC_SCANCONF * return signal. */ return; }//Dbacc::execACC_SCANREQ() /* ******************--------------------------------------------------------------- */ /* NEXT_SCANREQ REQUEST FOR NEXT ELEMENT OF */ /* ******************------------------------------+ A FRAGMENT. */ /* SENDER: LQH, LEVEL B */ void Dbacc::execNEXT_SCANREQ(Signal* signal) { Uint32 tscanNextFlag; jamEntry(); scanPtr.i = signal->theData[0]; operationRecPtr.i = signal->theData[1]; tscanNextFlag = signal->theData[2]; /* ------------------------------------------ */ /* 1 = ZCOPY_NEXT GET NEXT ELEMENT */ /* 2 = ZCOPY_NEXT_COMMIT COMMIT THE */ /* ACTIVE ELEMENT AND GET THE NEXT ONE */ /* 3 = ZCOPY_COMMIT COMMIT THE ACTIVE ELEMENT */ /* 4 = ZCOPY_REPEAT GET THE ACTIVE ELEMENT */ /* 5 = ZCOPY_ABORT RELOCK THE ACTIVE ELEMENT */ /* 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY */ /* ------------------------------------------ */ tresult = 0; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); ndbrequire(scanPtr.p->scanState == ScanRec::WAIT_NEXT); switch (tscanNextFlag) { case NextScanReq::ZSCAN_NEXT: jam(); /*empty*/; break; case NextScanReq::ZSCAN_NEXT_COMMIT: case NextScanReq::ZSCAN_COMMIT: jam(); /* --------------------------------------------------------------------- */ /* COMMIT ACTIVE OPERATION. * SEND NEXT SCAN ELEMENT IF IT IS ZCOPY_NEXT_COMMIT. 
* --------------------------------------------------------------------- */ ptrCheckGuard(operationRecPtr, coprecsize, operationrec); fragrecptr.i = operationRecPtr.p->fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); if (!scanPtr.p->scanReadCommittedFlag) { commitOperation(signal); }//if operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; takeOutActiveScanOp(); releaseOpRec(); scanPtr.p->scanOpsAllocated--; if (tscanNextFlag == NextScanReq::ZSCAN_COMMIT) { jam(); signal->theData[0] = 0; /* Success */ /** * signal->theData[0] = 0 indicates NEXT_SCANCONF return * signal for NextScanReq::ZSCAN_COMMIT */ return; }//if break; case NextScanReq::ZSCAN_CLOSE: jam(); fragrecptr.i = scanPtr.p->activeLocalFrag; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); ndbassert(fragrecptr.p->activeScanMask & scanPtr.p->scanMask); /* --------------------------------------------------------------------- * THE SCAN PROCESS IS FINISHED. RELOCK ALL LOCKED EL. * RELESE ALL INVOLVED REC. * ------------------------------------------------------------------- */ releaseScanLab(signal); return; break; default: ndbrequire(false); break; }//switch signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZNOT_CHECK_LCP_STOP; execACC_CHECK_SCAN(signal); return; }//Dbacc::execNEXT_SCANREQ() void Dbacc::checkNextBucketLab(Signal* signal) { Page8Ptr nsPageptr; Page8Ptr gnsPageidptr; Page8Ptr tnsPageidptr; Uint32 tnsElementptr; Uint32 tnsContainerptr; Uint32 tnsIsLocked; Uint32 tnsCopyDir; tnsCopyDir = fragrecptr.p->getPageNumber(scanPtr.p->nextBucketIndex); tnsPageidptr.i = getPagePtr(fragrecptr.p->directory, tnsCopyDir); ptrCheckGuard(tnsPageidptr, cpagesize, page8); gnsPageidptr.i = tnsPageidptr.i; gnsPageidptr.p = tnsPageidptr.p; Uint32 conidx = fragrecptr.p->getPageIndex(scanPtr.p->nextBucketIndex); Page8Ptr pageptr; pageptr.i = gnsPageidptr.i; pageptr.p = gnsPageidptr.p; Uint32 conptr; bool isforward; Uint32 elemptr; Uint32 islocked; if (!getScanElement(pageptr, conidx, conptr, isforward, elemptr, islocked)) { scanPtr.p->nextBucketIndex++; if (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP) { if (scanPtr.p->nextBucketIndex > scanPtr.p->maxBucketIndexToRescan) { /* ---------------------------------------------------------------- */ // We have finished the rescan phase. // We are ready to proceed with the next fragment part. /* ---------------------------------------------------------------- */ jam(); checkNextFragmentLab(signal); return; }//if } else if (scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) { if (fragrecptr.p->level.getTop() < scanPtr.p->nextBucketIndex) { /* ---------------------------------------------------------------- */ // All buckets have been scanned a first time. /* ---------------------------------------------------------------- */ if (scanPtr.p->minBucketIndexToRescan == 0xFFFFFFFF) { jam(); /* -------------------------------------------------------------- */ // We have not had any merges behind the scan. // Thus it is not necessary to perform any rescan any buckets // and we can proceed immediately with the next fragment part. /* --------------------------------------------------------------- */ checkNextFragmentLab(signal); return; } else { jam(); /* --------------------------------------------------------------------------------- */ // Some buckets are in the need of rescanning due to merges that have moved records // from in front of the scan to behind the scan. During the merges we kept track of // which buckets that need a rescan. 
We start with the minimum and end with maximum. /* --------------------------------------------------------------------------------- */ scanPtr.p->nextBucketIndex = scanPtr.p->minBucketIndexToRescan; scanPtr.p->scanBucketState = ScanRec::SECOND_LAP; if (scanPtr.p->maxBucketIndexToRescan > fragrecptr.p->level.getTop()) { jam(); /* --------------------------------------------------------------------------------- */ // If we have had so many merges that the maximum is bigger than the number of buckets // then we will simply satisfy ourselves with scanning to the end. This can only happen // after bringing down the total of buckets to less than half and the minimum should // be 0 otherwise there is some problem. /* --------------------------------------------------------------------------------- */ if (scanPtr.p->minBucketIndexToRescan != 0) { jam(); sendSystemerror(__LINE__); return; }//if scanPtr.p->maxBucketIndexToRescan = fragrecptr.p->level.getTop(); }//if }//if }//if }//if if ((scanPtr.p->scanBucketState == ScanRec::FIRST_LAP) && (scanPtr.p->nextBucketIndex <= scanPtr.p->startNoOfBuckets)) { /* --------------------------------------------------------------------------------- */ // We will only reset the scan indicator on the buckets that existed at the start of the // scan. The others will be handled by the split and merge code. /* --------------------------------------------------------------------------------- */ Uint32 conidx = fragrecptr.p->getPageIndex(scanPtr.p->nextBucketIndex); if (conidx == 0) { jam(); Uint32 pagei = fragrecptr.p->getPageNumber(scanPtr.p->nextBucketIndex); gnsPageidptr.i = getPagePtr(fragrecptr.p->directory, pagei); ptrCheckGuard(gnsPageidptr, cpagesize, page8); }//if ndbassert(!scanPtr.p->isInContainer()); releaseScanBucket(gnsPageidptr, conidx, scanPtr.p->scanMask); }//if signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); return; }//if /* ----------------------------------------------------------------------- */ /* AN ELEMENT WHICH HAVE NOT BEEN SCANNED WAS FOUND. WE WILL PREPARE IT */ /* TO BE SENT TO THE LQH BLOCK FOR FURTHER PROCESSING. */ /* WE ASSUME THERE ARE OPERATION RECORDS AVAILABLE SINCE LQH SHOULD HAVE*/ /* GUARANTEED THAT THROUGH EARLY BOOKING. */ /* ----------------------------------------------------------------------- */ tnsIsLocked = islocked; tnsElementptr = elemptr; tnsContainerptr = conptr; nsPageptr.i = pageptr.i; nsPageptr.p = pageptr.p; seizeOpRec(); initScanOpRec(nsPageptr, tnsContainerptr, tnsElementptr); if (!tnsIsLocked){ if (!scanPtr.p->scanReadCommittedFlag) { jam(); /* Immediate lock grant as element unlocked */ fragrecptr.p->m_lockStats. req_start_imm_ok(scanPtr.p->scanLockMode != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); setlock(nsPageptr, tnsElementptr); insertLockOwnersList(operationRecPtr); operationRecPtr.p->m_op_bits |= Operationrec::OP_STATE_RUNNING | Operationrec::OP_RUN_QUEUE; }//if } else { arrGuard(tnsElementptr, 2048); queOperPtr.i = ElementHeader::getOpPtrI(nsPageptr.p->word32[tnsElementptr]); ptrCheckGuard(queOperPtr, coprecsize, operationrec); if (queOperPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED || queOperPtr.p->localdata.isInvalid()) { jam(); /* ------------------------------------------------------------------ */ // If the lock owner indicates the element is disappeared then // we will not report this tuple. We will continue with the next tuple. 
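      // The operation record is released below and ACC_CHECK_SCAN is sent
      // to this block to continue the scan with the next element.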
/* ------------------------------------------------------------------ */ /* FC : Is this correct, shouldn't we wait for lock holder commit? */ operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; releaseOpRec(); scanPtr.p->scanOpsAllocated--; signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); return; }//if if (!scanPtr.p->scanReadCommittedFlag) { Uint32 return_result; if (scanPtr.p->scanLockMode == ZREADLOCK) { jam(); return_result = placeReadInLockQueue(queOperPtr); } else { jam(); return_result = placeWriteInLockQueue(queOperPtr); }//if if (return_result == ZSERIAL_QUEUE) { /* ----------------------------------------------------------------- * WE PLACED THE OPERATION INTO A SERIAL QUEUE AND THUS WE HAVE TO * WAIT FOR THE LOCK TO BE RELEASED. WE CONTINUE WITH THE NEXT ELEMENT * ----------------------------------------------------------------- */ fragrecptr.p-> m_lockStats.req_start(scanPtr.p->scanLockMode != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); putOpScanLockQue(); /* PUT THE OP IN A QUE IN THE SCAN REC */ signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); return; } else if (return_result != ZPARALLEL_QUEUE) { jam(); /* ----------------------------------------------------------------- */ // The tuple is either not committed yet or a delete in // the same transaction (not possible here since we are a scan). // Thus we simply continue with the next tuple. /* ----------------------------------------------------------------- */ operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; releaseOpRec(); scanPtr.p->scanOpsAllocated--; signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; sendSignal(cownBlockref, GSN_ACC_CHECK_SCAN, signal, 2, JBB); return; }//if ndbassert(return_result == ZPARALLEL_QUEUE); /* We got into the parallel queue - immediate grant */ fragrecptr.p->m_lockStats. req_start_imm_ok(scanPtr.p->scanLockMode != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); }//if }//if /* ----------------------------------------------------------------------- */ // Committed read proceed without caring for locks immediately // down here except when the tuple was deleted permanently // and no new operation has inserted it again. /* ----------------------------------------------------------------------- */ putActiveScanOp(); sendNextScanConf(signal); return; }//Dbacc::checkNextBucketLab() void Dbacc::checkNextFragmentLab(Signal* signal) { scanPtr.p->scanBucketState = ScanRec::SCAN_COMPLETED; // The scan is completed. ACC_CHECK_SCAN will perform all the necessary // checks to see // what the next step is. signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; execACC_CHECK_SCAN(signal); return; }//Dbacc::checkNextFragmentLab() void Dbacc::initScanFragmentPart() { Page8Ptr cnfPageidptr; /* ----------------------------------------------------------------------- */ // Set the active fragment part. // Set the current bucket scanned to the first. // Start with the first lap. // Remember the number of buckets at start of the scan. // Set the minimum and maximum to values that will always be smaller and // larger than. // Reset the scan indicator on the first bucket. 
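  // Mark the scan bit as active on the fragment (activeScanMask).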
/* ----------------------------------------------------------------------- */ ndbassert(scanPtr.p->activeLocalFrag == RNIL); scanPtr.p->activeLocalFrag = fragrecptr.i; scanPtr.p->nextBucketIndex = 0; /* INDEX OF SCAN BUCKET */ ndbassert(!scanPtr.p->isInContainer()); scanPtr.p->scanBucketState = ScanRec::FIRST_LAP; scanPtr.p->startNoOfBuckets = fragrecptr.p->level.getTop(); scanPtr.p->minBucketIndexToRescan = 0xFFFFFFFF; scanPtr.p->maxBucketIndexToRescan = 0; cnfPageidptr.i = getPagePtr(fragrecptr.p->directory, 0); ptrCheckGuard(cnfPageidptr, cpagesize, page8); const Uint32 conidx = fragrecptr.p->getPageIndex(scanPtr.p->nextBucketIndex); ndbassert(!(fragrecptr.p->activeScanMask & scanPtr.p->scanMask)); ndbassert(!scanPtr.p->isInContainer()); releaseScanBucket(cnfPageidptr, conidx, scanPtr.p->scanMask); fragrecptr.p->activeScanMask |= scanPtr.p->scanMask; }//Dbacc::initScanFragmentPart() /* ------------------------------------------------------------------------- * FLAG = 6 = ZCOPY_CLOSE THE SCAN PROCESS IS READY OR ABORTED. * ALL OPERATION IN THE ACTIVE OR WAIT QUEUE ARE RELEASED, * SCAN FLAG OF ROOT FRAG IS RESET AND THE SCAN RECORD IS RELEASED. * ------------------------------------------------------------------------ */ void Dbacc::releaseScanLab(Signal* signal) { releaseAndCommitActiveOps(signal); releaseAndCommitQueuedOps(signal); releaseAndAbortLockedOps(signal); fragrecptr.i = scanPtr.p->activeLocalFrag; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); ndbassert(fragrecptr.p->activeScanMask & scanPtr.p->scanMask); /** * Dont leave partial scanned bucket as partial scanned. * Elements scanbits must match containers scanbits. */ if ((scanPtr.p->scanBucketState == ScanRec::FIRST_LAP && scanPtr.p->nextBucketIndex <= fragrecptr.p->level.getTop()) || (scanPtr.p->scanBucketState == ScanRec::SECOND_LAP && scanPtr.p->nextBucketIndex <= scanPtr.p->maxBucketIndexToRescan)) { jam(); Uint32 conidx = fragrecptr.p->getPageIndex(scanPtr.p->nextBucketIndex); Uint32 pagei = fragrecptr.p->getPageNumber(scanPtr.p->nextBucketIndex); Page8Ptr pageptr; pageptr.i = getPagePtr(fragrecptr.p->directory, pagei); ptrCheckGuard(pageptr, cpagesize, page8); Uint32 inPageI; Uint32 inConptr; if(scanPtr.p->getContainer(inPageI, inConptr)) { Page8Ptr page; page.i = inPageI; ptrCheckGuard(page, cpagesize, page8); ContainerHeader conhead(page.p->word32[inConptr]); scanPtr.p->leaveContainer(inPageI, inConptr); page.p->clearScanContainer(scanPtr.p->scanMask, inConptr); if (!page.p->checkScanContainer(inConptr)) { conhead.clearScanInProgress(); page.p->word32[inConptr] = Uint32(conhead); } } releaseScanBucket(pageptr, conidx, scanPtr.p->scanMask); } for (tmp = 0; tmp < MAX_PARALLEL_SCANS_PER_FRAG; tmp++) { jam(); if (fragrecptr.p->scan[tmp] == scanPtr.i) { jam(); fragrecptr.p->scan[tmp] = RNIL; }//if }//for // Stops the heartbeat Uint32 blockNo = refToMain(scanPtr.p->scanUserblockref); signal->theData[0] = scanPtr.p->scanUserptr; signal->theData[1] = RNIL; signal->theData[2] = RNIL; fragrecptr.p->activeScanMask &= ~scanPtr.p->scanMask; scanPtr.p->activeLocalFrag = RNIL; releaseScanRec(); EXECUTE_DIRECT(blockNo, GSN_NEXT_SCANCONF, signal, 3); return; }//Dbacc::releaseScanLab() void Dbacc::releaseAndCommitActiveOps(Signal* signal) { OperationrecPtr trsoOperPtr; operationRecPtr.i = scanPtr.p->scanFirstActiveOp; while (operationRecPtr.i != RNIL) { jam(); ptrCheckGuard(operationRecPtr, coprecsize, operationrec); trsoOperPtr.i = operationRecPtr.p->nextOp; fragrecptr.i = operationRecPtr.p->fragptr; ptrCheckGuard(fragrecptr, 
cfragmentsize, fragmentrec); if (!scanPtr.p->scanReadCommittedFlag) { jam(); if ((operationRecPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_EXECUTED) { commitOperation(signal); } else { abortOperation(signal); } }//if operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; takeOutActiveScanOp(); releaseOpRec(); scanPtr.p->scanOpsAllocated--; operationRecPtr.i = trsoOperPtr.i; }//if }//Dbacc::releaseAndCommitActiveOps() void Dbacc::releaseAndCommitQueuedOps(Signal* signal) { OperationrecPtr trsoOperPtr; operationRecPtr.i = scanPtr.p->scanFirstQueuedOp; while (operationRecPtr.i != RNIL) { jam(); ptrCheckGuard(operationRecPtr, coprecsize, operationrec); trsoOperPtr.i = operationRecPtr.p->nextOp; fragrecptr.i = operationRecPtr.p->fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); if (!scanPtr.p->scanReadCommittedFlag) { jam(); if ((operationRecPtr.p->m_op_bits & Operationrec::OP_STATE_MASK) == Operationrec::OP_STATE_EXECUTED) { commitOperation(signal); } else { abortOperation(signal); } }//if operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; takeOutReadyScanQueue(); releaseOpRec(); scanPtr.p->scanOpsAllocated--; operationRecPtr.i = trsoOperPtr.i; }//if }//Dbacc::releaseAndCommitQueuedOps() void Dbacc::releaseAndAbortLockedOps(Signal* signal) { OperationrecPtr trsoOperPtr; operationRecPtr.i = scanPtr.p->scanFirstLockedOp; while (operationRecPtr.i != RNIL) { jam(); ptrCheckGuard(operationRecPtr, coprecsize, operationrec); trsoOperPtr.i = operationRecPtr.p->nextOp; fragrecptr.i = operationRecPtr.p->fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); if (!scanPtr.p->scanReadCommittedFlag) { jam(); abortOperation(signal); }//if takeOutScanLockQueue(scanPtr.i); operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; releaseOpRec(); scanPtr.p->scanOpsAllocated--; operationRecPtr.i = trsoOperPtr.i; }//if }//Dbacc::releaseAndAbortLockedOps() /* 3.18.3 ACC_CHECK_SCAN */ /* ******************--------------------------------------------------------------- */ /* ACC_CHECK_SCAN */ /* ENTER ACC_CHECK_SCAN WITH */ /* SCAN_PTR */ /* ******************--------------------------------------------------------------- */ /* ******************--------------------------------------------------------------- */ /* ACC_CHECK_SCAN */ /* ******************------------------------------+ */ void Dbacc::execACC_CHECK_SCAN(Signal* signal) { Uint32 TcheckLcpStop; jamEntry(); scanPtr.i = signal->theData[0]; TcheckLcpStop = signal->theData[1]; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); while (scanPtr.p->scanFirstQueuedOp != RNIL) { jam(); //--------------------------------------------------------------------- // An operation has been released from the lock queue. // We are in the parallel queue of this tuple. We are // ready to report the tuple now. //------------------------------------------------------------------------ operationRecPtr.i = scanPtr.p->scanFirstQueuedOp; ptrCheckGuard(operationRecPtr, coprecsize, operationrec); takeOutReadyScanQueue(); fragrecptr.i = operationRecPtr.p->fragptr; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); /* Scan op that had to wait for a lock is now runnable */ fragrecptr.p->m_lockStats.wait_ok(scanPtr.p->scanLockMode != ZREADLOCK, operationRecPtr.p->m_lockTime, getHighResTimer()); if (operationRecPtr.p->m_op_bits & Operationrec::OP_ELEMENT_DISAPPEARED) { jam(); /** * Despite aborting, this is an 'ok' wait. * This op is waking up to find the entity it locked has gone. 
* As a 'QueuedOp', we are in the parallel queue of the element, so * at the abort below we don't double-count abort as a failure. */ abortOperation(signal); operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; releaseOpRec(); scanPtr.p->scanOpsAllocated--; continue; }//if putActiveScanOp(); sendNextScanConf(signal); return; }//while if ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) && (scanPtr.p->scanLockHeld == 0)) { jam(); //---------------------------------------------------------------------------- // The scan is now completed and there are no more locks outstanding. Thus we // we will report the scan as completed to LQH. //---------------------------------------------------------------------------- signal->theData[0] = scanPtr.p->scanUserptr; signal->theData[1] = RNIL; signal->theData[2] = RNIL; EXECUTE_DIRECT(refToMain(scanPtr.p->scanUserblockref), GSN_NEXT_SCANCONF, signal, 3); return; }//if if (TcheckLcpStop == AccCheckScan::ZCHECK_LCP_STOP) { //--------------------------------------------------------------------------- // To ensure that the block of the fragment occurring at the start of a local // checkpoint is not held for too long we insert a release and reacquiring of // that lock here. This is performed in LQH. If we are blocked or if we have // requested a sleep then we will receive RNIL in the returning signal word. //--------------------------------------------------------------------------- signal->theData[0] = scanPtr.p->scanUserptr; signal->theData[1] = ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) || (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED)); EXECUTE_DIRECT(DBLQH, GSN_CHECK_LCP_STOP, signal, 2); jamEntry(); if (signal->theData[0] == RNIL) { jam(); return; }//if }//if /** * If we have more than max locks held OR * scan is completed AND at least one lock held * - Inform LQH about this condition */ if ((scanPtr.p->scanLockHeld >= ZSCAN_MAX_LOCK) || (cfreeopRec == RNIL) || ((scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) && (scanPtr.p->scanLockHeld > 0))) { jam(); signal->theData[0] = scanPtr.p->scanUserptr; signal->theData[1] = RNIL; // No operation is returned signal->theData[2] = 512; // MASV sendSignal(scanPtr.p->scanUserblockref, GSN_NEXT_SCANCONF, signal, 3, JBB); return; } if (scanPtr.p->scanBucketState == ScanRec::SCAN_COMPLETED) { jam(); signal->theData[0] = scanPtr.i; signal->theData[1] = AccCheckScan::ZCHECK_LCP_STOP; execACC_CHECK_SCAN(signal); return; }//if fragrecptr.i = scanPtr.p->activeLocalFrag; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); ndbassert(fragrecptr.p->activeScanMask & scanPtr.p->scanMask); checkNextBucketLab(signal); return; }//Dbacc::execACC_CHECK_SCAN() /* ******************---------------------------------------------------- */ /* ACC_TO_REQ PERFORM A TAKE OVER */ /* ******************-------------------+ */ /* SENDER: LQH, LEVEL B */ void Dbacc::execACC_TO_REQ(Signal* signal) { OperationrecPtr tatrOpPtr; jamEntry(); tatrOpPtr.i = signal->theData[1]; /* OPER PTR OF ACC */ ptrCheckGuard(tatrOpPtr, coprecsize, operationrec); /* Only scan locks can be taken over */ if ((tatrOpPtr.p->m_op_bits & Operationrec::OP_MASK) == ZSCAN_OP) { if (signal->theData[2] == tatrOpPtr.p->transId1 && signal->theData[3] == tatrOpPtr.p->transId2) { /* If lock is from same transaction as take over, lock can * be taken over several times. 
* * This occurs for example in this scenario: * * create table t (x int primary key, y int); * insert into t (x, y) values (1, 0); * begin; * # Scan and lock rows in t, update using take over operation. * update t set y = 1; * # The second update on same row, will take over the same lock as previous update * update t set y = 2; * commit; */ return; } else if (tatrOpPtr.p->m_op_bits & Operationrec::OP_LOCK_OWNER && tatrOpPtr.p->nextParallelQue == RNIL) { /* If lock is taken over from other transaction it must be * the only one in the parallel queue. Otherwise one could * end up with mixing operations from different transaction * in a parallel queue. */ tatrOpPtr.p->transId1 = signal->theData[2]; tatrOpPtr.p->transId2 = signal->theData[3]; validate_lock_queue(tatrOpPtr); return; } } jam(); signal->theData[0] = cminusOne; signal->theData[1] = ZTO_OP_STATE_ERROR; return; }//Dbacc::execACC_TO_REQ() /** --------------------------------------------------------------------------- * Get next unscanned element in fragment. * * @param[in,out] pageptr Page of first container to scan, on return * container for found element. * @param[in,out] conidx Index within page for first container to scan, on * return container for found element. * @param[out] conptr Pointer withing page of first container to scan, * on return container for found element. * @param[in,out] isforward Direction of first container to scan, on return * the direction of container for found element. * @param[out] elemptr Pointer within page of next element in scan. * @param[out] islocked Indicates if element is locked. * @return Return true if an unscanned element was found. * ------------------------------------------------------------------------- */ bool Dbacc::getScanElement(Page8Ptr& pageptr, Uint32& conidx, Uint32& conptr, bool& isforward, Uint32& elemptr, Uint32& islocked) const { /* Input is always the bucket header container */ isforward = true; /* Check if scan is already active in a container */ Uint32 inPageI; Uint32 inConptr; if (scanPtr.p->getContainer(inPageI, inConptr)) { // TODO: in VM_TRACE double check container is in bucket! pageptr.i = inPageI; ptrCheckGuard(pageptr, cpagesize, page8); conptr = inConptr; ContainerHeader conhead(pageptr.p->word32[conptr]); ndbassert(conhead.isScanInProgress()); ndbassert((conhead.getScanBits() & scanPtr.p->scanMask)==0); getContainerIndex(conptr, conidx, isforward); } else // if first bucket is not in scan nor scanned , start it { Uint32 conptr = getContainerPtr(conidx, isforward); ContainerHeader containerhead(pageptr.p->word32[conptr]); if (!(containerhead.getScanBits() & scanPtr.p->scanMask)) { if(!containerhead.isScanInProgress()) { containerhead.setScanInProgress(); pageptr.p->word32[conptr] = containerhead; } scanPtr.p->enterContainer(pageptr.i, conptr); pageptr.p->setScanContainer(scanPtr.p->scanMask, conptr); } } NEXTSEARCH_SCAN_LOOP: conptr = getContainerPtr(conidx, isforward); ContainerHeader containerhead(pageptr.p->word32[conptr]); Uint32 conlen = containerhead.getLength(); if (containerhead.getScanBits() & scanPtr.p->scanMask) { // Already scanned, go to next. 
ndbassert(!pageptr.p->checkScans(scanPtr.p->scanMask, conptr)); } else { ndbassert(containerhead.isScanInProgress()); if (searchScanContainer(pageptr, conptr, isforward, conlen, elemptr, islocked)) { jam(); return true; }//if } if ((containerhead.getScanBits() & scanPtr.p->scanMask) == 0) { containerhead.setScanBits(scanPtr.p->scanMask); scanPtr.p->leaveContainer(pageptr.i, conptr); pageptr.p->clearScanContainer(scanPtr.p->scanMask, conptr); if (!pageptr.p->checkScanContainer(conptr)) { containerhead.clearScanInProgress(); } pageptr.p->word32[conptr] = Uint32(containerhead); } if (containerhead.haveNext()) { jam(); nextcontainerinfo(pageptr, conptr, containerhead, conidx, isforward); conptr=getContainerPtr(conidx,isforward); containerhead=pageptr.p->word32[conptr]; if ((containerhead.getScanBits() & scanPtr.p->scanMask) == 0) { if(!containerhead.isScanInProgress()) { containerhead.setScanInProgress(); } pageptr.p->word32[conptr] = Uint32(containerhead); scanPtr.p->enterContainer(pageptr.i, conptr); pageptr.p->setScanContainer(scanPtr.p->scanMask, conptr); } // else already scanned, get next goto NEXTSEARCH_SCAN_LOOP; }//if pageptr.p->word32[conptr] = Uint32(containerhead); return false; }//Dbacc::getScanElement() /* --------------------------------------------------------------------------------- */ /* INIT_SCAN_OP_REC */ /* --------------------------------------------------------------------------------- */ void Dbacc::initScanOpRec(Page8Ptr pageptr, Uint32 conptr, Uint32 elemptr) const { Uint32 tisoLocalPtr; Uint32 localkeylen = fragrecptr.p->localkeylen; scanPtr.p->scanOpsAllocated++; Uint32 opbits = 0; opbits |= ZSCAN_OP; opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_LOCK_MODE : 0; opbits |= scanPtr.p->scanLockMode ? (Uint32) Operationrec::OP_ACC_LOCK_MODE : 0; opbits |= (scanPtr.p->scanReadCommittedFlag ? 
(Uint32) Operationrec::OP_EXECUTED_DIRTY_READ : 0); opbits |= Operationrec::OP_COMMIT_DELETE_CHECK; operationRecPtr.p->userptr = RNIL; operationRecPtr.p->scanRecPtr = scanPtr.i; operationRecPtr.p->fid = fragrecptr.p->myfid; operationRecPtr.p->fragptr = fragrecptr.i; operationRecPtr.p->nextParallelQue = RNIL; operationRecPtr.p->prevParallelQue = RNIL; operationRecPtr.p->nextSerialQue = RNIL; operationRecPtr.p->prevSerialQue = RNIL; operationRecPtr.p->transId1 = scanPtr.p->scanTrid1; operationRecPtr.p->transId2 = scanPtr.p->scanTrid2; operationRecPtr.p->elementContainer = conptr; operationRecPtr.p->elementPointer = elemptr; operationRecPtr.p->elementPage = pageptr.i; operationRecPtr.p->m_op_bits = opbits; tisoLocalPtr = elemptr + 1; arrGuard(tisoLocalPtr, 2048); if(ElementHeader::getUnlocked(pageptr.p->word32[elemptr])) { Local_key key; key.m_page_no = pageptr.p->word32[tisoLocalPtr]; key.m_page_idx = ElementHeader::getPageIdx(pageptr.p->word32[elemptr]); operationRecPtr.p->localdata = key; } else { OperationrecPtr oprec; oprec.i = ElementHeader::getOpPtrI(pageptr.p->word32[elemptr]); ptrCheckGuard(oprec, coprecsize, operationrec); #if defined(VM_TRACE) || defined(ERROR_INSERT) ndbrequire(oprec.p->localdata.m_page_no == pageptr.p->word32[tisoLocalPtr]); #endif operationRecPtr.p->localdata = oprec.p->localdata; } tisoLocalPtr = tisoLocalPtr + 1; ndbrequire(localkeylen == 1) operationRecPtr.p->hashValue.clear(); operationRecPtr.p->tupkeylen = fragrecptr.p->keyLength; operationRecPtr.p->xfrmtupkeylen = 0; // not used NdbTick_Invalidate(&operationRecPtr.p->m_lockTime); }//Dbacc::initScanOpRec() /* ---------------------------------------------------------------------------- * Get information of next container. * * @param[in,out] pageptr Page of current container, and on return to * next container. * @param[in] conptr Pointer within page to current container. * @param[in] containerheader Header of current container. * @param[out] nextConidx Index within page to next container. * @param[out] nextIsforward Direction of next container. 
* ------------------------------------------------------------------------- */ void Dbacc::nextcontainerinfo(Page8Ptr& pageptr, Uint32 conptr, ContainerHeader containerhead, Uint32& nextConidx, bool& nextIsforward) const { /* THE NEXT CONTAINER IS IN THE SAME PAGE */ nextConidx = containerhead.getNextIndexNumber(); if (containerhead.getNextEnd() == ZLEFT) { jam(); nextIsforward = true; } else if (containerhead.getNextEnd() == ZRIGHT) { jam(); nextIsforward = false; } else { ndbrequire(containerhead.getNextEnd() == ZLEFT || containerhead.getNextEnd() == ZRIGHT); } if (!containerhead.isNextOnSamePage()) { jam(); /* NEXT CONTAINER IS IN AN OVERFLOW PAGE */ arrGuard(conptr + 1, 2048); pageptr.i = pageptr.p->word32[conptr + 1]; ptrCheckGuard(pageptr, cpagesize, page8); }//if }//Dbacc::nextcontainerinfo() /* --------------------------------------------------------------------------------- */ /* PUT_ACTIVE_SCAN_OP */ /* --------------------------------------------------------------------------------- */ void Dbacc::putActiveScanOp() const { OperationrecPtr pasOperationRecPtr; pasOperationRecPtr.i = scanPtr.p->scanFirstActiveOp; if (pasOperationRecPtr.i != RNIL) { jam(); ptrCheckGuard(pasOperationRecPtr, coprecsize, operationrec); pasOperationRecPtr.p->prevOp = operationRecPtr.i; }//if operationRecPtr.p->nextOp = pasOperationRecPtr.i; operationRecPtr.p->prevOp = RNIL; scanPtr.p->scanFirstActiveOp = operationRecPtr.i; }//Dbacc::putActiveScanOp() /** * putOpScanLockQueue * * Description: Put an operation in the doubly linked * lock list on a scan record. The list is used to * keep track of which operations belonging * to the scan are put in serial lock list of another * operation * * @note Use takeOutScanLockQueue to remove an operation * from the list * */ void Dbacc::putOpScanLockQue() const { #ifdef VM_TRACE // DEBUG CODE // Check that there are as many operations in the lockqueue as // scanLockHeld indicates OperationrecPtr tmpOp; int numLockedOpsBefore = 0; tmpOp.i = scanPtr.p->scanFirstLockedOp; while(tmpOp.i != RNIL){ numLockedOpsBefore++; ptrCheckGuard(tmpOp, coprecsize, operationrec); if (tmpOp.p->nextOp == RNIL) { ndbrequire(tmpOp.i == scanPtr.p->scanLastLockedOp); } tmpOp.i = tmpOp.p->nextOp; } ndbrequire(numLockedOpsBefore==scanPtr.p->scanLockHeld); #endif OperationrecPtr pslOperationRecPtr; ScanRec theScanRec; theScanRec = *scanPtr.p; pslOperationRecPtr.i = scanPtr.p->scanLastLockedOp; operationRecPtr.p->prevOp = pslOperationRecPtr.i; operationRecPtr.p->nextOp = RNIL; if (pslOperationRecPtr.i != RNIL) { jam(); ptrCheckGuard(pslOperationRecPtr, coprecsize, operationrec); pslOperationRecPtr.p->nextOp = operationRecPtr.i; } else { jam(); scanPtr.p->scanFirstLockedOp = operationRecPtr.i; }//if scanPtr.p->scanLastLockedOp = operationRecPtr.i; scanPtr.p->scanLockHeld++; }//Dbacc::putOpScanLockQue() /* --------------------------------------------------------------------------------- */ /* PUT_READY_SCAN_QUEUE */ /* --------------------------------------------------------------------------------- */ void Dbacc::putReadyScanQueue(Uint32 scanRecIndex) const { OperationrecPtr prsOperationRecPtr; ScanRecPtr TscanPtr; TscanPtr.i = scanRecIndex; ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); prsOperationRecPtr.i = TscanPtr.p->scanLastQueuedOp; operationRecPtr.p->prevOp = prsOperationRecPtr.i; operationRecPtr.p->nextOp = RNIL; TscanPtr.p->scanLastQueuedOp = operationRecPtr.i; if (prsOperationRecPtr.i != RNIL) { jam(); ptrCheckGuard(prsOperationRecPtr, coprecsize, operationrec); 
prsOperationRecPtr.p->nextOp = operationRecPtr.i; } else { jam(); TscanPtr.p->scanFirstQueuedOp = operationRecPtr.i; }//if }//Dbacc::putReadyScanQueue() /** --------------------------------------------------------------------------- * Reset scan bit for all elements within a bucket. * * Which scan bit are determined by scanPtr. * * @param[in] pageptr Page of first container of bucket * @param[in] conidx Index within page to first container of bucket * @param[in] scanMask Scan bit mask for scan bits that should be cleared * ------------------------------------------------------------------------- */ void Dbacc::releaseScanBucket(Page8Ptr pageptr, Uint32 conidx, Uint16 scanMask) const { scanMask |= (~fragrecptr.p->activeScanMask & ((1 << MAX_PARALLEL_SCANS_PER_FRAG) - 1)); bool isforward = true; NEXTRELEASESCANLOOP: Uint32 conptr = getContainerPtr(conidx, isforward); ContainerHeader containerhead(pageptr.p->word32[conptr]); Uint32 conlen = containerhead.getLength(); const Uint16 isScanned = containerhead.getScanBits() & scanMask; releaseScanContainer(pageptr, conptr, isforward, conlen, scanMask, isScanned); if (isScanned) { containerhead.clearScanBits(isScanned); pageptr.p->word32[conptr] = Uint32(containerhead); } if (containerhead.getNextEnd() != 0) { jam(); nextcontainerinfo(pageptr, conptr, containerhead, conidx, isforward); goto NEXTRELEASESCANLOOP; }//if }//Dbacc::releaseScanBucket() /** -------------------------------------------------------------------------- * Reset scan bit of the element for each element in a container. * Which scan bit are determined by scanPtr. * * @param[in] pageptr Pointer to page holding container. * @param[in] conptr Pointer within page to container. * @param[in] forward Container growing direction. * @param[in] conlen Containers current size. 
* @param[in] scanMask Scan bits that should be cleared if set * @param[in] allScanned All elements should have this bits set (debug) * ------------------------------------------------------------------------- */ void Dbacc::releaseScanContainer(const Page8Ptr pageptr, const Uint32 conptr, const bool isforward, const Uint32 conlen, const Uint16 scanMask, const Uint16 allScanned) const { OperationrecPtr rscOperPtr; Uint32 trscElemStep; Uint32 trscElementptr; Uint32 trscElemlens; Uint32 trscElemlen; if (conlen < 4) { if (conlen != Container::HEADER_SIZE) { jam(); sendSystemerror(__LINE__); }//if return; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */ }//if trscElemlens = conlen - Container::HEADER_SIZE; trscElemlen = fragrecptr.p->elementLength; if (isforward) { jam(); trscElementptr = conptr + Container::HEADER_SIZE; trscElemStep = trscElemlen; } else { jam(); trscElementptr = conptr - trscElemlen; trscElemStep = 0 - trscElemlen; }//if if (trscElemlens % trscElemlen != 0) { jam(); sendSystemerror(__LINE__); }//if }//Dbacc::releaseScanContainer() /* --------------------------------------------------------------------------------- */ /* RELEASE_SCAN_REC */ /* --------------------------------------------------------------------------------- */ void Dbacc::releaseScanRec() { // Check that all ops this scan has allocated have been // released ndbrequire(scanPtr.p->scanOpsAllocated==0); // Check that all locks this scan might have aquired // have been properly released ndbrequire(scanPtr.p->scanLockHeld == 0); ndbrequire(scanPtr.p->scanFirstLockedOp == RNIL); ndbrequire(scanPtr.p->scanLastLockedOp == RNIL); // Check that all active operations have been // properly released ndbrequire(scanPtr.p->scanFirstActiveOp == RNIL); // Check that all queued operations have been // properly released ndbrequire(scanPtr.p->scanFirstQueuedOp == RNIL); ndbrequire(scanPtr.p->scanLastQueuedOp == RNIL); // Put scan record in free list scanPtr.p->scanNextfreerec = cfirstFreeScanRec; scanPtr.p->scanState = ScanRec::SCAN_DISCONNECT; cfirstFreeScanRec = scanPtr.i; }//Dbacc::releaseScanRec() /* --------------------------------------------------------------------------------- */ /* SEARCH_SCAN_CONTAINER */ /* INPUT: TSSC_CONTAINERLEN */ /* TSSC_CONTAINERPTR */ /* TSSC_ISFORWARD */ /* SSC_PAGEIDPTR */ /* SCAN_PTR */ /* OUTPUT: TSSC_IS_LOCKED */ /* */ /* DESCRIPTION: SEARCH IN A CONTAINER TO FIND THE NEXT SCAN ELEMENT. */ /* TO DO THIS THE SCAN BIT OF THE ELEMENT HEADER IS CHECKED. 
IF */ /* THIS BIT IS ZERO, IT IS SET TO ONE AND THE ELEMENT IS RETURNED.*/ /* --------------------------------------------------------------------------------- */ bool Dbacc::searchScanContainer(Page8Ptr pageptr, Uint32 conptr, bool isforward, Uint32 conlen, Uint32& elemptr, Uint32& islocked) const { OperationrecPtr operPtr; Uint32 elemlens; Uint32 elemlen; Uint32 elemStep; Uint32 Telemptr; Uint32 Tislocked; #ifdef VM_TRACE ContainerHeader chead(pageptr.p->word32[conptr]); ndbassert((chead.getScanBits()&scanPtr.p->scanMask)==0); ndbassert(chead.isScanInProgress()); ndbassert(scanPtr.p->isInContainer()); { Uint32 pagei; Uint32 cptr; ndbassert(scanPtr.p->getContainer(pagei, cptr)); ndbassert(pageptr.i==pagei); ndbassert(conptr==cptr); } #endif if (conlen < 4) { jam(); return false; /* 2 IS THE MINIMUM SIZE OF THE ELEMENT */ }//if elemlens = conlen - Container::HEADER_SIZE; elemlen = fragrecptr.p->elementLength; /* LENGTH OF THE ELEMENT */ if (isforward) { jam(); Telemptr = conptr + Container::HEADER_SIZE; elemStep = elemlen; } else { jam(); Telemptr = conptr - elemlen; elemStep = 0 - elemlen; }//if SCANELEMENTLOOP001: arrGuard(Telemptr, 2048); const Uint32 eh = pageptr.p->word32[Telemptr]; bool found=false; if (!scanPtr.p->isScanned(Telemptr)) { found=true; scanPtr.p->setScanned(Telemptr); } Tislocked = ElementHeader::getLocked(eh); if (found) { elemptr = Telemptr; islocked = Tislocked; return true; } ndbassert(!found); /* THE ELEMENT IS ALREADY SENT. */ /* SEARCH FOR NEXT ONE */ elemlens = elemlens - elemlen; if (elemlens > 1) { jam(); Telemptr = Telemptr + elemStep; goto SCANELEMENTLOOP001; }//if return false; }//Dbacc::searchScanContainer() /* --------------------------------------------------------------------------------- */ /* SEND THE RESPONSE NEXT_SCANCONF AND POSSIBLE KEYINFO SIGNALS AS WELL. */ /* --------------------------------------------------------------------------------- */ void Dbacc::sendNextScanConf(Signal* signal) { const Local_key localKey = operationRecPtr.p->localdata; c_tup->prepareTUPKEYREQ(localKey.m_page_no, localKey.m_page_idx, fragrecptr.p->tupFragptr); const Uint32 scanUserPtr = scanPtr.p->scanUserptr; const Uint32 opPtrI = operationRecPtr.i; const Uint32 fid = operationRecPtr.p->fid; BlockReference blockRef = scanPtr.p->scanUserblockref; jam(); /** --------------------------------------------------------------------- * LQH WILL NOT HAVE ANY USE OF THE TUPLE KEY LENGTH IN THIS CASE AND * SO WE DO NOT PROVIDE IT. IN THIS CASE THESE VALUES ARE UNDEFINED. * ---------------------------------------------------------------------- */ signal->theData[0] = scanUserPtr; signal->theData[1] = opPtrI; signal->theData[2] = fid; signal->theData[3] = localKey.m_page_no; signal->theData[4] = localKey.m_page_idx; EXECUTE_DIRECT(refToMain(blockRef), GSN_NEXT_SCANCONF, signal, 5); return; }//Dbacc::sendNextScanConf() /** --------------------------------------------------------------------------- * Sets lock on an element. * * Information about the element is copied from element head into operation * record. A pointer to operation record are inserted in element header * instead. * * @param[in] pageptr Pointer to page holding element. * @param[in] elemptr Pointer within page to element. 
* ------------------------------------------------------------------------- */ void Dbacc::setlock(Page8Ptr pageptr, Uint32 elemptr) const { Uint32 tselTmp1; arrGuard(elemptr, 2048); tselTmp1 = pageptr.p->word32[elemptr]; operationRecPtr.p->reducedHashValue = ElementHeader::getReducedHashValue(tselTmp1); tselTmp1 = ElementHeader::setLocked(operationRecPtr.i); dbgWord32(pageptr, elemptr, tselTmp1); pageptr.p->word32[elemptr] = tselTmp1; }//Dbacc::setlock() /* --------------------------------------------------------------------------------- */ /* TAKE_OUT_ACTIVE_SCAN_OP */ /* DESCRIPTION: AN ACTIVE SCAN OPERATION IS BELOGED TO AN ACTIVE LIST OF THE */ /* SCAN RECORD. BY THIS SUBRUTIN THE LIST IS UPDATED. */ /* --------------------------------------------------------------------------------- */ void Dbacc::takeOutActiveScanOp() const { OperationrecPtr tasOperationRecPtr; if (operationRecPtr.p->prevOp != RNIL) { jam(); tasOperationRecPtr.i = operationRecPtr.p->prevOp; ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec); tasOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp; } else { jam(); scanPtr.p->scanFirstActiveOp = operationRecPtr.p->nextOp; }//if if (operationRecPtr.p->nextOp != RNIL) { jam(); tasOperationRecPtr.i = operationRecPtr.p->nextOp; ptrCheckGuard(tasOperationRecPtr, coprecsize, operationrec); tasOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp; }//if }//Dbacc::takeOutActiveScanOp() /** * takeOutScanLockQueue * * Description: Take out an operation from the doubly linked * lock list on a scan record. * * @note Use putOpScanLockQue to insert a operation in * the list * */ void Dbacc::takeOutScanLockQueue(Uint32 scanRecIndex) const { OperationrecPtr tslOperationRecPtr; ScanRecPtr TscanPtr; TscanPtr.i = scanRecIndex; ptrCheckGuard(TscanPtr, cscanRecSize, scanRec); if (operationRecPtr.p->prevOp != RNIL) { jam(); tslOperationRecPtr.i = operationRecPtr.p->prevOp; ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec); tslOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp; } else { jam(); // Check that first are pointing at operation to take out ndbrequire(TscanPtr.p->scanFirstLockedOp==operationRecPtr.i); TscanPtr.p->scanFirstLockedOp = operationRecPtr.p->nextOp; }//if if (operationRecPtr.p->nextOp != RNIL) { jam(); tslOperationRecPtr.i = operationRecPtr.p->nextOp; ptrCheckGuard(tslOperationRecPtr, coprecsize, operationrec); tslOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp; } else { jam(); // Check that last are pointing at operation to take out ndbrequire(TscanPtr.p->scanLastLockedOp==operationRecPtr.i); TscanPtr.p->scanLastLockedOp = operationRecPtr.p->prevOp; }//if TscanPtr.p->scanLockHeld--; #ifdef VM_TRACE // DEBUG CODE // Check that there are as many operations in the lockqueue as // scanLockHeld indicates OperationrecPtr tmpOp; int numLockedOps = 0; tmpOp.i = TscanPtr.p->scanFirstLockedOp; while(tmpOp.i != RNIL){ numLockedOps++; ptrCheckGuard(tmpOp, coprecsize, operationrec); if (tmpOp.p->nextOp == RNIL) { ndbrequire(tmpOp.i == TscanPtr.p->scanLastLockedOp); } tmpOp.i = tmpOp.p->nextOp; } ndbrequire(numLockedOps==TscanPtr.p->scanLockHeld); #endif }//Dbacc::takeOutScanLockQueue() /* --------------------------------------------------------------------------------- */ /* TAKE_OUT_READY_SCAN_QUEUE */ /* --------------------------------------------------------------------------------- */ void Dbacc::takeOutReadyScanQueue() const { OperationrecPtr trsOperationRecPtr; if (operationRecPtr.p->prevOp != RNIL) { jam(); trsOperationRecPtr.i = 
operationRecPtr.p->prevOp; ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec); trsOperationRecPtr.p->nextOp = operationRecPtr.p->nextOp; } else { jam(); scanPtr.p->scanFirstQueuedOp = operationRecPtr.p->nextOp; }//if if (operationRecPtr.p->nextOp != RNIL) { jam(); trsOperationRecPtr.i = operationRecPtr.p->nextOp; ptrCheckGuard(trsOperationRecPtr, coprecsize, operationrec); trsOperationRecPtr.p->prevOp = operationRecPtr.p->prevOp; } else { jam(); scanPtr.p->scanLastQueuedOp = operationRecPtr.p->nextOp; }//if }//Dbacc::takeOutReadyScanQueue() /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* */ /* END OF SCAN MODULE */ /* */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ bool Dbacc::getfragmentrec(FragmentrecPtr& rootPtr, Uint32 fid) { for (Uint32 i = 0; i < NDB_ARRAY_SIZE(tabptr.p->fragholder); i++) { jam(); if (tabptr.p->fragholder[i] == fid) { jam(); fragrecptr.i = tabptr.p->fragptrholder[i]; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); return true; }//if }//for return false; }//Dbacc::getrootfragmentrec() /* --------------------------------------------------------------------------------- */ /* INIT_OVERPAGE */ /* INPUT. IOP_PAGEPTR, POINTER TO AN OVERFLOW PAGE RECORD */ /* DESCRIPTION: CONTAINERS AND FREE LISTS OF THE PAGE, GET INITIALE VALUE */ /* ACCORDING TO LH3 AND PAGE STRUCTOR DESCRIPTION OF NDBACC BLOCK */ /* --------------------------------------------------------------------------------- */ void Dbacc::initOverpage(Page8Ptr iopPageptr) { Uint32 tiopPrevFree; Uint32 tiopNextFree; // Clear page, but keep page list entries // Setting word32[ALLOC_CONTAINERS] and word32[CHECK_SUM] to zero is essential Uint32 nextPage = iopPageptr.p->word32[Page8::NEXT_PAGE]; Uint32 prevPage = iopPageptr.p->word32[Page8::PREV_PAGE]; bzero(iopPageptr.p->word32, sizeof(iopPageptr.p->word32)); iopPageptr.p->word32[Page8::NEXT_PAGE] = nextPage; iopPageptr.p->word32[Page8::PREV_PAGE] = prevPage; iopPageptr.p->word32[Page8::EMPTY_LIST] = (1 << ZPOS_PAGE_TYPE_BIT); /* --------------------------------------------------------------------------------- */ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ /* --------------------------------------------------------------------------------- */ tiopIndex = ZHEAD_SIZE + 1; iopPageptr.p->word32[tiopIndex] = Container::NO_CONTAINER_INDEX; for (tiopPrevFree = 0; tiopPrevFree <= Container::MAX_CONTAINER_INDEX - 1; tiopPrevFree++) { tiopIndex = tiopIndex + ZBUF_SIZE; iopPageptr.p->word32[tiopIndex] = tiopPrevFree; }//for /* --------------------------------------------------------------------------------- */ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. 
*/ /* --------------------------------------------------------------------------------- */ tiopIndex = ZHEAD_SIZE; for (tiopNextFree = 1; tiopNextFree <= Container::MAX_CONTAINER_INDEX; tiopNextFree++) { iopPageptr.p->word32[tiopIndex] = tiopNextFree; tiopIndex = tiopIndex + ZBUF_SIZE; }//for iopPageptr.p->word32[tiopIndex] = Container::NO_CONTAINER_INDEX; /* LEFT_LIST IS UPDATED */ /* --------------------------------------------------------------------------------- */ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ /* --------------------------------------------------------------------------------- */ tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 1; iopPageptr.p->word32[tiopIndex] = Container::NO_CONTAINER_INDEX; for (tiopPrevFree = 0; tiopPrevFree <= Container::MAX_CONTAINER_INDEX - 1; tiopPrevFree++) { tiopIndex = tiopIndex + ZBUF_SIZE; iopPageptr.p->word32[tiopIndex] = tiopPrevFree; }//for /* --------------------------------------------------------------------------------- */ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ /* --------------------------------------------------------------------------------- */ tiopIndex = (ZBUF_SIZE + ZHEAD_SIZE) - 2; for (tiopNextFree = 1; tiopNextFree <= Container::MAX_CONTAINER_INDEX; tiopNextFree++) { iopPageptr.p->word32[tiopIndex] = tiopNextFree; tiopIndex = tiopIndex + ZBUF_SIZE; }//for iopPageptr.p->word32[tiopIndex] = Container::NO_CONTAINER_INDEX; /* RIGHT_LIST IS UPDATED */ }//Dbacc::initOverpage() /* --------------------------------------------------------------------------------- */ /* INIT_PAGE */ /* INPUT. INP_PAGEPTR, POINTER TO A PAGE RECORD */ /* DESCRIPTION: CONTAINERS AND FREE LISTS OF THE PAGE, GET INITIALE VALUE */ /* ACCORDING TO LH3 AND PAGE STRUCTOR DISACRIPTION OF NDBACC BLOCK */ /* --------------------------------------------------------------------------------- */ void Dbacc::initPage(Page8Ptr inpPageptr) { Uint32 tinpIndex; Uint32 tinpTmp; Uint32 tinpPrevFree; Uint32 tinpNextFree; for (tiopIndex = 0; tiopIndex <= 2047; tiopIndex++) { // Do not clear page list if (tiopIndex == Page8::NEXT_PAGE) continue; if (tiopIndex == Page8::PREV_PAGE) continue; inpPageptr.p->word32[tiopIndex] = 0; }//for /* --------------------------------------------------------------------------------- */ /* SET PAGE ID FOR USE OF CHECKPOINTER. */ /* PREPARE CONTAINER HEADERS INDICATING EMPTY CONTAINERS WITHOUT NEXT. */ /* --------------------------------------------------------------------------------- */ inpPageptr.p->word32[ZPOS_PAGE_ID] = tipPageId; ContainerHeader tinpTmp1; tinpTmp1.initInUse(); /* --------------------------------------------------------------------------------- */ /* INITIALISE ZNO_CONTAINERS PREDEFINED HEADERS ON LEFT SIZE. */ /* --------------------------------------------------------------------------------- */ tinpIndex = ZHEAD_SIZE; for (tinpTmp = 0; tinpTmp <= ZNO_CONTAINERS - 1; tinpTmp++) { inpPageptr.p->word32[tinpIndex] = tinpTmp1; tinpIndex = tinpIndex + ZBUF_SIZE; }//for /* WORD32(ZPOS_EMPTY_LIST) DATA STRUCTURE:*/ /*--------------------------------------- */ /*| PAGE TYPE|LEFT FREE|RIGHT FREE */ /*| 1 | LIST | LIST */ /*| BIT | 7 BITS | 7 BITS */ /*--------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* INITIALISE FIRST POINTER TO DOUBLY LINKED LIST OF FREE CONTAINERS. */ /* INITIALISE LEFT FREE LIST TO 64 AND RIGHT FREE LIST TO ZERO. */ /* ALSO INITIALISE PAGE TYPE TO NOT OVERFLOW PAGE. 
*/ /* --------------------------------------------------------------------------------- */ tinpTmp = (ZNO_CONTAINERS << 7); inpPageptr.p->word32[ZPOS_EMPTY_LIST] = tinpTmp; /* --------------------------------------------------------------------------------- */ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ /* --------------------------------------------------------------------------------- */ tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 1; inpPageptr.p->word32[tinpIndex] = Container::NO_CONTAINER_INDEX; for (tinpPrevFree = 0; tinpPrevFree <= Container::MAX_CONTAINER_INDEX - 1; tinpPrevFree++) { tinpIndex = tinpIndex + ZBUF_SIZE; inpPageptr.p->word32[tinpIndex] = tinpPrevFree; }//for /* --------------------------------------------------------------------------------- */ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR RIGHT CONTAINERS. */ /* --------------------------------------------------------------------------------- */ tinpIndex = (ZHEAD_SIZE + ZBUF_SIZE) - 2; for (tinpNextFree = 1; tinpNextFree <= Container::MAX_CONTAINER_INDEX; tinpNextFree++) { inpPageptr.p->word32[tinpIndex] = tinpNextFree; tinpIndex = tinpIndex + ZBUF_SIZE; }//for inpPageptr.p->word32[tinpIndex] = Container::NO_CONTAINER_INDEX; /* --------------------------------------------------------------------------------- */ /* INITIALISE PREVIOUS PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */ /* PREDEFINED AS OCCUPIED. */ /* --------------------------------------------------------------------------------- */ tinpIndex = (ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE; for (tinpNextFree = ZNO_CONTAINERS + 1; tinpNextFree <= Container::MAX_CONTAINER_INDEX; tinpNextFree++) { inpPageptr.p->word32[tinpIndex] = tinpNextFree; tinpIndex = tinpIndex + ZBUF_SIZE; }//for inpPageptr.p->word32[tinpIndex] = Container::NO_CONTAINER_INDEX; /* --------------------------------------------------------------------------------- */ /* INITIALISE NEXT PART OF DOUBLY LINKED LIST FOR LEFT CONTAINERS. */ /* THE FIRST ZNO_CONTAINERS ARE NOT PUT INTO FREE LIST SINCE THEY ARE */ /* PREDEFINED AS OCCUPIED. */ /* --------------------------------------------------------------------------------- */ tinpIndex = ((ZNO_CONTAINERS * ZBUF_SIZE) + ZHEAD_SIZE) + 1; inpPageptr.p->word32[tinpIndex] = Container::NO_CONTAINER_INDEX; for (tinpPrevFree = ZNO_CONTAINERS; tinpPrevFree <= Container::MAX_CONTAINER_INDEX - 1; tinpPrevFree++) { tinpIndex = tinpIndex + ZBUF_SIZE; inpPageptr.p->word32[tinpIndex] = tinpPrevFree; }//for /* --------------------------------------------------------------------------------- */ /* INITIALISE HEADER POSITIONS NOT CURRENTLY USED AND ENSURE USE OF OVERFLOW */ /* RECORD POINTER ON THIS PAGE LEADS TO ERROR. 
*/ /* --------------------------------------------------------------------------------- */ inpPageptr.p->word32[ZPOS_CHECKSUM] = 0; inpPageptr.p->word32[ZPOS_ALLOC_CONTAINERS] = 0; }//Dbacc::initPage() /* --------------------------------------------------------------------------------- */ /* RELEASE OP RECORD */ /* PUT A FREE OPERATION IN A FREE LIST OF THE OPERATIONS */ /* --------------------------------------------------------------------------------- */ void Dbacc::releaseOpRec() { #if 0 // DEBUG CODE // Check that the operation to be released isn't // already in the list of free operations // Since this code loops through the entire list of free operations // it's only enabled in VM_TRACE mode OperationrecPtr opRecPtr; bool opInList = false; opRecPtr.i = cfreeopRec; while (opRecPtr.i != RNIL){ if (opRecPtr.i == operationRecPtr.i){ opInList = true; break; } ptrCheckGuard(opRecPtr, coprecsize, operationrec); opRecPtr.i = opRecPtr.p->nextOp; } ndbrequire(opInList == false); #endif ndbrequire(operationRecPtr.p->m_op_bits == Operationrec::OP_INITIAL); operationRecPtr.p->nextOp = cfreeopRec; cfreeopRec = operationRecPtr.i; /* UPDATE FREE LIST OF OP RECORDS */ operationRecPtr.p->prevOp = RNIL; operationRecPtr.p->m_op_bits = Operationrec::OP_INITIAL; }//Dbacc::releaseOpRec() /* --------------------------------------------------------------------------------- */ /* RELEASE_OVERPAGE */ /* --------------------------------------------------------------------------------- */ void Dbacc::releaseOverpage(Page8Ptr ropPageptr) { jam(); { LocalContainerPageList sparselist(*this, fragrecptr.p->sparsepages); sparselist.remove(ropPageptr); } jam(); releasePage(ropPageptr); }//Dbacc::releaseOverpage() /* ------------------------------------------------------------------------- */ /* RELEASE_PAGE */ /* ------------------------------------------------------------------------- */ void Dbacc::releasePage(Page8Ptr rpPageptr) { jam(); ndbassert(g_acc_pages_used[instance()] == cnoOfAllocatedPages); LocalPage8List freelist(*this, cfreepages); #ifdef VM_TRACE // ndbrequire(!freelist.find(rpPageptr)); #endif freelist.addFirst(rpPageptr); cnoOfAllocatedPages--; ndbassert(freelist.getCount() + cnoOfAllocatedPages == cpageCount); fragrecptr.p->m_noOfAllocatedPages--; g_acc_pages_used[instance()] = cnoOfAllocatedPages; if (cnoOfAllocatedPages < m_maxAllocPages) m_oom = false; }//Dbacc::releasePage() bool Dbacc::validatePageCount() const { jam(); FragmentrecPtr regFragPtr; Uint32 pageCount = 0; for (regFragPtr.i = 0; regFragPtr.i < cfragmentsize; regFragPtr.i++) { ptrAss(regFragPtr, fragmentrec); pageCount += regFragPtr.p->m_noOfAllocatedPages; } return pageCount==cnoOfAllocatedPages; }//Dbacc::validatePageCount() Uint64 Dbacc::getLinHashByteSize(Uint32 fragId) const { ndbassert(validatePageCount()); FragmentrecPtr fragPtr(NULL, fragId); ptrCheck(fragPtr, cfragmentsize, fragmentrec); if (unlikely(fragPtr.p == NULL)) { jam(); ndbassert(false); return 0; } else { jam(); ndbassert(fragPtr.p->fragState == ACTIVEFRAG); return fragPtr.p->m_noOfAllocatedPages * static_cast<Uint64>(sizeof(Page8)); } } /* --------------------------------------------------------------------------------- */ /* SEIZE FRAGREC */ /* --------------------------------------------------------------------------------- */ void Dbacc::seizeFragrec() { RSS_OP_ALLOC(cnoOfFreeFragrec); fragrecptr.i = cfirstfreefrag; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); cfirstfreefrag = fragrecptr.p->nextfreefrag; fragrecptr.p->nextfreefrag = RNIL; 
}//Dbacc::seizeFragrec() /* --------------------------------------------------------------------------------- */ /* SEIZE_OP_REC */ /* --------------------------------------------------------------------------------- */ void Dbacc::seizeOpRec() { operationRecPtr.i = cfreeopRec; ptrCheckGuard(operationRecPtr, coprecsize, operationrec); cfreeopRec = operationRecPtr.p->nextOp; /* UPDATE FREE LIST OF OP RECORDS */ /* PUTS OPERTION RECORD PTR IN THE LIST */ /* OF OPERATION IN CONNECTION RECORD */ operationRecPtr.p->nextOp = RNIL; }//Dbacc::seizeOpRec() /** * A ZPAGESIZE_ERROR has occured, out of index pages * Print some debug info if debug compiled */ void Dbacc::zpagesize_error(const char* where){ DEBUG(where << endl << " ZPAGESIZE_ERROR" << endl << " cfreepages.getCount()=" << cfreepages.getCount() << endl << " cpagesize=" <<cpagesize<<endl << " cnoOfAllocatedPages="<<cnoOfAllocatedPages); } /* --------------------------------------------------------------------------------- */ /* SEIZE_PAGE */ /* --------------------------------------------------------------------------------- */ void Dbacc::seizePage(Page8Ptr& spPageptr) { jam(); ndbassert(g_acc_pages_used[instance()] == cnoOfAllocatedPages); tresult = 0; if (cfreepages.isEmpty() || m_oom) { jam(); zpagesize_error("Dbacc::seizePage"); tresult = ZPAGESIZE_ERROR; } else { jam(); LocalPage8List freelist(*this, cfreepages); freelist.removeFirst(spPageptr); cnoOfAllocatedPages++; ndbassert(freelist.getCount() + cnoOfAllocatedPages == cpageCount); fragrecptr.p->m_noOfAllocatedPages++; if (cnoOfAllocatedPages >= m_maxAllocPages) m_oom = true; if (cnoOfAllocatedPages > cnoOfAllocatedPagesMax) cnoOfAllocatedPagesMax = cnoOfAllocatedPages; g_acc_pages_used[instance()] = cnoOfAllocatedPages; } }//Dbacc::seizePage() /* --------------------------------------------------------------------------------- */ /* SEIZE_ROOTFRAGREC */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* SEIZE_SCAN_REC */ /* --------------------------------------------------------------------------------- */ void Dbacc::seizeScanRec() { scanPtr.i = cfirstFreeScanRec; ptrCheckGuard(scanPtr, cscanRecSize, scanRec); ndbrequire(scanPtr.p->scanState == ScanRec::SCAN_DISCONNECT); cfirstFreeScanRec = scanPtr.p->scanNextfreerec; }//Dbacc::seizeScanRec() /* --------------------------------------------------------------------------------- */ /* SEIZE_SR_VERSION_REC */ /* --------------------------------------------------------------------------------- */ /* --------------------------------------------------------------------------------- */ /* SEND_SYSTEMERROR */ /* --------------------------------------------------------------------------------- */ void Dbacc::sendSystemerror(int line)const { progError(line, NDBD_EXIT_PRGERR); }//Dbacc::sendSystemerror() void Dbacc::execDBINFO_SCANREQ(Signal *signal) { jamEntry(); DbinfoScanReq req= *(DbinfoScanReq*)signal->theData; const Ndbinfo::ScanCursor* cursor = CAST_CONSTPTR(Ndbinfo::ScanCursor, DbinfoScan::getCursorPtr(&req)); Ndbinfo::Ratelimit rl; switch(req.tableId){ case Ndbinfo::POOLS_TABLEID: { jam(); const DynArr256Pool::Info pmpInfo = directoryPool.getInfo(); Ndbinfo::pool_entry pools[] = { { "Index memory", cnoOfAllocatedPages, cpageCount, sizeof(Page8), cnoOfAllocatedPagesMax, { CFG_DB_INDEX_MEM,0,0,0 }}, { "L2PMap pages", pmpInfo.pg_count, 0, /* No real limit */ pmpInfo.pg_byte_sz, /* No HWM for this row as it 
would be a fixed fraction of "Data memory" and therefore of limited interest. */ 0, { 0, 0, 0}}, { "L2PMap nodes", pmpInfo.inuse_nodes, pmpInfo.pg_count * pmpInfo.nodes_per_page, // Max within current pages. pmpInfo.node_byte_sz, /* No HWM for this row as it would be a fixed fraction of "Data memory" and therefore of limited interest. */ 0, { 0, 0, 0 }}, { NULL, 0,0,0,0,{ 0,0,0,0 }} }; static const size_t num_config_params = sizeof(pools[0].config_params)/sizeof(pools[0].config_params[0]); Uint32 pool = cursor->data[0]; BlockNumber bn = blockToMain(number()); while(pools[pool].poolname) { jam(); Ndbinfo::Row row(signal, req); row.write_uint32(getOwnNodeId()); row.write_uint32(bn); // block number row.write_uint32(instance()); // block instance row.write_string(pools[pool].poolname); row.write_uint64(pools[pool].used); row.write_uint64(pools[pool].total); row.write_uint64(pools[pool].used_hi); row.write_uint64(pools[pool].entry_size); for (size_t i = 0; i < num_config_params; i++) row.write_uint32(pools[pool].config_params[i]); ndbinfo_send_row(signal, req, row, rl); pool++; if (rl.need_break(req)) { jam(); ndbinfo_send_scan_break(signal, req, rl, pool); return; } } break; } case Ndbinfo::FRAG_LOCKS_TABLEID: { Uint32 tableid = cursor->data[0]; for (;tableid < ctablesize; tableid++) { TabrecPtr tabPtr; tabPtr.i = tableid; ptrAss(tabPtr, tabrec); if (tabPtr.p->fragholder[0] != RNIL) { jam(); // Loop over all fragments for this table. for (Uint32 f = 0; f < NDB_ARRAY_SIZE(tabPtr.p->fragholder); f++) { if (tabPtr.p->fragholder[f] != RNIL) { jam(); FragmentrecPtr frp; frp.i = tabPtr.p->fragptrholder[f]; ptrCheckGuard(frp, cfragmentsize, fragmentrec); const Fragmentrec::LockStats& ls = frp.p->m_lockStats; Ndbinfo::Row row(signal, req); row.write_uint32(getOwnNodeId()); row.write_uint32(instance()); row.write_uint32(tableid); row.write_uint32(tabPtr.p->fragholder[f]); row.write_uint64(ls.m_ex_req_count); row.write_uint64(ls.m_ex_imm_ok_count); row.write_uint64(ls.m_ex_wait_ok_count); row.write_uint64(ls.m_ex_wait_fail_count); row.write_uint64(ls.m_sh_req_count); row.write_uint64(ls.m_sh_imm_ok_count); row.write_uint64(ls.m_sh_wait_ok_count); row.write_uint64(ls.m_sh_wait_fail_count); row.write_uint64(ls.m_wait_ok_millis); row.write_uint64(ls.m_wait_fail_millis); ndbinfo_send_row(signal, req, row, rl); } } } /* If a break is needed, break on a table boundary, as we use the table id as a cursor. */ if (rl.need_break(req)) { jam(); ndbinfo_send_scan_break(signal, req, rl, tableid + 1); return; } } break; } case Ndbinfo::ACC_OPERATIONS_TABLEID: { jam(); /* Take a break periodically when scanning records */ Uint32 maxToCheck = 1024; NDB_TICKS now = getHighResTimer(); OperationrecPtr opRecPtr; opRecPtr.i = cursor->data[0]; while (opRecPtr.i < coprecsize) { ptrCheckGuard(opRecPtr, coprecsize, operationrec); /** * ACC holds lock requests/operations in a 2D queue * structure. * The lock owning operation is directly linked from the * PK hash element. Only one operation is the 'owner' * at any one time. * * The lock owning operation may have other operations * concurrently holding the lock, for example other * operations in the same transaction, or, for shared * reads, in other transactions. * These operations are in the 'parallel' queue of the * lock owning operation, linked from its * nextParallelQue member. * * Non-compatible lock requests must wait until some/ * all of the current lock holder(s) have released the * lock before they can run. 
They are held in the * 'serial' queue, lined from the lockOwner's * nextSerialQue member. * * Note also : Only one operation per row can 'run' * in LDM at any one time, but this serialisation * is not considered as locking overhead. * * Note also : These queue members are part of overlays * and are not always guaranteed to be valid, m_op_bits * often must be consulted too. */ if (opRecPtr.p->m_op_bits != Operationrec::OP_INITIAL) { jam(); FragmentrecPtr fp; fp.i = opRecPtr.p->fragptr; ptrCheckGuard(fp, cfragmentsize, fragmentrec); const Uint32 tableId = fp.p->myTableId; const Uint32 fragId = fp.p->myfid; const Uint64 rowId = Uint64(opRecPtr.p->localdata.m_page_no) << 32 | Uint64(opRecPtr.p->localdata.m_page_idx); /* Send as separate attrs, as in cluster_operations */ const Uint32 transId0 = opRecPtr.p->transId1; const Uint32 transId1 = opRecPtr.p->transId2; const Uint32 prevSerialQue = opRecPtr.p->prevSerialQue; const Uint32 nextSerialQue = opRecPtr.p->nextSerialQue; const Uint32 prevParallelQue = opRecPtr.p->prevParallelQue; const Uint32 nextParallelQue = opRecPtr.p->nextParallelQue; const Uint32 flags = opRecPtr.p->m_op_bits; /* Ignore Uint32 overflow at ~ 50 days */ const Uint32 durationMillis = (Uint32) NdbTick_Elapsed(opRecPtr.p->m_lockTime, now).milliSec(); const Uint32 userPtr = opRecPtr.p->userptr; /* Live operation */ Ndbinfo::Row row(signal, req); row.write_uint32(getOwnNodeId()); row.write_uint32(instance()); row.write_uint32(tableId); row.write_uint32(fragId); row.write_uint64(rowId); row.write_uint32(transId0); row.write_uint32(transId1); row.write_uint32(opRecPtr.i); row.write_uint32(flags); row.write_uint32(prevSerialQue); row.write_uint32(nextSerialQue); row.write_uint32(prevParallelQue); row.write_uint32(nextParallelQue); row.write_uint32(durationMillis); row.write_uint32(userPtr); ndbinfo_send_row(signal, req, row, rl); } maxToCheck--; opRecPtr.i++; if (rl.need_break(req) || maxToCheck == 0) { jam(); ndbinfo_send_scan_break(signal, req, rl, opRecPtr.i); return; } } break; } default: break; } ndbinfo_send_scan_conf(signal, req, rl); } void Dbacc::execDUMP_STATE_ORD(Signal* signal) { DumpStateOrd * const dumpState = (DumpStateOrd *)&signal->theData[0]; if (dumpState->args[0] == DumpStateOrd::AccDumpOneScanRec){ Uint32 recordNo = RNIL; if (signal->length() == 2) recordNo = dumpState->args[1]; else return; if (recordNo >= cscanRecSize) return; scanPtr.i = recordNo; ptrAss(scanPtr, scanRec); infoEvent("Dbacc::ScanRec[%d]: state=%d, transid(0x%x, 0x%x)", scanPtr.i, scanPtr.p->scanState,scanPtr.p->scanTrid1, scanPtr.p->scanTrid2); infoEvent(" activeLocalFrag=%d, nextBucketIndex=%d", scanPtr.p->activeLocalFrag, scanPtr.p->nextBucketIndex); infoEvent(" scanNextfreerec=%d firstActOp=%d firstLockedOp=%d, " "scanLastLockedOp=%d firstQOp=%d lastQOp=%d", scanPtr.p->scanNextfreerec, scanPtr.p->scanFirstActiveOp, scanPtr.p->scanFirstLockedOp, scanPtr.p->scanLastLockedOp, scanPtr.p->scanFirstQueuedOp, scanPtr.p->scanLastQueuedOp); infoEvent(" scanUserP=%d, startNoBuck=%d, minBucketIndexToRescan=%d, " "maxBucketIndexToRescan=%d", scanPtr.p->scanUserptr, scanPtr.p->startNoOfBuckets, scanPtr.p->minBucketIndexToRescan, scanPtr.p->maxBucketIndexToRescan); infoEvent(" scanBucketState=%d, scanLockHeld=%d, userBlockRef=%d, " "scanMask=%d scanLockMode=%d", scanPtr.p->scanBucketState, scanPtr.p->scanLockHeld, scanPtr.p->scanUserblockref, scanPtr.p->scanMask, scanPtr.p->scanLockMode); return; } // Dump all ScanRec(ords) if (dumpState->args[0] == DumpStateOrd::AccDumpAllScanRec){ Uint32 recordNo = 0; 
if (signal->length() == 1) infoEvent("ACC: Dump all ScanRec - size: %d", cscanRecSize); else if (signal->length() == 2) recordNo = dumpState->args[1]; else return; dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec; dumpState->args[1] = recordNo; execDUMP_STATE_ORD(signal); if (recordNo < cscanRecSize-1){ dumpState->args[0] = DumpStateOrd::AccDumpAllScanRec; dumpState->args[1] = recordNo+1; sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); } return; } // Dump all active ScanRec(ords) if (dumpState->args[0] == DumpStateOrd::AccDumpAllActiveScanRec){ Uint32 recordNo = 0; if (signal->length() == 1) infoEvent("ACC: Dump active ScanRec - size: %d", cscanRecSize); else if (signal->length() == 2) recordNo = dumpState->args[1]; else return; ScanRecPtr sp; sp.i = recordNo; ptrAss(sp, scanRec); if (sp.p->scanState != ScanRec::SCAN_DISCONNECT){ dumpState->args[0] = DumpStateOrd::AccDumpOneScanRec; dumpState->args[1] = recordNo; execDUMP_STATE_ORD(signal); } if (recordNo < cscanRecSize-1){ dumpState->args[0] = DumpStateOrd::AccDumpAllActiveScanRec; dumpState->args[1] = recordNo+1; sendSignal(reference(), GSN_DUMP_STATE_ORD, signal, 2, JBB); } return; } if(dumpState->args[0] == DumpStateOrd::EnableUndoDelayDataWrite){ ndbout << "Dbacc:: delay write of datapages for table = " << dumpState->args[1]<< endl; c_errorInsert3000_TableId = dumpState->args[1]; SET_ERROR_INSERT_VALUE(3000); return; } if(dumpState->args[0] == DumpStateOrd::AccDumpOneOperationRec){ Uint32 recordNo = RNIL; if (signal->length() == 2) recordNo = dumpState->args[1]; else return; if (recordNo >= coprecsize) return; OperationrecPtr tmpOpPtr; tmpOpPtr.i = recordNo; ptrAss(tmpOpPtr, operationrec); infoEvent("Dbacc::operationrec[%d]: transid(0x%x, 0x%x)", tmpOpPtr.i, tmpOpPtr.p->transId1, tmpOpPtr.p->transId2); infoEvent("elementPage=%d, elementPointer=%d ", tmpOpPtr.p->elementPage, tmpOpPtr.p->elementPointer); infoEvent("fid=%d, fragptr=%d ", tmpOpPtr.p->fid, tmpOpPtr.p->fragptr); infoEvent("hashValue=%d", tmpOpPtr.p->hashValue.pack()); infoEvent("nextLockOwnerOp=%d, nextOp=%d, nextParallelQue=%d ", tmpOpPtr.p->nextLockOwnerOp, tmpOpPtr.p->nextOp, tmpOpPtr.p->nextParallelQue); infoEvent("nextSerialQue=%d, prevOp=%d ", tmpOpPtr.p->nextSerialQue, tmpOpPtr.p->prevOp); infoEvent("prevLockOwnerOp=%d, prevParallelQue=%d", tmpOpPtr.p->prevLockOwnerOp, tmpOpPtr.p->nextParallelQue); infoEvent("prevSerialQue=%d, scanRecPtr=%d", tmpOpPtr.p->prevSerialQue, tmpOpPtr.p->scanRecPtr); infoEvent("m_op_bits=0x%x, reducedHashValue=%x ", tmpOpPtr.p->m_op_bits, tmpOpPtr.p->reducedHashValue.pack()); return; } if(dumpState->args[0] == DumpStateOrd::AccDumpNumOpRecs){ Uint32 freeOpRecs = 0; OperationrecPtr opRecPtr; opRecPtr.i = cfreeopRec; while (opRecPtr.i != RNIL){ freeOpRecs++; ptrCheckGuard(opRecPtr, coprecsize, operationrec); opRecPtr.i = opRecPtr.p->nextOp; } infoEvent("Dbacc::OperationRecords: num=%d, free=%d", coprecsize, freeOpRecs); return; } if(dumpState->args[0] == DumpStateOrd::AccDumpFreeOpRecs){ OperationrecPtr opRecPtr; opRecPtr.i = cfreeopRec; while (opRecPtr.i != RNIL){ dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec; dumpState->args[1] = opRecPtr.i; execDUMP_STATE_ORD(signal); ptrCheckGuard(opRecPtr, coprecsize, operationrec); opRecPtr.i = opRecPtr.p->nextOp; } return; } if(dumpState->args[0] == DumpStateOrd::AccDumpNotFreeOpRecs){ Uint32 recordStart = RNIL; if (signal->length() == 2) recordStart = dumpState->args[1]; else return; if (recordStart >= coprecsize) return; for (Uint32 i = recordStart; i < coprecsize; i++){ 
bool inFreeList = false; OperationrecPtr opRecPtr; opRecPtr.i = cfreeopRec; while (opRecPtr.i != RNIL){ if (opRecPtr.i == i){ inFreeList = true; break; } ptrCheckGuard(opRecPtr, coprecsize, operationrec); opRecPtr.i = opRecPtr.p->nextOp; } if (inFreeList == false){ dumpState->args[0] = DumpStateOrd::AccDumpOneOperationRec; dumpState->args[1] = i; execDUMP_STATE_ORD(signal); } } return; } #if 0 if (type == 100) { RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); req->primaryTableId = 2; req->secondaryTableId = RNIL; req->userPtr = 2; req->userRef = DBDICT_REF; sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal, RelTabMemReq::SignalLength, JBB); return; }//if if (type == 101) { RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); req->primaryTableId = 4; req->secondaryTableId = 5; req->userPtr = 4; req->userRef = DBDICT_REF; sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal, RelTabMemReq::SignalLength, JBB); return; }//if if (type == 102) { RelTabMemReq * const req = (RelTabMemReq *)signal->getDataPtrSend(); req->primaryTableId = 6; req->secondaryTableId = 8; req->userPtr = 6; req->userRef = DBDICT_REF; sendSignal(cownBlockref, GSN_REL_TABMEMREQ, signal, RelTabMemReq::SignalLength, JBB); return; }//if if (type == 103) { DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); req->primaryTableId = 2; req->secondaryTableId = RNIL; req->userPtr = 2; req->userRef = DBDICT_REF; sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal, DropTabFileReq::SignalLength, JBB); return; }//if if (type == 104) { DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); req->primaryTableId = 4; req->secondaryTableId = 5; req->userPtr = 4; req->userRef = DBDICT_REF; sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal, DropTabFileReq::SignalLength, JBB); return; }//if if (type == 105) { DropTabFileReq * const req = (DropTabFileReq *)signal->getDataPtrSend(); req->primaryTableId = 6; req->secondaryTableId = 8; req->userPtr = 6; req->userRef = DBDICT_REF; sendSignal(cownBlockref, GSN_DROP_TABFILEREQ, signal, DropTabFileReq::SignalLength, JBB); return; }//if #endif if (signal->theData[0] == DumpStateOrd::SchemaResourceSnapshot) { RSS_OP_SNAPSHOT_SAVE(cnoOfFreeFragrec); return; } if (signal->theData[0] == DumpStateOrd::SchemaResourceCheckLeak) { RSS_OP_SNAPSHOT_CHECK(cnoOfFreeFragrec); return; } }//Dbacc::execDUMP_STATE_ORD() Uint32 Dbacc::getL2PMapAllocBytes(Uint32 fragId) const { jam(); FragmentrecPtr fragPtr(NULL, fragId); ptrCheckGuard(fragPtr, cfragmentsize, fragmentrec); return fragPtr.p->directory.getByteSize(); } void Dbacc::execREAD_PSEUDO_REQ(Signal* signal){ jamEntry(); fragrecptr.i = signal->theData[0]; Uint32 attrId = signal->theData[1]; ptrCheckGuard(fragrecptr, cfragmentsize, fragmentrec); Uint64 tmp; switch(attrId){ case AttributeHeader::ROW_COUNT: tmp = fragrecptr.p->noOfElements; break; case AttributeHeader::COMMIT_COUNT: tmp = fragrecptr.p->m_commit_count; break; default: tmp = 0; } memcpy(signal->theData, &tmp, 8); /* must be memcpy, gives strange results on * ithanium gcc (GCC) 3.4.1 smp linux 2.4 * otherwise */ // Uint32 * src = (Uint32*)&tmp; // signal->theData[0] = src[0]; // signal->theData[1] = src[1]; } void Dbacc::execNODE_STATE_REP(Signal* signal) { jamEntry(); const NodeStateRep* rep = CAST_CONSTPTR(NodeStateRep, signal->getDataPtr()); if (rep->nodeState.startLevel == NodeState::SL_STARTED) { jam(); const ndb_mgm_configuration_iterator * p = m_ctx.m_config.getOwnConfigIterator(); ndbrequire(p != 0); Uint32 free_pct = 5; 
    ndb_mgm_get_int_parameter(p, CFG_DB_FREE_PCT, &free_pct);
    m_free_pct = free_pct;
    m_maxAllocPages = (cpagesize * (100 - free_pct)) / 100;
    if (cnoOfAllocatedPages >= m_maxAllocPages)
      m_oom = true;
  }
  SimulatedBlock::execNODE_STATE_REP(signal);
}

#ifdef VM_TRACE
void Dbacc::debug_lh_vars(const char* where)const
{
  Uint32 b = fragrecptr.p->level.getTop();
  Uint32 di = fragrecptr.p->getPageNumber(b);
  Uint32 ri = di >> 8;
  ndbout << "DBACC: " << where << ":"
         << " frag:" << fragrecptr.p->myTableId << "/" << fragrecptr.p->myfid
         << " slack:" << fragrecptr.p->slack << "/" << fragrecptr.p->slackCheck
         << " top:" << fragrecptr.p->level.getTop()
         << " di:" << di << " ri:" << ri
         << " full:" << fragrecptr.p->dirRangeFull << "\n";
}
#endif<|fim▁end|>
goto conf;
}
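# --- Editor's illustrative sketch (not part of the original dataset) --------
# The rows in this dump pair a <|fim▁begin|>/<|fim▁hole|>/<|fim▁end|> prompt
# with a short completion such as the "goto conf;" fragment above. The Python
# sketch below shows one plausible way to splice such a pair back into the
# original source text. The marker strings are copied from this dump; the
# function name and the fallback used when no hole marker is present are
# assumptions of the sketch, not part of any particular training pipeline.
FIM_BEGIN = "<|fim▁begin|>"
FIM_HOLE = "<|fim▁hole|>"
FIM_END = "<|fim▁end|>"
FILE_NAME_END = "<|end_file_name|>"


def reassemble(prompt, completion):
    """Splice `completion` into the hole of a fill-in-the-middle `prompt`."""
    # Drop the optional <|file_name|>...<|end_file_name|> header if present.
    if FILE_NAME_END in prompt:
        prompt = prompt.split(FILE_NAME_END, 1)[1]
    # Strip the begin/end sentinels around the file body.
    if prompt.startswith(FIM_BEGIN):
        prompt = prompt[len(FIM_BEGIN):]
    if prompt.endswith(FIM_END):
        prompt = prompt[:-len(FIM_END)]
    prefix, marker, suffix = prompt.partition(FIM_HOLE)
    if not marker:
        # No hole marker: the completion simply continues the prompt.
        return prefix + completion
    return prefix + completion + suffix
# ----------------------------------------------------------------------------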
<|file_name|>bitsnoop.py<|end_file_name|><|fim▁begin|># coding=utf-8 # Author: Gonçalo M. (aka duramato/supergonkas) <[email protected]> # # This file is part of Medusa. # # Medusa is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Medusa is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Medusa. If not, see <http://www.gnu.org/licenses/>. """Provider code for Bitsnoop.""" from __future__ import unicode_literals import traceback from requests.compat import urljoin from ..torrent_provider import TorrentProvider from .... import app, logger, tv_cache from ....bs4_parser import BS4Parser from ....helper.common import convert_size, try_int class BitSnoopProvider(TorrentProvider): """BitSnoop Torrent provider.""" def __init__(self): """Initialize the class.""" super(self.__class__, self).__init__('BitSnoop') # Credentials self.public = True # URLs self.url = 'https://bitsnoop.com' self.urls = { 'base': self.url, 'rss': urljoin(self.url, '/new_video.html?fmt=rss'), 'search': urljoin(self.url, '/search/video/'), } # Proper Strings self.proper_strings = ['PROPER', 'REPACK'] # Miscellaneous Options # Torrent Stats self.minseed = None self.minleech = None # Cache self.cache = tv_cache.TVCache(self, search_params={'RSS': ['rss']}) def search(self, search_strings, age=0, ep_obj=None): """ Search a provider and parse the results. :param search_strings: A dict with mode (key) and the search value (value) :param age: Not used :param ep_obj: Not used :returns: A list of search results (structure) """ results = [] for mode in search_strings: logger.log('Search mode: {0}'.format(mode), logger.DEBUG) for search_string in search_strings[mode]: if mode != 'RSS': logger.log('Search string: {search}'.format (search=search_string), logger.DEBUG) search_url = (self.urls['rss'], self.urls['search'] + search_string + '/s/d/1/?fmt=rss')[mode != 'RSS'] response = self.get_url(search_url, returns='response') if not response or not response.text: logger.log('No data returned from provider', logger.DEBUG) continue elif not response or not response.text.startswith('<?xml'): logger.log('Expected xml but got something else, is your mirror failing?', logger.INFO) continue results += self.parse(response.text, mode) return results def parse(self, data, mode): """ Parse search results for items. :param data: The raw response from a search :param mode: The current mode used to search, e.g. 
RSS :return: A list of items found """ items = [] with BS4Parser(data, 'html5lib') as html: torrent_rows = html('item') for row in torrent_rows: try: if not row.category.text.endswith(('TV', 'Anime')): continue title = row.title.text # Use the torcache link bitsnoop provides, # unless it is not torcache or we are not using blackhole # because we want to use magnets if connecting direct to client<|fim▁hole|> if app.TORRENT_METHOD != 'blackhole' or 'torcache' not in download_url: download_url = row.find('magneturi').next.replace('CDATA', '').strip('[]') + \ self._custom_trackers if not all([title, download_url]): continue seeders = try_int(row.find('numseeders').text) leechers = try_int(row.find('numleechers').text) # Filter unseeded torrent if seeders < min(self.minseed, 1): if mode != 'RSS': logger.log("Discarding torrent because it doesn't meet the " "minimum seeders: {0}. Seeders: {1}".format (title, seeders), logger.DEBUG) continue torrent_size = row.find('size').text size = convert_size(torrent_size) or -1 item = { 'title': title, 'link': download_url, 'size': size, 'seeders': seeders, 'leechers': leechers, 'pubdate': None, } if mode != 'RSS': logger.log('Found result: {0} with {1} seeders and {2} leechers'.format (title, seeders, leechers), logger.DEBUG) items.append(item) except (AttributeError, TypeError, KeyError, ValueError, IndexError): logger.log('Failed parsing provider. Traceback: {0!r}'.format (traceback.format_exc()), logger.ERROR) return items provider = BitSnoopProvider()<|fim▁end|>
# so that proxies work.
download_url = row.enclosure['url']
<|file_name|>ops_config.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. # DOCUMENTATION = """ --- module: ops_config version_added: "2.1" author: "Peter sprygada (@privateip)" short_description: Manage OpenSwitch configuration using CLI description: - OpenSwitch configurations use a simple block indent file syntax for segmenting configuration into sections. This module provides an implementation for working with ops configuration sections in a deterministic way. extends_documentation_fragment: openswitch options: lines: description: - The ordered set of commands that should be configured in the section. The commands must be the exact same commands as found in the device running-config. Be sure to note the configuration command syntax as some commands are automatically modified by the device config parser. required: true parents: description: - The ordered set of parents that uniquely identify the section the commands should be checked against. If the parents argument is omitted, the commands are checked against the set of top level or global commands. required: false default: null before: description: - The ordered set of commands to push on to the command stack if a change needs to be made. This allows the playbook designer the opportunity to perform configuration commands prior to pushing any changes without affecting how the set of commands are matched against the system. required: false default: null after: description: - The ordered set of commands to append to the end of the command stack if a changed needs to be made. Just like with I(before) this allows the playbook designer to append a set of commands to be executed after the command set. required: false default: null match: description: - Instructs the module on the way to perform the matching of the set of commands against the current device config. If match is set to I(line), commands are matched line by line. If match is set to I(strict), command lines are matched with respect to position. Finally if match is set to I(exact), command lines must be an equal match. required: false default: line choices: ['line', 'strict', 'exact'] replace: description: - Instructs the module on the way to perform the configuration on the device. If the replace argument is set to I(line) then the modified lines are pushed to the device in configuration mode. If the replace argument is set to I(block) then the entire command block is pushed to the device in configuration mode if any line is not correct. required: false default: line choices: ['line', 'block'] force: description: - The force argument instructs the module to not consider the current devices running-config. When set to true, this will cause the module to push the contents of I(src) into the device without first checking if already configured. 
required: false default: false choices: ['true', 'false'] config: description: - The module, by default, will connect to the remote device and retrieve the current running-config to use as a base for comparing against the contents of source. There are times when it is not desirable to have the task get the current running-config for every task in a playbook. The I(config) argument allows the implementer to pass in the configuration to use as the base config for comparison. required: false default: null """ EXAMPLES = """ - name: configure hostname over cli ops_config: lines: - "hostname {{ inventory_hostname }}" - name: configure vlan 10 over cli ops_config: lines: - no shutdown parents: - vlan 10 """ RETURN = """ updates: description: The set of commands that will be pushed to the remote device returned: always type: list sample: ['...', '...'] <|fim▁hole|> sample: ['...', '...'] """ import re import itertools def get_config(module): config = module.params['config'] or dict() if not config and not module.params['force']: config = module.config return config def build_candidate(lines, parents, config, strategy): candidate = list() if strategy == 'strict': for index, cmd in enumerate(lines): try: if cmd != config[index]: candidate.append(cmd) except IndexError: candidate.append(cmd) elif strategy == 'exact': if len(lines) != len(config): candidate = list(lines) else: for cmd, cfg in itertools.izip(lines, config): if cmd != cfg: candidate = list(lines) break else: for cmd in lines: if cmd not in config: candidate.append(cmd) return candidate def main(): argument_spec = dict( lines=dict(aliases=['commands'], required=True, type='list'), parents=dict(type='list'), before=dict(type='list'), after=dict(type='list'), match=dict(default='line', choices=['line', 'strict', 'exact']), replace=dict(default='line', choices=['line', 'block']), force=dict(default=False, type='bool'), config=dict(), transport=dict(default='cli', choices=['cli']) ) module = get_module(argument_spec=argument_spec, supports_check_mode=True) lines = module.params['lines'] parents = module.params['parents'] or list() before = module.params['before'] after = module.params['after'] match = module.params['match'] replace = module.params['replace'] contents = get_config(module) config = module.parse_config(contents) if parents: for parent in parents: for item in config: if item.text == parent: config = item try: children = [c.text for c in config.children] except AttributeError: children = [c.text for c in config] else: children = [c.text for c in config if not c.parents] result = dict(changed=False) candidate = build_candidate(lines, parents, children, match) if candidate: if replace == 'line': candidate[:0] = parents else: candidate = list(parents) candidate.extend(lines) if before: candidate[:0] = before if after: candidate.extend(after) if not module.check_mode: response = module.configure(candidate) result['responses'] = response result['changed'] = True result['updates'] = candidate return module.exit_json(**result) from ansible.module_utils.basic import * from ansible.module_utils.shell import * from ansible.module_utils.netcfg import * from ansible.module_utils.openswitch import * if __name__ == '__main__': main()<|fim▁end|>
responses:
  description: The set of responses from issuing the commands on the device
  returned: when not check_mode
  type: list
<|file_name|>symbols.py<|end_file_name|><|fim▁begin|>#!/usr/bin/python3 # -*- coding: utf-8 -*- ################################################################################ # DChars Copyright (C) 2012 Suizokukan # Contact: suizokukan _A.T._ orange dot fr # # This file is part of DChars. # DChars is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # DChars is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with DChars. If not, see <http://www.gnu.org/licenses/>. ################################################################################ """ ❏DChars❏ : dchars/languages/lat/symbols.py """ # problem with Pylint : # pylint: disable=E0611 # many errors like "No name 'extensions' in module 'dchars'"<|fim▁hole|># # CAVEAT ! If you modify these dictionaries, don't forget to modify the # corresponding transliteration's dictionaries ! # #............................................................................... SYMB_UPPER_CASE = Name2Symbols( { 'a' : ('A',), 'b' : ('B',), 'c' : ('C',), 'd' : ('D',), 'e' : ('E',), 'f' : ('F',), 'g' : ('G',), 'h' : ('H',), 'i' : ('I',), 'j' : ('J',), 'k' : ('K',), 'l' : ('L',), 'm' : ('M',), 'n' : ('N',), 'o' : ('O',), 'p' : ('P',), 'q' : ('Q',), 'r' : ('R',), 's' : ('S',), 't' : ('T',), 'u' : ('U',), 'v' : ('V',), 'w' : ('W',), 'x' : ('X',), 'y' : ('Y',), 'z' : ('Z',), }) SYMB_LOWER_CASE = Name2Symbols( { 'a' : ('a',), 'b' : ('b',), 'c' : ('c',), 'd' : ('d',), 'e' : ('e',), 'f' : ('f',), 'g' : ('g',), 'h' : ('h',), 'i' : ('i',), 'j' : ('j',), 'k' : ('k',), 'l' : ('l',), 'm' : ('m',), 'n' : ('n',), 'o' : ('o',), 'p' : ('p',), 'q' : ('q',), 'r' : ('r',), 's' : ('s',), 't' : ('t',), 'u' : ('u',), 'v' : ('v',), 'w' : ('w',), 'x' : ('x',), 'y' : ('y',), 'z' : ('z',), }) SYMB_PUNCTUATION = Name2Symbols( {'-' : ("-", "—"), ')' : (')',), '(' : ('(',), '[' : ('[',), ']' : (']',), '{' : ('{',), '}' : ('}',), '0' : ('0',), '1' : ('1',), '2' : ('2',), '3' : ('3',), '4' : ('4',), '5' : ('5',), '6' : ('6',), '7' : ('7',), '8' : ('8',), '9' : ('9',), ' ' : (' ',), '.' : ('.',), ',' : (',',), ';' : (';',), '!' : ('!',), '?' : ('?',), '"' : ('"','‘',"’",), "'" : ("'","᾽"), ":" : (":"), '\n' : ('\n',), '\r' : ('\r',), '\t' : ('\t',), }) SYMB_DIACRITICS = Name2Symbols( { "stress" : ( chr(0x301), chr(0x030D) ), # á, a̍ "long" : ( chr(0x304),), # ā "short" : ( chr(0x306),), # ă "diaeresis": ( chr(0x308),), # ä }) # we define these constants in order to avoir multiple calls to SYMB_DIACRITICS.get_default_symbol : DEFAULTSYMB__STRESS = SYMB_DIACRITICS.get_default_symbol("stress") DEFAULTSYMB__DIAERESIS = SYMB_DIACRITICS.get_default_symbol("diaeresis") #............................................................................... # we calculate these tuple which is often used in order to speed up the code : #............................................................................... SYMB_DIACRITICS__STRESS = SYMB_DIACRITICS["stress"] SYMB_DIACRITICS__LENGTH = ( SYMB_DIACRITICS["short"] + \ SYMB_DIACRITICS["long"] ) SYMB_DIACRITICS__DIAERESIS = SYMB_DIACRITICS["diaeresis"]<|fim▁end|>
from dchars.utilities.name2symbols import Name2Symbols

#...............................................................................
# symbols used by Latin
<|file_name|>bootstrap-datetimepicker.ms.js<|end_file_name|><|fim▁begin|>/**
 * Malay translation for bootstrap-datetimepicker
 * Ateman Faiz <[email protected]><|fim▁hole|>
    $.fn.datetimepicker.dates['ms'] = {
        days: ["Ahad", "Isnin", "Selasa", "Rabu", "Khamis", "Jumaat", "Sabtu", "Ahad"],
        daysShort: ["Aha", "Isn", "Sel", "Rab", "Kha", "Jum", "Sab", "Aha"],
        daysMin: ["Ah", "Is", "Se", "Ra", "Kh", "Ju", "Sa", "Ah"],
        months: ["Januari", "Februari", "Mac", "April", "Mei", "Jun", "Julai", "Ogos", "September", "Oktober", "November", "Disember"],
        monthsShort: ["Jan", "Feb", "Mar", "Apr", "Mei", "Jun", "Jul", "Ogo", "Sep", "Okt", "Nov", "Dis"],
        today: "Hari Ini",
        suffix: [],
        meridiem: []
    };
}(jQuery));<|fim▁end|>
 */
;(function ($) {
<|file_name|>NodeGlyphInterface.ts<|end_file_name|><|fim▁begin|>import { Selection } from "d3-selection";
import { DynamicGraph } from "../model/DynamicGraph";<|fim▁hole|>
export interface NodeGlyphShape {
    readonly shapeType: string;
    init(location: Selection<any, {}, any, {}>): Selection<any, {}, any, {}>;
    initDraw(location: Selection<any, {}, any, {}>): Selection<any, {}, any, {}>;
    updateDraw(location: Selection<any, {}, any, {}>, attrOpts: SVGAttrOpts): Selection<any, {}, any, {}>;
    transformTo(source: Selection<any, {}, any, {}>, shape: NodeGlyphShape, target: Selection<any, {}, any, {}>): void;
    draw(location: Selection<any, {}, any, {}>, data: DynamicGraph, timeStepIndex: number, attrOpts: SVGAttrOpts, duplicateNodes?: boolean, enterExit?: any): void;
}<|fim▁end|>
import { SVGAttrOpts } from "./DGLOsSVG";
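
Each row above pairs a FIM-formatted prompt (a source file with a hole cut out, delimited by the <|fim▁begin|>, <|fim▁hole|>, and <|fim▁end|> markers) with the completion that fills that hole. The Python sketch below shows one way to splice a completion back into its prompt to recover the original file; the marker strings and the reassemble helper are assumptions inferred from how the rows render here, not part of any published tooling for this dataset.

# Minimal sketch: rebuild a source file from one (prompt, completion) row.
# The marker strings are assumptions taken from how the rows appear above;
# "\u2581" is the LOWER ONE EIGHTH BLOCK character used inside the markers.
FIM_BEGIN = "<|fim\u2581begin|>"
FIM_HOLE = "<|fim\u2581hole|>"
FIM_END = "<|fim\u2581end|>"

def reassemble(prompt: str, completion: str) -> str:
    """Splice the completion back into the hole of a FIM-formatted prompt."""
    body = prompt.split(FIM_BEGIN, 1)[1]      # drop the <|file_name|> header
    prefix, rest = body.split(FIM_HOLE, 1)
    suffix = rest.split(FIM_END, 1)[0]
    return prefix + completion + suffix

# Toy row (not taken from the dataset) to show the call shape:
toy_prompt = FIM_BEGIN + "def add(a, b):\n" + FIM_HOLE + "\n" + FIM_END
print(reassemble(toy_prompt, "    return a + b"))

Applied to the NodeGlyphInterface.ts row above, this splice would put the SVGAttrOpts import back between the DynamicGraph import and the interface declaration.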