filename | text
---|---
the-stack_0_23166 | # Form / Progress
# Use a progress bar to indicate completion status of an operation.
# ---
from h2o_wave import site, ui
page = site['/demo']
page['example'] = ui.form_card(
box='1 1 4 -1',
items=[
ui.progress(label='Indeterminate Progress', caption='Goes on forever'),
ui.progress(label='Standard Progress', caption='Downloading the interwebs...', value=0.25),
]
)
page.save()
|
the-stack_0_23168 | import datetime
import logging
import os.path
import shutil
import ssl
import sys
import tempfile
import warnings
from pathlib import Path
from test import (
LONG_TIMEOUT,
SHORT_TIMEOUT,
TARPIT_HOST,
notSecureTransport,
requires_network,
requires_ssl_context_keyfile_password,
resolvesLocalhostFQDN,
)
from test.conftest import ServerConfig
from typing import List, Optional
from unittest import mock
import pytest
import trustme
import urllib3.util as util
import urllib3.util.ssl_
from dummyserver.server import (
DEFAULT_CA,
DEFAULT_CA_KEY,
DEFAULT_CERTS,
encrypt_key_pem,
)
from dummyserver.testcase import HTTPSDummyServerTestCase
from urllib3 import HTTPSConnectionPool
from urllib3.connection import RECENT_DATE, VerifiedHTTPSConnection
from urllib3.exceptions import (
ConnectTimeoutError,
InsecureRequestWarning,
MaxRetryError,
ProtocolError,
SSLError,
SystemTimeWarning,
)
from urllib3.util.ssl_match_hostname import CertificateError
from urllib3.util.timeout import Timeout
from .. import has_alpn
# Retry failed tests
pytestmark = pytest.mark.flaky
log = logging.getLogger("urllib3.connectionpool")
log.setLevel(logging.NOTSET)
log.addHandler(logging.StreamHandler(sys.stdout))
TLSv1_CERTS = DEFAULT_CERTS.copy()
TLSv1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1", None)
TLSv1_1_CERTS = DEFAULT_CERTS.copy()
TLSv1_1_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_1", None)
TLSv1_2_CERTS = DEFAULT_CERTS.copy()
TLSv1_2_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLSv1_2", None)
TLSv1_3_CERTS = DEFAULT_CERTS.copy()
TLSv1_3_CERTS["ssl_version"] = getattr(ssl, "PROTOCOL_TLS", None)
CLIENT_INTERMEDIATE_PEM = "client_intermediate.pem"
CLIENT_NO_INTERMEDIATE_PEM = "client_no_intermediate.pem"
CLIENT_INTERMEDIATE_KEY = "client_intermediate.key"
PASSWORD_CLIENT_KEYFILE = "client_password.key"
CLIENT_CERT = CLIENT_INTERMEDIATE_PEM
class TestHTTPS(HTTPSDummyServerTestCase):
tls_protocol_name: Optional[str] = None
def tls_protocol_not_default(self) -> bool:
return self.tls_protocol_name in {"TLSv1", "TLSv1.1"}
def tls_version(self) -> "ssl.TLSVersion":
if self.tls_protocol_name is None:
return pytest.skip("Skipping base test class")
try:
from ssl import TLSVersion
except ImportError:
return pytest.skip("ssl.TLSVersion isn't available")
return TLSVersion[self.tls_protocol_name.replace(".", "_")]
def ssl_version(self) -> int:
if self.tls_protocol_name is None:
return pytest.skip("Skipping base test class")
attribute = f"PROTOCOL_{self.tls_protocol_name.replace('.', '_')}"
ssl_version = getattr(ssl, attribute, None)
if ssl_version is None:
return pytest.skip(f"ssl.{attribute} isn't available")
return ssl_version # type: ignore[no-any-return]
@classmethod
def setup_class(cls) -> None:
super().setup_class()
cls.certs_dir = tempfile.mkdtemp()
# Start from existing root CA as we don't want to change the server certificate yet
with open(DEFAULT_CA, "rb") as crt, open(DEFAULT_CA_KEY, "rb") as key:
root_ca = trustme.CA.from_pem(crt.read(), key.read())
# Generate another CA to test verification failure
bad_ca = trustme.CA()
cls.bad_ca_path = os.path.join(cls.certs_dir, "ca_bad.pem")
bad_ca.cert_pem.write_to_path(cls.bad_ca_path)
# client cert chain
intermediate_ca = root_ca.create_child_ca()
cert = intermediate_ca.issue_cert("example.com")
encrypted_key = encrypt_key_pem(cert.private_key_pem, b"letmein")
cert.private_key_pem.write_to_path(
os.path.join(cls.certs_dir, CLIENT_INTERMEDIATE_KEY)
)
encrypted_key.write_to_path(
os.path.join(cls.certs_dir, PASSWORD_CLIENT_KEYFILE)
)
# Write the client cert and the intermediate CA
client_cert = os.path.join(cls.certs_dir, CLIENT_INTERMEDIATE_PEM)
cert.cert_chain_pems[0].write_to_path(client_cert)
cert.cert_chain_pems[1].write_to_path(client_cert, append=True)
# Write only the client cert
cert.cert_chain_pems[0].write_to_path(
os.path.join(cls.certs_dir, CLIENT_NO_INTERMEDIATE_PEM)
)
@classmethod
def teardown_class(cls) -> None:
super().teardown_class()
shutil.rmtree(cls.certs_dir)
def test_simple(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
@resolvesLocalhostFQDN()
def test_dotted_fqdn(self) -> None:
with HTTPSConnectionPool(
self.host + ".",
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/")
assert r.status == 200, r.data
def test_client_intermediate(self) -> None:
"""Check that certificate chains work well with client certs
We generate an intermediate CA from the root CA, and issue a client certificate
from that intermediate CA. Since the server only knows about the root CA, we
need to send it the certificate *and* the intermediate CA, so that it can check
the whole chain.
"""
with HTTPSConnectionPool(
self.host,
self.port,
key_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_KEY),
cert_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_PEM),
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/certificate")
subject = r.json()
assert subject["organizationalUnitName"].startswith("Testing cert")
def test_client_no_intermediate(self) -> None:
"""Check that missing links in certificate chains indeed break
The only difference with test_client_intermediate is that we don't send the
intermediate CA to the server, only the client cert.
"""
with HTTPSConnectionPool(
self.host,
self.port,
cert_file=os.path.join(self.certs_dir, CLIENT_NO_INTERMEDIATE_PEM),
key_file=os.path.join(self.certs_dir, CLIENT_INTERMEDIATE_KEY),
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises((SSLError, ProtocolError)):
https_pool.request("GET", "/certificate", retries=False)
@requires_ssl_context_keyfile_password()
def test_client_key_password(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
key_file=os.path.join(self.certs_dir, PASSWORD_CLIENT_KEYFILE),
cert_file=os.path.join(self.certs_dir, CLIENT_CERT),
key_password="letmein",
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/certificate")
subject = r.json()
assert subject["organizationalUnitName"].startswith("Testing cert")
@requires_ssl_context_keyfile_password()
def test_client_encrypted_key_requires_password(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
key_file=os.path.join(self.certs_dir, PASSWORD_CLIENT_KEYFILE),
cert_file=os.path.join(self.certs_dir, CLIENT_CERT),
key_password=None,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError, match="password is required") as e:
https_pool.request("GET", "/certificate")
assert isinstance(e.value.reason, SSLError)
def test_verified(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with warnings.catch_warnings(record=True) as w:
r = https_pool.request("GET", "/")
assert r.status == 200
assert w == []
def test_verified_with_context(self) -> None:
ctx = util.ssl_.create_urllib3_context(
cert_reqs=ssl.CERT_REQUIRED, ssl_minimum_version=self.tls_version()
)
ctx.load_verify_locations(cafile=DEFAULT_CA)
with HTTPSConnectionPool(self.host, self.port, ssl_context=ctx) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
assert not warn.called, warn.call_args_list
def test_context_combines_with_ca_certs(self) -> None:
ctx = util.ssl_.create_urllib3_context(
cert_reqs=ssl.CERT_REQUIRED, ssl_minimum_version=self.tls_version()
)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_context=ctx
) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with mock.patch("warnings.warn") as warn:
r = https_pool.request("GET", "/")
assert r.status == 200
assert not warn.called, warn.call_args_list
@notSecureTransport() # SecureTransport does not support cert directories
def test_ca_dir_verified(self, tmp_path: Path) -> None:
# OpenSSL looks up certificates by the hash for their name, see c_rehash
# TODO infer the bytes using `cryptography.x509.Name.public_bytes`.
# https://github.com/pyca/cryptography/pull/3236
shutil.copyfile(DEFAULT_CA, str(tmp_path / "81deb5f7.0"))
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_cert_dir=str(tmp_path),
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._new_conn()
assert conn.__class__ == VerifiedHTTPSConnection
with warnings.catch_warnings(record=True) as w:
r = https_pool.request("GET", "/")
assert r.status == 200
assert w == []
def test_invalid_common_name(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/", retries=0)
assert isinstance(e.value.reason, SSLError)
assert "doesn't match" in str(
e.value.reason
) or "certificate verify failed" in str(e.value.reason)
def test_verified_with_bad_ca_certs(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert isinstance(e.value.reason, SSLError)
assert (
"certificate verify failed" in str(e.value.reason)
# PyPy is more specific
or "self signed certificate in certificate chain" in str(e.value.reason)
), f"Expected 'certificate verify failed', instead got: {e.value.reason!r}"
def test_wrap_socket_failure_resource_leak(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._get_conn()
try:
with pytest.raises(ssl.SSLError):
conn.connect()
assert conn.sock
finally:
conn.close()
def test_verified_without_ca_certs(self) -> None:
# default is cert_reqs=None which is ssl.CERT_NONE
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert isinstance(e.value.reason, SSLError)
# there is a different error message depending on whether or
# not pyopenssl is injected
assert (
"No root certificates specified" in str(e.value.reason)
# PyPy is more specific
or "self signed certificate in certificate chain" in str(e.value.reason)
# PyPy sometimes uses all-caps here
or "certificate verify failed" in str(e.value.reason).lower()
or "invalid certificate chain" in str(e.value.reason)
), (
"Expected 'No root certificates specified', "
"'certificate verify failed', or "
"'invalid certificate chain', "
"instead got: %r" % e.value.reason
)
def test_no_ssl(self) -> None:
with HTTPSConnectionPool(self.host, self.port) as pool:
pool.ConnectionCls = None # type: ignore[assignment]
with pytest.raises(SSLError):
pool._new_conn()
with pytest.raises(MaxRetryError) as cm:
pool.request("GET", "/", retries=0)
assert isinstance(cm.value.reason, SSLError)
def test_unverified_ssl(self) -> None:
"""Test that bare HTTPSConnection can connect, make requests"""
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs=ssl.CERT_NONE,
ssl_minimum_version=self.tls_version(),
) as pool:
with mock.patch("warnings.warn") as warn:
r = pool.request("GET", "/")
assert r.status == 200
assert warn.called
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
assert InsecureRequestWarning in [x[0][1] for x in calls]
def test_ssl_unverified_with_ca_certs(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_NONE",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as pool:
with mock.patch("warnings.warn") as warn:
r = pool.request("GET", "/")
assert r.status == 200
assert warn.called
# Modern versions of Python, or systems using PyOpenSSL, only emit
# the unverified warning. Older systems may also emit other
# warnings, which we want to ignore here.
calls = warn.call_args_list
category = calls[0][0][1]
assert category == InsecureRequestWarning
def test_assert_hostname_false(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_hostname = False
https_pool.request("GET", "/")
def test_assert_specific_hostname(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_hostname = "localhost"
https_pool.request("GET", "/")
def test_server_hostname(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
server_hostname="localhost",
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._new_conn()
conn.request("GET", "/")
# Assert the wrapping socket is using the passed-through SNI name.
# pyopenssl doesn't let you pull the server_hostname back off the
# socket, so only add this assertion if the attribute is there (i.e.
# the python ssl module).
if hasattr(conn.sock, "server_hostname"):
assert conn.sock.server_hostname == "localhost"
def test_assert_fingerprint_md5(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"55:39:BF:70:05:12:43:FA:1F:D1:BF:4E:E8:1B:07:1D"
)
https_pool.request("GET", "/")
def test_assert_fingerprint_sha1(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
)
https_pool.request("GET", "/")
def test_assert_fingerprint_sha256(self) -> None:
with HTTPSConnectionPool(
"localhost",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"E3:59:8E:69:FF:C5:9F:C7:88:87:44:58:22:7F:90:8D:D9:BC:12:C4:90:79:D5:"
"DC:A8:5D:4F:60:40:1E:A6:D2"
)
https_pool.request("GET", "/")
def test_assert_invalid_fingerprint(self) -> None:
def _test_request(pool: HTTPSConnectionPool) -> SSLError:
with pytest.raises(MaxRetryError) as cm:
pool.request("GET", "/", retries=0)
assert isinstance(cm.value.reason, SSLError)
return cm.value.reason
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
)
e = _test_request(https_pool)
expected = "aaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaaa"
got = "728b554c9afc1e88a11cad1bb2e7cc3edbc8f98a"
assert (
str(e)
== f'Fingerprints did not match. Expected "{expected}", got "{got}"'
)
# Uneven length
https_pool.assert_fingerprint = "AA:A"
e = _test_request(https_pool)
assert "Fingerprint of invalid length:" in str(e)
# Invalid length
https_pool.assert_fingerprint = "AA"
e = _test_request(https_pool)
assert "Fingerprint of invalid length:" in str(e)
def test_verify_none_and_bad_fingerprint(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1", self.port, cert_reqs="CERT_NONE", ca_certs=self.bad_ca_path
) as https_pool:
https_pool.assert_fingerprint = (
"AA:AA:AA:AA:AA:AAAA:AA:AAAA:AA:AA:AA:AA:AA:AA:AA:AA:AA:AA"
)
with pytest.raises(MaxRetryError) as cm:
https_pool.request("GET", "/", retries=0)
assert isinstance(cm.value.reason, SSLError)
def test_verify_none_and_good_fingerprint(self) -> None:
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_NONE",
ca_certs=self.bad_ca_path,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
)
https_pool.request("GET", "/")
@notSecureTransport()
def test_good_fingerprint_and_hostname_mismatch(self) -> None:
# This test doesn't run with SecureTransport because we don't turn off
# hostname validation without turning off all validation, which this
# test doesn't do (deliberately). We should revisit this if we make
# new decisions.
with HTTPSConnectionPool(
"127.0.0.1",
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.assert_fingerprint = (
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
)
https_pool.request("GET", "/")
@requires_network()
def test_https_timeout(self) -> None:
timeout = Timeout(total=None, connect=SHORT_TIMEOUT)
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
timeout = Timeout(read=0.01)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
retries=False,
cert_reqs="CERT_REQUIRED",
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.ca_certs = DEFAULT_CA
https_pool.assert_fingerprint = (
"72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
)
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
cert_reqs="CERT_NONE",
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request("GET", "/")
def test_tunnel(self) -> None:
"""test the _tunnel behavior"""
timeout = Timeout(total=None)
with HTTPSConnectionPool(
self.host,
self.port,
timeout=timeout,
cert_reqs="CERT_NONE",
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._new_conn()
try:
conn.set_tunnel(self.host, self.port)
with mock.patch.object(
conn, "_tunnel", create=True, return_value=None
) as conn_tunnel:
https_pool._make_request(conn, "GET", "/")
conn_tunnel.assert_called_once_with()
finally:
conn.close()
@requires_network()
def test_enhanced_timeout(self) -> None:
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=SHORT_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
conn = https_pool._new_conn()
try:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/")
with pytest.raises(ConnectTimeoutError):
https_pool._make_request(conn, "GET", "/")
finally:
conn.close()
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(connect=LONG_TIMEOUT),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
with pytest.raises(ConnectTimeoutError):
https_pool.request("GET", "/", timeout=Timeout(connect=SHORT_TIMEOUT))
with HTTPSConnectionPool(
TARPIT_HOST,
self.port,
timeout=Timeout(total=None),
retries=False,
cert_reqs="CERT_REQUIRED",
) as https_pool:
conn = https_pool._new_conn()
try:
with pytest.raises(ConnectTimeoutError):
https_pool.request(
"GET", "/", timeout=Timeout(total=None, connect=SHORT_TIMEOUT)
)
finally:
conn.close()
def test_enhanced_ssl_connection(self) -> None:
fingerprint = "72:8B:55:4C:9A:FC:1E:88:A1:1C:AD:1B:B2:E7:CC:3E:DB:C8:F9:8A"
with HTTPSConnectionPool(
self.host,
self.port,
cert_reqs="CERT_REQUIRED",
ca_certs=DEFAULT_CA,
assert_fingerprint=fingerprint,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
def test_ssl_correct_system_time(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
w = self._request_without_resource_warnings("GET", "/")
assert [] == w
def test_ssl_wrong_system_time(self) -> None:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.cert_reqs = "CERT_REQUIRED"
https_pool.ca_certs = DEFAULT_CA
with mock.patch("urllib3.connection.datetime") as mock_date:
mock_date.date.today.return_value = datetime.date(1970, 1, 1)
w = self._request_without_resource_warnings("GET", "/")
assert len(w) == 1
warning = w[0]
assert SystemTimeWarning == warning.category
assert isinstance(warning.message, Warning)
assert str(RECENT_DATE) in warning.message.args[0]
def _request_without_resource_warnings(
self, method: str, url: str
) -> List[warnings.WarningMessage]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
https_pool.request(method, url)
w = [x for x in w if not isinstance(x.message, ResourceWarning)]
return w
def test_set_ssl_version_to_tls_version(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA
) as https_pool:
https_pool.ssl_version = self.certs["ssl_version"]
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
def test_set_cert_default_cert_required(self) -> None:
conn = VerifiedHTTPSConnection(self.host, self.port)
conn.set_cert()
assert conn.cert_reqs == ssl.CERT_REQUIRED
def test_tls_protocol_name_of_socket(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
conn = https_pool._get_conn()
try:
conn.connect()
if not hasattr(conn.sock, "version"):
pytest.skip("SSLSocket.version() not available")
assert conn.sock.version() == self.tls_protocol_name
finally:
conn.close()
def test_ssl_version_is_deprecated(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_version=self.ssl_version()
) as https_pool:
conn = https_pool._get_conn()
try:
with warnings.catch_warnings(record=True) as w:
conn.connect()
finally:
conn.close()
assert len(w) >= 1
assert any(x.category == DeprecationWarning for x in w)
assert any(
str(x.message)
== (
"'ssl_version' option is deprecated and will be removed in "
"a future release of urllib3 2.x. Instead use 'ssl_minimum_version'"
)
for x in w
)
@pytest.mark.parametrize(
"ssl_version", [None, ssl.PROTOCOL_TLS, ssl.PROTOCOL_TLS_CLIENT]
)
def test_ssl_version_with_protocol_tls_or_client_not_deprecated(
self, ssl_version: Optional[int]
) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
if self.tls_protocol_not_default():
pytest.skip(
f"Skipping because '{self.tls_protocol_name}' isn't set by default"
)
with HTTPSConnectionPool(
self.host, self.port, ca_certs=DEFAULT_CA, ssl_version=ssl_version
) as https_pool:
conn = https_pool._get_conn()
try:
with warnings.catch_warnings(record=True) as w:
conn.connect()
finally:
conn.close()
assert w == []
def test_no_tls_version_deprecation_with_ssl_context(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
ctx = util.ssl_.create_urllib3_context(ssl_minimum_version=self.tls_version())
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_context=ctx,
) as https_pool:
conn = https_pool._get_conn()
try:
with warnings.catch_warnings(record=True) as w:
conn.connect()
finally:
conn.close()
assert w == []
def test_tls_version_maximum_and_minimum(self) -> None:
if self.tls_protocol_name is None:
pytest.skip("Skipping base test class")
from ssl import TLSVersion
min_max_versions = [
(self.tls_version(), self.tls_version()),
(TLSVersion.MINIMUM_SUPPORTED, self.tls_version()),
(TLSVersion.MINIMUM_SUPPORTED, TLSVersion.MAXIMUM_SUPPORTED),
]
for minimum_version, maximum_version in min_max_versions:
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=minimum_version,
ssl_maximum_version=maximum_version,
) as https_pool:
conn = https_pool._get_conn()
try:
conn.connect()
assert conn.sock.version() == self.tls_protocol_name
finally:
conn.close()
@pytest.mark.skipif(sys.version_info < (3, 8), reason="requires python 3.8+")
def test_sslkeylogfile(
self, tmp_path: Path, monkeypatch: pytest.MonkeyPatch
) -> None:
if not hasattr(util.SSLContext, "keylog_filename"):
pytest.skip("requires OpenSSL 1.1.1+")
keylog_file = tmp_path / "keylogfile.txt"
monkeypatch.setenv("SSLKEYLOGFILE", str(keylog_file))
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200, r.data
assert keylog_file.is_file(), "keylogfile '%s' should exist" % str(
keylog_file
)
assert keylog_file.read_text().startswith(
"# TLS secrets log file"
), "keylogfile '%s' should start with '# TLS secrets log file'" % str(
keylog_file
)
@pytest.mark.parametrize("sslkeylogfile", [None, ""])
def test_sslkeylogfile_empty(
self, monkeypatch: pytest.MonkeyPatch, sslkeylogfile: Optional[str]
) -> None:
# Assert that an HTTPS connection doesn't error out when given
# no SSLKEYLOGFILE or an empty value (ie 'SSLKEYLOGFILE=')
if sslkeylogfile is not None:
monkeypatch.setenv("SSLKEYLOGFILE", sslkeylogfile)
else:
monkeypatch.delenv("SSLKEYLOGFILE", raising=False)
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/")
assert r.status == 200, r.data
def test_alpn_default(self) -> None:
"""Default ALPN protocols are sent by default."""
if not has_alpn() or not has_alpn(ssl.SSLContext):
pytest.skip("ALPN-support not available")
with HTTPSConnectionPool(
self.host,
self.port,
ca_certs=DEFAULT_CA,
ssl_minimum_version=self.tls_version(),
) as pool:
r = pool.request("GET", "/alpn_protocol", retries=0)
assert r.status == 200
assert r.data.decode("utf-8") == util.ALPN_PROTOCOLS[0]
def test_default_ssl_context_ssl_min_max_versions(self) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
assert ctx.minimum_version == ssl.TLSVersion.TLSv1_2
assert ctx.maximum_version == ssl.TLSVersion.MAXIMUM_SUPPORTED
def test_ssl_context_ssl_version_uses_ssl_min_max_versions(self) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context(ssl_version=self.ssl_version())
assert ctx.minimum_version == self.tls_version()
assert ctx.maximum_version == self.tls_version()
@pytest.mark.usefixtures("requires_tlsv1")
class TestHTTPS_TLSv1(TestHTTPS):
tls_protocol_name = "TLSv1"
certs = TLSv1_CERTS
@pytest.mark.usefixtures("requires_tlsv1_1")
class TestHTTPS_TLSv1_1(TestHTTPS):
tls_protocol_name = "TLSv1.1"
certs = TLSv1_1_CERTS
@pytest.mark.usefixtures("requires_tlsv1_2")
class TestHTTPS_TLSv1_2(TestHTTPS):
tls_protocol_name = "TLSv1.2"
certs = TLSv1_2_CERTS
@pytest.mark.usefixtures("requires_tlsv1_3")
class TestHTTPS_TLSv1_3(TestHTTPS):
tls_protocol_name = "TLSv1.3"
certs = TLSv1_3_CERTS
class TestHTTPS_Hostname:
def test_can_validate_san(self, san_server: ServerConfig) -> None:
"""Ensure that urllib3 can validate SANs with IP addresses in them."""
with HTTPSConnectionPool(
san_server.host,
san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=san_server.ca_certs,
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
def test_common_name_without_san_fails(self, no_san_server: ServerConfig) -> None:
with HTTPSConnectionPool(
no_san_server.host,
no_san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server.ca_certs,
) as https_pool:
with pytest.raises(
MaxRetryError,
) as e:
https_pool.request("GET", "/")
assert "mismatch, certificate is not valid" in str(
e.value
) or "no appropriate subjectAltName" in str(e.value)
def test_common_name_without_san_with_different_common_name(
self, no_san_server_with_different_commmon_name: ServerConfig
) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
try:
ctx.hostname_checks_common_name = True
except AttributeError:
pytest.skip("Couldn't set 'SSLContext.hostname_checks_common_name'")
with HTTPSConnectionPool(
no_san_server_with_different_commmon_name.host,
no_san_server_with_different_commmon_name.port,
cert_reqs="CERT_REQUIRED",
ca_certs=no_san_server_with_different_commmon_name.ca_certs,
ssl_context=ctx,
) as https_pool:
with pytest.raises(MaxRetryError) as e:
https_pool.request("GET", "/")
assert "mismatch, certificate is not valid for 'localhost'" in str(
e.value
) or "hostname 'localhost' doesn't match 'example.com'" in str(e.value)
@pytest.mark.parametrize("use_assert_hostname", [True, False])
def test_hostname_checks_common_name_respected(
self, no_san_server: ServerConfig, use_assert_hostname: bool
) -> None:
ctx = urllib3.util.ssl_.create_urllib3_context()
if not hasattr(ctx, "hostname_checks_common_name"):
pytest.skip("Test requires 'SSLContext.hostname_checks_common_name'")
ctx.load_verify_locations(no_san_server.ca_certs)
try:
ctx.hostname_checks_common_name = True
except AttributeError:
pytest.skip("Couldn't set 'SSLContext.hostname_checks_common_name'")
err: Optional[MaxRetryError]
try:
with HTTPSConnectionPool(
no_san_server.host,
no_san_server.port,
cert_reqs="CERT_REQUIRED",
ssl_context=ctx,
assert_hostname=no_san_server.host if use_assert_hostname else None,
) as https_pool:
https_pool.request("GET", "/")
except MaxRetryError as e:
err = e
else:
err = None
# commonName is only valid for DNS names, not IP addresses.
if no_san_server.host == "localhost":
assert err is None
# IP addresses should fail for commonName.
else:
assert err is not None
assert type(err.reason) == SSLError
assert isinstance(
err.reason.args[0], (ssl.SSLCertVerificationError, CertificateError)
)
class TestHTTPS_IPV4SAN:
def test_can_validate_ip_san(self, ipv4_san_server: ServerConfig) -> None:
"""Ensure that urllib3 can validate SANs with IP addresses in them."""
with HTTPSConnectionPool(
ipv4_san_server.host,
ipv4_san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=ipv4_san_server.ca_certs,
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
class TestHTTPS_IPV6SAN:
@pytest.mark.parametrize("host", ["::1", "[::1]"])
def test_can_validate_ipv6_san(
self, ipv6_san_server: ServerConfig, host: str
) -> None:
"""Ensure that urllib3 can validate SANs with IPv6 addresses in them."""
with HTTPSConnectionPool(
host,
ipv6_san_server.port,
cert_reqs="CERT_REQUIRED",
ca_certs=ipv6_san_server.ca_certs,
) as https_pool:
r = https_pool.request("GET", "/")
assert r.status == 200
|
the-stack_0_23169 | import argparse
try:
import configparser
except ImportError:
import ConfigParser as configparser
import os
import numpy as np
import inspect
import random
import parsing_utils
from helper_utils import eval_string_as_list_of_lists
def set_seed(seed):
"""Set the seed of the pseudo-random generator to the specified value.
Parameters
----------
seed : int
        Value to initialize or re-seed the generator.
"""
os.environ['PYTHONHASHSEED'] = '0'
np.random.seed(seed)
random.seed(seed)
class Benchmark:
""" Class that implements an interface to handle configuration options for the
different CANDLE benchmarks.
It provides access to all the common configuration
options and configuration options particular to each individual benchmark.
It describes what minimum requirements should be specified to instantiate
the corresponding benchmark.
It interacts with the argparser to extract command-line options and arguments
from the benchmark's configuration files.
"""
def __init__(self, filepath, defmodel, framework, prog=None, desc=None, parser=None):
""" Initialize Benchmark object.
Parameters
----------
filepath : ./
os.path.dirname where the benchmark is located. Necessary to locate utils and
            establish input/output paths
defmodel : 'p*b*_default_model.txt'
string corresponding to the default model of the benchmark
framework : 'keras', 'neon', 'mxnet', 'pytorch'
framework used to run the benchmark
prog : 'p*b*_baseline_*'
string for program name (usually associated to benchmark and framework)
desc : ' '
string describing benchmark (usually a description of the neural network model built)
parser : argparser (default None)
if 'neon' framework a NeonArgparser is passed. Otherwise an argparser is constructed.
"""
if parser is None:
parser = argparse.ArgumentParser(prog=prog, formatter_class=argparse.ArgumentDefaultsHelpFormatter, description=desc, conflict_handler='resolve')
self.parser = parser
self.file_path = filepath
self.default_model = defmodel
self.framework = framework
self.registered_conf = []
for lst in parsing_utils.registered_conf:
self.registered_conf.extend(lst)
self.required = set([])
self.additional_definitions = []
self.set_locals()
def parse_parameters(self):
"""Functionality to parse options common
for all benchmarks.
This functionality is based on methods 'get_default_neon_parser' and
        'get_common_parser', which are defined previously (above). If the order changes
        or they are moved, the calls have to be updated.
"""
# Parse has been split between arguments that are common with the default neon parser
# and all the other options
self.parser = parsing_utils.parse_common(self.parser)
self.parser = parsing_utils.parse_from_dictlist(self.additional_definitions, self.parser)
# Set default configuration file
self.conffile = os.path.join(self.file_path, self.default_model)
def format_benchmark_config_arguments(self, dictfileparam):
""" Functionality to format the particular parameters of
the benchmark.
Parameters
----------
dictfileparam : python dictionary
parameters read from configuration file
args : python dictionary
parameters read from command-line
Most of the time command-line overwrites configuration file
except when the command-line is using default values and
config file defines those values
"""
configOut = dictfileparam.copy()
kwall = self.additional_definitions + self.registered_conf
for d in kwall: # self.additional_definitions:
if d['name'] in configOut.keys():
if 'type' in d:
dtype = d['type']
else:
dtype = None
if 'action' in d:
if inspect.isclass(d['action']):
str_read = dictfileparam[d['name']]
configOut[d['name']] = eval_string_as_list_of_lists(str_read, ':', ',', dtype)
elif d['default'] != argparse.SUPPRESS:
# default value on benchmark definition cannot overwrite config file
self.parser.add_argument('--' + d['name'],
type=d['type'],
default=configOut[d['name']],
help=d['help'])
return configOut
def read_config_file(self, file):
"""Functionality to read the configue file
specific for each benchmark.
"""
config = configparser.ConfigParser()
config.read(file)
section = config.sections()
fileParams = {}
# parse specified arguments (minimal validation: if arguments
# are written several times in the file, just the first time
# will be used)
for sec in section:
for k, v in config.items(sec):
# if not k in fileParams:
if k not in fileParams:
fileParams[k] = eval(v)
fileParams = self.format_benchmark_config_arguments(fileParams)
print(fileParams)
return fileParams
def set_locals(self):
""" Functionality to set variables specific for the benchmark
- required: set of required parameters for the benchmark.
- additional_definitions: list of dictionaries describing \
the additional parameters for the benchmark.
"""
pass
def check_required_exists(self, gparam):
"""Functionality to verify that the required
model parameters have been specified.
"""
key_set = set(gparam.keys())
intersect_set = key_set.intersection(self.required)
diff_set = self.required.difference(intersect_set)
if (len(diff_set) > 0):
raise Exception(
'ERROR ! Required parameters are not specified. These required parameters have not been initialized: ' + str(sorted(diff_set)) + '... Exiting')
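# A minimal subclass sketch (illustrative only; the class name and the parameter entries
# below are hypothetical, not part of this module). A concrete benchmark overrides
# set_locals() to declare its required keys and the extra definitions consumed by
# parse_parameters() and format_benchmark_config_arguments().
class ExampleBenchmark(Benchmark):
    def set_locals(self):
        self.required = {'epochs', 'batch_size'}
        self.additional_definitions = [
            {'name': 'dropout', 'type': float, 'default': 0.0,
             'help': 'dropout rate applied to the model'},
        ]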
|
the-stack_0_23170 | import numpy as np
import cv2 as cv
img1 = cv.imread('2.png')
img2 = cv.imread('3.png')
add = img1 + img2
# cv.add() is another way to combine the images: it adds the pixel values with saturation,
# which produces so much white that the result is hard to make out.
####### add = cv.add(img1,img2) #########
# cv.addWeighted() blends two images by their weights: 1. first image, 2. weight of the first image,
# 3. second image, 4. weight of the second image, 5. gamma (a scalar added to every pixel).
weight = cv.addWeighted(img1, 0.4, img2, 0.6, 0)
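# For intuition, a rough manual equivalent (illustrative sketch, not part of the original script):
# cv.addWeighted(src1, alpha, src2, beta, gamma) computes src1*alpha + src2*beta + gamma and
# saturates the result to the uint8 range, so the call above is close to:
manual_weight = np.clip(img1.astype(np.float64) * 0.4 + img2.astype(np.float64) * 0.6 + 0, 0, 255).astype(np.uint8)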
cv.imshow('add',add)
cv.imshow('weight', weight)
cv.waitKey(0)
cv.destroyAllWindows() |
the-stack_0_23173 | import platform
import collections
from openpype.modules import OpenPypeModule
from openpype_interfaces import (
ITrayService,
IIdleManager
)
class IdleManager(OpenPypeModule, ITrayService):
""" Measure user's idle time in seconds.
Idle time resets on keyboard/mouse input.
Is able to emit signals at specific time idle.
"""
label = "Idle Service"
name = "idle_manager"
def initialize(self, module_settings):
enabled = True
# Ignore on MacOs
# - pynput need root permissions and enabled access for application
if platform.system().lower() == "darwin":
enabled = False
self.enabled = enabled
self.time_callbacks = collections.defaultdict(list)
self.idle_thread = None
def tray_init(self):
return
def tray_start(self):
if self.time_callbacks:
self.start_thread()
def tray_exit(self):
self.stop_thread()
try:
self.time_callbacks = {}
except Exception:
pass
def connect_with_modules(self, enabled_modules):
for module in enabled_modules:
if not isinstance(module, IIdleManager):
continue
module.idle_manager = self
callbacks_items = module.callbacks_by_idle_time() or {}
for emit_time, callbacks in callbacks_items.items():
if not isinstance(callbacks, (tuple, list, set)):
callbacks = [callbacks]
self.time_callbacks[emit_time].extend(callbacks)
@property
def idle_time(self):
if self.idle_thread and self.idle_thread.is_running:
return self.idle_thread.idle_time
def _create_thread(self):
from .idle_threads import IdleManagerThread
return IdleManagerThread(self)
def start_thread(self):
if self.idle_thread:
self.idle_thread.stop()
self.idle_thread.join()
self.idle_thread = self._create_thread()
self.idle_thread.start()
def stop_thread(self):
if self.idle_thread:
self.idle_thread.stop()
self.idle_thread.join()
def on_thread_stop(self):
self.set_service_failed_icon()
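# Illustrative sketch only (hypothetical class, not part of OpenPype): connect_with_modules()
# above expects each enabled IIdleManager module to expose callbacks_by_idle_time(), returning
# a mapping of idle seconds to a callback or a list of callbacks.
class _ExampleIdleConsumer(IIdleManager):
    def _notify_idle(self):
        print("User has been idle for 5 minutes")

    def callbacks_by_idle_time(self):
        # Trigger one callback after 300 seconds of idle time and two after 600 seconds
        return {
            300: self._notify_idle,
            600: [self._notify_idle, self._notify_idle],
        }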
|
the-stack_0_23176 | #!/usr/bin/env python
#
# $Id$
#
# Copyright (c) 2009, Jay Loden, Giampaolo Rodola'. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
"""
A clone of 'free' cmdline utility.
"""
import psutil
from psutil._compat import print_
def main():
virt = psutil.virtual_memory()
swap = psutil.swap_memory()
templ = "%-7s %10s %10s %10s %10s %10s %10s"
print_(templ % ('', 'total', 'used', 'free', 'shared', 'buffers', 'cache'))
print_(templ % ('Mem:', int(virt.total / 1024),
int(virt.used / 1024),
int(virt.free / 1024),
int(getattr(virt, 'shared', 0) / 1024),
int(getattr(virt, 'buffers', 0) / 1024),
int(getattr(virt, 'cached', 0) / 1024)))
print_(templ % ('Swap:', int(swap.total / 1024),
int(swap.used / 1024),
int(swap.free / 1024),
'', '', ''))
if __name__ == '__main__':
main()
|
the-stack_0_23178 | from pymongo import MongoClient
# Get database and collection
client = MongoClient()
db = client.test
all_anime = db.anime_info
# Checks if the anime exists in the database
def in_db(title):
if all_anime.find_one({"title": title}) is not None:
return True
return False
# Add anime to database
def add(title, url, alt_titles=[], episodes=[], resolution=[]):
all_anime.insert_one({
"title": title,
"url": url,
"alt_titles": alt_titles,
"episodes": episodes,
"resolution": resolution
})
# Update the anime list
def update_anime(anime_list):
for anime in anime_list:
title = anime.string.strip()
url = "https://kissanime.to" + anime["href"]
if not in_db(title):
add(title, url)
# Get all anime
def get_all():
return all_anime.find()
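# A minimal usage sketch (illustrative only; the title and URL below are made up, not real data):
def example_usage():
    if not in_db("Example Anime"):
        add("Example Anime", "https://kissanime.to/Anime/example-anime")
    for anime in get_all():
        print(anime["title"], anime["url"])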
|
the-stack_0_23179 | # Copyright 2017 The Armada Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import difflib
import time
import yaml
from oslo_config import cfg
from oslo_log import log as logging
from armada.handlers.chartbuilder import ChartBuilder
from armada.handlers.manifest import Manifest
from armada.handlers.override import Override
from armada.handlers.tiller import Tiller
from armada.exceptions.armada_exceptions import ArmadaTimeoutException
from armada.exceptions import source_exceptions
from armada.exceptions import validate_exceptions
from armada.exceptions import tiller_exceptions
from armada.utils.release import release_prefix
from armada.utils import source
from armada.utils import validate
from armada.const import DEFAULT_CHART_TIMEOUT
from armada.const import KEYWORD_ARMADA
from armada.const import KEYWORD_CHARTS
from armada.const import KEYWORD_GROUPS
from armada.const import KEYWORD_PREFIX
from armada.const import STATUS_FAILED
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
class Armada(object):
'''
This is the main Armada class handling the Armada
workflows
'''
def __init__(self,
documents,
disable_update_pre=False,
disable_update_post=False,
enable_chart_cleanup=False,
dry_run=False,
set_ovr=None,
force_wait=False,
timeout=0,
tiller_host=None,
tiller_port=None,
tiller_namespace=None,
values=None,
target_manifest=None,
k8s_wait_attempts=1,
k8s_wait_attempt_sleep=1):
'''
Initialize the Armada engine and establish a connection to Tiller.
:param List[dict] documents: Armada documents.
:param bool disable_update_pre: Disable pre-update Tiller operations.
:param bool disable_update_post: Disable post-update Tiller
operations.
:param bool enable_chart_cleanup: Clean up unmanaged charts.
:param bool dry_run: Run charts without installing them.
:param bool force_wait: Force Tiller to wait until all charts are
deployed, rather than using each chart's specified wait policy.
:param int timeout: Specifies overall time in seconds that Tiller
should wait for charts until timing out.
:param str tiller_host: Tiller host IP. Default is None.
:param int tiller_port: Tiller host port. Default is
``CONF.tiller_port``.
:param str tiller_namespace: Tiller host namespace. Default is
``CONF.tiller_namespace``.
:param str target_manifest: The target manifest to run. Useful for
specifying which manifest to run when multiple are available.
:param int k8s_wait_attempts: The number of times to attempt waiting
for pods to become ready.
:param int k8s_wait_attempt_sleep: The time in seconds to sleep
between attempts.
'''
tiller_port = tiller_port or CONF.tiller_port
tiller_namespace = tiller_namespace or CONF.tiller_namespace
self.disable_update_pre = disable_update_pre
self.disable_update_post = disable_update_post
self.enable_chart_cleanup = enable_chart_cleanup
self.dry_run = dry_run
self.force_wait = force_wait
self.timeout = timeout
self.tiller = Tiller(
tiller_host=tiller_host, tiller_port=tiller_port,
tiller_namespace=tiller_namespace)
self.documents = Override(
documents, overrides=set_ovr,
values=values).update_manifests()
self.k8s_wait_attempts = k8s_wait_attempts
self.k8s_wait_attempt_sleep = k8s_wait_attempt_sleep
self.manifest = Manifest(
self.documents,
target_manifest=target_manifest).get_manifest()
def find_release_chart(self, known_releases, name):
'''
Find a release given a list of known_releases and a release name
'''
for chart_name, _, chart, values, _ in known_releases:
if chart_name == name:
return chart, values
def pre_flight_ops(self):
"""Perform a series of checks and operations to ensure proper
deployment.
"""
LOG.info("Performing pre-flight operations.")
# Ensure Tiller is available and manifest is valid
if not self.tiller.tiller_status():
raise tiller_exceptions.TillerServicesUnavailableException()
valid, details = validate.validate_armada_documents(self.documents)
if details:
for msg in details:
if msg.get('error', False):
LOG.error(msg.get('message', 'Unknown validation error.'))
else:
LOG.debug(msg.get('message', 'Validation succeeded.'))
if not valid:
raise validate_exceptions.InvalidManifestException(
error_messages=details)
result, msg_list = validate.validate_armada_manifests(self.documents)
if not result:
raise validate_exceptions.InvalidArmadaObjectException(
details=','.join([m.get('message') for m in msg_list]))
# Purge known releases that have failed and are in the current yaml
manifest_data = self.manifest.get(KEYWORD_ARMADA, {})
prefix = manifest_data.get(KEYWORD_PREFIX, '')
failed_releases = self.get_releases_by_status(STATUS_FAILED)
for release in failed_releases:
for group in manifest_data.get(KEYWORD_GROUPS, []):
for ch in group.get(KEYWORD_CHARTS, []):
ch_release_name = release_prefix(
prefix, ch.get('chart', {}).get('chart_name'))
if release[0] == ch_release_name:
LOG.info('Purging failed release %s '
'before deployment', release[0])
self.tiller.uninstall_release(release[0])
# Clone the chart sources
#
# We only support a git source type right now, which can also
# handle git:// local paths as well
repos = {}
for group in manifest_data.get(KEYWORD_GROUPS, []):
for ch in group.get(KEYWORD_CHARTS, []):
self.tag_cloned_repo(ch, repos)
for dep in ch.get('chart', {}).get('dependencies', []):
self.tag_cloned_repo(dep, repos)
def tag_cloned_repo(self, ch, repos):
chart = ch.get('chart', {})
chart_source = chart.get('source', {})
location = chart_source.get('location')
ct_type = chart_source.get('type')
subpath = chart_source.get('subpath', '.')
if ct_type == 'local':
chart['source_dir'] = (location, subpath)
elif ct_type == 'tar':
LOG.info('Downloading tarball from: %s', location)
if not CONF.certs:
LOG.warn(
'Disabling server validation certs to extract charts')
tarball_dir = source.get_tarball(location, verify=False)
else:
tarball_dir = source.get_tarball(location, verify=CONF.cert)
chart['source_dir'] = (tarball_dir, subpath)
elif ct_type == 'git':
reference = chart_source.get('reference', 'master')
repo_branch = (location, reference)
if repo_branch not in repos:
auth_method = chart_source.get('auth_method')
proxy_server = chart_source.get('proxy_server')
logstr = 'Cloning repo: {} from branch: {}'.format(
*repo_branch)
if proxy_server:
logstr += ' proxy: {}'.format(proxy_server)
if auth_method:
logstr += ' auth method: {}'.format(auth_method)
LOG.info(logstr)
repo_dir = source.git_clone(*repo_branch,
proxy_server=proxy_server,
auth_method=auth_method)
repos[repo_branch] = repo_dir
chart['source_dir'] = (repo_dir, subpath)
else:
chart['source_dir'] = (repos.get(repo_branch), subpath)
else:
chart_name = chart.get('chart_name')
raise source_exceptions.ChartSourceException(ct_type, chart_name)
def get_releases_by_status(self, status):
'''
:params status - status string to filter releases on
Return a list of current releases with a specified status
'''
filtered_releases = []
known_releases = self.tiller.list_charts()
for release in known_releases:
if release[4] == status:
filtered_releases.append(release)
return filtered_releases
def sync(self):
'''
Synchronize Helm with the Armada Config(s)
'''
msg = {'install': [], 'upgrade': [], 'diff': []}
        # TODO: (gardlt) we need to break up this function into
        # a cleaner format
self.pre_flight_ops()
# extract known charts on tiller right now
known_releases = self.tiller.list_charts()
manifest_data = self.manifest.get(KEYWORD_ARMADA, {})
prefix = manifest_data.get(KEYWORD_PREFIX, '')
for chartgroup in manifest_data.get(KEYWORD_GROUPS, []):
cg_name = chartgroup.get('name', '<missing name>')
cg_desc = chartgroup.get('description', '<missing description>')
LOG.info('Processing ChartGroup: %s (%s)', cg_name, cg_desc)
cg_sequenced = chartgroup.get('sequenced', False)
cg_test_all_charts = chartgroup.get('test_charts', False)
namespaces_seen = set()
tests_to_run = []
cg_charts = chartgroup.get(KEYWORD_CHARTS, [])
# Track largest Chart timeout to stop the ChartGroup at the end
cg_max_timeout = 0
for chart_entry in cg_charts:
chart = chart_entry.get('chart', {})
namespace = chart.get('namespace')
release = chart.get('release')
values = chart.get('values', {})
pre_actions = {}
post_actions = {}
wait_timeout = self.timeout
wait_labels = {}
release_name = release_prefix(prefix, release)
# Retrieve appropriate timeout value
if wait_timeout <= 0:
# TODO(MarshM): chart's `data.timeout` should be deprecated
chart_timeout = chart.get('timeout', 0)
# Favor data.wait.timeout over data.timeout, until removed
wait_values = chart.get('wait', {})
wait_timeout = wait_values.get('timeout', chart_timeout)
wait_labels = wait_values.get('labels', {})
this_chart_should_wait = (
cg_sequenced or self.force_wait or
wait_timeout > 0 or len(wait_labels) > 0)
if this_chart_should_wait and wait_timeout <= 0:
LOG.warn('No Chart timeout specified, using default: %ss',
DEFAULT_CHART_TIMEOUT)
wait_timeout = DEFAULT_CHART_TIMEOUT
# Track namespaces + labels touched
namespaces_seen.add((namespace, tuple(wait_labels.items())))
# Naively take largest timeout to apply at end
# TODO(MarshM) better handling of timeout/timer
cg_max_timeout = max(wait_timeout, cg_max_timeout)
# Chart test policy can override ChartGroup, if specified
test_this_chart = chart.get('test', cg_test_all_charts)
chartbuilder = ChartBuilder(chart)
protoc_chart = chartbuilder.get_helm_chart()
deployed_releases = [x[0] for x in known_releases]
# Begin Chart timeout deadline
deadline = time.time() + wait_timeout
# TODO(mark-burnett): It may be more robust to directly call
# tiller status to decide whether to install/upgrade rather
# than checking for list membership.
if release_name in deployed_releases:
# indicate to the end user what path we are taking
LOG.info("Upgrading release %s in namespace %s",
release_name, namespace)
# extract the installed chart and installed values from the
# latest release so we can compare to the intended state
apply_chart, apply_values = self.find_release_chart(
known_releases, release_name)
upgrade = chart.get('upgrade', {})
disable_hooks = upgrade.get('no_hooks', False)
LOG.info("Checking Pre/Post Actions")
if upgrade:
upgrade_pre = upgrade.get('pre', {})
upgrade_post = upgrade.get('post', {})
if not self.disable_update_pre and upgrade_pre:
pre_actions = upgrade_pre
if not self.disable_update_post and upgrade_post:
post_actions = upgrade_post
# Show delta for both the chart templates and the chart
# values
# TODO(alanmeadows) account for .files differences
# once we support those
LOG.info('Checking upgrade chart diffs.')
upgrade_diff = self.show_diff(
chart, apply_chart, apply_values,
chartbuilder.dump(), values, msg)
if not upgrade_diff:
LOG.info("There are no updates found in this chart")
continue
# TODO(MarshM): Add tiller dry-run before upgrade and
# consider deadline impacts
# do actual update
timer = int(round(deadline - time.time()))
LOG.info('Beginning Upgrade, wait=%s, timeout=%ss',
this_chart_should_wait, timer)
tiller_result = self.tiller.update_release(
protoc_chart,
release_name,
namespace,
pre_actions=pre_actions,
post_actions=post_actions,
dry_run=self.dry_run,
disable_hooks=disable_hooks,
values=yaml.safe_dump(values),
wait=this_chart_should_wait,
timeout=timer)
if this_chart_should_wait:
self.tiller.k8s.wait_until_ready(
release=release_name,
labels=wait_labels,
namespace=namespace,
k8s_wait_attempts=self.k8s_wait_attempts,
k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
timeout=timer
)
LOG.info('Upgrade completed with results from Tiller: %s',
tiller_result.__dict__)
msg['upgrade'].append(release_name)
# process install
else:
LOG.info("Installing release %s in namespace %s",
release_name, namespace)
timer = int(round(deadline - time.time()))
LOG.info('Beginning Install, wait=%s, timeout=%ss',
this_chart_should_wait, timer)
tiller_result = self.tiller.install_release(
protoc_chart,
release_name,
namespace,
dry_run=self.dry_run,
values=yaml.safe_dump(values),
wait=this_chart_should_wait,
timeout=timer)
if this_chart_should_wait:
self.tiller.k8s.wait_until_ready(
release=release_name,
labels=wait_labels,
namespace=namespace,
k8s_wait_attempts=self.k8s_wait_attempts,
k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
timeout=timer
)
LOG.info('Install completed with results from Tiller: %s',
tiller_result.__dict__)
msg['install'].append(release_name)
# Sequenced ChartGroup should run tests after each Chart
timer = int(round(deadline - time.time()))
if test_this_chart and cg_sequenced:
LOG.info('Running sequenced test, timeout remaining: %ss.',
timer)
if timer <= 0:
reason = ('Timeout expired before testing sequenced '
'release %s' % release_name)
LOG.error(reason)
raise ArmadaTimeoutException(reason)
self._test_chart(release_name, timer)
# Un-sequenced ChartGroup should run tests at the end
elif test_this_chart:
# Keeping track of time remaining
tests_to_run.append((release_name, timer))
# End of Charts in ChartGroup
LOG.info('All Charts applied.')
# After all Charts are applied, we should wait for the entire
# ChartGroup to become healthy by looking at the namespaces seen
# TODO(MarshM): Need to restrict to only releases we processed
# TODO(MarshM): Need to determine a better timeout
# (not cg_max_timeout)
if cg_max_timeout <= 0:
cg_max_timeout = DEFAULT_CHART_TIMEOUT
deadline = time.time() + cg_max_timeout
for (ns, labels) in namespaces_seen:
labels_dict = dict(labels)
timer = int(round(deadline - time.time()))
LOG.info('Final wait for healthy namespace (%s), label=(%s), '
'timeout remaining: %ss.', ns, labels_dict, timer)
if timer <= 0:
reason = ('Timeout expired waiting on namespace: %s, '
'label: %s' % (ns, labels_dict))
LOG.error(reason)
raise ArmadaTimeoutException(reason)
self.tiller.k8s.wait_until_ready(
namespace=ns,
labels=labels_dict,
k8s_wait_attempts=self.k8s_wait_attempts,
k8s_wait_attempt_sleep=self.k8s_wait_attempt_sleep,
timeout=timer)
# After entire ChartGroup is healthy, run any pending tests
for (test, test_timer) in tests_to_run:
self._test_chart(test, test_timer)
LOG.info("Performing Post-Flight Operations")
self.post_flight_ops()
if self.enable_chart_cleanup:
self.tiller.chart_cleanup(
prefix,
self.manifest[KEYWORD_ARMADA][KEYWORD_GROUPS])
return msg
def post_flight_ops(self):
'''
Operations to run after deployment process has terminated
'''
# Delete temp dirs used for deployment
for group in self.manifest.get(KEYWORD_ARMADA, {}).get(
KEYWORD_GROUPS, []):
for ch in group.get(KEYWORD_CHARTS, []):
chart = ch.get('chart', {})
if chart.get('source', {}).get('type') == 'git':
source_dir = chart.get('source_dir')
if isinstance(source_dir, tuple) and source_dir:
source.source_cleanup(source_dir[0])
def _test_chart(self, release_name, timeout):
# TODO(MarshM): Fix testing, it's broken, and track timeout
resp = self.tiller.testing_release(release_name, timeout=timeout)
status = getattr(resp.info.status, 'last_test_suite_run', 'FAILED')
LOG.info("Test INFO: %s", status)
if resp:
LOG.info("PASSED: %s", release_name)
return True
else:
LOG.info("FAILED: %s", release_name)
return False
def show_diff(self, chart, installed_chart, installed_values, target_chart,
target_values, msg):
'''Produce a unified diff of the installed chart vs our intention'''
# TODO(MarshM) This gives decent output comparing values. Would be
# nice to clean it up further. Are \\n or \n\n ever valid diffs?
# Can these be cleanly converted to dicts, for easier compare?
def _sanitize_diff_str(str):
return str.replace('\\n', '\n').replace('\n\n', '\n').split('\n')
source = _sanitize_diff_str(str(installed_chart.SerializeToString()))
target = _sanitize_diff_str(str(target_chart))
chart_diff = list(difflib.unified_diff(source, target, n=0))
chart_release = chart.get('release', None)
if len(chart_diff) > 0:
LOG.info("Found diff in Chart (%s)", chart_release)
diff_msg = []
for line in chart_diff:
diff_msg.append(line)
msg['diff'].append({'chart': diff_msg})
pretty_diff = '\n'.join(diff_msg)
LOG.debug(pretty_diff)
source = _sanitize_diff_str(installed_values)
target = _sanitize_diff_str(yaml.safe_dump(target_values))
values_diff = list(difflib.unified_diff(source, target, n=0))
if len(values_diff) > 0:
LOG.info("Found diff in values (%s)", chart_release)
diff_msg = []
for line in values_diff:
diff_msg.append(line)
msg['diff'].append({'values': diff_msg})
pretty_diff = '\n'.join(diff_msg)
LOG.debug(pretty_diff)
result = (len(chart_diff) > 0) or (len(values_diff) > 0)
return result
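    # Note on the `msg` argument consumed by show_diff above (inferred from its use
    # here; the caller owns the real structure): it must carry a 'diff' list, and a
    # call may append entries shaped like {'chart': [...]} or {'values': [...]},
    # where each inner list holds unified-diff lines.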
|
the-stack_0_23181 | # coding=UTF-8
import re
from numpy import dot
from numpy.linalg import norm
import numpy as np
# Text cleansing (strip emails, URLs, HTML tags, stray jamo, extra whitespace)
def cleanText(text):
text = text.replace(u'\xa0', u' ')
text = re.sub('([a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+)', repl=' ', string=text)
text = re.sub('(http|ftp|https)://(?:[-\w.]|(?:%[\da-fA-F]{2}))+', repl=' ', string=text)
text = re.sub('([ㄱ-ㅎㅏ-ㅣ]+)', repl=' ', string=text)
text = re.sub('<[^>]*>', repl=' ', string=text)
text = re.sub('[^-/.&*+%$\w\s]', repl=' ', string=text)
text = re.sub('([가-힣].[가-힣]*)\.', repl=r'\1. ', string=text)
text = re.sub('(^[ \t]+|[ \t]+(?=:))', '', text, flags=re.M)
text = text.replace('\n', ' ')
text = text.replace('\t', ' ')
text = text.replace('\r', ' ')
text = re.sub(' +', ' ', text)
text = text.replace('()', ' ')
text = text.upper()
return text
# Return the position indices of the eojeols (space-separated segments) that contain the given word
def containIndex(eojeollist, word):
return [i for i in range(len(eojeollist)) if word in eojeollist[i]]
# Return all indices within a given distance around the supplied indices
def aroundIndex(indexlist, maxindex, distance=2):
out = []
for i in indexlist:
temp = list(range(i-distance,i+1+distance))
for j in temp:
            # if j >=0 and j < maxindex and j not in indexlist: out.append(j) # exclude the indices of eojeols containing the word itself
if j >=0 and j < maxindex: out.append(j)
out = list(dict.fromkeys(out))
out.sort()
return out
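# Illustrative sanity check (not part of the original script): a tiny worked example
# of the two index helpers above, using made-up eojeols.
assert containIndex(['a b', 'b c', 'c d'], 'b') == [0, 1]
assert aroundIndex([0, 1], 3, distance=1) == [0, 1, 2]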
# Convert a noun into an embedding vector (binary co-occurrence over the noun list)
def wordEmbedding(eojeollist, nounlist, word, distance=2):
out = []
wordindices = containIndex(eojeollist, word)
aroundindices = aroundIndex(wordindices, len(eojeollist), distance)
stringaround = ' '.join([eojeollist[i] for i in aroundindices])
for i in nounlist:
if i in stringaround: out.append(1)
else: out.append(0)
return(out)
# Cosine similarity between two vectors
def cosineSimilarity(A, B):
return dot(A, B)/(norm(A)*norm(B))
text = r"11월 입찰 예정서울시 구로구 구로동에 위치한 `센터포인트 웨스트(구 서부금융센터)` 마스턴투자운용은 서울시 구로구 구로동 '센터포인트 웨스트(옛 서부금융센터)' 매각에 속도를 낸다.27일 관련업계에 따르면 마스턴투자운용은 지난달 삼정KPMG·폴스트먼앤코 아시아 컨소시엄을 매각 주관사로 선정한 후 현재 잠재 매수자에게 투자설명서(IM)를 배포하고 있는 단계다. 입찰은 11월 중순 예정이다.2007년 12월 준공된 '센터포인트 웨스트'는 지하 7층~지상 40층, 연면적 9만5000여㎡(약 2만8000평) 규모의 프라임급 오피스다. 판매동(테크노마트)과 사무동으로 이뤄졌다. 마스턴투자운용의 소유분은 사무동 지하 1층부터 지상 40층이다. 지하 1층과 지상 10층은 판매시설이고 나머지는 업무시설이다. 주요 임차인으로는 삼성카드, 우리카드, 삼성화재, 교보생명, 한화생명 등이 있다. 임차인의 대부분이 신용도가 높은 대기업 계열사 혹은 우량한 금융 및 보험사 등이다.'센터포인트 웨스트'는 서울 서남부 신도림 권역 내 최고층 빌딩으로 초광역 교통 연결성을 보유한 오피스 입지를 갖췄다고 평가받는다. 최근 신도림·영등포 권역은 타임스퀘어, 영시티, 디큐브시티 등 프라임급 오피스들과 함께 형성된 신흥 업무 권역으로 주목받고 있다고 회사 측은 설명했다.마스턴투자운용 측은 2021년 1분기를 클로징 예상 시점으로 잡고 있다 며 신도림 권역의 랜드마크로서 임대 수요가 꾸준해 안정적인 배당이 가능한 투자상품이 될 것 이라고 설명했다.한편 마스턴투자운용은 지난 2017년 말 신한BNP파리바자산운용으로부터 당시 '서부금융센터'를 약 3200억원에 사들였으며 이후 '센터포인트 웨스트'로 이름을 바꿨다.[김규리 기자 [email protected]]"
eoList = cleanText(text).split(' ')
nouns = ['예정', '구로', '센터포인트', '웨스트', '서부금융센터', '마스턴투자운용', '서울시', '투자', '프라임급', '오피스', '판매', '사무동', '임차인', '삼성', '신도림', '권역']
# For each extracted noun, print the other nouns ordered by similarity
listofwordvectors = [wordEmbedding(eoList, nouns, i) for i in nouns]
for i in range(len(listofwordvectors)):
s = [cosineSimilarity(listofwordvectors[i], j) for j in listofwordvectors]
out = sorted(range(len(s)), key=lambda k: s[k], reverse=True)
similarityorder = [nouns[j] for j in out]
print(nouns[i], similarityorder)
|
the-stack_0_23184 | """
Version: 1.0
Last modified on: 17 November, 2014
Developers: Eduardo Nobre Luis, Fabricio Olivetti de Franca.
email: eduardo_(DOT)_luis_(AT)_aluno_(DOT)_ufabc_(DOT)_edu_(DOT)_br
: folivetti_(AT)_ufabc_(DOT)_edu_(DOT)_br
Based on source-code by Michael G. Epitropakis and Xiaodong Li
available at http://goanna.cs.rmit.edu.au/~xiaodong/cec15-niching/competition/
"""
# Vincent
#Variable range: x_i in [0.25, 10]^n, i=1,2,...,n
#No. of global optima: 6^n
#No. of local optima: 0
import numpy as np
def f7(x = None):
if x is None:
f7.lb = 0.25*np.ones(100)
f7.ub = 10*np.ones(100)
f7.nopt = 216
f7.fgoptima = 1.0
return None
D = x.shape[0]
return (np.sin(10.0*np.log10(x))).sum()/float(D)
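# Illustrative usage sketch (not part of the CEC'2015 reference code): calling f7()
# with no argument populates the bound/optima attributes, after which sample points
# can be evaluated. The 2-D slice below is chosen only for demonstration.
if __name__ == '__main__':
    f7()  # initialise f7.lb, f7.ub, f7.nopt, f7.fgoptima
    x = np.random.uniform(f7.lb[:2], f7.ub[:2])
    print(f7(x))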
|
the-stack_0_23185 | """
Basics of reporting capabilities
Copyright 2015 BlazeMeter Inc.
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import copy
import csv
import os
import time
from collections import Counter, OrderedDict
from datetime import datetime
from bzt import TaurusInternalException, TaurusConfigError
from bzt.engine import Reporter
from bzt.modules.aggregator import DataPoint, KPISet, AggregatorListener, ResultsProvider
from bzt.modules.blazemeter import BlazeMeterUploader, CloudProvisioning
from bzt.modules.functional import FunctionalAggregator, FunctionalAggregatorListener
from bzt.modules.passfail import PassFailStatus
from bzt.six import etree, iteritems, string_types
from bzt.utils import get_full_path
from terminaltables import AsciiTable
from textwrap import wrap
class FinalStatus(Reporter, AggregatorListener, FunctionalAggregatorListener):
"""
A reporter that prints short statistics on test end
"""
def __init__(self):
super(FinalStatus, self).__init__()
self.last_sec = None
self.cumulative_results = None
self.start_time = time.time() # default value
self.end_time = time.time()
self.first_ts = float("inf")
self.last_ts = 0
def startup(self):
self.start_time = time.time()
def prepare(self):
super(FinalStatus, self).prepare()
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
elif isinstance(self.engine.aggregator, FunctionalAggregator):
self.engine.aggregator.add_listener(self)
def aggregated_second(self, data):
"""
Just store the latest info
:type data: bzt.modules.aggregator.DataPoint
"""
self.first_ts = min(self.first_ts, data[DataPoint.TIMESTAMP])
self.last_ts = max(self.last_ts, data[DataPoint.TIMESTAMP])
self.last_sec = data
def aggregated_results(self, results, cumulative_results):
"""
Just store the latest info
:type cumulative_results: bzt.modules.functional.ResultsTree
:type results: bzt.modules.functional.ResultsTree
"""
self.cumulative_results = cumulative_results
def shutdown(self):
self.end_time = time.time()
def post_process(self):
"""
Log basic stats
"""
super(FinalStatus, self).post_process()
if self.parameters.get("test-duration", True):
self.__report_duration()
if self.last_sec:
summary_kpi = self.last_sec[DataPoint.CUMULATIVE][""]
if self.parameters.get("summary", True):
self.__report_samples_count(summary_kpi)
if self.parameters.get("percentiles", True):
self.__report_percentiles(summary_kpi)
if self.parameters.get("failed-labels", False):
self.__report_failed_labels(self.last_sec[DataPoint.CUMULATIVE])
if self.parameters.get("summary-labels", False):
self.__report_summary_labels(self.last_sec[DataPoint.CUMULATIVE])
if self.parameters.get("dump-xml", None):
self.__dump_xml(self.parameters.get("dump-xml"))
if self.parameters.get("dump-csv", None):
self.__dump_csv(self.parameters.get("dump-csv"))
elif self.cumulative_results:
self.__report_summary()
report_mode = self.parameters.get("report-tests", "failed")
if report_mode == "failed":
self.__report_failed_tests()
else:
self.__report_all_tests()
def __plural(self, count, noun):
return noun + 's' if count > 1 else noun
def __report_all_tests(self):
for test_suite in self.cumulative_results.test_suites():
for case in self.cumulative_results.test_cases(test_suite):
full_name = case.test_suite + "." + case.test_case
self.log.info("Test %s - %s", full_name, case.status)
print_trace = self.parameters.get("print-stacktrace", True)
if print_trace and case.error_trace:
self.log.info("Stacktrace:\n%s", case.error_trace)
def __report_failed_tests(self):
for test_suite in self.cumulative_results.test_suites():
for case in self.cumulative_results.test_cases(test_suite):
if case.status in ("FAILED", "BROKEN"):
full_name = case.test_suite + "." + case.test_case
msg = "Test {test_case} failed: {error_msg}".format(test_case=full_name, error_msg=case.error_msg)
if case.error_trace:
msg += "\n" + case.error_trace
self.log.warning(msg)
def __report_summary(self):
status_counter = Counter()
for test_suite in self.cumulative_results.test_suites():
for case in self.cumulative_results.test_cases(test_suite):
status_counter[case.status] += 1
total = sum(count for _, count in iteritems(status_counter))
self.log.info("Total: %s %s", total, self.__plural(total, 'test')) # FIXME: it's actually not tests, but test cases
def __report_samples_count(self, summary_kpi_set):
"""
reports samples count
"""
if summary_kpi_set[KPISet.SAMPLE_COUNT]:
err_rate = 100 * summary_kpi_set[KPISet.FAILURES] / float(summary_kpi_set[KPISet.SAMPLE_COUNT])
self.log.info("Samples count: %s, %.2f%% failures", summary_kpi_set[KPISet.SAMPLE_COUNT], err_rate)
def __report_percentiles(self, summary_kpi_set):
"""
reports percentiles
"""
fmt = "Average times: total %.3f, latency %.3f, connect %.3f"
self.log.info(fmt, summary_kpi_set[KPISet.AVG_RESP_TIME], summary_kpi_set[KPISet.AVG_LATENCY],
summary_kpi_set[KPISet.AVG_CONN_TIME])
for key in sorted(summary_kpi_set[KPISet.PERCENTILES].keys(), key=float):
self.log.info("Percentile %.1f%%: %.3f", float(key), summary_kpi_set[KPISet.PERCENTILES][key])
def __report_failed_labels(self, cumulative):
"""
reports failed labels
"""
report_template = "%d failed samples: %s"
sorted_labels = sorted(cumulative.keys())
for sample_label in sorted_labels:
if sample_label != "":
failed_samples_count = cumulative[sample_label]['fail']
if failed_samples_count:
self.log.info(report_template, failed_samples_count, sample_label)
def __get_table(self, header, data, title=""):
table_headers = header["headers"]
table_headers_desc = header["descriptions"]
table_data = []
table_header = []
header_index = 0
justify_columns = {}
for header in table_headers:
table_header.append(table_headers_desc[header].split(":")[0])
justify_columns[header_index] = table_headers_desc[header].split(":")[1]
header_index += 1
table_data.append(table_header)
for element in data:
table_item = []
for header in table_headers:
table_item.append(element[header])
table_data.append(table_item)
table_instance = AsciiTable(table_data, title)
table_instance.justify_columns = justify_columns
return table_instance.table.splitlines()
def __report_summary_labels(self, cumulative):
"""
        reports a per-label summary table
"""
header = {
"headers": ["scenario", "label", "status", "succ", "avg_rt", "error"],
"descriptions": {"scenario": "scenario:left", "label": "label:left",
"status": "status:center", "succ": "success:center",
"avg_rt": "avg time:center", "error": "error:left"
}
}
elements = []
sorted_labels = sorted(cumulative.keys())
last_scenario_name = None
for sample_label in sorted_labels:
if sample_label != "":
                label_parts = sample_label.split(":")
                if len(label_parts) > 2:
                    scenario_name = label_parts[0]
                    label_name = label_parts[2]
else:
scenario_name = ""
label_name = sample_label
                # When the scenario changes, add an empty separator row
if last_scenario_name and last_scenario_name != scenario_name:
item = {"scenario": "", "label": "", "status": "",
"succ": "" , "error": "",
"avg_rt": ""}
elements.append(item)
failed_samples_count = cumulative[sample_label]['fail']
success_samples_count = cumulative[sample_label]['succ']
success_samples = (success_samples_count * 100) / (failed_samples_count + success_samples_count)
avg_rt_samples_value = cumulative[sample_label]['avg_rt']
result_status = "OK"
if failed_samples_count > 0:
result_status = "FAIL"
item = {"scenario": scenario_name, "label": label_name, "status":result_status,
"succ": str(success_samples) + "%", "error": "",
"avg_rt": str(avg_rt_samples_value)}
# self.log.info(cumulative[sample_label])
errors = []
max_width = 60
for err_desc in cumulative[sample_label]['errors']:
errors.append('\n'.join(wrap(err_desc["msg"], max_width)))
item["error"] = "\n".join(errors)
elements.append(item)
last_scenario_name = scenario_name
for line in self.__get_table(header, elements):
self.log.info(line)
def __report_duration(self):
"""
asks executors start_time and end_time, provides time delta
"""
date_start = datetime.fromtimestamp(int(self.start_time))
date_end = datetime.fromtimestamp(int(self.end_time))
self.log.info("Test duration: %s", date_end - date_start)
def __dump_xml(self, filename):
self.log.info("Dumping final status as XML: %s", filename)
root = etree.Element("FinalStatus")
if self.first_ts < float("inf") and self.last_ts > 0:
duration_elem = etree.Element("TestDuration")
duration_elem.text = str(round(float(self.last_ts - self.first_ts), 3))
root.append(duration_elem)
report_info = get_bza_report_info(self.engine, self.log)
if report_info:
link, _ = report_info[0]
report_element = etree.Element("ReportURL")
report_element.text = link
root.append(report_element)
if self.last_sec:
for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
root.append(self.__get_xml_summary(label, kpiset))
with open(get_full_path(filename), 'wb') as fhd:
tree = etree.ElementTree(root)
tree.write(fhd, pretty_print=True, encoding="UTF-8", xml_declaration=True)
def __get_xml_summary(self, label, kpiset):
elem = etree.Element("Group", label=label)
for kpi_name, kpi_val in iteritems(kpiset):
if kpi_name in ('errors', 'rt'):
continue
if isinstance(kpi_val, dict):
for param_name, param_val in iteritems(kpi_val):
elem.append(self.__get_kpi_xml(kpi_name, param_val, param_name))
else:
elem.append(self.__get_kpi_xml(kpi_name, kpi_val))
return elem
def __get_kpi_xml(self, kpi_name, kpi_val, param=None):
kpi = etree.Element(kpi_name)
kpi.attrib['value'] = self.__val_to_str(kpi_val)
elm_name = etree.Element("name")
elm_name.text = kpi_name
if param is not None:
kpi.attrib['param'] = self.__val_to_str(param)
elm_name.text += "/" + param
kpi.append(elm_name)
elm_value = etree.Element("value")
elm_value.text = self.__val_to_str(kpi_val)
kpi.append(elm_value)
return kpi
def __val_to_str(self, kpi_val):
if isinstance(kpi_val, float):
return '%.5f' % kpi_val
elif isinstance(kpi_val, int):
return '%d' % kpi_val
elif isinstance(kpi_val, string_types):
return kpi_val
else:
raise TaurusInternalException("Unhandled kpi type: %s" % type(kpi_val))
def __dump_csv(self, filename):
self.log.info("Dumping final status as CSV: %s", filename)
# FIXME: what if there's no last_sec
with open(get_full_path(filename), 'wt') as fhd:
fieldnames = self.__get_csv_dict('', self.last_sec[DataPoint.CUMULATIVE]['']).keys()
writer = csv.DictWriter(fhd, fieldnames)
writer.writeheader()
for label, kpiset in iteritems(self.last_sec[DataPoint.CUMULATIVE]):
writer.writerow(self.__get_csv_dict(label, kpiset))
def __get_csv_dict(self, label, kpiset):
kpi_copy = copy.deepcopy(kpiset)
res = OrderedDict()
res['label'] = label
        # sort the KPI keys so the CSV columns come out in a stable order
for key in sorted(kpi_copy.keys()):
res[key] = kpi_copy[key]
del res[KPISet.ERRORS]
del res[KPISet.RESP_TIMES]
del res[KPISet.RESP_CODES]
del res[KPISet.PERCENTILES]
percentiles = list(iteritems(kpiset[KPISet.PERCENTILES]))
for level, val in sorted(percentiles, key=lambda lv: (float(lv[0]), lv[1])):
res['perc_%s' % level] = val
resp_codes = list(iteritems(kpiset[KPISet.RESP_CODES]))
for rcd, val in sorted(resp_codes):
res['rc_%s' % rcd] = val
for key in res:
if isinstance(res[key], float):
res[key] = "%.5f" % res[key]
return res
class JUnitXMLReporter(Reporter, AggregatorListener, FunctionalAggregatorListener):
"""
A reporter that exports results in Jenkins JUnit XML format.
"""
def __init__(self):
super(JUnitXMLReporter, self).__init__()
self.last_second = None
self.report_file_path = None
self.cumulative_results = None
def prepare(self):
if isinstance(self.engine.aggregator, ResultsProvider):
self.engine.aggregator.add_listener(self)
elif isinstance(self.engine.aggregator, FunctionalAggregator):
self.engine.aggregator.add_listener(self)
def aggregated_second(self, data):
self.last_second = data
def aggregated_results(self, _, cumulative_results):
"""
:type cumulative_results: bzt.modules.functional.ResultsTree
"""
self.cumulative_results = cumulative_results
def post_process(self):
"""
Get report data, generate xml report.
"""
filename = self.parameters.get("filename", None)
if not filename:
filename = self.engine.create_artifact(XUnitFileWriter.REPORT_FILE_NAME, XUnitFileWriter.REPORT_FILE_EXT)
self.parameters["filename"] = filename # reflect it in effective config
test_data_source = self.parameters.get("data-source", "sample-labels")
if self.cumulative_results is None:
if test_data_source == "sample-labels":
if not self.last_second:
self.log.warning("No last second data to generate XUnit.xml")
else:
writer = XUnitFileWriter(self.engine)
self.process_sample_labels(writer)
writer.save_report(filename)
elif test_data_source == "pass-fail":
writer = XUnitFileWriter(self.engine)
self.process_pass_fail(writer)
writer.save_report(filename)
else:
raise TaurusConfigError("Unsupported data source: %s" % test_data_source)
else:
writer = XUnitFileWriter(self.engine)
self.process_functional(writer)
writer.save_report(filename)
self.report_file_path = filename # TODO: just for backward compatibility, remove later
def process_sample_labels(self, xunit):
"""
:type xunit: XUnitFileWriter
"""
xunit.report_test_suite('sample_labels')
labels = self.last_second[DataPoint.CUMULATIVE]
for key in sorted(labels.keys()):
if key == "": # skip total label
continue
errors = []
for er_dict in labels[key][KPISet.ERRORS]:
rc = str(er_dict["rc"])
msg = str(er_dict["msg"])
cnt = str(er_dict["cnt"])
if er_dict["type"] == KPISet.ERRTYPE_ASSERT:
err_element = etree.Element("failure", message=msg, type="Assertion Failure")
else:
err_element = etree.Element("error", message=msg, type="Error")
err_desc = "%s\n(status code is %s)\n(total errors of this type: %s)" % (msg, rc, cnt)
err_element.text = err_desc
errors.append(err_element)
xunit.report_test_case('sample_labels', key, errors)
def process_pass_fail(self, xunit):
"""
:type xunit: XUnitFileWriter
"""
xunit.report_test_suite('bzt_pass_fail')
mods = self.engine.reporters + self.engine.services # TODO: remove it after passfail is only reporter
pass_fail_objects = [_x for _x in mods if isinstance(_x, PassFailStatus)]
self.log.debug("Processing passfail objects: %s", pass_fail_objects)
fail_criteria = []
for pf_obj in pass_fail_objects:
if pf_obj.criteria:
for _fc in pf_obj.criteria:
fail_criteria.append(_fc)
for fc_obj in fail_criteria:
if 'label' in fc_obj.config:
data = (fc_obj.config['subject'], fc_obj.config['label'], fc_obj.config['condition'],
fc_obj.config['threshold'])
tpl = "%s of %s%s%s"
else:
data = (fc_obj.config['subject'], fc_obj.config['condition'], fc_obj.config['threshold'])
tpl = "%s%s%s"
if fc_obj.config['timeframe']:
tpl += " for %s"
data += (fc_obj.config['timeframe'],)
disp_name = tpl % data
if fc_obj.is_triggered and fc_obj.fail:
errors = [etree.Element("error", message=str(fc_obj), type="pass/fail criteria triggered")]
else:
errors = ()
xunit.report_test_case('bzt_pass_fail', disp_name, errors)
def process_functional(self, xunit):
for suite_name, samples in iteritems(self.cumulative_results):
duration = max(s.start_time for s in samples) - min(s.start_time for s in samples)
duration += max(samples, key=lambda s: s.start_time).duration
attrs = {
"name": suite_name,
"tests": str(len(samples)),
"errors": str(len([sample for sample in samples if sample.status == "BROKEN"])),
"skipped": str(len([sample for sample in samples if sample.status == "SKIPPED"])),
"failures": str(len([sample for sample in samples if sample.status == "FAILED"])),
"time": str(round(duration, 3)),
# TODO: "timestamp" attribute
}
xunit.add_test_suite(suite_name, attributes=attrs)
for sample in samples:
attrs = {
"classname": sample.test_suite,
"name": sample.test_case,
"time": str(round(sample.duration, 3))
}
children = []
if sample.status == "BROKEN":
error = etree.Element("error", type=sample.error_msg)
if sample.error_trace:
error.text = sample.error_trace
children.append(error)
elif sample.status == "FAILED":
failure = etree.Element("failure", message=sample.error_msg)
if sample.error_trace:
failure.text = sample.error_trace
children.append(failure)
elif sample.status == "SKIPPED":
skipped = etree.Element("skipped")
children.append(skipped)
xunit.add_test_case(suite_name, attributes=attrs, children=children)
def get_bza_report_info(engine, log):
"""
:return: [(url, test), (url, test), ...]
"""
result = []
if isinstance(engine.provisioning, CloudProvisioning):
cloud_prov = engine.provisioning
test_name = cloud_prov.settings.get('test', None)
report_url = cloud_prov.results_url
result.append((report_url, test_name if test_name is not None else report_url))
else:
bza_reporters = [_x for _x in engine.reporters if isinstance(_x, BlazeMeterUploader)]
for bza_reporter in bza_reporters:
if bza_reporter.results_url:
test_name = bza_reporter.parameters.get("test", None)
report_url = bza_reporter.results_url
result.append((report_url, test_name if test_name is not None else report_url))
if len(result) > 1:
log.warning("More than one blazemeter reporter found")
return result
class XUnitFileWriter(object):
REPORT_FILE_NAME = "xunit"
REPORT_FILE_EXT = ".xml"
def __init__(self, engine):
"""
:type engine: bzt.engine.Engine
"""
super(XUnitFileWriter, self).__init__()
self.engine = engine
self.log = engine.log.getChild(self.__class__.__name__)
self.test_suites = OrderedDict()
bza_report_info = get_bza_report_info(engine, self.log)
self.class_name = bza_report_info[0][1] if bza_report_info else "bzt-" + str(self.__hash__())
self.report_urls = ["BlazeMeter report link: %s\n" % info_item[0] for info_item in bza_report_info]
def save_report(self, fname):
"""
:type fname: str
"""
try:
if os.path.exists(fname):
self.log.warning("File %s already exists, it will be overwritten", fname)
else:
dirname = os.path.dirname(fname)
if dirname and not os.path.exists(dirname):
os.makedirs(dirname)
testsuites = etree.Element("testsuites")
for _, suite in iteritems(self.test_suites):
testsuites.append(suite)
etree_obj = etree.ElementTree(testsuites)
self.log.info("Writing JUnit XML report into: %s", fname)
with open(get_full_path(fname), 'wb') as _fds:
etree_obj.write(_fds, xml_declaration=True, encoding="UTF-8", pretty_print=True)
except BaseException:
raise TaurusInternalException("Cannot create file %s" % fname)
def report_test_suite(self, suite_name):
"""
:type suite_name: str
"""
self.add_test_suite(suite_name, attributes={"name": suite_name, "package_name": "bzt"})
def report_test_case(self, suite_name, case_name, children=None):
"""
:type suite_name: str
:type case_name: str
:type children: list[bzt.six.etree.Element]
"""
children = children or []
if self.report_urls:
system_out = etree.Element("system-out")
system_out.text = "".join(self.report_urls)
children.insert(0, system_out)
self.add_test_case(suite_name, attributes={"classname": self.class_name, "name": case_name}, children=children)
def add_test_suite(self, suite_name, attributes=None, children=()):
attributes = attributes or {}
suite = etree.Element("testsuite", **attributes)
for child in children:
suite.append(child)
        if suite_name not in self.test_suites:
self.test_suites[suite_name] = suite
def add_test_case(self, suite_name, attributes=None, children=()):
attributes = attributes or {}
case = etree.Element("testcase", **attributes)
for child in children:
case.append(child)
self.test_suites[suite_name].append(case)
|
the-stack_0_23187 | # Copyright 2017 AT&T Intellectual Property. All other rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A UWSGI-dependent implementation of a logging filter allowing for
request-based logging.
"""
import logging
import threading
class UwsgiLogFilter(logging.Filter):
    """ A filter that prepends log records with additional request-based
    information, or with information provided by log_extra in the kwargs
    provided to a thread
"""
def __init__(self, uwsgi, additional_fields=None):
super().__init__()
if additional_fields is None:
additional_fields = []
self.uwsgi = uwsgi
self.log_fields = additional_fields
def filter(self, record):
""" Checks for thread provided values, or attempts to get values
from uwsgi
"""
if self._thread_has_log_extra():
value_setter = self._set_values_from_log_extra
else:
value_setter = self._set_value
for field_nm in self.log_fields:
value_setter(record, field_nm)
return True
def _set_value(self, record, logvar):
# handles setting the logvars from uwsgi or '' in case of none/empty
try:
logvar_value = self.uwsgi.get_logvar(logvar)
if logvar_value:
setattr(record, logvar, logvar_value.decode('UTF-8'))
else:
setattr(record, logvar, '')
except SystemError:
# This happens if log_extra is not on a thread that is spawned
# by a process running under uwsgi
setattr(record, logvar, '')
def _set_values_from_log_extra(self, record, logvar):
# sets the values from the log_extra on the thread
setattr(record, logvar, self._get_value_from_thread(logvar) or '')
def _thread_has_log_extra(self):
# Checks to see if log_extra is present on the current thread
if self._get_log_extra_from_thread():
return True
return False
def _get_value_from_thread(self, logvar):
# retrieve the logvar from the log_extra from kwargs for the thread
return self._get_log_extra_from_thread().get(logvar, '')
def _get_log_extra_from_thread(self):
# retrieves the log_extra value from kwargs or {} if it doesn't
# exist
return threading.current_thread()._kwargs.get('log_extra', {})
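def _example_attach_filter(uwsgi_module, logger_name='example_app'):
    """Illustrative helper, not part of the original module.

    Shows one way the filter above might be attached to a handler. The logger
    name and the field names are placeholders; real deployments configure this
    through the hosting application's logging setup, and ``uwsgi_module`` is the
    ``uwsgi`` module that only exists when running under uWSGI.
    """
    handler = logging.StreamHandler()
    handler.addFilter(UwsgiLogFilter(uwsgi_module,
                                     additional_fields=['req_id', 'user']))
    logging.getLogger(logger_name).addHandler(handler)
    return handler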
|
the-stack_0_23188 | # Copyright 2016 The TensorFlow Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
import tensorflow as tf
from tensorflow.contrib.distributions.python.ops import operator_pd_identity
from tensorflow.contrib.distributions.python.ops import operator_test_util
distributions = tf.contrib.distributions
class OperatorPDIdentityTest(operator_test_util.OperatorPDDerivedClassTest):
"""Most tests done in the base class."""
def _build_operator_and_mat(self, batch_shape, k, dtype=np.float64):
# Build an identity matrix with right shape and dtype.
# Build an operator that should act the same way.
batch_shape = list(batch_shape)
diag_shape = batch_shape + [k]
matrix_shape = batch_shape + [k, k]
diag = tf.ones(diag_shape, dtype=dtype)
scale = tf.constant(2.0, dtype=dtype)
scaled_identity_matrix = scale * tf.matrix_diag(diag)
operator = operator_pd_identity.OperatorPDIdentity(
matrix_shape, dtype, scale=scale)
return operator, scaled_identity_matrix.eval()
def testBadDtypeArgsRaise(self):
dtype = np.float32
batch_shape = [2, 3]
k = 4
with self.test_session():
operator, _ = self._build_operator_and_mat(batch_shape, k, dtype=dtype)
x_good_shape = batch_shape + [k, 5]
x_good = self._rng.randn(*x_good_shape).astype(dtype)
x_bad = x_good.astype(np.float64)
operator.matmul(x_good).eval() # Should not raise.
with self.assertRaisesRegexp(TypeError, "dtype"):
operator.matmul(x_bad)
with self.assertRaisesRegexp(TypeError, "dtype"):
operator.solve(x_bad)
with self.assertRaisesRegexp(TypeError, "dtype"):
operator.sqrt_solve(x_bad)
def testBadRankArgsRaise(self):
# Prepend a singleton dimension, changing the rank of "x", but not the size.
dtype = np.float32
batch_shape = [2, 3]
k = 4
with self.test_session():
operator, _ = self._build_operator_and_mat(batch_shape, k, dtype=dtype)
x_good_shape = batch_shape + [k, 5]
x_good = self._rng.randn(*x_good_shape).astype(dtype)
x_bad = x_good.reshape(1, 2, 3, 4, 5)
operator.matmul(x_good).eval() # Should not raise.
with self.assertRaisesRegexp(ValueError, "tensor rank"):
operator.matmul(x_bad)
with self.assertRaisesRegexp(ValueError, "tensor rank"):
operator.solve(x_bad)
with self.assertRaisesRegexp(ValueError, "tensor rank"):
operator.sqrt_solve(x_bad)
def testIncompatibleShapeArgsRaise(self):
# Test shapes that are the same rank but incompatible for matrix
# multiplication.
dtype = np.float32
batch_shape = [2, 3]
k = 4
with self.test_session():
operator, _ = self._build_operator_and_mat(batch_shape, k, dtype=dtype)
x_good_shape = batch_shape + [k, 5]
x_good = self._rng.randn(*x_good_shape).astype(dtype)
x_bad_shape = batch_shape + [5, k]
x_bad = x_good.reshape(*x_bad_shape)
operator.matmul(x_good).eval() # Should not raise.
with self.assertRaisesRegexp(ValueError, "Incompatible"):
operator.matmul(x_bad)
with self.assertRaisesRegexp(ValueError, "Incompatible"):
operator.solve(x_bad)
with self.assertRaisesRegexp(ValueError, "Incompatible"):
operator.sqrt_solve(x_bad)
if __name__ == "__main__":
tf.test.main()
|
the-stack_0_23190 | import json
import os
import sys
import warnings
from collections import OrderedDict
import pandas as pd
from jsonschema import exceptions, Draft4Validator
from pkg_resources import resource_stream
import numpy as np
# ensure backward compatibility
try:
from hypermapper import bo
from hypermapper import evolution
from hypermapper import local_search
from hypermapper.utility_functions import (
deal_with_relative_and_absolute_path,
Logger,
extend_with_default,
get_min_configurations,
get_min_feasible_configurations,
)
from hypermapper.profiling import Profiler
except ImportError:
if os.getenv("HYPERMAPPER_HOME"): # noqa
warnings.warn(
"Found environment variable 'HYPERMAPPER_HOME', used to update the system path. Support might be discontinued in the future. Please make sure your installation is working without this environment variable, e.g., by installing with 'pip install hypermapper'.",
DeprecationWarning,
2,
) # noqa
sys.path.append(os.environ["HYPERMAPPER_HOME"]) # noqa
ppath = os.getenv("PYTHONPATH")
if ppath:
path_items = ppath.split(":")
scripts_path = ["hypermapper/scripts", "hypermapper_dev/scripts"]
if os.getenv("HYPERMAPPER_HOME"):
scripts_path.append(os.path.join(os.getenv("HYPERMAPPER_HOME"), "scripts"))
truncated_items = [
p for p in sys.path if len([q for q in scripts_path if q in p]) == 0
]
if len(truncated_items) < len(sys.path):
warnings.warn(
"Found hypermapper in PYTHONPATH. Usage is deprecated and might break things. "
"Please remove all hypermapper references from PYTHONPATH. Trying to import"
"without hypermapper in PYTHONPATH..."
)
sys.path = truncated_items
sys.path.append(".") # noqa
sys.path = list(OrderedDict.fromkeys(sys.path))
from hypermapper import bo
from hypermapper import evolution
from hypermapper import local_search
from hypermapper.utility_functions import (
deal_with_relative_and_absolute_path,
Logger,
extend_with_default,
get_min_configurations,
get_min_feasible_configurations,
)
from hypermapper.profiling import Profiler
def optimize(parameters_file, black_box_function=None, output_file=''):
try:
hypermapper_pwd = os.environ["PWD"]
hypermapper_home = os.environ["HYPERMAPPER_HOME"]
os.chdir(hypermapper_home)
warnings.warn(
"Found environment variable 'HYPERMAPPER_HOME', used to update the system path. Support might be discontinued in the future. Please make sure your installation is working without this environment variable, e.g., by installing with 'pip install hypermapper'.",
DeprecationWarning,
2,
)
except:
hypermapper_pwd = "."
if not parameters_file.endswith(".json"):
_, file_extension = os.path.splitext(parameters_file)
print(
"Error: invalid file name. \nThe input file has to be a .json file not a %s"
% file_extension
)
raise SystemExit
with open(parameters_file, "r") as f:
config = json.load(f)
'''if "conv_shallow" in config["application_name"]:
config["input_parameters"]["LP"]["prior"] = [0.4, 0.065, 0.07, 0.065, 0.4]
config["input_parameters"]["P1"]["prior"] = [0.1, 0.3, 0.3, 0.3]
config["input_parameters"]["SP"]["prior"] = [0.4, 0.065, 0.07, 0.065, 0.4]
config["input_parameters"]["P2"]["prior"] = [0.1, 0.3, 0.3, 0.3]
config["input_parameters"]["P3"]["prior"] = [0.1, 0.1, 0.033, 0.1, 0.021, 0.021, 0.021, 0.1, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021, 0.021]
config["input_parameters"]["P4"]["prior"] = [0.08, 0.0809, 0.0137, 0.1, 0.0137, 0.0137, 0.0137, 0.1, 0.0137, 0.0137, 0.0137, 0.05, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137, 0.0137]
config["input_parameters"]["x276"]["prior"] = [0.1, 0.9]
elif "conv_deep" in config["application_name"]:
config["input_parameters"]["LP"]["prior"] = [0.4, 0.065, 0.07, 0.065, 0.4]
config["input_parameters"]["P1"]["prior"] = [0.4, 0.3, 0.2, 0.1]
config["input_parameters"]["SP"]["prior"] = [0.4, 0.065, 0.07, 0.065, 0.4]
config["input_parameters"]["P2"]["prior"] = [0.4,0.3,0.2,0.1]
config["input_parameters"]["P3"]["prior"] = [0.04, 0.01, 0.01, 0.1, 0.01, 0.01, 0.01, 0.1, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.2, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.1, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.01, 0.2]
config["input_parameters"]["P4"]["prior"] = [0.05, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.13, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.2, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.11, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.2, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.005, 0.1]
config["input_parameters"]["x276"]["prior"] = [0.1, 0.9]
elif "md_grid" in config["application_name"]:
config["input_parameters"]["loop_grid0_z"]["prior"] = [0.2, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
config["input_parameters"]["loop_q"]["prior"] = [0.08, 0.08, 0.02, 0.1, 0.02, 0.02, 0.02, 0.1, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.1, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02]
config["input_parameters"]["par_load"]["prior"] = [0.45, 0.1, 0.45]
config["input_parameters"]["loop_p"]["prior"] = [0.1, 0.1, 0.1, 0.1, 0.05, 0.03, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02, 0.02]
config["input_parameters"]["loop_grid0_x"]["prior"] = [0.2, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
config["input_parameters"]["loop_grid1_z"]["prior"] = [0.2, 0.2, 0.1, 0.1, 0.07, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03, 0.03]
config["input_parameters"]["loop_grid0_y"]["prior"] = [0.2, 0.1, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05, 0.05]
config["input_parameters"]["ATOM1LOOP"]["prior"] = [0.1,0.9]
config["input_parameters"]["ATOM2LOOP"]["prior"] = [0.1,0.9]
config["input_parameters"]["PLOOP"]["prior"] = [0.1,0.9]'''
schema = json.load(resource_stream("hypermapper", "schema.json"))
DefaultValidatingDraft4Validator = extend_with_default(Draft4Validator)
try:
DefaultValidatingDraft4Validator(schema).validate(config)
except exceptions.ValidationError as ve:
print("Failed to validate json:")
#print(ve)
raise SystemExit
# TODO CHANGE - hypermapper mode not present in bopro
#config["hypermapper_mode"] = {}
#config["hypermapper_mode"]['mode'] = 'default'
config["verbose_logging"] = False
config["noise"] = False
config["print_posterior_best"] = False
# This handles the logger. The standard setting is that HyperMapper always logs both on screen and on the log file.
# In cases like the client-server mode we only want to log on the file.
run_directory = config["run_directory"]
if run_directory == ".":
run_directory = hypermapper_pwd
config["run_directory"] = run_directory
log_file = config["log_file"]
log_file = deal_with_relative_and_absolute_path(run_directory, log_file)
sys.stdout = Logger(log_file)
optimization_method = config["optimization_method"]
profiling = None
if (
(optimization_method == "random_scalarizations")
or (optimization_method == "bayesian_optimization")
or (optimization_method == "prior_guided_optimization")
):
data_array = bo.main(
config, black_box_function=black_box_function, profiling=profiling
)
elif optimization_method == "local_search":
data_array = local_search.main(
config, black_box_function=black_box_function, profiling=profiling
)
elif optimization_method == "evolutionary_optimization":
data_array = evolution.main(
config, black_box_function=black_box_function, profiling=profiling
)
else:
print("Unrecognized optimization method:", optimization_method)
raise SystemExit
if config["profiling"]:
profiling.stop()
try:
os.chdir(hypermapper_pwd)
except:
pass
print(config['parameters'])
# If mono-objective, compute the best point found
objectives = config["optimization_objectives"]
inputs = list(config["input_parameters"].keys())
if len(objectives) == 1:
explored_points = {}
for parameter in inputs + objectives:
explored_points[parameter] = data_array[parameter]
objective = objectives[0]
feasible_output = config["feasible_output"]
if feasible_output["enable_feasible_predictor"]:
feasible_parameter = feasible_output["name"]
explored_points[feasible_parameter] = data_array[feasible_parameter]
best_point = get_min_feasible_configurations(
explored_points, 1, objective, feasible_parameter
)
else:
best_point = get_min_configurations(explored_points, 1, objective)
keys = ""
best_point_string = ""
for parameter in inputs + objectives:
keys += f"{parameter},"
best_point_string += f"{best_point[parameter][0]},"
keys = keys[:-1]
best_point_string = best_point_string[:-1]
    # If there is a best point, return it according to the user's preference
print_best = config["print_best"]
if (print_best is not True) and (print_best is not False):
if print_best != "auto":
print(
f"Warning: unrecognized option for print_best: {print_best}. Should be either 'auto' or a boolean."
)
print("Using default.")
hypermapper_mode = config["hypermapper_mode"]
print_best = False if hypermapper_mode == "client-server" else True
try:
os.mkdir(f'results_{config["application_name"]}')
except:
pass
i = 0
while os.path.isfile(f'results_{config["application_name"]}/results{i}.csv'):
i += 1
print('SAVING TO CSV!!!')
print(data_array)
data_array.pop('scalarization')
pd.DataFrame(data_array).to_csv(f'results_{config["application_name"]}/results{i}.csv')
print('successfully saved at', f'results_{config["application_name"]}/results{i}.csv')
if print_best:
if len(objectives) == 1:
sys.stdout.write_protocol("Best point found:\n")
sys.stdout.write_protocol(f"{keys}\n")
sys.stdout.write_protocol(f"{best_point_string}\n\n")
else:
if (
config["print_best"] is True
): # If the user requested this, let them know it is not possible
sys.stdout.write_protocol(
"\nMultiple objectives, there is no single best point.\n"
)
else:
if len(objectives) > 1:
sys.stdout.write_to_logfile(
"\nMultiple objectives, there is no single best point.\n"
)
else:
sys.stdout.write_to_logfile("Best point found:\n")
sys.stdout.write_to_logfile(f"{keys}\n")
sys.stdout.write_to_logfile(f"{best_point}\n\n")
sys.stdout.write_protocol("End of HyperMapper\n")
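# Illustrative library-style usage sketch (not part of HyperMapper itself).
def _example_black_box(X):
    # X is a dict of input-parameter values, following the HyperMapper quick-start
    # convention; the "x1"/"x2" names and the quadratic objective are made up.
    return (X["x1"] - 3.0) ** 2 + (X["x2"] + 1.0) ** 2
# A call would then look like (the scenario path is a placeholder):
#     optimize("path/to/scenario.json", black_box_function=_example_black_box)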
def main():
if len(sys.argv) == 2:
parameters_file = sys.argv[1]
else:
print("Error: only one argument needed, the parameters json file.")
if parameters_file == "--help" or len(sys.argv) != 2:
print("#########################################")
print("HyperMapper: a multi-objective black-box optimization tool")
print(
"Quickstart guide: https://github.com/luinardi/hypermapper/wiki/Quick-Start-Guide"
)
print("Full documentation: https://github.com/luinardi/hypermapper/wiki")
print("Useful commands:")
print(
" hm-quickstart test the installation with a quick optimization run"
)
print(
" hypermapper /path/to/configuration_file run HyperMapper in client-server mode"
)
print(
" hm-plot-optimization-results /path/to/configuration_file plot the results of a mono-objective optimization run"
)
print(
" hm-compute-pareto /path/to/configuration_file compute the pareto of a two-objective optimization run"
)
print(
" hm-plot-pareto /path/to/configuration_file /path/to/configuration_file plot the pareto computed by hm-compute-pareto"
)
print(
" hm-plot-hvi /path/to/configuration_file /path/to/configuration_file plot the hypervolume indicator for a multi-objective optimization run"
)
print("###########################################")
exit(1)
optimize(parameters_file)
if __name__ == "__main__":
main()
|
the-stack_0_23192 | #!/usr/bin/env python2
"""
braces.py - Implementation of {andy,bob}@example.com
NOTE: bash implements brace expansion in the braces.c file (835 lines). It
uses goto!
Possible optimization flags for Compound:
- has Lit_LBrace, LitRBrace -- set during word_parse phase
- it if has both, then do _BraceDetect
- has BracedTuple -- set during _BraceDetect
- if it does, then do the expansion
- has Lit_Star, ?, [ ] -- globbing?
- but after expansion do you still have those flags?
"""
from __future__ import print_function
from _devbuild.gen.id_kind_asdl import Id, Id_t
from _devbuild.gen.syntax_asdl import (
word, word_t, word__Compound, word__BracedTree,
word_part, word_part_t,
word_part__BracedTuple, word_part__BracedRange,
word_part__Literal,
token,
)
from asdl import const
#from core.util import log
from core.util import p_die
from frontend.match import BRACE_RANGE_LEXER
from typing import List, Optional, Iterator, Tuple
# The brace language has no syntax errors! But we still need to abort the
# parse.
class _NotARange(Exception):
pass
class _RangeParser(object):
"""Grammar for ranges:
step = Dots Int
int_range = Int Dots Int step?
char_range = Char Dots Char step?
range = (int_range | char_range) Eof # ensure no extra tokens!
"""
def __init__(self, lexer, span_id):
# type: (Iterator[Tuple[Id_t, str]], int) -> None
self.lexer = lexer
self.span_id = span_id
self.token_type = None # type: Id_t
self.token_val = ''
def _Next(self):
# type: () -> None
"""Move to the next token."""
try:
self.token_type, self.token_val = self.lexer.next()
except StopIteration:
self.token_type = Id.Range_Eof
self.token_val = ''
def _Eat(self, token_type):
# type: (Id_t) -> str
if self.token_type != token_type:
raise _NotARange('Expected %s, got %s' % (token_type, self.token_type))
val = self.token_val
self._Next()
return val
def _ParseStep(self):
# type: () -> int
self._Next() # past Dots
return int(self._Eat(Id.Range_Int))
def _ParseRange(self, range_kind):
# type: (Id_t) -> word_part__BracedRange
start = self.token_val
self._Next() # past Char
self._Eat(Id.Range_Dots)
end = self._Eat(range_kind)
part = word_part.BracedRange(range_kind, start, end)
if self.token_type == Id.Range_Dots:
part.step = self._ParseStep()
return part
def Parse(self):
# type: () -> word_part__BracedRange
self._Next()
if self.token_type == Id.Range_Int:
part = self._ParseRange(self.token_type)
# Check step validity and fill in a default
start = int(part.start)
end = int(part.end)
if start < end:
if part.step == const.NO_INTEGER:
part.step = 1
if part.step <= 0: # 0 step is not allowed
p_die('Invalid step %d for ascending integer range', part.step,
span_id=self.span_id)
elif start > end:
if part.step == const.NO_INTEGER:
part.step = -1
if part.step >= 0: # 0 step is not allowed
p_die('Invalid step %d for descending integer range', part.step,
span_id=self.span_id)
# else: singleton range is dumb but I suppose consistent
elif self.token_type == Id.Range_Char:
part = self._ParseRange(self.token_type)
# Check step validity and fill in a default
if part.start < part.end:
if part.step == const.NO_INTEGER:
part.step = 1
if part.step <= 0: # 0 step is not allowed
p_die('Invalid step %d for ascending character range', part.step,
span_id=self.span_id)
elif part.start > part.end:
if part.step == const.NO_INTEGER:
part.step = -1
if part.step >= 0: # 0 step is not allowed
p_die('Invalid step %d for descending character range', part.step,
span_id=self.span_id)
# else: singleton range is dumb but I suppose consistent
# Check matching cases
upper1 = part.start.isupper()
upper2 = part.end.isupper()
if upper1 != upper2:
p_die('Mismatched cases in character range', span_id=self.span_id)
else:
raise _NotARange()
# prevent unexpected trailing tokens
self._Eat(Id.Range_Eof)
return part
def _RangePartDetect(token):
# type: (token) -> Optional[word_part_t]
"""Parse the token and return a new word_part if it looks like a range."""
lexer = BRACE_RANGE_LEXER.Tokens(token.val)
p = _RangeParser(lexer, token.span_id)
try:
part = p.Parse()
except _NotARange as e:
return None
part.spids.append(token.span_id) # Propagate location info
return part
class _StackFrame(object):
def __init__(self, cur_parts):
# type: (List[word_part_t]) -> None
self.cur_parts = cur_parts
self.alt_part = word_part.BracedTuple()
self.saw_comma = False
def _BraceDetect(w):
# type: (word__Compound) -> Optional[word__BracedTree]
"""Return a new word if the input word looks like a brace expansion.
e.g. {a,b} or {1..10..2} (TODO)
Do we want to accept {01..02} ? zsh does make some attempt to do this too.
NOTE: This is an iterative algorithm that uses a stack. The grammar-based
approach didn't seem natural.
  It's not LL(1) because of 'part*'. And not LL(k) even? Maybe it could be handled
with an LR parser? In any case the imperative algorithm with 'early return'
for a couple cases is fairly simple.
Grammar:
# an alternative is a literal, possibly empty, or another brace_expr
part = <any part except Literal>
alt = part* | brace_expr
# a brace_expr is group of at least 2 braced and comma-separated
# alternatives, with optional prefix and suffix.
brace_expr = part* '{' alt ',' alt (',' alt)* '}' part*
"""
# Errors:
# }a{ - stack depth dips below 0
# {a,b}{ - Stack depth doesn't end at 0
  # {a} - no comma, and also not a numeric range
cur_parts = [] # type: List[word_part_t]
stack = [] # type: List[_StackFrame]
found = False
for i, part in enumerate(w.parts):
append = True
if isinstance(part, word_part__Literal):
id_ = part.token.id
if id_ == Id.Lit_LBrace:
# Save prefix parts. Start new parts list.
new_frame = _StackFrame(cur_parts)
stack.append(new_frame)
cur_parts = []
append = False
found = True # assume found, but can early exit with None later
elif id_ == Id.Lit_Comma: # Append a new alternative.
# NOTE: Should we allow this:
# ,{a,b}
# or force this:
# \,{a,b}
# ? We're forcing braces right now but not commas.
if stack:
stack[-1].saw_comma = True
stack[-1].alt_part.words.append(word.Compound(cur_parts))
cur_parts = [] # clear
append = False
elif id_ == Id.Lit_RBrace:
if not stack: # e.g. echo {a,b}{ -- unbalanced {
return None # do not expand ANYTHING because of invalid syntax
# Detect {1..10} and {1..10..2}
#log('stack[-1]: %s', stack[-1])
#log('cur_parts: %s', cur_parts)
range_part = None
# only allow {1..3}, not {a,1..3}
if not stack[-1].saw_comma and len(cur_parts) == 1:
# It must be ONE part. For example, -1..-100..-2 is initially
# lexed as a single Lit_Chars token.
part = cur_parts[0]
if (isinstance(part, word_part__Literal) and
part.token.id == Id.Lit_Chars):
range_part = _RangePartDetect(part.token)
if range_part:
frame = stack.pop()
cur_parts = frame.cur_parts
cur_parts.append(range_part)
append = False
# It doesn't look like a range -- process it as the last element in
# {a,b,c}
if not range_part:
if not stack[-1].saw_comma: # {foo} is not a real alternative
return None # early return
stack[-1].alt_part.words.append(word.Compound(cur_parts))
frame = stack.pop()
cur_parts = frame.cur_parts
cur_parts.append(frame.alt_part)
append = False
if append:
cur_parts.append(part)
if len(stack) != 0:
return None
if found:
return word.BracedTree(cur_parts)
else:
return None
def BraceDetectAll(words):
# type: (List[word__Compound]) -> List[word_t]
"""Return a new list of words, possibly with BracedTree instances."""
out = [] # type: List[word_t]
for w in words:
brace_tree = _BraceDetect(w)
if brace_tree:
out.append(brace_tree)
else:
out.append(w)
return out
def _LeadingZeros(s):
# type: (str) -> int
n = 0
for c in s:
if c == '0':
n += 1
else:
break
return n
def _RangeStrings(part):
# type: (word_part__BracedRange) -> List[str]
if part.kind == Id.Range_Int:
nums = []
z1 = _LeadingZeros(part.start)
z2 = _LeadingZeros(part.end)
if z1 == 0 and z2 == 0:
fmt = '%d'
else:
if z1 < z2:
width = len(part.end)
else:
width = len(part.start)
# TODO: Does the mycpp runtime support this dynamic format? Or write it
# out?
fmt = '%0' + str(width) + 'd'
n = int(part.start)
end = int(part.end)
step = part.step
if step > 0:
while True:
nums.append(fmt % n)
n += step
if n > end:
break
else:
while True:
nums.append(fmt % n)
n += step
if n < end:
break
return nums
else: # Id.Range_Char
chars = []
n = ord(part.start)
ord_end = ord(part.end)
step = part.step
if step > 0:
while True:
chars.append(chr(n))
n += step
if n > ord_end:
break
else:
while True:
chars.append(chr(n))
n += step
if n < ord_end:
break
return chars
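def _DemoRangeStrings():
  # type: () -> List[str]
  """Illustrative only, not part of Oil: expand the numeric range {1..5..2} by hand.

  The BracedRange constructor call mirrors the one in _RangeParser._ParseRange.
  """
  r = word_part.BracedRange(Id.Range_Int, '1', '5')
  r.step = 2
  return _RangeStrings(r)  # -> ['1', '3', '5']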
def _ExpandPart(parts, # type: List[word_part_t]
first_alt_index, # type: int
suffixes, # type: List[List[word_part_t]]
):
# type: (...) -> List[List[word_part_t]]
"""Mutually recursive with _BraceExpand.
Args:
parts: input parts
first_alt_index: index of the first BracedTuple
suffixes: List of suffixes to append.
"""
out = []
prefix = parts[ : first_alt_index]
expand_part = parts[first_alt_index]
if isinstance(expand_part, word_part__BracedTuple):
# Call _BraceExpand on each of the inner words too!
expanded_alts = [] # type: List[List[word_part_t]]
for w in expand_part.words:
assert isinstance(w, word__Compound) # for MyPy
expanded_alts.extend(_BraceExpand(w.parts))
for alt_parts in expanded_alts:
for suffix in suffixes:
out_parts = [] # type: List[word_part_t]
out_parts.extend(prefix)
out_parts.extend(alt_parts)
out_parts.extend(suffix)
out.append(out_parts)
elif isinstance(expand_part, word_part__BracedRange):
# Not mutually recursive with _BraceExpand
strs = _RangeStrings(expand_part)
for s in strs:
for suffix in suffixes:
out_parts_ = [] # type: List[word_part_t]
out_parts_.extend(prefix)
# Preserve span_id from the original
t = token(Id.Lit_Chars, s, expand_part.spids[0])
out_parts_.append(word_part.Literal(t))
out_parts_.extend(suffix)
out.append(out_parts_)
else:
raise AssertionError
return out
def _BraceExpand(parts):
# type: (List[word_part_t]) -> List[List[word_part_t]]
"""Mutually recursive with _ExpandPart."""
num_alts = 0
first_alt_index = -1
for i, part in enumerate(parts):
if isinstance(part, (word_part__BracedTuple, word_part__BracedRange)):
num_alts += 1
if num_alts == 1:
first_alt_index = i
elif num_alts == 2:
break # don't need to count anymore
# NOTE: There are TWO recursive calls here, not just one -- one for
# nested {}, and one for adjacent {}. This is hard to do iteratively.
if num_alts == 0:
return [parts]
elif num_alts == 1:
suffix = parts[first_alt_index+1 : ]
return _ExpandPart(parts, first_alt_index, [suffix])
else:
# Now call it on the tail
tail_parts = parts[first_alt_index+1 : ]
suffixes = _BraceExpand(tail_parts) # recursive call
return _ExpandPart(parts, first_alt_index, suffixes)
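# Worked illustration (informal, not from the original source): for a word whose parts
# are [Lit(x), BracedTuple(a|b), Lit(y), BracedRange(1..2), Lit(z)] -- i.e. x{a,b}y{1..2}z --
# _BraceExpand finds the BracedTuple first, recursively expands the tail
# [Lit(y), BracedRange(1..2), Lit(z)] into the suffixes y1z / y2z, and _ExpandPart then
# crosses each alternative (a, b) with each suffix, yielding xay1z, xay2z, xby1z, xby2z.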
def BraceExpandWords(words):
# type: (List[word__Compound]) -> List[word__Compound]
out = [] # type: List[word__Compound]
for w in words:
if isinstance(w, word__BracedTree):
parts_list = _BraceExpand(w.parts)
out.extend(word.Compound(p) for p in parts_list)
else:
out.append(w)
return out
|
the-stack_0_23194 | # @name: Katana-dorkscanner
# @repo: https://github.com/adnane-X-tebbaa/Katana
# @author: Adnane tebbaa (AXT)
# Main Google Dorking file V0.1
"""
MIT License
Copyright (c) 2020 adnane tebbaa
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import requests
import proxybroker
from googlesearch import search
import os
import sys
from termcolor import colored, cprint
import warnings
def fxn():
warnings.warn("deprecated", DeprecationWarning)
with warnings.catch_warnings():
warnings.simplefilter("ignore")
fxn()
def clear():
return os.system('cls' if os.name == 'nt' else 'clear')
print ("")
A = """
,_._._._._._._._._|__________________________________________________________
|G|o|o|g|l|e|_|_|_|_________________________________________________________/
|
Katana dork scanner (Katana-ds V1.5) coded by adnane-X-tebbaa
Google Mode
"""
print ("")
print(A)
alpha = input (colored('[>] Please set a Dork : ', 'green' ))
query = alpha
beta = input (colored('[>] Please set a TLD : ', 'green' ))
for gamma in search(query, tld=beta, num=7,pause=2):
    print(colored('[+] Found > ', 'yellow') + gamma)
|
the-stack_0_23195 | from django.utils.translation import ugettext_lazy as _
import graphene
from wagtailcommerce.addresses.models import Address
from wagtailcommerce.carts.object_types import CartReplyObjectType, CartTotalsObjectType
from wagtailcommerce.carts.utils import get_cart_from_request
from wagtailcommerce.promotions.utils import remove_coupon, verify_coupon
from wagtailcommerce.shipping.exceptions import ShippingCostCalculationException
from wagtailcommerce.shipping.models import ShippingMethod
class CartQuery(graphene.ObjectType):
cart = graphene.Field(CartReplyObjectType)
cart_totals = graphene.Field(
lambda: CartTotalsObjectType,
shipping_address_pk=graphene.String(required=False),
shipping_method_pk=graphene.String(required=False)
)
def resolve_cart_totals(self, info, shipping_address_pk=None, shipping_method_pk=None, **kwargs):
cart = get_cart_from_request(info.context)
if shipping_address_pk and shipping_method_pk:
try:
shipping_address = info.context.user.addresses.get(deleted=False, pk=shipping_address_pk)
shipping_method = ShippingMethod.objects.for_shipping_address(
shipping_address, info.context.user
).get(pk=shipping_method_pk).specific
totals = cart.get_totals_with_shipping(shipping_address, shipping_method)
except Address.DoesNotExist:
raise ShippingCostCalculationException(_('Address not found'))
except ShippingMethod.DoesNotExist:
raise ShippingCostCalculationException(_('Shipping method not found'))
else:
totals = cart.get_totals()
totals.update({
'coupon_code': cart.coupon.code if cart.coupon else None,
'coupon_auto_assigned': cart.coupon.auto_assign_to_new_users if cart.coupon else False
})
return CartTotalsObjectType(**totals)
def resolve_cart(self, info, **kwargs):
from wagtailcommerce.carts.utils import get_cart_from_request
cart = get_cart_from_request(info.context)
coupon_removed = None
coupon_auto_assigned = False
if cart.coupon:
if cart.coupon.auto_assign_to_new_users:
coupon_auto_assigned = True
if not verify_coupon(cart.coupon):
coupon_removed = cart.coupon.code
remove_coupon(cart)
return CartReplyObjectType(cart=cart, coupon_removed=coupon_removed, coupon_auto_assigned=coupon_auto_assigned)
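# Illustrative only (not part of the original module): a client-side query this
# CartQuery is expected to serve. Field casing assumes graphene's default
# snake_case-to-camelCase conversion; the surrounding schema wiring lives elsewhere.
EXAMPLE_CART_TOTALS_QUERY = """
query CartTotals($shippingAddressPk: String, $shippingMethodPk: String) {
  cartTotals(shippingAddressPk: $shippingAddressPk, shippingMethodPk: $shippingMethodPk) {
    couponCode
    couponAutoAssigned
  }
}
"""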
|
the-stack_0_23196 | import re
import os
import urllib.parse as parse
import warnings
from urllib.request import urlopen
import requests
import time
from tqdm import tqdm
from scripts.GEOSpyder.user_agents import randagent as ragent
class GEODownloader(object):
DEFAULT_URL = 'https://www.ncbi.nlm.nih.gov/geo/query/acc.cgi'
PATTERN = r'<a href="(ftp://[^:]*?suppl/[^>]*?)">'
SAVE_ROOT = os.path.dirname(__file__)
def __init__(self, geo_id, url=None, pattern=None, save_dir='', unicode='utf-8'):
self.geo_id = geo_id.upper()
self.url = url or self.DEFAULT_URL
self.params = {'acc': self.geo_id}
self.header = {'User-Agent': ragent(ends='pc')}
self.unicode = unicode
self.pattern = re.compile(pattern or self.PATTERN)
self.save_path = self._init_save_path(save_dir)
def _init_save_path(self, save_dir):
path = os.path.join(self.SAVE_ROOT, save_dir)
if os.path.exists(path):
warnings.warn('%s has been EXISTED!!!' % save_dir)
else:
os.mkdir(path)
return path
def send(self, url=None, params=None):
resp = requests.get(url or self.url, params=params)
return resp.content.decode(self.unicode)
def href_resolute(self, text):
match_ret = re.findall(self.pattern, text)
assert match_ret, 'Match Nothing from the pattern: %s' % self.pattern
return match_ret
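    # Illustrative sketch (not part of the original class): PATTERN is meant to pull
    # supplementary-file FTP links out of the accession page HTML; the URL below is
    # made up for illustration:
    #   >>> import re
    #   >>> html = '<a href="ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE120nnn/GSE120963/suppl/filelist.txt">(ftp)</a>'
    #   >>> re.findall(GEODownloader.PATTERN, html)
    #   ['ftp://ftp.ncbi.nlm.nih.gov/geo/series/GSE120nnn/GSE120963/suppl/filelist.txt']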
def write(self, data, calling_fn=lambda x: x, many=True, namespace=None, dtype='txt/w'):
namespace = re.compile('suppl/{}(.*)'.format(self.geo_id or namespace))
dtype_patten = re.compile(r'(\w*)/(\w+)')
ftype, wtype = re.match(dtype_patten, dtype).groups()
encoding = None if wtype == 'wb' else self.unicode
def _write(content, _instance=self, _name=None):
path = os.path.join(_instance.save_path, _name or _instance.geo_id + ftype)
with open(path, wtype, encoding=encoding) as f:
f.write(content)
if not many:
_write(data)
return
bar = tqdm(enumerate(data), total=len(data))
for idx, url in bar:
name = parse.unquote(re.search(namespace, url).groups()[0], encoding=self.unicode)
bar.set_description_str('Now: {name}'.format(name=name))
_write(calling_fn(url), _name=name)
def __call__(self, **kwargs):
print('Connecting...')
try:
recv_dat = self.send(params=self.params)
except Exception as e:
print('CONNECT FAILED: %s' % e)
return
assert recv_dat, 'No data stream has been accept!'
print('Receiving...')
try:
href_dat = self.href_resolute(recv_dat)
except AssertionError as e:
print('RESOLUTION ERROR: %s' % e)
return
try:
self.write(href_dat, **kwargs)
except Exception as e:
print('I/O ERROR: %s' % e)
return
time.sleep(0.1)
print('Done!')
if __name__ == '__main__':
GEODownloader('GSE120963', save_dir='GSE120963')(calling_fn=lambda x: urlopen(x).read(), dtype='/wb')
|
the-stack_0_23199 | """Setup file for mysensors package."""
from pathlib import Path
from setuptools import setup, find_packages
PROJECT_DIR = Path(__file__).parent.resolve()
VERSION = (PROJECT_DIR / "mysensors" / "VERSION").read_text(encoding="utf-8").strip()
README_FILE = PROJECT_DIR / "README.md"
LONG_DESCRIPTION = README_FILE.read_text(encoding="utf-8")
REQUIRES = [
"awesomeversion",
"click",
"crcmod>=1.7",
"getmac",
"IntelHex>=2.2.1",
"pyserial>=3.4",
"pyserial-asyncio>=0.4",
"voluptuous>=0.11.1",
]
EXTRAS = {"mqtt-client": ["paho-mqtt"]}
setup(
name="pymysensors",
version=VERSION,
description="Python API for talking to a MySensors gateway",
long_description=LONG_DESCRIPTION,
long_description_content_type="text/markdown",
url="https://github.com/theolind/pymysensors",
author="Theodor Lindquist",
author_email="[email protected]",
license="MIT License",
install_requires=REQUIRES,
extras_require=EXTRAS,
packages=find_packages(exclude=["tests", "tests.*"]),
include_package_data=True,
python_requires=">=3.7",
entry_points={"console_scripts": ["pymysensors = mysensors.cli:cli"]},
keywords=["sensor", "actuator", "IoT", "DYI"],
zip_safe=True,
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: End Users/Desktop",
"License :: OSI Approved :: MIT License",
"Natural Language :: English",
"Operating System :: OS Independent",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Python :: 3.9",
"Programming Language :: Python :: 3.10",
"Topic :: Home Automation",
],
)
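# Illustrative note (not part of the original setup script): the optional MQTT
# gateway dependency declared in EXTRAS is pulled in through the extra, e.g.
#   pip install pymysensors[mqtt-client]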
|
the-stack_0_23200 | #!/opt/anaconda3/bin Python
import sqlite3
# Open connection to new blank database file
conn = sqlite3.connect('demo_data.sqlite3')
# Make a cursor
curs = conn.cursor()
drop_table = '''
DROP TABLE IF EXISTS demo;
'''
# Write appropriate CREATE TABLE statement
create_table = '''
CREATE TABLE demo(
s VARCHAR(10),
x INT,
y INT
);
'''
# Write appropriate INSERT INTO statements
insert_values = '''
INSERT INTO demo (
s,
x,
y
)
VALUES
("g", 3, 9),
("v", 5, 7),
("f", 8, 7);
'''
# Execute and commit
curs.execute(drop_table)
curs.execute(create_table)
curs.execute(insert_values)
conn.commit()
# Test queries
curs.execute('SELECT COUNT (*) FROM demo;')
print(f'Rows: {curs.fetchall()[0][0]}')
'''
ANSWER how many rows: 3 rows
'''
curs.execute('SELECT COUNT(*) FROM DEMO WHERE x>=5 AND y>=5;')
print(f'Rows where x, y >= 5: {curs.fetchall()[0][0]}')
'''
ANSWER rows where x>=5 and y>=5: 2 rows
'''
curs.execute('SELECT COUNT(DISTINCT y) FROM demo')
print(f'Unique values of y: {curs.fetchall()[0][0]}')
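# Illustrative extra check (not part of the original assignment): the same filtered
# count expressed with parameter binding instead of an inline SQL literal.
curs.execute('SELECT COUNT(*) FROM demo WHERE x >= ? AND y >= ?;', (5, 5))
print(f'Rows where x, y >= 5 (parameterized): {curs.fetchall()[0][0]}')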
'''
ANSWER unique values of y: 2
''' |
the-stack_0_23202 | import py, os, sys
from rpython.rtyper.lltypesystem import lltype, rffi, llmemory
from rpython.translator.tool.cbuild import ExternalCompilationInfo
from rpython.rtyper.annlowlevel import cast_instance_to_gcref, cast_base_ptr_to_instance
from rpython.rlib.objectmodel import we_are_translated
from rpython.rlib import jit, rposix, rgc
from rpython.rlib.rarithmetic import ovfcheck_float_to_int
from rpython.rtyper.tool import rffi_platform as platform
from rpython.rlib.rstring import StringBuilder
from pypy.interpreter.baseobjspace import W_Root
from pypy.interpreter.error import oefmt, wrap_oserror, OperationError
from pypy.interpreter.gateway import unwrap_spec
from pypy.interpreter.pyframe import PyFrame
from pypy.interpreter.pycode import PyCode
ROOT = py.path.local(__file__).join('..')
SRC = ROOT.join('src')
# by default, we statically link vmprof.c into pypy; however, if you set
# DYNAMIC_VMPROF to True, it will be dynamically linked to the libvmprof.so
# which is expected to be inside pypy/module/_vmprof/src: this is very useful
# during development. Note that you have to manually build libvmprof by
# running make inside the src dir
DYNAMIC_VMPROF = False
eci_kwds = dict(
include_dirs = [SRC],
includes = ['vmprof.h', 'trampoline.h'],
separate_module_files = [SRC.join('trampoline.asmgcc.s')],
libraries = ['unwind'],
post_include_bits=["""
void pypy_vmprof_init(void);
"""],
separate_module_sources=["""
void pypy_vmprof_init(void) {
vmprof_set_mainloop(pypy_execute_frame_trampoline, 0,
NULL);
}
"""],
)
if DYNAMIC_VMPROF:
eci_kwds['libraries'] += ['vmprof']
eci_kwds['link_extra'] = ['-Wl,-rpath,%s' % SRC, '-L%s' % SRC]
else:
eci_kwds['separate_module_files'] += [SRC.join('vmprof.c')]
eci = ExternalCompilationInfo(**eci_kwds)
check_eci = eci.merge(ExternalCompilationInfo(separate_module_files=[
SRC.join('fake_pypy_api.c')]))
platform.verify_eci(check_eci)
pypy_execute_frame_trampoline = rffi.llexternal(
"pypy_execute_frame_trampoline",
[llmemory.GCREF, llmemory.GCREF, llmemory.GCREF, lltype.Signed],
llmemory.GCREF,
compilation_info=eci,
_nowrapper=True, sandboxsafe=True,
random_effects_on_gcobjs=True)
pypy_vmprof_init = rffi.llexternal("pypy_vmprof_init", [], lltype.Void,
compilation_info=eci)
vmprof_enable = rffi.llexternal("vmprof_enable",
[rffi.INT, rffi.LONG, rffi.INT,
rffi.CCHARP, rffi.INT],
rffi.INT, compilation_info=eci,
save_err=rffi.RFFI_SAVE_ERRNO)
vmprof_disable = rffi.llexternal("vmprof_disable", [], rffi.INT,
compilation_info=eci,
save_err=rffi.RFFI_SAVE_ERRNO)
vmprof_register_virtual_function = rffi.llexternal(
"vmprof_register_virtual_function",
[rffi.CCHARP, rffi.VOIDP, rffi.VOIDP], lltype.Void,
compilation_info=eci, _nowrapper=True)
original_execute_frame = PyFrame.execute_frame.im_func
original_execute_frame.c_name = 'pypy_pyframe_execute_frame'
original_execute_frame._dont_inline_ = True
class __extend__(PyFrame):
def execute_frame(frame, w_inputvalue=None, operr=None):
# go through the asm trampoline ONLY if we are translated but not being JITted.
#
# If we are not translated, we obviously don't want to go through the
# trampoline because there is no C function it can call.
#
# If we are being JITted, we want to skip the trampoline, else the JIT
        # cannot see through it
if we_are_translated() and not jit.we_are_jitted():
# if we are translated, call the trampoline
gc_frame = cast_instance_to_gcref(frame)
gc_inputvalue = cast_instance_to_gcref(w_inputvalue)
gc_operr = cast_instance_to_gcref(operr)
unique_id = frame.pycode._unique_id
gc_result = pypy_execute_frame_trampoline(gc_frame, gc_inputvalue,
gc_operr, unique_id)
return cast_base_ptr_to_instance(W_Root, gc_result)
else:
return original_execute_frame(frame, w_inputvalue, operr)
def write_long_to_string_builder(l, b):
if sys.maxint == 2147483647:
b.append(chr(l & 0xff))
b.append(chr((l >> 8) & 0xff))
b.append(chr((l >> 16) & 0xff))
b.append(chr((l >> 24) & 0xff))
else:
b.append(chr(l & 0xff))
b.append(chr((l >> 8) & 0xff))
b.append(chr((l >> 16) & 0xff))
b.append(chr((l >> 24) & 0xff))
b.append(chr((l >> 32) & 0xff))
b.append(chr((l >> 40) & 0xff))
b.append(chr((l >> 48) & 0xff))
b.append(chr((l >> 56) & 0xff))
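# Illustrative note (plain CPython, not part of the RPython module): the helper above
# serializes the value little-endian, roughly struct.pack("<q", l) on 64-bit builds
# and struct.pack("<i", l) on 32-bit builds.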
def try_cast_to_pycode(gcref):
return rgc.try_cast_gcref_to_instance(PyCode, gcref)
MAX_CODES = 1000
class VMProf(object):
def __init__(self):
self.is_enabled = False
self.ever_enabled = False
self.fileno = -1
self.current_codes = []
def enable(self, space, fileno, period_usec):
if self.is_enabled:
raise oefmt(space.w_ValueError, "_vmprof already enabled")
self.fileno = fileno
self.is_enabled = True
self.write_header(fileno, period_usec)
if not self.ever_enabled:
if we_are_translated():
pypy_vmprof_init()
self.ever_enabled = True
self.gather_all_code_objs(space)
space.register_code_callback(vmprof_register_code)
if we_are_translated():
# does not work untranslated
res = vmprof_enable(fileno, period_usec, 0,
lltype.nullptr(rffi.CCHARP.TO), 0)
else:
res = 0
if res == -1:
raise wrap_oserror(space, OSError(rposix.get_saved_errno(),
"_vmprof.enable"))
def gather_all_code_objs(self, space):
all_code_objs = rgc.do_get_objects(try_cast_to_pycode)
for code in all_code_objs:
self.register_code(space, code)
def write_header(self, fileno, period_usec):
assert period_usec > 0
b = StringBuilder()
write_long_to_string_builder(0, b)
write_long_to_string_builder(3, b)
write_long_to_string_builder(0, b)
write_long_to_string_builder(period_usec, b)
write_long_to_string_builder(0, b)
b.append('\x04') # interp name
b.append(chr(len('pypy')))
b.append('pypy')
os.write(fileno, b.build())
def register_code(self, space, code):
if self.fileno == -1:
raise OperationError(space.w_RuntimeError,
space.wrap("vmprof not running"))
self.current_codes.append(code)
if len(self.current_codes) >= MAX_CODES:
self._flush_codes(space)
def _flush_codes(self, space):
b = StringBuilder()
for code in self.current_codes:
name = code._get_full_name()
b.append('\x02')
write_long_to_string_builder(code._unique_id, b)
write_long_to_string_builder(len(name), b)
b.append(name)
os.write(self.fileno, b.build())
self.current_codes = []
def disable(self, space):
if not self.is_enabled:
raise oefmt(space.w_ValueError, "_vmprof not enabled")
self.is_enabled = False
space.register_code_callback(None)
self._flush_codes(space)
self.fileno = -1
if we_are_translated():
# does not work untranslated
res = vmprof_disable()
else:
res = 0
if res == -1:
raise wrap_oserror(space, OSError(rposix.get_saved_errno(),
"_vmprof.disable"))
def vmprof_register_code(space, code):
from pypy.module._vmprof import Module
mod_vmprof = space.getbuiltinmodule('_vmprof')
assert isinstance(mod_vmprof, Module)
mod_vmprof.vmprof.register_code(space, code)
@unwrap_spec(fileno=int, period=float)
def enable(space, fileno, period=0.01): # default 100 Hz
from pypy.module._vmprof import Module
mod_vmprof = space.getbuiltinmodule('_vmprof')
assert isinstance(mod_vmprof, Module)
#
try:
period_usec = ovfcheck_float_to_int(period * 1000000.0 + 0.5)
if period_usec <= 0 or period_usec >= 1e6:
# we don't want seconds here at all
raise ValueError
except (ValueError, OverflowError):
raise OperationError(space.w_ValueError,
space.wrap("'period' too large or non positive"))
#
mod_vmprof.vmprof.enable(space, fileno, period_usec)
def disable(space):
from pypy.module._vmprof import Module
mod_vmprof = space.getbuiltinmodule('_vmprof')
assert isinstance(mod_vmprof, Module)
mod_vmprof.vmprof.disable(space)
|
the-stack_0_23205 | import os
import sys
import math
from PIL import Image
import numpy as np
if len(sys.argv) != 2:
sys.stderr.write("png2header imagefile\n")
sys.exit(1)
if not os.path.exists(sys.argv[1]):
sys.stderr.write(sys.argv[1] + " not found\n")
sys.exit(1)
im = Image.open(sys.argv[1])
if not((im.size[0] <= 640) and (im.size[1] <= 480)):
    sys.stderr.write("image size must not exceed 640x480\n")
sys.exit(1)
print("const int image_width = ", end='')
print(im.size[0], end='')
print(";")
print("const int image_height = ", end='')
print(im.size[1], end='')
print(";\n")
rgb = np.array(im)
print('const uint16_t image[] = \n{')
for l in rgb:
print("\t", end='')
for p in l:
color = p[0] >> 3 | (p[1] & 0xfc) << 3 | (p[2] & 0xf8) << 8
print(hex(color), ', ', sep='', end='')
print()
print('};')
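# Illustrative usage (not part of the original script; file names are hypothetical):
#   python png2header.py splash.png > splash_image.h
# The generated header exposes image_width, image_height and image[], with each pixel
# packed into 16 bits (5 bits red in the low bits, 6 bits green, 5 bits blue in the
# high bits, as computed above).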
|
the-stack_0_23206 | import pytest
import sys
from tests.client import TestClient
from uvicorn.middleware.wsgi import WSGIMiddleware
def hello_world(environ, start_response):
status = '200 OK'
output = b'Hello World!\n'
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', str(len(output)))
]
start_response(status, headers)
return [output]
def echo_body(environ, start_response):
status = '200 OK'
output = environ['wsgi.input'].read()
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', str(len(output)))
]
start_response(status, headers)
return [output]
def raise_exception(environ, start_response):
raise RuntimeError('Something went wrong')
def return_exc_info(environ, start_response):
try:
raise RuntimeError('Something went wrong')
except:
status = '500 Internal Server Error'
output = b'Internal Server Error'
headers = [
('Content-Type', 'text/plain; charset=utf-8'),
('Content-Length', str(len(output)))
]
start_response(status, headers, exc_info=sys.exc_info())
return [output]
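# Illustrative note (not part of the test module): outside the tests, the same adapter
# serves a WSGI callable through uvicorn directly, e.g.
#   uvicorn.run(WSGIMiddleware(hello_world), host="127.0.0.1", port=8000)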
def test_wsgi_get():
app = WSGIMiddleware(hello_world)
client = TestClient(app)
response = client.get('/')
assert response.status_code == 200
assert response.text == 'Hello World!\n'
def test_wsgi_post():
app = WSGIMiddleware(echo_body)
client = TestClient(app)
response = client.post('/', json={"example": 123})
assert response.status_code == 200
assert response.text == '{"example": 123}'
def test_wsgi_exception():
# Note that we're testing the WSGI app directly here.
# The HTTP protocol implementations would catch this error and return 500.
app = WSGIMiddleware(raise_exception)
client = TestClient(app)
with pytest.raises(RuntimeError):
response = client.get('/')
def test_wsgi_exc_info():
# Note that we're testing the WSGI app directly here.
# The HTTP protocol implementations would catch this error and return 500.
app = WSGIMiddleware(return_exc_info)
client = TestClient(app)
with pytest.raises(RuntimeError):
response = client.get('/')
app = WSGIMiddleware(return_exc_info)
client = TestClient(app, raise_server_exceptions=False)
response = client.get('/')
assert response.status_code == 500
assert response.text == 'Internal Server Error'
|
the-stack_0_23207 | #!/usr/bin/env python3
"""
Unwanted patterns test cases.
The reason this file exist despite the fact we already have
`ci/code_checks.sh`,
(see https://github.com/pandas-dev/pandas/blob/master/ci/code_checks.sh)
is that some of the test cases are more complex/imposible to validate via regex.
So this file is somewhat an extensions to `ci/code_checks.sh`
"""
import argparse
import ast
import os
import sys
import token
import tokenize
from typing import IO, Callable, FrozenSet, Iterable, List, Set, Tuple
def _get_literal_string_prefix_len(token_string: str) -> int:
"""
Getting the length of the literal string prefix.
Parameters
----------
token_string : str
String to check.
Returns
-------
int
Length of the literal string prefix.
Examples
--------
>>> example_string = "'Hello world'"
>>> _get_literal_string_prefix_len(example_string)
0
>>> example_string = "r'Hello world'"
>>> _get_literal_string_prefix_len(example_string)
1
"""
try:
return min(
token_string.find(quote)
for quote in (r"'", r'"')
if token_string.find(quote) >= 0
)
except ValueError:
return 0
def bare_pytest_raises(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
"""
Test Case for bare pytest raises.
For example, this is wrong:
    >>> with pytest.raises(ValueError):
... # Some code that raises ValueError
And this is what we want instead:
    >>> with pytest.raises(ValueError, match="foo"):
... # Some code that raises ValueError
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
        Line number of the bare pytest.raises call.
    msg : str
        Explanation of the error.
Notes
-----
GH #23922
"""
contents = file_obj.read()
tree = ast.parse(contents)
for node in ast.walk(tree):
if not isinstance(node, ast.Call):
continue
try:
if not (node.func.value.id == "pytest" and node.func.attr == "raises"):
continue
except AttributeError:
continue
if not node.keywords:
yield (
node.lineno,
"Bare pytests raise have been found. "
"Please pass in the argument 'match' as well the exception.",
)
else:
# Means that there are arguments that are being passed in,
# now we validate that `match` is one of the passed in arguments
if not any(keyword.arg == "match" for keyword in node.keywords):
yield (
node.lineno,
"Bare pytests raise have been found. "
"Please pass in the argument 'match' as well the exception.",
)
PRIVATE_FUNCTIONS_ALLOWED = {"sys._getframe"} # no known alternative
def private_function_across_module(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
"""
Checking that a private function is not used across modules.
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
Line number of the private function that is used across modules.
msg : str
        Explanation of the error.
"""
contents = file_obj.read()
tree = ast.parse(contents)
imported_modules: Set[str] = set()
for node in ast.walk(tree):
if isinstance(node, (ast.Import, ast.ImportFrom)):
for module in node.names:
module_fqdn = module.name if module.asname is None else module.asname
imported_modules.add(module_fqdn)
if not isinstance(node, ast.Call):
continue
try:
module_name = node.func.value.id
function_name = node.func.attr
except AttributeError:
continue
# Exception section #
# (Debatable) Class case
if module_name[0].isupper():
continue
# (Debatable) Dunder methods case
elif function_name.startswith("__") and function_name.endswith("__"):
continue
elif module_name + "." + function_name in PRIVATE_FUNCTIONS_ALLOWED:
continue
if module_name in imported_modules and function_name.startswith("_"):
yield (node.lineno, f"Private function '{module_name}.{function_name}'")
def strings_to_concatenate(file_obj: IO[str]) -> Iterable[Tuple[int, str]]:
"""
    This test case is necessary because 'Black' (https://github.com/psf/black)
    is formatting strings over multiple lines.
For example, when this:
>>> foo = (
... "bar "
... "baz"
... )
Is becoming this:
>>> foo = ("bar " "baz")
'Black' is not considering this as an
issue (see https://github.com/psf/black/issues/1051),
so we are checking it here instead.
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
Line number of unconcatenated string.
msg : str
        Explanation of the error.
Notes
-----
GH #30454
"""
tokens: List = list(tokenize.generate_tokens(file_obj.readline))
for current_token, next_token in zip(tokens, tokens[1:]):
if current_token.type == next_token.type == token.STRING:
yield (
current_token.start[0],
(
"String unnecessarily split in two by black. "
"Please merge them manually."
),
)
def strings_with_wrong_placed_whitespace(
file_obj: IO[str],
) -> Iterable[Tuple[int, str]]:
"""
    Test case for leading spaces in concatenated strings.
For example:
>>> rule = (
... "We want the space at the end of the line, "
... "not at the beginning"
... )
Instead of:
>>> rule = (
... "We want the space at the end of the line,"
... " not at the beginning"
... )
Parameters
----------
file_obj : IO
File-like object containing the Python code to validate.
Yields
------
line_number : int
        Line number of the string with misplaced whitespace.
    msg : str
        Explanation of the error.
"""
def has_wrong_whitespace(first_line: str, second_line: str) -> bool:
"""
        Checking whether the two lines match the unwanted pattern.
Parameters
----------
first_line : str
First line to check.
second_line : str
Second line to check.
Returns
-------
bool
            True if the two received strings match the unwanted pattern.
Notes
-----
        The unwanted pattern that we are trying to catch is a space placed at
        the beginning of the continuation string instead of at the end of the
        previous string, unless the previous string ends with a
        newline character (\n).
For example, this is bad:
>>> rule = (
... "We want the space at the end of the line,"
... " not at the beginning"
... )
And what we want is:
>>> rule = (
... "We want the space at the end of the line, "
... "not at the beginning"
... )
And if the string is ending with a new line character (\n) we
do not want any trailing whitespaces after it.
For example, this is bad:
>>> rule = (
... "We want the space at the begging of "
... "the line if the previous line is ending with a \n "
... "not at the end, like always"
... )
And what we do want is:
>>> rule = (
... "We want the space at the begging of "
... "the line if the previous line is ending with a \n"
... " not at the end, like always"
... )
"""
if first_line.endswith(r"\n"):
return False
        elif first_line.startswith("  ") or second_line.startswith("  "):
            return False
        elif first_line.endswith("  ") or second_line.endswith("  "):
return False
elif (not first_line.endswith(" ")) and second_line.startswith(" "):
return True
return False
tokens: List = list(tokenize.generate_tokens(file_obj.readline))
for first_token, second_token, third_token in zip(tokens, tokens[1:], tokens[2:]):
        # Checking if we are in a block of concatenated strings
if (
first_token.type == third_token.type == token.STRING
and second_token.type == token.NL
):
            # Stripping the quotes, with the string literal prefix
first_string: str = first_token.string[
_get_literal_string_prefix_len(first_token.string) + 1 : -1
]
second_string: str = third_token.string[
_get_literal_string_prefix_len(third_token.string) + 1 : -1
]
if has_wrong_whitespace(first_string, second_string):
yield (
third_token.start[0],
(
"String has a space at the beginning instead "
"of the end of the previous string."
),
)
def main(
function: Callable[[IO[str]], Iterable[Tuple[int, str]]],
source_path: str,
output_format: str,
file_extensions_to_check: str,
excluded_file_paths: str,
) -> bool:
"""
Main entry point of the script.
Parameters
----------
function : Callable
Function to execute for the specified validation type.
source_path : str
Source path representing path to a file/directory.
output_format : str
Output format of the error message.
file_extensions_to_check : str
        Comma separated values of what file extensions to check.
    excluded_file_paths : str
        Comma separated values of what file paths to exclude during the check.
Returns
-------
bool
        True if any patterns related to the given function are found.
Raises
------
ValueError
        If the `source_path` does not point to an existing file/directory.
"""
if not os.path.exists(source_path):
raise ValueError("Please enter a valid path, pointing to a file/directory.")
is_failed: bool = False
file_path: str = ""
FILE_EXTENSIONS_TO_CHECK: FrozenSet[str] = frozenset(
file_extensions_to_check.split(",")
)
PATHS_TO_IGNORE = frozenset(excluded_file_paths.split(","))
if os.path.isfile(source_path):
file_path = source_path
with open(file_path, "r") as file_obj:
for line_number, msg in function(file_obj):
is_failed = True
print(
output_format.format(
source_path=file_path, line_number=line_number, msg=msg
)
)
for subdir, _, files in os.walk(source_path):
if any(path in subdir for path in PATHS_TO_IGNORE):
continue
for file_name in files:
if not any(
file_name.endswith(extension) for extension in FILE_EXTENSIONS_TO_CHECK
):
continue
file_path = os.path.join(subdir, file_name)
with open(file_path, "r") as file_obj:
for line_number, msg in function(file_obj):
is_failed = True
print(
output_format.format(
source_path=file_path, line_number=line_number, msg=msg
)
)
return is_failed
if __name__ == "__main__":
available_validation_types: List[str] = [
"bare_pytest_raises",
"private_function_across_module",
"strings_to_concatenate",
"strings_with_wrong_placed_whitespace",
]
parser = argparse.ArgumentParser(description="Unwanted patterns checker.")
parser.add_argument(
"path", nargs="?", default=".", help="Source path of file/directory to check."
)
parser.add_argument(
"--format",
"-f",
default="{source_path}:{line_number}:{msg}",
help="Output format of the error message.",
)
parser.add_argument(
"--validation-type",
"-vt",
choices=available_validation_types,
required=True,
help="Validation test case to check.",
)
parser.add_argument(
"--included-file-extensions",
default="py,pyx,pxd,pxi",
help="Coma seperated file extensions to check.",
)
parser.add_argument(
"--excluded-file-paths",
default="asv_bench/env",
help="Comma separated file extensions to check.",
)
args = parser.parse_args()
sys.exit(
main(
function=globals().get(args.validation_type), # type: ignore
source_path=args.path,
output_format=args.format,
file_extensions_to_check=args.included_file_extensions,
excluded_file_paths=args.excluded_file_paths,
)
)
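# Illustrative invocation (not part of the script; the module name below is simply
# whatever this file is saved as):
#   python validate_unwanted_patterns.py --validation-type="bare_pytest_raises" pandas/tests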
|
the-stack_0_23208 | import argparse
from sklearn import datasets
from sklearn.model_selection import train_test_split
from sklearn.metrics import accuracy_score
from sklearn.svm import SVC
# import matplotlib.pyplot as plt
iris = datasets.load_iris()
digits = datasets.load_digits()
X_train, X_test, y_train, y_test = train_test_split(digits.data, digits.target, test_size=0.2, random_state=42)
parser = argparse.ArgumentParser()
parser.add_argument('--gamma', type=float, default=0.001)
parser.add_argument('--C', type=float, default=100.)
args = parser.parse_args()
def fit_predict(gamma, C):
clf=SVC(gamma=gamma, C=C)
clf.fit(X_train, y_train)
predictions=clf.predict(X_test)
return accuracy_score(y_test, predictions)
print(fit_predict(args.gamma, args.C))
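# Illustrative invocation (not part of the script; the file name is hypothetical):
#   python svc_digits.py --gamma 0.001 --C 100
# prints the held-out accuracy for those hyperparameters.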
# Grid search over gamma for future reference
# accuracies = []
# gamma = 0.00001
# C = 100.
# # for gamma in [10**(-n) for n in range(10)]:
# for C in range(1,100):
# accuracy = fit_predict(gamma, C)
# accuracies.append(accuracy)
# print(f'gamma={gamma}, C={C}, accuracy={accuracy}')
# import matplotlib.pyplot as plt
# plt.plot(accuracies)
# plt.show()
|
the-stack_0_23209 | import errno
from http import client
import io
import os
import array
import socket
import unittest
TestCase = unittest.TestCase
from test import support
here = os.path.dirname(__file__)
# Self-signed cert file for 'localhost'
CERT_localhost = os.path.join(here, 'keycert.pem')
# Self-signed cert file for 'fakehostname'
CERT_fakehostname = os.path.join(here, 'keycert2.pem')
# Root cert file (CA) for svn.python.org's cert
CACERT_svn_python_org = os.path.join(here, 'https_svn_python_org_root.pem')
# constants for testing chunked encoding
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd! \r\n'
'8\r\n'
'and now \r\n'
'22\r\n'
'for something completely different\r\n'
)
chunked_expected = b'hello world! and now for something completely different'
chunk_extension = ";foo=bar"
last_chunk = "0\r\n"
last_chunk_extended = "0" + chunk_extension + "\r\n"
trailers = "X-Dummy: foo\r\nX-Dumm2: bar\r\n"
chunked_end = "\r\n"
HOST = support.HOST
class FakeSocket:
def __init__(self, text, fileclass=io.BytesIO, host=None, port=None):
if isinstance(text, str):
text = text.encode("ascii")
self.text = text
self.fileclass = fileclass
self.data = b''
self.sendall_calls = 0
self.host = host
self.port = port
def sendall(self, data):
self.sendall_calls += 1
self.data += data
def makefile(self, mode, bufsize=None):
if mode != 'r' and mode != 'rb':
raise client.UnimplementedFileMode()
# keep the file around so we can check how much was read from it
self.file = self.fileclass(self.text)
self.file.close = lambda:None #nerf close ()
return self.file
def close(self):
pass
class EPipeSocket(FakeSocket):
def __init__(self, text, pipe_trigger):
# When sendall() is called with pipe_trigger, raise EPIPE.
FakeSocket.__init__(self, text)
self.pipe_trigger = pipe_trigger
def sendall(self, data):
if self.pipe_trigger in data:
raise OSError(errno.EPIPE, "gotcha")
self.data += data
def close(self):
pass
class NoEOFBytesIO(io.BytesIO):
"""Like BytesIO, but raises AssertionError on EOF.
This is used below to test that http.client doesn't try to read
more from the underlying file than it should.
"""
def read(self, n=-1):
data = io.BytesIO.read(self, n)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
def readline(self, length=None):
data = io.BytesIO.readline(self, length)
if data == b'':
raise AssertionError('caller tried to read past EOF')
return data
class HeaderTests(TestCase):
def test_auto_headers(self):
# Some headers are added automatically, but should not be added by
# .request() if they are explicitly set.
class HeaderCountingBuffer(list):
def __init__(self):
self.count = {}
def append(self, item):
kv = item.split(b':')
if len(kv) > 1:
# item is a 'Key: Value' header string
lcKey = kv[0].decode('ascii').lower()
self.count.setdefault(lcKey, 0)
self.count[lcKey] += 1
list.append(self, item)
for explicit_header in True, False:
for header in 'Content-length', 'Host', 'Accept-encoding':
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('blahblahblah')
conn._buffer = HeaderCountingBuffer()
body = 'spamspamspam'
headers = {}
if explicit_header:
headers[header] = str(len(body))
conn.request('POST', '/', body, headers)
self.assertEqual(conn._buffer.count[header.lower()], 1)
def test_content_length_0(self):
class ContentLengthChecker(list):
def __init__(self):
list.__init__(self)
self.content_length = None
def append(self, item):
kv = item.split(b':', 1)
if len(kv) > 1 and kv[0].lower() == b'content-length':
self.content_length = kv[1].strip()
list.append(self, item)
# POST with empty body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request('POST', '/', '')
self.assertEqual(conn._buffer.content_length, b'0',
'Header Content-Length not set')
# PUT request with empty body
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn._buffer = ContentLengthChecker()
conn.request('PUT', '/', '')
self.assertEqual(conn._buffer.content_length, b'0',
'Header Content-Length not set')
def test_putheader(self):
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket(None)
conn.putrequest('GET','/')
conn.putheader('Content-length', 42)
self.assertIn(b'Content-length: 42', conn._buffer)
def test_ipv6host_header(self):
# Default host header on IPv6 transaction should wrapped by [] if
# its actual IPv6 address
expected = b'GET /foo HTTP/1.1\r\nHost: [2001::]:81\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001::]:81')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
expected = b'GET /foo HTTP/1.1\r\nHost: [2001:102A::]\r\n' \
b'Accept-Encoding: identity\r\n\r\n'
conn = client.HTTPConnection('[2001:102A::]')
sock = FakeSocket('')
conn.sock = sock
conn.request('GET', '/foo')
self.assertTrue(sock.data.startswith(expected))
class BasicTest(TestCase):
def test_status_lines(self):
# Test HTTP status lines
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(0), b'') # Issue #20007
self.assertFalse(resp.isclosed())
self.assertFalse(resp.closed)
self.assertEqual(resp.read(), b"Text")
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
body = "HTTP/1.1 400.100 Not Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
self.assertRaises(client.BadStatusLine, resp.begin)
def test_bad_status_repr(self):
exc = client.BadStatusLine('')
self.assertEqual(repr(exc), '''BadStatusLine("\'\'",)''')
def test_partial_reads(self):
# if we have a length, the system knows when to close itself
        # same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos(self):
# if we have a length, the system knows when to close itself
        # same behaviour as when we read the whole thing with read()
body = "HTTP/1.1 200 Ok\r\nContent-Length: 4\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_reads_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_partial_readintos_no_content_length(self):
# when no length is present, the socket should be gracefully closed when
# all data was read
body = "HTTP/1.1 200 Ok\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
def test_partial_reads_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(2), b'Te')
self.assertFalse(resp.isclosed())
self.assertEqual(resp.read(2), b'xt')
self.assertEqual(resp.read(1), b'')
self.assertTrue(resp.isclosed())
def test_partial_readintos_incomplete_body(self):
# if the server shuts down the connection before the whole
# content-length is delivered, the socket is gracefully closed
body = "HTTP/1.1 200 Ok\r\nContent-Length: 10\r\n\r\nText"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
b = bytearray(2)
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'Te')
self.assertFalse(resp.isclosed())
n = resp.readinto(b)
self.assertEqual(n, 2)
self.assertEqual(bytes(b), b'xt')
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:[email protected]"):
self.assertRaises(client.InvalidURL, client.HTTPConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:80", "www.python.org", 80),
("www.python.org:", "www.python.org", 80),
("www.python.org", "www.python.org", 80),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 80),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b", 80)):
c = client.HTTPConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
def test_response_headers(self):
# test response with multiple message headers with the same field name.
text = ('HTTP/1.1 200 OK\r\n'
'Set-Cookie: Customer="WILE_E_COYOTE"; '
'Version="1"; Path="/acme"\r\n'
'Set-Cookie: Part_Number="Rocket_Launcher_0001"; Version="1";'
' Path="/acme"\r\n'
'\r\n'
'No body\r\n')
hdr = ('Customer="WILE_E_COYOTE"; Version="1"; Path="/acme"'
', '
'Part_Number="Rocket_Launcher_0001"; Version="1"; Path="/acme"')
s = FakeSocket(text)
r = client.HTTPResponse(s)
r.begin()
cookies = r.getheader("Set-Cookie")
self.assertEqual(cookies, hdr)
def test_read_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
if resp.read():
self.fail("Did not expect response from HEAD request")
def test_readinto_head(self):
# Test that the library doesn't attempt to read any data
# from a HEAD request. (Tickles SF bug #622042.)
sock = FakeSocket(
'HTTP/1.1 200 OK\r\n'
'Content-Length: 14432\r\n'
'\r\n',
NoEOFBytesIO)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
if resp.readinto(b) != 0:
self.fail("Did not expect response from HEAD request")
self.assertEqual(bytes(b), b'\x00'*5)
def test_too_many_headers(self):
headers = '\r\n'.join('Header%d: foo' % i
for i in range(client._MAXHEADERS + 1)) + '\r\n'
text = ('HTTP/1.1 200 OK\r\n' + headers)
s = FakeSocket(text)
r = client.HTTPResponse(s)
self.assertRaisesRegex(client.HTTPException,
r"got more than \d+ headers", r.begin)
def test_send_file(self):
expected = (b'GET /foo HTTP/1.1\r\nHost: example.com\r\n'
b'Accept-Encoding: identity\r\nContent-Length:')
with open(__file__, 'rb') as body:
conn = client.HTTPConnection('example.com')
sock = FakeSocket(body)
conn.sock = sock
conn.request('GET', '/foo', body)
self.assertTrue(sock.data.startswith(expected), '%r != %r' %
(sock.data[:len(expected)], expected))
def test_send(self):
expected = b'this is a test this is only a test'
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
conn.send(expected)
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(array.array('b', expected))
self.assertEqual(expected, sock.data)
sock.data = b''
conn.send(io.BytesIO(expected))
self.assertEqual(expected, sock.data)
def test_send_updating_file(self):
def data():
yield 'data'
yield None
yield 'data_two'
class UpdatingFile():
mode = 'r'
d = data()
def read(self, blocksize=-1):
return self.d.__next__()
expected = b'data'
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.send(UpdatingFile())
self.assertEqual(sock.data, expected)
def test_send_iter(self):
expected = b'GET /foo HTTP/1.1\r\nHost: example.com\r\n' \
b'Accept-Encoding: identity\r\nContent-Length: 11\r\n' \
b'\r\nonetwothree'
def body():
yield b"one"
yield b"two"
yield b"three"
conn = client.HTTPConnection('example.com')
sock = FakeSocket("")
conn.sock = sock
conn.request('GET', '/foo', body(), {'Content-Length': '11'})
self.assertEqual(sock.data, expected)
def test_send_type_error(self):
# See: Issue #12676
conn = client.HTTPConnection('example.com')
conn.sock = FakeSocket('')
with self.assertRaises(TypeError):
conn.request('POST', 'test', conn)
def test_chunked(self):
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(n) + resp.read(n) + resp.read(), expected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_readinto_chunked(self):
expected = chunked_expected
nexpected = len(expected)
b = bytearray(128)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
n = resp.readinto(b)
self.assertEqual(b[:nexpected], expected)
self.assertEqual(n, nexpected)
resp.close()
# Various read sizes
for n in range(1, 12):
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
m = memoryview(b)
i = resp.readinto(m[0:n])
i += resp.readinto(m[i:n + i])
i += resp.readinto(m[i:])
self.assertEqual(b[:nexpected], expected)
self.assertEqual(i, nexpected)
resp.close()
for x in ('', 'foo\r\n'):
sock = FakeSocket(chunked_start + x)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
n = resp.readinto(b)
except client.IncompleteRead as i:
self.assertEqual(i.partial, expected)
expected_message = 'IncompleteRead(%d bytes read)' % len(expected)
self.assertEqual(repr(i), expected_message)
self.assertEqual(str(i), expected_message)
else:
self.fail('IncompleteRead expected')
finally:
resp.close()
def test_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_readinto_chunked_head(self):
chunked_start = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello world\r\n'
'1\r\n'
'd\r\n'
)
sock = FakeSocket(chunked_start + last_chunk + chunked_end)
resp = client.HTTPResponse(sock, method="HEAD")
resp.begin()
b = bytearray(5)
n = resp.readinto(b)
self.assertEqual(n, 0)
self.assertEqual(bytes(b), b'\x00'*5)
self.assertEqual(resp.status, 200)
self.assertEqual(resp.reason, 'OK')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_negative_content_length(self):
sock = FakeSocket(
'HTTP/1.1 200 OK\r\nContent-Length: -1\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), b'Hello\r\n')
self.assertTrue(resp.isclosed())
def test_incomplete_read(self):
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello\r\n')
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
try:
resp.read()
except client.IncompleteRead as i:
self.assertEqual(i.partial, b'Hello\r\n')
self.assertEqual(repr(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertEqual(str(i),
"IncompleteRead(7 bytes read, 3 more expected)")
self.assertTrue(resp.isclosed())
else:
self.fail('IncompleteRead expected')
def test_epipe(self):
sock = EPipeSocket(
"HTTP/1.0 401 Authorization Required\r\n"
"Content-type: text/html\r\n"
"WWW-Authenticate: Basic realm=\"example\"\r\n",
b"Content-Length")
conn = client.HTTPConnection("example.com")
conn.sock = sock
self.assertRaises(OSError,
lambda: conn.request("PUT", "/url", "body"))
resp = conn.getresponse()
self.assertEqual(401, resp.status)
self.assertEqual("Basic realm=\"example\"",
resp.getheader("www-authenticate"))
# Test lines overflowing the max line size (_MAXLINE in http.client)
def test_overflowing_status_line(self):
body = "HTTP/1.1 200 Ok" + "k" * 65536 + "\r\n"
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises((client.LineTooLong, client.BadStatusLine), resp.begin)
def test_overflowing_header_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'X-Foo: bar' + 'r' * 65536 + '\r\n\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
self.assertRaises(client.LineTooLong, resp.begin)
def test_overflowing_chunked_line(self):
body = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
+ '0' * 65536 + 'a\r\n'
'hello world\r\n'
'0\r\n'
'\r\n'
)
resp = client.HTTPResponse(FakeSocket(body))
resp.begin()
self.assertRaises(client.LineTooLong, resp.read)
def test_early_eof(self):
# Test httpresponse with no \r\n termination,
body = "HTTP/1.1 200 Ok"
sock = FakeSocket(body)
resp = client.HTTPResponse(sock)
resp.begin()
self.assertEqual(resp.read(), b'')
self.assertTrue(resp.isclosed())
self.assertFalse(resp.closed)
resp.close()
self.assertTrue(resp.closed)
def test_delayed_ack_opt(self):
        # Test that Nagle/delayed_ack optimisation works correctly.
# For small payloads, it should coalesce the body with
# headers, resulting in a single sendall() call
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
body = b'x' * (conn.mss - 1)
conn.request('POST', '/', body)
self.assertEqual(sock.sendall_calls, 1)
# For large payloads, it should send the headers and
# then the body, resulting in more than one sendall()
# call
conn = client.HTTPConnection('example.com')
sock = FakeSocket(None)
conn.sock = sock
body = b'x' * conn.mss
conn.request('POST', '/', body)
self.assertGreater(sock.sendall_calls, 1)
def test_chunked_extension(self):
extra = '3;foo=bar\r\n' + 'abc\r\n'
expected = chunked_expected + b'abc'
sock = FakeSocket(chunked_start + extra + last_chunk_extended + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_missing_end(self):
"""some servers may serve up a short chunked encoding stream"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk) #no terminating crlf
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
resp.close()
def test_chunked_trailers(self):
"""See that trailers are read and ignored"""
expected = chunked_expected
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# we should have reached the end of the file
self.assertEqual(sock.file.read(100), b"") #we read to the end
resp.close()
def test_chunked_sync(self):
"""Check that we don't read past the end of the chunked-encoding stream"""
expected = chunked_expected
extradata = "extradata"
sock = FakeSocket(chunked_start + last_chunk + trailers + chunked_end + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(100), extradata.encode("ascii")) #we read to the end
resp.close()
def test_content_length_sync(self):
"""Check that we don't read past the end of the Content-Length stream"""
extradata = "extradata"
expected = b"Hello123\r\n"
sock = FakeSocket('HTTP/1.1 200 OK\r\nContent-Length: 10\r\n\r\nHello123\r\n' + extradata)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
self.assertEqual(resp.read(), expected)
# the file should now have our extradata ready to be read
self.assertEqual(sock.file.read(100), extradata.encode("ascii")) #we read to the end
resp.close()
class ExtendedReadTest(TestCase):
"""
Test peek(), read1(), readline()
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'\r\n'
'hello world!\n'
'and now \n'
'for something completely different\n'
'foo'
)
lines_expected = lines[lines.find('hello'):].encode("ascii")
lines_chunked = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
def setUp(self):
sock = FakeSocket(self.lines)
resp = client.HTTPResponse(sock, method="GET")
resp.begin()
resp.fp = io.BufferedReader(resp.fp)
self.resp = resp
def test_peek(self):
resp = self.resp
# patch up the buffered peek so that it returns not too much stuff
oldpeek = resp.fp.peek
def mypeek(n=-1):
p = oldpeek(n)
if n >= 0:
return p[:n]
return p[:10]
resp.fp.peek = mypeek
all = []
while True:
# try a short peek
p = resp.peek(3)
if p:
self.assertGreater(len(p), 0)
# then unbounded peek
p2 = resp.peek()
self.assertGreaterEqual(len(p2), len(p))
self.assertTrue(p2.startswith(p))
next = resp.read(len(p2))
self.assertEqual(next, p2)
else:
next = resp.read()
self.assertFalse(next)
all.append(next)
if not next:
break
self.assertEqual(b"".join(all), self.lines_expected)
def test_readline(self):
resp = self.resp
self._verify_readline(self.resp.readline, self.lines_expected)
def _verify_readline(self, readline, expected):
all = []
while True:
# short readlines
line = readline(5)
if line and line != b"foo":
if len(line) < 5:
self.assertTrue(line.endswith(b"\n"))
all.append(line)
if not line:
break
self.assertEqual(b"".join(all), expected)
def test_read1(self):
resp = self.resp
def r():
res = resp.read1(4)
self.assertLessEqual(len(res), 4)
return res
readliner = Readliner(r)
self._verify_readline(readliner.readline, self.lines_expected)
def test_read1_unbounded(self):
resp = self.resp
all = []
while True:
data = resp.read1()
if not data:
break
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_bounded(self):
resp = self.resp
all = []
while True:
data = resp.read1(10)
if not data:
break
self.assertLessEqual(len(data), 10)
all.append(data)
self.assertEqual(b"".join(all), self.lines_expected)
def test_read1_0(self):
self.assertEqual(self.resp.read1(0), b"")
def test_peek_0(self):
p = self.resp.peek(0)
self.assertLessEqual(0, len(p))
class ExtendedReadTestChunked(ExtendedReadTest):
"""
Test peek(), read1(), readline() in chunked mode
"""
lines = (
'HTTP/1.1 200 OK\r\n'
'Transfer-Encoding: chunked\r\n\r\n'
'a\r\n'
'hello worl\r\n'
'3\r\n'
'd!\n\r\n'
'9\r\n'
'and now \n\r\n'
'23\r\n'
'for something completely different\n\r\n'
'3\r\n'
'foo\r\n'
'0\r\n' # terminating chunk
'\r\n' # end of trailers
)
class Readliner:
"""
a simple readline class that uses an arbitrary read function and buffering
"""
def __init__(self, readfunc):
self.readfunc = readfunc
self.remainder = b""
def readline(self, limit):
data = []
datalen = 0
read = self.remainder
try:
while True:
idx = read.find(b'\n')
if idx != -1:
break
if datalen + len(read) >= limit:
idx = limit - datalen - 1
# read more data
data.append(read)
read = self.readfunc()
if not read:
idx = 0 #eof condition
break
idx += 1
data.append(read[:idx])
self.remainder = read[idx:]
return b"".join(data)
except:
self.remainder = b"".join(data)
raise
class OfflineTest(TestCase):
def test_responses(self):
self.assertEqual(client.responses[client.NOT_FOUND], "Not Found")
class SourceAddressTest(TestCase):
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
self.port = support.bind_port(self.serv)
self.source_port = support.find_unused_port()
self.serv.listen(5)
self.conn = None
def tearDown(self):
if self.conn:
self.conn.close()
self.conn = None
self.serv.close()
self.serv = None
def testHTTPConnectionSourceAddress(self):
self.conn = client.HTTPConnection(HOST, self.port,
source_address=('', self.source_port))
self.conn.connect()
self.assertEqual(self.conn.sock.getsockname()[1], self.source_port)
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not defined')
def testHTTPSConnectionSourceAddress(self):
self.conn = client.HTTPSConnection(HOST, self.port,
source_address=('', self.source_port))
# We don't test anything here other the constructor not barfing as
# this code doesn't deal with setting up an active running SSL server
# for an ssl_wrapped connect() to actually return from.
class TimeoutTest(TestCase):
PORT = None
def setUp(self):
self.serv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
TimeoutTest.PORT = support.bind_port(self.serv)
self.serv.listen(5)
def tearDown(self):
self.serv.close()
self.serv = None
def testTimeoutAttribute(self):
# This will prove that the timeout gets through HTTPConnection
# and into the socket.
# default -- use global socket timeout
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
# no timeout -- do not use global socket default
self.assertIsNone(socket.getdefaulttimeout())
socket.setdefaulttimeout(30)
try:
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT,
timeout=None)
httpConn.connect()
finally:
socket.setdefaulttimeout(None)
self.assertEqual(httpConn.sock.gettimeout(), None)
httpConn.close()
# a value
httpConn = client.HTTPConnection(HOST, TimeoutTest.PORT, timeout=30)
httpConn.connect()
self.assertEqual(httpConn.sock.gettimeout(), 30)
httpConn.close()
class HTTPSTest(TestCase):
def setUp(self):
if not hasattr(client, 'HTTPSConnection'):
self.skipTest('ssl support required')
def make_server(self, certfile):
from test.ssl_servers import make_https_server
return make_https_server(self, certfile=certfile)
def test_attributes(self):
# simple test to check it's storing the timeout
h = client.HTTPSConnection(HOST, TimeoutTest.PORT, timeout=30)
self.assertEqual(h.timeout, 30)
def _check_svn_python_org(self, resp):
# Just a simple check that everything went fine
server_string = resp.getheader('server')
self.assertIn('Apache', server_string)
def test_networked(self):
# Default settings: no cert verification is done
support.requires('network')
with support.transient_internet('svn.python.org'):
h = client.HTTPSConnection('svn.python.org', 443)
h.request('GET', '/')
resp = h.getresponse()
self._check_svn_python_org(resp)
def test_networked_good_cert(self):
# We feed a CA cert that validates the server's cert
import ssl
support.requires('network')
with support.transient_internet('svn.python.org'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CACERT_svn_python_org)
h = client.HTTPSConnection('svn.python.org', 443, context=context)
h.request('GET', '/')
resp = h.getresponse()
self._check_svn_python_org(resp)
def test_networked_bad_cert(self):
# We feed a "CA" cert that is unrelated to the server's cert
import ssl
support.requires('network')
with support.transient_internet('svn.python.org'):
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('svn.python.org', 443, context=context)
with self.assertRaises(ssl.SSLError):
h.request('GET', '/')
def test_local_good_hostname(self):
# The (valid) cert validates the HTTP hostname
import ssl
server = self.make_server(CERT_localhost)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_localhost)
h = client.HTTPSConnection('localhost', server.port, context=context)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
del server
def test_local_bad_hostname(self):
# The (valid) cert doesn't validate the HTTP hostname
import ssl
server = self.make_server(CERT_fakehostname)
context = ssl.SSLContext(ssl.PROTOCOL_TLSv1)
context.verify_mode = ssl.CERT_REQUIRED
context.load_verify_locations(CERT_fakehostname)
h = client.HTTPSConnection('localhost', server.port, context=context)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# Same with explicit check_hostname=True
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=True)
with self.assertRaises(ssl.CertificateError):
h.request('GET', '/')
# With check_hostname=False, the mismatching is ignored
h = client.HTTPSConnection('localhost', server.port, context=context,
check_hostname=False)
h.request('GET', '/nonexistent')
resp = h.getresponse()
self.assertEqual(resp.status, 404)
del server
@unittest.skipIf(not hasattr(client, 'HTTPSConnection'),
'http.client.HTTPSConnection not available')
def test_host_port(self):
# Check invalid host_port
for hp in ("www.python.org:abc", "user:[email protected]"):
self.assertRaises(client.InvalidURL, client.HTTPSConnection, hp)
for hp, h, p in (("[fe80::207:e9ff:fe9b]:8000",
"fe80::207:e9ff:fe9b", 8000),
("www.python.org:443", "www.python.org", 443),
("www.python.org:", "www.python.org", 443),
("www.python.org", "www.python.org", 443),
("[fe80::207:e9ff:fe9b]", "fe80::207:e9ff:fe9b", 443),
("[fe80::207:e9ff:fe9b]:", "fe80::207:e9ff:fe9b",
443)):
c = client.HTTPSConnection(hp)
self.assertEqual(h, c.host)
self.assertEqual(p, c.port)
class RequestBodyTest(TestCase):
"""Test cases where a request includes a message body."""
def setUp(self):
self.conn = client.HTTPConnection('example.com')
self.conn.sock = self.sock = FakeSocket("")
self.conn.sock = self.sock
def get_headers_and_fp(self):
f = io.BytesIO(self.sock.data)
f.readline() # read the request line
message = client.parse_headers(f)
return message, f
def test_manual_content_length(self):
# Set an incorrect content-length so that we can verify that
# it will not be over-ridden by the library.
self.conn.request("PUT", "/url", "body",
{"Content-Length": "42"})
message, f = self.get_headers_and_fp()
self.assertEqual("42", message.get("content-length"))
self.assertEqual(4, len(f.read()))
def test_ascii_body(self):
self.conn.request("PUT", "/url", "body")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_latin1_body(self):
self.conn.request("PUT", "/url", "body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_bytes_body(self):
self.conn.request("PUT", "/url", b"body\xc1")
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
def test_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "w") as f:
f.write("body")
with open(support.TESTFN) as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("4", message.get("content-length"))
self.assertEqual(b'body', f.read())
def test_binary_file_body(self):
self.addCleanup(support.unlink, support.TESTFN)
with open(support.TESTFN, "wb") as f:
f.write(b"body\xc1")
with open(support.TESTFN, "rb") as f:
self.conn.request("PUT", "/url", f)
message, f = self.get_headers_and_fp()
self.assertEqual("text/plain", message.get_content_type())
self.assertIsNone(message.get_charset())
self.assertEqual("5", message.get("content-length"))
self.assertEqual(b'body\xc1', f.read())
class HTTPResponseTest(TestCase):
def setUp(self):
body = "HTTP/1.1 200 Ok\r\nMy-Header: first-value\r\nMy-Header: \
second-value\r\n\r\nText"
sock = FakeSocket(body)
self.resp = client.HTTPResponse(sock)
self.resp.begin()
def test_getting_header(self):
header = self.resp.getheader('My-Header')
self.assertEqual(header, 'first-value, second-value')
header = self.resp.getheader('My-Header', 'some default')
self.assertEqual(header, 'first-value, second-value')
def test_getting_nonexistent_header_with_string_default(self):
header = self.resp.getheader('No-Such-Header', 'default-value')
self.assertEqual(header, 'default-value')
def test_getting_nonexistent_header_with_iterable_default(self):
header = self.resp.getheader('No-Such-Header', ['default', 'values'])
self.assertEqual(header, 'default, values')
header = self.resp.getheader('No-Such-Header', ('default', 'values'))
self.assertEqual(header, 'default, values')
def test_getting_nonexistent_header_without_default(self):
header = self.resp.getheader('No-Such-Header')
self.assertEqual(header, None)
def test_getting_header_defaultint(self):
header = self.resp.getheader('No-Such-Header',default=42)
self.assertEqual(header, 42)
class TunnelTests(TestCase):
def test_connect(self):
response_text = (
'HTTP/1.0 200 OK\r\n\r\n' # Reply to CONNECT
'HTTP/1.1 200 OK\r\n' # Reply to HEAD
'Content-Length: 42\r\n\r\n'
)
def create_connection(address, timeout=None, source_address=None):
return FakeSocket(response_text, host=address[0],
port=address[1])
conn = client.HTTPConnection('proxy.com')
conn._create_connection = create_connection
# Once connected, we shouldn't be able to tunnel anymore
conn.connect()
self.assertRaises(RuntimeError, conn.set_tunnel,
'destination.com')
# But if we close the connection, we're good
conn.close()
conn.set_tunnel('destination.com')
conn.request('HEAD', '/', '')
self.assertEqual(conn.sock.host, 'proxy.com')
self.assertEqual(conn.sock.port, 80)
self.assertTrue(b'CONNECT destination.com' in conn.sock.data)
self.assertTrue(b'Host: destination.com' in conn.sock.data)
# This test should be removed when CONNECT gets the HTTP/1.1 blessing
self.assertTrue(b'Host: proxy.com' not in conn.sock.data)
conn.close()
conn.request('PUT', '/', '')
self.assertEqual(conn.sock.host, 'proxy.com')
self.assertEqual(conn.sock.port, 80)
self.assertTrue(b'CONNECT destination.com' in conn.sock.data)
self.assertTrue(b'Host: destination.com' in conn.sock.data)
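# Typical real-world use of set_tunnel() through an HTTP proxy (illustrative
# sketch only; the proxy and target host names are placeholders):
#
#     conn = client.HTTPSConnection('proxy.example.com', 3128)
#     conn.set_tunnel('www.example.org', 443)   # must be called before connecting
#     conn.request('HEAD', '/')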
def test_main(verbose=None):
support.run_unittest(HeaderTests, OfflineTest, BasicTest, TimeoutTest,
HTTPSTest, RequestBodyTest, SourceAddressTest,
HTTPResponseTest, ExtendedReadTest,
ExtendedReadTestChunked, TunnelTests)
if __name__ == '__main__':
test_main()
|
the-stack_0_23211 | import sys
from gtts import gTTS # Import the TTS module
from pygame import mixer # Import the module used to play audio
# The playTexto() function builds the TTS audio and plays it back
# params:
# - texto -> Text to convert to speech
# - language -> Language in which the TTS audio should be generated
# - nombre -> Name of the output file
#
def playTexto(texto, language = 'es', nombre = 'output'):
# Build the gTTS object with the given parameters, with slow mode disabled
myobj = gTTS(text=texto, lang=language, slow=False)
# Compose the name of the output file
filename = nombre+".mp3"
filename = str(filename)
# Save the generated TTS file
myobj.save(filename)
# Start the pygame audio engine
mixer.pre_init()
mixer.init()
mixer.music.load(filename) # Load the audio file
mixer.music.play()
#time.sleep(2)
while(mixer.music.get_busy()): # Stay in the loop while playback is in progress
continue
mixer.music.stop() # Stop playback
exit() # Exit
# The text that you want to convert to audio
playTexto(str(sys.argv[1]),nombre = 'final')
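# Illustrative command-line invocation (the script name is a placeholder): the
# first argument is the text to speak, and the audio is saved as "final.mp3"
# before being played:
#
#     python play_texto.py "Hola, que tal"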
|
the-stack_0_23212 | # Generate the map information file from a 32-bit color image
from PIL import Image
from sys import argv
from random import choice
import json
MU_GRID_OBJTYPE_NONE = (0, 0, 0, 255) # black marks an empty (none) region
MU_GRID_OBJTYPE_DARK = (200, 0, 255, 255) # purple marks the map boundary
MU_GRID_OBJTYPE_EMPTY = (255, 255, 255, 255) # white marks an empty tile
MU_GRID_OBJTYPE_SNAKEHEAD = (0, 255, 0, 255) # green marks the snake head
MU_GRID_OBJTYPE_SNAKEBODY = (255, 255, 0, 255) # yellow marks the snake body
MU_GRID_OBJTYPE_BLOCK = (0, 0, 255, 255) # blue marks an obstacle
MU_GRID_OBJTYPE_FOOD = (255, 200, 0, 255) # orange marks food
MU_SNAKE_DIRECTION_UP = 0
MU_SNAKE_DIRECTION_RIGHT = 1
MU_SNAKE_DIRECTION_DOWN = 2
MU_SNAKE_DIRECTION_LEFT = 3
def sum38(iterseq, start=0):
s = start
for i in iterseq:
s += i
return s
def select(objtype):
return sum38([[(i[0], j[0]) for j in enumerate(i[1]) if j[1] == objtype]
for i in enumerate(pixels)], start=[])
def sortSnake(snake, direct):
so = (snake[0], snake[1]-1)
if direct != MU_SNAKE_DIRECTION_UP and so in snakes:
return [so] + sortSnake(so, MU_SNAKE_DIRECTION_DOWN)
so = (snake[0]+1, snake[1])
if direct != MU_SNAKE_DIRECTION_RIGHT and so in snakes:
return [so] + sortSnake(so, MU_SNAKE_DIRECTION_LEFT)
so = (snake[0], snake[1]+1)
if direct != MU_SNAKE_DIRECTION_DOWN and so in snakes:
return [so] + sortSnake(so, MU_SNAKE_DIRECTION_UP)
so = (snake[0]-1, snake[1])
if direct != MU_SNAKE_DIRECTION_LEFT and so in snakes:
return [so] + sortSnake(so, MU_SNAKE_DIRECTION_RIGHT)
return []
if __name__ == '__main__':
global pixels, snakes
img = Image.open(argv[1])
encoder = json.JSONEncoder()
mapmu = {}
pixels = [[img.getpixel((i, j)) for j in range(64)] for i in range(64)]
snakes = select(MU_GRID_OBJTYPE_SNAKEBODY)
path = '\\'.join(argv[0].split('\\')[:-1])
ofo = open(path+'\\map.mu', 'w')
mapmu['none'] = [{'x':i[0], 'y':i[1]} for i in select(MU_GRID_OBJTYPE_NONE)]
mapmu['darkPos'] = [{'x':i[0], 'y':i[1]} for i in select(MU_GRID_OBJTYPE_DARK)]
mapmu['food'] = [{'x':i[0], 'y':i[1]} for i in select(MU_GRID_OBJTYPE_FOOD)]
mapmu['blockPos'] = [{'x':i[0], 'y':i[1]} for i in select(MU_GRID_OBJTYPE_BLOCK)]
mapmu['snakePos'] = [{'x':i[0], 'y':i[1]} for i in sortSnake(choice(select(MU_GRID_OBJTYPE_SNAKEHEAD)), -1)]
ofo.write(encoder.encode(mapmu))
ofo.close()
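# Illustrative usage sketch (assumes a 64x64 RGBA image painted with the colors
# defined above; the file names are placeholders):
#
#     python make_map.py level1.png
#
# This writes a JSON file "map.mu" next to the script, shaped roughly like:
#
#     {"none": [{"x": 0, "y": 0}, ...], "darkPos": [...], "food": [...],
#      "blockPos": [...], "snakePos": [...]}   # snakePos runs outward from the head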
|
the-stack_0_23213 | from ophyd import (EpicsSignal, EpicsSignalRO)
from ophyd import (Device, Component as Cpt)
import hxntools.handlers
from hxntools.detectors import (HxnTimepixDetector as _HTD,
HxnMerlinDetector as _HMD,
BeamStatusDetector, HxnMercuryDetector,
HxnDexelaDetector)
from hxntools.detectors.merlin import HDF5PluginWithFileStore as _mhdf
from hxntools.detectors.timepix import HDF5PluginWithFileStore as _thdf
from hxntools.detectors.zebra import HxnZebra
# - 2D pixel array detectors
# -- Timepix 1
class HxnTimepixDetector(_HTD):
hdf5 = Cpt(_thdf, 'HDF1:',
read_attrs=[],
configuration_attrs=[],
write_path_template='/data/%Y/%m/%d/',
root='/data',
fs=db.fs)
timepix1 = HxnTimepixDetector('XF:03IDC-ES{Tpx:1}', name='timepix1',
image_name='timepix1',
read_attrs=['hdf5', 'cam','stats1'])
timepix1.hdf5.read_attrs = []
# -- Timepix 2
#timepix2 = HxnTimepixDetector('XF:03IDC-ES{Tpx:2}', name='timepix2',
# image_name='timepix2',
# read_attrs=['hdf5', 'cam'])
#timepix2.hdf5.read_attrs = []
# -- Merlin 1
class HxnMerlinDetector(_HMD):
hdf5 = Cpt(_mhdf, 'HDF1:',
read_attrs=[],
configuration_attrs=[],
write_path_template='/data/%Y/%m/%d/',
root='/data',
fs=db.fs)
merlin1 = HxnMerlinDetector('XF:03IDC-ES{Merlin:1}', name='merlin1',
image_name='merlin1',
read_attrs=['hdf5', 'cam', 'stats1'])
merlin1.hdf5.read_attrs = []
merlin2 = HxnMerlinDetector('XF:03IDC-ES{Merlin:2}', name='merlin2',
image_name='merlin2',
read_attrs=['hdf5', 'cam', 'stats1'])
merlin2.hdf5.read_attrs = []
# -- Dexela 1 (Dexela 1512 GigE-V24)
#class HxnDexelaDetector(_HMD):
# hdf5 = Cpt(_mhdf, 'HDF1:',
# read_attrs=[],
# configuration_attrs=[],
# write_path_template='c:\Dexela\Yijin_YBCO_2018Q1',
# root='c:',
# fs=db.fs)
dexela1 = HxnDexelaDetector('XF:03IDC-ES{Dexela:1}', name='dexela1',
image_name='dexela1',
read_attrs=['hdf5', 'cam','stats1'])
dexela1.hdf5.read_attrs = []
# - Other detectors and triggering devices
# -- DXP Mercury (1 channel)
mercury1 = HxnMercuryDetector('XF:03IDC-ES{DXP:1}', name='mercury1')
mercury1.read_attrs = ['dxp', 'mca']
mercury1.dxp.read_attrs = []
# -- Quantum Detectors Zebra
zebra = HxnZebra('XF:03IDC-ES{Zeb:1}:', name='zebra')
zebra.read_attrs = []
# -- Lakeshores
class HxnLakeShore(Device):
ch_a = Cpt(EpicsSignalRO, '-Ch:A}C:T-I')
ch_b = Cpt(EpicsSignalRO, '-Ch:B}C:T-I')
ch_c = Cpt(EpicsSignalRO, '-Ch:C}C:T-I')
ch_d = Cpt(EpicsSignalRO, '-Ch:D}C:T-I')
def set_names(self, cha, chb, chc, chd):
'''Set names of all channels
Returns channel signals
'''
self.ch_a.name = cha
self.ch_b.name = chb
self.ch_c.name = chc
self.ch_d.name = chd
return self.ch_a, self.ch_b, self.ch_c, self.ch_d
lakeshore2 = HxnLakeShore('XF:03IDC-ES{LS:2', name='lakeshore2')
# Name the lakeshore channels:
t_hlens, t_vlens, t_sample, t_base = lakeshore2.set_names(
't_hlens', 't_vlens', 't_sample', 't_base')
# X-ray eye camera sigma X/sigma Y
sigx = EpicsSignalRO('XF:03IDB-BI{Xeye-CAM:1}Stats1:SigmaX_RBV', name='sigx')
sigy = EpicsSignalRO('XF:03IDB-BI{Xeye-CAM:1}Stats1:SigmaY_RBV', name='sigy')
# Front-end Xray BPMs and local bumps
class HxnBpm(Device):
x = Cpt(EpicsSignalRO, 'Pos:X-I')
y = Cpt(EpicsSignalRO, 'Pos:Y-I')
xbpm = HxnBpm('SR:C03-BI{XBPM:1}', name='xbpm')
angle_x = EpicsSignalRO('SR:C31-{AI}Aie3:Angle-x-Cal', name='angle_x')
angle_y = EpicsSignalRO('SR:C31-{AI}Aie3:Angle-y-Cal', name='angle_y')
# Diamond Quad BPMs in C hutch
quad = HxnBpm('XF:03ID{XBPM:17}', name='quad')
sr_shutter_status = EpicsSignalRO('SR-EPS{PLC:1}Sts:MstrSh-Sts',
name='sr_shutter_status')
sr_beam_current = EpicsSignalRO('SR:C03-BI{DCCT:1}I:Real-I',
name='sr_beam_current')
det_beamstatus = BeamStatusDetector(min_current=100.0, name='det_beamstatus')
#Temporary EPICS PV detectors
#dexela_roi1_tot = EpicsSignalRO('XF:03IDC-ES{Dexela:1}Stats1:Total_RBV', name='dexela_roi1_tot')
#roi1_tot = EpicsSignalRO('XF:03IDC-ES{Merlin:1}Stats1:Total_RBV', name='roi1_tot')
#roi1_tot = roi1_tot.value
|
the-stack_0_23215 | """
FILENAME: processor01.py (/helical-project/structures/)
PROJECT: HELiCAL
AUTHOR: Aakash "Kash" Sudhakar
DESCRIPTION: First algorithm structure for HELiCAL project data processing.
Processor 01 enables frequency counting of nucleotides in DNA.
DATE CREATED: Wednesday, March 20, 2019
DATE LAST MODIFIED: Wednesday, March 20, 2019
"""
from collections import OrderedDict
class Processor01_NucleotideCounter(object):
""" Object structure containing logic for DNA Nucleotide Counts processing algorithm. """
def __init__(self, data):
self.dataset = data
def nucleotide_counter(self):
""" Method to calculate frequency distribution of base occurrences in input data sequence. """
dictogram, PERMITTED_NUCLEOTIDES = dict(), ["A", "C", "G", "T"]
# Builds dictionary-structured histogram of nucleotide frequencies while checking for appropriate permitted nucleotides
for nucleotide in self.dataset:
if nucleotide in PERMITTED_NUCLEOTIDES:
if nucleotide not in dictogram:
dictogram[nucleotide] = 1
else:
dictogram[nucleotide] += 1
continue
# Creates ordered dictionary by key alphabetization and returns values in-line
return OrderedDict(sorted(dictogram.items(), key=lambda X: X[0]))
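# e.g. (illustrative): for the input sequence "ACGTAA" this returns
# OrderedDict([('A', 3), ('C', 1), ('G', 1), ('T', 1)])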
def render_response(self, response):
""" Method to render stylized response text to user. """
return " ".join(["{}".format(value) for key, value in response.items()]) |
the-stack_0_23216 | '''
@author: Monkey Coders
@version: 1
This Python prototype, built on the MVC standard, filters and processes the data so it can be exported to other platforms.
Conditions:
2010 - Present
'''
import csv
import re
class CsvScanner:
# Extrae los datos minados de inegi sobre la poblacion 2010
def leer_poblacion_2010(self, filename):
datos = []
with open(filename, encoding="utf8") as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 4:
datos_entidad = row.split(";")
datos_entidad.pop(1)
datos_entidad[0] = datos_entidad[0].replace('"',"")
datos_entidad[1] = datos_entidad[1].replace('"',"")
datos_entidad[1] = datos_entidad[1].replace("\n","")
datos.append(datos_entidad)
print("[✔] Poblacion del 2010 minada. Fuente: INEGI")
return datos
# Extracts the mined INEGI data on births, 2010 - 2017
def leer_natalidad_2010_2017(self, filename):
natalidad_2010_2017 = []
natalidad_ordenada = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 4:
natalidad_2010_2017.append(row.split(";"))
# REMOVE THE LAST ELEMENT OF THE LIST HERE
for i, elemento in enumerate(natalidad_2010_2017):
ultimo_elemento = natalidad_2010_2017[i][8].replace("\n","")
natalidad_ordenada.append({"2010": natalidad_2010_2017[i][1], "2011": natalidad_2010_2017[i][2], "2012": natalidad_2010_2017[i][3], "2013": natalidad_2010_2017[i][4], "2014": natalidad_2010_2017[i][5], "2015": natalidad_2010_2017[i][6], "2016":natalidad_2010_2017[i][7], "2017": ultimo_elemento})
print("[✔] Natalidad del 2010 - 2017 minada. Fuente: INEGI")
return natalidad_ordenada
# Extracts the mined INEGI data on mortality, 2010 - 2017
def leer_mortalidad_2010_2017(self, filename):
mortalidad_2010_2017 = []
mortalidad_ordenada = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 4:
mortalidad_2010_2017.append(row.split(";"))
# REMOVE THE LAST ELEMENT OF THE LIST HERE
for i, elemento in enumerate(mortalidad_2010_2017):
ultimo_elemento = mortalidad_2010_2017[i][8].replace("\n","")
mortalidad_ordenada.append({"2010": mortalidad_2010_2017[i][1],"2011": mortalidad_2010_2017[i][2], "2012": mortalidad_2010_2017[i][3], "2013": mortalidad_2010_2017[i][4], "2014": mortalidad_2010_2017[i][5], "2015": mortalidad_2010_2017[i][6], "2016":mortalidad_2010_2017[i][7], "2017": ultimo_elemento})
print("[✔] Mortalidad del 2010 - 2017 minada. Fuente: INEGI")
return mortalidad_ordenada
# Extracts the mined CONAPO data on the 2018 - 2019 population
def leer_poblacion_2018_2019(self, filename):
poblacion_2018 = []
poblacion_2019 = []
poblacion_2018_2019 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for row in csvfile:
if(row.split(",")[1] == '2018' and row.split(",")[2] != 'República Mexicana'):
poblacion_2018.append(row.split(",")[6])
if(row.split(",")[1] == '2019' and row.split(",")[2] != 'República Mexicana'):
poblacion_2019.append(row.split(",")[6])
poblacion_2018_2019 = [poblacion_2018, poblacion_2019]
print("[✔] Poblacion 2018-2019 minados. Fuente: CONAPO")
return poblacion_2018_2019
def leer_patentes_2010_2018(self, filename):
patentes_2010_2018 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 1:
patentes_2010_2018.append(row.split(","))
print("[✔] Patentes por entidad federativa 2010 - 2018 minados. Fuente: IMPI")
return patentes_2010_2018
def leer_unidades_economicas_2013_2018(self, filename):
unidades_economicas_2013_2018 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 1:
unidades_economicas_2013_2018.append(row.split(',"'))
print("[✔] Unidades economicas por entidad federativa 2013 - 2018 minados. Fuente: DENUE")
return unidades_economicas_2013_2018
def leer_turistas_2010_2018(self, filename):
turistas_2010_2018 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 1:
turistas_2010_2018.append(row.split(',"'))
print("[✔] Turistas por entidad federativa 2010 - 2018 minados. Fuente: SECTUR")
return turistas_2010_2018
def leer_pib_mexico_1993_2018(self, filename):
pib_mexico_1993_2018 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for row in csvfile:
pib_mexico_1993_2018.append(row.split(","))
print("[✔] PIB de Mexico 1993 - 2018 minados. Fuente: INEGI")
return pib_mexico_1993_2018
def leer_actividades_economicas_entidades_2010_2017(self, filenames):
valores_entidades_2010_2017 = []
for entidad in filenames:
pib_entidades_2010_2017 = []
with open(entidad, encoding="utf8") as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
datos_limpio = row.split(",")
condicion_comun = datos_limpio[0].split("|")
titulo = condicion_comun[4: len(condicion_comun)]
titulo_str = str(titulo)
vector_titulo = titulo_str.split(",")
conjunto_de_datos = []
if len(condicion_comun) > 4:
if len(vector_titulo) > 1:
titulo_str = str(vector_titulo[1])
# Strip special characters
titulo_str = titulo_str.replace("[","")
titulo_str = titulo_str.replace("'","")
titulo_str = titulo_str.replace("<C1>","")
titulo_str = titulo_str.replace("]","")
titulo_str = titulo_str.replace("-","")
titulo_str = ''.join([i for i in titulo_str if not i.isdigit()])
datos_limpio[0] = titulo_str
contador = 0
datos_ordenados = []
for elemento in datos_limpio:
if contador != 0:
try:
float(elemento)
datos_ordenados.append(elemento.replace("\n",""))
except ValueError:
continue
else:
datos_ordenados.append(elemento)
contador += 1
pib_entidades_2010_2017.append(datos_ordenados)
if i == 38:
break # stop once we have reached the data we want
valores_entidades_2010_2017.append(pib_entidades_2010_2017)
print("[✔] PIB por entidades 2010 - 2017 minados. Fuente: INEGI")
return valores_entidades_2010_2017
def leer_exportaciones_entidades_2010_2018(self, filename):
exportaciones_entidades_2010_2018 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 1:
row = row.replace(" ","")
row = row.replace("\n","")
row = row.replace("'", "")
exportaciones_entidades_2010_2018.append(row.split(","))
print("[✔] Exportaciones por entidades 2010 - 2018 minados. Fuente: INEGI")
return exportaciones_entidades_2010_2018
def leer_promedio_actividad_trimestral_2010_2017(self, filename):
actividad_trimestral_2010_2017 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if i >= 1:
row = row.replace("\n", "")
actividad_trimestral_2010_2017.append(row.split(",")[1: len(row)])
print("[✔] Promedio de los indicadores trimestrales por entidades 2010 - 2017 minados. Fuente: ITAEE/INEGI")
return actividad_trimestral_2010_2017
def leer_consumo_electrico_municipios_2010_2017(self, filename):
consumo_electrico_municipios_2010_2017 = []
with open(filename, 'r') as csvfile:
csvFileReader = csv.reader(csvfile)
for i, row in enumerate(csvfile):
if row.find("TOTAL") !=-1 and row.find("N a c i o n a l") == -1:
consumo_electrico_municipios_2010_2017.append(row)
print("[✔] Consumo electrico por municipio 2010 - 2017 minados. Fuente: CFE")
return consumo_electrico_municipios_2010_2017
|
the-stack_0_23218 | import importlib.util
import os
import inspect
from abc import ABC, abstractmethod
from typing import List
from datetime import date, datetime
from .note import Note
from .config import Config
substitutions = {
'[[DATE]]': date.today().strftime('%d.%m.%Y'),
'[[TIME]]': datetime.now().strftime('%H:%M'),
}
def substitute_placeholders(s: str) -> str:
for key, val in substitutions.items():
s = s.replace(key, val)
return s
class Template(ABC):
cfg = None
@classmethod
def set_config(cls, cfg: Config):
cls.cfg = cfg
@abstractmethod
def get_name(self) -> str:
raise NotImplementedError
@abstractmethod
def get_tags(self) -> List[str]:
raise NotImplementedError
@abstractmethod
def get_content(self) -> str:
raise NotImplementedError
def to_note(self) -> Note:
name = substitute_placeholders(self.get_name())
tags = list(set(map(substitute_placeholders, self.get_tags())))
content = substitute_placeholders(self.get_content())
return Note(name=name, tags=tags, content=content)
def template_by_name(name: str, cfg: Config) -> Template:
path = cfg.template_path
if os.path.isfile(path):
spec = importlib.util.spec_from_file_location('user_templates', path)
foo = importlib.util.module_from_spec(spec)
spec.loader.exec_module(foo)
clsmembers = inspect.getmembers(foo, inspect.isclass)
try:
_, cls = next(
filter(
lambda x: x[0].lower() == name.lower() and issubclass(
x[1], Template), clsmembers))
except StopIteration:
raise ValueError(f'{name} is not a valid template name')
Template.set_config(cfg)
return cls()
else:
raise ValueError('No template file exists')
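# --- Illustrative sketch (not part of the original module) ---
# A minimal concrete Template showing how the abstract methods and the
# placeholder substitution in Template.to_note() fit together. The class name
# and returned strings below are invented for demonstration only; real
# templates live in the user's template file and are loaded by
# template_by_name() above.
class ExampleDailyTemplate(Template):
    def get_name(self) -> str:
        return 'Journal [[DATE]]'       # '[[DATE]]' becomes today's date

    def get_tags(self) -> List[str]:
        return ['journal', 'daily']     # duplicate tags are dropped in to_note()

    def get_content(self) -> str:
        return 'Created at [[TIME]]\n'  # '[[TIME]]' becomes the current time

# note = ExampleDailyTemplate().to_note()   # -> Note(name='Journal <dd.mm.yyyy>', ...)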
|
the-stack_0_23220 | import numpy as np
import pylab as plt
import skfmm
X, Y = np.meshgrid(np.linspace(-1,1,201), np.linspace(-1,1,201))
phi = -1*np.ones_like(X)
phi[X>-0.5] = 1
phi[np.logical_and(np.abs(Y)<0.25, X>-0.75)] = 1
plt.contour(X, Y, phi,[0], linewidths=(3), colors='black')
plt.title('Boundary location: the zero contour of phi')
# plt.savefig('2d_phi.png')
plt.show()
d = skfmm.distance(phi, dx=1e-2)
plt.title('Distance from the boundary')
plt.contour(X, Y, phi,[0], linewidths=(3), colors='black')
plt.contour(X, Y, d, 15)
plt.colorbar()
# plt.savefig('2d_phi_distance.png')
plt.show()
speed = np.ones_like(X)
speed[Y>0] = 1.5
t = skfmm.travel_time(phi, speed, dx=1e-2)
plt.title('Travel time from the boundary')
plt.contour(X, Y, phi,[0], linewidths=(3), colors='black')
plt.contour(X, Y, t, 15)
plt.colorbar()
# plt.savefig('2d_phi_travel_time.png')
plt.show()
mask = np.logical_and(abs(X)<0.1, abs(Y)<0.5)
phi = np.ma.MaskedArray(phi, mask)
t = skfmm.travel_time(phi, speed, dx=1e-2)
plt.title('Travel time from the boundary with an obstacle')
plt.contour(X, Y, phi, [0], linewidths=(3), colors='black')
plt.contour(X, Y, phi.mask, [0], linewidths=(3), colors='red')
plt.contour(X, Y, t, 15)
plt.colorbar()
# plt.savefig('2d_phi_travel_time_mask.png')
plt.show()
phi = -1 * np.ones_like(X)
phi[X > -0.5] = 1
phi[np.logical_and(np.abs(Y) < 0.25, X > -0.75)] = 1
d = skfmm.distance(phi, dx=1e-2, narrow=0.3)
plt.title('Distance calculation limited to narrow band')
plt.contour(X, Y, phi, [0], linewidths=(3), colors='black')
plt.contour(X, Y, d, 15)
plt.colorbar()
# plt.savefig('2d_phi_distance_narrow.png')
plt.show()
|
the-stack_0_23221 | #!/usr/bin/env python
#
# Electrum - lightweight Bitcoin client
# Copyright (C) 2012 thomasv@gitorious
#
# Permission is hereby granted, free of charge, to any person
# obtaining a copy of this software and associated documentation files
# (the "Software"), to deal in the Software without restriction,
# including without limitation the rights to use, copy, modify, merge,
# publish, distribute, sublicense, and/or sell copies of the Software,
# and to permit persons to whom the Software is furnished to do so,
# subject to the following conditions:
#
# The above copyright notice and this permission notice shall be
# included in all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
# EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
# MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
# NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS
# BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN
# ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN
# CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import sys
import time
import threading
import os
import traceback
import json
import shutil
import weakref
import webbrowser
import csv
from decimal import Decimal
import base64
from functools import partial
import queue
import asyncio
from PyQt5.QtGui import *
from PyQt5.QtCore import *
import PyQt5.QtCore as QtCore
from PyQt5.QtWidgets import *
import electrum
from electrum import (keystore, simple_config, ecc, constants, util, bitcoin, commands,
coinchooser, paymentrequest)
from electrum.bitcoin import COIN, is_address, TYPE_ADDRESS
from electrum.plugin import run_hook
from electrum.i18n import _
from electrum.util import (format_time, format_satoshis, format_fee_satoshis,
format_satoshis_plain, NotEnoughFunds, PrintError,
UserCancelled, NoDynamicFeeEstimates, profiler,
export_meta, import_meta, bh2u, bfh, InvalidPassword,
base_units, base_units_list, base_unit_name_to_decimal_point,
decimal_point_to_base_unit_name, quantize_feerate,
UnknownBaseUnit, DECIMAL_POINT_DEFAULT, UserFacingException,
get_new_wallet_name, send_exception_to_crash_reporter)
from electrum.transaction import Transaction, TxOutput
from electrum.address_synchronizer import AddTransactionException
from electrum.wallet import (Multisig_Wallet, CannotBumpFee, Abstract_Wallet,
sweep_preparations, InternalAddressCorruption)
from electrum.version import ELECTRUM_VERSION
from electrum.network import Network, TxBroadcastError, BestEffortRequestFailed
from electrum.exchange_rate import FxThread
from electrum.simple_config import SimpleConfig
from .exception_window import Exception_Hook
from .amountedit import AmountEdit, BTCAmountEdit, MyLineEdit, FeerateEdit
from .qrcodewidget import QRCodeWidget, QRDialog
from .qrtextedit import ShowQRTextEdit, ScanQRTextEdit
from .transaction_dialog import show_transaction
from .fee_slider import FeeSlider
from .util import *
from .installwizard import WIF_HELP_TEXT
from .history_list import HistoryList, HistoryModel
class StatusBarButton(QPushButton):
def __init__(self, icon, tooltip, func):
QPushButton.__init__(self, icon, '')
self.setToolTip(tooltip)
self.setFlat(True)
self.setMaximumWidth(25)
self.clicked.connect(self.onPress)
self.func = func
self.setIconSize(QSize(25,25))
self.setCursor(QCursor(Qt.PointingHandCursor))
def onPress(self, checked=False):
'''Drops the unwanted PyQt5 "checked" argument'''
self.func()
def keyPressEvent(self, e):
if e.key() == Qt.Key_Return:
self.func()
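# Typical construction (illustrative; the icon, label and callback are placeholders
# borrowed from elsewhere in this file):
#     sb = StatusBarButton(QIcon(":icons/status_connected.png"), _("Network"),
#                          lambda: self.gui_object.show_network_dialog(self))
#     self.statusBar().addPermanentWidget(sb)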
from electrum.paymentrequest import PR_PAID
class ElectrumWindow(QMainWindow, MessageBoxMixin, PrintError):
payment_request_ok_signal = pyqtSignal()
payment_request_error_signal = pyqtSignal()
new_fx_quotes_signal = pyqtSignal()
new_fx_history_signal = pyqtSignal()
network_signal = pyqtSignal(str, object)
alias_received_signal = pyqtSignal()
computing_privkeys_signal = pyqtSignal()
show_privkeys_signal = pyqtSignal()
def __init__(self, gui_object, wallet: Abstract_Wallet):
QMainWindow.__init__(self)
self.gui_object = gui_object
self.config = config = gui_object.config # type: SimpleConfig
self.gui_thread = gui_object.gui_thread
self.setup_exception_hook()
self.network = gui_object.daemon.network # type: Network
assert wallet, "no wallet"
self.wallet = wallet
self.fx = gui_object.daemon.fx # type: FxThread
self.invoices = wallet.invoices
self.contacts = wallet.contacts
self.tray = gui_object.tray
self.app = gui_object.app
self.cleaned_up = False
self.is_max = False
self.payment_request = None
self.checking_accounts = False
self.qr_window = None
self.not_enough_funds = False
self.pluginsdialog = None
self.require_fee_update = False
self.tl_windows = []
self.tx_external_keypairs = {}
self.tx_notification_queue = queue.Queue()
self.tx_notification_last_time = 0
self.create_status_bar()
self.need_update = threading.Event()
self.decimal_point = config.get('decimal_point', DECIMAL_POINT_DEFAULT)
try:
decimal_point_to_base_unit_name(self.decimal_point)
except UnknownBaseUnit:
self.decimal_point = DECIMAL_POINT_DEFAULT
self.num_zeros = int(config.get('num_zeros', 0))
self.completions = QStringListModel()
self.tabs = tabs = QTabWidget(self)
self.send_tab = self.create_send_tab()
self.receive_tab = self.create_receive_tab()
self.addresses_tab = self.create_addresses_tab()
self.utxo_tab = self.create_utxo_tab()
self.console_tab = self.create_console_tab()
self.contacts_tab = self.create_contacts_tab()
tabs.addTab(self.create_history_tab(), QIcon(":icons/tab_history.png"), _('History'))
tabs.addTab(self.send_tab, QIcon(":icons/tab_send.png"), _('Send'))
tabs.addTab(self.receive_tab, QIcon(":icons/tab_receive.png"), _('Receive'))
def add_optional_tab(tabs, tab, icon, description, name):
tab.tab_icon = icon
tab.tab_description = description
tab.tab_pos = len(tabs)
tab.tab_name = name
if self.config.get('show_{}_tab'.format(name), False):
tabs.addTab(tab, icon, description.replace("&", ""))
add_optional_tab(tabs, self.addresses_tab, QIcon(":icons/tab_addresses.png"), _("&Addresses"), "addresses")
add_optional_tab(tabs, self.utxo_tab, QIcon(":icons/tab_coins.png"), _("Co&ins"), "utxo")
add_optional_tab(tabs, self.contacts_tab, QIcon(":icons/tab_contacts.png"), _("Con&tacts"), "contacts")
add_optional_tab(tabs, self.console_tab, QIcon(":icons/tab_console.png"), _("Con&sole"), "console")
tabs.setSizePolicy(QSizePolicy.Expanding, QSizePolicy.Expanding)
self.setCentralWidget(tabs)
if self.config.get("is_maximized"):
self.showMaximized()
self.setWindowIcon(QIcon(":icons/electrum.png"))
self.init_menubar()
wrtabs = weakref.proxy(tabs)
QShortcut(QKeySequence("Ctrl+W"), self, self.close)
QShortcut(QKeySequence("Ctrl+Q"), self, self.close)
QShortcut(QKeySequence("Ctrl+R"), self, self.update_wallet)
QShortcut(QKeySequence("F5"), self, self.update_wallet)
QShortcut(QKeySequence("Ctrl+PgUp"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() - 1)%wrtabs.count()))
QShortcut(QKeySequence("Ctrl+PgDown"), self, lambda: wrtabs.setCurrentIndex((wrtabs.currentIndex() + 1)%wrtabs.count()))
for i in range(wrtabs.count()):
QShortcut(QKeySequence("Alt+" + str(i + 1)), self, lambda i=i: wrtabs.setCurrentIndex(i))
self.payment_request_ok_signal.connect(self.payment_request_ok)
self.payment_request_error_signal.connect(self.payment_request_error)
self.history_list.setFocus(True)
# network callbacks
if self.network:
self.network_signal.connect(self.on_network_qt)
interests = ['wallet_updated', 'network_updated', 'blockchain_updated',
'new_transaction', 'status',
'banner', 'verified', 'fee', 'fee_histogram']
# To avoid leaking references to "self" that prevent the
# window from being GC-ed when closed, callbacks should be
# methods of this class only, and specifically not be
# partials, lambdas or methods of subobjects. Hence...
self.network.register_callback(self.on_network, interests)
# set initial message
self.console.showMessage(self.network.banner)
self.network.register_callback(self.on_quotes, ['on_quotes'])
self.network.register_callback(self.on_history, ['on_history'])
self.new_fx_quotes_signal.connect(self.on_fx_quotes)
self.new_fx_history_signal.connect(self.on_fx_history)
# update fee slider in case we missed the callback
self.fee_slider.update()
self.load_wallet(wallet)
gui_object.timer.timeout.connect(self.timer_actions)
self.fetch_alias()
def on_history(self, b):
self.wallet.clear_coin_price_cache()
self.new_fx_history_signal.emit()
def setup_exception_hook(self):
Exception_Hook(self)
def on_fx_history(self):
self.history_model.refresh('fx_history')
self.address_list.update()
def on_quotes(self, b):
self.new_fx_quotes_signal.emit()
def on_fx_quotes(self):
self.update_status()
# Refresh edits with the new rate
edit = self.fiat_send_e if self.fiat_send_e.is_last_edited else self.amount_e
edit.textEdited.emit(edit.text())
edit = self.fiat_receive_e if self.fiat_receive_e.is_last_edited else self.receive_amount_e
edit.textEdited.emit(edit.text())
# History tab needs updating if it used spot
if self.fx.history_used_spot:
self.history_model.refresh('fx_quotes')
self.address_list.update()
def toggle_tab(self, tab):
show = not self.config.get('show_{}_tab'.format(tab.tab_name), False)
self.config.set_key('show_{}_tab'.format(tab.tab_name), show)
item_text = (_("Hide {}") if show else _("Show {}")).format(tab.tab_description)
tab.menu_action.setText(item_text)
if show:
# Find out where to place the tab
index = len(self.tabs)
for i in range(len(self.tabs)):
try:
if tab.tab_pos < self.tabs.widget(i).tab_pos:
index = i
break
except AttributeError:
pass
self.tabs.insertTab(index, tab, tab.tab_icon, tab.tab_description.replace("&", ""))
else:
i = self.tabs.indexOf(tab)
self.tabs.removeTab(i)
def push_top_level_window(self, window):
'''Used for e.g. tx dialog box to ensure new dialogs are appropriately
parented. This used to be done by explicitly providing the parent
window, but that isn't something hardware wallet prompts know.'''
self.tl_windows.append(window)
def pop_top_level_window(self, window):
self.tl_windows.remove(window)
def top_level_window(self, test_func=None):
'''Do the right thing in the presence of tx dialog windows'''
override = self.tl_windows[-1] if self.tl_windows else None
if override and test_func and not test_func(override):
override = None # only override if ok for test_func
return self.top_level_window_recurse(override, test_func)
def diagnostic_name(self):
return "%s/%s" % (PrintError.diagnostic_name(self),
self.wallet.basename() if self.wallet else "None")
def is_hidden(self):
return self.isMinimized() or self.isHidden()
def show_or_hide(self):
if self.is_hidden():
self.bring_to_top()
else:
self.hide()
def bring_to_top(self):
self.show()
self.raise_()
def on_error(self, exc_info):
e = exc_info[1]
if isinstance(e, UserCancelled):
pass
elif isinstance(e, UserFacingException):
self.show_error(str(e))
else:
try:
traceback.print_exception(*exc_info)
except OSError:
pass # see #4418
self.show_error(str(e))
def on_network(self, event, *args):
if event == 'wallet_updated':
wallet = args[0]
if wallet == self.wallet:
self.need_update.set()
elif event == 'network_updated':
self.gui_object.network_updated_signal_obj.network_updated_signal \
.emit(event, args)
self.network_signal.emit('status', None)
elif event == 'blockchain_updated':
# to update number of confirmations in history
self.need_update.set()
elif event == 'new_transaction':
wallet, tx = args
if wallet == self.wallet:
self.tx_notification_queue.put(tx)
elif event in ['status', 'banner', 'verified', 'fee', 'fee_histogram']:
# Handle in GUI thread
self.network_signal.emit(event, args)
else:
self.print_error("unexpected network message:", event, args)
def on_network_qt(self, event, args=None):
# Handle a network message in the GUI thread
if event == 'status':
self.update_status()
elif event == 'banner':
self.console.showMessage(args[0])
elif event == 'verified':
wallet, tx_hash, tx_mined_status = args
if wallet == self.wallet:
self.history_model.update_tx_mined_status(tx_hash, tx_mined_status)
elif event == 'fee':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
elif event == 'fee_histogram':
if self.config.is_dynfee():
self.fee_slider.update()
self.do_update_fee()
self.history_model.on_fee_histogram()
else:
self.print_error("unexpected network_qt signal:", event, args)
def fetch_alias(self):
self.alias_info = None
alias = self.config.get('alias')
if alias:
alias = str(alias)
def f():
self.alias_info = self.contacts.resolve_openalias(alias)
self.alias_received_signal.emit()
t = threading.Thread(target=f)
t.setDaemon(True)
t.start()
def close_wallet(self):
if self.wallet:
self.print_error('close_wallet', self.wallet.storage.path)
run_hook('close_wallet', self.wallet)
@profiler
def load_wallet(self, wallet):
wallet.thread = TaskThread(self, self.on_error)
self.update_recently_visited(wallet.storage.path)
self.need_update.set()
# Once GUI has been initialized check if we want to announce something since the callback has been called before the GUI was initialized
# update menus
self.seed_menu.setEnabled(self.wallet.has_seed())
self.update_lock_icon()
self.update_buttons_on_seed()
self.update_console()
self.clear_receive_tab()
self.request_list.update()
self.tabs.show()
self.init_geometry()
if self.config.get('hide_gui') and self.gui_object.tray.isVisible():
self.hide()
else:
self.show()
self.watching_only_changed()
run_hook('load_wallet', wallet, self)
try:
wallet.try_detecting_internal_addresses_corruption()
except InternalAddressCorruption as e:
self.show_error(str(e))
send_exception_to_crash_reporter(e)
def init_geometry(self):
winpos = self.wallet.storage.get("winpos-qt")
try:
screen = self.app.desktop().screenGeometry()
assert screen.contains(QRect(*winpos))
self.setGeometry(*winpos)
except:
self.print_error("using default geometry")
self.setGeometry(100, 100, 840, 400)
def watching_only_changed(self):
name = "Electrum Testnet" if constants.net.TESTNET else "Electrum"
title = '%s %s - %s' % (name, ELECTRUM_VERSION,
self.wallet.basename())
extra = [self.wallet.storage.get('wallet_type', '?')]
if self.wallet.is_watching_only():
extra.append(_('watching only'))
title += ' [%s]'% ', '.join(extra)
self.setWindowTitle(title)
self.password_menu.setEnabled(self.wallet.may_have_password())
self.import_privkey_menu.setVisible(self.wallet.can_import_privkey())
self.import_address_menu.setVisible(self.wallet.can_import_address())
self.export_menu.setEnabled(self.wallet.can_export())
def warn_if_watching_only(self):
if self.wallet.is_watching_only():
msg = ' '.join([
_("This wallet is watching-only."),
_("This means you will not be able to spend Bitcoins with it."),
_("Make sure you own the seed phrase or the private keys, before you request Bitcoins to be sent to this wallet.")
])
self.show_warning(msg, title=_('Information'))
def open_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename, __ = QFileDialog.getOpenFileName(self, "Select your wallet file", wallet_folder)
if not filename:
return
self.gui_object.new_window(filename)
def backup_wallet(self):
path = self.wallet.storage.path
wallet_folder = os.path.dirname(path)
filename, __ = QFileDialog.getSaveFileName(self, _('Enter a filename for the copy of your wallet'), wallet_folder)
if not filename:
return
new_path = os.path.join(wallet_folder, filename)
if new_path != path:
try:
shutil.copy2(path, new_path)
self.show_message(_("A copy of your wallet file was created in")+" '%s'" % str(new_path), title=_("Wallet backup created"))
except BaseException as reason:
self.show_critical(_("Electrum was unable to copy your wallet file to the specified location.") + "\n" + str(reason), title=_("Unable to create backup"))
def update_recently_visited(self, filename):
recent = self.config.get('recently_open', [])
try:
sorted(recent)
except:
recent = []
if filename in recent:
recent.remove(filename)
recent.insert(0, filename)
recent = [path for path in recent if os.path.exists(path)]
recent = recent[:5]
self.config.set_key('recently_open', recent)
self.recently_visited_menu.clear()
for i, k in enumerate(sorted(recent)):
b = os.path.basename(k)
def loader(k):
return lambda: self.gui_object.new_window(k)
self.recently_visited_menu.addAction(b, loader(k)).setShortcut(QKeySequence("Ctrl+%d"%(i+1)))
self.recently_visited_menu.setEnabled(len(recent))
def get_wallet_folder(self):
return os.path.dirname(os.path.abspath(self.config.get_wallet_path()))
def new_wallet(self):
try:
wallet_folder = self.get_wallet_folder()
except FileNotFoundError as e:
self.show_error(str(e))
return
filename = get_new_wallet_name(wallet_folder)
full_path = os.path.join(wallet_folder, filename)
self.gui_object.start_new_window(full_path, None)
def init_menubar(self):
menubar = QMenuBar()
file_menu = menubar.addMenu(_("&File"))
self.recently_visited_menu = file_menu.addMenu(_("&Recently open"))
file_menu.addAction(_("&Open"), self.open_wallet).setShortcut(QKeySequence.Open)
file_menu.addAction(_("&New/Restore"), self.new_wallet).setShortcut(QKeySequence.New)
file_menu.addAction(_("&Save Copy"), self.backup_wallet).setShortcut(QKeySequence.SaveAs)
file_menu.addAction(_("Delete"), self.remove_wallet)
file_menu.addSeparator()
file_menu.addAction(_("&Quit"), self.close)
wallet_menu = menubar.addMenu(_("&Wallet"))
wallet_menu.addAction(_("&Information"), self.show_master_public_keys)
wallet_menu.addSeparator()
self.password_menu = wallet_menu.addAction(_("&Password"), self.change_password_dialog)
self.seed_menu = wallet_menu.addAction(_("&Seed"), self.show_seed_dialog)
self.private_keys_menu = wallet_menu.addMenu(_("&Private keys"))
self.private_keys_menu.addAction(_("&Sweep"), self.sweep_key_dialog)
self.import_privkey_menu = self.private_keys_menu.addAction(_("&Import"), self.do_import_privkey)
self.export_menu = self.private_keys_menu.addAction(_("&Export"), self.export_privkeys_dialog)
self.import_address_menu = wallet_menu.addAction(_("Import addresses"), self.import_addresses)
wallet_menu.addSeparator()
addresses_menu = wallet_menu.addMenu(_("&Addresses"))
addresses_menu.addAction(_("&Filter"), lambda: self.address_list.toggle_toolbar(self.config))
labels_menu = wallet_menu.addMenu(_("&Labels"))
labels_menu.addAction(_("&Import"), self.do_import_labels)
labels_menu.addAction(_("&Export"), self.do_export_labels)
history_menu = wallet_menu.addMenu(_("&History"))
history_menu.addAction(_("&Filter"), lambda: self.history_list.toggle_toolbar(self.config))
history_menu.addAction(_("&Summary"), self.history_list.show_summary)
history_menu.addAction(_("&Plot"), self.history_list.plot_history_dialog)
history_menu.addAction(_("&Export"), self.history_list.export_history_dialog)
contacts_menu = wallet_menu.addMenu(_("Contacts"))
contacts_menu.addAction(_("&New"), self.new_contact_dialog)
contacts_menu.addAction(_("Import"), lambda: self.contact_list.import_contacts())
contacts_menu.addAction(_("Export"), lambda: self.contact_list.export_contacts())
invoices_menu = wallet_menu.addMenu(_("Invoices"))
invoices_menu.addAction(_("Import"), lambda: self.invoice_list.import_invoices())
invoices_menu.addAction(_("Export"), lambda: self.invoice_list.export_invoices())
wallet_menu.addSeparator()
wallet_menu.addAction(_("Find"), self.toggle_search).setShortcut(QKeySequence("Ctrl+F"))
def add_toggle_action(view_menu, tab):
is_shown = self.config.get('show_{}_tab'.format(tab.tab_name), False)
item_name = (_("Hide") if is_shown else _("Show")) + " " + tab.tab_description
tab.menu_action = view_menu.addAction(item_name, lambda: self.toggle_tab(tab))
view_menu = menubar.addMenu(_("&View"))
add_toggle_action(view_menu, self.addresses_tab)
add_toggle_action(view_menu, self.utxo_tab)
add_toggle_action(view_menu, self.contacts_tab)
add_toggle_action(view_menu, self.console_tab)
tools_menu = menubar.addMenu(_("&Tools"))
# "Settings" / "Preferences" are reserved menu names on macOS; use a different label as a workaround
tools_menu.addAction(_("Electrum preferences") if sys.platform == 'darwin' else _("Preferences"), self.settings_dialog)
tools_menu.addAction(_("&Network"), lambda: self.gui_object.show_network_dialog(self))
tools_menu.addAction(_("&Plugins"), self.plugins_dialog)
tools_menu.addSeparator()
tools_menu.addAction(_("&Sign/verify message"), self.sign_verify_message)
tools_menu.addAction(_("&Encrypt/decrypt message"), self.encrypt_message)
tools_menu.addSeparator()
paytomany_menu = tools_menu.addAction(_("&Pay to many"), self.paytomany)
raw_transaction_menu = tools_menu.addMenu(_("&Load transaction"))
raw_transaction_menu.addAction(_("&From file"), self.do_process_from_file)
raw_transaction_menu.addAction(_("&From text"), self.do_process_from_text)
raw_transaction_menu.addAction(_("&From the blockchain"), self.do_process_from_txid)
raw_transaction_menu.addAction(_("&From QR code"), self.read_tx_from_qrcode)
self.raw_transaction_menu = raw_transaction_menu
run_hook('init_menubar_tools', self, tools_menu)
help_menu = menubar.addMenu(_("&Help"))
help_menu.addAction(_("&About"), self.show_about)
help_menu.addAction(_("&Official website"), lambda: webbrowser.open("https://electrum.org"))
help_menu.addSeparator()
help_menu.addAction(_("&Documentation"), lambda: webbrowser.open("http://docs.electrum.org/")).setShortcut(QKeySequence.HelpContents)
help_menu.addAction(_("&Report Bug"), self.show_report_bug)
help_menu.addSeparator()
help_menu.addAction(_("&Donate to server"), self.donate_to_server)
self.setMenuBar(menubar)
def donate_to_server(self):
d = self.network.get_donation_address()
if d:
host = self.network.get_parameters().host
self.pay_to_URI('bitcoin:%s?message=donation for %s'%(d, host))
else:
self.show_error(_('No donation address for this server'))
def show_about(self):
QMessageBox.about(self, "Electrum",
(_("Version")+" %s" % ELECTRUM_VERSION + "\n\n" +
_("Electrum's focus is speed, with low resource usage and simplifying Bitcoin.") + " " +
_("You do not need to perform regular backups, because your wallet can be "
"recovered from a secret phrase that you can memorize or write on paper.") + " " +
_("Startup times are instant because it operates in conjunction with high-performance "
"servers that handle the most complicated parts of the Bitcoin system.") + "\n\n" +
_("Uses icons from the Icons8 icon pack (icons8.com).")))
def show_report_bug(self):
msg = ' '.join([
_("Please report any bugs as issues on github:<br/>"),
"<a href=\"https://github.com/spesmilo/electrum/issues\">https://github.com/spesmilo/electrum/issues</a><br/><br/>",
_("Before reporting a bug, upgrade to the most recent version of Electrum (latest release or git HEAD), and include the version number in your report."),
_("Try to explain not only what the bug is, but how it occurs.")
])
self.show_message(msg, title="Electrum - " + _("Reporting Bugs"), rich_text=True)
def notify_transactions(self):
if self.tx_notification_queue.qsize() == 0:
return
if not self.wallet.up_to_date:
return # no notifications while syncing
now = time.time()
rate_limit = 20 # seconds
if self.tx_notification_last_time + rate_limit > now:
return
self.tx_notification_last_time = now
self.print_error("Notifying GUI about new transactions")
txns = []
while True:
try:
txns.append(self.tx_notification_queue.get_nowait())
except queue.Empty:
break
# Combine the transactions if there are at least three
if len(txns) >= 3:
total_amount = 0
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
total_amount += v
self.notify(_("{} new transactions received: Total amount received in the new transactions {}")
.format(len(txns), self.format_amount_and_units(total_amount)))
else:
for tx in txns:
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if is_relevant:
self.notify(_("New transaction received: {}").format(self.format_amount_and_units(v)))
def notify(self, message):
if self.tray:
try:
# this requires Qt 5.9
self.tray.showMessage("Electrum", message, QIcon(":icons/electrum_dark_icon"), 20000)
except TypeError:
self.tray.showMessage("Electrum", message, QSystemTrayIcon.Information, 20000)
# custom wrappers for getOpenFileName and getSaveFileName, that remember the path selected by the user
def getOpenFileName(self, title, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
fileName, __ = QFileDialog.getOpenFileName(self, title, directory, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def getSaveFileName(self, title, filename, filter = ""):
directory = self.config.get('io_dir', os.path.expanduser('~'))
path = os.path.join( directory, filename )
fileName, __ = QFileDialog.getSaveFileName(self, title, path, filter)
if fileName and directory != os.path.dirname(fileName):
self.config.set_key('io_dir', os.path.dirname(fileName), True)
return fileName
def timer_actions(self):
# Note this runs in the GUI thread
if self.need_update.is_set():
self.need_update.clear()
self.update_wallet()
# resolve aliases
# FIXME this is a blocking network call that has a timeout of 5 sec
self.payto_e.resolve()
# update fee
if self.require_fee_update:
self.do_update_fee()
self.require_fee_update = False
self.notify_transactions()
def format_amount(self, x, is_diff=False, whitespaces=False):
return format_satoshis(x, self.num_zeros, self.decimal_point, is_diff=is_diff, whitespaces=whitespaces)
def format_amount_and_units(self, amount):
text = self.format_amount(amount) + ' '+ self.base_unit()
x = self.fx.format_amount_and_units(amount) if self.fx else None
if text and x:
text += ' (%s)'%x
return text
def format_fee_rate(self, fee_rate):
return format_fee_satoshis(fee_rate/1000, num_zeros=self.num_zeros) + ' sat/byte'
def get_decimal_point(self):
return self.decimal_point
def base_unit(self):
return decimal_point_to_base_unit_name(self.decimal_point)
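# (Assumed for illustration) decimal_point -> unit name mapping used by these
# helpers: 8 -> 'BTC', 5 -> 'mBTC', 2 -> 'bit', 0 -> 'sat'.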
def connect_fields(self, window, btc_e, fiat_e, fee_e):
def edit_changed(edit):
if edit.follows:
return
edit.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
fiat_e.is_last_edited = (edit == fiat_e)
amount = edit.get_amount()
rate = self.fx.exchange_rate() if self.fx else Decimal('NaN')
if rate.is_nan() or amount is None:
if edit is fiat_e:
btc_e.setText("")
if fee_e:
fee_e.setText("")
else:
fiat_e.setText("")
else:
if edit is fiat_e:
btc_e.follows = True
btc_e.setAmount(int(amount / Decimal(rate) * COIN))
btc_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
btc_e.follows = False
if fee_e:
window.update_fee()
else:
fiat_e.follows = True
fiat_e.setText(self.fx.ccy_amount_str(
amount * Decimal(rate) / COIN, False))
fiat_e.setStyleSheet(ColorScheme.BLUE.as_stylesheet())
fiat_e.follows = False
btc_e.follows = False
fiat_e.follows = False
fiat_e.textChanged.connect(partial(edit_changed, fiat_e))
btc_e.textChanged.connect(partial(edit_changed, btc_e))
fiat_e.is_last_edited = False
def update_status(self):
if not self.wallet:
return
if self.network is None:
text = _("Offline")
icon = QIcon(":icons/status_disconnected.png")
elif self.network.is_connected():
server_height = self.network.get_server_height()
server_lag = self.network.get_local_height() - server_height
fork_str = "_fork" if len(self.network.get_blockchains())>1 else ""
# Server height can be 0 after switching to a new server
# until we get a headers subscription request response.
# Display the synchronizing message in that case.
if not self.wallet.up_to_date or server_height == 0:
text = _("Synchronizing...")
icon = QIcon(":icons/status_waiting.png")
elif server_lag > 1:
text = _("Server is lagging ({} blocks)").format(server_lag)
icon = QIcon(":icons/status_lagging%s.png"%fork_str)
else:
c, u, x = self.wallet.get_balance()
text = _("Balance" ) + ": %s "%(self.format_amount_and_units(c))
if u:
text += " [%s unconfirmed]"%(self.format_amount(u, is_diff=True).strip())
if x:
text += " [%s unmatured]"%(self.format_amount(x, is_diff=True).strip())
# append fiat balance and price
if self.fx.is_enabled():
text += self.fx.get_fiat_status_text(c + u + x,
self.base_unit(), self.get_decimal_point()) or ''
if not self.network.proxy:
icon = QIcon(":icons/status_connected%s.png"%fork_str)
else:
icon = QIcon(":icons/status_connected_proxy%s.png"%fork_str)
else:
if self.network.proxy:
text = "{} ({})".format(_("Not connected"), _("proxy enabled"))
else:
text = _("Not connected")
icon = QIcon(":icons/status_disconnected.png")
self.tray.setToolTip("%s (%s)" % (text, self.wallet.basename()))
self.balance_label.setText(text)
self.status_button.setIcon( icon )
def update_wallet(self):
self.update_status()
if self.wallet.up_to_date or not self.network or not self.network.is_connected():
self.update_tabs()
def update_tabs(self, wallet=None):
if wallet is None:
wallet = self.wallet
if wallet != self.wallet:
return
self.history_model.refresh('update_tabs')
self.request_list.update()
self.address_list.update()
self.utxo_list.update()
self.contact_list.update()
self.invoice_list.update()
self.update_completions()
def create_history_tab(self):
self.history_model = HistoryModel(self)
self.history_list = l = HistoryList(self, self.history_model)
self.history_model.set_view(self.history_list)
l.searchable_list = l
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_history', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def show_address(self, addr):
from . import address_dialog
d = address_dialog.AddressDialog(self, addr)
d.exec_()
def show_transaction(self, tx, tx_desc = None):
'''tx_desc is set only for txs created in the Send tab'''
show_transaction(tx, self, tx_desc)
def create_receive_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.receive_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
self.receive_address_e = ButtonsLineEdit()
self.receive_address_e.addCopyButton(self.app)
self.receive_address_e.setReadOnly(True)
msg = _('Bitcoin address where the payment should be received. Note that each payment request uses a different Bitcoin address.')
self.receive_address_label = HelpLabel(_('Receiving address'), msg)
self.receive_address_e.textChanged.connect(self.update_receive_qr)
self.receive_address_e.setFocusPolicy(Qt.ClickFocus)
grid.addWidget(self.receive_address_label, 0, 0)
grid.addWidget(self.receive_address_e, 0, 1, 1, -1)
self.receive_message_e = QLineEdit()
grid.addWidget(QLabel(_('Description')), 1, 0)
grid.addWidget(self.receive_message_e, 1, 1, 1, -1)
self.receive_message_e.textChanged.connect(self.update_receive_qr)
self.receive_amount_e = BTCAmountEdit(self.get_decimal_point)
grid.addWidget(QLabel(_('Requested amount')), 2, 0)
grid.addWidget(self.receive_amount_e, 2, 1)
self.receive_amount_e.textChanged.connect(self.update_receive_qr)
self.fiat_receive_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_receive_e.setVisible(False)
grid.addWidget(self.fiat_receive_e, 2, 2, Qt.AlignLeft)
self.connect_fields(self, self.receive_amount_e, self.fiat_receive_e, None)
self.expires_combo = QComboBox()
self.expires_combo.addItems([i[0] for i in expiration_values])
self.expires_combo.setCurrentIndex(3)
self.expires_combo.setFixedWidth(self.receive_amount_e.width())
msg = ' '.join([
_('Expiration date of your request.'),
_('This information is seen by the recipient if you send them a signed payment request.'),
_('Expired requests have to be deleted manually from your list, in order to free the corresponding Bitcoin addresses.'),
            _('The Bitcoin address never expires and will always be part of this Electrum wallet.'),
])
grid.addWidget(HelpLabel(_('Request expires'), msg), 3, 0)
grid.addWidget(self.expires_combo, 3, 1)
self.expires_label = QLineEdit('')
self.expires_label.setReadOnly(1)
self.expires_label.setFocusPolicy(Qt.NoFocus)
self.expires_label.hide()
grid.addWidget(self.expires_label, 3, 1)
self.save_request_button = QPushButton(_('Save'))
self.save_request_button.clicked.connect(self.save_payment_request)
self.new_request_button = QPushButton(_('New'))
self.new_request_button.clicked.connect(self.new_payment_request)
self.receive_qr = QRCodeWidget(fixedSize=200)
self.receive_qr.mouseReleaseEvent = lambda x: self.toggle_qr_window()
self.receive_qr.enterEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.PointingHandCursor))
self.receive_qr.leaveEvent = lambda x: self.app.setOverrideCursor(QCursor(Qt.ArrowCursor))
self.receive_buttons = buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.save_request_button)
buttons.addWidget(self.new_request_button)
grid.addLayout(buttons, 4, 1, 1, 2)
self.receive_requests_label = QLabel(_('Requests'))
from .request_list import RequestList
self.request_list = RequestList(self)
# layout
vbox_g = QVBoxLayout()
vbox_g.addLayout(grid)
vbox_g.addStretch()
hbox = QHBoxLayout()
hbox.addLayout(vbox_g)
hbox.addWidget(self.receive_qr)
w = QWidget()
w.searchable_list = self.request_list
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.receive_requests_label)
vbox.addWidget(self.request_list)
vbox.setStretchFactor(self.request_list, 1000)
return w
def delete_payment_request(self, addr):
self.wallet.remove_payment_request(addr, self.config)
self.request_list.update()
self.clear_receive_tab()
def get_request_URI(self, addr):
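        # Build a BIP21 payment URI for the saved request, appending the optional
        # creation time, expiry, requestor name and signature as extra query parameters.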
req = self.wallet.receive_requests[addr]
message = self.wallet.labels.get(addr, '')
amount = req['amount']
URI = util.create_URI(addr, amount, message)
if req.get('time'):
URI += "&time=%d"%req.get('time')
if req.get('exp'):
URI += "&exp=%d"%req.get('exp')
if req.get('name') and req.get('sig'):
sig = bfh(req.get('sig'))
sig = bitcoin.base_encode(sig, base=58)
URI += "&name=" + req['name'] + "&sig="+sig
return str(URI)
def sign_payment_request(self, addr):
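        # If an OpenAlias is configured and its address belongs to this wallet,
        # sign the stored payment request with that key, asking for the wallet
        # password when keystore encryption is enabled.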
alias = self.config.get('alias')
alias_privkey = None
if alias and self.alias_info:
alias_addr, alias_name, validated = self.alias_info
if alias_addr:
if self.wallet.is_mine(alias_addr):
msg = _('This payment request will be signed.') + '\n' + _('Please enter your password')
password = None
if self.wallet.has_keystore_encryption():
password = self.password_dialog(msg)
if not password:
return
try:
self.wallet.sign_payment_request(addr, alias, alias_addr, password)
except Exception as e:
self.show_error(str(e))
return
else:
return
def save_payment_request(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
if not message and not amount:
self.show_error(_('No message or amount'))
return False
i = self.expires_combo.currentIndex()
expiration = list(map(lambda x: x[1], expiration_values))[i]
req = self.wallet.make_payment_request(addr, amount, message, expiration)
try:
self.wallet.add_payment_request(req, self.config)
except Exception as e:
traceback.print_exc(file=sys.stderr)
self.show_error(_('Error adding payment request') + ':\n' + str(e))
else:
self.sign_payment_request(addr)
self.save_request_button.setEnabled(False)
finally:
self.request_list.update()
self.address_list.update()
def view_and_paste(self, title, msg, data):
dialog = WindowModalDialog(self, title)
vbox = QVBoxLayout()
label = QLabel(msg)
label.setWordWrap(True)
vbox.addWidget(label)
pr_e = ShowQRTextEdit(text=data)
vbox.addWidget(pr_e)
vbox.addLayout(Buttons(CopyCloseButton(pr_e.text, self.app, dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def export_payment_request(self, addr):
r = self.wallet.receive_requests.get(addr)
pr = paymentrequest.serialize_request(r).SerializeToString()
name = r['id'] + '.bip70'
fileName = self.getSaveFileName(_("Select where to save your payment request"), name, "*.bip70")
if fileName:
with open(fileName, "wb+") as f:
f.write(util.to_bytes(pr))
self.show_message(_("Request saved successfully"))
self.saved = True
def new_payment_request(self):
addr = self.wallet.get_unused_address()
if addr is None:
if not self.wallet.is_deterministic():
msg = [
_('No more addresses in your wallet.'),
_('You are using a non-deterministic wallet, which cannot create new addresses.'),
_('If you want to create new addresses, use a deterministic wallet instead.')
]
self.show_message(' '.join(msg))
return
if not self.question(_("Warning: The next address will not be recovered automatically if you restore your wallet from seed; you may need to add it manually.\n\nThis occurs because you have too many unused addresses in your wallet. To avoid this situation, use the existing addresses first.\n\nCreate anyway?")):
return
addr = self.wallet.create_new_address(False)
self.set_receive_address(addr)
self.expires_label.hide()
self.expires_combo.show()
self.new_request_button.setEnabled(False)
self.receive_message_e.setFocus(1)
def set_receive_address(self, addr):
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
def clear_receive_tab(self):
try:
addr = self.wallet.get_receiving_address() or ''
except InternalAddressCorruption as e:
self.show_error(str(e))
addr = ''
self.receive_address_e.setText(addr)
self.receive_message_e.setText('')
self.receive_amount_e.setAmount(None)
self.expires_label.hide()
self.expires_combo.show()
def toggle_qr_window(self):
from . import qrwindow
if not self.qr_window:
self.qr_window = qrwindow.QR_Window(self)
self.qr_window.setVisible(True)
self.qr_window_geometry = self.qr_window.geometry()
else:
if not self.qr_window.isVisible():
self.qr_window.setVisible(True)
self.qr_window.setGeometry(self.qr_window_geometry)
else:
self.qr_window_geometry = self.qr_window.geometry()
self.qr_window.setVisible(False)
self.update_receive_qr()
def show_send_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.send_tab))
def show_receive_tab(self):
self.tabs.setCurrentIndex(self.tabs.indexOf(self.receive_tab))
def receive_at(self, addr):
if not bitcoin.is_address(addr):
return
self.show_receive_tab()
self.receive_address_e.setText(addr)
self.new_request_button.setEnabled(True)
def update_receive_qr(self):
addr = str(self.receive_address_e.text())
amount = self.receive_amount_e.get_amount()
message = self.receive_message_e.text()
self.save_request_button.setEnabled((amount is not None) or (message != ""))
uri = util.create_URI(addr, amount, message)
self.receive_qr.setData(uri)
if self.qr_window and self.qr_window.isVisible():
self.qr_window.qrw.setData(uri)
def set_feerounding_text(self, num_satoshis_added):
self.feerounding_text = (_('Additional {} satoshis are going to be added.')
.format(num_satoshis_added))
def create_send_tab(self):
# A 4-column grid layout. All the stretch is in the last column.
# The exchange rate plugin adds a fiat widget in column 2
self.send_grid = grid = QGridLayout()
grid.setSpacing(8)
grid.setColumnStretch(3, 1)
from .paytoedit import PayToEdit
self.amount_e = BTCAmountEdit(self.get_decimal_point)
self.payto_e = PayToEdit(self)
msg = _('Recipient of the funds.') + '\n\n'\
+ _('You may enter a Bitcoin address, a label from your list of contacts (a list of completions will be proposed), or an alias (email-like address that forwards to a Bitcoin address)')
payto_label = HelpLabel(_('Pay to'), msg)
grid.addWidget(payto_label, 1, 0)
grid.addWidget(self.payto_e, 1, 1, 1, -1)
completer = QCompleter()
        completer.setCaseSensitivity(Qt.CaseInsensitive)
self.payto_e.set_completer(completer)
completer.setModel(self.completions)
msg = _('Description of the transaction (not mandatory).') + '\n\n'\
+ _('The description is not sent to the recipient of the funds. It is stored in your wallet file, and displayed in the \'History\' tab.')
description_label = HelpLabel(_('Description'), msg)
grid.addWidget(description_label, 2, 0)
self.message_e = MyLineEdit()
grid.addWidget(self.message_e, 2, 1, 1, -1)
self.from_label = QLabel(_('From'))
grid.addWidget(self.from_label, 3, 0)
self.from_list = FromList(self, self.from_list_menu)
grid.addWidget(self.from_list, 3, 1, 1, -1)
self.set_pay_from([])
msg = _('Amount to be sent.') + '\n\n' \
+ _('The amount will be displayed in red if you do not have enough funds in your wallet.') + ' ' \
+ _('Note that if you have frozen some of your addresses, the available funds will be lower than your total balance.') + '\n\n' \
+ _('Keyboard shortcut: type "!" to send all your coins.')
amount_label = HelpLabel(_('Amount'), msg)
grid.addWidget(amount_label, 4, 0)
grid.addWidget(self.amount_e, 4, 1)
self.fiat_send_e = AmountEdit(self.fx.get_currency if self.fx else '')
if not self.fx or not self.fx.is_enabled():
self.fiat_send_e.setVisible(False)
grid.addWidget(self.fiat_send_e, 4, 2)
self.amount_e.frozen.connect(
lambda: self.fiat_send_e.setFrozen(self.amount_e.isReadOnly()))
self.max_button = EnterButton(_("Max"), self.spend_max)
self.max_button.setFixedWidth(140)
grid.addWidget(self.max_button, 4, 3)
hbox = QHBoxLayout()
hbox.addStretch(1)
grid.addLayout(hbox, 4, 4)
msg = _('Bitcoin transactions are in general not free. A transaction fee is paid by the sender of the funds.') + '\n\n'\
+ _('The amount of fee can be decided freely by the sender. However, transactions with low fees take more time to be processed.') + '\n\n'\
+ _('A suggested fee is automatically added to this field. You may override it. The suggested fee increases with the size of the transaction.')
self.fee_e_label = HelpLabel(_('Fee'), msg)
def fee_cb(dyn, pos, fee_rate):
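            # Fee slider callback: persist either the dynamic fee level (mempool
            # depth or ETA position) or a static fee_per_kb, mirror the chosen
            # rate into the sat/byte field, and recompute the transaction fee.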
if dyn:
if self.config.use_mempool_fees():
self.config.set_key('depth_level', pos, False)
else:
self.config.set_key('fee_level', pos, False)
else:
self.config.set_key('fee_per_kb', fee_rate, False)
if fee_rate:
fee_rate = Decimal(fee_rate)
self.feerate_e.setAmount(quantize_feerate(fee_rate / 1000))
else:
self.feerate_e.setAmount(None)
self.fee_e.setModified(False)
self.fee_slider.activate()
            if self.is_max:
                self.spend_max()
            else:
                self.update_fee()
self.fee_slider = FeeSlider(self, self.config, fee_cb)
self.fee_slider.setFixedWidth(140)
def on_fee_or_feerate(edit_changed, editing_finished):
edit_other = self.feerate_e if edit_changed == self.fee_e else self.fee_e
if editing_finished:
if edit_changed.get_amount() is None:
# This is so that when the user blanks the fee and moves on,
# we go back to auto-calculate mode and put a fee back.
edit_changed.setModified(False)
else:
# edit_changed was edited just now, so make sure we will
# freeze the correct fee setting (this)
edit_other.setModified(False)
self.fee_slider.deactivate()
self.update_fee()
class TxSizeLabel(QLabel):
def setAmount(self, byte_size):
self.setText(('x %s bytes =' % byte_size) if byte_size else '')
self.size_e = TxSizeLabel()
self.size_e.setAlignment(Qt.AlignCenter)
self.size_e.setAmount(0)
self.size_e.setFixedWidth(140)
self.size_e.setStyleSheet(ColorScheme.DEFAULT.as_stylesheet())
self.feerate_e = FeerateEdit(lambda: 0)
self.feerate_e.setAmount(self.config.fee_per_byte())
self.feerate_e.textEdited.connect(partial(on_fee_or_feerate, self.feerate_e, False))
self.feerate_e.editingFinished.connect(partial(on_fee_or_feerate, self.feerate_e, True))
self.fee_e = BTCAmountEdit(self.get_decimal_point)
self.fee_e.textEdited.connect(partial(on_fee_or_feerate, self.fee_e, False))
self.fee_e.editingFinished.connect(partial(on_fee_or_feerate, self.fee_e, True))
def feerounding_onclick():
text = (self.feerounding_text + '\n\n' +
_('To somewhat protect your privacy, Electrum tries to create change with similar precision to other outputs.') + ' ' +
_('At most 100 satoshis might be lost due to this rounding.') + ' ' +
_("You can disable this setting in '{}'.").format(_('Preferences')) + '\n' +
_('Also, dust is not kept as change, but added to the fee.') + '\n' +
_('Also, when batching RBF transactions, BIP 125 imposes a lower bound on the fee.'))
QMessageBox.information(self, 'Fee rounding', text)
self.feerounding_icon = QPushButton(QIcon(':icons/info.png'), '')
self.feerounding_icon.setFixedWidth(20)
self.feerounding_icon.setFlat(True)
self.feerounding_icon.clicked.connect(feerounding_onclick)
self.feerounding_icon.setVisible(False)
self.connect_fields(self, self.amount_e, self.fiat_send_e, self.fee_e)
vbox_feelabel = QVBoxLayout()
vbox_feelabel.addWidget(self.fee_e_label)
vbox_feelabel.addStretch(1)
grid.addLayout(vbox_feelabel, 5, 0)
self.fee_adv_controls = QWidget()
hbox = QHBoxLayout(self.fee_adv_controls)
hbox.setContentsMargins(0, 0, 0, 0)
hbox.addWidget(self.feerate_e)
hbox.addWidget(self.size_e)
hbox.addWidget(self.fee_e)
hbox.addWidget(self.feerounding_icon, Qt.AlignLeft)
hbox.addStretch(1)
vbox_feecontrol = QVBoxLayout()
vbox_feecontrol.addWidget(self.fee_adv_controls)
vbox_feecontrol.addWidget(self.fee_slider)
grid.addLayout(vbox_feecontrol, 5, 1, 1, -1)
if not self.config.get('show_fee', False):
self.fee_adv_controls.setVisible(False)
self.preview_button = EnterButton(_("Preview"), self.do_preview)
self.preview_button.setToolTip(_('Display the details of your transaction before signing it.'))
self.send_button = EnterButton(_("Send"), self.do_send)
self.clear_button = EnterButton(_("Clear"), self.do_clear)
buttons = QHBoxLayout()
buttons.addStretch(1)
buttons.addWidget(self.clear_button)
buttons.addWidget(self.preview_button)
buttons.addWidget(self.send_button)
grid.addLayout(buttons, 6, 1, 1, 3)
self.amount_e.shortcut.connect(self.spend_max)
self.payto_e.textChanged.connect(self.update_fee)
self.amount_e.textEdited.connect(self.update_fee)
def reset_max(text):
self.is_max = False
enable = not bool(text) and not self.amount_e.isReadOnly()
self.max_button.setEnabled(enable)
self.amount_e.textEdited.connect(reset_max)
self.fiat_send_e.textEdited.connect(reset_max)
def entry_changed():
text = ""
amt_color = ColorScheme.DEFAULT
fee_color = ColorScheme.DEFAULT
feerate_color = ColorScheme.DEFAULT
if self.not_enough_funds:
amt_color, fee_color = ColorScheme.RED, ColorScheme.RED
feerate_color = ColorScheme.RED
text = _( "Not enough funds" )
c, u, x = self.wallet.get_frozen_balance()
if c+u+x:
text += ' (' + self.format_amount(c+u+x).strip() + ' ' + self.base_unit() + ' ' +_("are frozen") + ')'
# blue color denotes auto-filled values
elif self.fee_e.isModified():
feerate_color = ColorScheme.BLUE
elif self.feerate_e.isModified():
fee_color = ColorScheme.BLUE
elif self.amount_e.isModified():
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
else:
amt_color = ColorScheme.BLUE
fee_color = ColorScheme.BLUE
feerate_color = ColorScheme.BLUE
self.statusBar().showMessage(text)
self.amount_e.setStyleSheet(amt_color.as_stylesheet())
self.fee_e.setStyleSheet(fee_color.as_stylesheet())
self.feerate_e.setStyleSheet(feerate_color.as_stylesheet())
self.amount_e.textChanged.connect(entry_changed)
self.fee_e.textChanged.connect(entry_changed)
self.feerate_e.textChanged.connect(entry_changed)
self.invoices_label = QLabel(_('Invoices'))
from .invoice_list import InvoiceList
self.invoice_list = InvoiceList(self)
vbox0 = QVBoxLayout()
vbox0.addLayout(grid)
hbox = QHBoxLayout()
hbox.addLayout(vbox0)
w = QWidget()
vbox = QVBoxLayout(w)
vbox.addLayout(hbox)
vbox.addStretch(1)
vbox.addWidget(self.invoices_label)
vbox.addWidget(self.invoice_list)
vbox.setStretchFactor(self.invoice_list, 1000)
w.searchable_list = self.invoice_list
run_hook('create_send_tab', grid)
return w
def spend_max(self):
if run_hook('abort_send', self):
return
self.is_max = True
self.do_update_fee()
def update_fee(self):
self.require_fee_update = True
def get_payto_or_dummy(self):
r = self.payto_e.get_recipient()
if r:
return r
return (TYPE_ADDRESS, self.wallet.dummy_address())
def do_update_fee(self):
'''Recalculate the fee. If the fee was manually input, retain it, but
still build the TX to see if there are enough funds.
'''
freeze_fee = self.is_send_fee_frozen()
freeze_feerate = self.is_send_feerate_frozen()
amount = '!' if self.is_max else self.amount_e.get_amount()
if amount is None:
if not freeze_fee:
self.fee_e.setAmount(None)
self.not_enough_funds = False
self.statusBar().showMessage('')
else:
fee_estimator = self.get_send_fee_estimator()
outputs = self.payto_e.get_outputs(self.is_max)
if not outputs:
_type, addr = self.get_payto_or_dummy()
outputs = [TxOutput(_type, addr, amount)]
is_sweep = bool(self.tx_external_keypairs)
make_tx = lambda fee_est: \
self.wallet.make_unsigned_transaction(
self.get_coins(), outputs, self.config,
fixed_fee=fee_est, is_sweep=is_sweep)
try:
tx = make_tx(fee_estimator)
self.not_enough_funds = False
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
if not freeze_fee:
self.fee_e.setAmount(None)
if not freeze_feerate:
self.feerate_e.setAmount(None)
self.feerounding_icon.setVisible(False)
if isinstance(e, NotEnoughFunds):
self.not_enough_funds = True
elif isinstance(e, NoDynamicFeeEstimates):
try:
tx = make_tx(0)
size = tx.estimated_size()
self.size_e.setAmount(size)
except BaseException:
pass
return
except BaseException:
traceback.print_exc(file=sys.stderr)
return
size = tx.estimated_size()
self.size_e.setAmount(size)
fee = tx.get_fee()
fee = None if self.not_enough_funds else fee
# Displayed fee/fee_rate values are set according to user input.
# Due to rounding or dropping dust in CoinChooser,
# actual fees often differ somewhat.
if freeze_feerate or self.fee_slider.is_active():
displayed_feerate = self.feerate_e.get_amount()
if displayed_feerate is not None:
displayed_feerate = quantize_feerate(displayed_feerate)
else:
# fallback to actual fee
displayed_feerate = quantize_feerate(fee / size) if fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
displayed_fee = round(displayed_feerate * size) if displayed_feerate is not None else None
self.fee_e.setAmount(displayed_fee)
else:
if freeze_fee:
displayed_fee = self.fee_e.get_amount()
else:
# fallback to actual fee if nothing is frozen
displayed_fee = fee
self.fee_e.setAmount(displayed_fee)
displayed_fee = displayed_fee if displayed_fee else 0
displayed_feerate = quantize_feerate(displayed_fee / size) if displayed_fee is not None else None
self.feerate_e.setAmount(displayed_feerate)
# show/hide fee rounding icon
feerounding = (fee - displayed_fee) if fee else 0
self.set_feerounding_text(int(feerounding))
self.feerounding_icon.setToolTip(self.feerounding_text)
self.feerounding_icon.setVisible(abs(feerounding) >= 1)
if self.is_max:
amount = tx.output_value()
__, x_fee_amount = run_hook('get_tx_extra_fee', self.wallet, tx) or (None, 0)
amount_after_all_fees = amount - x_fee_amount
self.amount_e.setAmount(amount_after_all_fees)
def from_list_delete(self, item):
i = self.from_list.indexOfTopLevelItem(item)
self.pay_from.pop(i)
self.redraw_from_list()
self.update_fee()
def from_list_menu(self, position):
item = self.from_list.itemAt(position)
menu = QMenu()
menu.addAction(_("Remove"), lambda: self.from_list_delete(item))
menu.exec_(self.from_list.viewport().mapToGlobal(position))
def set_pay_from(self, coins):
self.pay_from = list(coins)
self.redraw_from_list()
def redraw_from_list(self):
self.from_list.clear()
self.from_label.setHidden(len(self.pay_from) == 0)
self.from_list.setHidden(len(self.pay_from) == 0)
def format(x):
h = x.get('prevout_hash')
return h[0:10] + '...' + h[-10:] + ":%d"%x.get('prevout_n') + u'\t' + "%s"%x.get('address')
for item in self.pay_from:
self.from_list.addTopLevelItem(QTreeWidgetItem( [format(item), self.format_amount(item['value']) ]))
def get_contact_payto(self, key):
_type, label = self.contacts.get(key)
return label + ' <' + key + '>' if _type == 'address' else key
def update_completions(self):
l = [self.get_contact_payto(key) for key in self.contacts.keys()]
self.completions.setStringList(l)
def protected(func):
'''Password request wrapper. The password is passed to the function
as the 'password' named argument. "None" indicates either an
unencrypted wallet, or the user cancelled the password request.
An empty input is passed as the empty string.'''
def request_password(self, *args, **kwargs):
parent = self.top_level_window()
password = None
while self.wallet.has_keystore_encryption():
password = self.password_dialog(parent=parent)
if password is None:
# User cancelled password input
return
try:
self.wallet.check_password(password)
break
except Exception as e:
self.show_error(str(e), parent=parent)
continue
kwargs['password'] = password
return func(self, *args, **kwargs)
return request_password
def is_send_fee_frozen(self):
return self.fee_e.isVisible() and self.fee_e.isModified() \
and (self.fee_e.text() or self.fee_e.hasFocus())
def is_send_feerate_frozen(self):
return self.feerate_e.isVisible() and self.feerate_e.isModified() \
and (self.feerate_e.text() or self.feerate_e.hasFocus())
def get_send_fee_estimator(self):
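        # Returns an absolute fee in satoshis if the fee field is frozen, a fee
        # estimator callable built from the frozen sat/byte rate, or None to let
        # the wallet fall back to its own fee estimation.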
if self.is_send_fee_frozen():
fee_estimator = self.fee_e.get_amount()
elif self.is_send_feerate_frozen():
amount = self.feerate_e.get_amount() # sat/byte feerate
amount = 0 if amount is None else amount * 1000 # sat/kilobyte feerate
fee_estimator = partial(
simple_config.SimpleConfig.estimate_fee_for_feerate, amount)
else:
fee_estimator = None
return fee_estimator
def read_send_tab(self):
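        # Validate the Send tab and collect everything needed to build the
        # transaction; returns (outputs, fee_estimator, label, coins), or None
        # if validation fails.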
if self.payment_request and self.payment_request.has_expired():
self.show_error(_('Payment request has expired'))
return
label = self.message_e.text()
if self.payment_request:
outputs = self.payment_request.get_outputs()
else:
errors = self.payto_e.get_errors()
if errors:
self.show_warning(_("Invalid Lines found:") + "\n\n" + '\n'.join([ _("Line #") + str(x[0]+1) + ": " + x[1] for x in errors]))
return
outputs = self.payto_e.get_outputs(self.is_max)
if self.payto_e.is_alias and self.payto_e.validated is False:
alias = self.payto_e.toPlainText()
msg = _('WARNING: the alias "{}" could not be validated via an additional '
'security check, DNSSEC, and thus may not be correct.').format(alias) + '\n'
msg += _('Do you wish to continue?')
if not self.question(msg):
return
if not outputs:
self.show_error(_('No outputs'))
return
for o in outputs:
if o.address is None:
self.show_error(_('Bitcoin Address is None'))
return
if o.type == TYPE_ADDRESS and not bitcoin.is_address(o.address):
self.show_error(_('Invalid Bitcoin Address'))
return
if o.value is None:
self.show_error(_('Invalid Amount'))
return
fee_estimator = self.get_send_fee_estimator()
coins = self.get_coins()
return outputs, fee_estimator, label, coins
def do_preview(self):
self.do_send(preview = True)
def do_send(self, preview = False):
if run_hook('abort_send', self):
return
r = self.read_send_tab()
if not r:
return
outputs, fee_estimator, tx_desc, coins = r
try:
is_sweep = bool(self.tx_external_keypairs)
tx = self.wallet.make_unsigned_transaction(
coins, outputs, self.config, fixed_fee=fee_estimator,
is_sweep=is_sweep)
except (NotEnoughFunds, NoDynamicFeeEstimates) as e:
self.show_message(str(e))
return
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
amount = tx.output_value() if self.is_max else sum(map(lambda x:x[2], outputs))
fee = tx.get_fee()
use_rbf = self.config.get('use_rbf', True)
if use_rbf:
tx.set_rbf(True)
if fee < self.wallet.relayfee() * tx.estimated_size() / 1000:
self.show_error('\n'.join([
_("This transaction requires a higher fee, or it will not be propagated by your current server"),
_("Try to raise your transaction fee, or use a server with a lower relay fee.")
]))
return
if preview:
self.show_transaction(tx, tx_desc)
return
if not self.network:
self.show_error(_("You can't broadcast a transaction without a live network connection."))
return
# confirmation dialog
msg = [
_("Amount to be sent") + ": " + self.format_amount_and_units(amount),
_("Mining fee") + ": " + self.format_amount_and_units(fee),
]
x_fee = run_hook('get_tx_extra_fee', self.wallet, tx)
if x_fee:
x_fee_address, x_fee_amount = x_fee
msg.append( _("Additional fees") + ": " + self.format_amount_and_units(x_fee_amount) )
confirm_rate = simple_config.FEERATE_WARNING_HIGH_FEE
if fee > confirm_rate * tx.estimated_size() / 1000:
msg.append(_('Warning') + ': ' + _("The fee for this transaction seems unusually high."))
if self.wallet.has_keystore_encryption():
msg.append("")
msg.append(_("Enter your password to proceed"))
password = self.password_dialog('\n'.join(msg))
if not password:
return
else:
msg.append(_('Proceed?'))
password = None
if not self.question('\n'.join(msg)):
return
def sign_done(success):
if success:
if not tx.is_complete():
self.show_transaction(tx)
self.do_clear()
else:
self.broadcast_transaction(tx, tx_desc)
self.sign_tx_with_password(tx, sign_done, password)
@protected
def sign_tx(self, tx, callback, password):
self.sign_tx_with_password(tx, callback, password)
def sign_tx_with_password(self, tx, callback, password):
'''Sign the transaction in a separate thread. When done, calls
the callback with a success code of True or False.
'''
def on_success(result):
callback(True)
def on_failure(exc_info):
self.on_error(exc_info)
callback(False)
on_success = run_hook('tc_sign_wrapper', self.wallet, tx, on_success, on_failure) or on_success
if self.tx_external_keypairs:
# can sign directly
task = partial(Transaction.sign, tx, self.tx_external_keypairs)
else:
task = partial(self.wallet.sign_transaction, tx, password)
msg = _('Signing transaction...')
WaitingDialog(self, msg, task, on_success, on_failure)
def broadcast_transaction(self, tx, tx_desc):
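        # Broadcast on a background thread via WaitingDialog; on success, mark a
        # matching invoice as paid and send the BIP70 payment ACK before
        # reporting the result back on the GUI thread.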
def broadcast_thread():
# non-GUI thread
pr = self.payment_request
if pr and pr.has_expired():
self.payment_request = None
return False, _("Payment request has expired")
status = False
try:
self.network.run_from_another_thread(self.network.broadcast_transaction(tx))
except TxBroadcastError as e:
msg = e.get_message_for_gui()
except BestEffortRequestFailed as e:
msg = repr(e)
else:
status, msg = True, tx.txid()
if pr and status is True:
self.invoices.set_paid(pr, tx.txid())
self.invoices.save()
self.payment_request = None
refund_address = self.wallet.get_receiving_address()
coro = pr.send_payment_and_receive_paymentack(str(tx), refund_address)
fut = asyncio.run_coroutine_threadsafe(coro, self.network.asyncio_loop)
ack_status, ack_msg = fut.result(timeout=20)
self.print_error(f"Payment ACK: {ack_status}. Ack message: {ack_msg}")
return status, msg
# Capture current TL window; override might be removed on return
parent = self.top_level_window(lambda win: isinstance(win, MessageBoxMixin))
def broadcast_done(result):
# GUI thread
if result:
status, msg = result
if status:
if tx_desc is not None and tx.is_complete():
self.wallet.set_label(tx.txid(), tx_desc)
parent.show_message(_('Payment sent.') + '\n' + msg)
self.invoice_list.update()
self.do_clear()
else:
msg = msg or ''
parent.show_error(msg)
WaitingDialog(self, _('Broadcasting transaction...'),
broadcast_thread, broadcast_done, self.on_error)
def query_choice(self, msg, choices):
# Needed by QtHandler for hardware wallets
dialog = WindowModalDialog(self.top_level_window())
clayout = ChoicesLayout(msg, choices)
vbox = QVBoxLayout(dialog)
vbox.addLayout(clayout.layout())
vbox.addLayout(Buttons(OkButton(dialog)))
if not dialog.exec_():
return None
return clayout.selected_index()
def lock_amount(self, b):
self.amount_e.setFrozen(b)
self.max_button.setEnabled(not b)
def prepare_for_payment_request(self):
self.show_send_tab()
self.payto_e.is_pr = True
for e in [self.payto_e, self.message_e]:
e.setFrozen(True)
self.lock_amount(True)
self.payto_e.setText(_("please wait..."))
return True
def delete_invoice(self, key):
self.invoices.remove(key)
self.invoice_list.update()
def payment_request_ok(self):
pr = self.payment_request
key = self.invoices.add(pr)
status = self.invoices.get_status(key)
self.invoice_list.update()
if status == PR_PAID:
self.show_message("invoice already paid")
self.do_clear()
self.payment_request = None
return
self.payto_e.is_pr = True
if not pr.has_expired():
self.payto_e.setGreen()
else:
self.payto_e.setExpired()
self.payto_e.setText(pr.get_requestor())
self.amount_e.setText(format_satoshis_plain(pr.get_amount(), self.decimal_point))
self.message_e.setText(pr.get_memo())
# signal to set fee
self.amount_e.textEdited.emit("")
def payment_request_error(self):
self.show_message(self.payment_request.error)
self.payment_request = None
self.do_clear()
def on_pr(self, request):
self.payment_request = request
if self.payment_request.verify(self.contacts):
self.payment_request_ok_signal.emit()
else:
self.payment_request_error_signal.emit()
def pay_to_URI(self, URI):
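        # Parse a bitcoin: URI. If it references a BIP70 payment request (an 'r'
        # parameter, or a signed request with 'name' and 'sig'), wait for the
        # asynchronous fetch to call on_pr; otherwise fill the send fields directly.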
if not URI:
return
try:
out = util.parse_URI(URI, self.on_pr)
except BaseException as e:
self.show_error(_('Invalid bitcoin URI:') + '\n' + str(e))
return
self.show_send_tab()
r = out.get('r')
sig = out.get('sig')
name = out.get('name')
if r or (name and sig):
self.prepare_for_payment_request()
return
address = out.get('address')
amount = out.get('amount')
label = out.get('label')
message = out.get('message')
# use label as description (not BIP21 compliant)
if label and not message:
message = label
if address:
self.payto_e.setText(address)
if message:
self.message_e.setText(message)
if amount:
self.amount_e.setAmount(amount)
self.amount_e.textEdited.emit("")
def do_clear(self):
self.is_max = False
self.not_enough_funds = False
self.payment_request = None
self.payto_e.is_pr = False
for e in [self.payto_e, self.message_e, self.amount_e, self.fiat_send_e,
self.fee_e, self.feerate_e]:
e.setText('')
e.setFrozen(False)
self.fee_slider.activate()
self.feerate_e.setAmount(self.config.fee_per_byte())
self.size_e.setAmount(0)
self.feerounding_icon.setVisible(False)
self.set_pay_from([])
self.tx_external_keypairs = {}
self.update_status()
run_hook('do_clear', self)
def set_frozen_state(self, addrs, freeze):
self.wallet.set_frozen_state(addrs, freeze)
self.address_list.update()
self.utxo_list.update()
self.update_fee()
def create_list_tab(self, l, toolbar=None):
w = QWidget()
w.searchable_list = l
vbox = QVBoxLayout()
w.setLayout(vbox)
vbox.setContentsMargins(0, 0, 0, 0)
vbox.setSpacing(0)
if toolbar:
vbox.addLayout(toolbar)
vbox.addWidget(l)
return w
def create_addresses_tab(self):
from .address_list import AddressList
self.address_list = l = AddressList(self)
toolbar = l.create_toolbar(self.config)
toolbar_shown = self.config.get('show_toolbar_addresses', False)
l.show_toolbar(toolbar_shown)
return self.create_list_tab(l, toolbar)
def create_utxo_tab(self):
from .utxo_list import UTXOList
self.utxo_list = l = UTXOList(self)
return self.create_list_tab(l)
def create_contacts_tab(self):
from .contact_list import ContactList
self.contact_list = l = ContactList(self)
return self.create_list_tab(l)
def remove_address(self, addr):
if self.question(_("Do you want to remove {} from your wallet?").format(addr)):
self.wallet.delete_address(addr)
self.need_update.set() # history, addresses, coins
self.clear_receive_tab()
def get_coins(self):
if self.pay_from:
return self.pay_from
else:
return self.wallet.get_spendable_coins(None, self.config)
def spend_coins(self, coins):
self.set_pay_from(coins)
self.show_send_tab()
self.update_fee()
def paytomany(self):
self.show_send_tab()
self.payto_e.paytomany()
msg = '\n'.join([
_('Enter a list of outputs in the \'Pay to\' field.'),
_('One output per line.'),
_('Format: address, amount'),
_('You may load a CSV file using the file icon.')
])
self.show_message(msg, title=_('Pay to many'))
def payto_contacts(self, labels):
paytos = [self.get_contact_payto(label) for label in labels]
self.show_send_tab()
if len(paytos) == 1:
self.payto_e.setText(paytos[0])
self.amount_e.setFocus()
else:
text = "\n".join([payto + ", 0" for payto in paytos])
self.payto_e.setText(text)
self.payto_e.setFocus()
def set_contact(self, label, address):
if not is_address(address):
self.show_error(_('Invalid Address'))
self.contact_list.update() # Displays original unchanged value
return False
self.contacts[address] = ('address', label)
self.contact_list.update()
self.history_list.update()
self.update_completions()
return True
def delete_contacts(self, labels):
if not self.question(_("Remove {} from your list of contacts?")
.format(" + ".join(labels))):
return
for label in labels:
self.contacts.pop(label)
self.history_list.update()
self.contact_list.update()
self.update_completions()
def show_invoice(self, key):
pr = self.invoices.get(key)
if pr is None:
            self.show_error(_('Cannot find payment request in wallet.'))
return
pr.verify(self.contacts)
self.show_pr_details(pr)
def show_pr_details(self, pr):
key = pr.get_id()
d = WindowModalDialog(self, _("Invoice"))
vbox = QVBoxLayout(d)
grid = QGridLayout()
grid.addWidget(QLabel(_("Requestor") + ':'), 0, 0)
grid.addWidget(QLabel(pr.get_requestor()), 0, 1)
grid.addWidget(QLabel(_("Amount") + ':'), 1, 0)
outputs_str = '\n'.join(map(lambda x: self.format_amount(x[2])+ self.base_unit() + ' @ ' + x[1], pr.get_outputs()))
grid.addWidget(QLabel(outputs_str), 1, 1)
expires = pr.get_expiration_date()
grid.addWidget(QLabel(_("Memo") + ':'), 2, 0)
grid.addWidget(QLabel(pr.get_memo()), 2, 1)
grid.addWidget(QLabel(_("Signature") + ':'), 3, 0)
grid.addWidget(QLabel(pr.get_verify_status()), 3, 1)
if expires:
grid.addWidget(QLabel(_("Expires") + ':'), 4, 0)
grid.addWidget(QLabel(format_time(expires)), 4, 1)
vbox.addLayout(grid)
def do_export():
name = str(key) + '.bip70'
fn = self.getSaveFileName(_("Save invoice to file"), name, filter="*.bip70")
if not fn:
return
with open(fn, 'wb') as f:
                f.write(pr.raw)
            self.show_message(_('Invoice saved as') + ' ' + fn)
exportButton = EnterButton(_('Save'), do_export)
def do_delete():
if self.question(_('Delete invoice?')):
self.invoices.remove(key)
self.history_list.update()
self.invoice_list.update()
d.close()
deleteButton = EnterButton(_('Delete'), do_delete)
vbox.addLayout(Buttons(exportButton, deleteButton, CloseButton(d)))
d.exec_()
def do_pay_invoice(self, key):
pr = self.invoices.get(key)
self.payment_request = pr
self.prepare_for_payment_request()
pr.error = None # this forces verify() to re-run
if pr.verify(self.contacts):
self.payment_request_ok()
else:
self.payment_request_error()
def create_console_tab(self):
from .console import Console
self.console = console = Console()
return console
def update_console(self):
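        # Expose the wallet, network, config and the command-line commands in the
        # interactive console namespace, wrapping each command so it can prompt
        # for a password when needed.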
console = self.console
console.history = self.config.get("console-history",[])
console.history_index = len(console.history)
console.updateNamespace({
'wallet': self.wallet,
'network': self.network,
'plugins': self.gui_object.plugins,
'window': self,
'config': self.config,
'electrum': electrum,
'daemon': self.gui_object.daemon,
'util': util,
'bitcoin': bitcoin,
})
c = commands.Commands(self.config, self.wallet, self.network, lambda: self.console.set_json(True))
methods = {}
def mkfunc(f, method):
return lambda *args: f(method, args, self.password_dialog)
for m in dir(c):
if m[0]=='_' or m in ['network','wallet','config']: continue
methods[m] = mkfunc(c._run, m)
console.updateNamespace(methods)
def create_status_bar(self):
sb = QStatusBar()
sb.setFixedHeight(35)
qtVersion = qVersion()
self.balance_label = QLabel("Loading wallet...")
self.balance_label.setTextInteractionFlags(Qt.TextSelectableByMouse)
self.balance_label.setStyleSheet("""QLabel { padding: 0 }""")
sb.addWidget(self.balance_label)
self.search_box = QLineEdit()
self.search_box.textChanged.connect(self.do_search)
self.search_box.hide()
sb.addPermanentWidget(self.search_box)
self.lock_icon = QIcon()
self.password_button = StatusBarButton(self.lock_icon, _("Password"), self.change_password_dialog )
sb.addPermanentWidget(self.password_button)
sb.addPermanentWidget(StatusBarButton(QIcon(":icons/preferences.png"), _("Preferences"), self.settings_dialog ) )
self.seed_button = StatusBarButton(QIcon(":icons/seed.png"), _("Seed"), self.show_seed_dialog )
sb.addPermanentWidget(self.seed_button)
self.status_button = StatusBarButton(QIcon(":icons/status_disconnected.png"), _("Network"), lambda: self.gui_object.show_network_dialog(self))
sb.addPermanentWidget(self.status_button)
run_hook('create_status_bar', sb)
self.setStatusBar(sb)
def update_lock_icon(self):
icon = QIcon(":icons/lock.png") if self.wallet.has_password() else QIcon(":icons/unlock.png")
self.password_button.setIcon(icon)
def update_buttons_on_seed(self):
self.seed_button.setVisible(self.wallet.has_seed())
self.password_button.setVisible(self.wallet.may_have_password())
self.send_button.setVisible(not self.wallet.is_watching_only())
def change_password_dialog(self):
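        # Hardware wallets encrypt the file with a key obtained from the device
        # (STO_EV_XPUB_PW), so only the "encrypt file" choice is asked; software
        # wallets go through the usual old/new password dialog.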
from electrum.storage import STO_EV_XPUB_PW
if self.wallet.get_available_storage_encryption_version() == STO_EV_XPUB_PW:
from .password_dialog import ChangePasswordDialogForHW
d = ChangePasswordDialogForHW(self, self.wallet)
ok, encrypt_file = d.run()
if not ok:
return
try:
hw_dev_pw = self.wallet.keystore.get_password_for_storage_encryption()
except UserCancelled:
return
except BaseException as e:
traceback.print_exc(file=sys.stderr)
self.show_error(str(e))
return
old_password = hw_dev_pw if self.wallet.has_password() else None
new_password = hw_dev_pw if encrypt_file else None
else:
from .password_dialog import ChangePasswordDialogForSW
d = ChangePasswordDialogForSW(self, self.wallet)
ok, old_password, new_password, encrypt_file = d.run()
if not ok:
return
try:
self.wallet.update_password(old_password, new_password, encrypt_file)
except InvalidPassword as e:
self.show_error(str(e))
return
except BaseException:
traceback.print_exc(file=sys.stdout)
self.show_error(_('Failed to update password'))
return
msg = _('Password was updated successfully') if self.wallet.has_password() else _('Password is disabled, this wallet is not protected')
self.show_message(msg, title=_("Success"))
self.update_lock_icon()
def toggle_search(self):
tab = self.tabs.currentWidget()
#if hasattr(tab, 'searchable_list'):
# tab.searchable_list.toggle_toolbar()
#return
self.search_box.setHidden(not self.search_box.isHidden())
if not self.search_box.isHidden():
self.search_box.setFocus(1)
else:
self.do_search('')
def do_search(self, t):
tab = self.tabs.currentWidget()
if hasattr(tab, 'searchable_list'):
tab.searchable_list.filter(t)
def new_contact_dialog(self):
d = WindowModalDialog(self, _("New Contact"))
vbox = QVBoxLayout(d)
vbox.addWidget(QLabel(_('New Contact') + ':'))
grid = QGridLayout()
line1 = QLineEdit()
line1.setFixedWidth(280)
line2 = QLineEdit()
line2.setFixedWidth(280)
grid.addWidget(QLabel(_("Address")), 1, 0)
grid.addWidget(line1, 1, 1)
grid.addWidget(QLabel(_("Name")), 2, 0)
grid.addWidget(line2, 2, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if d.exec_():
self.set_contact(line2.text(), line1.text())
def show_master_public_keys(self):
dialog = WindowModalDialog(self, _("Wallet Information"))
dialog.setMinimumSize(500, 100)
mpk_list = self.wallet.get_master_public_keys()
vbox = QVBoxLayout()
wallet_type = self.wallet.storage.get('wallet_type', '')
if self.wallet.is_watching_only():
wallet_type += ' [{}]'.format(_('watching-only'))
seed_available = _('True') if self.wallet.has_seed() else _('False')
keystore_types = [k.get_type_text() for k in self.wallet.get_keystores()]
grid = QGridLayout()
basename = os.path.basename(self.wallet.storage.path)
grid.addWidget(QLabel(_("Wallet name")+ ':'), 0, 0)
grid.addWidget(QLabel(basename), 0, 1)
grid.addWidget(QLabel(_("Wallet type")+ ':'), 1, 0)
grid.addWidget(QLabel(wallet_type), 1, 1)
grid.addWidget(QLabel(_("Script type")+ ':'), 2, 0)
grid.addWidget(QLabel(self.wallet.txin_type), 2, 1)
grid.addWidget(QLabel(_("Seed available") + ':'), 3, 0)
grid.addWidget(QLabel(str(seed_available)), 3, 1)
if len(keystore_types) <= 1:
grid.addWidget(QLabel(_("Keystore type") + ':'), 4, 0)
ks_type = str(keystore_types[0]) if keystore_types else _('No keystore')
grid.addWidget(QLabel(ks_type), 4, 1)
vbox.addLayout(grid)
if self.wallet.is_deterministic():
mpk_text = ShowQRTextEdit()
mpk_text.setMaximumHeight(150)
mpk_text.addCopyButton(self.app)
def show_mpk(index):
mpk_text.setText(mpk_list[index])
# only show the combobox in case multiple accounts are available
if len(mpk_list) > 1:
def label(key):
if isinstance(self.wallet, Multisig_Wallet):
return _("cosigner") + f' {key+1} ( keystore: {keystore_types[key]} )'
return ''
labels = [label(i) for i in range(len(mpk_list))]
on_click = lambda clayout: show_mpk(clayout.selected_index())
labels_clayout = ChoicesLayout(_("Master Public Keys"), labels, on_click)
vbox.addLayout(labels_clayout.layout())
else:
vbox.addWidget(QLabel(_("Master Public Key")))
show_mpk(0)
vbox.addWidget(mpk_text)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(dialog)))
dialog.setLayout(vbox)
dialog.exec_()
def remove_wallet(self):
if self.question('\n'.join([
_('Delete wallet file?'),
"%s"%self.wallet.storage.path,
_('If your wallet contains funds, make sure you have saved its seed.')])):
self._delete_wallet()
@protected
def _delete_wallet(self, password):
wallet_path = self.wallet.storage.path
basename = os.path.basename(wallet_path)
self.gui_object.daemon.stop_wallet(wallet_path)
self.close()
os.unlink(wallet_path)
self.show_error(_("Wallet removed: {}").format(basename))
@protected
def show_seed_dialog(self, password):
if not self.wallet.has_seed():
self.show_message(_('This wallet has no seed'))
return
keystore = self.wallet.get_keystore()
try:
seed = keystore.get_seed(password)
passphrase = keystore.get_passphrase(password)
except BaseException as e:
self.show_error(str(e))
return
from .seed_dialog import SeedDialog
d = SeedDialog(self, seed, passphrase)
d.exec_()
def show_qrcode(self, data, title = _("QR code"), parent=None):
if not data:
return
d = QRDialog(data, parent or self, title)
d.exec_()
@protected
def show_private_key(self, address, password):
if not address:
return
try:
pk, redeem_script = self.wallet.export_private_key(address, password)
except Exception as e:
traceback.print_exc(file=sys.stdout)
self.show_message(str(e))
return
xtype = bitcoin.deserialize_privkey(pk)[0]
d = WindowModalDialog(self, _("Private key"))
d.setMinimumSize(600, 150)
vbox = QVBoxLayout()
vbox.addWidget(QLabel(_("Address") + ': ' + address))
vbox.addWidget(QLabel(_("Script type") + ': ' + xtype))
vbox.addWidget(QLabel(_("Private key") + ':'))
keys_e = ShowQRTextEdit(text=pk)
keys_e.addCopyButton(self.app)
vbox.addWidget(keys_e)
if redeem_script:
vbox.addWidget(QLabel(_("Redeem Script") + ':'))
rds_e = ShowQRTextEdit(text=redeem_script)
rds_e.addCopyButton(self.app)
vbox.addWidget(rds_e)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
d.exec_()
msg_sign = _("Signing with an address actually means signing with the corresponding "
"private key, and verifying with the corresponding public key. The "
"address you have entered does not have a unique public key, so these "
"operations cannot be performed.") + '\n\n' + \
_('The operation is undefined. Not just in Electrum, but in general.')
@protected
def do_sign(self, address, message, signature, password):
address = address.text().strip()
message = message.toPlainText().strip()
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
if not self.wallet.is_mine(address):
self.show_message(_('Address not in wallet.'))
return
txin_type = self.wallet.get_txin_type(address)
if txin_type not in ['p2pkh', 'p2wpkh', 'p2wpkh-p2sh']:
self.show_message(_('Cannot sign messages with this type of address:') + \
' ' + txin_type + '\n\n' + self.msg_sign)
return
task = partial(self.wallet.sign_message, address, message, password)
def show_signed_message(sig):
try:
signature.setText(base64.b64encode(sig).decode('ascii'))
except RuntimeError:
# (signature) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=show_signed_message)
def do_verify(self, address, message, signature):
address = address.text().strip()
message = message.toPlainText().strip().encode('utf-8')
if not bitcoin.is_address(address):
self.show_message(_('Invalid Bitcoin address.'))
return
try:
# This can throw on invalid base64
sig = base64.b64decode(str(signature.toPlainText()))
verified = ecc.verify_message_with_address(address, sig, message)
except Exception as e:
verified = False
if verified:
self.show_message(_("Signature verified"))
else:
self.show_error(_("Wrong signature"))
def sign_verify_message(self, address=''):
d = WindowModalDialog(self, _('Sign/verify Message'))
d.setMinimumSize(610, 290)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
address_e = QLineEdit()
address_e.setText(address)
layout.addWidget(QLabel(_('Address')), 2, 0)
layout.addWidget(address_e, 2, 1)
signature_e = QTextEdit()
layout.addWidget(QLabel(_('Signature')), 3, 0)
layout.addWidget(signature_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Sign"))
b.clicked.connect(lambda: self.do_sign(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Verify"))
b.clicked.connect(lambda: self.do_verify(address_e, message_e, signature_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
@protected
def do_decrypt(self, message_e, pubkey_e, encrypted_e, password):
if self.wallet.is_watching_only():
self.show_message(_('This is a watching-only wallet.'))
return
cyphertext = encrypted_e.toPlainText()
task = partial(self.wallet.decrypt_message, pubkey_e.text(), cyphertext, password)
def setText(text):
try:
message_e.setText(text.decode('utf-8'))
except RuntimeError:
# (message_e) wrapped C/C++ object has been deleted
pass
self.wallet.thread.add(task, on_success=setText)
def do_encrypt(self, message_e, pubkey_e, encrypted_e):
message = message_e.toPlainText()
message = message.encode('utf-8')
try:
public_key = ecc.ECPubkey(bfh(pubkey_e.text()))
except BaseException as e:
traceback.print_exc(file=sys.stdout)
self.show_warning(_('Invalid Public key'))
return
encrypted = public_key.encrypt_message(message)
encrypted_e.setText(encrypted.decode('ascii'))
def encrypt_message(self, address=''):
d = WindowModalDialog(self, _('Encrypt/decrypt Message'))
d.setMinimumSize(610, 490)
layout = QGridLayout(d)
message_e = QTextEdit()
layout.addWidget(QLabel(_('Message')), 1, 0)
layout.addWidget(message_e, 1, 1)
layout.setRowStretch(2,3)
pubkey_e = QLineEdit()
if address:
pubkey = self.wallet.get_public_key(address)
pubkey_e.setText(pubkey)
layout.addWidget(QLabel(_('Public key')), 2, 0)
layout.addWidget(pubkey_e, 2, 1)
encrypted_e = QTextEdit()
layout.addWidget(QLabel(_('Encrypted')), 3, 0)
layout.addWidget(encrypted_e, 3, 1)
layout.setRowStretch(3,1)
hbox = QHBoxLayout()
b = QPushButton(_("Encrypt"))
b.clicked.connect(lambda: self.do_encrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Decrypt"))
b.clicked.connect(lambda: self.do_decrypt(message_e, pubkey_e, encrypted_e))
hbox.addWidget(b)
b = QPushButton(_("Close"))
b.clicked.connect(d.accept)
hbox.addWidget(b)
layout.addLayout(hbox, 4, 1)
d.exec_()
def password_dialog(self, msg=None, parent=None):
from .password_dialog import PasswordDialog
parent = parent or self
d = PasswordDialog(parent, msg)
return d.run()
def tx_from_text(self, txt):
from electrum.transaction import tx_from_str
try:
tx = tx_from_str(txt)
return Transaction(tx)
except BaseException as e:
self.show_critical(_("Electrum was unable to parse your transaction") + ":\n" + str(e))
return
def read_tx_from_qrcode(self):
from electrum import qrscanner
try:
data = qrscanner.scan_barcode(self.config.get_video_device())
except BaseException as e:
self.show_error(str(e))
return
if not data:
return
# if the user scanned a bitcoin URI
if str(data).startswith("bitcoin:"):
self.pay_to_URI(data)
return
# else if the user scanned an offline signed tx
try:
data = bh2u(bitcoin.base_decode(data, length=None, base=43))
except BaseException as e:
self.show_error((_('Could not decode QR code')+':\n{}').format(repr(e)))
return
tx = self.tx_from_text(data)
if not tx:
return
self.show_transaction(tx)
def read_tx_from_file(self):
fileName = self.getOpenFileName(_("Select your transaction file"), "*.txn")
if not fileName:
return
try:
with open(fileName, "r") as f:
file_content = f.read()
except (ValueError, IOError, os.error) as reason:
self.show_critical(_("Electrum was unable to open your transaction file") + "\n" + str(reason), title=_("Unable to read file or no transaction found"))
return
return self.tx_from_text(file_content)
def do_process_from_text(self):
text = text_dialog(self, _('Input raw transaction'), _("Transaction:"), _("Load transaction"))
if not text:
return
tx = self.tx_from_text(text)
if tx:
self.show_transaction(tx)
def do_process_from_file(self):
tx = self.read_tx_from_file()
if tx:
self.show_transaction(tx)
def do_process_from_txid(self):
from electrum import transaction
txid, ok = QInputDialog.getText(self, _('Lookup transaction'), _('Transaction ID') + ':')
if ok and txid:
txid = str(txid).strip()
try:
raw_tx = self.network.run_from_another_thread(
self.network.get_transaction(txid, timeout=10))
except Exception as e:
self.show_message(_("Error getting transaction from network") + ":\n" + str(e))
return
tx = transaction.Transaction(raw_tx)
self.show_transaction(tx)
@protected
def export_privkeys_dialog(self, password):
if self.wallet.is_watching_only():
self.show_message(_("This is a watching-only wallet"))
return
if isinstance(self.wallet, Multisig_Wallet):
self.show_message(_('WARNING: This is a multi-signature wallet.') + '\n' +
_('It cannot be "backed up" by simply exporting these private keys.'))
d = WindowModalDialog(self, _('Private keys'))
d.setMinimumSize(980, 300)
vbox = QVBoxLayout(d)
msg = "%s\n%s\n%s" % (_("WARNING: ALL your private keys are secret."),
_("Exposing a single private key can compromise your entire wallet!"),
_("In particular, DO NOT use 'redeem private key' services proposed by third parties."))
vbox.addWidget(QLabel(msg))
e = QTextEdit()
e.setReadOnly(True)
vbox.addWidget(e)
defaultname = 'electrum-private-keys.csv'
select_msg = _('Select file to export your private keys to')
hbox, filename_e, csv_button = filename_field(self, self.config, defaultname, select_msg)
vbox.addLayout(hbox)
b = OkButton(d, _('Export'))
b.setEnabled(False)
vbox.addLayout(Buttons(CancelButton(d), b))
private_keys = {}
addresses = self.wallet.get_addresses()
done = False
cancelled = False
def privkeys_thread():
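            # Runs on a worker thread: export one key at a time so the dialog can
            # show progress and the user can cancel before all keys are derived.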
for addr in addresses:
time.sleep(0.1)
if done or cancelled:
break
privkey = self.wallet.export_private_key(addr, password)[0]
private_keys[addr] = privkey
self.computing_privkeys_signal.emit()
if not cancelled:
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.emit()
def show_privkeys():
s = "\n".join( map( lambda x: x[0] + "\t"+ x[1], private_keys.items()))
e.setText(s)
b.setEnabled(True)
self.show_privkeys_signal.disconnect()
nonlocal done
done = True
def on_dialog_closed(*args):
nonlocal done
nonlocal cancelled
if not done:
cancelled = True
self.computing_privkeys_signal.disconnect()
self.show_privkeys_signal.disconnect()
self.computing_privkeys_signal.connect(lambda: e.setText("Please wait... %d/%d"%(len(private_keys),len(addresses))))
self.show_privkeys_signal.connect(show_privkeys)
d.finished.connect(on_dialog_closed)
threading.Thread(target=privkeys_thread).start()
if not d.exec_():
done = True
return
filename = filename_e.text()
if not filename:
return
try:
self.do_export_privkeys(filename, private_keys, csv_button.isChecked())
except (IOError, os.error) as reason:
txt = "\n".join([
_("Electrum was unable to produce a private key-export."),
str(reason)
])
self.show_critical(txt, title=_("Unable to create csv"))
except Exception as e:
self.show_message(str(e))
return
self.show_message(_("Private keys exported."))
def do_export_privkeys(self, fileName, pklist, is_csv):
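        # Write the address/private-key pairs either as a two-column CSV file or
        # as a JSON object keyed by address.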
with open(fileName, "w+") as f:
if is_csv:
transaction = csv.writer(f)
transaction.writerow(["address", "private_key"])
for addr, pk in pklist.items():
transaction.writerow(["%34s"%addr,pk])
else:
f.write(json.dumps(pklist, indent = 4))
def do_import_labels(self):
def import_labels(path):
def _validate(data):
return data # TODO
def import_labels_assign(data):
for key, value in data.items():
self.wallet.set_label(key, value)
import_meta(path, _validate, import_labels_assign)
def on_import():
self.need_update.set()
import_meta_gui(self, _('labels'), import_labels, on_import)
def do_export_labels(self):
def export_labels(filename):
export_meta(self.wallet.labels, filename)
export_meta_gui(self, _('labels'), export_labels)
def sweep_key_dialog(self):
d = WindowModalDialog(self, title=_('Sweep private keys'))
d.setMinimumSize(600, 300)
vbox = QVBoxLayout(d)
hbox_top = QHBoxLayout()
hbox_top.addWidget(QLabel(_("Enter private keys:")))
hbox_top.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
vbox.addLayout(hbox_top)
keys_e = ScanQRTextEdit(allow_multi=True)
keys_e.setTabChangesFocus(True)
vbox.addWidget(keys_e)
addresses = self.wallet.get_unused_addresses()
if not addresses:
try:
addresses = self.wallet.get_receiving_addresses()
except AttributeError:
addresses = self.wallet.get_addresses()
h, address_e = address_field(addresses)
vbox.addLayout(h)
vbox.addStretch(1)
button = OkButton(d, _('Sweep'))
vbox.addLayout(Buttons(CancelButton(d), button))
button.setEnabled(False)
def get_address():
addr = str(address_e.text()).strip()
if bitcoin.is_address(addr):
return addr
def get_pk():
text = str(keys_e.toPlainText())
return keystore.get_private_keys(text)
f = lambda: button.setEnabled(get_address() is not None and get_pk() is not None)
on_address = lambda text: address_e.setStyleSheet((ColorScheme.DEFAULT if get_address() else ColorScheme.RED).as_stylesheet())
keys_e.textChanged.connect(f)
address_e.textChanged.connect(f)
address_e.textChanged.connect(on_address)
on_address(str(address_e.text()))
if not d.exec_():
return
# user pressed "sweep"
addr = get_address()
try:
self.wallet.check_address(addr)
except InternalAddressCorruption as e:
self.show_error(str(e))
raise
try:
coins, keypairs = sweep_preparations(get_pk(), self.network)
except Exception as e: # FIXME too broad...
#traceback.print_exc(file=sys.stderr)
self.show_message(str(e))
return
self.do_clear()
self.tx_external_keypairs = keypairs
self.spend_coins(coins)
self.payto_e.setText(addr)
self.spend_max()
self.payto_e.setFrozen(True)
self.amount_e.setFrozen(True)
self.warn_if_watching_only()
def _do_import(self, title, header_layout, func):
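        # Generic import helper: split the pasted text into whitespace-separated
        # entries, hand them to func (e.g. self.wallet.import_addresses), and
        # report up to ten accepted and rejected entries before refreshing the
        # address and history lists.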
text = text_dialog(self, title, header_layout, _('Import'), allow_multi=True)
if not text:
return
keys = str(text).split()
good_inputs, bad_inputs = func(keys)
if good_inputs:
msg = '\n'.join(good_inputs[:10])
if len(good_inputs) > 10: msg += '\n...'
self.show_message(_("The following addresses were added")
+ f' ({len(good_inputs)}):\n' + msg)
if bad_inputs:
msg = "\n".join(f"{key[:10]}... ({msg})" for key, msg in bad_inputs[:10])
if len(bad_inputs) > 10: msg += '\n...'
self.show_error(_("The following inputs could not be imported")
+ f' ({len(bad_inputs)}):\n' + msg)
self.address_list.update()
self.history_list.update()
def import_addresses(self):
if not self.wallet.can_import_address():
return
title, msg = _('Import addresses'), _("Enter addresses")+':'
self._do_import(title, msg, self.wallet.import_addresses)
@protected
def do_import_privkey(self, password):
if not self.wallet.can_import_privkey():
return
title = _('Import private keys')
header_layout = QHBoxLayout()
header_layout.addWidget(QLabel(_("Enter private keys")+':'))
header_layout.addWidget(InfoButton(WIF_HELP_TEXT), alignment=Qt.AlignRight)
self._do_import(title, header_layout, lambda x: self.wallet.import_private_keys(x, password))
def update_fiat(self):
b = self.fx and self.fx.is_enabled()
self.fiat_send_e.setVisible(b)
self.fiat_receive_e.setVisible(b)
self.history_list.update()
self.address_list.refresh_headers()
self.address_list.update()
self.update_status()
def settings_dialog(self):
self.need_restart = False
d = WindowModalDialog(self, _('Preferences'))
vbox = QVBoxLayout()
tabs = QTabWidget()
gui_widgets = []
fee_widgets = []
tx_widgets = []
id_widgets = []
# language
lang_help = _('Select which language is used in the GUI (after restart).')
lang_label = HelpLabel(_('Language') + ':', lang_help)
lang_combo = QComboBox()
from electrum.i18n import languages
lang_combo.addItems(list(languages.values()))
lang_keys = list(languages.keys())
lang_cur_setting = self.config.get("language", '')
try:
index = lang_keys.index(lang_cur_setting)
except ValueError: # not in list
index = 0
lang_combo.setCurrentIndex(index)
if not self.config.is_modifiable('language'):
for w in [lang_combo, lang_label]: w.setEnabled(False)
def on_lang(x):
lang_request = list(languages.keys())[lang_combo.currentIndex()]
if lang_request != self.config.get('language'):
self.config.set_key("language", lang_request, True)
self.need_restart = True
lang_combo.currentIndexChanged.connect(on_lang)
gui_widgets.append((lang_label, lang_combo))
nz_help = _('Number of zeros displayed after the decimal point. For example, if this is set to 2, "1." will be displayed as "1.00"')
nz_label = HelpLabel(_('Zeros after decimal point') + ':', nz_help)
nz = QSpinBox()
nz.setMinimum(0)
nz.setMaximum(self.decimal_point)
nz.setValue(self.num_zeros)
if not self.config.is_modifiable('num_zeros'):
for w in [nz, nz_label]: w.setEnabled(False)
def on_nz():
value = nz.value()
if self.num_zeros != value:
self.num_zeros = value
self.config.set_key('num_zeros', value, True)
self.history_list.update()
self.address_list.update()
nz.valueChanged.connect(on_nz)
gui_widgets.append((nz_label, nz))
msg = '\n'.join([
_('Time based: fee rate is based on average confirmation time estimates'),
_('Mempool based: fee rate is targeting a depth in the memory pool')
]
)
fee_type_label = HelpLabel(_('Fee estimation') + ':', msg)
fee_type_combo = QComboBox()
fee_type_combo.addItems([_('Static'), _('ETA'), _('Mempool')])
fee_type_combo.setCurrentIndex((2 if self.config.use_mempool_fees() else 1) if self.config.is_dynfee() else 0)
def on_fee_type(x):
self.config.set_key('mempool_fees', x==2)
self.config.set_key('dynamic_fees', x>0)
self.fee_slider.update()
fee_type_combo.currentIndexChanged.connect(on_fee_type)
fee_widgets.append((fee_type_label, fee_type_combo))
feebox_cb = QCheckBox(_('Edit fees manually'))
feebox_cb.setChecked(self.config.get('show_fee', False))
feebox_cb.setToolTip(_("Show fee edit box in send tab."))
def on_feebox(x):
self.config.set_key('show_fee', x == Qt.Checked)
self.fee_adv_controls.setVisible(bool(x))
feebox_cb.stateChanged.connect(on_feebox)
fee_widgets.append((feebox_cb, None))
use_rbf = self.config.get('use_rbf', True)
use_rbf_cb = QCheckBox(_('Use Replace-By-Fee'))
use_rbf_cb.setChecked(use_rbf)
use_rbf_cb.setToolTip(
_('If you check this box, your transactions will be marked as non-final,') + '\n' + \
_('and you will have the possibility, while they are unconfirmed, to replace them with transactions that pay higher fees.') + '\n' + \
_('Note that some merchants do not accept non-final transactions until they are confirmed.'))
def on_use_rbf(x):
self.config.set_key('use_rbf', bool(x))
batch_rbf_cb.setEnabled(bool(x))
use_rbf_cb.stateChanged.connect(on_use_rbf)
fee_widgets.append((use_rbf_cb, None))
batch_rbf_cb = QCheckBox(_('Batch RBF transactions'))
batch_rbf_cb.setChecked(self.config.get('batch_rbf', False))
batch_rbf_cb.setEnabled(use_rbf)
batch_rbf_cb.setToolTip(
_('If you check this box, your unconfirmed transactions will be consolidated into a single transaction.') + '\n' + \
_('This will save fees.'))
def on_batch_rbf(x):
self.config.set_key('batch_rbf', bool(x))
batch_rbf_cb.stateChanged.connect(on_batch_rbf)
fee_widgets.append((batch_rbf_cb, None))
msg = _('OpenAlias record, used to receive coins and to sign payment requests.') + '\n\n'\
+ _('The following alias providers are available:') + '\n'\
+ '\n'.join(['https://cryptoname.co/', 'http://xmr.link']) + '\n\n'\
+ 'For more information, see https://openalias.org'
alias_label = HelpLabel(_('OpenAlias') + ':', msg)
alias = self.config.get('alias','')
alias_e = QLineEdit(alias)
def set_alias_color():
if not self.config.get('alias'):
alias_e.setStyleSheet("")
return
if self.alias_info:
alias_addr, alias_name, validated = self.alias_info
alias_e.setStyleSheet((ColorScheme.GREEN if validated else ColorScheme.RED).as_stylesheet(True))
else:
alias_e.setStyleSheet(ColorScheme.RED.as_stylesheet(True))
def on_alias_edit():
alias_e.setStyleSheet("")
alias = str(alias_e.text())
self.config.set_key('alias', alias, True)
if alias:
self.fetch_alias()
set_alias_color()
self.alias_received_signal.connect(set_alias_color)
alias_e.editingFinished.connect(on_alias_edit)
id_widgets.append((alias_label, alias_e))
# SSL certificate
msg = ' '.join([
_('SSL certificate used to sign payment requests.'),
_('Use setconfig to set ssl_chain and ssl_privkey.'),
])
if self.config.get('ssl_privkey') or self.config.get('ssl_chain'):
try:
SSL_identity = paymentrequest.check_ssl_config(self.config)
SSL_error = None
except BaseException as e:
SSL_identity = "error"
SSL_error = str(e)
else:
SSL_identity = ""
SSL_error = None
SSL_id_label = HelpLabel(_('SSL certificate') + ':', msg)
SSL_id_e = QLineEdit(SSL_identity)
SSL_id_e.setStyleSheet((ColorScheme.RED if SSL_error else ColorScheme.GREEN).as_stylesheet(True) if SSL_identity else '')
if SSL_error:
SSL_id_e.setToolTip(SSL_error)
SSL_id_e.setReadOnly(True)
id_widgets.append((SSL_id_label, SSL_id_e))
units = base_units_list
msg = (_('Base unit of your wallet.')
+ '\n1 BTC = 1000 mBTC. 1 mBTC = 1000 bits. 1 bit = 100 sat.\n'
+ _('This setting affects the Send tab, and all balance related fields.'))
unit_label = HelpLabel(_('Base unit') + ':', msg)
unit_combo = QComboBox()
unit_combo.addItems(units)
unit_combo.setCurrentIndex(units.index(self.base_unit()))
def on_unit(x, nz):
unit_result = units[unit_combo.currentIndex()]
if self.base_unit() == unit_result:
return
edits = self.amount_e, self.fee_e, self.receive_amount_e
amounts = [edit.get_amount() for edit in edits]
self.decimal_point = base_unit_name_to_decimal_point(unit_result)
self.config.set_key('decimal_point', self.decimal_point, True)
nz.setMaximum(self.decimal_point)
self.history_list.update()
self.request_list.update()
self.address_list.update()
for edit, amount in zip(edits, amounts):
edit.setAmount(amount)
self.update_status()
unit_combo.currentIndexChanged.connect(lambda x: on_unit(x, nz))
gui_widgets.append((unit_label, unit_combo))
block_explorers = sorted(util.block_explorer_info().keys())
msg = _('Choose which online block explorer to use for functions that open a web browser')
block_ex_label = HelpLabel(_('Online Block Explorer') + ':', msg)
block_ex_combo = QComboBox()
block_ex_combo.addItems(block_explorers)
block_ex_combo.setCurrentIndex(block_ex_combo.findText(util.block_explorer(self.config)))
def on_be(x):
be_result = block_explorers[block_ex_combo.currentIndex()]
self.config.set_key('block_explorer', be_result, True)
block_ex_combo.currentIndexChanged.connect(on_be)
gui_widgets.append((block_ex_label, block_ex_combo))
from electrum import qrscanner
system_cameras = qrscanner._find_system_cameras()
qr_combo = QComboBox()
qr_combo.addItem("Default","default")
for camera, device in system_cameras.items():
qr_combo.addItem(camera, device)
#combo.addItem("Manually specify a device", config.get("video_device"))
index = qr_combo.findData(self.config.get("video_device"))
qr_combo.setCurrentIndex(index)
msg = _("Install the zbar package to enable this.")
qr_label = HelpLabel(_('Video Device') + ':', msg)
qr_combo.setEnabled(qrscanner.libzbar is not None)
on_video_device = lambda x: self.config.set_key("video_device", qr_combo.itemData(x), True)
qr_combo.currentIndexChanged.connect(on_video_device)
gui_widgets.append((qr_label, qr_combo))
colortheme_combo = QComboBox()
colortheme_combo.addItem(_('Light'), 'default')
colortheme_combo.addItem(_('Dark'), 'dark')
index = colortheme_combo.findData(self.config.get('qt_gui_color_theme', 'default'))
colortheme_combo.setCurrentIndex(index)
colortheme_label = QLabel(_('Color theme') + ':')
def on_colortheme(x):
self.config.set_key('qt_gui_color_theme', colortheme_combo.itemData(x), True)
self.need_restart = True
colortheme_combo.currentIndexChanged.connect(on_colortheme)
gui_widgets.append((colortheme_label, colortheme_combo))
usechange_cb = QCheckBox(_('Use change addresses'))
usechange_cb.setChecked(self.wallet.use_change)
if not self.config.is_modifiable('use_change'): usechange_cb.setEnabled(False)
def on_usechange(x):
usechange_result = x == Qt.Checked
if self.wallet.use_change != usechange_result:
self.wallet.use_change = usechange_result
self.wallet.storage.put('use_change', self.wallet.use_change)
multiple_cb.setEnabled(self.wallet.use_change)
usechange_cb.stateChanged.connect(on_usechange)
usechange_cb.setToolTip(_('Using change addresses makes it more difficult for other people to track your transactions.'))
tx_widgets.append((usechange_cb, None))
def on_multiple(x):
multiple = x == Qt.Checked
if self.wallet.multiple_change != multiple:
self.wallet.multiple_change = multiple
self.wallet.storage.put('multiple_change', multiple)
multiple_change = self.wallet.multiple_change
multiple_cb = QCheckBox(_('Use multiple change addresses'))
multiple_cb.setEnabled(self.wallet.use_change)
multiple_cb.setToolTip('\n'.join([
_('In some cases, use up to 3 change addresses in order to break '
'up large coin amounts and obfuscate the recipient address.'),
_('This may result in higher transaction fees.')
]))
multiple_cb.setChecked(multiple_change)
multiple_cb.stateChanged.connect(on_multiple)
tx_widgets.append((multiple_cb, None))
def fmt_docs(key, klass):
lines = [ln.lstrip(" ") for ln in klass.__doc__.split("\n")]
return '\n'.join([key, "", " ".join(lines)])
choosers = sorted(coinchooser.COIN_CHOOSERS.keys())
if len(choosers) > 1:
chooser_name = coinchooser.get_name(self.config)
msg = _('Choose coin (UTXO) selection method. The following are available:\n\n')
msg += '\n\n'.join(fmt_docs(*item) for item in coinchooser.COIN_CHOOSERS.items())
chooser_label = HelpLabel(_('Coin selection') + ':', msg)
chooser_combo = QComboBox()
chooser_combo.addItems(choosers)
i = choosers.index(chooser_name) if chooser_name in choosers else 0
chooser_combo.setCurrentIndex(i)
def on_chooser(x):
chooser_name = choosers[chooser_combo.currentIndex()]
self.config.set_key('coin_chooser', chooser_name)
chooser_combo.currentIndexChanged.connect(on_chooser)
tx_widgets.append((chooser_label, chooser_combo))
def on_unconf(x):
self.config.set_key('confirmed_only', bool(x))
conf_only = self.config.get('confirmed_only', False)
unconf_cb = QCheckBox(_('Spend only confirmed coins'))
unconf_cb.setToolTip(_('Spend only confirmed inputs.'))
unconf_cb.setChecked(conf_only)
unconf_cb.stateChanged.connect(on_unconf)
tx_widgets.append((unconf_cb, None))
def on_outrounding(x):
self.config.set_key('coin_chooser_output_rounding', bool(x))
enable_outrounding = self.config.get('coin_chooser_output_rounding', False)
outrounding_cb = QCheckBox(_('Enable output value rounding'))
outrounding_cb.setToolTip(
_('Set the value of the change output so that it has similar precision to the other outputs.') + '\n' +
_('This might improve your privacy somewhat.') + '\n' +
_('If enabled, at most 100 satoshis might be lost due to this, per transaction.'))
outrounding_cb.setChecked(enable_outrounding)
outrounding_cb.stateChanged.connect(on_outrounding)
tx_widgets.append((outrounding_cb, None))
# Fiat Currency
hist_checkbox = QCheckBox()
hist_capgains_checkbox = QCheckBox()
fiat_address_checkbox = QCheckBox()
ccy_combo = QComboBox()
ex_combo = QComboBox()
def update_currencies():
if not self.fx: return
currencies = sorted(self.fx.get_currencies(self.fx.get_history_config()))
ccy_combo.clear()
ccy_combo.addItems([_('None')] + currencies)
if self.fx.is_enabled():
ccy_combo.setCurrentIndex(ccy_combo.findText(self.fx.get_currency()))
def update_history_cb():
if not self.fx: return
hist_checkbox.setChecked(self.fx.get_history_config())
hist_checkbox.setEnabled(self.fx.is_enabled())
def update_fiat_address_cb():
if not self.fx: return
fiat_address_checkbox.setChecked(self.fx.get_fiat_address_config())
def update_history_capgains_cb():
if not self.fx: return
hist_capgains_checkbox.setChecked(self.fx.get_history_capital_gains_config())
hist_capgains_checkbox.setEnabled(hist_checkbox.isChecked())
def update_exchanges():
if not self.fx: return
b = self.fx.is_enabled()
ex_combo.setEnabled(b)
if b:
h = self.fx.get_history_config()
c = self.fx.get_currency()
exchanges = self.fx.get_exchanges_by_ccy(c, h)
else:
exchanges = self.fx.get_exchanges_by_ccy('USD', False)
ex_combo.blockSignals(True)
ex_combo.clear()
ex_combo.addItems(sorted(exchanges))
ex_combo.setCurrentIndex(ex_combo.findText(self.fx.config_exchange()))
ex_combo.blockSignals(False)
def on_currency(hh):
if not self.fx: return
b = bool(ccy_combo.currentIndex())
ccy = str(ccy_combo.currentText()) if b else None
self.fx.set_enabled(b)
if b and ccy != self.fx.ccy:
self.fx.set_currency(ccy)
update_history_cb()
update_exchanges()
self.update_fiat()
def on_exchange(idx):
exchange = str(ex_combo.currentText())
if self.fx and self.fx.is_enabled() and exchange and exchange != self.fx.exchange.name():
self.fx.set_exchange(exchange)
def on_history(checked):
if not self.fx: return
self.fx.set_history_config(checked)
update_exchanges()
self.history_model.refresh('on_history')
if self.fx.is_enabled() and checked:
self.fx.trigger_update()
update_history_capgains_cb()
def on_history_capgains(checked):
if not self.fx: return
self.fx.set_history_capital_gains_config(checked)
self.history_model.refresh('on_history_capgains')
def on_fiat_address(checked):
if not self.fx: return
self.fx.set_fiat_address_config(checked)
self.address_list.refresh_headers()
self.address_list.update()
update_currencies()
update_history_cb()
update_history_capgains_cb()
update_fiat_address_cb()
update_exchanges()
ccy_combo.currentIndexChanged.connect(on_currency)
hist_checkbox.stateChanged.connect(on_history)
hist_capgains_checkbox.stateChanged.connect(on_history_capgains)
fiat_address_checkbox.stateChanged.connect(on_fiat_address)
ex_combo.currentIndexChanged.connect(on_exchange)
fiat_widgets = []
fiat_widgets.append((QLabel(_('Fiat currency')), ccy_combo))
fiat_widgets.append((QLabel(_('Show history rates')), hist_checkbox))
fiat_widgets.append((QLabel(_('Show capital gains in history')), hist_capgains_checkbox))
fiat_widgets.append((QLabel(_('Show Fiat balance for addresses')), fiat_address_checkbox))
fiat_widgets.append((QLabel(_('Source')), ex_combo))
tabs_info = [
(fee_widgets, _('Fees')),
(tx_widgets, _('Transactions')),
(gui_widgets, _('Appearance')),
(fiat_widgets, _('Fiat')),
(id_widgets, _('Identity')),
]
for widgets, name in tabs_info:
tab = QWidget()
grid = QGridLayout(tab)
grid.setColumnStretch(0,1)
for a,b in widgets:
i = grid.rowCount()
if b:
if a:
grid.addWidget(a, i, 0)
grid.addWidget(b, i, 1)
else:
grid.addWidget(a, i, 0, 1, 2)
tabs.addTab(tab, name)
vbox.addWidget(tabs)
vbox.addStretch(1)
vbox.addLayout(Buttons(CloseButton(d)))
d.setLayout(vbox)
# run the dialog
d.exec_()
if self.fx:
self.fx.trigger_update()
self.alias_received_signal.disconnect(set_alias_color)
run_hook('close_settings_dialog')
if self.need_restart:
self.show_warning(_('Please restart Electrum to activate the new GUI settings'), title=_('Success'))
def closeEvent(self, event):
# It seems in some rare cases this closeEvent() is called twice
if not self.cleaned_up:
self.cleaned_up = True
self.clean_up()
event.accept()
def clean_up(self):
self.wallet.thread.stop()
if self.network:
self.network.unregister_callback(self.on_network)
self.network.unregister_callback(self.on_quotes)
self.network.unregister_callback(self.on_history)
self.config.set_key("is_maximized", self.isMaximized())
if not self.isMaximized():
g = self.geometry()
self.wallet.storage.put("winpos-qt", [g.left(),g.top(),
g.width(),g.height()])
self.config.set_key("console-history", self.console.history[-50:],
True)
if self.qr_window:
self.qr_window.close()
self.close_wallet()
self.gui_object.timer.timeout.disconnect(self.timer_actions)
self.gui_object.close_window(self)
def plugins_dialog(self):
self.pluginsdialog = d = WindowModalDialog(self, _('Electrum Plugins'))
plugins = self.gui_object.plugins
vbox = QVBoxLayout(d)
# plugins
scroll = QScrollArea()
scroll.setEnabled(True)
scroll.setWidgetResizable(True)
scroll.setMinimumSize(400,250)
vbox.addWidget(scroll)
w = QWidget()
scroll.setWidget(w)
w.setMinimumHeight(plugins.count() * 35)
grid = QGridLayout()
grid.setColumnStretch(0,1)
w.setLayout(grid)
settings_widgets = {}
def enable_settings_widget(p, name, i):
widget = settings_widgets.get(name)
if not widget and p and p.requires_settings():
widget = settings_widgets[name] = p.settings_widget(d)
grid.addWidget(widget, i, 1)
if widget:
widget.setEnabled(bool(p and p.is_enabled()))
def do_toggle(cb, name, i):
p = plugins.toggle(name)
cb.setChecked(bool(p))
enable_settings_widget(p, name, i)
run_hook('init_qt', self.gui_object)
for i, descr in enumerate(plugins.descriptions.values()):
full_name = descr['__name__']
prefix, _separator, name = full_name.rpartition('.')
p = plugins.get(name)
if descr.get('registers_keystore'):
continue
try:
cb = QCheckBox(descr['fullname'])
plugin_is_loaded = p is not None
cb_enabled = (not plugin_is_loaded and plugins.is_available(name, self.wallet)
or plugin_is_loaded and p.can_user_disable())
cb.setEnabled(cb_enabled)
cb.setChecked(plugin_is_loaded and p.is_enabled())
grid.addWidget(cb, i, 0)
enable_settings_widget(p, name, i)
cb.clicked.connect(partial(do_toggle, cb, name, i))
msg = descr['description']
if descr.get('requires'):
msg += '\n\n' + _('Requires') + ':\n' + '\n'.join(map(lambda x: x[1], descr.get('requires')))
grid.addWidget(HelpButton(msg), i, 2)
except Exception:
self.print_msg("error: cannot display plugin", name)
traceback.print_exc(file=sys.stdout)
grid.setRowStretch(len(plugins.descriptions.values()), 1)
vbox.addLayout(Buttons(CloseButton(d)))
d.exec_()
def cpfp(self, parent_tx, new_tx):
total_size = parent_tx.estimated_size() + new_tx.estimated_size()
d = WindowModalDialog(self, _('Child Pays for Parent'))
vbox = QVBoxLayout(d)
msg = (
"A CPFP is a transaction that sends an unconfirmed output back to "
"yourself, with a high fee. The goal is to have miners confirm "
"the parent transaction in order to get the fee attached to the "
"child transaction.")
vbox.addWidget(WWLabel(_(msg)))
msg2 = ("The proposed fee is computed using your "
"fee/kB settings, applied to the total size of both child and "
"parent transactions. After you broadcast a CPFP transaction, "
"it is normal to see a new unconfirmed transaction in your history.")
vbox.addWidget(WWLabel(_(msg2)))
grid = QGridLayout()
grid.addWidget(QLabel(_('Total size') + ':'), 0, 0)
grid.addWidget(QLabel('%d bytes'% total_size), 0, 1)
max_fee = new_tx.output_value()
grid.addWidget(QLabel(_('Input amount') + ':'), 1, 0)
grid.addWidget(QLabel(self.format_amount(max_fee) + ' ' + self.base_unit()), 1, 1)
output_amount = QLabel('')
grid.addWidget(QLabel(_('Output amount') + ':'), 2, 0)
grid.addWidget(output_amount, 2, 1)
fee_e = BTCAmountEdit(self.get_decimal_point)
# FIXME with dyn fees, without estimates, there are all kinds of crashes here
def f(x):
a = max_fee - fee_e.get_amount()
output_amount.setText((self.format_amount(a) + ' ' + self.base_unit()) if a else '')
fee_e.textChanged.connect(f)
fee = self.config.fee_per_kb() * total_size / 1000
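# e.g. (illustrative numbers) fee_per_kb() = 10000 sat/kB and total_size = 500 bytes give an initial fee of 5000 sat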
fee_e.setAmount(fee)
grid.addWidget(QLabel(_('Fee') + ':'), 3, 0)
grid.addWidget(fee_e, 3, 1)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * total_size / 1000
fee = min(max_fee, fee)
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
fee_slider.update()
grid.addWidget(fee_slider, 4, 1)
vbox.addLayout(grid)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
fee = fee_e.get_amount()
if fee > max_fee:
self.show_error(_('Max fee exceeded'))
return
new_tx = self.wallet.cpfp(parent_tx, fee)
new_tx.set_rbf(True)
self.show_transaction(new_tx)
def bump_fee_dialog(self, tx):
is_relevant, is_mine, v, fee = self.wallet.get_wallet_delta(tx)
if fee is None:
self.show_error(_("Can't bump fee: unknown fee for original transaction."))
return
tx_label = self.wallet.get_label(tx.txid())
tx_size = tx.estimated_size()
d = WindowModalDialog(self, _('Bump Fee'))
vbox = QVBoxLayout(d)
vbox.addWidget(WWLabel(_("Increase your transaction's fee to improve its position in mempool.")))
vbox.addWidget(QLabel(_('Current fee') + ': %s'% self.format_amount(fee) + ' ' + self.base_unit()))
vbox.addWidget(QLabel(_('New fee') + ':'))
fee_e = BTCAmountEdit(self.get_decimal_point)
fee_e.setAmount(fee * 1.5)
vbox.addWidget(fee_e)
def on_rate(dyn, pos, fee_rate):
fee = fee_rate * tx_size / 1000
fee_e.setAmount(fee)
fee_slider = FeeSlider(self, self.config, on_rate)
vbox.addWidget(fee_slider)
cb = QCheckBox(_('Final'))
vbox.addWidget(cb)
vbox.addLayout(Buttons(CancelButton(d), OkButton(d)))
if not d.exec_():
return
is_final = cb.isChecked()
new_fee = fee_e.get_amount()
delta = new_fee - fee
if delta < 0:
self.show_error("fee too low")
return
try:
new_tx = self.wallet.bump_fee(tx, delta)
except CannotBumpFee as e:
self.show_error(str(e))
return
if is_final:
new_tx.set_rbf(False)
self.show_transaction(new_tx, tx_label)
def save_transaction_into_wallet(self, tx):
win = self.top_level_window()
try:
if not self.wallet.add_transaction(tx.txid(), tx):
win.show_error(_("Transaction could not be saved.") + "\n" +
_("It conflicts with current history."))
return False
except AddTransactionException as e:
win.show_error(e)
return False
else:
self.wallet.save_transactions(write=True)
# need to update at least: history_list, utxo_list, address_list
self.need_update.set()
msg = (_("Transaction added to wallet history.") + '\n\n' +
_("Note: this is an offline transaction, if you want the network "
"to see it, you need to broadcast it."))
win.msg_box(QPixmap(":icons/offline_tx.png"), None, _('Success'), msg)
return True
|
the-stack_0_23222 | import asyncio
import logging
from json import dump, load
from pathlib import Path
from typing import Union, List, Optional, Dict
import aiohttp
from discord import Member, Embed, Colour, TextChannel
from discord.ext.commands import Cog, Bot, group, Context
logger = logging.getLogger("bot." + __name__)
class LichessAPI(Cog):
"""
Retrieve data from the lichess.com API.
"""
def __init__(self, bot: Bot) -> None:
self.bot = bot
self.json_file = Path("ynb-bot", "resources", "lichess.json")
self.linked_users = self.get_linked_users()
@staticmethod
async def fetch(session: aiohttp.ClientSession, url: str, params=None) -> Union[dict, None]:
headers: dict = {
"Accept": 'application/json'
}
try:
async with session.get(url=url, params=params, headers=headers) as response:
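# content_type=None disables aiohttp's strict content-type check, so the body is parsed as JSON even if the server does not report application/json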
return await response.json(content_type=None)
except Exception as e:
logger.error(f"API request error: {e}")
return None
async def _get_user(self, username: str) -> Union[dict, None]:
"""Fetch User details."""
url: str = f"https://lichess.org/api/user/{username}"
async with aiohttp.ClientSession() as session:
response: Union[dict, None] = await self.fetch(session, url)
return response
@group(name="lichess", invoke_without_command=True)
async def lichess(self, ctx: Context) -> None:
"""Contains commands that access lichess API."""
await ctx.send_help(ctx.command)
@lichess.command(name="link")
async def link_account(self, ctx: Context, username: str) -> None:
"""Link lichess account with your discord."""
linked_usernames: List[str] = list(self.linked_users.keys())
if username.lower() in linked_usernames:
linked_discord_user: Member = self.bot.get_user(self.linked_users[username.lower()])
await ctx.send(f"```{username} is already linked with discord user {linked_discord_user}```")
return
user: Union[dict, None] = await self._get_user(username)
if user:
self.linked_users[username.lower()] = ctx.author.id
self.update_linked_users()
await ctx.send("```Account Linked Successfully.```")
else:
await ctx.send("```Invalid Username.```")
return
@lichess.command(name="unlink")
async def unlink_account(self, ctx: Context) -> None:
"""Link lichess account with your discord."""
linked_discord_ids: List[int] = list(self.linked_users.values())
if ctx.author.id not in linked_discord_ids:
await ctx.send("Your discord is not linked to a lichess account.")
return
self.linked_users = {key: val for key, val in self.linked_users.items() if val != ctx.author.id}
self.update_linked_users()
await ctx.send("```Account Unlinked Successfully.```")
@lichess.command(name="showall")
async def show_all_linked_users(self, ctx: Context) -> None:
"""Display all linked users."""
msg: str = "```Lichess Username - Discord Account\n\n"
for lichess_username, discord_id in self.linked_users.items():
discord_member = self.bot.get_user(discord_id)
msg += f"{lichess_username} - {discord_member}\n"
msg += "```"
await ctx.send(msg)
@lichess.command(name="info")
async def account_information(self, ctx, discord_user: Optional[Member], lichess_username: str = None) -> None:
"""
Display Lichess account information.
target: lichess username or tag the discord user
"""
if not lichess_username and not discord_user:
await ctx.send("```Lichess username or tag a discord user is a required parameter.```")
return
if lichess_username and not discord_user:
user: Union[dict, None] = await self._get_user(lichess_username)
else:
try:
lichess_username = {val: key for key, val in self.linked_users.items()}[discord_user.id]
except KeyError:
await ctx.send(f"```{discord_user} is not linked to a lichess account.```")
return
else:
user: Union[dict, None] = await self._get_user(lichess_username)
if not user:
await ctx.send(f"```User not found.```")
return
embed: Embed = self.generate_user_embed(user)
await ctx.send(embed=embed)
@staticmethod
def generate_user_embed(user: dict) -> Embed:
"""Generate embed for Lichess user profile."""
username: str = user["username"]
playtime: int = user["playTime"]["total"] // 60 # converting seconds to mintues
no_of_following: int = user["nbFollowing"]
no_of_followers: int = user["nbFollowers"]
no_of_games: int = user["count"]["all"]
wins: int = user["count"]["win"]
losses: int = user["count"]["loss"]
draws: int = user["count"]["draw"]
url: str = user["url"]
embed: Embed = Embed(color=Colour.red())
embed.title = f"```{username} Profile```"
embed.url = url
embed.description = f"```Total play time: {playtime} Minutes\n" \
f"Followers: {no_of_followers}\n" \
f"Following: {no_of_following}\n\n" \
f"Total Number of Games: {no_of_games}\n" \
f"Wins: {wins}\n" \
f"Losses: {losses}\n" \
f"Draws: {draws}\n\n```"
embed.description += f"**Game Modes**\n"
for game_mode, stats in user["perfs"].items():
if stats["games"] != 0:
rating: int = stats["rating"]
embed.description += f"```**{game_mode}**\n" \
f"Games: {stats['games']}\n" \
f"Rating: {rating}```"
return embed
def get_linked_users(self) -> dict:
"""Get linked users from json file."""
logger.info("Fetching Lichess linked accounts.")
with self.json_file.open() as f:
data: dict = load(f)
return data
def update_linked_users(self) -> None:
"""Update json file containing user data."""
logger.info("Updating Lichess json file.")
with self.json_file.open("w") as f:
dump(self.linked_users, f, indent=2)
async def get_ongoing_games(self) -> None:
"""Check status of each user and send link for on-going games."""
logger.info("Lichess - Get Ongoing Games loop running...")
chess_channel: TextChannel = self.bot.get_channel(self.bot.conf["CHESS_CHANNEL_ID"])
games: list = []
while not self.bot.is_closed():
async with aiohttp.ClientSession() as session:
usernames: str = ",".join(list(self.linked_users.keys()))
url: str = "https://lichess.org/api/users/status"
params: dict = {
"ids": usernames
}
all_users_status: dict = await self.fetch(session, url, params)
if all_users_status is not None:
for user_status in all_users_status:
if "playing" in user_status:
fetch_game_url: str = f"https://lichess.org/api/user/{user_status['name']}/current-game"
response: Union[dict, None] = await self.fetch(session, fetch_game_url)
if not response:
continue
game_id: int = response["id"]
game_url: str = f"https://lichess.org/{game_id}"
if game_url not in games:
games.append(game_url)
msg: str = f"On Going Match! Spectate now!\n{game_url}"
logger.info(f"Lichess Live game found: {game_url}")
await chess_channel.send(msg)
await asyncio.sleep(10)
def setup(bot: Bot) -> None:
bot.loop.create_task(LichessAPI(bot).get_ongoing_games())
bot.add_cog(LichessAPI(bot))
logger.info("LichessAPI cog loaded.")
|
the-stack_0_23225 | #! /usr/bin/env python
'''
SimpleStyleTransfer - Transfer styles onto source frames, mixing between
successive style frames.
See https://github.com/tensorflow/magenta/tree/master/magenta/models/arbitrary_image_stylization
for more specifics and greater customizability.
Based on Remideza's MichelStyle project, https://github.com/Remideza/MichelStyle
By Remideza, Evan Jones, 2020
'''
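# Example invocation (hypothetical paths and script name, shown only to illustrate the CLI defined in parse_all_args):
#   python SimpleStyleTransfer.py --source frames/ --styles styles/ \
#       -o styled_frames -v out.mp4 -a soundtrack.mp3 -f 24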
import argparse
import os
from pathlib import Path
import sys
import time
# TODO: these imports take a long time. Import inline to decrease startup time?
import cv2
import tensorflow as tf
import numpy as np
import tensorflow_hub
import moviepy.editor
import PIL
# ==============
# = TYPE HINTS =
# ==============
from typing import List, Dict, Sequence, Tuple, Any
class CVImage(np.ndarray): pass
STYLE_SIZE = (256, 256)
def main():
args = parse_all_args()
total_elapsed = style_video(source_dir_or_image=args.source,
styles_dir_or_tsv=args.styles,
output_dir=args.output_frames)
print(f'Wrote frames to {args.output_frames}')
if args.video:
vid_start = time.time()
output_path = write_video_file(frames_dir=args.output_frames,
output_path=args.video,
fps=args.fps,
audio_path=args.audio)
total_elapsed += (time.time() - vid_start)
print(f'Total time: {total_elapsed/60:.1f} minutes')
def parse_all_args(args_in=None):
''' Set up argparser and return a namespace with named
values from the command line arguments.
If help is requested (-h / --help) the help message will be printed
and the program will exit.
'''
program_description = '''Output a video with styles transferred onto each frame'''
parser = argparse.ArgumentParser( description=program_description,
formatter_class=argparse.HelpFormatter)
# Replace these with your arguments below
parser.add_argument('--source', type=Path, required=True,
help=('A directory containing frames of a movie sequence that should have '
'styles applied to them. Frame numbers start at 1. A single image may also '
'be supplied'))
parser.add_argument( '--styles', type=Path, required=False,
help=('A directory containing image files to take styles from. Each '
'image filename should be the number of the frame where that style is most strongly applied.'))
parser.add_argument( '-st', '--styles_tsv', type=Path, required=False,
help=('Path to a tsv file describing which style images to apply to '
'which output frame. Format is: "24\t/PATH/TO/IMAGE.png"'))
parser.add_argument('-o','--output_frames', type=Path, default=Path('styled_frames'),
help='Path to an output directory where stylized frames will be written. Default: "%(default)s"')
parser.add_argument('-v', '--video', type=Path,
help='Path to an MP4 output file.')
parser.add_argument('-a', '--audio', type=Path, default=None,
help='Path to an MP3 file. If specified, it will be added to the '
'generated video')
parser.add_argument('-f', '--fps', type=int, default=24,
help='Destination frame rate. Default: %(default)s')
# TODO: support this option
# parser.add_argument('--force_overwrite', action='store_true', default=False,
# help=('If specified, force already-written files in the OUTPUT_FRAMES directory '
# 'to be overwritten' ))
# print help if too few args are supplied
if len(sys.argv) <= 2:
sys.argv.append('--help')
# If args_in isn't specified, args will be taken from sys.argv
args = parser.parse_args(args_in)
# Specified TSV file overrides styles directory
if args.styles_tsv:
args.styles = args.styles_tsv
# Validation:
if not args.source.exists():
raise ValueError(f"Specified source '{args.source}'' doesn't exist")
ensure_dir(args.output_frames)
return args
def style_video(source_dir_or_image: Path,
styles_dir_or_tsv: Path,
output_dir:Path) -> float:
total_start_time = time.time()
params = calculate_styling_params(source_dir_or_image, styles_dir_or_tsv)
hub_module = get_tf_hub()
print('Transferring styles...\n\n')
frame_count = len(params)
style_images: Dict[Path, CVImage] = {}
single_source_file = is_image(source_dir_or_image)
if single_source_file:
source_image = frame_image(source_dir_or_image, as_float=True)
for frame, (source_path, style_a_path, style_b_path, style_ratio) in params.items():
start_time = time.time()
output_path = output_dir / f'{frame}.jpg'
if not single_source_file:
source_image = frame_image(source_path, as_float=True)
style_a_image = style_images.setdefault(style_a_path, frame_image(style_a_path, destination_size=STYLE_SIZE))
style_b_image = style_images.setdefault(style_b_path, frame_image(style_b_path, destination_size=STYLE_SIZE))
stylized_image = transfer_styles(source_image, style_a_image, style_b_image, style_ratio, hub_module)
stylized_image.save(output_path)
infoscreen(frame, frame_count, time.time() - start_time)
return time.time() - total_start_time
def calculate_styling_params(source_dir_or_image: Path,
styles_dir_or_tsv: Path,) -> Dict[int, Tuple[Path, Path, Path, float]]:
params: Dict[int, Tuple[Path, Path, Path, float]] = {}
source_frame_paths: Dict[int, Path]
style_frame_paths: Dict[int, Path]
# Figure out how many frames we'll need
source_frame_paths = numbered_images_dict(source_dir_or_image)
if styles_dir_or_tsv.is_file():
style_frame_paths = parse_frames_tsv(styles_dir_or_tsv)
elif styles_dir_or_tsv.is_dir():
style_frame_paths = numbered_images_dict(styles_dir_or_tsv)
else:
raise ValueError(f'styles_dir_or_tsv should be a directory or a .tsv '
f'file. It is: {styles_dir_or_tsv}')
style_frame_numbers = sorted(style_frame_paths.keys())
source_frame_numbers = sorted(source_frame_paths.keys())
first_source_frame, last_source_frame = source_frame_numbers[0], source_frame_numbers[-1]
first_style_frame, last_style_frame = style_frame_numbers[0], style_frame_numbers[-1]
style_args: Dict[int, Tuple[Path, Path, float]]= {}
# TODO: get frame lengths from movies, too.
# TODO: Handle missing source frames, e.g. 1.jpg & 3.jpg exist, but not 2.jpg
frame_count = last_style_frame
if len(source_frame_numbers) == 1:
source_path = source_frame_paths[first_source_frame]
source_frame_paths = dict({f: source_path for f in range(1,last_style_frame + 1)})
# Insert beginning and end elements in the style transitions so the
# entire frame range is covered by a pair of style images
if first_style_frame != 1:
style_frame_paths[1] = style_frame_paths[first_style_frame]
if last_style_frame != frame_count:
style_frame_paths[frame_count] = style_frame_paths[last_style_frame]
style_transitions = sorted(list(style_frame_paths.keys()))
transition_pairs = zip(style_transitions[:-1], style_transitions[1:])
for start_frame, end_frame in transition_pairs:
style_a_path = style_frame_paths[start_frame]
style_b_path = style_frame_paths[end_frame]
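# style_ratio grows linearly from 0 at start_frame to 1 at end_frame, e.g. a quarter of the way
# through the transition the blend is 75% style A and 25% style B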
for frame in range(start_frame, end_frame + 1):
# if frame == start_frame, we will have just calculated its params
# for the previous start_frame/end_frame pair; skip this step
if frame in params:
continue
style_ratio = (frame - start_frame)/(end_frame - start_frame)
params[frame] = (source_frame_paths[frame], style_a_path, style_b_path, style_ratio)
return params
def transfer_styles(source_image_as_floats:CVImage,
style_a:CVImage,
style_b: CVImage=None,
style_ratio:float=0,
hub_module: Any=None) -> CVImage:
# Style source_image_as_floats with a single other image, or with an affine
# combination of two images.
# Note that style_ratio should be in [0,1] and represents the
# proportion of ** style_b ** used in the styling.
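# e.g. style_ratio=0.25 yields 0.75*style_a + 0.25*style_b via cv2.addWeighted below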
hub_module = hub_module or get_tf_hub()
stylized_image: CVImage
if style_b is not None and style_ratio != 0:
style_image = cv2.addWeighted(style_a, 1-style_ratio, style_b, style_ratio, 0.0)
else:
style_image = style_a
style_image = cv2.cvtColor(style_image, cv2.COLOR_BGR2RGB)
style_image = style_image.astype(np.float32)[np.newaxis, ...] / 255.0
outputs = hub_module(tf.constant(source_image_as_floats), tf.constant(style_image))
stylized_image = tensor_to_image(outputs[0])
return stylized_image
def write_video_file(frames_dir: Path, output_path: Path=None, fps=24, audio_path:Path=None) -> Path:
'''
Writes all the numbered frames in frames_dir to an mp4
'''
if output_path is None:
output_path = Path(__file__).parent / 'video_out.mp4'
output_path = output_path.resolve().with_suffix('.mp4')
out_posix = output_path.as_posix()
frames_paths = numbered_images_dict(frames_dir)
# assume sizes of all generated frames are the same, and get size from
# a random frame
random_frame = list(frames_paths.values())[0].as_posix()
HEIGHT, WIDTH, channels = cv2.imread(random_frame, 1).shape
OUTPUT_SIZE = (WIDTH * 2,HEIGHT * 2)
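# frames are upscaled to twice their resolution when written to the video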
video = cv2.VideoWriter(out_posix, cv2.VideoWriter_fourcc(*"mp4v"), fps, OUTPUT_SIZE)
print("Compiling video ...")
frames_count = len(frames_paths)
for frame_num in sorted(frames_paths.keys()):
frame_path = frames_paths[frame_num].as_posix()
sys.stdout.flush()
sys.stdout.write(f'{frame_num}/{frames_count}\r')
video.write(cv2.resize(cv2.imread(frame_path, 1), OUTPUT_SIZE))
video.release()
if audio_path is not None:
mp_video = moviepy.editor.VideoFileClip(out_posix)
mp_video.write_videofile(out_posix, fps=fps, audio=audio_path.as_posix())
print(f'Wrote {frames_count} frames to video at {output_path}')
return output_path
# ===========
# = HELPERS =
# ===========
def get_tf_hub():
# TODO: cache this ~80MB file somewhere so that every run doesn't download it again.
# OR maybe that already happens? See https://www.tensorflow.org/hub/tf2_saved_model
TF_HUB = tensorflow_hub.load('https://tfhub.dev/google/magenta/arbitrary-image-stylization-v1-256/2')
# importing hub_module prints some debug info to my screen. Remove that
clear_screen()
return TF_HUB
def tensor_to_image(tensor):
tensor = tensor*255
tensor = np.array(tensor, dtype=np.uint8)
if np.ndim(tensor)>3:
assert tensor.shape[0] == 1
tensor = tensor[0]
return PIL.Image.fromarray(tensor)
def ensure_dir(dir_name:Path) -> bool:
if dir_name.exists():
if dir_name.is_dir():
return True
else:
raise ValueError(f'{dir_name} exists, but is not a directory')
else:
dir_name.mkdir()
return True
def clear_screen():
if sys.platform == 'darwin':
os.system('clear')
else:
os.system('cls')
def infoscreen(frame: int, total_frames:int, frame_elapsed: float):
minutes_left = (frame_elapsed * (total_frames - frame))/60
line = f" SimpleStyleTransfer - {frame}/{total_frames}"
marquee = '='*(len(line) + 3)
clear_screen()
print(marquee)
print(line)
print(marquee)
print()
# print(f"------------------------------------")
# print(f" SimpleStyleTransfer - {frame}/{total_frames}")
# print(f"------------------------------------")
print()
print(f"{minutes_left:.1f} minutes remaining")
def is_image(image:Path) -> bool:
IMAGE_SUFFIXES = ('.jpg', '.jpeg', '.png', '.gif')
return image.exists() and image.is_file() and image.suffix in IMAGE_SUFFIXES
def numbered_images_dict(a_dir: Path) -> Dict[int, Path]:
result: Dict[int, Path] = {}
# If a_dir is an image file, not a directory, we'll just return a single
# element dict
if is_image(a_dir):
result = {1: a_dir}
elif a_dir.is_dir():
for f in a_dir.iterdir():
# TODO: maybe accept files with a number anyplace in the stem?
if f.stem.isdigit() and is_image(f):
result[int(f.stem)] = f
else:
raise ValueError(f'argument {a_dir} is neither a directory nor an image '
'file we know how to handle')
return result
def frame_image(path:Path, as_float:bool = False, destination_size:Tuple[int, int]=None) -> CVImage:
img = cv2.imread(path.as_posix(), cv2.IMREAD_COLOR)
if as_float:
img = img.astype(np.float32)[np.newaxis, ...] / 255.0
if destination_size is not None:
img = cv2.resize(img, destination_size)
return img
def parse_frames_tsv(path:Path) -> Dict[int, Path]:
'''
Opens a file of the format:
1\t<path_to_style_img.[png|jpg]>\n
24\t<path_to_other_img.[png|jpg]>
and returns a dictionary of frame_number:Path entries
'''
result: Dict[int, Path] = {}
lines = path.read_text().splitlines()
for l in lines:
if len(l) > 1:
try:
frame_num, path_str = l.split('\t', maxsplit=1)
style_path = Path(path_str)
if not (style_path.exists() and style_path.is_file()):
raise ValueError(f'File at ${style_path} not found')
result[int(frame_num)] = style_path
except Exception as e:
print(f'Exception {e}. Skipping line: "{l}"')
return result
if __name__ == '__main__':
main() |
the-stack_0_23227 | import numpy as np
import pickle
from utils.dataset import ListDataset
import constants
import vis_utils
from model.LehnertGridworldModelLatent import LehnertGridworldModelLatent
import config_constants as cc
from runners.runner import Runner
import model.utils as model_utils
class LehnertGridworldLatentRunner(Runner):
def __init__(self, runner_config, model_config):
super(LehnertGridworldLatentRunner, self).__init__()
self.model_config = model_config
self.load_path = runner_config[cc.LOAD_PATH]
self.saver = runner_config[cc.SAVER]
self.logger = runner_config[cc.LOGGER]
self.oversample = runner_config[cc.OVERSAMPLE]
self.validation_fraction = runner_config[cc.VALIDATION_FRACTION]
self.validation_freq = runner_config[cc.VALIDATION_FREQ]
self.batch_size = runner_config[cc.BATCH_SIZE]
self.load_model_path = runner_config[cc.LOAD_MODEL_PATH]
self.num_steps = runner_config[cc.NUM_STEPS]
self.data_limit = runner_config[cc.DATA_LIMIT]
def setup(self):
self.prepare_dataset_()
self.prepare_model_()
def evaluate_and_visualize(self):
self.inference_()
self.plot_latent_space_()
def inference_(self):
self.train_embeddings = self.model.encode(self.dataset[constants.STATES])
self.valid_embeddings = self.model.encode(self.valid_dataset[constants.STATES])
def plot_latent_space_(self):
fig = model_utils.transform_and_plot_embeddings(
self.valid_embeddings[:200], self.valid_dataset[constants.STATE_LABELS][:200], num_components=2,
use_colorbar=False
)
self.saver.save_figure(fig, "embeddings")
def main_training_loop(self):
self.prepare_losses_()
if self.load_model_path is None:
for train_step in range(self.num_steps):
if train_step % self.validation_freq == 0:
tmp_valid_losses = self.model.validate(
self.valid_dataset[constants.STATES], self.valid_dataset[constants.ACTIONS],
self.valid_dataset[constants.Q_VALUES]
)
self.valid_losses.append(np.mean(tmp_valid_losses, axis=0))
epoch_step = train_step % self.epoch_size
if train_step > 0 and epoch_step == 0:
self.logger.info("training step {:d}".format(train_step))
self.dataset.shuffle()
if len(self.tmp_epoch_losses) > 0:
self.add_epoch_losses_()
feed_dict = self.get_feed_dict_(epoch_step)
bundle = self.model.session.run(self.to_run, feed_dict=feed_dict)
to_add = [bundle[constants.TOTAL_LOSS], bundle[constants.Q_LOSS], bundle[constants.ENTROPY_LOSS]]
self.add_step_losses_(to_add, train_step)
if len(self.tmp_epoch_losses) > 0:
self.add_epoch_losses_()
self.postprocess_losses_()
self.plot_losses_()
def plot_losses_(self):
train_indices = [1]
valid_indices = [0]
train_labels = ["train q loss"]
valid_labels = ["valid q loss"]
if len(self.per_epoch_losses) > 0:
vis_utils.plot_epoch_losses(
self.per_epoch_losses, self.valid_losses, train_indices, valid_indices, train_labels,
valid_labels, self.epoch_size, self.validation_freq, saver=self.saver, save_name="epoch_losses"
)
if len(self.all_losses) > 0:
vis_utils.plot_all_losses(
self.all_losses, self.valid_losses, train_indices, valid_indices, train_labels,
valid_labels, self.validation_freq, saver=self.saver, save_name="losses"
)
vis_utils.plot_all_losses(
self.all_losses, self.valid_losses, [2], [1], ["train encoder entropy"], ["valid encoder entropy"],
self.validation_freq, saver=self.saver, save_name="entropy", log_scale=False
)
def get_feed_dict_(self, epoch_step):
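# np.index_exp builds an index expression (a tuple containing a slice) selecting the current mini-batch from the shuffled dataset arrays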
b = np.index_exp[epoch_step * self.batch_size:(epoch_step + 1) * self.batch_size]
feed_dict = {
self.model.states_pl: self.dataset[constants.STATES][b],
self.model.actions_pl: self.dataset[constants.ACTIONS][b],
self.model.qs_pl: self.dataset[constants.Q_VALUES][b]
}
return feed_dict
def prepare_dataset_(self):
with open(self.load_path, "rb") as file:
transitions = pickle.load(file)
if self.data_limit is not None:
transitions = transitions[:self.data_limit]
self.dataset = self.transitions_to_dataset_(transitions)
vis_utils.plot_many_histograms(
[self.dataset[constants.REWARDS], self.dataset[constants.STATE_LABELS],
self.dataset[constants.NEXT_STATE_LABELS]],
["rewards_hist", "state_labels_hist", "next_state_labels_hist"], xlabel="items",
num_bins=10, saver=self.saver
)
if self.oversample:
self.dataset.oversample((self.dataset[constants.REWARDS] > 0))
self.dataset.shuffle()
self.num_samples = self.dataset.size
self.valid_samples = int(self.num_samples * self.validation_fraction)
self.valid_dataset = self.dataset.split(self.valid_samples)
self.epoch_size = self.dataset[constants.STATES].shape[0] // self.batch_size
self.log_dataset_stats_()
def log_dataset_stats_(self):
for dataset, name in zip([self.dataset, self.valid_dataset], ["train", "valid"]):
self.logger.info("actions: min {:d} max {:d}".format(
np.min(dataset[constants.ACTIONS]), np.max(dataset[constants.ACTIONS]))
)
self.logger.info("rewards: min {:.0f} max {:.0f}".format(
np.min(dataset[constants.REWARDS]), np.max(dataset[constants.REWARDS]))
)
self.logger.info("state labels: min {:d} max {:d}".format(
np.min(dataset[constants.STATE_LABELS]), np.max(dataset[constants.STATE_LABELS]))
)
@staticmethod
def transitions_to_dataset_(transitions):
dataset = ListDataset()
for t in transitions:
dataset.add(constants.STATES, t.state)
dataset.add(constants.ACTIONS, t.action)
dataset.add(constants.REWARDS, t.reward)
dataset.add(constants.NEXT_STATES, t.next_state)
dataset.add(constants.Q_VALUES, t.q_values / 10.0)
dataset.add(constants.NEXT_Q_VALUES, t.next_q_values / 10.0)
dataset.add(constants.STATE_LABELS, t.state_block)
dataset.add(constants.NEXT_STATE_LABELS, t.next_state_block)
types_dict = {
constants.STATES: np.float32, constants.ACTIONS: np.int32, constants.REWARDS: np.float32,
constants.NEXT_STATES: np.float32, constants.STATE_LABELS: np.int32, constants.NEXT_STATE_LABELS: np.int32,
constants.Q_VALUES: np.float32, constants.NEXT_Q_VALUES: np.float32
}
return dataset.to_array_dataset(types_dict)
def prepare_model_(self):
self.model = LehnertGridworldModelLatent(self.model_config)
self.model.build()
self.model.start_session()
if self.load_model_path is not None:
self.model.load(self.load_model_path)
self.to_run = {
constants.TRAIN_STEP: self.model.train_step,
constants.TOTAL_LOSS: self.model.loss_t,
constants.Q_LOSS: self.model.q_loss_t,
constants.ENTROPY_LOSS: self.model.encoder_entropy_t
}
|
the-stack_0_23228 | #!/usr/bin/env python3
# Copyright (c) 2014-2016 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
'''
assumevalid.py
Test logic for skipping signature validation on blocks which we've assumed
valid (https://github.com/massgrid/massgrid/pull/9484)
We build a chain that includes an invalid signature for one of the
transactions:
0: genesis block
1: block 1 with coinbase transaction output.
2-101: bury that block with 100 blocks so the coinbase transaction
output can be spent
102: a block containing a transaction spending the coinbase
transaction output. The transaction has an invalid signature.
103-2202: bury the bad block with just over two weeks' worth of blocks
(2100 blocks)
Start three nodes:
- node0 has no -assumevalid parameter. Try to sync to block 2202. It will
reject block 102 and only sync as far as block 101
- node1 has -assumevalid set to the hash of block 102. Try to sync to
block 2202. node1 will sync all the way to block 2202.
- node2 has -assumevalid set to the hash of block 102. Try to sync to
block 200. node2 will reject block 102 since it's assumed valid, but it
isn't buried by at least two weeks' work.
'''
from test_framework.mininode import *
from test_framework.test_framework import MassGridTestFramework
from test_framework.util import *
from test_framework.blocktools import create_block, create_coinbase
from test_framework.key import CECKey
from test_framework.script import *
class BaseNode(SingleNodeConnCB):
def __init__(self):
SingleNodeConnCB.__init__(self)
self.last_inv = None
self.last_headers = None
self.last_block = None
self.last_getdata = None
self.block_announced = False
self.last_getheaders = None
self.disconnected = False
self.last_blockhash_announced = None
def on_close(self, conn):
self.disconnected = True
def wait_for_disconnect(self, timeout=60):
test_function = lambda: self.disconnected
assert(wait_until(test_function, timeout=timeout))
return
def send_header_for_blocks(self, new_blocks):
headers_message = msg_headers()
headers_message.headers = [ CBlockHeader(b) for b in new_blocks ]
self.send_message(headers_message)
class SendHeadersTest(MassGridTestFramework):
def __init__(self):
super().__init__()
self.setup_clean_chain = True
self.num_nodes = 3
def setup_network(self):
# Start node0. We don't start the other nodes yet since
# we need to pre-mine a block with an invalid transaction
# signature so we can pass in the block hash as assumevalid.
self.nodes = []
self.nodes.append(start_node(0, self.options.tmpdir, ["-debug"]))
def run_test(self):
# Connect to node0
node0 = BaseNode()
connections = []
connections.append(NodeConn('127.0.0.1', p2p_port(0), self.nodes[0], node0))
node0.add_connection(connections[0])
NetworkThread().start() # Start up network handling in another thread
node0.wait_for_verack()
# Build the blockchain
self.tip = int(self.nodes[0].getbestblockhash(), 16)
self.block_time = self.nodes[0].getblock(self.nodes[0].getbestblockhash())['time'] + 1
self.blocks = []
# Get a pubkey for the coinbase TXO
coinbase_key = CECKey()
coinbase_key.set_secretbytes(b"horsebattery")
coinbase_pubkey = coinbase_key.get_pubkey()
# Create the first block with a coinbase output to our key
height = 1
block = create_block(self.tip, create_coinbase(height, coinbase_pubkey), self.block_time)
self.blocks.append(block)
self.block_time += 1
block.solve()
# Save the coinbase for later
self.block1 = block
self.tip = block.sha256
height += 1
# Bury the block 100 deep so the coinbase output is spendable
for i in range(100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Create a transaction spending the coinbase output with an invalid (null) signature
tx = CTransaction()
tx.vin.append(CTxIn(COutPoint(self.block1.vtx[0].sha256, 0), scriptSig=b""))
tx.vout.append(CTxOut(49*100000000, CScript([OP_TRUE])))
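# 49 coins expressed in satoshis, paid to an anyone-can-spend OP_TRUE output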
tx.calc_sha256()
block102 = create_block(self.tip, create_coinbase(height), self.block_time)
self.block_time += 1
block102.vtx.extend([tx])
block102.hashMerkleRoot = block102.calc_merkle_root()
block102.rehash()
block102.solve()
self.blocks.append(block102)
self.tip = block102.sha256
self.block_time += 1
height += 1
# Bury the assumed valid block 2100 deep
for i in range(2100):
block = create_block(self.tip, create_coinbase(height), self.block_time)
block.nVersion = 4
block.solve()
self.blocks.append(block)
self.tip = block.sha256
self.block_time += 1
height += 1
# Start node1 and node2 with assumevalid so they accept a block with a bad signature.
self.nodes.append(start_node(1, self.options.tmpdir,
["-debug", "-assumevalid=" + hex(block102.sha256)]))
node1 = BaseNode() # connects to node1
connections.append(NodeConn('127.0.0.1', p2p_port(1), self.nodes[1], node1))
node1.add_connection(connections[1])
node1.wait_for_verack()
self.nodes.append(start_node(2, self.options.tmpdir,
["-debug", "-assumevalid=" + hex(block102.sha256)]))
node2 = BaseNode() # connects to node2
connections.append(NodeConn('127.0.0.1', p2p_port(2), self.nodes[2], node2))
node2.add_connection(connections[2])
node2.wait_for_verack()
# send header lists to all three nodes
node0.send_header_for_blocks(self.blocks[0:2000])
node0.send_header_for_blocks(self.blocks[2000:])
node1.send_header_for_blocks(self.blocks[0:2000])
node1.send_header_for_blocks(self.blocks[2000:])
node2.send_header_for_blocks(self.blocks[0:200])
# Send 102 blocks to node0. Block 102 will be rejected.
for i in range(101):
node0.send_message(msg_block(self.blocks[i]))
node0.sync_with_ping() # make sure the most recent block is synced
node0.send_message(msg_block(self.blocks[101]))
assert_equal(self.nodes[0].getblock(self.nodes[0].getbestblockhash())['height'], 101)
# Send 2202 blocks to node1. All blocks will be accepted.
for i in range(2202):
node1.send_message(msg_block(self.blocks[i]))
node1.sync_with_ping() # make sure the most recent block is synced
assert_equal(self.nodes[1].getblock(self.nodes[1].getbestblockhash())['height'], 2202)
# Send 102 blocks to node2. Block 102 will be rejected.
for i in range(101):
node2.send_message(msg_block(self.blocks[i]))
node2.sync_with_ping() # make sure the most recent block is synced
node2.send_message(msg_block(self.blocks[101]))
assert_equal(self.nodes[2].getblock(self.nodes[2].getbestblockhash())['height'], 101)
if __name__ == '__main__':
SendHeadersTest().main()
|
the-stack_0_23229 | #--------------------------------------------------------------------------------
# Authors:
# - Yik Lung Pang: [email protected]
# - Alessio Xompero: [email protected]
#
# MIT License
# Copyright (c) 2021 CORSMAL
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
#--------------------------------------------------------------------------------
import cv2
import numpy.ma as ma
import cv2.aruco as aruco
import numpy as np
import pickle
from numpy import linalg as LA
from numpy.linalg import inv
import math
import copy
import rospy
# from sensor_msgs.msg import CameraInfo
# from realsense2_camera.msg import Extrinsics
class projection:
def __init__(self, camId):
self.camId = camId
self.intrinsic = dict.fromkeys(['rgb'])
self.extrinsic = dict.fromkeys(['rgb'])
self.distCoeffs = None
self.extrinsic['rgb'] = dict.fromkeys(['rvec','tvec','projMatrix'])
def getIntrinsicFromROS(self, data):
if 'color' in data.header.frame_id:
self.intrinsic['rgb'] = np.array(data.K).reshape(3,3)
self.distCoeffs = np.zeros((1,5), dtype=np.float64)
# def getIntrinsicParameters(self):
# self.getIntrinsicFromROS(rospy.wait_for_message('/camera{}/color/camera_info'.format(self.camId), CameraInfo)) #wait_for_message read only once the topic
def calibrateWithBoard(self, imgs, sensor, draw=False):
# Constant parameters used in Aruco methods
ARUCO_PARAMETERS = aruco.DetectorParameters_create()
ARUCO_DICT = aruco.Dictionary_get(aruco.DICT_4X4_50)
# Create grid board object we're using in our stream
CHARUCO_BOARD = aruco.CharucoBoard_create(
squaresX=10,
squaresY=6,
squareLength=0.04, #in meters
markerLength=0.03, #in meters
dictionary=ARUCO_DICT)
# grayscale image
gray = cv2.cvtColor(imgs[sensor], cv2.COLOR_BGR2GRAY)
# Detect Aruco markers
corners, ids, rejectedImgPoints = aruco.detectMarkers(gray, ARUCO_DICT, parameters=ARUCO_PARAMETERS)
# Refine detected markers
# Eliminates markers not part of our board, adds missing markers to the board
corners, ids, rejectedImgPoints, recoveredIds = aruco.refineDetectedMarkers(image = gray,
board = CHARUCO_BOARD,
detectedCorners = corners,
detectedIds = ids,
rejectedCorners = rejectedImgPoints,
cameraMatrix = self.intrinsic[sensor],
distCoeffs = self.distCoeffs)
#print('Found {} corners in C{} sensor {}'.format(len(corners), self.camId, sensor))
imgs[sensor] = aruco.drawDetectedMarkers(imgs[sensor], corners, ids=ids, borderColor=(0, 0, 255))
# Only try to find CharucoBoard if we found markers
if ids is not None and len(ids) > 10:
# Get charuco corners and ids from detected aruco markers
response, charuco_corners, charuco_ids = aruco.interpolateCornersCharuco(markerCorners=corners,
markerIds=ids,
image=gray,
board=CHARUCO_BOARD)
# Require more than 20 squares
if response is not None and response > 20:
# Estimate the posture of the charuco board, which is a construction of 3D space based on the 2D video
pose, self.extrinsic[sensor]['rvec'], self.extrinsic[sensor]['tvec'] = aruco.estimatePoseCharucoBoard(charucoCorners=charuco_corners,
charucoIds=charuco_ids,
board=CHARUCO_BOARD,
cameraMatrix=self.intrinsic[sensor],
distCoeffs=self.distCoeffs)
if draw:
imgs[sensor] = aruco.drawAxis(imgs[sensor], self.intrinsic[sensor], self.distCoeffs, self.extrinsic[sensor]['rvec'], self.extrinsic[sensor]['tvec'], 2)
cv2.imwrite('./data/out/calib_C{}_{}.png'.format(self.camId,sensor), imgs[sensor])
else:
print('Calibration board is not fully visible for C{} sensor: {}'.format(self.camId, sensor))
raise RuntimeError('ChArUco board calibration failed for C{} sensor {}'.format(self.camId, sensor))
self.extrinsic[sensor]['rvec'] = cv2.Rodrigues(self.extrinsic[sensor]['rvec'])[0]
self.extrinsic[sensor]['projMatrix'] = np.matmul(self.intrinsic[sensor], np.concatenate((self.extrinsic[sensor]['rvec'],self.extrinsic[sensor]['tvec']), axis=1))
# def cameraPose(self, _imgs):
# rospy.loginfo('Calibrating camera {} ...'.format(self.camId))
# imgs = copy.deepcopy(_imgs)
# # Estimate extrinsic parameters (need a calibration board within the field of view of all cameras)
# self.getIntrinsicParameters()
# self.calibrateWithBoard(imgs, 'rgb', draw=True)
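# Example (hypothetical usage sketch, names are assumptions): calibrate one camera from a
# single RGB frame that shows the ChArUco board, then read its 3x4 projection matrix.
# cam = projection(camId=1)
# cam.getIntrinsicFromROS(camera_info_msg)            # sensor_msgs/CameraInfo with 'color' in frame_id
# cam.calibrateWithBoard({'rgb': rgb_frame}, 'rgb', draw=False)
# P = cam.extrinsic['rgb']['projMatrix']              # used later for triangulation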
def getCentroid(mask):
# Get the largest contour
# mask = mask.copy()
mask = mask.astype(np.uint8).squeeze()
# _, contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contours, _ = cv2.findContours(mask, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
contour_sizes = [(cv2.contourArea(contour), contour) for contour in contours]
largest_contour = max(contour_sizes, key=lambda x: x[0])[1]
# Get centroid of the largest contour
M = cv2.moments(largest_contour)
try:
centroid = np.array((M['m10']/M['m00'], M['m01']/M['m00']))
return centroid
except ZeroDivisionError:
print('Centroid not found')
return None
def triangulate(c1, c2, point1, point2, undistort=True):
if (point1.dtype != 'float64'):
point1 = point1.astype(np.float64)
if (point2.dtype != 'float64'):
point2 = point2.astype(np.float64)
point3d = cv2.triangulatePoints(c1.extrinsic['rgb']['projMatrix'], c2.extrinsic['rgb']['projMatrix'], point1.reshape(2,1), point2.reshape(2,1)).transpose()
for point in point3d:
point /= point[-1]
return point3d.reshape(-1)
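# Example (hypothetical usage sketch): recover a 3D point from one pixel per calibrated camera.
# `cam1` and `cam2` are assumed to be `projection` instances with projMatrix already computed.
# p1 = np.array([320.0, 240.0])                       # pixel in camera 1
# p2 = np.array([298.0, 251.0])                       # matching pixel in camera 2
# xyz = triangulate(cam1, cam2, p1, p2)[:3]           # result is homogeneous (x, y, z, 1)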
def get3D(c1, c2, mask1, mask2, glass, _img1=None, _img2=None, drawCentroid=False, drawDimensions=False):
img1 = copy.deepcopy(_img1)
img2 = copy.deepcopy(_img2)
centr1 = getCentroid(mask1)
centr2 = getCentroid(mask2)
if centr1 is not None and centr2 is not None:
glass.centroid = triangulate(c1, c2, centr1, centr2)[:-1].reshape(-1,3)
# Draw centroid
if drawCentroid:
glass.drawCentroid2D(c1, img1)
glass.drawCentroid2D(c2, img2)
# Draw height and width lines
if drawDimensions:
# Get top/bottom points
top = copy.deepcopy(glass.centroid)
bottom = copy.deepcopy(glass.centroid)
top[0,2] += glass.h/2
bottom[0,2] -= glass.h/2
topC1, _ = cv2.projectPoints(top, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
bottomC1, _ = cv2.projectPoints(bottom, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
topC2, _ = cv2.projectPoints(top, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
bottomC2, _ = cv2.projectPoints(bottom, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
topC1 = topC1.squeeze().astype(int)
bottomC1 = bottomC1.squeeze().astype(int)
topC2 = topC2.squeeze().astype(int)
bottomC2 = bottomC2.squeeze().astype(int)
# Get right/left points
right = copy.deepcopy(glass.centroid)
left = copy.deepcopy(glass.centroid)
right[0,0] += glass.w/2
left[0,0] -= glass.w/2
rightC1, _ = cv2.projectPoints(right, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
leftC1, _ = cv2.projectPoints(left, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], c1.distCoeffs)
rightC2, _ = cv2.projectPoints(right, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
leftC2, _ = cv2.projectPoints(left, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], c2.distCoeffs)
rightC1 = rightC1.squeeze().astype(int)
leftC1 = leftC1.squeeze().astype(int)
rightC2 = rightC2.squeeze().astype(int)
leftC2 = leftC2.squeeze().astype(int)
cv2.line(img1, tuple(topC1), tuple(bottomC1), (128,0,0), 2)
cv2.line(img1, tuple(rightC1), tuple(leftC1), (128,0,0), 2)
cv2.line(img2, tuple(topC2), tuple(bottomC2), (128,0,0), 2)
cv2.line(img2, tuple(rightC2), tuple(leftC2), (128,0,0), 2)
return glass, img1, img2
def getObjectDimensions(cam, _seg, _img, centroid, offset, atHeight, draw=False):
# Sample 3D circumferences in world coordinate system at z = centroid
step = 0.001 #1mm
minDiameter = 0.01 #1cm
maxDiameter = 0.15 #15cm
radiuses = np.linspace(maxDiameter/2, minDiameter/2, num=int((maxDiameter-minDiameter)/step))
angularStep = 18#degrees
angles = np.linspace(0., 359., num=int((359.)/angularStep))
h = centroid[2]*2.
for radius in radiuses:
seg2plot = copy.deepcopy(_seg).squeeze()
seg = copy.deepcopy(_seg).squeeze()
img = copy.deepcopy(_img)
p_3d = []
for angle_d in angles:
angle = math.radians(angle_d)
p_3d.append(np.array((centroid[0]+(radius*math.cos(angle)), centroid[1]+(radius*math.sin(angle)), atHeight)).reshape(1,3))
# Reproject to image
p_2d, _ = cv2.projectPoints(np.array(p_3d), cam.extrinsic['rgb']['rvec'], cam.extrinsic['rgb']['tvec'], cam.intrinsic['rgb'], cam.distCoeffs)
p_2d = p_2d.squeeze().astype(int)
# Displace to segmentation
p_2d[:,0] -= offset
if draw:
for p in p_2d:
cv2.circle(img, (int(p[0]), int(p[1])), 2, (0,0,255), -1)
if draw:
for p in p_2d:
cv2.circle(seg2plot, (int(p[0]), int(p[1])), 2, (0,0,255), -1)
areIn = seg[p_2d[:,1], p_2d[:,0]]
# Check if imaged points are in the segmentation
if np.count_nonzero(areIn) == areIn.shape[0]:
return radius*2, h
if draw:
cv2.imwrite('./data/out/measuring_C{}_rad{:.5f}.png'.format(cam.camId, radius), img)
def getObjectDimensionsLODE(_c1, _c2, _seg1, _seg2, _img1, _img2, centroid, offset, h_step, r_step, export_pointcloud, pointcloud_path, draw=False):
c1 = copy.deepcopy(_c1)
c2 = copy.deepcopy(_c2)
# Radiuses
step = 0.001 #meters
minDiameter = 0.005 #meters
maxDiameter = 0.15 #meters
radiuses = np.linspace(maxDiameter/2, minDiameter/2, num=int((maxDiameter-minDiameter)/step))
angularStep = r_step#degrees
angles = np.linspace(0., 359., num=int((359.)/angularStep))
# Heights
step = h_step #meters
minHeight = -0.05 #meters
maxHeight = 0.3 #meters
estRadius = []
converged = []
#heights = np.linspace(centroid[2]-delta, centroid[2]+delta, num=(maxHeight-minHeight)/step)
# import pdb; pdb.set_trace()
heights = np.linspace(minHeight, maxHeight, num=int((maxHeight-minHeight)/step))
for height in heights:
for rad in radiuses:
seg1 = copy.deepcopy(_seg1)
seg2 = copy.deepcopy(_seg2)
# Sample 3D circumference
p3d = []
for angle_d in angles:
angle = math.radians(angle_d)
p3d.append(np.array((centroid[0]+(rad*math.cos(angle)), centroid[1]+(rad*math.sin(angle)), height)).reshape(1,3))
p3d = np.array(p3d)
# Reproject to C1
p2d_c1, _ = cv2.projectPoints(p3d, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], np.array([0.,0.,0.,0.,0.]))
p2d_c1 = p2d_c1.squeeze().astype(int)
p2d_c1[:,0] -= offset
# Reproject to C2
p2d_c2, _ = cv2.projectPoints(p3d, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], np.array([0.,0.,0.,0.,0.]))
p2d_c2 = p2d_c2.squeeze().astype(int)
p2d_c2[:,0] -= offset
# Check if imaged points are in the segmentation
areIn_c1 = seg1[p2d_c1[:,1], p2d_c1[:,0]]
areIn_c2 = seg2[p2d_c2[:,1], p2d_c2[:,0]]
if (np.count_nonzero(areIn_c1) == areIn_c1.shape[0]) and (np.count_nonzero(areIn_c2) == areIn_c2.shape[0]):
estRadius.append(rad)
converged.append(True)
break
if rad==minDiameter/2:
estRadius.append(rad)
converged.append(False)
break
estRadius = np.array(estRadius)
converged = np.array(converged)
estHeights = heights[converged]
calcRadius = estRadius[converged]
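# The container volume is approximated by stacking thin cylinders of height h_step:
#   V ~ sum_i pi * r_i^2 * h_step   (in cubic meters; the 1e6 factor below converts to milliliters)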
volume = 0
for pos, h in enumerate(estHeights):
volume += h_step*math.pi*(calcRadius[pos]**2)
print("Estimated volume is: "+str(volume*1000000))
width = np.max(estRadius) *2. * 1000
height = (estHeights[-1] - estHeights[0]) *1000
#height =
# Post-processing
#pEstRadius = []
#estRadius = np.array(estRadius)
#diffEstDiameters = np.diff(estRadius)
#diffdiffEstDiameters = np.diff(diffEstDiameters)
#plt.figure(figsize=(9, 3))
#plt.ion()
'''
for i, rad in enumerate(estRadius):
plt.subplot(131)
plt.plot(estRadius[:i])
#peaksMax = np.argmax(diffEstDiameters)
#peaksMin = np.argmin(diffEstDiameters)
#peaksMax, _ = find_peaks(diffEstDiameters)
#peaksMin, _ = find_peaks(-diffEstDiameters)
#plt.plot([peaksMax], [estRadius[peaksMax]], marker='o', markersize=3, color="green")
#plt.plot([peaksMin], [estRadius[peaksMin]], marker='o', markersize=3, color="red")
plt.subplot(132)
plt.plot(diffEstDiameters[:i])
#plt.plot([peaksMax[0]], [diffEstDiameters[peaksMax[0]]], marker='o', markersize=3, color="green")
#plt.plot([peaksMin[-1]], [diffEstDiameters[peaksMin[-1]]], marker='o', markersize=3, color="red")
plt.subplot(133)
plt.plot(diffdiffEstDiameters[:i])
plt.show()
plt.pause(0.1)
#plt.savefig('tmp.png')
plt.clf()
'''
'''
peaksDiff, _ = find_peaks(diffEstDiameters, height=(0))
peaksDiffDiff, _ = find_peaks(diffdiffEstDiameters, height=(0))
plt.plot(estRadius, 'b')
plt.plot(diffEstDiameters, 'r')
plt.plot(diffdiffEstDiameters, 'g')
plt.scatter(peaksDiff, diffEstDiameters[peaksDiff])
plt.scatter(peaksDiffDiff, diffdiffEstDiameters[peaksDiffDiff])
plt.show()
#plt.waitforbuttonpress()
#plt.clf()
import pdb; pdb.set_trace()
'''
# Draw final dimensions
if draw or export_pointcloud:
img1 = copy.deepcopy(_img1)
img2 = copy.deepcopy(_img2)
pointcloud = []
for i, rad in enumerate(estRadius):
p3d = []
for angle_d in angles:
angle = math.radians(angle_d)
p3d.append(np.array((centroid[0]+(rad*math.cos(angle)), centroid[1]+(rad*math.sin(angle)), heights[i])).reshape(1,3))
if converged[i] and export_pointcloud:
pointcloud.append(np.array(((rad*math.cos(angle)), (rad*math.sin(angle)), heights[i])).reshape(1,3))
p3d = np.array(p3d)
# Reproject to C1
p2d_c1, _ = cv2.projectPoints(p3d, c1.extrinsic['rgb']['rvec'], c1.extrinsic['rgb']['tvec'], c1.intrinsic['rgb'], np.array([0.,0.,0.,0.,0.]))
p2d_c1 = p2d_c1.squeeze().astype(int)
p2d_c1[:,0] -= offset
# Reproject to C2
p2d_c2, _ = cv2.projectPoints(p3d, c2.extrinsic['rgb']['rvec'], c2.extrinsic['rgb']['tvec'], c2.intrinsic['rgb'], np.array([0.,0.,0.,0.,0.]))
p2d_c2 = p2d_c2.squeeze().astype(int)
p2d_c2[:,0] -= offset
# Check if imaged points are in the segmentation
areIn_c1 = seg1[p2d_c1[:,1], p2d_c1[:,0]]
areIn_c2 = seg2[p2d_c2[:,1], p2d_c2[:,0]]
# Compute 2D points on the contour
#pointsOnContour(p2d_c1, p2d_c2, _contour1, _contour2, _c1, _c2)
if draw:
#if (i > peaksMax[0]) and (i < peaksMin[-1]):
for p, isIn in zip(p2d_c1, areIn_c1):
if isIn:
cv2.circle(img1, (int(p[0])+offset, int(p[1])), 1, (0,255,0), -1)
else:
cv2.circle(img1, (int(p[0])+offset, int(p[1])), 1, (0,0,255), -1)
for p, isIn in zip(p2d_c2, areIn_c2):
if isIn:
cv2.circle(img2, (int(p[0])+offset, int(p[1])), 1, (0,255,0), -1)
else:
cv2.circle(img2, (int(p[0])+offset, int(p[1])), 1, (0,0,255), -1)
#cv2.imshow('plot', np.concatenate((img1,img2), axis=1))
#cv2.waitKey(0)
if export_pointcloud:
np.savetxt(pointcloud_path, np.squeeze(np.array(pointcloud)), delimiter=",")
if draw:
print('/home/yiklungpang/Benchmark/S2/src/corsmal_benchmark_s2/data/c1_h{}_r{}.png'.format(str("%.3f" % h_step).replace('.','_'),str(r_step).replace('.','_')))
cv2.imwrite('/home/yiklungpang/Benchmark/S2/src/corsmal_benchmark_s2/data/c1_h{}_r{}.png'.format(str("%.3f" % h_step).replace('.','_'),str(r_step).replace('.','_')), np.concatenate((img1,img2), axis=1))
return height, width, volume
|
the-stack_0_23231 | from xml.etree.ElementTree import Element
from frappe.model.document import Document
from trebelge.TRUBLCommonElementsStrategy.TRUBLAddress import TRUBLAddress
from trebelge.TRUBLCommonElementsStrategy.TRUBLAllowanceCharge import TRUBLAllowanceCharge
from trebelge.TRUBLCommonElementsStrategy.TRUBLCommonElement import TRUBLCommonElement
from trebelge.TRUBLCommonElementsStrategy.TRUBLDimension import TRUBLDimension
from trebelge.TRUBLCommonElementsStrategy.TRUBLInvoiceLine import TRUBLInvoiceLine
from trebelge.TRUBLCommonElementsStrategy.TRUBLItem import TRUBLItem
from trebelge.TRUBLCommonElementsStrategy.TRUBLTemperature import TRUBLTemperature
class TRUBLGoodsItem(TRUBLCommonElement):
_frappeDoctype: str = 'UBL TR GoodsItem'
def process_element(self, element: Element, cbcnamespace: str, cacnamespace: str) -> Document:
frappedoc: dict = {}
# ['ID'] = ('cbc', '', 'Seçimli(0..1)')
# ['HazardousRiskIndicator'] = ('cbc', '', 'Seçimli(0..1)')
# ['RequiredCustomsID'] = ('cbc', '', 'Seçimli(0..1)')
# ['CustomsStatusCode'] = ('cbc', '', 'Seçimli(0..1)')
# ['CustomsImportClassifiedIndicator'] = ('cbc', '', 'Seçimli(0..1)')
# ['TraceID'] = ('cbc', '', 'Seçimli(0..1)')
cbcsecimli01: list = ['ID', 'HazardousRiskIndicator', 'RequiredCustomsID', 'CustomsStatusCode',
'CustomsImportClassifiedIndicator', 'TraceID']
for elementtag_ in cbcsecimli01:
field_: Element = element.find('./' + cbcnamespace + elementtag_)
if field_ is not None:
if field_.text is not None:
frappedoc[elementtag_.lower()] = field_.text.strip()
# ['DeclaredCustomsValueAmount'] = ('cbc', '', 'Seçimli(0..1)')
# ['DeclaredForCarriageValueAmount'] = ('cbc', '', 'Seçimli(0..1)')
# ['DeclaredStatisticsValueAmount'] = ('cbc', '', 'Seçimli(0..1)')
# ['FreeOnBoardValueAmount'] = ('cbc', '', 'Seçimli(0..1)')
# ['InsuranceValueAmount'] = ('cbc', '', 'Seçimli(0..1)')
# ['ValueAmount'] = ('cbc', '', 'Seçimli(0..1)')
cbcamntsecimli01: list = ['DeclaredCustomsValueAmount', 'DeclaredForCarriageValueAmount',
'DeclaredStatisticsValueAmount', 'FreeOnBoardValueAmount',
'InsuranceValueAmount', 'ValueAmount']
for elementtag_ in cbcamntsecimli01:
field_: Element = element.find('./' + cbcnamespace + elementtag_)
if field_ is not None:
if field_.text is not None:
frappedoc[elementtag_.lower()] = field_.text.strip()
frappedoc[elementtag_.lower() + 'currencyid'] = field_.attrib.get('currencyID').strip()
# ['GrossWeightMeasure'] = ('cbc', '', 'Seçimli(0..1)')
# ['NetWeightMeasure'] = ('cbc', '', 'Seçimli(0..1)')
# ['ChargeableWeightMeasure'] = ('cbc', '', 'Seçimli(0..1)')
# ['GrossVolumeMeasure'] = ('cbc', '', 'Seçimli(0..1)')
# ['NetVolumeMeasure'] = ('cbc', '', 'Seçimli(0..1)')
# ['Quantity'] = ('cbc', '', 'Seçimli(0..1)')
# ['CustomsTariffQuantity'] = ('cbc', '', 'Seçimli(0..1)')
# ['ChargeableQuantity'] = ('cbc', '', 'Seçimli(0..1)')
# ['ReturnableQuantity'] = ('cbc', '', 'Seçimli(0..1)')
cbcamntsecimli01: list = ['GrossWeightMeasure', 'NetWeightMeasure',
'ChargeableWeightMeasure', 'GrossVolumeMeasure',
'NetVolumeMeasure', 'Quantity',
'CustomsTariffQuantity', 'ChargeableQuantity',
'ReturnableQuantity']
for elementtag_ in cbcamntsecimli01:
field_: Element = element.find('./' + cbcnamespace + elementtag_)
if field_ is not None:
if field_.text is not None:
frappedoc[elementtag_.lower()] = field_.text.strip()
frappedoc[elementtag_.lower() + 'unitcode'] = field_.attrib.get('unitCode').strip()
# ['Description'] = ('cbc', '', 'Seçimli(0..n)')
descriptions = list()
descriptions_: list = element.findall('./' + cbcnamespace + 'Description')
if len(descriptions_) != 0:
for description_ in descriptions_:
element_ = description_.text
if element_ is not None and element_.strip() != '':
descriptions.append(element_.strip())
# ['OriginAddress'] = ('cac', 'Address', 'Seçimli(0..1)')
address_: Element = element.find('./' + cacnamespace + 'OriginAddress')
if address_ is not None:
tmp = TRUBLAddress().process_element(address_, cbcnamespace, cacnamespace)
if tmp is not None:
frappedoc['originaddress'] = tmp.name
if frappedoc == {}:
return None
# ['Item'] = ('cac', 'Item', 'Seçimli(0..n)')
tagelements_: list = element.findall('./' + cacnamespace + 'Item')
items = list()
if len(tagelements_) != 0:
for tagelement in tagelements_:
tmp = TRUBLItem().process_element(tagelement, cbcnamespace, cacnamespace)
if tmp is not None:
items.append(tmp.name)
# ['FreightAllowanceCharge'] = ('cac', 'AllowanceCharge', 'Seçimli(0..n)')
tagelements_: list = element.findall('./' + cacnamespace + 'FreightAllowanceCharge')
charges = list()
if len(tagelements_) != 0:
for tagelement in tagelements_:
tmp = TRUBLAllowanceCharge().process_element(tagelement, cbcnamespace, cacnamespace)
if tmp is not None:
charges.append(tmp.name)
# ['InvoiceLine'] = ('cac', 'InvoiceLine', 'Seçimli(0..n)')
lines = list()
tagelements_: list = element.findall('./' + cacnamespace + 'InvoiceLine')
if len(tagelements_) != 0:
for tagelement in tagelements_:
tmp = TRUBLInvoiceLine().process_element(tagelement, cbcnamespace, cacnamespace)
if tmp is not None:
lines.append(tmp.name)
# ['Temperature'] = ('cac', 'Temperature', 'Seçimli(0..n)')
temperatures = list()
tagelements_: list = element.findall('./' + cacnamespace + 'Temperature')
if len(tagelements_) != 0:
for tagelement in tagelements_:
tmp = TRUBLTemperature().process_element(tagelement, cbcnamespace, cacnamespace)
if tmp is not None:
temperatures.append(tmp.name)
# ['MeasurementDimension'] = ('cac', 'Dimension', 'Seçimli(0..n)')
dimensions = list()
tagelements_: list = element.findall('./' + cacnamespace + 'MeasurementDimension')
if len(tagelements_) != 0:
for tagelement in tagelements_:
tmp = TRUBLDimension().process_element(tagelement, cbcnamespace, cacnamespace)
if tmp is not None:
dimensions.append(tmp.name)
if len(items) + len(charges) + len(lines) + len(temperatures) + len(dimensions) == 0:
document: Document = self._get_frappedoc(self._frappeDoctype, frappedoc)
else:
document: Document = self._get_frappedoc(self._frappeDoctype, frappedoc, False)
if len(descriptions) != 0:
for description in descriptions:
document.append("description", dict(note=description))
document.save()
if len(items) != 0:
doc_append = document.append("item", {})
for item in items:
doc_append.item = item
document.save()
if len(charges) != 0:
doc_append = document.append("freightallowancecharge", {})
for charge in charges:
doc_append.allowancecharge = charge
document.save()
if len(lines) != 0:
doc_append = document.append("invoiceline", {})
for line in lines:
doc_append.invoiceline = line
document.save()
if len(temperatures) != 0:
doc_append = document.append("temperature", {})
for temperature in temperatures:
doc_append.temperature = temperature
document.save()
if len(dimensions) != 0:
doc_append = document.append("measurementdimension", {})
for dimension in dimensions:
doc_append.dimension = dimension
document.save()
return document
def process_elementasdict(self, element: Element, cbcnamespace: str, cacnamespace: str) -> dict:
pass
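# Example (hypothetical usage sketch): parse a cac:GoodsItem element from a UBL-TR document
# inside a running Frappe site. The file path is an assumption; the namespace URIs are the
# standard UBL 2 ones.
# import xml.etree.ElementTree as ET
# CBC = '{urn:oasis:names:specification:ubl:schema:xsd:CommonBasicComponents-2}'
# CAC = '{urn:oasis:names:specification:ubl:schema:xsd:CommonAggregateComponents-2}'
# root = ET.parse('despatch_advice.xml').getroot()
# goods_item_element = root.find('.//' + CAC + 'GoodsItem')
# doc = TRUBLGoodsItem().process_element(goods_item_element, CBC, CAC)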
|
the-stack_0_23232 |
from foundations_spec import *
from acceptance.api_acceptance_test_case_base import APIAcceptanceTestCaseBase
from acceptance.v2beta.jobs_tests_helper_mixin_v2 import JobsTestsHelperMixinV2
class TestArtifactLoading(JobsTestsHelperMixinV2, APIAcceptanceTestCaseBase):
url = '/api/v2beta/projects/{_project_name}/job_listing'
sorting_columns = []
filtering_columns = []
@classmethod
def setUpClass(klass):
from copy import deepcopy
import shutil
import foundations_contrib.global_state as global_state
from foundations_internal.foundations_job import FoundationsJob
shutil.rmtree('/tmp/foundations_acceptance', ignore_errors=True)
JobsTestsHelperMixinV2.setUpClass()
klass._set_project_name(JobsTestsHelperMixinV2._str_random_uuid())
klass._some_artifacts = JobsTestsHelperMixinV2._str_random_uuid()
klass._no_artifacts = JobsTestsHelperMixinV2._str_random_uuid()
klass._one_artifact = JobsTestsHelperMixinV2._str_random_uuid()
random_uuid = JobsTestsHelperMixinV2._str_random_uuid()
klass._make_running_job(klass._one_artifact, JobsTestsHelperMixinV2._str_random_uuid(), start_timestamp=99999999)
klass._make_completed_job(klass._no_artifacts, random_uuid, start_timestamp=100000000, end_timestamp=100086400)
klass._make_completed_job(klass._some_artifacts, random_uuid, start_timestamp=100000001, end_timestamp=100086400)
klass._old_config = deepcopy(global_state.config_manager.config())
klass._old_context = global_state.foundations_job
global_state.config_manager.reset()
global_state.foundations_job = FoundationsJob()
klass._save_artifacts()
@classmethod
def tearDownClass(klass):
import foundations_contrib.global_state as global_state
global_state.config_manager.reset()
global_state.config_manager.config().update(klass._old_config)
global_state.foundations_job = klass._old_context
@classmethod
def _set_job_id(klass, job_id):
import foundations_contrib.global_state as global_state
job = global_state.foundations_job
job.job_id = job_id
@classmethod
def _artifact_fixture_path(klass, artifact_name):
import os.path as path
return path.join('acceptance/v2beta/fixtures', artifact_name)
@classmethod
def _save_artifacts(klass):
import foundations
klass._set_job_id(klass._one_artifact)
foundations.save_artifact(filepath=klass._artifact_fixture_path('image_file.png'))
klass._set_job_id(klass._some_artifacts)
foundations.save_artifact(filepath=klass._artifact_fixture_path('no_extension'))
foundations.save_artifact(filepath=klass._artifact_fixture_path('other_file.other'))
foundations.save_artifact(filepath=klass._artifact_fixture_path('audio_file.mp3'), key='audio_artifact')
def test_get_route(self):
data = super().test_get_route()
jobs = data['jobs']
some_artifacts_payload = [
{
'filename': 'audio_file.mp3',
'uri': f'https://archive.dessa.com/archive/{self._some_artifacts}/user_artifacts/audio_file.mp3',
'artifact_type': 'audio',
'archive_key': 'audio_artifact'
},
{
'filename': 'no_extension',
'uri': f'https://archive.dessa.com/archive/{self._some_artifacts}/user_artifacts/no_extension',
'artifact_type': 'unknown',
'archive_key': 'no_extension'
},
{
'filename': 'other_file.other',
'uri': f'https://archive.dessa.com/archive/{self._some_artifacts}/user_artifacts/other_file.other',
'artifact_type': 'unknown',
'archive_key': 'other_file.other'
}
]
self.assertEqual(some_artifacts_payload, jobs[0]['artifacts'])
self.assertEqual([], jobs[1]['artifacts'])
one_artifact_payload = [
{
'filename': 'image_file.png',
'uri': f'https://archive.dessa.com/archive/{self._one_artifact}/user_artifacts/image_file.png',
'artifact_type': 'image',
'archive_key': 'image_file.png'
}
]
self.assertEqual(one_artifact_payload, jobs[2]['artifacts']) |
the-stack_0_23233 | import info
class subinfo(info.infoclass):
def setTargets(self):
self.svnTargets['master'] = 'https://github.com/protocolbuffers/protobuf.git'
self.targetConfigurePath["master"] = "cmake"
for ver in ["3.11.2"]:
self.targets[ver] = f"https://github.com/protocolbuffers/protobuf/archive/v{ver}.tar.gz"
self.archiveNames[ver] = f"protobuf-{ver}.tar.gz"
self.targetInstSrc[ver] = f"protobuf-{ver}"
self.targetConfigurePath[ver] = "cmake"
self.targetDigests["3.11.2"] = (['e8c7601439dbd4489fe5069c33d374804990a56c2f710e00227ee5d8fd650e67'], CraftHash.HashAlgorithm.SHA256)
self.defaultTarget = "3.11.2"
def setDependencies(self):
self.buildDependencies["virtual/base"] = None
self.buildDependencies["libs/zlib"] = None
from Package.CMakePackageBase import *
class Package(CMakePackageBase):
def __init__(self):
CMakePackageBase.__init__(self)
self.subinfo.options.configure.args += " -Dprotobuf_BUILD_TESTS=OFF -Dprotobuf_MSVC_STATIC_RUNTIME=OFF"
if not CraftCore.compiler.isWindows:
self.subinfo.options.configure.args += " -DBUILD_SHARED_LIBS=ON" |
the-stack_0_23237 | """Collection of TensorFlow general functions, wrapped to fit Ivy syntax and
signature.
"""
# global
_round = round
import tensorflow as tf
from tensorflow.python.types.core import Tensor
import ivy
# local
from ivy.functional.ivy.device import Profiler as BaseProfiler
def _same_device(dev_a, dev_b):
if dev_a is None or dev_b is None:
return False
return "/" + ":".join(dev_a[1:].split(":")[-2:]) == "/" + ":".join(
dev_b[1:].split(":")[-2:]
)
def dev(x: Tensor, as_str: bool = False) -> str:
dv = x.device
if as_str:
return dev_to_str(dv)
return dv
def to_dev(x: Tensor, device=None, out: Tensor = None) -> Tensor:
if device is None:
if ivy.exists(out):
return ivy.inplace_update(out, x)
return x
current_dev = _dev_callable(x)
if not _same_device(current_dev, device):
with tf.device("/" + device.upper()):
if ivy.exists(out):
return ivy.inplace_update(out, tf.identity(x))
return tf.identity(x)
if ivy.exists(out):
return ivy.inplace_update(out, x)
return x
def dev_to_str(device):
if isinstance(device, str) and "/" not in device:
return device
dev_in_split = device[1:].split(":")[-2:]
if len(dev_in_split) == 1:
return dev_in_split[0]
dev_type, dev_idx = dev_in_split
dev_type = dev_type.lower()
if dev_type == "cpu":
return dev_type
return ":".join([dev_type, dev_idx])
def dev_from_str(device):
if isinstance(device, str) and "/" in device:
return device
ret = "/" + device.upper()
if not ret[-1].isnumeric():
ret = ret + ":0"
return ret
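# Example round-trip (illustrative values):
#   dev_to_str("/job:localhost/replica:0/task:0/device:GPU:0")  -> "gpu:0"
#   dev_from_str("gpu:0")                                       -> "/GPU:0"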
clear_mem_on_dev = lambda dev: None
_dev_callable = dev
def num_gpus() -> int:
return len(tf.config.list_physical_devices("GPU"))
def gpu_is_available() -> bool:
return len(tf.config.list_physical_devices("GPU")) > 0
def tpu_is_available() -> bool:
try:
resolver = tf.distribute.cluster_resolver.TPUClusterResolver()
tf.config.experimental_connect_to_cluster(resolver)
tf.tpu.experimental.initialize_tpu_system(resolver)
tf.config.list_logical_devices("TPU")
tf.distribute.experimental.TPUStrategy(resolver)
return True
except ValueError:
return False
class Profiler(BaseProfiler):
def __init__(self, save_dir):
super(Profiler, self).__init__(save_dir)
self._options = tf.profiler.experimental.ProfilerOptions(
host_tracer_level=3, python_tracer_level=1, device_tracer_level=1
)
def start(self):
tf.profiler.experimental.start(self._save_dir, options=self._options)
def stop(self):
tf.profiler.experimental.stop()
def __enter__(self):
self.start()
def __exit__(self, exc_type, exc_val, exc_tb):
self.stop()
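# Example (hypothetical usage sketch): profile a small TensorFlow computation.
# The save directory is an assumption.
# with Profiler("/tmp/ivy_tf_profile"):
#     _ = tf.reduce_sum(tf.ones((512, 512)))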
|
the-stack_0_23241 | '''title'''
__title__ = 'pikachupytools'
'''description'''
__description__ = 'Pytools: Some useful tools written by pure python.'
'''url'''
__url__ = 'https://github.com/CharlesPikachu/pytools'
'''version'''
__version__ = '0.1.23'
'''author'''
__author__ = 'Charles'
'''email'''
__email__ = '[email protected]'
'''license'''
__license__ = 'Apache License 2.0'
'''copyright'''
__copyright__ = 'Copyright 2020-2022 Zhenchao Jin' |
the-stack_0_23243 | #!/usr/bin/env python
# -*- coding:utf-8 -*-
"""
Created on 18/10/10 21:30:45
@author: Changzhi Sun
"""
import torch
import torch.nn as nn
import torch.nn.functional as F
from typing import List, Dict, Any, Optional
from antNRE.src.word_encoder import WordCharEncoder
class BiLSTMEncoder(nn.Module):
def __init__(self,
word_encoder_size: int,
hidden_size: int,
num_layers: int = 1,
bidirectional: bool = True,
dropout: float = 0.5) -> None:
super(BiLSTMEncoder, self).__init__()
self.word_encoder_size = word_encoder_size
self.hidden_size = hidden_size
self.bilstm = nn.LSTM(word_encoder_size,
hidden_size // 2,
num_layers=num_layers,
bidirectional=bidirectional,
batch_first=True,
dropout=dropout)
self.dropout = nn.Dropout(dropout)
def forward(self,
batch_seq_encoder_input: torch.Tensor,
batch_seq_len: List) -> torch.Tensor:
batch_size, seq_size, word_encoder_size = batch_seq_encoder_input.size()
assert word_encoder_size == self.word_encoder_size
batch_seq_encoder_input_pack = nn.utils.rnn.pack_padded_sequence(
batch_seq_encoder_input,
batch_seq_len,
batch_first=True)
batch_seq_encoder_output, _ = self.bilstm(batch_seq_encoder_input_pack)
batch_seq_encoder_output, _ = nn.utils.rnn.pad_packed_sequence(
batch_seq_encoder_output, batch_first=True)
return self.dropout(batch_seq_encoder_output)
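# Example (hypothetical usage sketch, sizes are illustrative): encode a padded batch.
# Sequences must be ordered by decreasing length, since pack_padded_sequence is called
# without enforce_sorted=False.
# encoder = BiLSTMEncoder(word_encoder_size=100, hidden_size=200)
# batch = torch.randn(2, 5, 100)       # (batch, seq_len, word_encoder_size)
# lengths = [5, 3]
# out = encoder(batch, lengths)        # (batch, seq_len, hidden_size)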
|
the-stack_0_23244 | #!/usr/bin/env python3
# Copyright (c) 2013-2018 The DigiByte Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
#
# Generate seeds.txt from Pieter's DNS seeder
#
import re
import sys
import dns.resolver
import collections
NSEEDS=512
MAX_SEEDS_PER_ASN=2
MIN_BLOCKS = 337600
# These are hosts that have been observed to be behaving strangely (e.g.
# aggressively connecting to every node).
SUSPICIOUS_HOSTS = {
"130.211.129.106", "178.63.107.226",
"83.81.130.26", "88.198.17.7", "148.251.238.178", "176.9.46.6",
"54.173.72.127", "54.174.10.182", "54.183.64.54", "54.194.231.211",
"54.66.214.167", "54.66.220.137", "54.67.33.14", "54.77.251.214",
"54.94.195.96", "54.94.200.247"
}
PATTERN_IPV4 = re.compile(r"^((\d{1,3})\.(\d{1,3})\.(\d{1,3})\.(\d{1,3})):(\d+)$")
PATTERN_IPV6 = re.compile(r"^\[([0-9a-z:]+)\]:(\d+)$")
PATTERN_ONION = re.compile(r"^([abcdefghijklmnopqrstuvwxyz234567]{16}\.onion):(\d+)$")
PATTERN_AGENT = re.compile(r"^(/Satoshi:0.14.(0|1|2|99)/|/Satoshi:0.15.(0|1|2|99)|/Satoshi:0.16.(0|1|2|99)/)$")
def parseline(line):
sline = line.split()
if len(sline) < 11:
return None
m = PATTERN_IPV4.match(sline[0])
sortkey = None
ip = None
if m is None:
m = PATTERN_IPV6.match(sline[0])
if m is None:
m = PATTERN_ONION.match(sline[0])
if m is None:
return None
else:
net = 'onion'
ipstr = sortkey = m.group(1)
port = int(m.group(2))
else:
net = 'ipv6'
if m.group(1) in ['::']: # Not interested in localhost
return None
ipstr = m.group(1)
sortkey = ipstr # XXX parse IPv6 into number, could use name_to_ipv6 from generate-seeds
port = int(m.group(2))
else:
# Do IPv4 sanity check
ip = 0
for i in range(0,4):
if int(m.group(i+2)) < 0 or int(m.group(i+2)) > 255:
return None
ip = ip + (int(m.group(i+2)) << (8*(3-i)))
if ip == 0:
return None
net = 'ipv4'
sortkey = ip
ipstr = m.group(1)
port = int(m.group(6))
# Skip bad results.
if sline[1] == 0:
return None
# Extract uptime %.
uptime30 = float(sline[7][:-1])
# Extract Unix timestamp of last success.
lastsuccess = int(sline[2])
# Extract protocol version.
version = int(sline[10])
# Extract user agent.
agent = sline[11][1:-1]
# Extract service flags.
service = int(sline[9], 16)
# Extract blocks.
blocks = int(sline[8])
# Construct result.
return {
'net': net,
'ip': ipstr,
'port': port,
'ipnum': ip,
'uptime': uptime30,
'lastsuccess': lastsuccess,
'version': version,
'agent': agent,
'service': service,
'blocks': blocks,
'sortkey': sortkey,
}
def filtermultiport(ips):
'''Filter out hosts with more than one node per IP'''
hist = collections.defaultdict(list)
for ip in ips:
hist[ip['sortkey']].append(ip)
return [value[0] for (key,value) in list(hist.items()) if len(value)==1]
# Based on Greg Maxwell's seed_filter.py
def filterbyasn(ips, max_per_asn, max_total):
# Sift out ips by type
ips_ipv46 = [ip for ip in ips if ip['net'] in ['ipv4', 'ipv6']]
ips_onion = [ip for ip in ips if ip['net'] == 'onion']
# Filter IPv46 by ASN
result = []
asn_count = {}
for ip in ips_ipv46:
if len(result) == max_total:
break
try:
if ip['net'] == 'ipv4':
ipaddr = ip['ip']
prefix = '.origin'
else: # http://www.team-cymru.com/IP-ASN-mapping.html
res = str() # 2001:4860:b002:23::68
for nb in ip['ip'].split(':')[:4]: # pick the first 4 nibbles
for c in nb.zfill(4): # right padded with '0'
res += c + '.' # 2001 4860 b002 0023
ipaddr = res.rstrip('.') # 2.0.0.1.4.8.6.0.b.0.0.2.0.0.2.3
prefix = '.origin6'
asn = int([x.to_text() for x in dns.resolver.query('.'.join(
reversed(ipaddr.split('.'))) + prefix + '.asn.cymru.com',
'TXT').response.answer][0].split('\"')[1].split(' ')[0])
if asn not in asn_count:
asn_count[asn] = 0
if asn_count[asn] == max_per_asn:
continue
asn_count[asn] += 1
result.append(ip)
except:
sys.stderr.write('ERR: Could not resolve ASN for "' + ip['ip'] + '"\n')
# Add back Onions
result.extend(ips_onion)
return result
def main():
lines = sys.stdin.readlines()
ips = [parseline(line) for line in lines]
# Skip entries without a valid address.
ips = [ip for ip in ips if ip is not None]
# Skip entries from suspicious hosts.
ips = [ip for ip in ips if ip['ip'] not in SUSPICIOUS_HOSTS]
# Enforce minimal number of blocks.
ips = [ip for ip in ips if ip['blocks'] >= MIN_BLOCKS]
# Require service bit 1.
ips = [ip for ip in ips if (ip['service'] & 1) == 1]
# Require at least 50% 30-day uptime.
ips = [ip for ip in ips if ip['uptime'] > 50]
# Require a known and recent user agent.
ips = [ip for ip in ips if PATTERN_AGENT.match(ip['agent'])]
# Sort by availability (and use last success as tie breaker)
ips.sort(key=lambda x: (x['uptime'], x['lastsuccess'], x['ip']), reverse=True)
# Filter out hosts with multiple digibyte ports, these are likely abusive
ips = filtermultiport(ips)
# Look up ASNs and limit results, both per ASN and globally.
ips = filterbyasn(ips, MAX_SEEDS_PER_ASN, NSEEDS)
# Sort the results by IP address (for deterministic output).
ips.sort(key=lambda x: (x['net'], x['sortkey']))
for ip in ips:
if ip['net'] == 'ipv6':
print('[%s]:%i' % (ip['ip'], ip['port']))
else:
print('%s:%i' % (ip['ip'], ip['port']))
if __name__ == '__main__':
main()
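# Example invocation (assumption about file names): feed a seeder dump on stdin and
# write the filtered list to a seeds file:
#   python3 makeseeds.py < seeds_main.txt > nodes_main.txt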
|
the-stack_0_23251 | # -*- coding: utf-8 -*-
"""
dicom2nifti
@author: abrys
"""
from __future__ import print_function
import dicom2nifti.patch_pydicom_encodings
dicom2nifti.patch_pydicom_encodings.apply()
import os
import traceback
import logging
import nibabel
import numpy
import pydicom.config as pydicom_config
from pydicom.tag import Tag
import dicom2nifti.common as common
import dicom2nifti.settings as settings
import dicom2nifti.convert_generic as convert_generic
from dicom2nifti.exceptions import ConversionError
pydicom_config.enforce_valid_values = False
logger = logging.getLogger(__name__)
def dicom_to_nifti(dicom_input, output_file=None):
"""
This is the main dicom to nifti conversion function for philips images.
As input philips images are required. It will then determine the type of images and do the correct conversion
Examples: See unit test
:param output_file: file path to the output nifti
:param dicom_input: directory with dicom files for 1 scan
"""
assert common.is_philips(dicom_input)
# remove duplicate slices based on position and data
dicom_input = convert_generic.remove_duplicate_slices(dicom_input)
# remove localizers based on image type
dicom_input = convert_generic.remove_localizers_by_imagetype(dicom_input)
# remove_localizers based on image orientation (only valid if slicecount is validated)
dicom_input = convert_generic.remove_localizers_by_orientation(dicom_input)
if common.is_multiframe_dicom(dicom_input):
_assert_explicit_vr(dicom_input)
logger.info('Found multiframe dicom')
if _is_multiframe_4d(dicom_input):
logger.info('Found sequence type: MULTIFRAME 4D')
return _multiframe_to_nifti(dicom_input, output_file)
if _is_multiframe_anatomical(dicom_input):
logger.info('Found sequence type: MULTIFRAME ANATOMICAL')
return _multiframe_to_nifti(dicom_input, output_file)
else:
logger.info('Found singleframe dicom')
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if _is_singleframe_4d(dicom_input):
logger.info('Found sequence type: SINGLEFRAME 4D')
return _singleframe_to_nifti(grouped_dicoms, output_file)
logger.info('Assuming anatomical data')
return convert_generic.dicom_to_nifti(dicom_input, output_file)
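# Example (hypothetical usage sketch): convert one already-sorted Philips series.
# The paths are assumptions; `dicom_input` must be a list of pydicom datasets from a single series.
# import pydicom
# from pathlib import Path
# series = [pydicom.dcmread(str(p)) for p in Path('/data/philips_series').glob('*.dcm')]
# result = dicom_to_nifti(series, output_file='/tmp/philips.nii.gz')
# print(result['NII_FILE'])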
def _assert_explicit_vr(dicom_input):
"""
Assert that explicit vr is used
"""
if settings.validate_multiframe_implicit:
header = dicom_input[0]
if header.file_meta[0x0002, 0x0010].value == '1.2.840.10008.1.2':
raise ConversionError('IMPLICIT_VR_ENHANCED_DICOM')
def _is_multiframe_diffusion_imaging(dicom_input):
"""
Use this function to detect if a dicom series is a philips multiframe dti dataset
NOTE: We already assume this is a 4D dataset as input
"""
header = dicom_input[0]
if "PerFrameFunctionalGroupsSequence" not in header:
return False
# check if there is diffusion info in the frame
found_diffusion = False
diffusion_tag = Tag(0x0018, 0x9117)
for frame in header.PerFrameFunctionalGroupsSequence:
if diffusion_tag in frame:
found_diffusion = True
break
if not found_diffusion:
return False
return True
def _is_multiframe_4d(dicom_input):
"""
Use this function to detect if a dicom series is a philips multiframe 4D dataset
"""
# check if it is multi frame dicom
if not common.is_multiframe_dicom(dicom_input):
return False
header = dicom_input[0]
# check if there are multiple stacks
number_of_stack_slices = common.get_ss_value(header[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
if number_of_stacks <= 1:
return False
return True
def _is_multiframe_anatomical(dicom_input):
"""
Use this function to detect if a dicom series is a philips multiframe anatomical dataset
NOTE: Only the first slice will be checked so you can only provide an already sorted dicom directory
(containing one series)
"""
# check if it is multi frame dicom
if not common.is_multiframe_dicom(dicom_input):
return False
header = dicom_input[0]
# check if there are multiple stacks
number_of_stack_slices = common.get_ss_value(header[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(header.NumberOfFrames) / number_of_stack_slices)
if number_of_stacks > 1:
return False
return True
def _is_singleframe_4d(dicom_input):
"""
Use this function to detect if a dicom series is a philips singleframe 4D dataset
"""
header = dicom_input[0]
# check if there are stack information
slice_number_mr_tag = Tag(0x2001, 0x100a)
if slice_number_mr_tag not in header:
return False
# check if there are multiple timepoints
grouped_dicoms = _get_grouped_dicoms(dicom_input)
if len(grouped_dicoms) <= 1:
return False
return True
def _is_singleframe_diffusion_imaging(grouped_dicoms):
"""
Use this function to detect if a dicom series is a philips singleframe dti dataset
NOTE: We already assume singleframe 4D input
"""
# check that there is bval information
if _is_bval_type_b(grouped_dicoms):
return True
if _is_bval_type_a(grouped_dicoms):
return True
return False
def _is_bval_type_a(grouped_dicoms):
"""
Check if the bvals are stored in the first of 2 currently known ways for single frame dti
"""
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for group in grouped_dicoms:
if bvec_x_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_x_tag])) and \
bvec_y_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_y_tag])) and \
bvec_z_tag in group[0] and _is_float(common.get_fl_value(group[0][bvec_z_tag])) and \
bval_tag in group[0] and _is_float(common.get_fl_value(group[0][bval_tag])) and \
common.get_fl_value(group[0][bval_tag]) != 0:
return True
return False
def _is_bval_type_b(grouped_dicoms):
"""
Check if the bvals are stored in the second of 2 currently known ways for single frame dti
"""
bval_tag = Tag(0x0018, 0x9087)
bvec_tag = Tag(0x0018, 0x9089)
for group in grouped_dicoms:
if bvec_tag in group[0] and bval_tag in group[0]:
bvec = common.get_fd_array_value(group[0][bvec_tag], 3)
bval = common.get_fd_value(group[0][bval_tag])
if _is_float(bvec[0]) and _is_float(bvec[1]) and _is_float(bvec[2]) and _is_float(bval) and bval != 0:
return True
return False
def _is_float(value):
"""
Check if float
"""
try:
float(value)
return True
except ValueError:
return False
def _multiframe_to_nifti(dicom_input, output_file):
"""
This function will convert philips 4D or anatomical multiframe series to a nifti
"""
# Read the multiframe dicom file
logger.info('Read dicom file')
multiframe_dicom = dicom_input[0]
# Create mosaic block
logger.info('Creating data block')
full_block = _multiframe_to_block(multiframe_dicom)
logger.info('Creating affine')
# Create the nifti header info
affine = _create_affine_multiframe(multiframe_dicom)
logger.info('Creating nifti')
# Convert to nifti
nii_image = nibabel.Nifti1Image(full_block.squeeze(), affine)
timing_parameters = multiframe_dicom.SharedFunctionalGroupsSequence[0].MRTimingAndRelatedParametersSequence[0]
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
common.set_tr_te(nii_image, float(timing_parameters.RepetitionTime),
float(first_frame[0x2005, 0x140f][0].EchoTime))
# Save to disk
if output_file is not None:
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
if _is_multiframe_diffusion_imaging(dicom_input):
bval_file = None
bvec_file = None
if output_file is not None:
# Create the bval en bvec files
base_path = os.path.dirname(output_file)
base_name = os.path.splitext(os.path.splitext(os.path.basename(output_file))[0])[0]
logger.info('Creating bval en bvec files')
bval_file = '%s/%s.bval' % (base_path, base_name)
bvec_file = '%s/%s.bvec' % (base_path, base_name)
bval, bvec, bval_file, bvec_file = _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nii_image,
output_file)
return {'NII_FILE': output_file,
'BVAL_FILE': bval_file,
'BVEC_FILE': bvec_file,
'NII': nii_image,
'BVAL': bval,
'BVEC': bvec}
return {'NII_FILE': output_file,
'NII': nii_image}
def _singleframe_to_nifti(grouped_dicoms, output_file):
"""
This function will convert a philips singleframe series to a nifti
"""
# Create mosaic block
logger.info('Creating data block')
full_block = _singleframe_to_block(grouped_dicoms)
logger.info('Creating affine')
# Create the nifti header info
affine, slice_increment = common.create_affine(grouped_dicoms[0])
logger.info('Creating nifti')
# Convert to nifti
nii_image = nibabel.Nifti1Image(full_block.squeeze(), affine)
common.set_tr_te(nii_image, float(grouped_dicoms[0][0].RepetitionTime), float(grouped_dicoms[0][0].EchoTime))
if output_file is not None:
# Save to disk
logger.info('Saving nifti to disk %s' % output_file)
nii_image.to_filename(output_file)
if _is_singleframe_diffusion_imaging(grouped_dicoms):
bval_file = None
bvec_file = None
# Create the bval en bvec files
if output_file is not None:
base_name = os.path.splitext(output_file)[0]
if base_name.endswith('.nii'):
base_name = os.path.splitext(base_name)[0]
logger.info('Creating bval en bvec files')
bval_file = '%s.bval' % base_name
bvec_file = '%s.bvec' % base_name
nii_image, bval, bvec, bval_file, bvec_file = _create_singleframe_bvals_bvecs(grouped_dicoms,
bval_file,
bvec_file,
nii_image,
output_file)
return {'NII_FILE': output_file,
'BVAL_FILE': bval_file,
'BVEC_FILE': bvec_file,
'NII': nii_image,
'BVAL': bval,
'BVEC': bvec,
'MAX_SLICE_INCREMENT': slice_increment}
return {'NII_FILE': output_file,
'NII': nii_image,
'MAX_SLICE_INCREMENT': slice_increment}
def _singleframe_to_block(grouped_dicoms):
"""
Generate a full datablock containing all timepoints
"""
# For each slice / mosaic create a data volume block
data_blocks = []
for index in range(0, len(grouped_dicoms)):
logger.info('Creating block %s of %s' % (index + 1, len(grouped_dicoms)))
current_block = _stack_to_block(grouped_dicoms[index])
current_block = current_block[:, :, :, numpy.newaxis]
data_blocks.append(current_block)
try:
full_block = numpy.concatenate(data_blocks, axis=3)
except:
traceback.print_exc()
raise ConversionError("MISSING_DICOM_FILES")
# Apply the rescaling if needed
common.apply_scaling(full_block, grouped_dicoms[0][0])
return full_block
def _stack_to_block(timepoint_dicoms):
"""
Convert a mosaic slice to a block of data by reading the headers, splitting the mosaic and appending
"""
return common.get_volume_pixeldata(timepoint_dicoms)
def _get_grouped_dicoms(dicom_input):
"""
Sort the dicom datasets and group them into one list of slices per timepoint (using the Philips stack position tag when available)
"""
# if all dicoms have an instance number try sorting by instance number else by position
if [d for d in dicom_input if 'InstanceNumber' in d]:
dicoms = sorted(dicom_input, key=lambda x: x.InstanceNumber)
else:
dicoms = common.sort_dicoms(dicom_input)
# now group per stack
grouped_dicoms = [[]] # list with first element a list
timepoint_index = 0
previous_stack_position = -1
# loop over all sorted dicoms
stack_position_tag = Tag(0x2001, 0x100a) # put this there as this is a slow step and used a lot
for index in range(0, len(dicoms)):
dicom_ = dicoms[index]
stack_position = 0
if stack_position_tag in dicom_:
stack_position = common.get_is_value(dicom_[stack_position_tag])
if previous_stack_position == stack_position:
# if the stack number is the same we move to the next timepoint
timepoint_index += 1
if len(grouped_dicoms) <= timepoint_index:
grouped_dicoms.append([])
else:
# if it changes move back to the first timepoint
timepoint_index = 0
grouped_dicoms[timepoint_index].append(dicom_)
previous_stack_position = stack_position
return grouped_dicoms
def _create_affine_multiframe(multiframe_dicom):
"""
Function to create the affine matrix for a siemens mosaic dataset
This will work for siemens dti and 4D if in mosaic format
"""
first_frame = multiframe_dicom[Tag(0x5200, 0x9230)][0]
last_frame = multiframe_dicom[Tag(0x5200, 0x9230)][-1]
# Create affine matrix (http://nipy.sourceforge.net/nibabel/dicom/dicom_orientation.html#dicom-slice-affine)
image_orient1 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[0:3].astype(float)
image_orient2 = numpy.array(first_frame.PlaneOrientationSequence[0].ImageOrientationPatient)[3:6].astype(float)
normal = numpy.cross(image_orient1, image_orient2)
delta_r = float(first_frame[0x2005, 0x140f][0].PixelSpacing[0])
delta_c = float(first_frame[0x2005, 0x140f][0].PixelSpacing[1])
image_pos = numpy.array(first_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
last_image_pos = numpy.array(last_frame.PlanePositionSequence[0].ImagePositionPatient).astype(float)
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
delta_s = abs(numpy.linalg.norm(last_image_pos - image_pos)) / (number_of_stack_slices - 1)
return numpy.array(
[[-image_orient1[0] * delta_c, -image_orient2[0] * delta_r, -delta_s * normal[0], -image_pos[0]],
[-image_orient1[1] * delta_c, -image_orient2[1] * delta_r, -delta_s * normal[1], -image_pos[1]],
[image_orient1[2] * delta_c, image_orient2[2] * delta_r, delta_s * normal[2], image_pos[2]],
[0, 0, 0, 1]])
def _multiframe_to_block(multiframe_dicom):
"""
Generate a full datablock containing all stacks
"""
# Calculate the amount of stacks and slices in the stack
number_of_stack_slices = int(common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)]))
number_of_stacks = int(int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)
# We create a numpy array
size_x = multiframe_dicom.pixel_array.shape[2]
size_y = multiframe_dicom.pixel_array.shape[1]
size_z = number_of_stack_slices
size_t = number_of_stacks
# get the format
format_string = common.get_numpy_type(multiframe_dicom)
# get header info needed for ordering
frame_info = multiframe_dicom[0x5200, 0x9230]
data_4d = numpy.zeros((size_z, size_y, size_x, size_t), dtype=format_string)
# loop over each slice and insert in datablock
t_location_index = _get_t_position_index(multiframe_dicom)
for slice_index in range(0, size_t * size_z):
z_location = frame_info[slice_index].FrameContentSequence[0].InStackPositionNumber - 1
if t_location_index is None:
t_location = frame_info[slice_index].FrameContentSequence[0].TemporalPositionIndex - 1
else:
t_location = frame_info[slice_index].FrameContentSequence[0].DimensionIndexValues[t_location_index] - 1
block_data = multiframe_dicom.pixel_array[slice_index, :, :]
# apply scaling
rescale_intercept = frame_info[slice_index].PixelValueTransformationSequence[0].RescaleIntercept
rescale_slope = frame_info[slice_index].PixelValueTransformationSequence[0].RescaleSlope
block_data = common.do_scaling(block_data,
rescale_slope, rescale_intercept)
# switch to float if needed
if block_data.dtype != data_4d.dtype:
data_4d = data_4d.astype(block_data.dtype)
data_4d[z_location, :, :, t_location] = block_data
full_block = numpy.zeros((size_x, size_y, size_z, size_t), dtype=data_4d.dtype)
# loop over each stack and reorganize the data
for t_index in range(0, size_t):
# transpose the block so the directions are correct
data_3d = numpy.transpose(data_4d[:, :, :, t_index], (2, 1, 0))
# add the block the the full data
full_block[:, :, :, t_index] = data_3d
return full_block
def _get_t_position_index(multiframe_dicom):
# First try temporal position index itself
if 'DimensionIndexSequence' not in multiframe_dicom:
return None
for current_index in range(len(multiframe_dicom.DimensionIndexSequence)):
item = multiframe_dicom.DimensionIndexSequence[current_index]
if 'DimensionDescriptionLabel' in item and \
'Temporal Position Index' in item.DimensionDescriptionLabel:
return current_index
# This seems to work for most dti
for current_index in range(len(multiframe_dicom.DimensionIndexSequence)):
item = multiframe_dicom.DimensionIndexSequence[current_index]
if 'DimensionDescriptionLabel' in item and \
'Diffusion Gradient Orientation' in item.DimensionDescriptionLabel:
return current_index
# This seems to work for 3D grace sequences
for current_index in range(len(multiframe_dicom.DimensionIndexSequence)):
item = multiframe_dicom.DimensionIndexSequence[current_index]
if 'DimensionDescriptionLabel' in item and \
'Effective Echo Time' in item.DimensionDescriptionLabel:
return current_index
# First try trigger delay time (inspired by http://www.dclunie.com/papers/SCAR_20040522_CTMRMF.pdf)
for current_index in range(len(multiframe_dicom.DimensionIndexSequence)):
item = multiframe_dicom.DimensionIndexSequence[current_index]
if 'DimensionDescriptionLabel' in item and \
'Trigger Delay Time' in item.DimensionDescriptionLabel:
return current_index
return None
def _create_bvals_bvecs(multiframe_dicom, bval_file, bvec_file, nifti, nifti_file):
"""
Write the bvals from the sorted dicom files to a bval file
Inspired by https://github.com/IBIC/ibicUtils/blob/master/ibicBvalsBvecs.py
"""
# create the empty arrays
number_of_stack_slices = common.get_ss_value(multiframe_dicom[Tag(0x2001, 0x105f)][0][Tag(0x2001, 0x102d)])
number_of_stacks = int(int(multiframe_dicom.NumberOfFrames) / number_of_stack_slices)
bvals = numpy.zeros([number_of_stacks], dtype=numpy.int32)
bvecs = numpy.zeros([number_of_stacks, 3])
# loop over all timepoints and create a list with all bvals and bvecs
for stack_index in range(0, number_of_stacks):
stack = multiframe_dicom[Tag(0x5200, 0x9230)][stack_index]
if str(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9075)].value) == 'DIRECTIONAL':
bvals[stack_index] = common.get_fd_value(stack[Tag(0x0018, 0x9117)][0][Tag(0x0018, 0x9087)])
bvecs[stack_index, :] = common.get_fd_array_value(stack[Tag(0x0018, 0x9117)][0]
[Tag(0x0018, 0x9076)][0][Tag(0x0018, 0x9089)], 3)
# truncate nifti if needed
nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)
# save the found bvecs to the file
if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
common.write_bval_file(bvals, bval_file)
common.write_bvec_file(bvecs, bvec_file)
else:
bval_file = None
bvec_file = None
bvals = None
bvecs = None
return bvals, bvecs, bval_file, bvec_file
def _fix_diffusion_images(bvals, bvecs, nifti, nifti_file):
"""
This function will remove the last timepoint from the nifti, bvals and bvecs if the last vector is 0,0,0
This is sometimes added at the end by philips
"""
# if all zero continue of if the last bvec is not all zero continue
if numpy.count_nonzero(bvecs) == 0 or not numpy.count_nonzero(bvals[-1]) == 0:
# nothing needs to be done here
return nifti, bvals, bvecs
# remove last elements from bvals and bvecs
bvals = bvals[:-1]
bvecs = bvecs[:-1]
# remove last elements from the nifti
new_nifti = nibabel.Nifti1Image(nifti.get_data()[:, :, :, :-1].squeeze(), nifti.affine)
new_nifti.to_filename(nifti_file)
return new_nifti, bvals, bvecs
def _create_singleframe_bvals_bvecs(grouped_dicoms, bval_file, bvec_file, nifti, nifti_file):
"""
Write the bvals from the sorted dicom files to a bval file
"""
# create the empty arrays
bvals = numpy.zeros([len(grouped_dicoms)], dtype=numpy.int32)
bvecs = numpy.zeros([len(grouped_dicoms), 3])
# loop over all timepoints and create a list with all bvals and bvecs
if _is_bval_type_a(grouped_dicoms):
bval_tag = Tag(0x2001, 0x1003)
bvec_x_tag = Tag(0x2005, 0x10b0)
bvec_y_tag = Tag(0x2005, 0x10b1)
bvec_z_tag = Tag(0x2005, 0x10b2)
for stack_index in range(0, len(grouped_dicoms)):
bvals[stack_index] = common.get_fl_value(grouped_dicoms[stack_index][0][bval_tag])
bvecs[stack_index, :] = [common.get_fl_value(grouped_dicoms[stack_index][0][bvec_x_tag]),
common.get_fl_value(grouped_dicoms[stack_index][0][bvec_y_tag]),
common.get_fl_value(grouped_dicoms[stack_index][0][bvec_z_tag])]
elif _is_bval_type_b(grouped_dicoms):
bval_tag = Tag(0x0018, 0x9087)
bvec_tag = Tag(0x0018, 0x9089)
for stack_index in range(0, len(grouped_dicoms)):
bvals[stack_index] = common.get_fd_value(grouped_dicoms[stack_index][0][bval_tag])
bvecs[stack_index, :] = common.get_fd_array_value(grouped_dicoms[stack_index][0][bvec_tag], 3)
# truncate nifti if needed
nifti, bvals, bvecs = _fix_diffusion_images(bvals, bvecs, nifti, nifti_file)
# save the found bvecs to the file
if numpy.count_nonzero(bvals) > 0 or numpy.count_nonzero(bvecs) > 0:
common.write_bval_file(bvals, bval_file)
common.write_bvec_file(bvecs, bvec_file)
else:
bval_file = None
bvec_file = None
bvals = None
bvecs = None
return nifti, bvals, bvecs, bval_file, bvec_file
|
the-stack_0_23252 | import boto3
import pickle
import sys
from .communications import ComManager
def get_capture_list(credentials, cm):
''' Returns a list of completed capture names.
Arguments:
credentials - {
"aws_access_key" : String,
"aws_secret_access_key" : String,
"region_name" : String
}
cm - A ComManager object
Returns:
A list of capture names.
'''
cap_name_query = '''SELECT name FROM Captures WHERE status = "completed"'''
cap_names = cm.execute_query(cap_name_query)
return cap_names
# Deprecated
def _get_replays_for_capture(credentials, capture_folder, cm):
s3_client = cm.get_boto('s3')
key_len = len(capture_folder)
bucket_id = ComManager.S3name
key_list = [key['Key'] for key in s3_client.list_objects(Bucket=bucket_id)['Contents']]
general_capture_list = [key for key in key_list if key != capture_folder and key[:key_len] == capture_folder]
replay_list = [key for key in general_capture_list if ".replay" == key[-len(".replay"):]]
return replay_list
# Deprecated
def _get_capture_replay_list(credentials, cm):
ret_list = []
s3_client = cm.get_boto('s3')
capture_list = get_capture_list(credentials, cm)
for capture in capture_list:
replay_list = [replay.replace(capture, "").replace("/", "") for replay in _get_replays_for_capture(credentials, capture, cm)]
if len(replay_list) > 0:
ret_list.append((capture, replay_list))
return ret_list
def get_analytics(credentials, cm):
''' Returns all analytics.
Arguments:
credentials - {
"aws_access_key" : String,
"aws_secret_access_key" : String,
"region_name" : String
}
cm - A ComManager object
Returns:
{
capture_name_1 : {
"replays" : {
replay_name_1 : {
'CPUUtilization' : [{'timestamp' : String, 'average' : Float}, ...],
'FreeableMemory' : [...],
'ReadIOPS' : [...],
'WriteIOPS' : [...],
'start_time' : String,
'end_time' : String,
'period' : String,
'db_id' : String
},
replay_name_2 : {...},
...
},
"capture_analytics" : Boolean -OR- {capture_name : {<similar to replay_name_1 above>}}
},
capture_name_2 : {...},
...
}
'''
s3_client = cm.get_boto('s3')
cap_names = get_capture_list(credentials, cm)
rep_cap_name_query = '''SELECT replay, capture FROM Replays'''
rep_cap_names = cm.execute_query(rep_cap_name_query)
metrics = {capture_name : {"replays": {}} for (capture_name,) in cap_names}
#capture_list = get_capture_list(credentials, cm)
cap_name_time = cm.execute_query("SELECT name, end_time FROM Captures WHERE status='completed'")
for cap, end_time in cap_name_time:
metrics[cap]["end_time"] = end_time
top_folder = "mycrt/"
for (replay_name, capture_name) in rep_cap_names:
key = top_folder + capture_name + "/" + replay_name + ".replay"
#if replay_name == capture_name:
# metrics[capture_name]["capture_analytics"] = {capture_name : retrieve_analytics(s3_client, log_key = key)}
#else:
metrics[capture_name]["replays"][replay_name] = retrieve_analytics(s3_client, log_key = key)
#for capture in capture_list:
# replay_list = get_replays_for_capture(credentials, capture, cm)
# metrics[capture] = {replay.replace(capture, "").replace("/", "").replace(".replay", ""): retrieve_analytics(s3_client, log_key = replay) for replay in replay_list}
return metrics
def retrieve_analytics(s3_client, bucket_id = None, log_key = "test-folder/test-metrics"):
''' Retrieve the analytics object for a single replay.
Arguments:
s3_client - An S3 Client object from Boto3
bucket_id - An S3 bucket id to retrieve metrics from
log_key - A path to extract metrics from within the S3 bucket
Returns:
{
'CPUUtilization' : [{'timestamp' : String, 'average' : Float}, ...],
'FreeableMemory' : [...],
'ReadIOPS' : [...],
'WriteIOPS' : [...],
'start_time' : String,
'end_time' : String,
'period' : String,
'db_id' : String
}
'''
if bucket_id is None:
bucket_id = ComManager.S3name
bucket_obj = s3_client.get_object(
Bucket = bucket_id,
Key = log_key
)
new_byte_log = bucket_obj["Body"].read()
metrics = pickle.loads(new_byte_log)
return metrics
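# Example (illustrative sketch, not part of the original module; the bucket name and
# key below are hypothetical and assume a pickled metrics dict was previously written
# by the capture/replay pipeline):
#
#     s3_client = cm.get_boto('s3')
#     replay_metrics = retrieve_analytics(s3_client,
#                                         bucket_id="my-mycrt-bucket",
#                                         log_key="mycrt/capture1/replay1.replay")
#     print(replay_metrics['CPUUtilization'][0]['average'])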
|
the-stack_0_23253 | # Copyright Contributors to the Pyro project.
# SPDX-License-Identifier: Apache-2.0
from functools import partial
import warnings
import jax
from jax import device_get, lax, random, value_and_grad, vmap
from jax.flatten_util import ravel_pytree
import jax.numpy as np
import numpyro
import numpyro.distributions as dist
from numpyro.distributions.constraints import real
from numpyro.distributions.transforms import ComposeTransform, biject_to
from numpyro.handlers import block, seed, substitute, trace
from numpyro.util import not_jax_tracer, while_loop
__all__ = [
'find_valid_initial_params',
'get_potential_fn',
'log_density',
'log_likelihood',
'init_to_feasible',
'init_to_median',
'init_to_prior',
'init_to_uniform',
'init_to_value',
'potential_energy',
'initialize_model',
'Predictive',
'transformed_potential_energy',
]
def log_density(model, model_args, model_kwargs, params, skip_dist_transforms=False):
"""
(EXPERIMENTAL INTERFACE) Computes log of joint density for the model given
latent values ``params``.
:param model: Python callable containing NumPyro primitives.
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:param dict params: dictionary of current parameter values keyed by site
name.
:param bool skip_dist_transforms: whether to compute log probability of a site
(if its prior is a transformed distribution) in its base distribution
domain.
:return: log of joint density and a corresponding model trace
"""
# We skip transforms in
# + autoguide's model
# + hmc's model
# We apply transforms in
# + autoguide's guide
# + svi's model + guide
if skip_dist_transforms:
model = substitute(model, base_param_map=params)
else:
model = substitute(model, param_map=params)
model_trace = trace(model).get_trace(*model_args, **model_kwargs)
log_joint = 0.
for site in model_trace.values():
if site['type'] == 'sample':
value = site['value']
intermediates = site['intermediates']
mask = site['mask']
scale = site['scale']
# Early exit when all elements are masked
if not_jax_tracer(mask) and mask is not None and not np.any(mask):
return jax.device_put(0.), model_trace
if intermediates:
if skip_dist_transforms:
log_prob = site['fn'].base_dist.log_prob(intermediates[0][0])
else:
log_prob = site['fn'].log_prob(value, intermediates)
else:
log_prob = site['fn'].log_prob(value)
# Minor optimizations
# XXX: note that this may not work correctly for dynamic masks, provide
# explicit jax.DeviceArray for masking.
if mask is not None:
if scale is not None:
log_prob = np.where(mask, scale * log_prob, 0.)
else:
log_prob = np.where(mask, log_prob, 0.)
else:
if scale is not None:
log_prob = scale * log_prob
log_prob = np.sum(log_prob)
log_joint = log_joint + log_prob
return log_joint, model_trace
def transform_fn(transforms, params, invert=False):
"""
(EXPERIMENTAL INTERFACE) Callable that applies a transformation from the `transforms`
dict to values in the `params` dict and returns the transformed values keyed on
the same names.
:param transforms: Dictionary of transforms keyed by names. Names in
`transforms` and `params` should align.
:param params: Dictionary of arrays keyed by names.
:param invert: Whether to apply the inverse of the transforms.
:return: `dict` of transformed params.
"""
if invert:
transforms = {k: v.inv for k, v in transforms.items()}
return {k: transforms[k](v) if k in transforms else v
for k, v in params.items()}
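# Example (illustrative sketch, not part of the original module; assumes a single
# site 'p' with unit-interval support, so biject_to yields a sigmoid-style bijection):
#
#     transforms = {'p': biject_to(dist.Beta(1., 1.).support)}
#     constrained = transform_fn(transforms, {'p': np.array(0.)})         # roughly {'p': 0.5}
#     unconstrained = transform_fn(transforms, constrained, invert=True)  # back to {'p': 0.}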
def constrain_fn(model, transforms, model_args, model_kwargs, params, return_deterministic=False):
"""
(EXPERIMENTAL INTERFACE) Gets value at each latent site in `model` given
unconstrained parameters `params`. The `transforms` is used to transform these
unconstrained parameters to base values of the corresponding priors in `model`.
If a prior is a transformed distribution, the corresponding base value lies in
the support of base distribution. Otherwise, the base value lies in the support
of the distribution.
:param model: a callable containing NumPyro primitives.
:param dict transforms: dictionary of transforms keyed by names. Names in
`transforms` and `params` should align.
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:param dict params: dictionary of unconstrained values keyed by site
names.
:param bool return_deterministic: whether to return the value of `deterministic`
sites from the model. Defaults to `False`.
:return: `dict` of transformed params.
"""
params_constrained = transform_fn(transforms, params)
substituted_model = substitute(model, base_param_map=params_constrained)
model_trace = trace(substituted_model).get_trace(*model_args, **model_kwargs)
return {k: v['value'] for k, v in model_trace.items() if (k in params) or
(return_deterministic and v['type'] == 'deterministic')}
def potential_energy(model, inv_transforms, model_args, model_kwargs, params):
"""
(EXPERIMENTAL INTERFACE) Computes potential energy of a model given unconstrained params.
The `inv_transforms` is used to transform these unconstrained parameters to base values
of the corresponding priors in `model`. If a prior is a transformed distribution,
the corresponding base value lies in the support of base distribution. Otherwise,
the base value lies in the support of the distribution.
:param model: a callable containing NumPyro primitives.
:param dict inv_transforms: dictionary of transforms keyed by names.
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:param dict params: unconstrained parameters of `model`.
:return: potential energy given unconstrained parameters.
"""
params_constrained = transform_fn(inv_transforms, params)
log_joint, model_trace = log_density(model, model_args, model_kwargs, params_constrained,
skip_dist_transforms=True)
for name, t in inv_transforms.items():
t_log_det = np.sum(t.log_abs_det_jacobian(params[name], params_constrained[name]))
if model_trace[name]['scale'] is not None:
t_log_det = model_trace[name]['scale'] * t_log_det
log_joint = log_joint + t_log_det
return - log_joint
def transformed_potential_energy(potential_energy, inv_transform, z):
"""
Given a potential energy `p(x)`, compute potential energy of `p(z)`
with `z = transform(x)` (i.e. `x = inv_transform(z)`).
:param potential_energy: a callable to compute potential energy of original
variable `x`.
:param ~numpyro.distributions.constraints.Transform inv_transform: a
transform from the new variable `z` to `x`.
:param z: new variable to compute potential energy
:return: potential energy of `z`.
"""
x, intermediates = inv_transform.call_with_intermediates(z)
logdet = inv_transform.log_abs_det_jacobian(z, x, intermediates=intermediates)
return potential_energy(x) - logdet
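# Example (illustrative sketch, not part of the original module; reparameterizes a
# positive variable via x = exp(z) using ExpTransform from numpyro.distributions.transforms):
#
#     from numpyro.distributions.transforms import ExpTransform
#
#     potential_x = lambda x: x                  # e.g. -log p(x) for an Exponential(1) prior
#     potential_z = lambda z: transformed_potential_energy(potential_x, ExpTransform(), z)
#     # potential_z(0.) == potential_x(exp(0.)) - log|d exp(z)/dz| at z = 0, i.e. 1.0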
def _init_to_median(site, num_samples=15, skip_param=False):
if site['type'] == 'sample' and not site['is_observed']:
if isinstance(site['fn'], dist.TransformedDistribution):
fn = site['fn'].base_dist
else:
fn = site['fn']
samples = numpyro.sample('_init', fn,
sample_shape=(num_samples,) + site['kwargs']['sample_shape'])
return np.median(samples, axis=0)
if site['type'] == 'param' and not skip_param:
# return base value of param site
constraint = site['kwargs'].pop('constraint', real)
transform = biject_to(constraint)
value = site['args'][0]
if isinstance(transform, ComposeTransform):
base_transform = transform.parts[0]
value = base_transform(transform.inv(value))
return value
def init_to_median(num_samples=15):
"""
Initialize to the prior median.
:param int num_samples: number of prior points to calculate median.
"""
return partial(_init_to_median, num_samples=num_samples)
def init_to_prior():
"""
Initialize to a prior sample.
"""
return init_to_median(num_samples=1)
def _init_to_uniform(site, radius=2, skip_param=False):
if site['type'] == 'sample' and not site['is_observed']:
if isinstance(site['fn'], dist.TransformedDistribution):
fn = site['fn'].base_dist
else:
fn = site['fn']
value = numpyro.sample('_init', fn, sample_shape=site['kwargs']['sample_shape'])
base_transform = biject_to(fn.support)
unconstrained_value = numpyro.sample('_unconstrained_init', dist.Uniform(-radius, radius),
sample_shape=np.shape(base_transform.inv(value)))
return base_transform(unconstrained_value)
if site['type'] == 'param' and not skip_param:
# return base value of param site
constraint = site['kwargs'].pop('constraint', real)
transform = biject_to(constraint)
value = site['args'][0]
unconstrained_value = numpyro.sample('_unconstrained_init', dist.Uniform(-radius, radius),
sample_shape=np.shape(transform.inv(value)))
if isinstance(transform, ComposeTransform):
base_transform = transform.parts[0]
else:
base_transform = transform
return base_transform(unconstrained_value)
def init_to_uniform(radius=2):
"""
Initialize to a random point in the area `(-radius, radius)` of unconstrained domain.
:param float radius: specifies the range to draw an initial point in the unconstrained domain.
"""
return partial(_init_to_uniform, radius=radius)
def init_to_feasible():
"""
Initialize to an arbitrary feasible point, ignoring distribution
parameters.
"""
return init_to_uniform(radius=0)
def _init_to_value(site, values={}, skip_param=False):
if site['type'] == 'sample' and not site['is_observed']:
if site['name'] not in values:
return _init_to_uniform(site, skip_param=skip_param)
value = values[site['name']]
if isinstance(site['fn'], dist.TransformedDistribution):
value = ComposeTransform(site['fn'].transforms).inv(value)
return value
if site['type'] == 'param' and not skip_param:
# return base value of param site
constraint = site['kwargs'].pop('constraint', real)
transform = biject_to(constraint)
value = site['args'][0]
if isinstance(transform, ComposeTransform):
base_transform = transform.parts[0]
value = base_transform(transform.inv(value))
return value
def init_to_value(values):
"""
Initialize to the value specified in `values`. We defer to
:func:`init_to_uniform` strategy for sites which do not appear in `values`.
:param dict values: dictionary of initial values keyed by site name.
"""
return partial(_init_to_value, values=values)
def find_valid_initial_params(rng_key, model,
init_strategy=init_to_uniform(),
param_as_improper=False,
model_args=(),
model_kwargs=None):
"""
(EXPERIMENTAL INTERFACE) Given a model with Pyro primitives, returns an initial
valid unconstrained value for all the parameters. This function also returns an
`is_valid` flag to say whether the initial parameters are valid. Parameter values
are considered valid if the values and the gradients for the log density have
finite values.
:param jax.random.PRNGKey rng_key: random number generator seed to
sample from the prior. The returned `init_params` will have the
batch shape ``rng_key.shape[:-1]``.
:param model: Python callable containing Pyro primitives.
:param callable init_strategy: a per-site initialization function.
:param bool param_as_improper: a flag to decide whether to consider sites with
`param` statement as sites with improper priors.
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:return: tuple of (`init_params`, `is_valid`).
"""
init_strategy = jax.partial(init_strategy, skip_param=not param_as_improper)
def cond_fn(state):
i, _, _, is_valid = state
return (i < 100) & (~is_valid)
def body_fn(state):
i, key, _, _ = state
key, subkey = random.split(key)
# Wrap model in a `substitute` handler to initialize from `init_loc_fn`.
# Use `block` to not record sample primitives in `init_loc_fn`.
seeded_model = substitute(model, substitute_fn=block(seed(init_strategy, subkey)))
model_trace = trace(seeded_model).get_trace(*model_args, **model_kwargs)
constrained_values, inv_transforms = {}, {}
for k, v in model_trace.items():
if v['type'] == 'sample' and not v['is_observed']:
if v['intermediates']:
constrained_values[k] = v['intermediates'][0][0]
inv_transforms[k] = biject_to(v['fn'].base_dist.support)
else:
constrained_values[k] = v['value']
inv_transforms[k] = biject_to(v['fn'].support)
elif v['type'] == 'param' and param_as_improper:
constraint = v['kwargs'].pop('constraint', real)
transform = biject_to(constraint)
if isinstance(transform, ComposeTransform):
base_transform = transform.parts[0]
inv_transforms[k] = base_transform
constrained_values[k] = base_transform(transform.inv(v['value']))
else:
inv_transforms[k] = transform
constrained_values[k] = v['value']
params = transform_fn(inv_transforms,
{k: v for k, v in constrained_values.items()},
invert=True)
potential_fn = jax.partial(potential_energy, model, inv_transforms, model_args, model_kwargs)
pe, param_grads = value_and_grad(potential_fn)(params)
z_grad = ravel_pytree(param_grads)[0]
is_valid = np.isfinite(pe) & np.all(np.isfinite(z_grad))
return i + 1, key, params, is_valid
def _find_valid_params(rng_key_):
_, _, prototype_params, is_valid = init_state = body_fn((0, rng_key_, None, None))
# Early return if valid params found.
if not_jax_tracer(is_valid):
if device_get(is_valid):
return prototype_params, is_valid
_, _, init_params, is_valid = while_loop(cond_fn, body_fn, init_state)
return init_params, is_valid
# Handle possible vectorization
if rng_key.ndim == 1:
init_params, is_valid = _find_valid_params(rng_key)
else:
init_params, is_valid = lax.map(_find_valid_params, rng_key)
return init_params, is_valid
def get_model_transforms(rng_key, model, model_args=(), model_kwargs=None):
model_kwargs = {} if model_kwargs is None else model_kwargs
seeded_model = seed(model, rng_key if rng_key.ndim == 1 else rng_key[0])
model_trace = trace(seeded_model).get_trace(*model_args, **model_kwargs)
inv_transforms = {}
# model code may need to be replayed in the presence of dynamic constraints
# or deterministic sites
replay_model = False
for k, v in model_trace.items():
if v['type'] == 'sample' and not v['is_observed']:
if v['intermediates']:
inv_transforms[k] = biject_to(v['fn'].base_dist.support)
replay_model = True
else:
inv_transforms[k] = biject_to(v['fn'].support)
elif v['type'] == 'param':
constraint = v['kwargs'].pop('constraint', real)
transform = biject_to(constraint)
if isinstance(transform, ComposeTransform):
inv_transforms[k] = transform.parts[0]
replay_model = True
else:
inv_transforms[k] = transform
elif v['type'] == 'deterministic':
replay_model = True
return inv_transforms, replay_model
def get_potential_fn(rng_key, model, dynamic_args=False, model_args=(), model_kwargs=None):
"""
(EXPERIMENTAL INTERFACE) Given a model with Pyro primitives, returns a
function which, given unconstrained parameters, evaluates the potential
energy (negative log joint density). In addition, this returns a
function to transform unconstrained values at sample sites to constrained
values within their respective support.
:param jax.random.PRNGKey rng_key: random number generator seed to
sample from the prior. The returned `init_params` will have the
batch shape ``rng_key.shape[:-1]``.
:param model: Python callable containing Pyro primitives.
:param bool dynamic_args: if `True`, the `potential_fn` and
`constraints_fn` are themselves dependent on model arguments.
When provided a `*model_args, **model_kwargs`, they return
`potential_fn` and `constraints_fn` callables, respectively.
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:return: tuple of (`potential_fn`, `postprocess_fn`). The latter is used
to constrain unconstrained samples (e.g. those returned by HMC)
to values that lie within the site's support, and return values at
`deterministic` sites in the model.
"""
if dynamic_args:
def potential_fn(*args, **kwargs):
inv_transforms, replay_model = get_model_transforms(rng_key, model, args, kwargs)
return jax.partial(potential_energy, model, inv_transforms, args, kwargs)
def postprocess_fn(*args, **kwargs):
inv_transforms, replay_model = get_model_transforms(rng_key, model, args, kwargs)
if replay_model:
return jax.partial(constrain_fn, model, inv_transforms, args, kwargs,
return_deterministic=True)
else:
return jax.partial(transform_fn, inv_transforms)
else:
inv_transforms, replay_model = get_model_transforms(rng_key, model, model_args, model_kwargs)
potential_fn = jax.partial(potential_energy, model, inv_transforms, model_args, model_kwargs)
if replay_model:
postprocess_fn = jax.partial(constrain_fn, model, inv_transforms, model_args, model_kwargs,
return_deterministic=True)
else:
postprocess_fn = jax.partial(transform_fn, inv_transforms)
return potential_fn, postprocess_fn
def initialize_model(rng_key, model,
init_strategy=init_to_uniform(),
dynamic_args=False,
model_args=(),
model_kwargs=None):
"""
(EXPERIMENTAL INTERFACE) Helper function that calls :func:`~numpyro.infer.util.get_potential_fn`
and :func:`~numpyro.infer.util.find_valid_initial_params` under the hood
to return a tuple of (`init_params`, `potential_fn`, `constrain_fn`).
:param jax.random.PRNGKey rng_key: random number generator seed to
sample from the prior. The returned `init_params` will have the
batch shape ``rng_key.shape[:-1]``.
:param model: Python callable containing Pyro primitives.
:param callable init_strategy: a per-site initialization function.
See :ref:`init_strategy` section for available functions.
:param bool dynamic_args: if `True`, the `potential_fn` and
`constraints_fn` are themselves dependent on model arguments.
When provided a `*model_args, **model_kwargs`, they return
`potential_fn` and `constraints_fn` callables, respectively.
:param tuple model_args: args provided to the model.
:param dict model_kwargs: kwargs provided to the model.
:return: tuple of (`init_params`, `potential_fn`, `postprocess_fn`),
`init_params` are values from the prior used to initiate MCMC,
`postprocess_fn` is a callable that uses inverse transforms
to convert unconstrained HMC samples to constrained values that
lie within the site's support, in addition to returning values
at `deterministic` sites in the model.
"""
if model_kwargs is None:
model_kwargs = {}
potential_fn, postprocess_fn = get_potential_fn(rng_key if rng_key.ndim == 1 else rng_key[0],
model,
dynamic_args=dynamic_args,
model_args=model_args,
model_kwargs=model_kwargs)
init_params, is_valid = find_valid_initial_params(rng_key, model,
init_strategy=init_strategy,
param_as_improper=True,
model_args=model_args,
model_kwargs=model_kwargs)
if not_jax_tracer(is_valid):
if device_get(~np.all(is_valid)):
raise RuntimeError("Cannot find valid initial parameters. Please check your model again.")
    return init_params, potential_fn, postprocess_fn
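# Example (illustrative sketch, not part of the original module; the model and data
# below are hypothetical and only show the calling convention):
#
#     def beta_bernoulli(data):
#         p = numpyro.sample('p', dist.Beta(1., 1.))
#         numpyro.sample('obs', dist.Bernoulli(p), obs=data)
#
#     rng_key = random.PRNGKey(0)
#     init_params, potential_fn, postprocess_fn = initialize_model(
#         rng_key, beta_bernoulli, model_args=(np.ones(5),))
#     pe = potential_fn(init_params)           # scalar potential energy
#     samples = postprocess_fn(init_params)    # values mapped back to the constrained space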
def _predictive(rng_key, model, posterior_samples, num_samples, return_sites=None,
parallel=True, model_args=(), model_kwargs={}):
rng_keys = random.split(rng_key, num_samples)
def single_prediction(val):
rng_key, samples = val
model_trace = trace(seed(substitute(model, samples), rng_key)).get_trace(
*model_args, **model_kwargs)
if return_sites is not None:
if return_sites == '':
sites = {k for k, site in model_trace.items() if site['type'] != 'plate'}
else:
sites = return_sites
else:
sites = {k for k, site in model_trace.items()
if (site['type'] == 'sample' and k not in samples) or (site['type'] == 'deterministic')}
return {name: site['value'] for name, site in model_trace.items() if name in sites}
if parallel:
return vmap(single_prediction)((rng_keys, posterior_samples))
else:
return lax.map(single_prediction, (rng_keys, posterior_samples))
class Predictive(object):
"""
This class is used to construct predictive distribution. The predictive distribution is obtained
by running model conditioned on latent samples from `posterior_samples`.
.. warning::
The interface for the `Predictive` class is experimental, and
might change in the future.
:param model: Python callable containing Pyro primitives.
:param dict posterior_samples: dictionary of samples from the posterior.
:param callable guide: optional guide to get posterior samples of sites not present
in `posterior_samples`.
:param dict params: dictionary of values for param sites of model/guide.
:param int num_samples: number of samples
:param list return_sites: sites to return; by default only sample sites not present
in `posterior_samples` are returned.
:param bool parallel: whether to predict in parallel using JAX vectorized map :func:`jax.vmap`.
Defaults to False.
:return: dict of samples from the predictive distribution.
"""
def __init__(self, model, posterior_samples=None, guide=None, params=None, num_samples=None,
return_sites=None, parallel=False):
if posterior_samples is None and num_samples is None:
raise ValueError("Either posterior_samples or num_samples must be specified.")
posterior_samples = {} if posterior_samples is None else posterior_samples
for name, sample in posterior_samples.items():
batch_size = sample.shape[0]
if (num_samples is not None) and (num_samples != batch_size):
warnings.warn("Sample's leading dimension size {} is different from the "
"provided {} num_samples argument. Defaulting to {}."
.format(batch_size, num_samples, batch_size), UserWarning)
num_samples = batch_size
if num_samples is None:
raise ValueError("No sample sites in posterior samples to infer `num_samples`.")
if return_sites is not None:
assert isinstance(return_sites, (list, tuple, set))
self.model = model
self.posterior_samples = {} if posterior_samples is None else posterior_samples
self.num_samples = num_samples
self.guide = guide
self.params = {} if params is None else params
self.return_sites = return_sites
self.parallel = parallel
def __call__(self, rng_key, *args, **kwargs):
"""
Returns dict of samples from the predictive distribution. By default, only sample sites not
contained in `posterior_samples` are returned. This can be modified by changing the
`return_sites` keyword argument of this :class:`Predictive` instance.
:param jax.random.PRNGKey rng_key: random key to draw samples.
:param args: model arguments.
:param kwargs: model kwargs.
"""
posterior_samples = self.posterior_samples
if self.guide is not None:
rng_key, guide_rng_key = random.split(rng_key)
# use return_sites='' as a special signal to return all sites
guide = substitute(self.guide, self.params)
posterior_samples = _predictive(guide_rng_key, guide, posterior_samples,
self.num_samples, return_sites='', parallel=self.parallel,
model_args=args, model_kwargs=kwargs)
model = substitute(self.model, self.params)
return _predictive(rng_key, model, posterior_samples, self.num_samples,
return_sites=self.return_sites, parallel=self.parallel,
model_args=args, model_kwargs=kwargs)
def get_samples(self, rng_key, *args, **kwargs):
warnings.warn("The method `.get_samples` has been deprecated in favor of `.__call__`.",
DeprecationWarning)
return self.__call__(rng_key, *args, **kwargs)
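# Example (illustrative sketch, not part of the original module; `mcmc`, `model` and
# `data` are hypothetical, with posterior samples keyed by site name):
#
#     predictive = Predictive(model, posterior_samples=mcmc.get_samples())
#     ppc_samples = predictive(random.PRNGKey(1), data)
#     # ppc_samples['obs'] has a leading dimension equal to num_samples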
def log_likelihood(model, posterior_samples, *args, **kwargs):
"""
(EXPERIMENTAL INTERFACE) Returns log likelihood at observation nodes of model,
given samples of all latent variables.
:param model: Python callable containing Pyro primitives.
:param dict posterior_samples: dictionary of samples from the posterior.
:param args: model arguments.
:param kwargs: model kwargs.
:return: dict of log likelihoods at observation sites.
"""
def single_loglik(samples):
model_trace = trace(substitute(model, samples)).get_trace(*args, **kwargs)
        return {name: site['fn'].log_prob(site['value'])
                for name, site in model_trace.items()
                if site['type'] == 'sample' and site['is_observed']}
#single_loglik(posterior_samples)
# model_trace = trace(substitute(model, posterior_samples[0])).get_trace(*args, **kwargs)
return vmap(single_loglik)(posterior_samples)
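# Example (illustrative sketch, not part of the original module; `mcmc`, `model` and
# `data` are hypothetical):
#
#     loglik = log_likelihood(model, mcmc.get_samples(), data)
#     # loglik['obs'] has shape (num_samples,) + np.shape(data)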
|
the-stack_0_23255 | """
This contains classes used for analyzing the sentiments of input texts
"""
import re
import pprint
import shelve
# import IOMDataService as DS
# from TextFiltration import Sentences, Words, Lemmatized, Bigrams, Trigrams
import numpy as np
from senti_classifier import senti_classifier
import nltk
from nltk.corpus import sentiwordnet as swn
from nltk.corpus import wordnet as wn
from nltk.corpus import stopwords
from nltk.tokenize import wordpunct_tokenize
class SentiSynsetTools(object):
"""
Tools for loading and working with SentiWordNet stuff
"""
def load_senti_synsets_for_word(self, word):
"""
Get a list of senti_synsets for the word
Args:
word: String to lookup
Returns:
List of senti_synsets
Example:
input: slow
result:
SentiSynset('decelerate.v.01'),
SentiSynset('slow.v.02'),
SentiSynset('slow.v.03'),
SentiSynset('slow.a.01'),
SentiSynset('slow.a.02'),
SentiSynset('slow.a.04'),
SentiSynset('slowly.r.01'),
SentiSynset('behind.r.03')]
"""
        return list(swn.senti_synsets(word))
def get_scores_from_senti_synset(self, string_name_of_synset, return_format=tuple):
"""
Args:
string_name_of_synset: The string name of the synset that want scores for
return_format: What kind of object to return. Allowed values are tuple, dict
Returns:
On default of tuple returns (positiveScore, negativeScore, objScore)
"""
breakdown = swn.senti_synset(string_name_of_synset)
if return_format is tuple:
return (breakdown.pos_score(), breakdown.neg_score(), breakdown.obj_score())
elif return_format is dict:
return {
'posScore': breakdown.pos_score(),
'negScore': breakdown.neg_score(),
'objScore': breakdown.obj_score()
}
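    # Example (illustrative sketch, not part of the original module; 'good.a.01' is a
    # real WordNet synset name, but the exact scores depend on the installed corpus):
    #
    #     tools = SentiSynsetTools()
    #     pos, neg, obj = tools.get_scores_from_senti_synset('good.a.01')
    #     as_dict = tools.get_scores_from_senti_synset('good.a.01', return_format=dict)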
class DisambiguationTools(object):
"""
"""
def disambiguate_word_senses(self, sentence, word):
"""
Attempts to determine the proper sense of the target
word from the sentence in which it appears.
Args:
sentence: String representation of the sentence
            word: String representation of the word
Returns:
Returns a synset which is the best guess.
Example:
            disambiguate_word_senses('A cat is a good pet', 'cat')
OUT: Synset('cat.v.01')
"""
wordsynsets = wn.synsets(word)
bestScore = 0.0
result = None
for synset in wordsynsets:
for w in nltk.word_tokenize(sentence):
score = 0.0
for wsynset in wn.synsets(w):
sim = wn.path_similarity(wsynset, synset)
if(sim == None):
continue
else:
score += sim
if (score > bestScore):
bestScore = score
result = synset
return result
class TextPrepare(object):
"""
All tools for preparing text for processing
"""
def __init__(self):
self.stop_words = set(stopwords.words('english'))
self.stop_words.update(['.', ',', '"', "'", '?', '!', ':', ';', '(', ')', '[', ']', '{', '}']) # remove it if you need punctuation
def prepare_text(self, tweet_text):
"""
        Returns a bag of words: lowercased tokens with stopwords and punctuation removed.
        Prospective improvement: also strip emoticons.
        :param tweet_text: Raw tweet text.
        :return: list of tokens
"""
return [i.lower() for i in wordpunct_tokenize(tweet_text) if i.lower() not in self.stop_words]
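    # Example (illustrative sketch, not part of the original module; output depends on
    # the installed NLTK stopword list):
    #
    #     tp = TextPrepare()
    #     tp.prepare_text("This is a GREAT day!")   # -> ['great', 'day']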
class ComputeSentiments(object):
"""
"""
def __init__(self):
self.text_preparer = TextPrepare()
self.disambiguator = DisambiguationTools()
self.sentitools = SentiSynsetTools()
def compute_sentiments(self, tweet_text):
"""
        :param tweet_text: Raw tweet text to score.
        :return: dict mapping each retained token to its (posScore, negScore, objScore) tuple
"""
tokens = self.text_preparer.prepare_text(tweet_text)
        sentiments = {}
        for word in tokens:
            best_synset = self.disambiguator.disambiguate_word_senses(tweet_text, word)
            # Compute the scores for the best-guess sense of this word (skip words with no synsets)
            if best_synset is not None:
                sentiments[word] = self.sentitools.get_scores_from_senti_synset(best_synset.name())
        return sentiments
class ItemSentimentAnalyzer(object):
"""
This analyzes and returns the sentiment scores for a particular item
"""
def __init__(self):
pass
# DS.IOMService.__init__(self)
def computeSentimentScores(self, record, tokenizer):
"""
record is a dict which must have record['quote_text']. It normally should have record['quote_id'] or record['vin_id']
tokenizer is a tokenizer with a tokenize method. The unit of analysis (e.g., word, ngram, sentence) is determined by the tokenizer passed in
"""
self.text = record['quote_text']
# To allow this to be used with arbitrary inputs
try:
self.quoteID = record['quote_id']
except:
try:
self.quoteID = record['vin_id']
except:
# Make random ID if none exists
self.quoteID = 'ID' + str(np.random.rand())
# Tokenize the text into the appropriate units
self.tokens = tokenizer.tokenize(self.text)
# Calc number of tokens in the record
self.numTokens = len(self.tokens)
# Calc sentiment scores
self.pos_score, self.neg_score = senti_classifier.polarity_scores(self.tokens)
# Averages are needed because otherwise the score will vary with number of sentences
# Average positive sentiment score of the record
self.avgPos = self.pos_score / self.numTokens
# Average negative sentiment of the record
self.avgNeg = (self.neg_score / self.numTokens) * -1
# Net average sentiment of the record
self.netSent = self.avgPos + self.avgNeg
# Objectivity score (from chris potts )
self.obj_score = 1.0 - self.netSent
# Put the results in a dictionary
self.scores = dict(quoteID=self.quoteID, avgPos=self.avgPos, avgNeg=self.avgNeg, netSent=self.netSent)
return self.scores
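    # Example (illustrative sketch, not part of the original module; any object with a
    # .tokenize() method works, e.g. nltk.tokenize.PunktSentenceTokenizer for sentences):
    #
    #     analyzer = ItemSentimentAnalyzer()
    #     record = {'quote_id': 42, 'quote_text': 'The service was slow but the staff were friendly.'}
    #     scores = analyzer.computeSentimentScores(record, nltk.tokenize.PunktSentenceTokenizer())
    #     # scores -> {'quoteID': 42, 'avgPos': ..., 'avgNeg': ..., 'netSent': ...}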
#def makeDict(self):
# """
# Makes a dictionary for the result
# Keys: quote_id, avgPos, avgNeg, netSent
# """
# self.result_dict = dict(quote_id=self.quote_id, avgPos=self.avgPos, avgNeg=self.avgNeg, netSent=self.netSent)
# return self.result_dict
def saveSentiments(self, filepath):
"""
Saves the results
Args:
filepath: the path to the shelve file where the data is / is to be stored
"""
#self.makeDict()
self.to_save = self.scores
self.save_sentiment_data_to_file(filepath)
class GroupSentiments:
"""
This is used to compute the sentiment scores for a group of items
"""
def __init__(self, data, groupname):
"""
Args:
data: a list of dictionaries that have been prepared by ItemSentiments to be saved
groupname: the name that the result will be stored with/ or the name to retrieve
"""
self.name = groupname
#self.datafile = datafile
self.quoteIDs = []
self.avgPos = []
self.avgNeg = []
self.netSent = []
for d in data:
            self.quoteIDs.append(d['quoteID'])
self.avgPos.append(d['avgPos'])
self.avgNeg.append(d['avgNeg'])
self.netSent.append(d['netSent'])
self.overallpos = np.average(self.avgPos)
self.overallneg = np.average(self.avgNeg)
self.overallsent = np.average(self.netSent)
def saveSentiments(self, filepath):
"""
Saves the results
@param filepath The path to the saved data or to where it should be saved
@type string
"""
self.sentiments = dict(name=self.name, overallpos=self.overallpos, overallneg=self.overallneg,
overallsent=self.overallsent)
db = shelve.open(filepath)
db[str(self.sentiments['name'])] = self.sentiments
db.close()
print(self.sentiments)
class MultiItemSentimentAnalyzer(ItemSentimentAnalyzer):
def __init__(self, data_to_analyze, tokenizer, filepath, label):
"""
        @param data_to_analyze List of dictionaries with items that ItemSentimentAnalyzer can operate on
@type list
"""
ItemSentimentAnalyzer.__init__(self)
self.to_save = []
for record in data_to_analyze:
self.computeSentimentScores(record, tokenizer)
self.to_save.append(self.scores)
self.save_sentiment_data_to_file(filepath, label)
|
the-stack_0_23257 | #!/usr/bin/python
from __future__ import absolute_import, division, print_function
# Copyright 2019-2021 Fortinet, Inc.
#
# This program is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with this program. If not, see <https://www.gnu.org/licenses/>.
__metaclass__ = type
ANSIBLE_METADATA = {'status': ['preview'],
'supported_by': 'community',
'metadata_version': '1.1'}
DOCUMENTATION = '''
---
module: fmgr_firewall_multicastaddress_tagging
short_description: Config object tagging.
description:
- This module is able to configure a FortiManager device.
- Examples include all parameters and values which need to be adjusted to data sources before usage.
version_added: "2.10"
author:
- Link Zheng (@chillancezen)
- Jie Xue (@JieX19)
- Frank Shen (@fshen01)
- Hongbin Lu (@fgtdev-hblu)
notes:
- Running in workspace locking mode is supported in this FortiManager module, the top
level parameters workspace_locking_adom and workspace_locking_timeout help do the work.
- To create or update an object, use state present directive.
- To delete an object, use state absent directive.
    - Normally, running one module can fail when a non-zero rc is returned. You can also override
      the conditions to fail or succeed with the parameters rc_failed and rc_succeeded.
options:
enable_log:
description: Enable/Disable logging for task
required: false
type: bool
default: false
proposed_method:
description: The overridden method for the underlying Json RPC request
required: false
type: str
choices:
- update
- set
- add
bypass_validation:
description: only set to True when module schema diffs with FortiManager API structure, module continues to execute without validating parameters
required: false
type: bool
default: false
workspace_locking_adom:
description: the adom to lock for FortiManager running in workspace mode, the value can be global and others including root
required: false
type: str
workspace_locking_timeout:
description: the maximum time in seconds to wait for other user to release the workspace lock
required: false
type: int
default: 300
state:
description: the directive to create, update or delete an object
type: str
required: true
choices:
- present
- absent
rc_succeeded:
        description: the rc codes list with which the conditions to succeed will be overridden
type: list
required: false
rc_failed:
        description: the rc codes list with which the conditions to fail will be overridden
type: list
required: false
adom:
description: the parameter (adom) in requested url
type: str
required: true
multicast-address:
description: the parameter (multicast-address) in requested url
type: str
required: true
firewall_multicastaddress_tagging:
description: the top level parameters set
required: false
type: dict
suboptions:
category:
type: str
description: 'Tag category.'
name:
type: str
description: 'Tagging entry name.'
tags:
description: no description
type: str
'''
EXAMPLES = '''
- hosts: fortimanager00
collections:
- fortinet.fortimanager
connection: httpapi
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
- name: Config object tagging.
fmgr_firewall_multicastaddress_tagging:
bypass_validation: False
adom: ansible
multicast-address: 'ansible-test' # name
state: present
firewall_multicastaddress_tagging:
category: 'ansible-category'
name: 'ansible-test'
tags: ''
- name: gathering fortimanager facts
hosts: fortimanager00
gather_facts: no
connection: httpapi
collections:
- fortinet.fortimanager
vars:
ansible_httpapi_use_ssl: True
ansible_httpapi_validate_certs: False
ansible_httpapi_port: 443
tasks:
   - name: retrieve all the taggings in the IPv4 multicast address
fmgr_fact:
facts:
selector: 'firewall_multicastaddress_tagging'
params:
adom: 'ansible'
multicast-address: 'ansible-test' # name
tagging: ''
'''
RETURN = '''
request_url:
description: The full url requested
returned: always
type: str
sample: /sys/login/user
response_code:
description: The status of api request
returned: always
type: int
sample: 0
response_message:
description: The descriptive message of the api response
type: str
returned: always
sample: OK.
'''
from ansible.module_utils.basic import AnsibleModule
from ansible.module_utils.connection import Connection
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import NAPIManager
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_galaxy_version
from ansible_collections.fortinet.fortimanager.plugins.module_utils.napi import check_parameter_bypass
def main():
jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/multicast-address/{multicast-address}/tagging',
'/pm/config/global/obj/firewall/multicast-address/{multicast-address}/tagging'
]
perobject_jrpc_urls = [
'/pm/config/adom/{adom}/obj/firewall/multicast-address/{multicast-address}/tagging/{tagging}',
'/pm/config/global/obj/firewall/multicast-address/{multicast-address}/tagging/{tagging}'
]
url_params = ['adom', 'multicast-address']
module_primary_key = 'name'
module_arg_spec = {
'enable_log': {
'type': 'bool',
'required': False,
'default': False
},
'forticloud_access_token': {
'type': 'str',
'required': False,
'no_log': True
},
'proposed_method': {
'type': 'str',
'required': False,
'choices': [
'set',
'update',
'add'
]
},
'bypass_validation': {
'type': 'bool',
'required': False,
'default': False
},
'workspace_locking_adom': {
'type': 'str',
'required': False
},
'workspace_locking_timeout': {
'type': 'int',
'required': False,
'default': 300
},
'rc_succeeded': {
'required': False,
'type': 'list'
},
'rc_failed': {
'required': False,
'type': 'list'
},
'state': {
'type': 'str',
'required': True,
'choices': [
'present',
'absent'
]
},
'adom': {
'required': True,
'type': 'str'
},
'multicast-address': {
'required': True,
'type': 'str'
},
'firewall_multicastaddress_tagging': {
'required': False,
'type': 'dict',
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'options': {
'category': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'name': {
'required': True,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
},
'tags': {
'required': False,
'revision': {
'6.0.0': True,
'6.2.1': True,
'6.2.3': True,
'6.2.5': True,
'6.4.0': True,
'6.4.2': True,
'6.4.5': True,
'7.0.0': True
},
'type': 'str'
}
}
}
}
params_validation_blob = []
check_galaxy_version(module_arg_spec)
module = AnsibleModule(argument_spec=check_parameter_bypass(module_arg_spec, 'firewall_multicastaddress_tagging'),
supports_check_mode=False)
fmgr = None
if module._socket_path:
connection = Connection(module._socket_path)
connection.set_option('enable_log', module.params['enable_log'] if 'enable_log' in module.params else False)
connection.set_option('forticloud_access_token',
module.params['forticloud_access_token'] if 'forticloud_access_token' in module.params else None)
fmgr = NAPIManager(jrpc_urls, perobject_jrpc_urls, module_primary_key, url_params, module, connection, top_level_schema_name='data')
fmgr.validate_parameters(params_validation_blob)
fmgr.process_curd(argument_specs=module_arg_spec)
else:
module.fail_json(msg='MUST RUN IN HTTPAPI MODE')
module.exit_json(meta=module.params)
if __name__ == '__main__':
main()
|
the-stack_0_23258 | import asyncio
import logging
import shutil
from functools import partial
from typing import List, Optional, Text, Union
from sanic import Sanic
from sanic_cors import CORS
import rasa.core
import rasa.utils
import rasa.utils.io
from rasa.core import constants, utils
from rasa.core.agent import load_agent, Agent
from rasa.core.channels import console
from rasa.core.channels.channel import InputChannel
from rasa.core.interpreter import NaturalLanguageInterpreter
from rasa.core.lock_store import LockStore
from rasa.core.tracker_store import TrackerStore
from rasa.core.utils import AvailableEndpoints, configure_file_logging
from rasa.model import get_model_subdirectories, get_model
from rasa.utils.common import update_sanic_log_level, class_from_module_path
from rasa.server import add_root_route
logger = logging.getLogger() # get the root logger
def create_http_input_channels(
channel: Optional[Text], credentials_file: Optional[Text]
) -> List["InputChannel"]:
"""Instantiate the chosen input channel."""
if credentials_file:
all_credentials = rasa.utils.io.read_config_file(credentials_file)
else:
all_credentials = {}
if channel:
if len(all_credentials) > 1:
logger.info(
"Connecting to channel '{}' which was specified by the "
"'--connector' argument. Any other channels will be ignored. "
"To connect to all given channels, omit the '--connector' "
"argument.".format(channel)
)
return [_create_single_channel(channel, all_credentials.get(channel))]
else:
return [_create_single_channel(c, k) for c, k in all_credentials.items()]
def _create_single_channel(channel, credentials):
from rasa.core.channels import BUILTIN_CHANNELS
if channel in BUILTIN_CHANNELS:
return BUILTIN_CHANNELS[channel].from_credentials(credentials)
else:
# try to load channel based on class name
try:
input_channel_class = class_from_module_path(channel)
return input_channel_class.from_credentials(credentials)
except (AttributeError, ImportError):
raise Exception(
"Failed to find input channel class for '{}'. Unknown "
"input channel. Check your credentials configuration to "
"make sure the mentioned channel is not misspelled. "
"If you are creating your own channel, make sure it "
"is a proper name of a class in a module.".format(channel)
)
def _create_app_without_api(cors: Optional[Union[Text, List[Text]]] = None):
app = Sanic(__name__, configure_logging=False)
add_root_route(app)
CORS(app, resources={r"/*": {"origins": cors or ""}}, automatic_options=True)
return app
def configure_app(
input_channels: Optional[List["InputChannel"]] = None,
cors: Optional[Union[Text, List[Text]]] = None,
auth_token: Optional[Text] = None,
enable_api: bool = True,
jwt_secret: Optional[Text] = None,
jwt_method: Optional[Text] = None,
route: Optional[Text] = "/webhooks/",
port: int = constants.DEFAULT_SERVER_PORT,
endpoints: Optional[AvailableEndpoints] = None,
log_file: Optional[Text] = None,
):
"""Run the agent."""
from rasa import server
configure_file_logging(logger, log_file)
if enable_api:
app = server.create_app(
cors_origins=cors,
auth_token=auth_token,
jwt_secret=jwt_secret,
jwt_method=jwt_method,
endpoints=endpoints,
)
else:
app = _create_app_without_api(cors)
if input_channels:
rasa.core.channels.channel.register(input_channels, app, route=route)
else:
input_channels = []
if logger.isEnabledFor(logging.DEBUG):
utils.list_routes(app)
# configure async loop logging
async def configure_async_logging():
if logger.isEnabledFor(logging.DEBUG):
rasa.utils.io.enable_async_loop_debugging(asyncio.get_event_loop())
app.add_task(configure_async_logging)
if "cmdline" in {c.name() for c in input_channels}:
async def run_cmdline_io(running_app: Sanic):
"""Small wrapper to shut down the server once cmd io is done."""
await asyncio.sleep(1) # allow server to start
await console.record_messages(
server_url=constants.DEFAULT_SERVER_FORMAT.format("http", port)
)
logger.info("Killing Sanic server now.")
            running_app.stop()  # kill the sanic server
app.add_task(run_cmdline_io)
return app
def serve_application(
model_path: Optional[Text] = None,
channel: Optional[Text] = None,
port: int = constants.DEFAULT_SERVER_PORT,
credentials: Optional[Text] = None,
cors: Optional[Union[Text, List[Text]]] = None,
auth_token: Optional[Text] = None,
enable_api: bool = True,
jwt_secret: Optional[Text] = None,
jwt_method: Optional[Text] = None,
endpoints: Optional[AvailableEndpoints] = None,
remote_storage: Optional[Text] = None,
log_file: Optional[Text] = None,
ssl_certificate: Optional[Text] = None,
ssl_keyfile: Optional[Text] = None,
ssl_password: Optional[Text] = None,
):
from rasa import server
if not channel and not credentials:
channel = "cmdline"
input_channels = create_http_input_channels(channel, credentials)
app = configure_app(
input_channels,
cors,
auth_token,
enable_api,
jwt_secret,
jwt_method,
port=port,
endpoints=endpoints,
log_file=log_file,
)
ssl_context = server.create_ssl_context(ssl_certificate, ssl_keyfile, ssl_password)
protocol = "https" if ssl_context else "http"
logger.info(
"Starting Rasa server on "
"{}".format(constants.DEFAULT_SERVER_FORMAT.format(protocol, port))
)
app.register_listener(
partial(load_agent_on_start, model_path, endpoints, remote_storage),
"before_server_start",
)
async def clear_model_files(app: Sanic, _loop: Text) -> None:
if app.agent.model_directory:
shutil.rmtree(app.agent.model_directory)
app.register_listener(clear_model_files, "after_server_stop")
update_sanic_log_level(log_file)
app.run(host="0.0.0.0", port=port, ssl=ssl_context)
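# Example (illustrative sketch, not part of the original module; the paths and channel
# below are hypothetical -- "rest" is one of Rasa's built-in input channels):
#
#     serve_application(
#         model_path="models/20190906-123456.tar.gz",
#         channel="rest",
#         credentials="credentials.yml",
#         port=constants.DEFAULT_SERVER_PORT,
#         endpoints=AvailableEndpoints.read_endpoints("endpoints.yml"),
#     )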
# noinspection PyUnusedLocal
async def load_agent_on_start(
model_path: Text,
endpoints: AvailableEndpoints,
remote_storage: Optional[Text],
app: Sanic,
loop: Text,
):
"""Load an agent.
    Scheduled to run on server start
(hence the `app` and `loop` arguments)."""
import rasa.core.brokers.utils as broker_utils
try:
with get_model(model_path) as unpacked_model:
_, nlu_model = get_model_subdirectories(unpacked_model)
_interpreter = NaturalLanguageInterpreter.create(nlu_model, endpoints.nlu)
except Exception:
logger.debug("Could not load interpreter from '{}'.".format(model_path))
_interpreter = None
_broker = broker_utils.from_endpoint_config(endpoints.event_broker)
_tracker_store = TrackerStore.find_tracker_store(
None, endpoints.tracker_store, _broker
)
_lock_store = LockStore.find_lock_store(endpoints.lock_store)
model_server = endpoints.model if endpoints and endpoints.model else None
app.agent = await load_agent(
model_path,
model_server=model_server,
remote_storage=remote_storage,
interpreter=_interpreter,
generator=endpoints.nlg,
tracker_store=_tracker_store,
lock_store=_lock_store,
action_endpoint=endpoints.action,
)
if not app.agent:
logger.warning(
"Agent could not be loaded with the provided configuration. "
"Load default agent without any model."
)
app.agent = Agent(
interpreter=_interpreter,
generator=endpoints.nlg,
tracker_store=_tracker_store,
action_endpoint=endpoints.action,
model_server=model_server,
remote_storage=remote_storage,
)
return app.agent
if __name__ == "__main__":
raise RuntimeError(
"Calling `rasa.core.run` directly is no longer supported. "
"Please use `rasa run` to start a Rasa server or `rasa shell` to chat with "
"your bot on the command line."
)
|
the-stack_0_23261 | ####################################################################################################
#
# Project: Embedded Learning Library (ELL)
# File: cntk_layers.py (importers)
# Authors: Byron Changuion, Lisa Ong
#
# Requires: Python 3.x, cntk-2.4
#
####################################################################################################
"""Imports CNTK layers to ELL equivalents"""
import logging
from cntk.initializer import glorot_uniform, he_normal
from cntk.layers import Convolution, MaxPooling, AveragePooling, Dropout, BatchNormalization, Dense
import cntk.layers.blocks
from cntk.layers.typing import *
from cntk.ops import *
from cntk import load_model
from cntk.logging.graph import *
from custom_functions import CustomSign, BinaryConvolution
import ell
import cntk_converters as converters
import cntk_utilities as utilities
from custom_functions import BinaryConvolution, CustomSign
_logger = logging.getLogger(__name__)
class BaseLayer:
"""Base class with common layer processing functionality"""
def __init__(self, layer):
self.layer = layer
self.layer.ell_inputPaddingParameters = self.get_input_padding_parameters()
self.additional_layer_text = None
if not hasattr(self, 'input_shape'):
if (len(self.layer.arguments) > 0 and len(self.layer.arguments[0].shape) > 0):
self.input_shape = self.layer.arguments[0].shape
# else, assume derived classes have already initialized the input shape
if hasattr(self, 'input_shape'):
self.layer.ell_inputShape = utilities.get_adjusted_shape(
self.input_shape, self.layer.ell_inputPaddingParameters)
else:
raise RuntimeError(
"Could not initialize input_shape") # coding error
def __repr__(self):
"""Prints summary info about this layer.
Derived classes may override this.
"""
layer_prefix = self.op_name
if self.additional_layer_text:
layer_prefix = '{} ({})'.format(layer_prefix, self.additional_layer_text)
return '{} : {} -> {} | input padding {} output padding {}'.format(layer_prefix, utilities.ell_shape_to_string(self.layer.ell_inputShape),
utilities.ell_shape_to_string(self.layer.ell_outputShape),
str(self.layer.ell_inputPaddingParameters.paddingSize),
str(self.layer.ell_outputPaddingParameters.paddingSize))
def get_input_padding_parameters(self):
"""Returns the default ell.neural.PaddingParameters for a layer's input.
Derived classes may override this.
"""
return ell.neural.PaddingParameters(ell.neural.PaddingScheme.zeros, 0)
def set_output_characteristics(self, nextLayer):
"""Sets the output characteristics based on the next layer"""
if nextLayer:
self.layer.ell_outputPaddingParameters = nextLayer.layer.ell_inputPaddingParameters
self.layer.ell_outputShape = utilities.get_adjusted_shape(
self.layer.output.shape, self.layer.ell_outputPaddingParameters)
self.layer.ell_outputShapeMinusPadding = utilities.get_shape(
self.layer.output.shape)
else:
# last layer
self.layer.ell_outputPaddingParameters = ell.neural.NoPadding()
self.layer.ell_outputShape = utilities.get_adjusted_shape(
self.layer.output.shape, ell.neural.NoPadding())
self.layer.ell_outputShapeMinusPadding = self.layer.ell_outputShape
def process(self, ellLayers):
"""Appends the ELL equivalent of the current layer to ellLayers.
Derived classes must override this.
"""
raise NotImplementedError(
"Error: subclasses must override this method")
def clone_cntk_layer(self, feature):
"""Returns a shallow clone of the CNTK layer for operating on the given feature (input-variable) """
raise NotImplementedError(
"Error: subclasses must override this method")
class DenseLayer(BaseLayer):
"""Logic for converting a CNTK Dense layer to ELL"""
def __init__(self, layer):
if not layer.is_block:
raise ValueError("Dense node is not a block node")
self.op_name = 'Dense'
super().__init__(layer)
internalNodes = utilities.get_model_layers(self.layer.block_root)
self.additional_layer_text = utilities.get_cntk_activation_name(internalNodes)
def process(self, ellLayers):
"""Appends the ELL equivalent of the current layer to ellLayers."""
# Note that a single CNTK Dense function block is equivalent to the following 3 ELL layers:
# - FullyConnectedLayer
# - BiasLayer
# - ActivationLayer. This layer is sometimes missing, depending on activation type.
#
# Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
# padding requirements.
weightsParameter = utilities.find_parameter_by_name(
self.layer.parameters, 'W', 0)
biasParameter = utilities.find_parameter_by_name(
self.layer.parameters, 'b', 1)
weightsTensor = converters.get_tensor_from_cntk_dense_weight_parameter(
weightsParameter)
biasVector = converters.get_vector_from_cntk_trainable_parameter(
biasParameter)
# Create the ell.neural.LayerParameters for the various ELL layers
firstLayerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShapeMinusPadding,
ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
middleLayerParameters = ell.neural.LayerParameters(self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(),
self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
lastLayerParameters = ell.neural.LayerParameters(self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(),
self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
layerParameters = firstLayerParameters
internalNodes = utilities.get_model_layers(self.layer.block_root)
activationType = utilities.get_ell_activation_type(internalNodes)
# Create the ELL fully connected layer
ellLayers.append(ell.neural.FullyConnectedLayer(
layerParameters, weightsTensor))
# Create the ELL bias layer
if (utilities.is_softmax_activation(internalNodes) or activationType != None):
layerParameters = middleLayerParameters
else:
layerParameters = lastLayerParameters
ellLayers.append(ell.neural.BiasLayer(layerParameters, biasVector))
# Create the ELL activation layer
if (utilities.is_softmax_activation(internalNodes) or activationType != None):
layerParameters = lastLayerParameters
# Special case: if this is softmax activation, create an ELL Softmax layer.
# Else, insert an ELL ActivationLayer
if (utilities.is_softmax_activation(internalNodes)):
ellLayers.append(ell.neural.SoftmaxLayer(layerParameters))
else:
if (activationType != None):
ellLayers.append(ell.neural.ActivationLayer(
layerParameters, activationType))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
weightsParameter = utilities.find_parameter_by_name(
self.layer.parameters, 'W', 0)
biasParameter = utilities.find_parameter_by_name(
self.layer.parameters, 'b', 1)
internalNodes = utilities.get_model_layers(self.layer.block_root)
activationType = utilities.get_cntk_activation_op(internalNodes)
includeBias = biasParameter is not None
layer = Dense(self.layer.shape, activation=activationType, bias=includeBias)(feature)
layer.parameters[0].value = weightsParameter.value
if includeBias:
layer.parameters[1].value = biasParameter.value
return layer
class BinaryConvolutionLayer(BaseLayer):
"""Logic for converting a CNTK Binary Convolution layer to ELL"""
def __init__(self, layer):
if layer.is_block:
raise ValueError(
"Error: Binary Convolution layer node is in block node")
self.op_name = 'BinaryConvolution'
# Convolution function (ASSUME part of a Binary Convolution layer)
# - Weights is 4-dimensional (filters, channels, rows, columns)
# - Input is 3-dimensional (channels, rows, columns)
# - Bias is a separate layer and not processed by this class
# - Activation is a separate layer and not processed by this class
if len(layer.inputs[0].shape) == 3:
self.input_parameter = layer.inputs[0]
weights_input = layer.inputs[1]
else:
self.input_parameter = layer.inputs[1]
weights_input = layer.inputs[0]
self.weights_parameter = utilities.find_parameter_by_name(
weights_input.owner.parameters, 'filter')
self.attributes = layer.attributes
# Determine the binarization method used for weights based on the
# name attributes of the UserFunctions defined in the custom_functions.py
# used during training.
# Until we can find a better heuristic, assume that the custom function names
# don't change across models.
function_name = weights_input.owner.name
if function_name == 'Sign':
self.convolution_method = ell.neural.BinaryConvolutionMethod.bitwise
self.weights_scale = ell.neural.BinaryWeightsScale.none
else:
raise ValueError(
"Error: unrecognized binarization function: " + function_name)
self.input_shape = self.input_parameter.shape
super().__init__(layer)
def get_input_padding_parameters(self):
"""Returns the ell.neural.PaddingParameters for a layer's input."""
paddingScheme = ell.neural.PaddingScheme.zeros
padding = 0
receptiveField = self.weights_parameter.shape[2]
if ('autoPadding' in self.attributes):
if (self.attributes['autoPadding'][1] == True):
padding = int((receptiveField - 1) / 2)
else:
padding = self.attributes['upperPad'][0]
else:
padding = self.attributes['upperPad'][0]
return ell.neural.PaddingParameters(paddingScheme, padding)
def process(self, ellLayers):
"""Helper to convert a binary convolutional layer to the ELL equivalent."""
# A CNTK Binary Convolutional layer is a single function.
# Bias and Activation are separate layers (processed outside of this class).
weightsTensor = converters.get_tensor_from_cntk_convolutional_weight_parameter(
self.weights_parameter)
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Fill in the convolutional parameters
weightsShape = self.weights_parameter.shape
receptiveField = weightsShape[2]
stride = self.attributes['strides'][2]
convolutionalParameters = ell.neural.BinaryConvolutionalParameters(
receptiveField, stride, self.convolution_method, self.weights_scale)
ellLayers.append(ell.neural.BinaryConvolutionalLayer(
layerParameters, convolutionalParameters, weightsTensor))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
weightsShape = self.weights_parameter.shape # filters, channels, rows, columns
pad = self.attributes['autoPadding'][0] or (
self.attributes['autoPadding'][1] and self.attributes['autoPadding'][2])
# Bias is a separate layer and not processed by this class
# Activation is a separate layer and not processed by this class
x = CustomSign(feature)
return BinaryConvolution((weightsShape[2], weightsShape[3]), num_filters=weightsShape[0],
channels=weightsShape[1], init=self.weights_parameter.value,
pad=pad, activation=False, bias=False, init_bias=0)(x)
class ConvolutionLayer(BaseLayer):
"""Logic for converting a CNTK Convolution layer to ELL"""
def __init__(self, layer):
if not layer.is_block:
raise ValueError(
"Error: Convolution layer node is not in block node")
self.op_name = 'Convolution'
# initialize weights and input characteristics
self.input_parameter = layer.arguments[0]
self.weights_parameter = utilities.find_parameter_by_name(
layer.parameters, 'W', 0)
self.bias_parameter = utilities.find_parameter_by_name(
layer.parameters, 'b', 1)
# Get the hyper-parameters for the convolution.
# They are on the convolution node inside this block.
convolution_nodes = depth_first_search(
layer.block_root, lambda x: utilities.op_name_equals(x, 'Convolution'))
self.attributes = convolution_nodes[0].attributes
self.convolution_method = 0
self.input_shape = self.input_parameter.shape
super().__init__(layer)
nodes = utilities.get_model_layers(layer.block_root)
if utilities.is_softmax_activation(nodes):
self.additional_layer_text = 'softmax'
else:
activation_type = utilities.get_cntk_activation_name(nodes)
if activation_type:
self.additional_layer_text = activation_type
def get_input_padding_parameters(self):
"""Returns the ell.neural.PaddingParameters for a layer's input."""
paddingScheme = ell.neural.PaddingScheme.zeros
padding = 0
receptiveField = self.weights_parameter.shape[2]
if ('autoPadding' in self.attributes):
            if self.attributes['autoPadding'][1]:
padding = int((receptiveField - 1) / 2)
else:
padding = self.attributes['upperPad'][0]
else:
padding = self.attributes['upperPad'][0]
return ell.neural.PaddingParameters(paddingScheme, padding)
def process(self, ellLayers):
"""Helper to convert a convolutional layer to the ELL equivalent."""
# Note that a single CNTK Convolutional function block is equivalent to the following 3 ELL layers:
# - ConvolutionalLayer
# - BiasLayer. This layer is sometimes missing, depending on whether bias is included.
# - ActivationLayer. This layer is sometimes missing, depending on activation type.
#
# Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
# padding requirements.
weightsTensor = converters.get_tensor_from_cntk_convolutional_weight_parameter(
self.weights_parameter)
internalNodes = utilities.get_model_layers(self.layer.block_root)
activationType = utilities.get_ell_activation_type(internalNodes)
isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
        hasBias = self.bias_parameter is not None
# Create the ell.neural.LayerParameters for the various ELL layers
onlyLayerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
firstLayerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShapeMinusPadding,
ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
        middleLayerParameters = ell.neural.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
        lastLayerParameters = ell.neural.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Choose the layer parameters for the convolutional layer. If there is
# bias or activation, then the convolution is the first of two or more,
# otherwise it is the only layer
if hasActivation or hasBias:
layerParameters = firstLayerParameters
else:
layerParameters = onlyLayerParameters
# Fill in the convolutional parameters
weightsShape = self.weights_parameter.shape
receptiveField = weightsShape[2]
stride = self.attributes['strides'][2]
filterBatchSize = layerParameters.outputShape.channels
convolutionalParameters = ell.neural.ConvolutionalParameters(
receptiveField, stride, self.convolution_method, filterBatchSize)
# Create the ELL convolutional layer
ellLayers.append(ell.neural.ConvolutionalLayer(
layerParameters, convolutionalParameters, weightsTensor))
# Create the ELL bias layer
if hasBias:
if hasActivation:
layerParameters = middleLayerParameters
else:
layerParameters = lastLayerParameters
biasVector = converters.get_vector_from_cntk_trainable_parameter(
self.bias_parameter)
ellLayers.append(ell.neural.BiasLayer(layerParameters, biasVector))
# Create the ELL activation layer
if hasActivation:
layerParameters = lastLayerParameters
# Special case: if this is softmax activation, create an ELL Softmax layer.
# Else, insert an ELL ActivationLayer
if (isSoftmaxActivation):
ellLayers.append(ell.neural.SoftmaxLayer(layerParameters))
else:
ellLayers.append(ell.neural.ActivationLayer(
layerParameters, activationType))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
nodes = utilities.get_model_layers(self.layer.block_root)
activation = utilities.get_cntk_activation_op(nodes)
weightsShape = self.weights_parameter.shape
pad = self.attributes['autoPadding'][0] or (
self.attributes['autoPadding'][1] and self.attributes['autoPadding'][2])
bias = (self.bias_parameter is not None)
layer = Convolution((weightsShape[2], weightsShape[3]), weightsShape[0],
pad=pad, activation=activation, bias=bias)(feature)
layer.parameters[0].value = self.weights_parameter.value
if bias:
layer.parameters[1].value = self.bias_parameter.value
return layer
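# Illustrative decomposition (assumed, matching ConvolutionLayer.process above):
# a CNTK Convolution block with bias and a ReLU activation, whose weights W have
# shape (filters, channels, rows, columns), is emitted as three ELL layers in order:
#   ConvolutionalLayer(W) -> BiasLayer(b) -> ActivationLayer(relu)
# where only the first layer consumes the block's input padding and only the last
# layer carries the output padding expected by the next layer.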
class LinearLayer(BaseLayer):
"""Logic for converting a CNTK Linear layer to ELL"""
def __init__(self, layer):
self.op_name = 'Linear'
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Note that a single CNTK Linear function block is equivalent to the following 3 ELL layers:
# - FullyConnectedLayer
# - BiasLayer
# - ActivationLayer. This layer is sometimes missing, depending on activation type.
#
# Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
# padding requirements.
weightsParameter = utilities.find_parameter_by_name(self.layer.parameters, 'W', 0)
biasParameter = utilities.find_parameter_by_name(self.layer.parameters, 'b', 1)
weightsTensor = converters.get_tensor_from_cntk_dense_weight_parameter(weightsParameter)
biasVector = converters.get_vector_from_cntk_trainable_parameter(biasParameter)
# Create the ell.neural.LayerParameters for the various ELL layers
firstLayerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
        middleLayerParameters = ell.neural.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(),
            self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
        lastLayerParameters = ell.neural.LayerParameters(
            self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(),
            self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
layerParameters = firstLayerParameters
internalNodes = utilities.get_model_layers(self.layer.block_root)
activationType = utilities.get_ell_activation_type(internalNodes)
# Create the ELL fully connected layer
ellLayers.append(ell.neural.FullyConnectedLayer(
layerParameters, weightsTensor))
# Create the ELL bias layer
isSoftmaxActivation = utilities.is_softmax_activation(internalNodes)
        hasActivation = isSoftmaxActivation or activationType is not None
if (hasActivation):
layerParameters = middleLayerParameters
else:
layerParameters = lastLayerParameters
ellLayers.append(ell.neural.BiasLayer(layerParameters, biasVector))
# Create the ELL activation layer
if (hasActivation):
layerParameters = lastLayerParameters
# Special case: if this is softmax activation, create an ELL Softmax layer.
# Else, insert an ELL ActivationLayer
if (isSoftmaxActivation):
ellLayers.append(ell.neural.SoftmaxLayer(layerParameters))
else:
ellLayers.append(ell.neural.ActivationLayer(
layerParameters, activationType))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
raise NotImplementedError("Error: not yet implemented")
class ElementTimesLayer(BaseLayer):
"""Logic for converting a CNTK ElementTimes layer to ELL"""
def __init__(self, layer):
if (len(layer.parameters) != 1 and len(layer.constants) != 1):
raise ValueError(
"Skipping ElementTimes layer due to dimensions of Constants and Parameters")
self.op_name = 'ElementTimes'
if (len(layer.constants) > 0):
self.scale = layer.constants[0]
elif (len(layer.parameters) > 0):
self.scale = layer.parameters[0]
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create ELL scaling layer
if (self.scale.value.size == 1):
scalesVector = converters.get_vector_from_constant(
self.scale.value, layerParameters.outputShape.channels)
else:
scalesVector = converters.get_vector_from_cntk_array(
self.scale.value)
ellLayers.append(ell.neural.ScalingLayer(
layerParameters, scalesVector))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
x = reshape(feature, (self.layer.ell_outputShape.channels,))
return element_times(x, self.scale)
class BasePoolingLayer(BaseLayer):
"""Common logic for converting a Pooling layer to ELL"""
def __init__(self, layer):
if layer.is_block:
self.attributes = layer.block_root.attributes
else:
self.attributes = layer.attributes
super().__init__(layer)
def get_input_padding_parameters(self):
"""Returns the ell.neural.PaddingParameters for a layer's input."""
padding = 0
if ('autoPadding' in self.attributes):
            if self.attributes['autoPadding'][0]:
padding = int((self.attributes['poolingWindowShape'][0] - 1) / 2)
else:
padding = self.attributes['upperPad'][0]
else:
padding = self.attributes['upperPad'][0]
return ell.neural.PaddingParameters(self.padding_scheme, padding)
def get_cntk_parameters(self):
pad = False
if ('autoPadding' in self.attributes and True in self.attributes['autoPadding']):
pad = True
poolingSize = self.attributes['poolingWindowShape']
filterShape = (poolingSize[0], poolingSize[1])
stride = self.attributes['strides'][0]
return pad, filterShape, stride
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Fill in the pooling parameters
poolingSize = self.attributes['poolingWindowShape'][0]
stride = self.attributes['strides'][0]
poolingParameters = ell.neural.PoolingParameters(poolingSize, stride)
# Create the ELL pooling layer
ellLayers.append(ell.neural.PoolingLayer(
layerParameters, poolingParameters, self.pooling_type))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
raise NotImplementedError(
"Error: subclasses must override this method")
class MaxPoolingLayer(BasePoolingLayer):
"""Logic for converting a CNTK MaxPooling layer to ELL"""
def __init__(self, layer):
self.op_name = 'MaxPooling'
self.padding_scheme = ell.neural.PaddingScheme.min
self.pooling_type = ell.neural.PoolingType.max
super().__init__(layer)
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
pad, filterShape, stride = self.get_cntk_parameters()
return MaxPooling(filterShape, strides=(stride, stride), pad=pad)(feature)
class AveragePoolingLayer(BasePoolingLayer):
"""Logic for converting a CNTK AveragePooling layer to ELL"""
def __init__(self, layer):
self.op_name = 'AveragePooling'
self.padding_scheme = ell.neural.PaddingScheme.zeros
self.pooling_type = ell.neural.PoolingType.mean
super().__init__(layer)
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop"""
pad, filterShape, stride = self.get_cntk_parameters()
return AveragePooling(filterShape, strides=(stride, stride), pad=pad)(feature)
class PoolingLayer(BaseLayer):
"""Logic for converting a CNTK Pooling layer to ELL"""
def __init__(self, layer):
self.op_name = 'Pooling'
super().__init__(layer)
if (layer.attributes['poolingType'] == PoolingType_Max):
self.actual_layer = MaxPoolingLayer(layer)
else:
self.actual_layer = AveragePoolingLayer(layer)
def __repr__(self):
return self.actual_layer.__repr__()
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
self.actual_layer.process(ellLayers)
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop"""
return self.actual_layer.clone_cntk_layer(feature)
class ActivationLayer(BaseLayer):
"""Logic for converting a CNTK Activation layer to ELL"""
def __init__(self, layer):
if not layer.is_block:
raise ValueError("Activation node is not a block node")
self.op_name = 'Activation'
super().__init__(layer)
internal_nodes = utilities.get_model_layers(self.layer.block_root)
self.activation_type = utilities.get_ell_activation_type(internal_nodes)
self.additional_layer_text = utilities.get_cntk_activation_name(internal_nodes)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the ELL activation layer
ellLayers.append(ell.neural.ActivationLayer(
layerParameters, self.activation_type))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
if self.activation_type == ell.neural.ActivationType.sigmoid:
return sigmoid(feature)
elif self.activation_type == ell.neural.ActivationType.leaky:
return leaky_relu(feature)
else:
return relu(feature)
class ReLULayer(BaseLayer):
"""Logic for converting a CNTK ReLU layer to ELL"""
def __init__(self, layer):
self.op_name = 'ReLU'
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the ELL activation layer
ellLayers.append(ell.neural.ActivationLayer(
layerParameters, ell.neural.ActivationType.relu))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
return relu(feature)
class LeakyReLULayer(BaseLayer):
"""Logic for converting a CNTK LeakyReLU layer to ELL"""
def __init__(self, layer):
self.op_name = 'LeakyReLU'
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the ELL activation layer
ellLayers.append(ell.neural.ActivationLayer(
layerParameters, ell.neural.ActivationType.leaky))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
return leaky_relu(feature)
class PReLULayer(BaseLayer):
"""Logic for converting a CNTK PReLU layer to ELL"""
def __init__(self, layer):
self.op_name = 'PReLU'
super().__init__(layer)
self.prelu_parameter = utilities.find_parameter_by_name(
self.layer.parameters, 'prelu', 0)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
preluTensor = converters.get_tensor_from_cntk_dense_weight_parameter(
self.prelu_parameter)
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the ELL PReLU activation layer
ellLayers.append(ell.neural.PReLUActivationLayer(
layerParameters, preluTensor))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
return param_relu(self.prelu_parameter, feature)
class SoftmaxLayer(BaseLayer):
"""Logic for converting a CNTK Softmax layer to ELL"""
def __init__(self, layer):
self.op_name = 'Softmax'
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
if (self.layer.op_name == 'CrossEntropyWithSoftmax'):
# ugly hack for CrossEntropyWithSoftmax
# CrossEntropyWithSoftmax outputs to a Tensor[1], but we just need Softmax
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_inputShape,
self.layer.ell_inputPaddingParameters, ell.nodes.PortType.smallReal)
else:
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the ELL softmax layer
ellLayers.append(ell.neural.SoftmaxLayer(layerParameters))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
return softmax(feature)
class BatchNormalizationLayer(BaseLayer):
"""Logic for converting a CNTK BatchNormalization layer to ELL"""
def __init__(self, layer):
self.op_name = 'BatchNormalization'
self.scale = utilities.find_parameter_by_name(
layer.parameters, 'scale', 0)
self.bias = utilities.find_parameter_by_name(
layer.parameters, 'bias', 1)
self.mean = utilities.find_parameter_by_name(
layer.constants, 'aggregate_mean', 0)
self.variance = utilities.find_parameter_by_name(
layer.constants, 'aggregate_variance', 1)
# The default CNTK epsilon
self.epsilon = 1e-5
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Note that a single CNTK Batch Normalization layer is equivalent to the following 3 ELL layers:
# - BatchNormalizationLayer
# - ScalingLayer
# - BiasLayer
#
# Therefore, make sure the output padding characteristics of the last layer reflect the next layer's
# padding requirements.
scaleVector = converters.get_vector_from_cntk_trainable_parameter(
self.scale)
biasVector = converters.get_vector_from_cntk_trainable_parameter(
self.bias)
meanVector = converters.get_vector_from_cntk_trainable_parameter(
self.mean)
varianceVector = converters.get_vector_from_cntk_trainable_parameter(
self.variance)
# Create the ell.neural.LayerParameters for the various ELL layers
firstLayerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters,
self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
middleLayerParameters = ell.neural.LayerParameters(self.layer.ell_outputShapeMinusPadding,
ell.neural.NoPadding(), self.layer.ell_outputShapeMinusPadding, ell.neural.NoPadding(), ell.nodes.PortType.smallReal)
lastLayerParameters = ell.neural.LayerParameters(self.layer.ell_outputShapeMinusPadding,
ell.neural.NoPadding(), self.layer.ell_outputShape, self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the layers
ellLayers.append(ell.neural.BatchNormalizationLayer(
firstLayerParameters, meanVector, varianceVector, self.epsilon, ell.neural.EpsilonSummand.variance))
ellLayers.append(ell.neural.ScalingLayer(
middleLayerParameters, scaleVector))
ellLayers.append(ell.neural.BiasLayer(lastLayerParameters, biasVector))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
scale = parameter(shape=self.scale.shape, init=self.scale.value, name='scale')
bias = parameter(shape=self.scale.shape, init=self.bias.value, name='bias')
run_mean = constant(shape=self.scale.shape, value=self.mean.value, name='aggregate_mean')
run_variance = constant(shape=self.scale.shape, value=self.variance.value, name='aggregate_variance')
run_count = constant(0, shape=(), name='aggregate_count')
return batch_normalization(feature, scale, bias, run_mean, run_variance, running_count=run_count, spatial=True)
class BiasLayer(BaseLayer):
"""Logic for converting a CNTK Plus layer to ELL"""
def __init__(self, layer):
if (len(layer.parameters) != 1):
raise ValueError(
"Only processing Plus functions that act as bias layers")
self.op_name = 'Plus'
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
biasVector = converters.get_vector_from_cntk_trainable_parameter(
self.layer.parameters[0])
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
# Create the ELL bias layer
ellLayers.append(ell.neural.BiasLayer(layerParameters, biasVector))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
return plus(self.layer.parameters[0], feature)
class NegativeBiasLayer(BaseLayer):
"""Logic for converting a CNTK Minus layer to ELL"""
def __init__(self, layer):
if (len(layer.constants) != 1 and layer.constants[0].value.size != 1):
raise ValueError(
"Skipping Minus function due to dimensions of Constant")
# TODO: This logic is very fragile, we may want to have a model
# schema for labeling inputs, nodes, and outputs
if (layer.output.name != 'mean_removed_input'):
raise ValueError(
"Only processing Minus functions that remove input mean")
self.op_name = 'Minus'
super().__init__(layer)
def process(self, ellLayers):
"""Appends the ELL representation of the current layer to ellLayers."""
# Create the ell.neural.LayerParameters for the ELL layer
layerParameters = ell.neural.LayerParameters(
self.layer.ell_inputShape, self.layer.ell_inputPaddingParameters, self.layer.ell_outputShape,
self.layer.ell_outputPaddingParameters, ell.nodes.PortType.smallReal)
bias = -1.0 * self.layer.constants[0].value
if len(bias.shape) == 0:
biasVector = converters.get_vector_from_constant(bias, layerParameters.outputShape.channels)
else:
biasVector = converters.get_vector_from_cntk_array(bias)
# Create the ELL bias layer
ellLayers.append(ell.neural.BiasLayer(layerParameters, biasVector))
def clone_cntk_layer(self, feature):
"""Returns a clone of the CNTK layer for per-layer forward prop validation"""
return minus(feature, constant(self.layer.constants[0].value), name=self.layer.output.name)
class LayerFactory():
@staticmethod
def get_layer_object(cntkLayer):
try:
if (cntkLayer.op_name == 'Activation'):
return ActivationLayer(cntkLayer)
elif (cntkLayer.op_name == 'AveragePooling'):
return AveragePoolingLayer(cntkLayer)
elif (cntkLayer.op_name == 'BatchNormalization'):
return BatchNormalizationLayer(cntkLayer)
elif (cntkLayer.op_name == 'Convolution'):
if (cntkLayer.is_block):
return ConvolutionLayer(cntkLayer)
else:
return BinaryConvolutionLayer(cntkLayer)
elif (cntkLayer.op_name == 'Dense'):
return DenseLayer(cntkLayer)
elif (cntkLayer.op_name == 'ElementTimes'):
return ElementTimesLayer(cntkLayer)
elif (cntkLayer.op_name == 'LeakyReLU'):
return LeakyReLULayer(cntkLayer)
elif (cntkLayer.op_name == 'linear'): # Note: this op_name is lowercase
return LinearLayer(cntkLayer)
elif (cntkLayer.op_name == 'MaxPooling'):
return MaxPoolingLayer(cntkLayer)
elif (cntkLayer.op_name == 'Minus'):
return NegativeBiasLayer(cntkLayer)
elif (cntkLayer.op_name == 'Plus'):
return BiasLayer(cntkLayer)
elif (cntkLayer.op_name == 'Pooling'):
return PoolingLayer(cntkLayer)
elif (cntkLayer.op_name == 'PReLU'):
return PReLULayer(cntkLayer)
elif (cntkLayer.op_name == 'ReLU'):
return ReLULayer(cntkLayer)
elif (cntkLayer.op_name == 'Softmax'):
return SoftmaxLayer(cntkLayer)
else:
                _logger.warning("Will not process " + cntkLayer.op_name +
                                " - skipping this layer as irrelevant.")
except (ValueError, AttributeError) as e:
# raised if a layer contains invalid characteristics
            _logger.info("\nWill not process " + cntkLayer.op_name + " - " + str(e))
return None
@staticmethod
def has_inputs(cntkLayer):
return ((len(cntkLayer.arguments) > 0 and len(cntkLayer.arguments[0].shape) > 0) or
# special case for Binary Convolution
(cntkLayer.op_name == 'Convolution' and len(cntkLayer.inputs) > 0 and
len(cntkLayer.inputs[0].shape) > 0))
def get_filtered_layers_list(modelLayers, maxLayerCount=None):
"""Returns a relevant list of CNTK layers and layer objects
"""
# Go through the layers and append layer objects to the relevantLayers list
relevantLayers = []
lastSoftmaxLayer = None
for currentLayer in modelLayers:
if (isinstance(currentLayer, cntk_py.Function)):
if (LayerFactory.has_inputs(currentLayer)):
layerObject = LayerFactory.get_layer_object(currentLayer)
if (layerObject is not None):
relevantLayers.append(layerObject)
elif currentLayer.op_name == 'CrossEntropyWithSoftmax':
# ugly hack for CrossEntropyWithSoftmax
# CrossEntropyWithSoftmax pops up in the beginning of the layers list
# because the input is connected to it (it's used for evaluating training)
lastSoftmaxLayer = SoftmaxLayer(currentLayer)
else:
_logger.warning("Will not process " + currentLayer.op_name +
" - empty input shape.")
if (lastSoftmaxLayer is not None):
# Retroactively insert a softmax layer
relevantLayers.append(lastSoftmaxLayer)
if (maxLayerCount is not None):
maxLayerCount = min(maxLayerCount, len(relevantLayers))
relevantLayers = relevantLayers[0:maxLayerCount]
# Go through the layers and set the output characteristics:
# - padding parameters for output, based on the next layer's input
# - output shape, which is adjusted to include the padding
currentLayer = None
for i in range(len(relevantLayers)):
currentLayer = relevantLayers[i]
if (i < (len(relevantLayers) - 1)):
# Use the next layer's input characteristics to set the output for this layer
nextLayer = relevantLayers[i + 1]
currentLayer.set_output_characteristics(nextLayer)
else:
# This is the last layer, so the output characteristics are known
currentLayer.set_output_characteristics(None)
_logger.info(currentLayer)
return relevantLayers
def convert_cntk_layers_to_ell_layers(layersToConvert):
"""Walks a list of CNTK layers and returns a list of ELL Layer objects that is used to construct a Neural Network Predictor"""
ellLayers = []
for layerObject in layersToConvert:
layerObject.process(ellLayers)
return ellLayers
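# Minimal end-to-end usage sketch (hypothetical driver code, not part of this
# module). It assumes a CNTK model object `cntk_model` loaded elsewhere; the
# predictor constructor name is an assumption about the ELL API.
#
#   model_layers = utilities.get_model_layers(cntk_model)
#   layers_to_convert = get_filtered_layers_list(model_layers)
#   ell_layers = convert_cntk_layers_to_ell_layers(layers_to_convert)
#   predictor = ell.neural.NeuralNetworkPredictor(ell_layers)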
|
the-stack_0_23263 | # Copyright 2020 Google LLC. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Python source which includes pipeline functions for the Penguins dataset.
The utilities in this file are used to build a model with native Keras.
This module file will be used in the Transform, Tuner and generic Trainer
components.
"""
from typing import List, Text
import absl
import kerastuner
import tensorflow as tf
from tensorflow import keras
import tensorflow_transform as tft
from tfx.components.trainer.fn_args_utils import DataAccessor
from tfx.components.trainer.fn_args_utils import FnArgs
from tfx.components.tuner.component import TunerFnResult
from tfx_bsl.tfxio import dataset_options
_FEATURE_KEYS = [
'culmen_length_mm', 'culmen_depth_mm', 'flipper_length_mm', 'body_mass_g'
]
_LABEL_KEY = 'species'
# The Penguin dataset has 342 records, and is divided into train and eval
# splits in a 2:1 ratio.
_TRAIN_DATA_SIZE = 228
_EVAL_DATA_SIZE = 114
_TRAIN_BATCH_SIZE = 20
_EVAL_BATCH_SIZE = 10
def _transformed_name(key):
return key + '_xf'
def _get_serve_tf_examples_fn(model, tf_transform_output):
"""Returns a function that parses a serialized tf.Example."""
model.tft_layer = tf_transform_output.transform_features_layer()
@tf.function
def serve_tf_examples_fn(serialized_tf_examples):
"""Returns the output to be used in the serving signature."""
feature_spec = tf_transform_output.raw_feature_spec()
feature_spec.pop(_LABEL_KEY)
parsed_features = tf.io.parse_example(serialized_tf_examples, feature_spec)
transformed_features = model.tft_layer(parsed_features)
return model(transformed_features)
return serve_tf_examples_fn
def _input_fn(file_pattern: List[Text],
data_accessor: DataAccessor,
tf_transform_output: tft.TFTransformOutput,
batch_size: int = 200) -> tf.data.Dataset:
"""Generates features and label for tuning/training.
Args:
file_pattern: List of paths or patterns of input tfrecord files.
data_accessor: DataAccessor for converting input to RecordBatch.
tf_transform_output: A TFTransformOutput.
batch_size: representing the number of consecutive elements of returned
dataset to combine in a single batch
Returns:
A dataset that contains (features, indices) tuple where features is a
dictionary of Tensors, and indices is a single Tensor of label indices.
"""
return data_accessor.tf_dataset_factory(
file_pattern,
dataset_options.TensorFlowDatasetOptions(
batch_size=batch_size, label_key=_transformed_name(_LABEL_KEY)),
tf_transform_output.transformed_metadata.schema)
def _get_hyperparameters() -> kerastuner.HyperParameters:
"""Returns hyperparameters for building Keras model."""
hp = kerastuner.HyperParameters()
# Defines search space.
hp.Choice('learning_rate', [1e-2, 1e-3], default=1e-2)
hp.Int('num_layers', 1, 3, default=2)
return hp
def _build_keras_model(hparams: kerastuner.HyperParameters) -> tf.keras.Model:
"""Creates a DNN Keras model for classifying penguin data.
Args:
hparams: Holds HyperParameters for tuning.
Returns:
A Keras Model.
"""
# The model below is built with Functional API, please refer to
# https://www.tensorflow.org/guide/keras/overview for all API options.
inputs = [
keras.layers.Input(shape=(1,), name=_transformed_name(f))
for f in _FEATURE_KEYS
]
d = keras.layers.concatenate(inputs)
for _ in range(int(hparams.get('num_layers'))):
d = keras.layers.Dense(8, activation='relu')(d)
outputs = keras.layers.Dense(3, activation='softmax')(d)
model = keras.Model(inputs=inputs, outputs=outputs)
model.compile(
optimizer=keras.optimizers.Adam(hparams.get('learning_rate')),
loss='sparse_categorical_crossentropy',
metrics=[keras.metrics.SparseCategoricalAccuracy()])
model.summary(print_fn=absl.logging.info)
return model
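# Quick sanity-check sketch (illustrative only, not executed by TFX): build the
# model with the default search-space values defined in _get_hyperparameters().
#
#   model = _build_keras_model(_get_hyperparameters())
#   # -> a small DNN with two hidden Dense(8) layers and a 3-class softmax output.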
# TFX Transform will call this function.
def preprocessing_fn(inputs):
"""tf.transform's callback function for preprocessing inputs.
Args:
inputs: map from feature keys to raw not-yet-transformed features.
Returns:
Map from string feature key to transformed feature operations.
"""
outputs = {}
for key in _FEATURE_KEYS:
# Nothing to transform for the penguin dataset. This code is just to
# show how the preprocessing function for Transform should be defined.
# We just assign original values to the transformed feature.
outputs[_transformed_name(key)] = inputs[key]
# TODO(b/157064428): Support label transformation for Keras.
# Do not apply label transformation as it will result in wrong evaluation.
outputs[_transformed_name(_LABEL_KEY)] = inputs[_LABEL_KEY]
return outputs
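# For example, with the feature keys above, preprocessing_fn maps
# 'culmen_length_mm' -> 'culmen_length_mm_xf' (values unchanged) and
# 'species' -> 'species_xf', which is the naming that _transformed_name() and
# _input_fn() expect downstream.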
# TFX Tuner will call this function.
def tuner_fn(fn_args: FnArgs) -> TunerFnResult:
"""Build the tuner using the KerasTuner API.
Args:
fn_args: Holds args as name/value pairs.
- working_dir: working dir for tuning.
- train_files: List of file paths containing training tf.Example data.
- eval_files: List of file paths containing eval tf.Example data.
- train_steps: number of train steps.
- eval_steps: number of eval steps.
- schema_path: optional schema of the input data.
- transform_graph_path: optional transform graph produced by TFT.
Returns:
A namedtuple contains the following:
- tuner: A BaseTuner that will be used for tuning.
- fit_kwargs: Args to pass to tuner's run_trial function for fitting the
          model, e.g., the training and validation dataset. Required
args depend on the above tuner's implementation.
"""
# RandomSearch is a subclass of kerastuner.Tuner which inherits from
# BaseTuner.
tuner = kerastuner.RandomSearch(
_build_keras_model,
max_trials=6,
hyperparameters=_get_hyperparameters(),
allow_new_entries=False,
objective=kerastuner.Objective('val_sparse_categorical_accuracy', 'max'),
directory=fn_args.working_dir,
project_name='penguin_tuning')
transform_graph = tft.TFTransformOutput(fn_args.transform_graph_path)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
transform_graph,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
transform_graph,
batch_size=_EVAL_BATCH_SIZE)
return TunerFnResult(
tuner=tuner,
fit_kwargs={
'x': train_dataset,
'validation_data': eval_dataset,
'steps_per_epoch': fn_args.train_steps,
'validation_steps': fn_args.eval_steps
})
# TFX Trainer will call this function.
def run_fn(fn_args: FnArgs):
"""Train the model based on given args.
Args:
fn_args: Holds args used to train the model as name/value pairs.
"""
tf_transform_output = tft.TFTransformOutput(fn_args.transform_output)
train_dataset = _input_fn(
fn_args.train_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_TRAIN_BATCH_SIZE)
eval_dataset = _input_fn(
fn_args.eval_files,
fn_args.data_accessor,
tf_transform_output,
batch_size=_EVAL_BATCH_SIZE)
if fn_args.hyperparameters:
hparams = kerastuner.HyperParameters.from_config(fn_args.hyperparameters)
else:
    # This is the case when the hyperparameters have already been decided and the
    # Tuner component has been removed from the pipeline. Users can also inline
    # the hyperparameters directly in _build_keras_model.
hparams = _get_hyperparameters()
absl.logging.info('HyperParameters for training: %s' % hparams.get_config())
mirrored_strategy = tf.distribute.MirroredStrategy()
with mirrored_strategy.scope():
model = _build_keras_model(hparams)
steps_per_epoch = _TRAIN_DATA_SIZE // _TRAIN_BATCH_SIZE
# Write logs to path
tensorboard_callback = tf.keras.callbacks.TensorBoard(
log_dir=fn_args.model_run_dir, update_freq='batch')
model.fit(
train_dataset,
epochs=fn_args.train_steps // steps_per_epoch,
steps_per_epoch=steps_per_epoch,
validation_data=eval_dataset,
validation_steps=fn_args.eval_steps,
callbacks=[tensorboard_callback])
signatures = {
'serving_default':
_get_serve_tf_examples_fn(model,
tf_transform_output).get_concrete_function(
tf.TensorSpec(
shape=[None],
dtype=tf.string,
name='examples')),
}
model.save(fn_args.serving_model_dir, save_format='tf', signatures=signatures)
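# Sketch of how this module file is typically wired into a TFX pipeline
# (hypothetical; component arguments are elided and some names are assumptions):
#
#   from tfx.components import Trainer, Transform, Tuner
#
#   transform = Transform(..., module_file=<path to this file>)
#   tuner = Tuner(..., module_file=<path to this file>)
#   trainer = Trainer(..., module_file=<path to this file>,
#                     hyperparameters=tuner.outputs['best_hyperparameters'])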
|
the-stack_0_23264 | # Copyright (c) 2017-present, Facebook, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
##############################################################################
"""Helper functions for working with Caffe2 networks (i.e., operator graphs)."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
from collections import OrderedDict
import logging
import numpy as np
import os
import pprint
from caffe2.python import core
from caffe2.python import workspace
from detectron.core.config import cfg
from detectron.core.config import load_cfg
from detectron.utils.io import load_object
from detectron.utils.io import save_object
import detectron.utils.c2 as c2_utils
import detectron.utils.env as envu
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
def initialize_from_weights_file(model, weights_file, broadcast=True):
"""Initialize a model from weights stored in a pickled dictionary. If
multiple GPUs are used, the loaded weights are synchronized on all GPUs,
unless 'broadcast' is False.
"""
initialize_gpu_from_weights_file(model, weights_file, gpu_id=0)
if broadcast:
broadcast_parameters(model)
def initialize_gpu_from_weights_file(model, weights_file, gpu_id=0):
"""Initialize a network with ops on a specific GPU.
If you use CUDA_VISIBLE_DEVICES to target specific GPUs, Caffe2 will
automatically map logical GPU ids (starting from 0) to the physical GPUs
specified in CUDA_VISIBLE_DEVICES.
"""
logger.info('Loading weights from: {}'.format(weights_file))
ws_blobs = workspace.Blobs()
src_blobs = load_object(weights_file)
if 'cfg' in src_blobs:
saved_cfg = load_cfg(src_blobs['cfg'])
configure_bbox_reg_weights(model, saved_cfg)
if 'blobs' in src_blobs:
# Backwards compat--dictionary used to be only blobs, now they are
# stored under the 'blobs' key
src_blobs = src_blobs['blobs']
# Initialize weights on GPU gpu_id only
unscoped_param_names = OrderedDict() # Print these out in model order
for blob in model.params:
unscoped_param_names[c2_utils.UnscopeName(str(blob))] = True
with c2_utils.NamedCudaScope(gpu_id):
for unscoped_param_name in unscoped_param_names.keys():
if (unscoped_param_name.find(']_') >= 0 and
unscoped_param_name not in src_blobs):
# Special case for sharing initialization from a pretrained
# model:
# If a blob named '_[xyz]_foo' is in model.params and not in
# the initialization blob dictionary, then load source blob
# 'foo' into destination blob '_[xyz]_foo'
src_name = unscoped_param_name[
unscoped_param_name.find(']_') + 2:]
else:
src_name = unscoped_param_name
if src_name not in src_blobs:
logger.info('{:s} not found'.format(src_name))
continue
dst_name = core.ScopedName(unscoped_param_name)
has_momentum = src_name + '_momentum' in src_blobs
has_momentum_str = ' [+ momentum]' if has_momentum else ''
logger.info(
'{:s}{:} loaded from weights file into {:s}: {}'.format(
src_name, has_momentum_str, dst_name, src_blobs[src_name]
.shape
)
)
if dst_name in ws_blobs:
# If the blob is already in the workspace, make sure that it
# matches the shape of the loaded blob
ws_blob = workspace.FetchBlob(dst_name)
assert ws_blob.shape == src_blobs[src_name].shape, \
('Workspace blob {} with shape {} does not match '
'weights file shape {}').format(
src_name,
ws_blob.shape,
src_blobs[src_name].shape)
workspace.FeedBlob(
dst_name,
src_blobs[src_name].astype(np.float32, copy=False))
if has_momentum:
workspace.FeedBlob(
dst_name + '_momentum',
src_blobs[src_name + '_momentum'].astype(
np.float32, copy=False))
# We preserve blobs that are in the weights file but not used by the current
# model. We load these into CPU memory under the '__preserve__/' namescope.
# These blobs will be stored when saving a model to a weights file. This
# feature allows for alternating optimization of Faster R-CNN in which blobs
# unused by one step can still be preserved forward and used to initialize
# another step.
for src_name in src_blobs.keys():
if (src_name not in unscoped_param_names and
not src_name.endswith('_momentum') and
src_blobs[src_name] is not None):
with c2_utils.CpuScope():
workspace.FeedBlob(
'__preserve__/{:s}'.format(src_name), src_blobs[src_name])
logger.info(
'{:s} preserved in workspace (unused)'.format(src_name))
def save_model_to_weights_file(weights_file, model):
"""Stash model weights in a dictionary and pickle them to a file. We map
GPU device scoped names to unscoped names (e.g., 'gpu_0/conv1_w' ->
'conv1_w').
"""
logger.info(
'Saving parameters and momentum to {}'.format(
os.path.abspath(weights_file)))
blobs = {}
# Save all parameters
for param in model.params:
scoped_name = str(param)
unscoped_name = c2_utils.UnscopeName(scoped_name)
if unscoped_name not in blobs:
logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
# Save momentum
for param in model.TrainableParams():
scoped_name = str(param) + '_momentum'
unscoped_name = c2_utils.UnscopeName(scoped_name)
if unscoped_name not in blobs:
logger.debug(' {:s} -> {:s}'.format(scoped_name, unscoped_name))
blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
# Save preserved blobs
for scoped_name in workspace.Blobs():
if scoped_name.startswith('__preserve__/'):
unscoped_name = c2_utils.UnscopeName(scoped_name)
if unscoped_name not in blobs:
logger.debug(
' {:s} -> {:s} (preserved)'.format(
scoped_name, unscoped_name))
blobs[unscoped_name] = workspace.FetchBlob(scoped_name)
cfg_yaml = envu.yaml_dump(cfg)
save_object(dict(blobs=blobs, cfg=cfg_yaml), weights_file)
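# Typical checkpoint round trip (illustrative sketch; `model` is a Detectron
# model built elsewhere and the path is arbitrary):
#
#   save_model_to_weights_file('/tmp/model_iter1000.pkl', model)
#   ...
#   initialize_from_weights_file(model, '/tmp/model_iter1000.pkl')  # re-syncs all GPUs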
def broadcast_parameters(model):
"""Copy parameter blobs from GPU 0 over the corresponding parameter blobs
on GPUs 1 through cfg.NUM_GPUS - 1.
"""
if cfg.NUM_GPUS == 1:
# no-op if only running on a single GPU
return
def _do_broadcast(all_blobs):
assert len(all_blobs) % cfg.NUM_GPUS == 0, \
('Unexpected value for NUM_GPUS. Make sure you are not '
'running single-GPU inference with NUM_GPUS > 1.')
blobs_per_gpu = int(len(all_blobs) / cfg.NUM_GPUS)
for i in range(blobs_per_gpu):
blobs = [p for p in all_blobs[i::blobs_per_gpu]]
data = workspace.FetchBlob(blobs[0])
logger.debug('Broadcasting {} to'.format(str(blobs[0])))
for i, p in enumerate(blobs[1:]):
logger.debug(' |-> {}'.format(str(p)))
with c2_utils.CudaScope(i + 1):
workspace.FeedBlob(p, data)
_do_broadcast(model.params)
_do_broadcast([b + '_momentum' for b in model.TrainableParams()])
def sum_multi_gpu_blob(blob_name):
"""Return the sum of a scalar blob held on multiple GPUs."""
val = 0
for i in range(cfg.NUM_GPUS):
val += float(workspace.FetchBlob('gpu_{}/{}'.format(i, blob_name)))
return val
def average_multi_gpu_blob(blob_name):
"""Return the average of a scalar blob held on multiple GPUs."""
return sum_multi_gpu_blob(blob_name) / cfg.NUM_GPUS
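# Example (hypothetical blob name): a per-GPU scalar such as 'accuracy_cls' is
# stored as 'gpu_0/accuracy_cls', 'gpu_1/accuracy_cls', ...;
# average_multi_gpu_blob('accuracy_cls') returns its mean across all GPUs.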
def print_net(model, namescope='gpu_0'):
"""Print the model network."""
logger.info('Printing model: {}'.format(model.net.Name()))
op_list = model.net.Proto().op
for op in op_list:
input_name = op.input
# For simplicity: only print the first output
# Not recommended if there are split layers
try:
output_name = str(op.output[0])
except BaseException:
output_name = '<nothing>'
op_type = op.type
op_name = op.name
if namescope is None or output_name.startswith(namescope):
# Only print the forward pass network
if output_name.find('grad') >= 0 or output_name.find('__m') >= 0:
continue
try:
# Under some conditions (e.g., dynamic memory optimization)
# it is possible that the network frees some blobs when they are
# no longer needed. Handle this case...
output_shape = workspace.FetchBlob(output_name).shape
except BaseException:
output_shape = '<unknown>'
first_blob = True
op_label = op_type + (op_name if op_name == '' else ':' + op_name)
suffix = ' ------- (op: {})'.format(op_label)
for j in range(len(input_name)):
if input_name[j] in model.params:
continue
input_blob = workspace.FetchBlob(input_name[j])
if isinstance(input_blob, np.ndarray):
input_shape = input_blob.shape
logger.info('{:28s}: {:20s} => {:28s}: {:20s}{}'.format(
c2_utils.UnscopeName(str(input_name[j])),
'{}'.format(input_shape),
c2_utils.UnscopeName(str(output_name)),
'{}'.format(output_shape),
suffix))
if first_blob:
first_blob = False
suffix = ' ------|'
logger.info('End of model: {}'.format(model.net.Name()))
def configure_bbox_reg_weights(model, saved_cfg):
"""Compatibility for old models trained with bounding box regression
mean/std normalization (instead of fixed weights).
"""
if 'MODEL' not in saved_cfg or 'BBOX_REG_WEIGHTS' not in saved_cfg.MODEL:
logger.warning('Model from weights file was trained before config key '
'MODEL.BBOX_REG_WEIGHTS was added. Forcing '
'MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.) to ensure '
'correct **inference** behavior.')
# Generally we don't allow modifying the config, but this is a one-off
# hack to support some very old models
is_immutable = cfg.is_immutable()
cfg.immutable(False)
cfg.MODEL.BBOX_REG_WEIGHTS = (1., 1., 1., 1.)
cfg.immutable(is_immutable)
logger.info('New config:')
logger.info(pprint.pformat(cfg))
assert not model.train, (
'This model was trained with an older version of the code that '
'used bounding box regression mean/std normalization. It can no '
'longer be used for training. To upgrade it to a trainable model '
'please use fb/compat/convert_bbox_reg_normalized_model.py.'
)
def get_group_gn(dim):
"""
get number of groups used by GroupNorm, based on number of channels
"""
dim_per_gp = cfg.GROUP_NORM.DIM_PER_GP
num_groups = cfg.GROUP_NORM.NUM_GROUPS
assert dim_per_gp == -1 or num_groups == -1, \
"GroupNorm: can only specify G or C/G."
if dim_per_gp > 0:
assert dim % dim_per_gp == 0
group_gn = dim // dim_per_gp
else:
assert dim % num_groups == 0
group_gn = num_groups
return group_gn
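# Worked example: with cfg.GROUP_NORM.NUM_GROUPS = 32 (and DIM_PER_GP = -1),
# get_group_gn(256) returns 32, i.e. GroupNorm splits 256 channels into 32 groups
# of 8; with DIM_PER_GP = 16 (and NUM_GROUPS = -1) it would instead return
# 256 // 16 = 16 groups.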
|
the-stack_0_23265 | #!/usr/bin/env python3
# Copyright (c) 2017 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test mempool acceptance of raw transactions."""
from io import BytesIO
from test_framework.test_framework import BitcoinTestFramework
from test_framework.messages import (
BIP125_SEQUENCE_NUMBER,
COIN,
COutPoint,
CTransaction,
CTxOut,
MAX_BLOCK_BASE_SIZE,
)
from test_framework.script import (
hash160,
CScript,
OP_0,
OP_EQUAL,
OP_HASH160,
OP_RETURN,
)
from test_framework.util import (
assert_equal,
assert_raises_rpc_error,
bytes_to_hex_str,
hex_str_to_bytes,
wait_until,
)
class MempoolAcceptanceTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.extra_args = [[
'-txindex',
'-reindex', # Need reindex for txindex
'-acceptnonstdtxn=0', # Try to mimic main-net
]] * self.num_nodes
def skip_test_if_missing_module(self):
self.skip_if_no_wallet()
def check_mempool_result(self, result_expected, *args, **kwargs):
"""Wrapper to check result of testmempoolaccept on node_0's mempool"""
result_test = self.nodes[0].testmempoolaccept(*args, **kwargs)
assert_equal(result_expected, result_test)
assert_equal(self.nodes[0].getmempoolinfo()['size'], self.mempool_size) # Must not change mempool state
def run_test(self):
node = self.nodes[0]
self.log.info('Start with empty mempool, and 200 blocks')
self.mempool_size = 0
wait_until(lambda: node.getblockcount() == 200)
assert_equal(node.getmempoolinfo()['size'], self.mempool_size)
self.log.info('Should not accept garbage to testmempoolaccept')
assert_raises_rpc_error(-3, 'Expected type array, got string', lambda: node.testmempoolaccept(rawtxs='ff00baar'))
assert_raises_rpc_error(-8, 'Array must contain exactly one raw transaction for now', lambda: node.testmempoolaccept(rawtxs=['ff00baar', 'ff22']))
assert_raises_rpc_error(-22, 'TX decode failed', lambda: node.testmempoolaccept(rawtxs=['ff00baar']))
self.log.info('A transaction already in the blockchain')
coin = node.listunspent()[0] # Pick a random coin(base) to spend
raw_tx_in_block = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': coin['txid'], 'vout': coin['vout']}],
outputs=[{node.getnewaddress(): 0.3}, {node.getnewaddress(): 49}],
))['hex']
txid_in_block = node.sendrawtransaction(hexstring=raw_tx_in_block, allowhighfees=True)
node.generate(1)
self.mempool_size = 0
self.check_mempool_result(
result_expected=[{'txid': txid_in_block, 'allowed': False, 'reject-reason': '18: txn-already-known'}],
rawtxs=[raw_tx_in_block],
)
self.log.info('A transaction not in the mempool')
fee = 0.00000700
raw_tx_0 = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{"txid": txid_in_block, "vout": 0, "sequence": BIP125_SEQUENCE_NUMBER}], # RBF is used later
outputs=[{node.getnewaddress(): 0.3 - fee}],
))['hex']
tx = CTransaction()
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction in the mempool')
node.sendrawtransaction(hexstring=raw_tx_0)
self.mempool_size = 1
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': '18: txn-already-in-mempool'}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that replaces a mempool transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(fee * COIN) # Double the fee
tx.vin[0].nSequence = BIP125_SEQUENCE_NUMBER + 1 # Now, opt out of RBF
raw_tx_0 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
txid_0 = tx.rehash()
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': True}],
rawtxs=[raw_tx_0],
)
self.log.info('A transaction that conflicts with an unconfirmed tx')
# Send the transaction that replaces the mempool transaction and opts out of replaceability
node.sendrawtransaction(hexstring=bytes_to_hex_str(tx.serialize()), allowhighfees=True)
# take original raw_tx_0
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vout[0].nValue -= int(4 * fee * COIN) # Set more fee
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '18: txn-mempool-conflict'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
self.log.info('A transaction with missing inputs, that never existed')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout = COutPoint(hash=int('ff' * 32, 16), n=14)
# skip re-signing the tx
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with missing inputs, that existed once in the past')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_0)))
tx.vin[0].prevout.n = 1 # Set vout to 1, to spend the other outpoint (49 coins) of the in-chain-tx we want to double spend
raw_tx_1 = node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex']
txid_1 = node.sendrawtransaction(hexstring=raw_tx_1, allowhighfees=True)
# Now spend both to "clearly hide" the outputs, ie. remove the coins from the utxo set by spending them
raw_tx_spend_both = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[
{'txid': txid_0, 'vout': 0},
{'txid': txid_1, 'vout': 0},
],
outputs=[{node.getnewaddress(): 0.1}]
))['hex']
txid_spend_both = node.sendrawtransaction(hexstring=raw_tx_spend_both, allowhighfees=True)
node.generate(1)
self.mempool_size = 0
# Now see if we can add the coins back to the utxo set by sending the exact txs again
self.check_mempool_result(
result_expected=[{'txid': txid_0, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_0],
)
self.check_mempool_result(
result_expected=[{'txid': txid_1, 'allowed': False, 'reject-reason': 'missing-inputs'}],
rawtxs=[raw_tx_1],
)
self.log.info('Create a signed "reference" tx for later use')
raw_tx_reference = node.signrawtransactionwithwallet(node.createrawtransaction(
inputs=[{'txid': txid_spend_both, 'vout': 0}],
outputs=[{node.getnewaddress(): 0.05}],
))['hex']
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
# Reference tx should be valid on itself
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': True}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with no outputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = []
# Skip re-signing the transaction for context independent checks from now on
# tx.deserialize(BytesIO(hex_str_to_bytes(node.signrawtransactionwithwallet(bytes_to_hex_str(tx.serialize()))['hex'])))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-empty'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A really large transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * (MAX_BLOCK_BASE_SIZE // len(tx.vin[0].serialize()))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-oversize'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with negative output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue *= -1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-negative'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large output value')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].nValue = 21000000 * COIN + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-vout-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with too large sum of output values')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout = [tx.vout[0]] * 2
tx.vout[0].nValue = 21000000 * COIN
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-txouttotal-toolarge'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction with duplicate inputs')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin = [tx.vin[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: bad-txns-inputs-duplicate'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A coinbase transaction')
# Pick the input of the first tx we signed, so it has to be a coinbase tx
raw_tx_coinbase_spent = node.getrawtransaction(txid=node.decoderawtransaction(hexstring=raw_tx_in_block)['vin'][0]['txid'])
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_coinbase_spent)))
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '16: coinbase'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('Some nonstandard transactions')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.nVersion = 3 # A version currently non-standard
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: version'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_0]) # Some non-standard script
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptpubkey'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].scriptSig = CScript([OP_HASH160]) # Some not-pushonly scriptSig
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: scriptsig-not-pushonly'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
output_p2sh_burn = CTxOut(nValue=540, scriptPubKey=CScript([OP_HASH160, hash160(b'burn'), OP_EQUAL]))
num_scripts = 100000 // len(output_p2sh_burn.serialize()) # Use enough outputs to make the tx too large for our policy
tx.vout = [output_p2sh_burn] * num_scripts
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: tx-size'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0] = output_p2sh_burn
tx.vout[0].nValue -= 1 # Make output smaller, such that it is dust for our policy
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: dust'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vout[0].scriptPubKey = CScript([OP_RETURN, b'\xff'])
tx.vout = [tx.vout[0]] * 2
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: multi-op-return'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A timelocked transaction')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence -= 1 # Should be non-max, so locktime is not ignored
tx.nLockTime = node.getblockcount() + 1
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
)
self.log.info('A transaction that is locked by BIP68 sequence logic')
tx.deserialize(BytesIO(hex_str_to_bytes(raw_tx_reference)))
tx.vin[0].nSequence = 2 # We could include it in the second block mined from now, but not the very next one
# Can skip re-signing the tx because of early rejection
self.check_mempool_result(
result_expected=[{'txid': tx.rehash(), 'allowed': False, 'reject-reason': '64: non-BIP68-final'}],
rawtxs=[bytes_to_hex_str(tx.serialize())],
allowhighfees=True,
)
if __name__ == '__main__':
MempoolAcceptanceTest().main()
|
the-stack_0_23266 | import operator
from future.utils import iteritems
from xadmin import widgets
from xadmin.plugins.utils import get_context_dict
from django.contrib.admin.utils import get_fields_from_path, lookup_needs_distinct
from django.core.exceptions import SuspiciousOperation, ImproperlyConfigured, ValidationError
from django.db import models
from django.db.models.fields import FieldDoesNotExist
from django.db.models.constants import LOOKUP_SEP
# from django.db.models.sql.constants import QUERY_TERMS
from django.template import loader
from django.utils.encoding import smart_str
from django.utils.translation import ugettext as _
from xadmin.filters import manager as filter_manager, FILTER_PREFIX, SEARCH_VAR, DateFieldListFilter, \
RelatedFieldSearchFilter
from xadmin.sites import site
from xadmin.views import BaseAdminPlugin, ListAdminView
from xadmin.util import is_related_field
from functools import reduce
class IncorrectLookupParameters(Exception):
pass
class FilterPlugin(BaseAdminPlugin):
list_filter = ()
search_fields = ()
free_query_filter = True
def lookup_allowed(self, lookup, value):
model = self.model
# Check FKey lookups that are allowed, so that popups produced by
# ForeignKeyRawIdWidget, on the basis of ForeignKey.limit_choices_to,
# are allowed to work.
for l in model._meta.related_fkey_lookups:
for k, v in widgets.url_params_from_lookup_dict(l).items():
if k == lookup and v == value:
return True
parts = lookup.split(LOOKUP_SEP)
# Last term in lookup is a query term (__exact, __startswith etc)
# This term can be ignored.
# if len(parts) > 1 and parts[-1] in QUERY_TERMS:
# parts.pop()
# Special case -- foo__id__exact and foo__id queries are implied
        # if foo has been specifically included in the lookup list; so
# drop __id if it is the last part. However, first we need to find
# the pk attribute name.
rel_name = None
for part in parts[:-1]:
try:
field = model._meta.get_field(part)
except FieldDoesNotExist:
                # Lookups on non-existent fields are ok, since they're ignored
# later.
return True
if hasattr(field, 'remote_field'):
model = field.remote_field.to
rel_name = field.remote_field.get_related_field().name
elif is_related_field(field):
model = field.model
rel_name = model._meta.pk.name
else:
rel_name = None
if rel_name and len(parts) > 1 and parts[-1] == rel_name:
parts.pop()
if len(parts) == 1:
return True
clean_lookup = LOOKUP_SEP.join(parts)
return clean_lookup in self.list_filter
def get_list_queryset(self, queryset):
lookup_params = dict([(smart_str(k)[len(FILTER_PREFIX):], v) for k, v in self.admin_view.params.items()
if smart_str(k).startswith(FILTER_PREFIX) and v != ''])
for p_key, p_val in iteritems(lookup_params):
if p_val == "False":
lookup_params[p_key] = False
use_distinct = False
# for clean filters
self.admin_view.has_query_param = bool(lookup_params)
self.admin_view.clean_query_url = self.admin_view.get_query_string(remove=[k for k in self.request.GET.keys() if
k.startswith(FILTER_PREFIX)])
# Normalize the types of keys
if not self.free_query_filter:
for key, value in lookup_params.items():
if not self.lookup_allowed(key, value):
raise SuspiciousOperation(
"Filtering by %s not allowed" % key)
self.filter_specs = []
if self.list_filter:
for list_filter in self.list_filter:
if callable(list_filter):
# This is simply a custom list filter class.
spec = list_filter(self.request, lookup_params,
self.model, self)
else:
field_path = None
field_parts = []
if isinstance(list_filter, (tuple, list)):
# This is a custom FieldListFilter class for a given field.
field, field_list_filter_class = list_filter
else:
# This is simply a field name, so use the default
# FieldListFilter class that has been registered for
# the type of the given field.
field, field_list_filter_class = list_filter, filter_manager.create
if not isinstance(field, models.Field):
field_path = field
field_parts = get_fields_from_path(
self.model, field_path)
field = field_parts[-1]
spec = field_list_filter_class(
field, self.request, lookup_params,
self.model, self.admin_view, field_path=field_path)
if len(field_parts) > 1:
# Add related model name to title
spec.title = "%s %s" % (field_parts[-2].name, spec.title)
# Check if we need to use distinct()
use_distinct = (use_distinct or
lookup_needs_distinct(self.opts, field_path))
if spec and spec.has_output():
try:
new_qs = spec.do_filte(queryset)
except ValidationError as e:
new_qs = None
self.admin_view.message_user(_("<b>Filtering error:</b> %s") % e.messages[0], 'error')
if new_qs is not None:
queryset = new_qs
self.filter_specs.append(spec)
self.has_filters = bool(self.filter_specs)
self.admin_view.filter_specs = self.filter_specs
obj = filter(lambda f: f.is_used, self.filter_specs)
obj = list(obj)
self.admin_view.used_filter_num = len(obj)
try:
for key, value in lookup_params.items():
use_distinct = (
use_distinct or lookup_needs_distinct(self.opts, key))
except FieldDoesNotExist as e:
raise IncorrectLookupParameters(e)
try:
# fix a bug by david: In demo, quick filter by IDC Name() cannot be used.
if isinstance(queryset, models.query.QuerySet) and lookup_params:
new_lookup_parames = dict()
for k, v in lookup_params.items():
list_v = v.split(',')
if len(list_v) > 0:
new_lookup_parames.update({k: list_v})
else:
new_lookup_parames.update({k: v})
queryset = queryset.filter(**new_lookup_parames)
except (SuspiciousOperation, ImproperlyConfigured):
raise
except Exception as e:
raise IncorrectLookupParameters(e)
else:
if not isinstance(queryset, models.query.QuerySet):
pass
query = self.request.GET.get(SEARCH_VAR, '')
# Apply keyword searches.
def construct_search(field_name):
if field_name.startswith('^'):
return "%s__istartswith" % field_name[1:]
elif field_name.startswith('='):
return "%s__iexact" % field_name[1:]
elif field_name.startswith('@'):
return "%s__search" % field_name[1:]
else:
return "%s__icontains" % field_name
if self.search_fields and query:
orm_lookups = [construct_search(str(search_field))
for search_field in self.search_fields]
for bit in query.split():
or_queries = [models.Q(**{orm_lookup: bit})
for orm_lookup in orm_lookups]
queryset = queryset.filter(reduce(operator.or_, or_queries))
if not use_distinct:
for search_spec in orm_lookups:
if lookup_needs_distinct(self.opts, search_spec):
use_distinct = True
break
self.admin_view.search_query = query
if use_distinct:
return queryset.distinct()
else:
return queryset
# Media
def get_media(self, media):
arr = filter(lambda s: isinstance(s, DateFieldListFilter), self.filter_specs)
arr = list(arr)
if bool(arr):
media = media + self.vendor('datepicker.css', 'datepicker.js',
'xadmin.widget.datetime.js')
arr = filter(lambda s: isinstance(s, RelatedFieldSearchFilter), self.filter_specs)
arr = list(arr)
if bool(arr):
media = media + self.vendor(
'select.js', 'select.css', 'xadmin.widget.select.js')
return media + self.vendor('xadmin.plugin.filters.js')
# Block Views
def block_nav_menu(self, context, nodes):
if self.has_filters:
nodes.append(loader.render_to_string('xadmin/blocks/model_list.nav_menu.filters.html',
context=get_context_dict(context)))
def block_nav_form(self, context, nodes):
if self.search_fields:
context = get_context_dict(context or {}) # no error!
context.update({
'search_var': SEARCH_VAR,
'remove_search_url': self.admin_view.get_query_string(remove=[SEARCH_VAR]),
'search_form_params': self.admin_view.get_form_params(remove=[SEARCH_VAR])
})
nodes.append(
loader.render_to_string(
'xadmin/blocks/model_list.nav_form.search_form.html',
context=context)
)
site.register_plugin(FilterPlugin, ListAdminView)
|
the-stack_0_23267 | # Import python modules
import sys
import os
import traceback
# Append
sys.path.append(os.path.dirname(os.path.realpath(__file__)))
# Import core modules
from core import command_handler
from core.module_manager import ModuleManager
from core.apistatus import *
api.enabled = True
# Exceptions
import core.exceptions
class arissploitapi:
mm = None
ch = None
allowPrint = False
ModuleError = False
def disablePrint(self):
if self.allowPrint == False:
f = open(os.devnull, 'w')
sys.stdout = f
def enablePrint(self):
if self.allowPrint == False:
sys.stdout = sys.__stdout__
def __init__(self, allowPrint):
self.allowPrint = allowPrint
self.mm = ModuleManager
self.ch = command_handler.Commandhandler(self.mm, True)
def loadModule(self, module):
self.disablePrint()
try:
self.ch.handle("use "+module)
modadd = sys.modules["modules."+module]
if modadd.conf['apisupport'] == False:
raise ApiNotSupported("This module doesn't support API!")
except core.exceptions.ModuleNotFound:
self.enablePrint()
raise ModuleNotFound("Error: module not found!")
except:
self.enablePrint()
raise
self.enablePrint()
def unloadModule(self):
self.disablePrint()
try:
self.ch.handle("back")
except:
self.enablePrint()
raise
self.enablePrint()
def setVariable(self, target, value):
self.disablePrint()
try:
self.ch.handle("set "+target+" "+value)
except core.exceptions.VariableError:
self.enablePrint()
raise VariableError("Error: variable not found!")
except:
self.enablePrint()
raise
self.enablePrint()
def runModule(self):
self.ModuleError = False
self.disablePrint()
try:
answer = self.ch.handle("run")
except:
self.enablePrint()
raise
self.enablePrint()
if type(answer) is core.exceptions.ModuleError:
self.ModuleError = True
return answer
def customCommand(self, command):
self.disablePrint()
try:
answer = self.ch.handle(command)
except:
self.enablePrint()
raise
self.enablePrint()
return answer
def runCommand(self, command):
self.disablePrint()
try:
self.ch.handle(command)
except:
self.enablePrint()
raise
self.enablePrint()
class ModuleNotFound(Exception):
pass
class VariableError(Exception):
pass
class ApiNotSupported(Exception):
pass
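# Hedged usage sketch (module and variable names below are illustrative only,
# not taken from this codebase):
#   api = arissploitapi(allowPrint=False)
#   api.loadModule("dnsspoof")          # hypothetical module name
#   api.setVariable("TARGET", "127.0.0.1")
#   answer = api.runModule()
#   api.unloadModule()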
|
the-stack_0_23268 | from pathflowai import utils
from numpy.testing import assert_array_equal, assert_allclose
def test_svs2dask_array():
from .utils import download_svs
from PIL import Image
from numpy import array as to_npa
# from os import remove
id = "2e4f6316-588b-4629-adf0-7aeac358a0e2"
file = "TCGA-MR-A520-01Z-00-DX1.2F323BAC-56C9-4A0C-9C1B-2B4F776056B4.svs"
download_location = download_svs(id, file)
Image.MAX_IMAGE_PIXELS = None # SECURITY RISK!
ground_truth = to_npa(Image.open(download_location))
test = utils.svs2dask_array(download_location).compute()
crop_height, crop_width, _ = test.shape
# remove(download_location)
assert_array_equal(ground_truth[:crop_height, :crop_width, :], test)
def test_preprocessing_pipeline():
from .utils import get_tests_dir
from os.path import join, exists
tests_dir = get_tests_dir()
basename = "TCGA-18-5592-01Z-00-DX1"
input_dir = join(tests_dir, "inputs")
out_zarr = join(tests_dir, "output_zarr.zarr")
out_pkl = join(tests_dir, "output.pkl")
def capture(command):
from subprocess import Popen, PIPE
proc = Popen(command, stdout=PIPE, stderr=PIPE)
out, err = proc.communicate()
return out, err, proc.returncode
def test_segmentation():
npy_file = join(input_dir, basename + ".npy")
npy_mask = join(input_dir, basename + "_mask.npy")
# convert TCGA annotations (XML) to a
# binary mask (npy) with the following:
#
# import numpy as np
# import viewmask
# import xml.etree.ElementTree as ET
# np.save(
# './tests/inputs/TCGA-18-5592-01Z-00-DX1_mask.npy',
# viewmask.utils.xml_to_image(
# ET.parse('./tests/inputs/TCGA-18-5592-01Z-00-DX1.xml')
# )
# )
#
#
# convert TCGA input (PNG) to a
# numpy array (npy) with the following:
#
# import numpy as np
# from PIL import Image
# np.save(
# './tests/inputs/TCGA-18-5592-01Z-00-DX1.npy',
# np.array(
# Image.open('./tests/inputs/TCGA-18-5592-01Z-00-DX1.png')
# )
# )
utils.run_preprocessing_pipeline(
npy_file, npy_mask=npy_mask, out_zarr=out_zarr, out_pkl=out_pkl
)
assert exists(out_zarr)
assert exists(out_pkl)
from numpy import load as npy_to_npa
from zarr import open as open_zarr
from dask.array import from_zarr as zarr_to_da
img = zarr_to_da(open_zarr(out_zarr)).compute()
assert_array_equal(img, npy_to_npa(npy_file))
odb = join(tests_dir, "patch_information.db")
command = [
"pathflowai-preprocess",
"preprocess-pipeline",
"-odb", odb,
"--preprocess",
"--patches",
"--basename", basename,
"--input_dir", input_dir,
"--patch_size", "256",
"--intensity_threshold", "45.",
"-tc", "7",
"-t", "0.05"
]
out, err, exitcode = capture(command)
assert exists(out_zarr)
assert exists(out_pkl)
assert exists(odb)
assert exitcode == 0
from sqlite3 import connect as sql_connect
connection = sql_connect(odb)
cursor = connection.execute('SELECT * FROM "256";')
names = [description[0] for description in cursor.description]
cursor.close()
true_headers = [
"index",
"ID",
"x",
"y",
"patch_size",
"annotation",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
]
assert names == true_headers
def test_classification():
png_file = join(input_dir, basename + ".png")
xml_file = join(input_dir, basename + ".xml")
utils.run_preprocessing_pipeline(
png_file, xml_file=xml_file,
out_zarr=out_zarr, out_pkl=out_pkl
)
assert exists(out_zarr)
assert exists(out_pkl)
from PIL.Image import open as png_to_pil
from numpy import array as pil_to_npa
from zarr import open as open_zarr
from dask.array import from_zarr as zarr_to_da
img = zarr_to_da(open_zarr(out_zarr)).compute() # (1, 1000, 1000, 3)
assert_allclose(img[0], pil_to_npa(png_to_pil(png_file)))
odb = join(tests_dir, "patch_information.db")
command = [
"pathflowai-preprocess",
"preprocess-pipeline",
"-odb", odb,
"--preprocess",
"--patches",
"--basename", basename,
"--input_dir", input_dir,
"--patch_size", "256",
"--intensity_threshold", "45.",
"-t", "0.05"
]
out, err, exitcode = capture(command)
assert exists(out_zarr)
assert exists(out_pkl)
assert exists(odb)
print(err)
assert exitcode == 0
from sqlite3 import connect as sql_connect
connection = sql_connect(odb)
cursor = connection.execute('SELECT * FROM "256";')
names = [description[0] for description in cursor.description]
cursor.close()
true_headers = [
"index",
"ID",
"x",
"y",
"patch_size",
"annotation",
"0",
"1",
"2",
"3",
"4",
"5",
"6",
]
assert names == true_headers
test_segmentation()
from os import remove
from shutil import rmtree
rmtree(out_zarr)
remove(out_pkl)
# test_classification()
|
the-stack_0_23269 | def quick_sort_3partition(sorting: list, left: int, right: int) -> None:
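    """
    A pure Python implementation of quick sort (in-place) using a three-way
    ("Dutch national flag") partition: elements equal to the pivot are grouped
    between the "less than" and "greater than" regions.
    >>> nums = [3, 1, 3, 2, 3]
    >>> quick_sort_3partition(nums, 0, 4)
    >>> nums
    [1, 2, 3, 3, 3]
    """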
if right <= left:
return
a = i = left
b = right
pivot = sorting[left]
while i <= b:
if sorting[i] < pivot:
sorting[a], sorting[i] = sorting[i], sorting[a]
a += 1
i += 1
elif sorting[i] > pivot:
sorting[b], sorting[i] = sorting[i], sorting[b]
b -= 1
else:
i += 1
quick_sort_3partition(sorting, left, a - 1)
quick_sort_3partition(sorting, b + 1, right)
def quick_sort_lomuto_partition(sorting: list, left: int, right: int) -> None:
"""
A pure Python implementation of quick sort algorithm(in-place)
with Lomuto partition scheme:
https://en.wikipedia.org/wiki/Quicksort#Lomuto_partition_scheme
:param sorting: sort list
:param left: left endpoint of sorting
:param right: right endpoint of sorting
:return: None
Examples:
>>> nums1 = [0, 5, 3, 1, 2]
>>> quick_sort_lomuto_partition(nums1, 0, 4)
>>> nums1
[0, 1, 2, 3, 5]
>>> nums2 = []
>>> quick_sort_lomuto_partition(nums2, 0, 0)
>>> nums2
[]
>>> nums3 = [-2, 5, 0, -4]
>>> quick_sort_lomuto_partition(nums3, 0, 3)
>>> nums3
[-4, -2, 0, 5]
"""
if left < right:
pivot_index = lomuto_partition(sorting, left, right)
quick_sort_lomuto_partition(sorting, left, pivot_index - 1)
quick_sort_lomuto_partition(sorting, pivot_index + 1, right)
def lomuto_partition(sorting: list, left: int, right: int) -> int:
"""
Example:
>>> lomuto_partition([1,5,7,6], 0, 3)
2
"""
pivot = sorting[right]
store_index = left
for i in range(left, right):
if sorting[i] < pivot:
sorting[store_index], sorting[i] = sorting[i], sorting[store_index]
store_index += 1
sorting[right], sorting[store_index] = sorting[store_index], sorting[right]
return store_index
def three_way_radix_quicksort(sorting: list) -> list:
"""
Three-way radix quicksort:
https://en.wikipedia.org/wiki/Quicksort#Three-way_radix_quicksort
First divide the list into three parts.
Then recursively sort the "less than" and "greater than" partitions.
>>> three_way_radix_quicksort([])
[]
>>> three_way_radix_quicksort([1])
[1]
>>> three_way_radix_quicksort([-5, -2, 1, -2, 0, 1])
[-5, -2, -2, 0, 1, 1]
>>> three_way_radix_quicksort([1, 2, 5, 1, 2, 0, 0, 5, 2, -1])
[-1, 0, 0, 1, 1, 2, 2, 2, 5, 5]
"""
if len(sorting) <= 1:
return sorting
return (
three_way_radix_quicksort([i for i in sorting if i < sorting[0]])
+ [i for i in sorting if i == sorting[0]]
+ three_way_radix_quicksort([i for i in sorting if i > sorting[0]])
)
if __name__ == "__main__":
import doctest
doctest.testmod(verbose=True)
user_input = input("Enter numbers separated by a comma:\n").strip()
unsorted = [int(item) for item in user_input.split(",")]
quick_sort_3partition(unsorted, 0, len(unsorted) - 1)
print(unsorted)
|
the-stack_0_23270 | from .processor import ResultProcessor
import pycocotools.mask as mask_util
import numpy as np
# class DatasetStats(object):
class CaseStats(object):
"""
currently analyze with 3 criteria:
0. has iou > thresh (0 or 1)
1. max ious
2. max scores with ious > thresh
3. average scores with ious > thresh
"""
VALUE_METHOD = 1
IOU_TYPE = "bbox"
IOU_THRESH = 0.1
def __init__(self, name, dt, gt):
self.__dict__.update(locals())
self._ious = {}
self._values = {}
@property
def ious(self):
if self.IOU_TYPE in self._ious:
ious_mat = self._ious[self.IOU_TYPE]
else:
            # is_crowd = 0: intersection over union
            # is_crowd = 1: intersection over detection
iscrowd = [0 for _ in self.gt]
dt_rois = [obj[self.IOU_TYPE] for obj in self.dt]
gt_rois = [obj[self.IOU_TYPE] for obj in self.gt]
# M x N mat, where M = #dt, N = #gt
ious_mat = mask_util.iou(dt_rois, gt_rois, iscrowd)
            if len(ious_mat) == 0:
                ious_mat = [0.0 for _ in self.dt]
            ious_mat = np.array(ious_mat)
            self._ious[self.IOU_TYPE] = ious_mat
            # detection scores, used by the score-based VALUE_METHODs below
            self.scores = np.array([p.get("score", 0.0) for p in self.dt])
return ious_mat
@property
def values(self):
if len(self) == 0:
return []
token = (self.VALUE_METHOD, self.IOU_TYPE, self.IOU_THRESH)
if token in self._values:
values_list = self._values[token]
else:
if self.VALUE_METHOD == 0:
values_list = ((self.ious.max(0) > self.IOU_THRESH) * 1.0)
elif self.VALUE_METHOD == 1:
values_list = self.ious.max(0)
elif self.VALUE_METHOD == 2:
values_list = []
for inds in (self.ious > self.IOU_THRESH).T:
values_list.append(self.scores[inds].max())
            elif self.VALUE_METHOD == 3:
                values_list = []
                for inds in (self.ious > self.IOU_THRESH).T:
                    values_list.append(self.scores[inds].mean())
            else:
                raise ValueError(f"unknown VALUE_METHOD {self.VALUE_METHOD}")
            self._values[token] = values_list
        return values_list
def __len__(self):
return len(self.dt)
def __repr__(self):
return f"values = {self.values}"
class LabelEvaluator(ResultProcessor):
def _collect_stats(self, dataset_name):
"""
collect all neccessary stats for summarrize later
Args:
dataset_name (str)
Return:
stats (DatasetStats)
"""
predictions, dataset = self.datasets[dataset_name]
if self.verbose:
print(dataset_name, len(predictions))
dataset.load_gt_results()
        case_list = []
for uid, dt_list in predictions.items():
try:
# reloaded key becomes unicode
image_id = int(uid)
except ValueError:
# uid is actually image_uid
# which is invariant against shuffling sample dropping
image_id = dataset.get_index_from_img_uid(uid)
if image_id is None:
print(f"previous uid {uid} is not existed anymore")
continue
with_mask = "segmentation" in self.iou_types
gt_list = dataset.get_gt_results(image_id, with_mask=with_mask)
dt = self._filter_by_labels(dt_list)
gt = self._filter_by_labels(gt_list)
case = CaseStats(uid, dt, gt)
case_list.append(case)
return case_list
|
the-stack_0_23272 | # Simple clock program. Writes the exact time.
# Demo program for the I2C 16x2 Display from Ryanteck.uk
# Created by Matthew Timmons-Brown for The Raspberry Pi Guy YouTube channel
# Import necessary libraries for commuunication and display use
import lcddriver
import time
import datetime
# Load the driver and set it to "display"
# If you use something from the driver library use the "display." prefix first
display = lcddriver.lcd()
try:
print("Writing to display")
display.lcd_display_string("No time to waste", 1) # Write line of text to first line of display
while True:
display.lcd_display_string(str(datetime.datetime.now().time()), 2) # Write just the time to the display
# Program then loops with no delay (Can be added with a time.sleep)
except KeyboardInterrupt: # If there is a KeyboardInterrupt (when you press ctrl+c), exit the program and cleanup
print("Cleaning up!")
display.lcd_clear()
|
the-stack_0_23273 | from __future__ import division
import numpy as np
from scipy.misc import logsumexp
import cPickle # To store classes on files
import theano
import theano.tensor as T
def index2onehot(index, N):
"""
Transforms index to one-hot representation, for example
Input: e.g. index = [1, 2, 0], N = 4
Output: [[0, 1, 0, 0], [0, 0, 1, 0], [1, 0, 0, 0]]
"""
L = index.shape[0]
onehot = np.zeros((N, L))
for l in np.arange(L):
onehot[index[l], l] = 1
return onehot
class NumpyMLP:
"""
Basic MLP with forward-pass and gradient computation
"""
def __init__(self, geometry, actvfunc, rng=None, model_file=None):
"""
Input: geometry tuple with sizes of layer
Input: actvfunc list of strings indicating the type of activation
function. Supported 'sigmoid', 'softmax'
Input: rng string inidcating random seed
"""
# Generate random seed if not provided
if rng is None:
rng = np.random.RandomState(1234)
# CHECK THE PARAMETERS ARE VALID
self.sanity_checks(geometry, actvfunc)
# THIS DEFINES THE MLP
self.n_layers = len(geometry) - 1
if model_file:
if geometry or actvfunc:
raise ValueError("If you load a model geometry and actvfunc"
"should be None")
self.params, self.actvfunc = self.load(model_file)
else:
# Parameters are stored as [weight0, bias0, weight1, bias1, ... ]
# for consistency with the theano way of storing parameters
self.params = self.init_weights(rng, geometry, actvfunc)
self.actvfunc = actvfunc
def forward(self, x, all_outputs=False):
"""
Forward pass
all_outputs = True return intermediate activations
"""
# This will store activations at each layer and the input. This is
# needed to compute backpropagation
if all_outputs:
activations = []
# Input
tilde_z = x
for n in range(self.n_layers):
            # Get weights and bias of the layer (even and odd positions)
W = self.params[2*n]
b = self.params[2*n+1]
# Linear transformation
z = np.dot(W, tilde_z) + b
# Non-linear transformation
if self.actvfunc[n] == "sigmoid":
tilde_z = 1.0 / (1+np.exp(-z))
elif self.actvfunc[n] == "softmax":
# Softmax is computed in log-domain to prevent
# underflow/overflow
tilde_z = np.exp(z - logsumexp(z, 0))
if all_outputs:
activations.append(tilde_z)
if all_outputs:
tilde_z = activations
return tilde_z
def grads(self, x, y):
"""
Computes the gradients of the network with respect to cross entropy
error cost
"""
# Run forward and store activations for each layer
activations = self.forward(x, all_outputs=True)
# For each layer in reverse store the gradients for each parameter
nabla_params = [None] * (2*self.n_layers)
for n in np.arange(self.n_layers-1, -1, -1):
            # Get weights and bias (always in even and odd positions)
# Note that sometimes we need the weight from the next layer
W = self.params[2*n]
b = self.params[2*n+1]
if n != self.n_layers-1:
W_next = self.params[2*(n+1)]
# ----------
# Solution to Exercise 6.2
# If it is the last layer, compute the average cost gradient
# Otherwise, propagate the error backwards from the next layer
if n == self.n_layers-1:
# NOTE: This assumes cross entropy cost
if self.actvfunc[n] == 'sigmoid':
e = (activations[n]-y) / y.shape[0]
elif self.actvfunc[n] == 'softmax':
I = index2onehot(y, W.shape[0])
e = (activations[n]-I) / y.shape[0]
else:
e = np.dot(W_next.T, e)
                # This is correct, though the layer indexing here is offset by one relative to the guide
e *= activations[n] * (1-activations[n])
# Weight gradient
nabla_W = np.zeros(W.shape)
for l in np.arange(e.shape[1]):
if n == 0:
# For the first layer, the activation is the input
nabla_W += np.outer(e[:, l], x[:, l])
else:
nabla_W += np.outer(e[:, l], activations[n-1][:, l])
# Bias gradient
nabla_b = np.sum(e, 1, keepdims=True)
# End of solution to Exercise 6.2
# ----------
# Store the gradients
nabla_params[2*n] = nabla_W
nabla_params[2*n+1] = nabla_b
return nabla_params
def init_weights(self, rng, geometry, actvfunc):
"""
Following theano tutorial at
http://deeplearning.net/software/theano/tutorial/
"""
params = []
for n in range(self.n_layers):
n_in, n_out = geometry[n:n+2]
weight = rng.uniform(low=-np.sqrt(6./(n_in+n_out)),
high=np.sqrt(6./(n_in+n_out)),
size=(n_out, n_in))
if actvfunc[n] == 'sigmoid':
weight *= 4
elif actvfunc[n] == 'softmax':
weight *= 4
bias = np.zeros((n_out, 1))
# Append parameters
params.append(weight)
params.append(bias)
return params
def sanity_checks(self, geometry, actvfunc):
# CHECK ACTIVATIONS
if actvfunc:
# Supported actvfunc
supported_acts = ['sigmoid', 'softmax']
if geometry and (len(actvfunc) != len(geometry)-1):
raise ValueError("The number of layers and actvfunc does not match")
elif any([act not in supported_acts for act in actvfunc]):
raise ValueError("Only these actvfunc supported %s" % (" ".join(supported_acts)))
# All internal layers must be a sigmoid
for internal_act in actvfunc[:-1]:
if internal_act != 'sigmoid':
raise ValueError("Intermediate layers must be sigmoid")
def save(self, model_path):
"""
Save model
"""
par = self.params + self.actvfunc
with open(model_path, 'wb') as fid:
cPickle.dump(par, fid, cPickle.HIGHEST_PROTOCOL)
def load(self, model_path):
"""
Load model
"""
        with open(model_path, 'rb') as fid:
            par = cPickle.load(fid)
params = par[:len(par)//2]
actvfunc = par[len(par)//2:]
return params, actvfunc
def plot_weights(self, show=True, aspect='auto'):
"""
Plots the weights of the newtwork
"""
import matplotlib.pyplot as plt
plt.figure()
for n in range(self.n_layers):
# Get weights
W = self.params[2*n]
b = self.params[2*n+1]
plt.subplot(2, self.n_layers, n+1)
plt.imshow(W, aspect=aspect, interpolation='nearest')
plt.title('Layer %d Weight' % n)
plt.colorbar()
plt.subplot(2, self.n_layers, self.n_layers+(n+1))
plt.plot(b)
plt.title('Layer %d Bias' % n)
plt.colorbar()
if show:
plt.show()
class TheanoMLP(NumpyMLP):
"""
MLP VERSION USING THEANO
"""
def __init__(self, geometry, actvfunc, rng=None, model_file=None):
"""
Input: geometry tuple with sizes of layer
Input: actvfunc list of strings indicating the type of activation
function. Supported 'sigmoid', 'softmax'
Input: rng string inidcating random seed
"""
# Generate random seed if not provided
if rng is None:
rng = np.random.RandomState(1234)
# This will call NumpyMLP.__init__.py intializing
# Defining: self.n_layers self.params self.actvfunc
NumpyMLP.__init__(self, geometry, actvfunc, rng=rng, model_file=model_file)
# The parameters in the Theano MLP are stored as shared, borrowed
# variables. This data will be moved to the GPU when used
# use self.params.get_value() and self.params.set_value() to acces or
# modify the data in the shared variables
self.shared_params()
# Symbolic variables representing the input and reference output
x = T.matrix('x')
y = T.ivector('y') # Index of the correct class (int32)
# Compile forward function
self.fwd = theano.function([x], self._forward(x))
# Compile list of gradient functions
self.grs = [theano.function([x, y], _gr) for _gr in self._grads(x, y)]
def forward(self, x):
# Ensure the type matches theano selected type
x = x.astype(theano.config.floatX)
return self.fwd(x)
def grads(self, x, y):
# Ensure the type matches theano selected type
x = x.astype(theano.config.floatX)
y = y.astype('int32')
return [gr(x, y) for gr in self.grs]
def shared_params(self):
params = [None] * (2*self.n_layers)
for n in range(self.n_layers):
            # Get Numpy weights and bias (always in even and odd positions)
W = self.params[2*n]
b = self.params[2*n+1]
# IMPORTANT: Ensure the types in the variables and theano operations
            # match. This is often a source of errors
W = W.astype(theano.config.floatX)
b = b.astype(theano.config.floatX)
# Store as shared variables
# Note that, unlike numpy, broadcasting is not active by default
W = theano.shared(value=W, borrow=True)
b = theano.shared(value=b, borrow=True, broadcastable=(False, True))
# Keep in mind that naming variables is useful when debugging
W.name = 'W%d' % (n+1)
b.name = 'b%d' % (n+1)
# Store weight and bias, now as theano shared variables
params[2*n] = W
params[2*n+1] = b
# Overwrite our params
self.params = params
def _forward(self, x, all_outputs=False):
"""
Symbolic forward pass
all_outputs = True return symbolic input and intermediate activations
"""
# This will store activations at each layer and the input. This is
# needed to compute backpropagation
if all_outputs:
activations = [x]
# Input
tilde_z = x
# ----------
# Solution to Exercise 6.4
for n in range(self.n_layers):
            # Get weights and bias (always in even and odd positions)
W = self.params[2*n]
b = self.params[2*n+1]
# Linear transformation
z = T.dot(W, tilde_z) + b
# Keep in mind that naming variables is useful when debugging
# see e.g. theano.printing.debugprint(tilde_z)
z.name = 'z%d' % (n+1)
# Non-linear transformation
if self.actvfunc[n] == "sigmoid":
tilde_z = T.nnet.sigmoid(z)
elif self.actvfunc[n] == "softmax":
tilde_z = T.nnet.softmax(z.T).T
# Name variable
tilde_z.name = 'tilde_z%d' % (n+1)
if all_outputs:
activations.append(tilde_z)
# End of solution to Exercise 6.4
# ----------
if all_outputs:
tilde_z = activations
return tilde_z
def _cost(self, x, y):
"""
Symbolic average negative log-likelihood using the soft-max output
"""
p_y = self._forward(x)
return -T.mean(T.log(p_y)[y, T.arange(y.shape[0])])
def _grads(self, x, y):
"""
Symbolic gradients
"""
# Symbolic gradients with respect to the cost
return [T.grad(self._cost(x, y), param) for param in self.params]
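# Hedged usage sketch (not part of the original lab code): builds a tiny
# sigmoid/softmax MLP on random data, takes one plain SGD step and returns the
# class posteriors. Geometry, learning rate and shapes are illustrative
# assumptions; columns of x are observations, matching the convention above.
def _numpy_mlp_demo(n_feat=4, n_hid=5, n_class=3, n_obs=10, lrate=0.1):
    rng = np.random.RandomState(0)
    x = rng.rand(n_feat, n_obs)
    y = rng.randint(0, n_class, size=n_obs)
    mlp = NumpyMLP([n_feat, n_hid, n_class], ['sigmoid', 'softmax'], rng=rng)
    nabla_params = mlp.grads(x, y)
    for m in range(len(mlp.params)):
        # one vanilla gradient-descent update per parameter
        mlp.params[m] -= lrate * nabla_params[m]
    return mlp.forward(x)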
|
the-stack_0_23274 | from trakt.core.helpers import to_iso8601_datetime
from trakt.interfaces.base import authenticated
from trakt.interfaces.sync.core.mixins import Get, Add, Remove
class SyncHistoryInterface(Get, Add, Remove):
path = 'sync/history'
flags = {'is_watched': True}
def get(self, media=None, id=None, page=1, per_page=10, start_at=None, end_at=None, store=None, **kwargs):
if not media and id:
raise ValueError('The "id" parameter also requires the "media" parameter to be defined')
# Build parameters
params = []
if id:
params.append(id)
# Build query
query = {}
if page:
query['page'] = page
if per_page:
query['limit'] = per_page
if start_at:
query['start_at'] = to_iso8601_datetime(start_at)
if end_at:
query['end_at'] = to_iso8601_datetime(end_at)
# Request watched history
return super(SyncHistoryInterface, self).get(
media, store, params,
query=query,
flat=True,
**kwargs
)
@authenticated
def shows(self, *args, **kwargs):
return self.get(
'shows',
*args,
**kwargs
)
@authenticated
def movies(self, *args, **kwargs):
return self.get(
'movies',
*args,
**kwargs
)
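# Hedged usage sketch (assumes the trakt.py interface lookup pattern; client
# configuration and OAuth setup omitted):
#   Trakt['sync/history'].movies(page=1, per_page=10)
#   Trakt['sync/history'].get('episodes', start_at=..., end_at=...)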
|
the-stack_0_23277 | import setuptools
with open("README.md") as fp:
long_description = fp.read()
setuptools.setup(
name="ae",
version="0.0.1",
description="A sample CDK Python app",
long_description=long_description,
long_description_content_type="text/markdown",
author="author",
package_dir={"": "ae"},
packages=setuptools.find_packages(where="ae"),
install_requires=[
"aws-cdk.core==1.78.0",
"aws-cdk.aws_iam==1.78.0",
"aws-cdk.aws_sqs==1.78.0",
"aws-cdk.aws_sns==1.78.0",
"aws-cdk.aws_sns_subscriptions==1.78.0",
"aws-cdk.aws_s3==1.78.0",
],
python_requires=">=3.6",
classifiers=[
"Development Status :: 4 - Beta",
"Intended Audience :: Developers",
"License :: OSI Approved :: Apache Software License",
"Programming Language :: JavaScript",
"Programming Language :: Python :: 3 :: Only",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Topic :: Software Development :: Code Generators",
"Topic :: Utilities",
"Typing :: Typed",
],
)
|
the-stack_0_23278 | # -*- coding: utf-8 -*-
# Copyright (c) 2020, Frappe Technologies Pvt. Ltd. and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
import json
from six import string_types
from frappe.utils import getdate, get_datetime, rounded, flt, cint
from erpnext.loan_management.doctype.loan_interest_accrual.loan_interest_accrual import days_in_year
from erpnext.accounts.general_ledger import make_gl_entries, make_reverse_gl_entries
from erpnext.accounts.doctype.accounting_dimension.accounting_dimension import get_accounting_dimensions
from erpnext.controllers.accounts_controller import AccountsController
class Dunning(AccountsController):
def validate(self):
self.validate_overdue_days()
self.validate_amount()
if not self.income_account:
self.income_account = frappe.db.get_value('Company', self.company, 'default_income_account')
def validate_overdue_days(self):
self.overdue_days = (getdate(self.posting_date) - getdate(self.due_date)).days or 0
def validate_amount(self):
amounts = calculate_interest_and_amount(
self.posting_date, self.outstanding_amount, self.rate_of_interest, self.dunning_fee, self.overdue_days)
if self.interest_amount != amounts.get('interest_amount'):
self.interest_amount = flt(amounts.get('interest_amount'), self.precision('interest_amount'))
if self.dunning_amount != amounts.get('dunning_amount'):
self.dunning_amount = flt(amounts.get('dunning_amount'), self.precision('dunning_amount'))
if self.grand_total != amounts.get('grand_total'):
self.grand_total = flt(amounts.get('grand_total'), self.precision('grand_total'))
def on_submit(self):
self.make_gl_entries()
def on_cancel(self):
if self.dunning_amount:
self.ignore_linked_doctypes = ('GL Entry', 'Stock Ledger Entry')
make_reverse_gl_entries(voucher_type=self.doctype, voucher_no=self.name)
def make_gl_entries(self):
if not self.dunning_amount:
return
gl_entries = []
invoice_fields = ["project", "cost_center", "debit_to", "party_account_currency", "conversion_rate", "cost_center"]
inv = frappe.db.get_value("Sales Invoice", self.sales_invoice, invoice_fields, as_dict=1)
accounting_dimensions = get_accounting_dimensions()
invoice_fields.extend(accounting_dimensions)
dunning_in_company_currency = flt(self.dunning_amount * inv.conversion_rate)
default_cost_center = frappe.get_cached_value('Company', self.company, 'cost_center')
gl_entries.append(
self.get_gl_dict({
"account": inv.debit_to,
"party_type": "Customer",
"party": self.customer,
"due_date": self.due_date,
"against": self.income_account,
"debit": dunning_in_company_currency,
"debit_in_account_currency": self.dunning_amount,
"against_voucher": self.name,
"against_voucher_type": "Dunning",
"cost_center": inv.cost_center or default_cost_center,
"project": inv.project
}, inv.party_account_currency, item=inv)
)
gl_entries.append(
self.get_gl_dict({
"account": self.income_account,
"against": self.customer,
"credit": dunning_in_company_currency,
"cost_center": inv.cost_center or default_cost_center,
"credit_in_account_currency": self.dunning_amount,
"project": inv.project
}, item=inv)
)
make_gl_entries(gl_entries, cancel=(self.docstatus == 2), update_outstanding="No", merge_entries=False)
def resolve_dunning(doc, state):
for reference in doc.references:
if reference.reference_doctype == 'Sales Invoice' and reference.outstanding_amount <= 0:
dunnings = frappe.get_list('Dunning', filters={
'sales_invoice': reference.reference_name, 'status': ('!=', 'Resolved')})
for dunning in dunnings:
frappe.db.set_value("Dunning", dunning.name, "status", 'Resolved')
def calculate_interest_and_amount(posting_date, outstanding_amount, rate_of_interest, dunning_fee, overdue_days):
interest_amount = 0
grand_total = 0
if rate_of_interest:
interest_per_year = flt(outstanding_amount) * flt(rate_of_interest) / 100
interest_amount = (interest_per_year * cint(overdue_days)) / 365
grand_total = flt(outstanding_amount) + flt(interest_amount) + flt(dunning_fee)
dunning_amount = flt(interest_amount) + flt(dunning_fee)
return {
'interest_amount': interest_amount,
'grand_total': grand_total,
'dunning_amount': dunning_amount}
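# Worked example with illustrative figures: outstanding_amount=1000.0 at
# rate_of_interest=10 (% p.a.), dunning_fee=5.0 and overdue_days=73 gives
# interest_amount = 1000 * 0.10 * 73 / 365 = 20.0, dunning_amount = 25.0
# and grand_total = 1025.0.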
@frappe.whitelist()
def get_dunning_letter_text(dunning_type, doc, language=None):
if isinstance(doc, string_types):
doc = json.loads(doc)
if language:
filters = {'parent': dunning_type, 'language': language}
else:
filters = {'parent': dunning_type, 'is_default_language': 1}
letter_text = frappe.db.get_value('Dunning Letter Text', filters,
['body_text', 'closing_text', 'language'], as_dict=1)
if letter_text:
return {
'body_text': frappe.render_template(letter_text.body_text, doc),
'closing_text': frappe.render_template(letter_text.closing_text, doc),
'language': letter_text.language
}
|
the-stack_0_23279 | """Component unittests."""
from __future__ import division
import numpy as np
import unittest
import warnings
from six.moves import range
from six import assertRaisesRegex
from openmdao.api import Problem, ExplicitComponent, Group, IndepVarComp
from openmdao.core.component import Component
from openmdao.test_suite.components.expl_comp_simple import TestExplCompSimple
from openmdao.test_suite.components.expl_comp_array import TestExplCompArray
from openmdao.test_suite.components.impl_comp_simple import TestImplCompSimple
from openmdao.test_suite.components.impl_comp_array import TestImplCompArray
from openmdao.test_suite.components.simple_comps import TestExplCompDeprecated
from openmdao.devtools.testutil import assert_rel_error
class TestExplicitComponent(unittest.TestCase):
def test___init___simple(self):
"""Test a simple explicit component."""
comp = TestExplCompSimple()
prob = Problem(comp).setup(check=False)
# check optional metadata (desc)
self.assertEqual(
comp._var_abs2meta['input']['length']['desc'],
'length of rectangle')
self.assertEqual(
comp._var_abs2meta['input']['width']['desc'],
'width of rectangle')
self.assertEqual(
comp._var_abs2meta['output']['area']['desc'],
'area of rectangle')
prob['length'] = 3.
prob['width'] = 2.
prob.run_model()
assert_rel_error(self, prob['area'], 6.)
def test___init___array(self):
"""Test an explicit component with array inputs/outputs."""
comp = TestExplCompArray(thickness=1.)
prob = Problem(comp).setup(check=False)
prob['lengths'] = 3.
prob['widths'] = 2.
prob.run_model()
assert_rel_error(self, prob['total_volume'], 24.)
def test_error_handling(self):
"""Test error handling when adding inputs/outputs."""
comp = ExplicitComponent()
msg = "Incompatible shape for '.*': Expected (.*) but got (.*)"
with assertRaisesRegex(self, ValueError, msg):
comp.add_output('arr', val=np.ones((2,2)), shape=([2]))
with assertRaisesRegex(self, ValueError, msg):
comp.add_input('arr', val=np.ones((2,2)), shape=([2]))
msg = "Shape of indices does not match shape for '.*': Expected (.*) but got (.*)"
with assertRaisesRegex(self, ValueError, msg):
comp.add_input('arr', val=np.ones((2,2)), src_indices=[0,1])
msg = ("The shape argument should be an int, tuple, or list "
"but a '<(.*) 'numpy.ndarray'>' was given")
with assertRaisesRegex(self, TypeError, msg):
comp.add_output('arr', shape=np.array([2.]))
with assertRaisesRegex(self, TypeError, msg):
comp.add_input('arr', shape=np.array([2.]))
msg = ("The shape argument should be an int, tuple, or list "
"but a '<(.*) 'float'>' was given")
with assertRaisesRegex(self, TypeError, msg):
comp.add_output('arr', shape=2.)
with assertRaisesRegex(self, TypeError, msg):
comp.add_input('arr', shape=2.)
# check that a numpy integer type is accepted for shape
shapes = np.array([3], dtype=np.uint32)
comp.add_output('aro', shape=shapes[0])
comp.add_input('ari', shape=shapes[0])
def test_deprecated_vars_in_init(self):
"""test that deprecation warning is issued if vars are declared in __init__."""
with warnings.catch_warnings(record=True) as w:
TestExplCompDeprecated()
self.assertEqual(len(w), 2)
self.assertTrue(issubclass(w[0].category, DeprecationWarning))
self.assertTrue(issubclass(w[1].category, DeprecationWarning))
self.assertEqual(str(w[0].message),
"In the future, the 'add_input' method must be "
"called from 'setup' rather than "
"in the '__init__' function.")
self.assertEqual(str(w[1].message),
"In the future, the 'add_output' method must be "
"called from 'setup' rather than "
"in the '__init__' function.")
def test_setup_bug1(self):
# This tests a bug where, if you run setup more than once on a derived component class,
# the list of var names continually gets prepended with the component global path.
class NewBase(Component):
def __init__(self, **kwargs):
super(NewBase, self).__init__(**kwargs)
class MyComp(NewBase):
def __init__(self, **kwargs):
super(MyComp, self).__init__(**kwargs)
def setup(self):
self.add_input('x', val=0.0)
self.add_output('y', val=0.0)
prob = Problem()
model = prob.model = Group()
model.add_subsystem('comp', MyComp())
prob.setup(check=False)
comp = model.get_subsystem('comp')
self.assertEqual(comp._var_abs_names['input'], ['comp.x'])
self.assertEqual(comp._var_abs_names['output'], ['comp.y'])
prob.run_model()
prob.setup(check=False)
self.assertEqual(comp._var_abs_names['input'], ['comp.x'])
self.assertEqual(comp._var_abs_names['output'], ['comp.y'])
def test_add_input_output_dupes(self):
class Comp(ExplicitComponent):
def setup(self):
self.add_input('x', val=3.0)
self.add_input('x', val=3.0)
self.add_output('y', val=3.0)
prob = Problem()
model = prob.model = Group()
model.add_subsystem('px', IndepVarComp('x', val=3.0))
model.add_subsystem('comp', Comp())
model.connect('px.x', 'comp.x')
msg = "Variable name 'x' already exists."
with assertRaisesRegex(self, ValueError, msg):
prob.setup(check=False)
class Comp(ExplicitComponent):
def setup(self):
self.add_input('x', val=3.0)
self.add_output('y', val=3.0)
self.add_output('y', val=3.0)
prob = Problem()
model = prob.model = Group()
model.add_subsystem('px', IndepVarComp('x', val=3.0))
model.add_subsystem('comp', Comp())
model.connect('px.x', 'comp.x')
msg = "Variable name 'y' already exists."
with assertRaisesRegex(self, ValueError, msg):
prob.setup(check=False)
class Comp(ExplicitComponent):
def setup(self):
self.add_input('x', val=3.0)
self.add_output('x', val=3.0)
self.add_output('y', val=3.0)
prob = Problem()
model = prob.model = Group()
model.add_subsystem('px', IndepVarComp('x', val=3.0))
model.add_subsystem('comp', Comp())
model.connect('px.x', 'comp.x')
msg = "Variable name 'x' already exists."
with assertRaisesRegex(self, ValueError, msg):
prob.setup(check=False)
# Make sure we can reconfigure.
class Comp(ExplicitComponent):
def setup(self):
self.add_input('x', val=3.0)
self.add_output('y', val=3.0)
prob = Problem()
model = prob.model = Group()
model.add_subsystem('px', IndepVarComp('x', val=3.0))
model.add_subsystem('comp', Comp())
model.connect('px.x', 'comp.x')
prob.setup(check=False)
# pretend we reconfigured
prob.setup(check=False)
class TestImplicitComponent(unittest.TestCase):
def test___init___simple(self):
"""Test a simple implicit component."""
x = -0.5
a = np.abs(np.exp(0.5 * x) / x)
comp = TestImplCompSimple()
prob = Problem(comp).setup(check=False)
prob['a'] = a
prob.run_model()
assert_rel_error(self, prob['x'], x)
def test___init___array(self):
"""Test an implicit component with array inputs/outputs."""
comp = TestImplCompArray()
prob = Problem(comp).setup(check=False)
prob['rhs'] = np.ones(2)
prob.run_model()
assert_rel_error(self, prob['x'], np.ones(2))
class TestRangePartials(unittest.TestCase):
def test_range_partials(self):
class RangePartialsComp(ExplicitComponent):
def __init__(self, size=4):
super(RangePartialsComp, self).__init__()
self.size = size
def setup(self):
# verify that both iterable and array types are valid
# for val and src_indices arguments to add_input
self.add_input('v1', val=range(self.size),
src_indices=range(self.size))
self.add_input('v2', val=2*np.ones(self.size),
src_indices=np.array(range(self.size)))
# verify that both iterable and array types are valid
# for val, upper and lower arguments to add_output
self.add_output('vSum', val=range(self.size),
lower=np.zeros(self.size),
upper=range(self.size))
self.add_output('vProd', val=np.zeros(self.size),
lower=range(self.size),
upper=np.ones(self.size))
# verify that both iterable and list types are valid
# for rows and cols arguments to declare_partials
rows = range(self.size)
cols = list(range(self.size))
self.declare_partials(of='vProd', wrt='v1',
val=np.ones(self.size),
rows=rows, cols=cols)
def compute(self, inputs, outputs):
outputs['vSum'] = inputs['v1'] + inputs['v2']
outputs['vProd'] = inputs['v1'] * inputs['v2']
comp = RangePartialsComp()
prob = Problem(model=comp)
prob.setup(check=False)
prob.run_model()
assert_rel_error(self, prob['vSum'], np.array([2.,3.,4.,5.]), 0.00001)
assert_rel_error(self, prob['vProd'], np.array([0.,2.,4.,6.]), 0.00001)
if __name__ == '__main__':
unittest.main()
|
the-stack_0_23280 | from musp import *
from math import pi, sin, cos
from random import random
import numpy as np
tonic_freq = PitchedSound.note_frequency("F#", 3)
tempo_dur = 2.6/7
datadir = os.path.expanduser("~/.mu-sp")
def aulib(sound_dir):
return os.path.join(datadir, "audio", sound_dir)
def rhlib(rh_name):
return os.path.join(datadir, "rhythm/an_egg_rh", rh_name + ".rh")
def loctrans(far, angle, mem=[0]):
mem[0] += pi*2/200
return Location((angle, mem[0]), far)
def halftones_for_scale_deg(degree):
semitones = [0, 2, 3, 5, 7, 8, 10][int(degree) - 1]
if degree % 1 == .5:
semitones += 1
return semitones
def deg_freq(degree):
octave_mult = 1
while degree > 7:
degree -= 7
octave_mult *= 2
return tonic_freq*octave_mult * PitchedSound.temper_ratio**halftones_for_scale_deg(degree)
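# e.g. deg_freq(1) is the tonic (F#3) and deg_freq(8) is the tonic one octave up.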
def fundamental_rhythm(beat):
return beat.split([3, 3, 7, 3])
def apply_rhythm(beat, rhythm_file, key_sound_map):
with open(rhythm_file) as rf:
char_times = eval(''.join(rf.readline()))
beat_map = beat.interleave_split(char_times)
for key, beats in beat_map.iteritems():
for beat in beats:
try:
for sound, loc in key_sound_map[key]:
beat.attach(sound, loc)
except:
beat.attach(*key_sound_map[key])
crystal_sound = RandomPitchedSound()
crystal_sound.populate_with_dir(aulib("crystal_ding"))
def add_tracks_fromto(tracklist, listoftracks):
for track in tracklist:
listoftracks.append(track)
def crystal_sounding(beat):
beat.set_duration(tempo_dur*7)
part1 = "565765243"
crys1 = Track("crystals...", Sound.default_rate, padding=.5, end_padding=2)
crys1_root = crys1.link_root(beat)
with open(rhlib("crys_1_j")) as rf:
char_times = eval(''.join(rf.readline()))
beats = crys1_root.interleave_split(char_times)['j']
for b, deg in zip(beats, part1):
b.attach(SpreadSound(crystal_sound.for_pitch(4*deg_freq(int(deg))), (.1, .1, .1), .02, 3), loctrans(4, pi/2))
return [crys1]
def crystal_rise(beat):
beat.set_duration(tempo_dur*7)
part2 = "34567"
crys2 = Track("more crystals...", Sound.default_rate, padding=.5, end_padding=2)
crys2_root = crys2.link_root(beat)
beats = crys2_root.split_even(14)[9:]
for b, deg in zip(beats, part2):
b.attach(crystal_sound.for_pitch(2*deg_freq(int(deg))), loctrans(4, -pi/2))
return [crys2]
def crystal_complex(beat):
beat.set_duration(tempo_dur*14)
#part3 = "78765774554"*2
part3 = "17876577547187657232"
crys3 = Track("more (muy complicated) crystals...", Sound.default_rate, padding=.5, end_padding=2)
crys3_root = crys3.link_root(beat)
beats = crys3_root.split([1, .5, .5, 1, 1, 1, 2, #groups of 7
2, 1, 2, 2,
2, 2, 1, 1, 1,
3, 1, 1, 2])
for b, deg in zip(beats, part3):
deg = int(deg) + 8
b.attach(crystal_sound.for_pitch(deg_freq(int(deg)+4)), loctrans(4, -pi/6))
return [crys3]
def apply_each_half(beat, one_beat_function, firsthalf=True, secondhalf=True):
if not firsthalf and not secondhalf:
return
sometracks = []
try:
b1, b2 = beat.beats
except:
b1, b2 = beat.split_even(2)
if firsthalf:
add_tracks_fromto(one_beat_function(b1), sometracks)
if secondhalf:
add_tracks_fromto(one_beat_function(b2), sometracks)
return sometracks
def crystal_compiled_block(beat, levels):
level_funcs = [lambda b: apply_each_half(b, crystal_sounding, True, False),
lambda b: apply_each_half(b, crystal_sounding, False, True),
lambda b: apply_each_half(b, crystal_rise, False, True),
lambda b: apply_each_half(b, crystal_rise, True, False),
crystal_complex]
allthesetracks = []
for l in levels:
add_tracks_fromto(level_funcs[l](beat), allthesetracks)
return allthesetracks
bow_violin_sound = RandomPitchedSound()
bow_violin_sound.populate_with_dir(aulib("bowed_violin"))
pluck_violin_sound = RandomPitchedSound()
pluck_violin_sound.populate_with_dir(aulib("plucked_violin_ring"))
pluck_violin_sound.populate_with_dir(aulib("plucked_violin_damp"))
def vibrato_snd_for_beat_frac(beat, deg, f, distance, sound=bow_violin_sound, h=0):
# h is vibrato hertz
vibrato_f = lambda t: PitchedSound.temper_ratio**(.25/(1.0 + np.exp(-t * 3))*sin(t*h*(2*pi)))
beat.attach(ClippedSound(ResampledSound(sound.for_pitch(deg_freq(float(deg))), vibrato_f,
cache=False), tempo_dur*f), loctrans(distance, -pi/3))
def violin_pluck_chords(beat):
violin1 = Track("Violin me once!", Sound.default_rate)
violin_root = violin1.link_root(beat)
degrees = (1, 1, 1, 1, 1, 1)
durations = (1, 1, 2, 3, 5, 2)
distances = (4, 3, 2, 4, 2, 3)
for deg, dur, dist, b in zip(degrees, durations, distances, violin_root.split(durations)):
vibrato_snd_for_beat_frac(b, deg, dur, dist/5.0, sound=pluck_violin_sound, h=7)
violin2 = Track("Violin me twice!", Sound.default_rate)
violin_root = violin2.link_root(beat)
degrees = (5, 5, 5, 4, 4, 3)
durations = [d + .05 for d in (1, 1, 2, 3, 5, 2)]
distances = (3, 3.5, 3, 2, 2, 4)
for deg, dur, dist, b in zip(degrees, durations, distances, violin_root.split(durations)):
vibrato_snd_for_beat_frac(b, deg, dur + .1, dist/5.0, sound=pluck_violin_sound, h=7)
violin3 = Track("Violin me thrice!", Sound.default_rate)
violin_root = violin3.link_root(beat)
degrees = (7, 6, 7, 7, 6, 4)
durations = [d - .05 for d in (1, 1, 2, 3, 5, 2)]
distances = (4, 3.5, 4, 3, 4, 3.5)
for deg, dur, dist, b in zip(degrees, durations, distances, violin_root.split(durations)):
vibrato_snd_for_beat_frac(b, deg, dur + .1, dist/5.0, sound=pluck_violin_sound, h=7)
return [violin1, violin2, violin3]
werb_raw = RawPitchedSound(os.path.join(aulib("werb_sine"), "werb_sine.0.110.wav"))
werb_sounds = {}
def werb_for_beat_frac(beat, degree, duration, distance):
if degree not in werb_sounds:
werb_sounds[degree] = RandomIntervalSound(werb_raw.for_pitch(.49*deg_freq(degree)), margin=.01)
werb_sound = werb_sounds[degree]
beat.attach(werb_sound.for_interval(duration*tempo_dur), loctrans(distance, pi))
def werb_under(beat):
werb = Track("werbtrack", Sound.default_rate)
werb_root = werb.link_root(beat)
for b, d in zip(werb_root.split_even(4), (1, 2, 3, 4)):
werb_for_beat_frac(b, d, 14.0/4, .5)
return [werb]
random_mid_drum = RandomSound()
random_mid_drum.populate_with_dir(aulib("snares_off"))
mid_drum = SpreadSound(random_mid_drum, (.2, .2, 0), 0, 1)
def descending_snaresoff_tuple(beat, n):
beats = [beat] if n is 1 else beat.split_even(n)
for b, i in zip(beats, range(n, 0, -1)):
b.attach(mid_drum, loctrans(i + .2, pi*2/12*i))
def mid_drum_rhythm(beat):
drum = Track("Snares off please", Sound.default_rate)
drum_root = drum.link_root(beat)
one, two, three, four, five, six, seven = drum_root.split_even(7)
descending_snaresoff_tuple(one, 2)
descending_snaresoff_tuple(two, 1)
descending_snaresoff_tuple(three, 3)
descending_snaresoff_tuple(four, 4)
descending_snaresoff_tuple(five, 1)
descending_snaresoff_tuple(six, 6)
descending_snaresoff_tuple(seven, 1)
return [drum]
def create_main(beat):
trackbag = []
for levels, crystaltest in zip([(0, 1), (0, 1, 2), (0, 1, 2, 4), (0, 1, 2, 3, 4), (2, 3, 4)],
beat.split(5)):
add_tracks_fromto(crystal_compiled_block(crystaltest, levels), trackbag)
add_tracks_fromto(violin_pluck_chords(crystaltest), trackbag)
add_tracks_fromto(werb_under(crystaltest), trackbag)
add_tracks_fromto(apply_each_half(crystaltest, mid_drum_rhythm), trackbag)
return trackbag
mainbeat = Beat()
mix = Mixer("Let's make some art, I guess...!", Sound.default_rate, create_main(mainbeat))
mix.play(quick_play=False)
|
the-stack_0_23284 | #!/usr/bin/env python
# coding: utf8
"""Example of a spaCy v2.0 pipeline component that requests all countries via
the REST Countries API, merges country names into one token, assigns entity
labels and sets attributes on country tokens, e.g. the capital and lat/lng
coordinates. Can be extended with more details from the API.
* REST Countries API: https://restcountries.eu (Mozilla Public License MPL 2.0)
* Custom pipeline components: https://spacy.io//usage/processing-pipelines#custom-components
Compatible with: spaCy v2.0.0+
Prerequisites: pip install requests
"""
from __future__ import unicode_literals, print_function
import requests
import plac
from spacy.lang.en import English
from spacy.matcher import PhraseMatcher
from spacy.tokens import Doc, Span, Token
def main():
# For simplicity, we start off with only the blank English Language class
# and no model or pre-defined pipeline loaded.
nlp = English()
rest_countries = RESTCountriesComponent(nlp) # initialise component
nlp.add_pipe(rest_countries) # add it to the pipeline
doc = nlp(u"Some text about Colombia and the Czech Republic")
print('Pipeline', nlp.pipe_names) # pipeline contains component name
print('Doc has countries', doc._.has_country) # Doc contains countries
for token in doc:
if token._.is_country:
print(token.text, token._.country_capital, token._.country_latlng,
token._.country_flag) # country data
print('Entities', [(e.text, e.label_) for e in doc.ents]) # entities
class RESTCountriesComponent(object):
"""spaCy v2.0 pipeline component that requests all countries via
the REST Countries API, merges country names into one token, assigns entity
labels and sets attributes on country tokens.
"""
name = 'rest_countries' # component name, will show up in the pipeline
def __init__(self, nlp, label='GPE'):
"""Initialise the pipeline component. The shared nlp instance is used
to initialise the matcher with the shared vocab, get the label ID and
generate Doc objects as phrase match patterns.
"""
# Make request once on initialisation and store the data
r = requests.get('https://restcountries.eu/rest/v2/all')
r.raise_for_status() # make sure requests raises an error if it fails
countries = r.json()
# Convert API response to dict keyed by country name for easy lookup
# This could also be extended using the alternative and foreign language
# names provided by the API
self.countries = {c['name']: c for c in countries}
self.label = nlp.vocab.strings[label] # get entity label ID
# Set up the PhraseMatcher with Doc patterns for each country name
patterns = [nlp(c) for c in self.countries.keys()]
self.matcher = PhraseMatcher(nlp.vocab)
self.matcher.add('COUNTRIES', None, *patterns)
# Register attribute on the Token. We'll be overwriting this based on
# the matches, so we're only setting a default value, not a getter.
# If no default value is set, it defaults to None.
Token.set_extension('is_country', default=False)
Token.set_extension('country_capital')
Token.set_extension('country_latlng')
Token.set_extension('country_flag')
# Register attributes on Doc and Span via a getter that checks if one of
# the contained tokens is set to is_country == True.
Doc.set_extension('has_country', getter=self.has_country)
Span.set_extension('has_country', getter=self.has_country)
def __call__(self, doc):
"""Apply the pipeline component on a Doc object and modify it if matches
are found. Return the Doc, so it can be processed by the next component
in the pipeline, if available.
"""
matches = self.matcher(doc)
spans = [] # keep the spans for later so we can merge them afterwards
for _, start, end in matches:
# Generate Span representing the entity & set label
entity = Span(doc, start, end, label=self.label)
spans.append(entity)
# Set custom attribute on each token of the entity
# Can be extended with other data returned by the API, like
# currencies, country code, flag, calling code etc.
for token in entity:
token._.set('is_country', True)
token._.set('country_capital', self.countries[entity.text]['capital'])
token._.set('country_latlng', self.countries[entity.text]['latlng'])
token._.set('country_flag', self.countries[entity.text]['flag'])
# Overwrite doc.ents and add entity – be careful not to replace!
doc.ents = list(doc.ents) + [entity]
for span in spans:
# Iterate over all spans and merge them into one token. This is done
# after setting the entities – otherwise, it would cause mismatched
# indices!
span.merge()
return doc # don't forget to return the Doc!
def has_country(self, tokens):
"""Getter for Doc and Span attributes. Returns True if one of the tokens
is a country. Since the getter is only called when we access the
attribute, we can refer to the Token's 'is_country' attribute here,
which is already set in the processing step."""
return any([t._.get('is_country') for t in tokens])
if __name__ == '__main__':
plac.call(main)
# Expected output:
# Pipeline ['rest_countries']
# Doc has countries True
# Colombia Bogotá [4.0, -72.0] https://restcountries.eu/data/col.svg
# Czech Republic Prague [49.75, 15.5] https://restcountries.eu/data/cze.svg
# Entities [('Colombia', 'GPE'), ('Czech Republic', 'GPE')]
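# A minimal extra sketch (not part of the original example): the same getter is
# registered on Span as well, so slices of the Doc expose the attribute too.
# Indices below are hypothetical and assume the Doc produced by main() above.
# span = doc[2:4]          # hypothetical slice covering "about Colombia"
# span._.has_country       # -> True, because the slice contains a country token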
|
the-stack_0_23286 | # Base design variable class
class DesignVariable(object):
"""
Note:
if you want to view aircraft or engine, you should set real value and boundary condition
if you want to optimize aircraft or engine, you should set normalized value and boundary condition
"""
def __init__(self, name):
# design variable name
self.name = name
# real value
self.val = None
# normalized value(0~1)
self.norm_val = None
# boundary condition
self.bound = [None, None] # [minimum, maximum]
# fixed flag
self.fix = False
def set_bound(self, bounds):
"""
set boundary condition
:param bounds: [minimum value, maximum value]
:return: None
"""
self.bound[0], self.bound[1] = bounds[0], bounds[1]
def set_val(self, val):
"""
set design variable's real value
:param val: the value of design variable
:return: None
"""
self.val = val
def set_norm_val(self, norm_val):
"""
set design variable normalized value
:param norm_val: the normalized value of design variable
:return: None
"""
self.norm_val = norm_val
def normalize(self):
"""
normalize real value
:return: None
"""
self.norm_val = (self.val - self.bound[0]) / (self.bound[1] - self.bound[0])
def denormalize(self):
"""
convert normalized value into real value
        :return: None
"""
self.val = self.bound[0] + self.norm_val * (self.bound[1] - self.bound[0])
def fixed(self, fix_flag):
"""
:param fix_flag: boolean
:return: None
"""
self.fix = fix_flag
if self.fix:
self.bound = [self.val, self.val]
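# A short usage sketch for DesignVariable (values are illustrative only):
# dv = DesignVariable('fuselage_length')
# dv.set_bound([30, 40])
# dv.set_val(35.0)
# dv.normalize()     # norm_val = (35.0 - 30) / (40 - 30) = 0.5
# dv.denormalize()   # val = 30 + 0.5 * (40 - 30) = 35.0
# dv.fixed(True)     # bound collapses to [35.0, 35.0]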
# Aircraft fuselage design variables class
class AircraftFuselageDesignVariable(object):
"""
Note:
Design Variable Controller Class for Aircraft Fuselage
--Attributes--
aircraft_fuselage_dv_names: list
the list of fuselage design variables
aircraft_fuselage_dv_idx_dict: dict
dictionary in order to change name into index number
fuselage_dv_num: int
the number of fuselage design variables
aircraft_fuselage_boundaries: list[list]
the list which keeps the bounds list
aircraft_fuselage_fix_flags: list[bool]
the list which keeps boolean flag which indicates whether or not the value is fixed
fl_dv_sets: list[DesignVariableClass]
the list of design variable class
--Method--
set_bounds()
set_fix()
create_dv_sets()
"""
def __init__(self):
self.aircraft_fuselage_dv_names = ['fuselage_length', 's1_h', 's2_h', 's3_h', 's1_v', 's2_v', 's3_v']
# create dictionary combined with name and index number
self.aircraft_fuselage_dv_idx_dict = {}
for idx, name in enumerate(self.aircraft_fuselage_dv_names):
self.aircraft_fuselage_dv_idx_dict[name] = idx
self.fuselage_dv_num = len(self.aircraft_fuselage_dv_names)
self.aircraft_fuselage_boundaries = [[30, 40], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1], [0, 1]]
self.aircraft_fuselage_fix_flags = [False for _ in range(self.fuselage_dv_num)]
# Initialize design variables set
self.fl_dv_sets = [DesignVariable(name) for name in self.aircraft_fuselage_dv_names]
def set_bounds(self, name, bounds):
"""
:param name: str
design variable name
:param bounds: list
list of boundary condition(min, max)
:return: None
"""
idx = self.aircraft_fuselage_dv_idx_dict[name]
self.aircraft_fuselage_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
:param name: str
design variable name
:param flag: bool
flag which means fix
:return: None
"""
idx = self.aircraft_fuselage_dv_idx_dict[name]
self.aircraft_fuselage_fix_flags[idx] = flag
def create_dv_sets(self, aircraft_fuselage_dvs):
"""
:param aircraft_fuselage_dvs: list
design variables vector
:return: None
"""
# set design variable class
for idx, fd in enumerate(self.fl_dv_sets):
# set design variables
fd.set_val(aircraft_fuselage_dvs[idx])
# set boundaries
fd.set_bound(self.aircraft_fuselage_boundaries[idx])
# set fixed flag
fd.fixed(self.aircraft_fuselage_fix_flags[idx])
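# A short sketch of the controller workflow (values are illustrative; see main()
# at the bottom of this file for a full create_dv_sets example):
# afdv = AircraftFuselageDesignVariable()
# afdv.set_bounds('fuselage_length', [35, 45])   # widen the length bounds
# afdv.set_fix('s1_h')                           # freeze section 1 horizontal coefficient
# afdv.create_dv_sets([37.57, 0.05, 0.105, 0.05, 0.2, 0.6, 0.2])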
# Aircraft wing design variable class
class AircraftWingDesignVariable(object):
def __init__(self):
self.aircraft_wing_dv_names = ['main wing span', 'main wing AR', 'main wing taper ratio', 'main wing tc ratio',
'main wing retreat angle', 'horizontal wing span', 'horizontal wing aspect ratio',
'horizontal wing taper ratio', 'horizontal wing tc ratio',
'horizontal wing retreat angle', 'vertical wing span', 'vertical wing AR',
'vertical wing taper ratio', 'vertical wing tc ratio', 'vertical wing retreat angle']
# create dictionary combined with name and index number
self.aircraft_wing_dv_idx_dict = {}
for idx, name in enumerate(self.aircraft_wing_dv_names):
self.aircraft_wing_dv_idx_dict[name] = idx
self.aircraft_wing_boundaries = [[30, 40],
[8, 10],
[0.2, 0.3],
[0.1, 0.2],
[20, 30],
[10, 15],
[1.5, 3.0],
[0.2, 0.3],
[0.1, 0.2],
[25, 35],
[5, 10],
[1.0, 2.0],
[0.2, 0.3],
[0.1, 0.2],
[40, 50]]
self.aircraft_wing_dv_num = len(self.aircraft_wing_dv_names)
self.aircraft_wing_fix_flags = [False for _ in range(self.aircraft_wing_dv_num)]
# Initialize aircraft wing dv sets
self.aw_dv_sets = [DesignVariable(name) for name in self.aircraft_wing_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.aircraft_wing_dv_idx_dict[name]
self.aircraft_wing_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with each design variable
:param name: str
design variable name
:param flag: Boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.aircraft_wing_dv_idx_dict[name]
self.aircraft_wing_fix_flags[idx] = flag
def create_dv_sets(self, aircraft_wing_dvs):
"""
create design variable sets
:param aircraft_wing_dvs: list
list which has the sets of design variable value
:return: None
"""
# set aircraft wing dv sets
for idx, ad in enumerate(self.aw_dv_sets):
ad.set_val(aircraft_wing_dvs[idx])
ad.set_bound(self.aircraft_wing_boundaries[idx])
ad.fixed(self.aircraft_wing_fix_flags[idx])
# Aircraft performance design variable class
class AircraftPerformanceDesignVariable(object):
def __init__(self):
self.aircraft_performance_dv_names = ['attack of angle']
self.aircraft_performance_dv_idx_dict = {}
for idx, name in enumerate(self.aircraft_performance_dv_names):
self.aircraft_performance_dv_idx_dict[name] = idx
self.aircraft_performance_boundaries = [[0, 6]]
self.aircraft_performance_dv_num = len(self.aircraft_performance_dv_names)
self.aircraft_performance_fix_flags = [False for _ in range(self.aircraft_performance_dv_num)]
# Initialize the aircraft performance dv sets
self.ap_dv_sets = [DesignVariable(name) for name in self.aircraft_performance_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.aircraft_performance_dv_idx_dict[name]
self.aircraft_performance_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with each design variable
:param name: str
design variable name
:param flag: Boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.aircraft_performance_dv_idx_dict[name]
self.aircraft_performance_fix_flags[idx] = flag
def create_dv_sets(self, aircraft_performance_dvs):
"""
create design variable sets
        :param aircraft_performance_dvs: list
list which has the sets of design variable value
:return: None
"""
# set aircraft wing dv sets
for idx, ad in enumerate(self.ap_dv_sets):
ad.set_val(aircraft_performance_dvs[idx])
ad.set_bound(self.aircraft_performance_boundaries[idx])
ad.fixed(self.aircraft_performance_fix_flags[idx])
# Engine design variable class
class EngineDesignVariable(object):
def __init__(self):
self.engine_dv_names = ['OPR', 'TIT', 'BPR', 'FPR', 'Nen', 'technical level', 'cool air lpt rate',
'cool air hpt rate',
'engine material quality', 'fan stage number', 'lpc stage number', 'hpc stage number',
'hpt stage number', 'lpt stage number', 'fan load factor', 'lpc load factor',
'hpc load factor',
'hpt load factor', 'lpt load factor', 'BPRe', 'FPRe', 'Nfan', 'engine electric efficiency',
'engine electric density']
self.engine_dv_idx_dict = {}
for idx, name in enumerate(self.engine_dv_names):
self.engine_dv_idx_dict[name] = idx
self.engine_boundaries = [[20, 40], # OPR
[1200, 1500], # TIT
[2, 8], # BPR
[1.2, 1.7], # FPR
[1, 4], # Nen
[3, 4], # tech lev
[0.0, 0.1], # cool air lpt
[0.0, 0.2], # cool air hpt
[0.5, 1.0], # engine material quality
[1, 2], # fan stage number
[2, 4], # lpc stage number
[8, 10], # hpc stage number
[1, 3], # hpt stage number
[1, 5], # lpt stage number
[0.1, 0.4], # fan load factor
[0.1, 0.4], # lpc load factor
[0.3, 0.5], # hpc load factor
[1.3, 1.6], # hpt load factor
[1.3, 1.6], # lpt load factor
[0, 10], # BPRe
[0, 1.5], # FPRe
[1, 10], # Nfan
[0.9, 0.99], # eng_electriceff
[0.052, 0.52] # eng_electric_dense
]
self.engine_dv_num = len(self.engine_dv_names)
self.engine_fix_flags = [False for _ in range(self.engine_dv_num)]
self.e_dv_sets = [DesignVariable(name) for name in self.engine_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.engine_dv_idx_dict[name]
self.engine_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with design variable
:param name: str
design variable name
:param flag: boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.engine_dv_idx_dict[name]
self.engine_fix_flags[idx] = flag
def create_dv_sets(self, engine_dvs):
"""
create design variable sets
:param engine_dvs: list
the set of engine design variable's value
:return: None
"""
for idx, ed in enumerate(self.e_dv_sets):
ed.set_val(engine_dvs[idx])
ed.set_bound(self.engine_boundaries[idx])
ed.fixed(self.engine_fix_flags[idx])
# Joint Aircraft design variable class
class JointAircraftDesignVariable(object):
def __init__(self):
self.aircraft_mounting_dv_names = ['main wing coefficient x', 'main wing coefficient z',
'horizontal wing coefficient x',
'horizontal wing coefficient z', 'vertical wing coefficient x',
'vertical wing coefficient z']
self.aircraft_mounting_dv_idx_dict = {}
for idx, name in enumerate(self.aircraft_mounting_dv_names):
self.aircraft_mounting_dv_idx_dict[name] = idx
self.aircraft_mounting_boundaries = [[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1],
[0, 1]]
self.aircraft_mounting_dv_num = len(self.aircraft_mounting_dv_names)
self.aircraft_mounting_fix_flags = [False for _ in range(self.aircraft_mounting_dv_num)]
self.am_dv_sets = [DesignVariable(name) for name in self.aircraft_mounting_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.aircraft_mounting_dv_idx_dict[name]
self.aircraft_mounting_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with design variable
:param name: str
design variable name
:param flag: boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.aircraft_mounting_dv_idx_dict[name]
self.aircraft_mounting_fix_flags[idx] = flag
def create_dv_sets(self, aircraft_mounting_dvs):
"""
create design variable sets
:param aircraft_mounting_dvs: list
the set of engine design variable's value
:return: None
"""
for idx, amd in enumerate(self.am_dv_sets):
amd.set_val(aircraft_mounting_dvs[idx])
amd.set_bound(self.aircraft_mounting_boundaries[idx])
amd.fixed(self.aircraft_mounting_fix_flags[idx])
# Joint Engine design variable class
class JointEngineDesignVariable(object):
def __init__(self):
self.engine_mounting_dv_names = ['core engine mounting coefficient x', 'core engine mounting coefficient y',
                                         'core engine mounting turnover angle', 'core engine sign',
'distributed engine mounting coefficient x',
'distributed engine mounting coefficient y',
'distributed engine mounting turnover angle', 'distributed engine sign']
self.engine_mounting_dv_idx_dict = {}
for idx, name in enumerate(self.engine_mounting_dv_names):
self.engine_mounting_dv_idx_dict[name] = idx
self.engine_mounting_boundaries = [[0, 1],
[0, 1],
[0, 90],
[-1, 1],
[0, 1],
[0, 1],
[0, 90],
[-1, 1]]
self.engine_mounting_dv_num = len(self.engine_mounting_dv_names)
self.engine_mounting_fix_flags = [False for _ in range(self.engine_mounting_dv_num)]
self.em_dv_sets = [DesignVariable(name) for name in self.engine_mounting_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.engine_mounting_dv_idx_dict[name]
self.engine_mounting_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with design variable
:param name: str
design variable name
:param flag: boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.engine_mounting_dv_idx_dict[name]
self.engine_mounting_fix_flags[idx] = flag
def create_dv_sets(self, engine_mounting_dvs):
"""
create design variable sets
:param engine_mounting_dvs: list
the sets of design variable value
:return: None
"""
for idx, emd in enumerate(self.em_dv_sets):
emd.set_val(engine_mounting_dvs[idx])
emd.set_bound(self.engine_mounting_boundaries[idx])
emd.fixed(self.engine_mounting_fix_flags[idx])
# Electric design variable class
class ElectricDesignVariable(object):
def __init__(self):
self.electric_dv_names = ['material level', 'battery electric density']
self.electric_dv_idx_dict = {}
for idx, name in enumerate(self.electric_dv_names):
self.electric_dv_idx_dict[name] = idx
self.electric_boundaries = [[0.1, 1.0], [5.2, 10.0]]
self.electric_dv_num = len(self.electric_dv_names)
self.electric_fix_flags = [False for _ in range(self.electric_dv_num)]
self.e_dv_sets = [DesignVariable(name) for name in self.electric_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.electric_dv_idx_dict[name]
self.electric_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with design variable
:param name: str
design variable name
:param flag: boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.electric_dv_idx_dict[name]
self.electric_fix_flags[idx] = flag
def create_dv_sets(self, electric_dvs):
"""
create design variable sets
:param electric_dvs: list
the sets of design variable value
:return: None
"""
for idx, ed in enumerate(self.e_dv_sets):
ed.set_val(electric_dvs[idx])
ed.set_bound(self.electric_boundaries[idx])
ed.fixed(self.electric_fix_flags[idx])
# BLI design variable class
class BLIDesignVariable(object):
def __init__(self):
self.bli_dv_names = ['distortion angle']
self.bli_dv_idx_dict = {}
for idx, name in enumerate(self.bli_dv_names):
self.bli_dv_idx_dict[name] = idx
self.bli_boundaries = [[0, 120]]
self.bli_dv_num = len(self.bli_dv_names)
self.bli_fix_flags = [False for _ in range(self.bli_dv_num)]
self.bli_dv_sets = [DesignVariable(name) for name in self.bli_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.bli_dv_idx_dict[name]
self.bli_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with design variable
:param name: str
design variable name
:param flag: boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.bli_dv_idx_dict[name]
self.bli_fix_flags[idx] = flag
def create_dv_sets(self, bli_dvs):
"""
create design variable sets
:param bli_dvs: list
the sets of design variable value
:return: None
"""
for idx, bld in enumerate(self.bli_dv_sets):
bld.set_val(bli_dvs[idx])
bld.set_bound(self.bli_boundaries[idx])
bld.fixed(self.bli_fix_flags[idx])
# Mission design variable class
class MissionDesignVariable(object):
def __init__(self):
self.mission_dv_names = ['altitude', 'mach number', 'thrust at off design point', 'lift by drag', 'cruise range',
'max takeoff weight', 'passenger number', 'fuel coefficient', 'cargo weight',
'cargo volume']
self.mission_dv_idx_dict = {}
for idx, name in enumerate(self.mission_dv_names):
self.mission_dv_idx_dict[name] = idx
self.mission_boundaries = [[10000, 14000],
[0.7, 0.9],
[120000, 150000],
[15, 17],
[4500, 5000],
[75000, 80000],
[100, 200],
[0.4, 0.7],
[6000, 7000],
[30, 40]]
self.mission_dv_num = len(self.mission_dv_names)
self.mission_fix_flags = [False for _ in range(self.mission_dv_num)]
self.m_dv_sets = [DesignVariable(name) for name in self.mission_dv_names]
def set_bounds(self, name, bounds):
"""
replace boundary condition
:param name: str
design variable name
:param bounds: list
list which contains minimum and maximum value
:return: None
"""
idx = self.mission_dv_idx_dict[name]
self.mission_boundaries[idx] = bounds
def set_fix(self, name, flag=True):
"""
decide how to cope with design variable
:param name: str
design variable name
:param flag: boolean
flag which indicates whether or not design variable is fixed
:return: None
"""
idx = self.mission_dv_idx_dict[name]
self.mission_fix_flags[idx] = flag
def create_dv_sets(self, mission_dvs):
"""
create design variable sets
:param mission_dvs: list
the sets of design variable value
:return: None
"""
for idx, md in enumerate(self.m_dv_sets):
md.set_val(mission_dvs[idx])
md.set_bound(self.mission_boundaries[idx])
md.fixed(self.mission_fix_flags[idx])
# Integration Design Variables Controller
class DesignVariablesController(object):
def __init__(self):
# start index(s_idx) and final index(f_idx) for each design variables
# fuselage
self.fl_s_idx = 0
self.fl_f_idx = 0
# wing
self.wi_s_idx = 0
self.wi_f_idx = 0
# performance
self.pf_s_idx = 0
self.pf_f_idx = 0
# engine
self.e_s_idx = 0
self.e_f_idx = 0
# joint aircraft
self.ja_s_idx = 0
self.ja_f_idx = 0
        # joint engine
self.je_s_idx = 0
self.je_f_idx = 0
# BLI
self.bli_s_idx = 0
self.bli_f_idx = 0
# mission
self.ms_s_idx = 0
self.ms_f_idx = 0
# test code
def main():
# build aircraft fuselage design variable class
fuselage_length = 37.57 # [m]
s1_h = 0.05 # section 1 horizontal coefficient
s2_h = 0.105 # section 2 horizontal coefficient
s3_h = 0.05 # section 3 horizontal coefficient
s1_v = 0.2 # section 1 vertical coefficient
s2_v = 0.6 # section 2 vertical coefficient
s3_v = 0.2 # section 3 vertical coefficient
aircraft_fuselage_dvs = [fuselage_length, s1_h, s2_h, s3_h, s1_v, s2_v, s3_v]
# Initialization
afdv = AircraftFuselageDesignVariable()
# create the sets of design variables
afdv.create_dv_sets(aircraft_fuselage_dvs)
# confirmation
print('')
print('aircraft fuselage class')
for u in afdv.fl_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build aircraft wing design variable class
bm = 34.1 # main wing span [m]
ARm = 9.5 # main wing aspect ratio
tm = 0.24 # taper ratio
tcm = 0.11 # the ratio of thickness and chord at main wing
thetam = 25.0 # retreat angle of main wing[rad]
# horizontal
bh = 12.45
ARh = 2.0
th = 0.24
tch = 0.11
thetah = 31.0
# vertical
bv = 5.87
ARv = 1.2
tv = 0.24
tcv = 0.15
thetav = 49.0
aircraft_wing_dvs = [bm, ARm, tm, tcm, thetam, bh, ARh, th, tch, thetah, bv, ARv, tv, tcv, thetav]
# Initialization
awdv = AircraftWingDesignVariable()
awdv.create_dv_sets(aircraft_wing_dvs)
# confirmation
print('')
print('aircraft wing class')
for u in awdv.aw_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build aircraft performance design variable class
aoa = 4 # attack of angle
aircraft_performance_dvs = [aoa]
# Initialization
apdv = AircraftPerformanceDesignVariable()
apdv.create_dv_sets(aircraft_performance_dvs)
# confirmation
print('')
print('aircraft performance class')
for u in apdv.ap_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build engine design variable class
OPR = 30
TIT = 1400
BPR = 6.0
FPR = 1.4
Nen = 2
tech_lev = 3
cool_air_lpt = 0
cool_air_hpt = 0.15
engine_material_quality = 1.0
fan_sn = 1
lpc_sn = 2
hpc_sn = 10
hpt_sn = 1
lpt_sn = 3
fan_lf = 0.2
lpc_lf = 0.2
hpc_lf = 0.4
hpt_lf = 1.5
lpt_lf = 1.5
BPRe = 0
FPRe = 0
Nfan = 0
eng_electriceff = 0.9
eng_electric_dense = 0.52
engine_dvs = [OPR, TIT, BPR, FPR, Nen, tech_lev, cool_air_lpt, cool_air_hpt, engine_material_quality,
fan_sn, lpc_sn, hpc_sn, hpt_sn, lpt_sn, fan_lf, lpc_lf, hpc_lf, hpt_lf, lpt_lf,
BPRe, FPRe, Nfan, eng_electriceff, eng_electric_dense]
# Initialization
edv = EngineDesignVariable()
edv.create_dv_sets(engine_dvs)
# confirmation
print('')
print('engine class')
for u in edv.e_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build joint aircraft design variable class
acmx = 0.4
acmz = 0
achx = 0.9
achz = 0
acbx = 0.9
acbz = 0
aircraft_mounting_dvs = [acmx, acmz, achx, achz, acbx, acbz]
# Initialization
jadv = JointAircraftDesignVariable()
jadv.create_dv_sets(aircraft_mounting_dvs)
print('')
print('Joint aircraft class')
for u in jadv.am_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build joint engine design variable class
ecmx = 0.1
ecmy = 0.1
theta_ec = 0
sign_ec = -1
edmx = 0.1
edmy = 0.1
theta_ed = 0
sign_ed = 1
engine_mounting_dvs = [ecmx, ecmy, theta_ec, sign_ec, edmx, edmy, theta_ed, sign_ed]
# Initialization
jedv = JointEngineDesignVariable()
jedv.create_dv_sets(engine_mounting_dvs)
print('')
print('Joint engine class')
for u in jedv.em_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build electric design variable class
mat_lev = 1.0
bat_ele_dense = 5.2 # [kW/kg]
electric_dvs = [mat_lev, bat_ele_dense]
eldv = ElectricDesignVariable()
eldv.create_dv_sets(electric_dvs)
print('')
print('Electric class')
for u in eldv.e_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build BLI design variable class
dist_ang = 60
bli_dvs = [dist_ang]
blidv = BLIDesignVariable()
blidv.create_dv_sets(bli_dvs)
print('')
print('BLI class')
for u in blidv.bli_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
# build mission design variable class
altitude = 10668 # [m]
mach_number = 0.82
thrust_doff = 133000 # [N]
ld = 17
cruise_range = 4808 # [km]
mtow = 78000 # [kg]
passenger_num = 150
fuel_coef = 0.6
cargo_weight = 6300 # [kg]
cargo_volume = 37.63 # [m^3]
mission_dvs = [altitude, mach_number, thrust_doff, ld, cruise_range, mtow, passenger_num, fuel_coef, cargo_weight,
cargo_volume]
mdv = MissionDesignVariable()
mdv.create_dv_sets(mission_dvs)
print('')
print('Mission class')
for u in mdv.m_dv_sets:
print(u.val)
print(u.bound)
print(u.fix)
print('')
if __name__ == '__main__':
main()
|
the-stack_0_23287 | import random
import scipy.ndimage
import numpy as np
import h5py
class DataLoader(object):
def __init__(self, cfg):
self.cfg = cfg
self.augment = cfg.data_augment
    def get_data(self, mode='train'):
        # Note: only the background-MNIST test split is loaded here; the train and
        # validation splits referenced by next_batch() (self.mnist, self.x_train,
        # self.y_train, self.x_valid, self.y_valid) are expected to be attached elsewhere.
        h5f = h5py.File('./classification/DataLoaders/mnist_background.h5', 'r')
        self.x_test = np.reshape(h5f['X'][:], [12000, 28, 28, 1])
        self.y_test = h5f['Y'][:]
        h5f.close()
def next_batch(self, start=None, end=None, mode='train'):
if mode == 'train':
x, y = self.mnist.train.next_batch(self.cfg.batch_size)
x = x.reshape((-1, self.cfg.height, self.cfg.width, self.cfg.channel))
if self.augment:
x = random_rotation_2d(x, self.cfg.max_angle)
elif mode == 'valid':
x = self.x_valid[start:end]
y = self.y_valid[start:end]
elif mode == 'test':
x = self.x_test[start:end]
y = self.y_test[start:end]
return x, y
def count_num_batch(self, batch_size, mode='train'):
if mode == 'train':
num_batch = int(self.y_train.shape[0] / batch_size)
elif mode == 'valid':
num_batch = int(self.y_valid.shape[0] / batch_size)
elif mode == 'test':
num_batch = int(self.y_test.shape[0] / batch_size)
return num_batch
def randomize(self):
""" Randomizes the order of data samples and their corresponding labels"""
permutation = np.random.permutation(self.y_train.shape[0])
shuffled_x = self.x_train[permutation, :, :, :]
shuffled_y = self.y_train[permutation]
return shuffled_x, shuffled_y
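# A sketch of a typical evaluation loop using this loader (assumes `loader` is a
# DataLoader instance and `cfg` provides batch_size):
# loader.get_data(mode='test')
# num_batches = loader.count_num_batch(cfg.batch_size, mode='test')
# for i in range(num_batches):
#     x, y = loader.next_batch(i * cfg.batch_size, (i + 1) * cfg.batch_size, mode='test')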
def random_rotation_2d(batch, max_angle):
""" Randomly rotate an image by a random angle (-max_angle, max_angle).
Arguments:
max_angle: `float`. The maximum rotation angle.
Returns:
batch of rotated 2D images
"""
size = batch.shape
batch = np.squeeze(batch)
batch_rot = np.zeros(batch.shape)
for i in range(batch.shape[0]):
if bool(random.getrandbits(1)):
image = np.squeeze(batch[i])
angle = random.uniform(-max_angle, max_angle)
batch_rot[i] = scipy.ndimage.interpolation.rotate(image, angle, mode='nearest', reshape=False)
else:
batch_rot[i] = batch[i]
return batch_rot.reshape(size) |
the-stack_0_23289 | """Tests for instruction layouts."""
from solana.publickey import PublicKey
from pyserum._layouts.instructions import _VERSION, INSTRUCTIONS_LAYOUT, InstructionType
from pyserum.enums import OrderType, Side
def assert_parsed_layout(instruction_type, args, raw_bytes):
parsed = INSTRUCTIONS_LAYOUT.parse(raw_bytes)
assert parsed.version == _VERSION
assert parsed.instruction_type == int(instruction_type)
if args:
assert parsed.args == args
else:
assert not parsed.args
def test_parse_initialize_market():
"""Test parsing raw initialize market data."""
args = {
"base_lot_size": 1,
"quote_lot_size": 2,
"fee_rate_bps": 3,
"vault_signer_nonce": 4,
"quote_dust_threshold": 5,
}
expected = bytes.fromhex(
"000000000001000000000000000200000000000000030004000000000000000500000000000000"
) # Raw hex from serum.js
assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.InitializeMarket, args=args)) == expected
assert_parsed_layout(InstructionType.InitializeMarket, args, expected)
def test_parse_new_order():
"""Test parsing raw new order data."""
args = {
"limit_price": 1,
"max_quantity": 2,
"client_id": 3,
"side": Side.Sell,
"order_type": OrderType.PostOnly,
}
expected = bytes.fromhex(
"00010000000100000001000000000000000200000000000000020000000300000000000000"
) # Raw hex from serum.js
assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.NewOrder, args=args)) == expected
assert_parsed_layout(InstructionType.NewOrder, args, expected)
def test_parse_match_orders():
"""Test parsing raw match orders data."""
args = {"limit": 1}
expected = bytes.fromhex("00020000000100") # Raw hex from serum.js
assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.MatchOrder, args=args)) == expected
assert_parsed_layout(InstructionType.MatchOrder, args, expected)
def test_parse_consume_events():
"""Test parsing raw consume events data."""
args = {"limit": 1}
expected = bytes.fromhex("00030000000100") # Raw hex from serum.js
assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.ConsumeEvents, args=args)) == expected
assert_parsed_layout(InstructionType.ConsumeEvents, args, expected)
def test_parse_cancel_order():
"""Test parsing raw cancel order data."""
args = {
"side": Side.Buy,
"order_id": (1234567890).to_bytes(16, "little"),
"open_orders_slot": 123,
"open_orders": bytes(PublicKey(123)),
}
expected = bytes.fromhex(
"000400000000000000d202964900000000000000000000000000000000"
"0000000000000000000000000000000000000000000000000000007b7b"
) # Raw hex from serum.js
assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.CancelOrder, args=args)) == expected
assert_parsed_layout(InstructionType.CancelOrder, args, expected)
def test_parse_settle_funds():
"""Test parsing raw settle funds data."""
expected = bytes.fromhex("0005000000") # Raw hex from serum.js
assert INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.SettleFunds, args=None)) == expected
assert_parsed_layout(InstructionType.SettleFunds, None, expected)
def test_parse_cancel_order_by_client_id():
"""Test parsing raw cancel order data."""
args = {"client_id": 123}
expected = bytes.fromhex("00060000007b00000000000000") # Raw hex from serum.js
assert (
INSTRUCTIONS_LAYOUT.build(dict(instruction_type=InstructionType.CancelOrderByClientID, args=args)) == expected
)
assert_parsed_layout(InstructionType.CancelOrderByClientID, args, expected)
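# Byte layout note (illustrative, decoding the hex above): "00" is the version byte,
# "06000000" the u32 little-endian instruction type (CancelOrderByClientID = 6),
# and "7b00000000000000" the u64 little-endian client_id (0x7b = 123).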
|
the-stack_0_23290 | import gzip
import os
from typing import Optional, Dict, Any
import jsonlines
from kgx.sink.sink import Sink
class JsonlSink(Sink):
"""
JsonlSink is responsible for writing data as records
to JSON lines.
Parameters
----------
owner: Transformer
        Transformer to which this sink belongs
filename: str
The filename to write to
format: str
The file format (``jsonl``)
compression: Optional[str]
The compression type (``gz``)
kwargs: Any
Any additional arguments
"""
def __init__(
self,
owner,
filename: str,
format: str = "jsonl",
compression: Optional[str] = None,
**kwargs: Any,
):
super().__init__(owner)
dirname = os.path.abspath(os.path.dirname(filename))
basename = os.path.basename(filename)
nodes_filename = os.path.join(
dirname if dirname else "", f"{basename}_nodes.{format}"
)
edges_filename = os.path.join(
dirname if dirname else "", f"{basename}_edges.{format}"
)
if dirname:
os.makedirs(dirname, exist_ok=True)
if compression == "gz":
nodes_filename += f".{compression}"
edges_filename += f".{compression}"
NFH = gzip.open(nodes_filename, "wb")
self.NFH = jsonlines.Writer(NFH)
EFH = gzip.open(edges_filename, "wb")
self.EFH = jsonlines.Writer(EFH)
else:
self.NFH = jsonlines.open(nodes_filename, "w")
self.EFH = jsonlines.open(edges_filename, "w")
def write_node(self, record: Dict) -> None:
"""
Write a node record to JSON.
Parameters
----------
record: Dict
A node record
"""
self.NFH.write(record)
def write_edge(self, record: Dict) -> None:
"""
Write an edge record to JSON.
Parameters
----------
record: Dict
            An edge record
"""
self.EFH.write(record)
def finalize(self) -> None:
"""
Perform any operations after writing the file.
"""
self.NFH.close()
self.EFH.close()
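# Usage sketch (assumes a kgx Transformer instance `t`; record fields are illustrative):
# sink = JsonlSink(t, filename="graph", format="jsonl", compression="gz")
# sink.write_node({"id": "EX:001", "category": ["biolink:NamedThing"], "name": "example"})
# sink.write_edge({"subject": "EX:001", "predicate": "biolink:related_to", "object": "EX:002"})
# sink.finalize()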
|
the-stack_0_23291 | import streamlit as st
import base64
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from cryptography.hazmat.primitives import hashes
from cryptography.hazmat.primitives.asymmetric import padding
def utf8(s: bytes):
return str(s, 'utf-8')
def main():
st.title("Generate RSA Keys")
key_size = int(st.number_input("Key Size", value=4096, min_value=256, step=1))
private_key = rsa.generate_private_key(
public_exponent=65537,
key_size=key_size,
backend=default_backend()
)
public_key = private_key.public_key()
private_pem = private_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.PKCS8,
encryption_algorithm=serialization.NoEncryption()
)
# with open('private_key.pem', 'wb') as f:
# f.write(private_pem)
public_pem = public_key.public_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PublicFormat.SubjectPublicKeyInfo
)
# with open('public_key.pem', 'wb') as f:
# f.write(public_pem)
with st.expander("Private Key"):
st.code(private_pem.decode("utf-8"))
with st.expander("Public Key"):
st.code(public_pem.decode("utf-8"))
st.button("Refresh Keys")
# st.download_button("Download Private Key",
# data=private_pem,
# file_name="private_key.pem",
# mime="application/x-pem-file")
# st.download_button("Download Public Key",
# data=public_pem,
# file_name="public_key.pem",
# mime="application/x-pem-file")
if __name__ == '__main__':
main()
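# The `hashes` and `padding` imports above are currently unused; a possible
# follow-up (sketch only, using the key objects created inside main()) would
# encrypt and decrypt with the generated key pair via OAEP:
# ciphertext = public_key.encrypt(
#     b"secret message",
#     padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
#                  algorithm=hashes.SHA256(), label=None))
# plaintext = private_key.decrypt(
#     ciphertext,
#     padding.OAEP(mgf=padding.MGF1(algorithm=hashes.SHA256()),
#                  algorithm=hashes.SHA256(), label=None))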
|
the-stack_0_23292 | import unittest
from sweetcase import switch, case
class TestMultilineFunctions(unittest.TestCase):
def test_basic_multiline_functions(self):
num1 = 7
num2 = 5
def addition():
result = num1 + num2
return result
def subtraction():
result = num1 - num2
return result
action = "+"
res = switch(action, [
case("+",
addition),
case("-",
subtraction)
])
self.assertEqual(res, 12, 'Should be 12')
def test_multiline_with_arguments(self):
def addition(num1, num2):
result = num1 + num2
return result
def subtraction(num1, num2):
result = num1 - num2
return result
numbers = [7, 5]
action = "-"
res = switch(action, [
case("+",
addition, arguments=numbers),
case("-",
subtraction, arguments=numbers)
])
self.assertEqual(res, 2, 'Should be 2')
if __name__ == "__main__":
unittest.main()
|
the-stack_0_23293 | from __future__ import absolute_import
from django.core.exceptions import PermissionDenied
from django.db import models
from django.contrib.auth import authenticate
from django.contrib.sites.models import Site
from django.utils.encoding import python_2_unicode_compatible
from django.utils.crypto import get_random_string
try:
from django.utils.encoding import force_text
except ImportError:
from django.utils.encoding import force_unicode as force_text
import allauth.app_settings
from allauth.account.utils import get_next_redirect_url, setup_user_email
from . import providers
from .fields import JSONField
class SocialAppManager(models.Manager):
def get_current(self, provider):
site = Site.objects.get_current()
return self.get(sites__id=site.id,
provider=provider)
@python_2_unicode_compatible
class SocialApp(models.Model):
objects = SocialAppManager()
provider = models.CharField(max_length=30,
choices=providers.registry.as_choices())
name = models.CharField(max_length=40)
client_id = models.CharField(max_length=100,
help_text='App ID, or consumer key')
key = models.CharField(max_length=100,
blank=True,
help_text='Key (Stack Exchange only)')
secret = models.CharField(max_length=100,
help_text='API secret, client secret, or'
' consumer secret')
# Most apps can be used across multiple domains, therefore we use
# a ManyToManyField. Note that Facebook requires an app per domain
# (unless the domains share a common base name).
# blank=True allows for disabling apps without removing them
sites = models.ManyToManyField(Site, blank=True)
def __str__(self):
return self.name
class SocialAccount(models.Model):
user = models.ForeignKey(allauth.app_settings.USER_MODEL)
provider = models.CharField(max_length=30,
choices=providers.registry.as_choices())
# Just in case you're wondering if an OpenID identity URL is going
# to fit in a 'uid':
#
# Ideally, URLField(max_length=1024, unique=True) would be used
# for identity. However, MySQL has a max_length limitation of 255
# for URLField. How about models.TextField(unique=True) then?
# Well, that won't work either for MySQL due to another bug[1]. So
# the only way out would be to drop the unique constraint, or
# switch to shorter identity URLs. Opted for the latter, as [2]
# suggests that identity URLs are supposed to be short anyway, at
# least for the old spec.
#
# [1] http://code.djangoproject.com/ticket/2495.
# [2] http://openid.net/specs/openid-authentication-1_1.html#limits
uid = models.CharField(max_length=255)
last_login = models.DateTimeField(auto_now=True)
date_joined = models.DateTimeField(auto_now_add=True)
extra_data = JSONField(default='{}')
class Meta:
unique_together = ('provider', 'uid')
def authenticate(self):
return authenticate(account=self)
def __str__(self):
return force_text(self.user)
def get_profile_url(self):
return self.get_provider_account().get_profile_url()
def get_avatar_url(self):
return self.get_provider_account().get_avatar_url()
def get_provider(self):
return providers.registry.by_id(self.provider)
def get_provider_account(self):
return self.get_provider().wrap_account(self)
@python_2_unicode_compatible
class SocialToken(models.Model):
app = models.ForeignKey(SocialApp)
account = models.ForeignKey(SocialAccount)
token = models \
.TextField(help_text='"oauth_token" (OAuth1) or access token (OAuth2)')
token_secret = models \
.CharField(max_length=200,
blank=True,
help_text='"oauth_token_secret" (OAuth1) or refresh'
' token (OAuth2)')
expires_at = models.DateTimeField(blank=True, null=True)
class Meta:
unique_together = ('app', 'account')
def __str__(self):
return self.token
class SocialLogin(object):
"""
Represents a social user that is in the process of being logged
in. This consists of the following information:
`account` (`SocialAccount` instance): The social account being
logged in. Providers are not responsible for checking whether or
not an account already exists or not. Therefore, a provider
typically creates a new (unsaved) `SocialAccount` instance. The
`User` instance pointed to by the account (`account.user`) may be
prefilled by the provider for use as a starting point later on
during the signup process.
`token` (`SocialToken` instance): An optional access token token
that results from performing a successful authentication
handshake.
`state` (`dict`): The state to be preserved during the
authentication handshake. Note that this state may end up in the
url -- do not put any secrets in here. It currently only contains
the url to redirect to after login.
`email_addresses` (list of `EmailAddress`): Optional list of
e-mail addresses retrieved from the provider.
"""
def __init__(self, account, token=None, email_addresses=[]):
if token:
assert token.account is None or token.account == account
token.account = account
self.token = token
self.account = account
self.email_addresses = email_addresses
self.state = {}
def connect(self, request, user):
self.account.user = user
self.save(request, connect=True)
def save(self, request, connect=False):
"""
Saves a new account. Note that while the account is new,
the user may be an existing one (when connecting accounts)
"""
assert not self.is_existing
user = self.account.user
user.save()
self.account.user = user
self.account.save()
if self.token:
self.token.account = self.account
self.token.save()
if connect:
# TODO: Add any new email addresses automatically?
pass
else:
# fix for VIDK-36 in YT
self.email_addresses = filter(lambda x: x.user_id, self.email_addresses)
# endfix
setup_user_email(request, user, self.email_addresses)
@property
def is_existing(self):
"""
Account is temporary, not yet backed by a database record.
"""
return self.account.pk
def lookup(self):
"""
Lookup existing account, if any.
"""
assert not self.is_existing
try:
a = SocialAccount.objects.get(provider=self.account.provider,
uid=self.account.uid)
# Update account
a.extra_data = self.account.extra_data
self.account = a
a.save()
# Update token
if self.token:
assert not self.token.pk
try:
t = SocialToken.objects.get(account=self.account,
app=self.token.app)
t.token = self.token.token
if self.token.token_secret:
# only update the refresh token if we got one
# many oauth2 providers do not resend the refresh token
t.token_secret = self.token.token_secret
t.expires_at = self.token.expires_at
t.save()
self.token = t
except SocialToken.DoesNotExist:
self.token.account = a
self.token.save()
except SocialAccount.DoesNotExist:
pass
def get_redirect_url(self, request):
url = self.state.get('next')
return url
@classmethod
def state_from_request(cls, request):
state = {}
next_url = get_next_redirect_url(request)
if next_url:
state['next'] = next_url
state['process'] = request.REQUEST.get('process', 'login')
return state
@classmethod
def stash_state(cls, request):
state = cls.state_from_request(request)
verifier = get_random_string()
request.session['socialaccount_state'] = (state, verifier)
return verifier
@classmethod
def unstash_state(cls, request):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier = request.session.pop('socialaccount_state')
return state
@classmethod
def verify_and_unstash_state(cls, request, verifier):
if 'socialaccount_state' not in request.session:
raise PermissionDenied()
state, verifier2 = request.session.pop('socialaccount_state')
if verifier != verifier2:
raise PermissionDenied()
return state
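# A minimal sketch of how a provider typically hands over control (assumes
# `account` is an unsaved SocialAccount, `token` an optional SocialToken built
# by the provider, and `request` is the current Django request):
# sociallogin = SocialLogin(account, token=token)
# sociallogin.state = SocialLogin.state_from_request(request)
# sociallogin.lookup()              # attach an existing account/token, if any
# if not sociallogin.is_existing:
#     sociallogin.save(request)     # persist the new account (and token)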
|
the-stack_0_23297 | # Copyright 2020 The Meson development team
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from pathlib import Path
from .traceparser import CMakeTraceParser
from ..envconfig import CMakeSkipCompilerTest
from ..mesonlib import MachineChoice
from .common import language_map, cmake_get_generator_args
from .. import mlog
import shutil
import typing as T
from enum import Enum
from textwrap import dedent
if T.TYPE_CHECKING:
from .executor import CMakeExecutor
from ..envconfig import MachineInfo, Properties, CMakeVariables
from ..environment import Environment
class CMakeExecScope(Enum):
SUBPROJECT = 'subproject'
DEPENDENCY = 'dependency'
class CMakeToolchain:
def __init__(self, cmakebin: 'CMakeExecutor', env: 'Environment', for_machine: MachineChoice, exec_scope: CMakeExecScope, build_dir: Path, preload_file: T.Optional[Path] = None) -> None:
self.env = env
self.cmakebin = cmakebin
self.for_machine = for_machine
self.exec_scope = exec_scope
self.preload_file = preload_file
self.build_dir = build_dir
self.build_dir = self.build_dir.resolve()
self.toolchain_file = build_dir / 'CMakeMesonToolchainFile.cmake'
self.cmcache_file = build_dir / 'CMakeCache.txt'
self.minfo = self.env.machines[self.for_machine]
self.properties = self.env.properties[self.for_machine]
self.compilers = self.env.coredata.compilers[self.for_machine]
self.cmakevars = self.env.cmakevars[self.for_machine]
self.cmakestate = self.env.coredata.cmake_cache[self.for_machine]
self.variables = self.get_defaults()
self.variables.update(self.cmakevars.get_variables())
# Determine whether CMake the compiler test should be skipped
skip_status = self.properties.get_cmake_skip_compiler_test()
self.skip_check = skip_status == CMakeSkipCompilerTest.ALWAYS
if skip_status == CMakeSkipCompilerTest.DEP_ONLY and self.exec_scope == CMakeExecScope.DEPENDENCY:
self.skip_check = True
if not self.properties.get_cmake_defaults():
self.skip_check = False
assert self.toolchain_file.is_absolute()
def write(self) -> Path:
if not self.toolchain_file.parent.exists():
self.toolchain_file.parent.mkdir(parents=True)
self.toolchain_file.write_text(self.generate(), encoding='utf-8')
self.cmcache_file.write_text(self.generate_cache(), encoding='utf-8')
mlog.cmd_ci_include(self.toolchain_file.as_posix())
return self.toolchain_file
def get_cmake_args(self) -> T.List[str]:
args = ['-DCMAKE_TOOLCHAIN_FILE=' + self.toolchain_file.as_posix()]
if self.preload_file is not None:
args += ['-DMESON_PRELOAD_FILE=' + self.preload_file.as_posix()]
return args
@staticmethod
def _print_vars(vars: T.Dict[str, T.List[str]]) -> str:
res = ''
for key, value in vars.items():
res += 'set(' + key
for i in value:
res += f' "{i}"'
res += ')\n'
return res
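    # For example (illustrative), _print_vars({'CMAKE_SYSTEM_NAME': ['Linux']})
    # yields the single line:  set(CMAKE_SYSTEM_NAME "Linux")
    # and a key with two values produces:  set(KEY "v1" "v2")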
def generate(self) -> str:
res = dedent('''\
######################################
### AUTOMATICALLY GENERATED FILE ###
######################################
# This file was generated from the configuration in the
# relevant meson machine file. See the meson documentation
# https://mesonbuild.com/Machine-files.html for more information
if(DEFINED MESON_PRELOAD_FILE)
include("${MESON_PRELOAD_FILE}")
endif()
''')
# Escape all \ in the values
for key, value in self.variables.items():
self.variables[key] = [x.replace('\\', '/') for x in value]
# Set compiler
if self.skip_check:
self.update_cmake_compiler_state()
res += '# CMake compiler state variables\n'
for lang, vars in self.cmakestate:
res += f'# -- Variables for language {lang}\n'
res += self._print_vars(vars)
res += '\n'
res += '\n'
# Set variables from the current machine config
res += '# Variables from meson\n'
res += self._print_vars(self.variables)
res += '\n'
# Add the user provided toolchain file
user_file = self.properties.get_cmake_toolchain_file()
if user_file is not None:
res += dedent('''
# Load the CMake toolchain file specified by the user
include("{}")
'''.format(user_file.as_posix()))
return res
def generate_cache(self) -> str:
if not self.skip_check:
return ''
res = ''
for name, v in self.cmakestate.cmake_cache.items():
res += f'{name}:{v.type}={";".join(v.value)}\n'
return res
def get_defaults(self) -> T.Dict[str, T.List[str]]:
defaults = {} # type: T.Dict[str, T.List[str]]
# Do nothing if the user does not want automatic defaults
if not self.properties.get_cmake_defaults():
return defaults
# Best effort to map the meson system name to CMAKE_SYSTEM_NAME, which
# is not trivial since CMake lacks a list of all supported
# CMAKE_SYSTEM_NAME values.
SYSTEM_MAP = {
'android': 'Android',
'linux': 'Linux',
'windows': 'Windows',
'freebsd': 'FreeBSD',
'darwin': 'Darwin',
} # type: T.Dict[str, str]
# Only set these in a cross build. Otherwise CMake will trip up in native
# builds and thing they are cross (which causes TRY_RUN() to break)
if self.env.is_cross_build(when_building_for=self.for_machine):
defaults['CMAKE_SYSTEM_NAME'] = [SYSTEM_MAP.get(self.minfo.system, self.minfo.system)]
defaults['CMAKE_SYSTEM_PROCESSOR'] = [self.minfo.cpu_family]
defaults['CMAKE_SIZEOF_VOID_P'] = ['8' if self.minfo.is_64_bit else '4']
sys_root = self.properties.get_sys_root()
if sys_root:
defaults['CMAKE_SYSROOT'] = [sys_root]
def make_abs(exe: str) -> str:
if Path(exe).is_absolute():
return exe
p = shutil.which(exe)
if p is None:
return exe
return p
# Set the compiler variables
for lang, comp_obj in self.compilers.items():
exe_list = [make_abs(x) for x in comp_obj.get_exelist()]
prefix = 'CMAKE_{}_'.format(language_map.get(lang, lang.upper()))
if not exe_list:
continue
elif len(exe_list) == 2:
defaults[prefix + 'COMPILER'] = [exe_list[1]]
defaults[prefix + 'COMPILER_LAUNCHER'] = [exe_list[0]]
else:
defaults[prefix + 'COMPILER'] = exe_list
if comp_obj.get_id() == 'clang-cl':
defaults['CMAKE_LINKER'] = comp_obj.get_linker_exelist()
return defaults
def update_cmake_compiler_state(self) -> None:
# Check if all variables are already cached
if self.cmakestate.languages.issuperset(self.compilers.keys()):
return
# Generate the CMakeLists.txt
mlog.debug('CMake Toolchain: Calling CMake once to generate the compiler state')
languages = list(self.compilers.keys())
lang_ids = [language_map.get(x, x.upper()) for x in languages]
cmake_content = dedent(f'''
cmake_minimum_required(VERSION 3.7)
project(CompInfo {' '.join(lang_ids)})
''')
build_dir = Path(self.env.scratch_dir) / '__CMake_compiler_info__'
build_dir.mkdir(parents=True, exist_ok=True)
cmake_file = build_dir / 'CMakeLists.txt'
cmake_file.write_text(cmake_content, encoding='utf-8')
# Generate the temporary toolchain file
temp_toolchain_file = build_dir / 'CMakeMesonTempToolchainFile.cmake'
temp_toolchain_file.write_text(CMakeToolchain._print_vars(self.variables), encoding='utf-8')
# Configure
trace = CMakeTraceParser(self.cmakebin.version(), build_dir)
self.cmakebin.set_exec_mode(print_cmout=False, always_capture_stderr=trace.requires_stderr())
cmake_args = []
cmake_args += trace.trace_args()
cmake_args += cmake_get_generator_args(self.env)
cmake_args += [f'-DCMAKE_TOOLCHAIN_FILE={temp_toolchain_file.as_posix()}', '.']
rc, _, raw_trace = self.cmakebin.call(cmake_args, build_dir=build_dir, disable_cache=True)
if rc != 0:
mlog.warning('CMake Toolchain: Failed to determine CMake compilers state')
return
# Parse output
trace.parse(raw_trace)
self.cmakestate.cmake_cache = {**trace.cache}
vars_by_file = {k.name: v for (k, v) in trace.vars_by_file.items()}
for lang in languages:
lang_cmake = language_map.get(lang, lang.upper())
file_name = f'CMake{lang_cmake}Compiler.cmake'
vars = vars_by_file.setdefault(file_name, {})
vars[f'CMAKE_{lang_cmake}_COMPILER_FORCED'] = ['1']
self.cmakestate.update(lang, vars)
|
the-stack_0_23299 | from NeuralNet import NeuralNet
import keras
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten, BatchNormalization
from keras.layers import Conv2D, MaxPooling2D
from keras import backend
class NeuralNetGamma(NeuralNet):
def beginTraining(self):
self.setTrainingParameters(100000, 5000, 16, 10)
def defineModel(self, inputShape : tuple, outputSize : int):
model = Sequential()
model.add(Conv2D(32, (3, 3), activation='relu', padding = 'same', input_shape=inputShape))
model.add(Conv2D(32, (3, 3), activation='relu', padding = 'same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same'))
model.add(Conv2D(64, (3, 3), activation='relu', padding = 'same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same'))
model.add(Conv2D(128, (3, 3), activation='relu', padding = 'same'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(1028, activation='relu'))
model.add(Dense(1028, activation='relu'))
model.add(Dense(128, activation='relu'))
model.add(Dense(outputSize, activation='linear'))
return model
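# Usage sketch (shapes are illustrative; the NeuralNet base class is assumed to
# provide setTrainingParameters and to call these hooks itself):
# net = NeuralNetGamma()
# net.beginTraining()                                   # sets training parameters
# model = net.defineModel(inputShape=(84, 84, 4), outputSize=6)
# model.compile(optimizer='adam', loss='mse')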
|
the-stack_0_23301 | import pytest
from rest_framework import status
from rest_framework.reverse import reverse
from api.compliance.tests.factories import ComplianceSiteCaseFactory, ComplianceVisitCaseFactory
from api.organisations.tests.factories import SiteFactory
from api.staticdata.statuses.enums import CaseStatusEnum
from api.staticdata.statuses.libraries.get_case_status import get_case_status_by_status
from test_helpers.clients import DataTestClient
from api.users.models import UserOrganisationRelationship
class ComplianceExporterViewTests(DataTestClient):
def compare_compliance_case_in_list(self, data, case, site):
self.assertEqual(data["site_name"], str(site.name))
self.assertEqual(data["address"]["address_line_1"], site.address.address_line_1)
self.assertEqual(data["address"]["address_line_2"], site.address.address_line_2)
self.assertEqual(data["address"]["city"], site.address.city)
self.assertEqual(data["address"]["region"], site.address.region)
self.assertEqual(data["address"]["postcode"], site.address.postcode)
self.assertEqual(data["address"]["country"]["id"], site.address.country.id)
def test_get_exporter_compliance_case_list_1(self):
comp_case_1 = ComplianceSiteCaseFactory(
organisation=self.organisation,
site=self.organisation.primary_site,
status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
site_2 = SiteFactory(organisation=self.organisation)
comp_case_2 = ComplianceSiteCaseFactory(
organisation=self.organisation, site=site_2, status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
site_3 = SiteFactory(organisation=self.organisation)
comp_case_3 = ComplianceSiteCaseFactory(
organisation=self.organisation, site=site_3, status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
url = reverse("compliance:exporter_site_list")
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()["results"]
self.assertEqual(len(response_data), 3)
comp_cases = [comp_case_1, comp_case_2, comp_case_3]
comp_case_ids = [str(comp_case.id) for comp_case in comp_cases]
response_data_ids = [data["id"] for data in response_data]
self.assertEqual(set(comp_case_ids), set(response_data_ids))
comp_case_1_response_data = response_data[response_data_ids.index(str(comp_case_1.id))]
comp_case_2_response_data = response_data[response_data_ids.index(str(comp_case_2.id))]
comp_case_3_response_data = response_data[response_data_ids.index(str(comp_case_3.id))]
self.compare_compliance_case_in_list(comp_case_1_response_data, comp_case_1, self.organisation.primary_site)
self.compare_compliance_case_in_list(comp_case_2_response_data, comp_case_2, site_2)
self.compare_compliance_case_in_list(comp_case_3_response_data, comp_case_3, site_3)
@pytest.mark.xfail(reason="Failing randomly, marking as fail temporarily as it is not applicable for SIEL licences")
def test_get_exporter_compliance_case_list_2(self):
user_org_relationship = UserOrganisationRelationship.objects.get(user=self.exporter_user)
comp_case_1 = ComplianceSiteCaseFactory(
organisation=self.organisation,
site=self.organisation.primary_site,
status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
self.organisation.primary_site.users.add(user_org_relationship)
site_2 = SiteFactory(organisation=self.organisation)
site_2.users.add(user_org_relationship)
comp_case_2 = ComplianceSiteCaseFactory(
organisation=self.organisation, site=site_2, status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
site_3 = SiteFactory(organisation=self.organisation)
comp_case_3 = ComplianceSiteCaseFactory(
organisation=self.organisation, site=site_3, status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
url = reverse("compliance:exporter_site_list")
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()["results"]
self.assertEqual(len(response_data), 2)
self.compare_compliance_case_in_list(response_data[0], comp_case_1, self.organisation.primary_site)
self.compare_compliance_case_in_list(response_data[1], comp_case_2, site_2)
def test_exporter_site_details(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
comp_case_1 = ComplianceSiteCaseFactory(
organisation=self.organisation,
site=self.organisation.primary_site,
status=get_case_status_by_status(CaseStatusEnum.OPEN),
)
url = reverse("compliance:exporter_site_detail", kwargs={"pk": comp_case_1.id})
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(response_data["id"], str(comp_case_1.id))
self.assertEqual(response_data["reference_code"], comp_case_1.reference_code)
self.assertEqual(response_data["site_name"], comp_case_1.site.name)
self.assertEqual(response_data["address"]["address_line_1"], comp_case_1.site.address.address_line_1)
self.assertEqual(response_data["address"]["address_line_2"], comp_case_1.site.address.address_line_2)
self.assertEqual(response_data["address"]["city"], comp_case_1.site.address.city)
self.assertEqual(response_data["address"]["region"], comp_case_1.site.address.region)
self.assertEqual(response_data["address"]["postcode"], comp_case_1.site.address.postcode)
self.assertEqual(response_data["address"]["country"]["id"], comp_case_1.site.address.country.id)
self.assertEqual(response_data["is_primary_site"], True)
def compare_compliance_visit_list_details(self, data, case):
self.assertEqual(data["id"], str(case.id))
self.assertEqual(data["reference_code"], case.reference_code)
self.assertEqual(data["visit_date"], case.visit_date.strftime("%Y-%m-%d"))
self.assertEqual(data["exporter_user_notification_count"], 0)
if case.case_officer:
self.assertEqual(data["case_officer_first_name"], case.case_officer.first_name)
self.assertEqual(data["case_officer_last_name"], case.case_officer.last_name)
else:
self.assertEqual(data["case_officer_first_name"], None)
self.assertEqual(data["case_officer_last_name"], None)
def test_exporter_get_compliance_visits(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
comp_visit_1 = ComplianceVisitCaseFactory(
organisation=self.organisation, status=get_case_status_by_status(CaseStatusEnum.OPEN)
)
comp_site_case = comp_visit_1.site_case
comp_visit_2 = ComplianceVisitCaseFactory(
organisation=self.organisation,
status=get_case_status_by_status(CaseStatusEnum.OPEN),
site_case=comp_site_case,
)
comp_visit_2.case_officer = self.gov_user
comp_visit_2.save()
url = reverse("compliance:exporter_visit_case_list", kwargs={"pk": comp_site_case.id})
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()["results"]
self.assertEqual(len(response_data), 2)
self.compare_compliance_visit_list_details(response_data[0], comp_visit_1)
self.compare_compliance_visit_list_details(response_data[1], comp_visit_2)
def test_exporter_get_visit_details(self):
self.exporter_user.set_role(self.organisation, self.exporter_super_user_role)
comp_visit_1 = ComplianceVisitCaseFactory(
organisation=self.organisation, status=get_case_status_by_status(CaseStatusEnum.OPEN)
)
comp_visit_1.case_officer = self.gov_user
comp_visit_1.save()
url = reverse("compliance:exporter_visit_case_detail", kwargs={"pk": comp_visit_1.id})
response = self.client.get(url, **self.exporter_headers)
self.assertEqual(response.status_code, status.HTTP_200_OK)
response_data = response.json()
self.assertEqual(response_data["id"], str(comp_visit_1.id))
self.assertEqual(response_data["reference_code"], comp_visit_1.reference_code)
self.assertEqual(response_data["visit_date"], comp_visit_1.visit_date.strftime("%Y-%m-%d"))
self.assertEqual(response_data["case_officer_first_name"], comp_visit_1.case_officer.first_name)
self.assertEqual(response_data["case_officer_last_name"], comp_visit_1.case_officer.last_name)
|
the-stack_0_23302 | import typing
def cumprod(mod: int, a: typing.List[int]) -> typing.List[int]:
a = a.copy()
for i in range(len(a) - 1):
a[i + 1] = a[i + 1] * a[i] % mod
return a
def factorial(mod: int, n: int) -> typing.List[int]:
fact = list(range(n))
fact[0] = 1
return cumprod(mod, fact)
def factorial_inverse(p: int, n: int) -> typing.List[int]:
ifact = list(range(1, n + 1))
ifact[-1] = pow(factorial(p, n)[-1], p - 2, p)
return cumprod(p, ifact[::-1])[::-1]
def make_choose(p: int, n: int) -> typing.Callable[[int, int], int]:
fact = factorial(p, n)
ifact = factorial_inverse(p, n)
def choose(n: int, k: int) -> int:
nonlocal fact, ifact
if k < 0 or n < k: return 0
return fact[n] * ifact[n - k] % p * ifact[k] % p
return choose
def main() -> typing.NoReturn:
n, m = map(int, input().split())
# inclusion exclusion principle
MOD = 10 ** 9 + 7
choose = make_choose(MOD, 1 << 20)
fact = factorial(MOD, 1 << 20)
def p(n: int, k: int) -> int:
return choose(n, k) * fact[k] % MOD
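    # Hedged note (added): the loop below appears to apply inclusion-exclusion over
    # k forced placements, i.e. it computes
    #   s = sum_{k=0}^{n} (-1)^k * C(m, k) * P(n, k) * P(m - k, n - k)^2  (mod 1e9+7)
    # where P(a, b) = a! / (a - b)! counts ordered selections of b items from a.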
s = 0
sign = 1
for k in range(n + 1):
s += choose(m, k) * p(n, k) % MOD * (p(m - k, n - k) ** 2 % MOD) % MOD * sign
s %= MOD
sign *= -1
print(s)
main()
|
the-stack_0_23303 | # coding: utf-8
# Copyright (c) 2016, 2021, Oracle and/or its affiliates. All rights reserved.
# This software is dual-licensed to you under the Universal Permissive License (UPL) 1.0 as shown at https://oss.oracle.com/licenses/upl or Apache License 2.0 as shown at http://www.apache.org/licenses/LICENSE-2.0. You may choose either license.
from .typed_object import TypedObject
from oci.util import formatted_flat_dict, NONE_SENTINEL, value_allowed_none_or_none_sentinel # noqa: F401
from oci.decorators import init_model_state_from_kwargs
@init_model_state_from_kwargs
class InputField(TypedObject):
"""
The input field for an operator.
"""
def __init__(self, **kwargs):
"""
Initializes a new InputField object with values from keyword arguments. The default value of the :py:attr:`~oci.data_integration.models.InputField.model_type` attribute
of this class is ``INPUT_FIELD`` and it should not be changed.
The following keyword arguments are supported (corresponding to the getters/setters of this class):
:param model_type:
The value to assign to the model_type property of this InputField.
Allowed values for this property are: "SHAPE", "INPUT_PORT", "SHAPE_FIELD", "INPUT_FIELD", "DERIVED_FIELD", "MACRO_FIELD", "OUTPUT_FIELD", "DYNAMIC_PROXY_FIELD", "OUTPUT_PORT", "DYNAMIC_INPUT_FIELD", "PROXY_FIELD", "PARAMETER"
:type model_type: str
:param key:
The value to assign to the key property of this InputField.
:type key: str
:param model_version:
The value to assign to the model_version property of this InputField.
:type model_version: str
:param parent_ref:
The value to assign to the parent_ref property of this InputField.
:type parent_ref: oci.data_integration.models.ParentReference
:param config_values:
The value to assign to the config_values property of this InputField.
:type config_values: oci.data_integration.models.ConfigValues
:param object_status:
The value to assign to the object_status property of this InputField.
:type object_status: int
:param name:
The value to assign to the name property of this InputField.
:type name: str
:param description:
The value to assign to the description property of this InputField.
:type description: str
:param type:
The value to assign to the type property of this InputField.
:type type: oci.data_integration.models.BaseType
:param labels:
The value to assign to the labels property of this InputField.
:type labels: list[str]
"""
self.swagger_types = {
'model_type': 'str',
'key': 'str',
'model_version': 'str',
'parent_ref': 'ParentReference',
'config_values': 'ConfigValues',
'object_status': 'int',
'name': 'str',
'description': 'str',
'type': 'BaseType',
'labels': 'list[str]'
}
self.attribute_map = {
'model_type': 'modelType',
'key': 'key',
'model_version': 'modelVersion',
'parent_ref': 'parentRef',
'config_values': 'configValues',
'object_status': 'objectStatus',
'name': 'name',
'description': 'description',
'type': 'type',
'labels': 'labels'
}
self._model_type = None
self._key = None
self._model_version = None
self._parent_ref = None
self._config_values = None
self._object_status = None
self._name = None
self._description = None
self._type = None
self._labels = None
self._model_type = 'INPUT_FIELD'
@property
def type(self):
"""
Gets the type of this InputField.
:return: The type of this InputField.
:rtype: oci.data_integration.models.BaseType
"""
return self._type
@type.setter
def type(self, type):
"""
Sets the type of this InputField.
:param type: The type of this InputField.
:type: oci.data_integration.models.BaseType
"""
self._type = type
@property
def labels(self):
"""
Gets the labels of this InputField.
Labels are keywords or labels that you can add to data assets, dataflows and so on. You can define your own labels and use them to categorize content.
:return: The labels of this InputField.
:rtype: list[str]
"""
return self._labels
@labels.setter
def labels(self, labels):
"""
Sets the labels of this InputField.
Labels are keywords or labels that you can add to data assets, dataflows and so on. You can define your own labels and use them to categorize content.
:param labels: The labels of this InputField.
:type: list[str]
"""
self._labels = labels
def __repr__(self):
return formatted_flat_dict(self)
def __eq__(self, other):
if other is None:
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
return not self == other
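# Hedged usage sketch (added, not part of the generated model): constructing an
# InputField directly from keyword arguments; the related model classes are assumed
# to be importable from oci.data_integration.models, and the exact accepted keywords
# follow the swagger_types mapping above.
#
#   field = InputField(key="field-key", name="ORDER_ID", labels=["pii"])
#   print(field)  # rendered via __repr__ / formatted_flat_dict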
|
the-stack_0_23304 | """distutils.command.build_ext
Implements the Distutils 'build_ext' command, for building extension
modules (currently limited to C extensions, should accommodate C++
extensions ASAP)."""
import contextlib
import os
import re
import sys
from distutils.core import Command
from distutils.errors import *
from distutils.sysconfig import customize_compiler, get_python_version
from distutils.sysconfig import get_config_h_filename
from distutils.dep_util import newer_group
from distutils.extension import Extension
from distutils.util import get_platform
from distutils import log
from . import py37compat
from site import USER_BASE
# An extension name is just a dot-separated list of Python NAMEs (ie.
# the same as a fully-qualified module name).
extension_name_re = re.compile(r'^[a-zA-Z_][a-zA-Z_0-9]*(\.[a-zA-Z_][a-zA-Z_0-9]*)*$')
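# Hedged illustration (added): names the pattern above accepts and rejects.
#   extension_name_re.match("foo.bar")      -> match (dotted Python identifiers)
#   extension_name_re.match("_speedups")    -> match
#   extension_name_re.match("3rdparty.ext") -> None  (a segment may not start with a digit)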
def show_compilers():
from distutils.ccompiler import show_compilers
show_compilers()
class build_ext(Command):
description = "build C/C++ extensions (compile/link to build directory)"
# XXX thoughts on how to deal with complex command-line options like
# these, i.e. how to make it so fancy_getopt can suck them off the
# command line and make it look like setup.py defined the appropriate
# lists of tuples of what-have-you.
# - each command needs a callback to process its command-line options
# - Command.__init__() needs access to its share of the whole
# command line (must ultimately come from
# Distribution.parse_command_line())
# - it then calls the current command class' option-parsing
# callback to deal with weird options like -D, which have to
# parse the option text and churn out some custom data
# structure
# - that data structure (in this case, a list of 2-tuples)
# will then be present in the command object by the time
# we get to finalize_options() (i.e. the constructor
# takes care of both command-line and client options
# in between initialize_options() and finalize_options())
sep_by = " (separated by '%s')" % os.pathsep
user_options = [
('build-lib=', 'b', "directory for compiled extension modules"),
('build-temp=', 't', "directory for temporary files (build by-products)"),
(
'plat-name=',
'p',
"platform name to cross-compile for, if supported "
"(default: %s)" % get_platform(),
),
(
'inplace',
'i',
"ignore build-lib and put compiled extensions into the source "
+ "directory alongside your pure Python modules",
),
(
'include-dirs=',
'I',
"list of directories to search for header files" + sep_by,
),
('define=', 'D', "C preprocessor macros to define"),
('undef=', 'U', "C preprocessor macros to undefine"),
('libraries=', 'l', "external C libraries to link with"),
(
'library-dirs=',
'L',
"directories to search for external C libraries" + sep_by,
),
('rpath=', 'R', "directories to search for shared C libraries at runtime"),
('link-objects=', 'O', "extra explicit link objects to include in the link"),
('debug', 'g', "compile/link with debugging information"),
('force', 'f', "forcibly build everything (ignore file timestamps)"),
('compiler=', 'c', "specify the compiler type"),
('parallel=', 'j', "number of parallel build jobs"),
('swig-cpp', None, "make SWIG create C++ files (default is C)"),
('swig-opts=', None, "list of SWIG command line options"),
('swig=', None, "path to the SWIG executable"),
('user', None, "add user include, library and rpath"),
]
boolean_options = ['inplace', 'debug', 'force', 'swig-cpp', 'user']
help_options = [
('help-compiler', None, "list available compilers", show_compilers),
]
def initialize_options(self):
self.extensions = None
self.build_lib = None
self.plat_name = None
self.build_temp = None
self.inplace = 0
self.package = None
self.include_dirs = None
self.define = None
self.undef = None
self.libraries = None
self.library_dirs = None
self.rpath = None
self.link_objects = None
self.debug = None
self.force = None
self.compiler = None
self.swig = None
self.swig_cpp = None
self.swig_opts = None
self.user = None
self.parallel = None
def finalize_options(self):
from distutils import sysconfig
self.set_undefined_options(
'build',
('build_lib', 'build_lib'),
('build_temp', 'build_temp'),
('compiler', 'compiler'),
('debug', 'debug'),
('force', 'force'),
('parallel', 'parallel'),
('plat_name', 'plat_name'),
)
if self.package is None:
self.package = self.distribution.ext_package
self.extensions = self.distribution.ext_modules
# Make sure Python's include directories (for Python.h, pyconfig.h,
# etc.) are in the include search path.
py_include = sysconfig.get_python_inc()
plat_py_include = sysconfig.get_python_inc(plat_specific=1)
if self.include_dirs is None:
self.include_dirs = self.distribution.include_dirs or []
if isinstance(self.include_dirs, str):
self.include_dirs = self.include_dirs.split(os.pathsep)
# If in a virtualenv, add its include directory
# Issue 16116
if sys.exec_prefix != sys.base_exec_prefix:
self.include_dirs.append(os.path.join(sys.exec_prefix, 'include'))
# Put the Python "system" include dir at the end, so that
# any local include dirs take precedence.
self.include_dirs.extend(py_include.split(os.path.pathsep))
if plat_py_include != py_include:
self.include_dirs.extend(plat_py_include.split(os.path.pathsep))
self.ensure_string_list('libraries')
self.ensure_string_list('link_objects')
# Life is easier if we're not forever checking for None, so
# simplify these options to empty lists if unset
if self.libraries is None:
self.libraries = []
if self.library_dirs is None:
self.library_dirs = []
elif isinstance(self.library_dirs, str):
self.library_dirs = self.library_dirs.split(os.pathsep)
if self.rpath is None:
self.rpath = []
elif isinstance(self.rpath, str):
self.rpath = self.rpath.split(os.pathsep)
# for extensions under windows use different directories
# for Release and Debug builds.
# also Python's library directory must be appended to library_dirs
if os.name == 'nt':
# the 'libs' directory is for binary installs - we assume that
# must be the *native* platform. But we don't really support
# cross-compiling via a binary install anyway, so we let it go.
self.library_dirs.append(os.path.join(sys.exec_prefix, 'libs'))
if sys.base_exec_prefix != sys.prefix: # Issue 16116
self.library_dirs.append(os.path.join(sys.base_exec_prefix, 'libs'))
if self.debug:
self.build_temp = os.path.join(self.build_temp, "Debug")
else:
self.build_temp = os.path.join(self.build_temp, "Release")
# Append the source distribution include and library directories,
# this allows distutils on windows to work in the source tree
self.include_dirs.append(os.path.dirname(get_config_h_filename()))
self.library_dirs.append(sys.base_exec_prefix)
# Use the .lib files for the correct architecture
if self.plat_name == 'win32':
suffix = 'win32'
else:
# win-amd64
suffix = self.plat_name[4:]
new_lib = os.path.join(sys.exec_prefix, 'PCbuild')
if suffix:
new_lib = os.path.join(new_lib, suffix)
self.library_dirs.append(new_lib)
# For extensions under Cygwin, Python's library directory must be
# appended to library_dirs
if sys.platform[:6] == 'cygwin':
if not sysconfig.python_build:
# building third party extensions
self.library_dirs.append(
os.path.join(
sys.prefix, "lib", "python" + get_python_version(), "config"
)
)
else:
# building python standard extensions
self.library_dirs.append('.')
# For building extensions with a shared Python library,
# Python's library directory must be appended to library_dirs
# See Issues: #1600860, #4366
if sysconfig.get_config_var('Py_ENABLE_SHARED'):
if not sysconfig.python_build:
# building third party extensions
self.library_dirs.append(sysconfig.get_config_var('LIBDIR'))
else:
# building python standard extensions
self.library_dirs.append('.')
# The argument parsing will result in self.define being a string, but
# it has to be a list of 2-tuples. All the preprocessor symbols
# specified by the 'define' option will be set to '1'. Multiple
# symbols can be separated with commas.
if self.define:
defines = self.define.split(',')
self.define = [(symbol, '1') for symbol in defines]
# The option for macros to undefine is also a string from the
# option parsing, but has to be a list. Multiple symbols can also
# be separated with commas here.
if self.undef:
self.undef = self.undef.split(',')
if self.swig_opts is None:
self.swig_opts = []
else:
self.swig_opts = self.swig_opts.split(' ')
# Finally add the user include and library directories if requested
if self.user:
user_include = os.path.join(USER_BASE, "include")
user_lib = os.path.join(USER_BASE, "lib")
if os.path.isdir(user_include):
self.include_dirs.append(user_include)
if os.path.isdir(user_lib):
self.library_dirs.append(user_lib)
self.rpath.append(user_lib)
if isinstance(self.parallel, str):
try:
self.parallel = int(self.parallel)
except ValueError:
raise DistutilsOptionError("parallel should be an integer")
def run(self):
from distutils.ccompiler import new_compiler
# 'self.extensions', as supplied by setup.py, is a list of
# Extension instances. See the documentation for Extension (in
# distutils.extension) for details.
#
# For backwards compatibility with Distutils 0.8.2 and earlier, we
# also allow the 'extensions' list to be a list of tuples:
# (ext_name, build_info)
# where build_info is a dictionary containing everything that
# Extension instances do except the name, with a few things being
# differently named. We convert these 2-tuples to Extension
# instances as needed.
if not self.extensions:
return
# If we were asked to build any C/C++ libraries, make sure that the
# directory where we put them is in the library search path for
# linking extensions.
if self.distribution.has_c_libraries():
build_clib = self.get_finalized_command('build_clib')
self.libraries.extend(build_clib.get_library_names() or [])
self.library_dirs.append(build_clib.build_clib)
# Setup the CCompiler object that we'll use to do all the
# compiling and linking
self.compiler = new_compiler(
compiler=self.compiler,
verbose=self.verbose,
dry_run=self.dry_run,
force=self.force,
)
customize_compiler(self.compiler)
# If we are cross-compiling, init the compiler now (if we are not
# cross-compiling, init would not hurt, but people may rely on
# late initialization of compiler even if they shouldn't...)
if os.name == 'nt' and self.plat_name != get_platform():
self.compiler.initialize(self.plat_name)
# And make sure that any compile/link-related options (which might
# come from the command-line or from the setup script) are set in
# that CCompiler object -- that way, they automatically apply to
# all compiling and linking done here.
if self.include_dirs is not None:
self.compiler.set_include_dirs(self.include_dirs)
if self.define is not None:
# 'define' option is a list of (name,value) tuples
for (name, value) in self.define:
self.compiler.define_macro(name, value)
if self.undef is not None:
for macro in self.undef:
self.compiler.undefine_macro(macro)
if self.libraries is not None:
self.compiler.set_libraries(self.libraries)
if self.library_dirs is not None:
self.compiler.set_library_dirs(self.library_dirs)
if self.rpath is not None:
self.compiler.set_runtime_library_dirs(self.rpath)
if self.link_objects is not None:
self.compiler.set_link_objects(self.link_objects)
# Now actually compile and link everything.
self.build_extensions()
def check_extensions_list(self, extensions):
"""Ensure that the list of extensions (presumably provided as a
command option 'extensions') is valid, i.e. it is a list of
Extension objects. We also support the old-style list of 2-tuples,
where the tuples are (ext_name, build_info), which are converted to
Extension instances here.
Raise DistutilsSetupError if the structure is invalid anywhere;
just returns otherwise.
"""
if not isinstance(extensions, list):
raise DistutilsSetupError(
"'ext_modules' option must be a list of Extension instances"
)
for i, ext in enumerate(extensions):
if isinstance(ext, Extension):
continue # OK! (assume type-checking done
# by Extension constructor)
if not isinstance(ext, tuple) or len(ext) != 2:
raise DistutilsSetupError(
"each element of 'ext_modules' option must be an "
"Extension instance or 2-tuple"
)
ext_name, build_info = ext
log.warn(
"old-style (ext_name, build_info) tuple found in "
"ext_modules for extension '%s' "
"-- please convert to Extension instance",
ext_name,
)
if not (isinstance(ext_name, str) and extension_name_re.match(ext_name)):
raise DistutilsSetupError(
"first element of each tuple in 'ext_modules' "
"must be the extension name (a string)"
)
if not isinstance(build_info, dict):
raise DistutilsSetupError(
"second element of each tuple in 'ext_modules' "
"must be a dictionary (build info)"
)
# OK, the (ext_name, build_info) dict is type-safe: convert it
# to an Extension instance.
ext = Extension(ext_name, build_info['sources'])
# Easy stuff: one-to-one mapping from dict elements to
# instance attributes.
for key in (
'include_dirs',
'library_dirs',
'libraries',
'extra_objects',
'extra_compile_args',
'extra_link_args',
):
val = build_info.get(key)
if val is not None:
setattr(ext, key, val)
# Medium-easy stuff: same syntax/semantics, different names.
ext.runtime_library_dirs = build_info.get('rpath')
if 'def_file' in build_info:
log.warn("'def_file' element of build info dict " "no longer supported")
# Non-trivial stuff: 'macros' split into 'define_macros'
# and 'undef_macros'.
macros = build_info.get('macros')
if macros:
ext.define_macros = []
ext.undef_macros = []
for macro in macros:
if not (isinstance(macro, tuple) and len(macro) in (1, 2)):
raise DistutilsSetupError(
"'macros' element of build info dict "
"must be 1- or 2-tuple"
)
if len(macro) == 1:
ext.undef_macros.append(macro[0])
elif len(macro) == 2:
ext.define_macros.append(macro)
extensions[i] = ext
def get_source_files(self):
self.check_extensions_list(self.extensions)
filenames = []
# Wouldn't it be neat if we knew the names of header files too...
for ext in self.extensions:
filenames.extend(ext.sources)
return filenames
def get_outputs(self):
# Sanity check the 'extensions' list -- can't assume this is being
# done in the same run as a 'build_extensions()' call (in fact, we
# can probably assume that it *isn't*!).
self.check_extensions_list(self.extensions)
# And build the list of output (built) filenames. Note that this
# ignores the 'inplace' flag, and assumes everything goes in the
# "build" tree.
outputs = []
for ext in self.extensions:
outputs.append(self.get_ext_fullpath(ext.name))
return outputs
def build_extensions(self):
# First, sanity-check the 'extensions' list
self.check_extensions_list(self.extensions)
if self.parallel:
self._build_extensions_parallel()
else:
self._build_extensions_serial()
def _build_extensions_parallel(self):
workers = self.parallel
if self.parallel is True:
workers = os.cpu_count() # may return None
try:
from concurrent.futures import ThreadPoolExecutor
except ImportError:
workers = None
if workers is None:
self._build_extensions_serial()
return
with ThreadPoolExecutor(max_workers=workers) as executor:
futures = [
executor.submit(self.build_extension, ext) for ext in self.extensions
]
for ext, fut in zip(self.extensions, futures):
with self._filter_build_errors(ext):
fut.result()
def _build_extensions_serial(self):
for ext in self.extensions:
with self._filter_build_errors(ext):
self.build_extension(ext)
@contextlib.contextmanager
def _filter_build_errors(self, ext):
try:
yield
except (CCompilerError, DistutilsError, CompileError) as e:
if not ext.optional:
raise
self.warn('building extension "%s" failed: %s' % (ext.name, e))
def build_extension(self, ext):
sources = ext.sources
if sources is None or not isinstance(sources, (list, tuple)):
raise DistutilsSetupError(
"in 'ext_modules' option (extension '%s'), "
"'sources' must be present and must be "
"a list of source filenames" % ext.name
)
# sort to make the resulting .so file build reproducible
sources = sorted(sources)
ext_path = self.get_ext_fullpath(ext.name)
depends = sources + ext.depends
if not (self.force or newer_group(depends, ext_path, 'newer')):
log.debug("skipping '%s' extension (up-to-date)", ext.name)
return
else:
log.info("building '%s' extension", ext.name)
# First, scan the sources for SWIG definition files (.i), run
# SWIG on 'em to create .c files, and modify the sources list
# accordingly.
sources = self.swig_sources(sources, ext)
# Next, compile the source code to object files.
# XXX not honouring 'define_macros' or 'undef_macros' -- the
# CCompiler API needs to change to accommodate this, and I
# want to do one thing at a time!
# Two possible sources for extra compiler arguments:
# - 'extra_compile_args' in Extension object
# - CFLAGS environment variable (not particularly
# elegant, but people seem to expect it and I
# guess it's useful)
# The environment variable should take precedence, and
# any sensible compiler will give precedence to later
# command line args. Hence we combine them in order:
extra_args = ext.extra_compile_args or []
macros = ext.define_macros[:]
for undef in ext.undef_macros:
macros.append((undef,))
objects = self.compiler.compile(
sources,
output_dir=self.build_temp,
macros=macros,
include_dirs=ext.include_dirs,
debug=self.debug,
extra_postargs=extra_args,
depends=ext.depends,
)
        # XXX outdated variable, kept here in case third-party code
# needs it.
self._built_objects = objects[:]
# Now link the object files together into a "shared object" --
# of course, first we have to figure out all the other things
# that go into the mix.
if ext.extra_objects:
objects.extend(ext.extra_objects)
extra_args = ext.extra_link_args or []
# Detect target language, if not provided
language = ext.language or self.compiler.detect_language(sources)
self.compiler.link_shared_object(
objects,
ext_path,
libraries=self.get_libraries(ext),
library_dirs=ext.library_dirs,
runtime_library_dirs=ext.runtime_library_dirs,
extra_postargs=extra_args,
export_symbols=self.get_export_symbols(ext),
debug=self.debug,
build_temp=self.build_temp,
target_lang=language,
)
def swig_sources(self, sources, extension):
"""Walk the list of source files in 'sources', looking for SWIG
interface (.i) files. Run SWIG on all that are found, and
return a modified 'sources' list with SWIG source files replaced
by the generated C (or C++) files.
"""
new_sources = []
swig_sources = []
swig_targets = {}
# XXX this drops generated C/C++ files into the source tree, which
# is fine for developers who want to distribute the generated
# source -- but there should be an option to put SWIG output in
# the temp dir.
if self.swig_cpp:
log.warn("--swig-cpp is deprecated - use --swig-opts=-c++")
if (
self.swig_cpp
or ('-c++' in self.swig_opts)
or ('-c++' in extension.swig_opts)
):
target_ext = '.cpp'
else:
target_ext = '.c'
for source in sources:
(base, ext) = os.path.splitext(source)
if ext == ".i": # SWIG interface file
new_sources.append(base + '_wrap' + target_ext)
swig_sources.append(source)
swig_targets[source] = new_sources[-1]
else:
new_sources.append(source)
if not swig_sources:
return new_sources
swig = self.swig or self.find_swig()
swig_cmd = [swig, "-python"]
swig_cmd.extend(self.swig_opts)
if self.swig_cpp:
swig_cmd.append("-c++")
# Do not override commandline arguments
if not self.swig_opts:
for o in extension.swig_opts:
swig_cmd.append(o)
for source in swig_sources:
target = swig_targets[source]
log.info("swigging %s to %s", source, target)
self.spawn(swig_cmd + ["-o", target, source])
return new_sources
def find_swig(self):
"""Return the name of the SWIG executable. On Unix, this is
just "swig" -- it should be in the PATH. Tries a bit harder on
Windows.
"""
if os.name == "posix":
return "swig"
elif os.name == "nt":
# Look for SWIG in its standard installation directory on
# Windows (or so I presume!). If we find it there, great;
# if not, act like Unix and assume it's in the PATH.
for vers in ("1.3", "1.2", "1.1"):
fn = os.path.join("c:\\swig%s" % vers, "swig.exe")
if os.path.isfile(fn):
return fn
else:
return "swig.exe"
else:
raise DistutilsPlatformError(
"I don't know how to find (much less run) SWIG "
"on platform '%s'" % os.name
)
# -- Name generators -----------------------------------------------
# (extension names, filenames, whatever)
def get_ext_fullpath(self, ext_name):
"""Returns the path of the filename for a given extension.
The file is located in `build_lib` or directly in the package
(inplace option).
"""
fullname = self.get_ext_fullname(ext_name)
modpath = fullname.split('.')
filename = self.get_ext_filename(modpath[-1])
if not self.inplace:
# no further work needed
# returning :
# build_dir/package/path/filename
filename = os.path.join(*modpath[:-1] + [filename])
return os.path.join(self.build_lib, filename)
# the inplace option requires to find the package directory
# using the build_py command for that
package = '.'.join(modpath[0:-1])
build_py = self.get_finalized_command('build_py')
package_dir = os.path.abspath(build_py.get_package_dir(package))
# returning
# package_dir/filename
return os.path.join(package_dir, filename)
def get_ext_fullname(self, ext_name):
"""Returns the fullname of a given extension name.
Adds the `package.` prefix"""
if self.package is None:
return ext_name
else:
return self.package + '.' + ext_name
def get_ext_filename(self, ext_name):
r"""Convert the name of an extension (eg. "foo.bar") into the name
of the file from which it will be loaded (eg. "foo/bar.so", or
"foo\bar.pyd").
"""
from distutils.sysconfig import get_config_var
ext_path = ext_name.split('.')
ext_suffix = get_config_var('EXT_SUFFIX')
return os.path.join(*ext_path) + ext_suffix
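        # Hedged illustration (added): with a typical CPython EXT_SUFFIX such as
        # ".cpython-310-x86_64-linux-gnu.so", get_ext_filename("foo.bar") would
        # return "foo/bar.cpython-310-x86_64-linux-gnu.so"; the suffix is
        # platform- and build-specific.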
def get_export_symbols(self, ext):
"""Return the list of symbols that a shared extension has to
export. This either uses 'ext.export_symbols' or, if it's not
provided, "PyInit_" + module_name. Only relevant on Windows, where
the .pyd file (DLL) must export the module "PyInit_" function.
"""
name = ext.name.split('.')[-1]
try:
# Unicode module name support as defined in PEP-489
# https://www.python.org/dev/peps/pep-0489/#export-hook-name
name.encode('ascii')
except UnicodeEncodeError:
suffix = 'U_' + name.encode('punycode').replace(b'-', b'_').decode('ascii')
else:
suffix = "_" + name
initfunc_name = "PyInit" + suffix
if initfunc_name not in ext.export_symbols:
ext.export_symbols.append(initfunc_name)
return ext.export_symbols
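        # Hedged illustration (added): for an extension named "pkg.spam" the exported
        # init symbol is "PyInit_spam"; for a non-ASCII module name the PEP 489 form
        # "PyInitU_" + the punycode-encoded name is used instead.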
def get_libraries(self, ext):
"""Return the list of libraries to link against when building a
shared extension. On most platforms, this is just 'ext.libraries';
on Windows, we add the Python library (eg. python20.dll).
"""
# The python library is always needed on Windows. For MSVC, this
# is redundant, since the library is mentioned in a pragma in
# pyconfig.h that MSVC groks. The other Windows compilers all seem
# to need it mentioned explicitly, though, so that's what we do.
# Append '_d' to the python import library on debug builds.
if sys.platform == "win32":
from distutils._msvccompiler import MSVCCompiler
if not isinstance(self.compiler, MSVCCompiler):
template = "python%d%d"
if self.debug:
template = template + '_d'
pythonlib = template % (
sys.hexversion >> 24,
(sys.hexversion >> 16) & 0xFF,
)
# don't extend ext.libraries, it may be shared with other
# extensions, it is a reference to the original list
return ext.libraries + [pythonlib]
else:
# On Android only the main executable and LD_PRELOADs are considered
# to be RTLD_GLOBAL, all the dependencies of the main executable
# remain RTLD_LOCAL and so the shared libraries must be linked with
# libpython when python is built with a shared python library (issue
# bpo-21536).
# On Cygwin (and if required, other POSIX-like platforms based on
# Windows like MinGW) it is simply necessary that all symbols in
# shared libraries are resolved at link time.
from distutils.sysconfig import get_config_var
link_libpython = False
if get_config_var('Py_ENABLE_SHARED'):
# A native build on an Android device or on Cygwin
if hasattr(sys, 'getandroidapilevel'):
link_libpython = True
elif sys.platform == 'cygwin':
link_libpython = True
elif '_PYTHON_HOST_PLATFORM' in os.environ:
# We are cross-compiling for one of the relevant platforms
if get_config_var('ANDROID_API_LEVEL') != 0:
link_libpython = True
elif get_config_var('MACHDEP') == 'cygwin':
link_libpython = True
if link_libpython:
ldversion = get_config_var('LDVERSION')
return ext.libraries + ['python' + ldversion]
return ext.libraries + py37compat.pythonlib()
|
the-stack_0_23305 | """
Using set operations to check who went to the party
"""
# Present guests using Intersection of sets
friends = {'alice', 'lucy', 'braiden', 'kim'}
guests = {'jane', 'lucy', 'paula', 'alice', 'mary'}
present = friends.intersection(guests)  # equivalently: friends & guests
print(present)
# Absent friends using difference of sets
absent = friends.difference(guests)
print(absent)
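# Hedged note (added): with the sets above, present is {'alice', 'lucy'} and
# absent is {'braiden', 'kim'}; the printed ordering of set elements may vary.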
# Constructing a dictionary from lists
names = ['paula', 'rose', 'mary']
last_seen = [12, 3, 5]
lists_to_dict = {names[i]: last_seen[i] for i in range(len(names))}
print(lists_to_dict)
# Constructing a dictionary from lists using the function zip()
# zip() pairs the lists element-wise: (paula, 12), (rose, 3), ... (an iterator in Python 3)
# dict() consumes those pairs and builds the dictionary from them
lists_to_dict = dict(zip(names, last_seen))
print(lists_to_dict)
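# Hedged note (added): both constructions yield {'paula': 12, 'rose': 3, 'mary': 5}.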
for key, value in lists_to_dict.items():
print(f'{key} : {value}')
|
the-stack_0_23309 | # ------------------------------------
# Copyright (c) Microsoft Corporation.
# Licensed under the MIT License.
# ------------------------------------
from unittest import mock
from urllib.parse import urlparse
from azure.core.credentials import AccessToken
from azure.identity import CredentialUnavailableError
from azure.identity._constants import EnvironmentVariables
from azure.identity._internal.user_agent import USER_AGENT
from azure.identity.aio import VSCodeCredential
from azure.core.pipeline.policies import SansIOHTTPPolicy
import pytest
from helpers import build_aad_response, mock_response, Request
from helpers_async import async_validating_transport, AsyncMockTransport, wrap_in_future
@pytest.mark.asyncio
async def test_no_scopes():
"""The credential should raise ValueError when get_token is called with no scopes"""
credential = VSCodeCredential()
with pytest.raises(ValueError):
await credential.get_token()
@pytest.mark.asyncio
async def test_policies_configurable():
policy = mock.Mock(spec_set=SansIOHTTPPolicy, on_request=mock.Mock())
async def send(*_, **__):
return mock_response(json_payload=build_aad_response(access_token="**"))
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value="VALUE"):
credential = VSCodeCredential(policies=[policy], transport=mock.Mock(send=send))
await credential.get_token("scope")
assert policy.on_request.called
@pytest.mark.asyncio
async def test_user_agent():
transport = async_validating_transport(
requests=[Request(required_headers={"User-Agent": USER_AGENT})],
responses=[mock_response(json_payload=build_aad_response(access_token="**"))],
)
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value="VALUE"):
credential = VSCodeCredential(transport=transport)
await credential.get_token("scope")
@pytest.mark.asyncio
@pytest.mark.parametrize("authority", ("localhost", "https://localhost"))
async def test_request_url(authority):
"""the credential should accept an authority, with or without scheme, as an argument or environment variable"""
tenant_id = "expected_tenant"
access_token = "***"
parsed_authority = urlparse(authority)
expected_netloc = parsed_authority.netloc or authority # "localhost" parses to netloc "", path "localhost"
expected_refresh_token = "refresh-token"
async def mock_send(request, **kwargs):
actual = urlparse(request.url)
assert actual.scheme == "https"
assert actual.netloc == expected_netloc
assert actual.path.startswith("/" + tenant_id)
assert request.body["refresh_token"] == expected_refresh_token
return mock_response(json_payload={"token_type": "Bearer", "expires_in": 42, "access_token": access_token})
credential = VSCodeCredential(tenant_id=tenant_id, transport=mock.Mock(send=mock_send), authority=authority)
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value=expected_refresh_token):
token = await credential.get_token("scope")
assert token.token == access_token
# authority can be configured via environment variable
with mock.patch.dict("os.environ", {EnvironmentVariables.AZURE_AUTHORITY_HOST: authority}, clear=True):
credential = VSCodeCredential(tenant_id=tenant_id, transport=mock.Mock(send=mock_send))
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value=expected_refresh_token):
await credential.get_token("scope")
assert token.token == access_token
@pytest.mark.asyncio
async def test_credential_unavailable_error():
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value=None):
credential = VSCodeCredential()
with pytest.raises(CredentialUnavailableError):
token = await credential.get_token("scope")
@pytest.mark.asyncio
async def test_redeem_token():
expected_token = AccessToken("token", 42)
expected_value = "value"
mock_client = mock.Mock(spec=object)
token_by_refresh_token = mock.Mock(return_value=expected_token)
mock_client.obtain_token_by_refresh_token = wrap_in_future(token_by_refresh_token)
mock_client.get_cached_access_token = mock.Mock(return_value=None)
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value=expected_value):
credential = VSCodeCredential(_client=mock_client)
token = await credential.get_token("scope")
assert token is expected_token
token_by_refresh_token.assert_called_with(("scope",), expected_value)
@pytest.mark.asyncio
async def test_cache_refresh_token():
expected_token = AccessToken("token", 42)
mock_client = mock.Mock(spec=object)
token_by_refresh_token = mock.Mock(return_value=expected_token)
mock_client.obtain_token_by_refresh_token = wrap_in_future(token_by_refresh_token)
mock_client.get_cached_access_token = mock.Mock(return_value=None)
mock_get_credentials = mock.Mock(return_value="VALUE")
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", mock_get_credentials):
credential = VSCodeCredential(_client=mock_client)
token = await credential.get_token("scope")
assert mock_get_credentials.call_count == 1
token = await credential.get_token("scope")
assert mock_get_credentials.call_count == 1
@pytest.mark.asyncio
async def test_no_obtain_token_if_cached():
expected_token = AccessToken("token", 42)
mock_client = mock.Mock(should_refresh=lambda _: False)
token_by_refresh_token = mock.Mock(return_value=expected_token)
mock_client.obtain_token_by_refresh_token = wrap_in_future(token_by_refresh_token)
mock_client.get_cached_access_token = mock.Mock(return_value="VALUE")
with mock.patch(VSCodeCredential.__module__ + ".get_credentials", return_value="VALUE"):
credential = VSCodeCredential(_client=mock_client)
token = await credential.get_token("scope")
assert token_by_refresh_token.call_count == 0
|
the-stack_0_23310 | # Copyright 2019 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""This script is used to synthesize generated parts of this library."""
import os
import synthtool as s
import synthtool.gcp as gcp
import logging
logging.basicConfig(level=logging.DEBUG)
gapic = gcp.GAPICGenerator()
common = gcp.CommonTemplates()
for version in ['V1', 'V1Beta1']:
lower_version = version.lower()
library = gapic.php_library(
service='scheduler',
version=lower_version,
config_path=f'/google/cloud/scheduler/artman_cloudscheduler_{lower_version}.yaml',
artman_output_name=f'google-cloud-cloudscheduler-{lower_version}')
# copy all src
s.move(library / f'src/{version}')
# copy proto files to src also
s.move(library / f'proto/src/Google/Cloud/Scheduler', f'src/')
s.move(library / f'tests/')
# copy GPBMetadata file to metadata
s.move(library / f'proto/src/GPBMetadata/Google/Cloud/Scheduler', f'metadata/')
# document and utilize apiEndpoint instead of serviceAddress
s.replace(
"**/Gapic/*GapicClient.php",
r"'serviceAddress' =>",
r"'apiEndpoint' =>")
s.replace(
"**/Gapic/*GapicClient.php",
r"@type string \$serviceAddress\n\s+\*\s+The address",
r"""@type string $serviceAddress
* **Deprecated**. This option will be removed in a future major release. Please
* utilize the `$apiEndpoint` option instead.
* @type string $apiEndpoint
* The address""")
s.replace(
"**/Gapic/*GapicClient.php",
r"\$transportConfig, and any \$serviceAddress",
r"$transportConfig, and any `$apiEndpoint`")
# prevent proto messages from being marked final
s.replace(
"src/V*/**/*.php",
r"final class",
r"class")
# Replace "Unwrapped" with "Value" for method names.
s.replace(
"src/V*/**/*.php",
r"public function ([s|g]\w{3,})Unwrapped",
r"public function \1Value"
)
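# Hedged illustration (added): with the pattern above, a generated accessor such as
# "public function getLabelsUnwrapped" is rewritten to "public function getLabelsValue".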
# fix year
s.replace(
'src/**/**/*.php',
r'Copyright \d{4}',
r'Copyright 2019')
s.replace(
'tests/**/**/*Test.php',
r'Copyright \d{4}',
r'Copyright 2019')
# V1 is GA, so remove @experimental tags
s.replace(
'src/V1/**/*Client.php',
r'^(\s+\*\n)?\s+\*\s@experimental\n',
'')
# Change the wording for the deprecation warning.
s.replace(
'src/*/*_*.php',
r'will be removed in the next major release',
'will be removed in a future release')
|
the-stack_0_23313 | def analyse(path):
"""
Analyse bowtie output files
Parameters
----------
path : path to the directory where input files are located. Input files should be in .txt format.
Returns
-------
    CSV files containing read information, including mapped and unmapped read segments, genomic coverage of reads, NCBI IDs and organisms.
"""
os.chdir(path)
os.mkdir('output')
new_path=os.path.join(path,'output')
nic = pd.read_csv("ncbiid.csv",header=None)
names = pd.read_csv("names.csv", sep=" ", header=None)
for infile in os.listdir(path):
if infile.endswith("txt"):
f_name=infile.split('.')[0]
file = pd.read_csv(infile, sep="\t", header=None)
file = file.assign(**{"ncbi_id": nic,"names":names})
file.rename(columns = {0:'Organism',1:'Genome_size', 2:f'{f_name}_mapped_read_segments', 3:f'{f_name}_unmapped_read_segments'}, inplace=True)
# keep rows where mapped reads is not zero
new_file = file.loc[file[f'{f_name}_mapped_read_segments'] > 0, :]
# add read length and genome coverage columns
# new_file["read_length"] = [161 for i in range(len(new_file['Organism']))]
new_file.insert(4, "read_length", [161 for i in range(len(new_file.index))], True)
# calculate genome coverage and then add to the data frame
cal = (new_file[f'{f_name}_mapped_read_segments'] * new_file['read_length']) / new_file['Genome_size']
# new_file["genome_coverage"] = cal #assigning like this creates a settingWithCopyWarning
new_file.insert(5, "genome_coverage", cal, True)
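            # Hedged note (added): this is the usual depth-of-coverage estimate,
            #   coverage = (mapped read segments * read length) / genome size,
            # with the read length fixed at 161 bp above.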
# select rows with > 10 genome coverage and save to a new file
final_file = new_file.loc[new_file["genome_coverage"] > 10, :]
final_file.to_csv(f'{new_path}/{infile}.csv',index=False)
return new_path
def combine_data(new_path):
"""
Merge two or more dataframes by common keys
Parameters
----------
path : path to the directory where input .csv files are located
Returns
-------
concatenated .csv files by common organisms
"""
from functools import reduce
os.chdir(new_path)
os.mkdir('final_output')
final_path=os.path.join(new_path,'final_output')
names = []
for infile in os.listdir(new_path):
if infile.endswith("csv"):
f_name=infile.split('.')[0]
file = pd.read_csv(infile)
new_file = file[['Organism',f'{f_name}_mapped_read_segments']]
names.append(new_file)
namesf = reduce(lambda left,right: pd.merge(left,right,on=['Organism'],
how='outer'), names) # outer: use union of keys from both frames
namesf = namesf.fillna(0)
namesf = namesf.reindex(sorted(namesf.columns), axis=1) #sorting columns by their name
namesf = namesf.set_index('Organism')
namesf.to_csv(f"{final_path}/file.csv", index=True)
if __name__ == "__main__":
import argparse
import os
import pandas as pd
parser = argparse.ArgumentParser(description = "This script is for analysing bowtie output files")
parser.add_argument("--path", type=str, help="Path of the directory where input files are stored")
args = parser.parse_args()
path = args.path
new_path = analyse(path)
combine_data(new_path)
from datetime import datetime
print(f"\n Analysis completed at {datetime.now().strftime('%H:%M:%S')} \n")
|
the-stack_0_23314 | # This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Builds qubit operators for all Hamiltonian terms in the protein folding problem."""
from typing import Union
import numpy as np
from qiskit.opflow import OperatorBase, PauliOp, PauliSumOp
from .bead_contacts.contact_map import ContactMap
from .bead_distances.distance_map import DistanceMap
from .exceptions.invalid_side_chain_exception import (
InvalidSideChainException,
)
from .exceptions.invalid_size_exception import InvalidSizeException
from .penalty_parameters import PenaltyParameters
from .peptide.pauli_ops_builder import _build_full_identity
from .qubit_utils.qubit_fixing import _fix_qubits
from .peptide.beads.base_bead import BaseBead
from .peptide.peptide import Peptide
class QubitOpBuilder:
"""Builds qubit operators for all Hamiltonian terms in the protein folding problem."""
def __init__(
self, peptide: Peptide, pair_energies: np.ndarray, penalty_parameters: PenaltyParameters
):
"""Builds qubit operators for all Hamiltonian terms in the protein folding problem.
Args:
peptide: A Peptide object that includes all information about a protein.
pair_energies: Numpy array of pair energies for amino acids.
penalty_parameters: A PenaltyParameters object storing the values of all penalty
parameters.
"""
self._peptide = peptide
self._pair_energies = pair_energies
self._penalty_parameters = penalty_parameters
self._contact_map = ContactMap(peptide)
self._distance_map = DistanceMap(peptide)
_side_chain_hot_vector = self._peptide.get_side_chain_hot_vector()
self._has_side_chain_second_bead = (
_side_chain_hot_vector[1] if len(_side_chain_hot_vector) > 1 else False
)
def _build_qubit_op(self) -> Union[PauliSumOp, PauliOp]:
"""
Builds a qubit operator for a total Hamiltonian for a protein folding problem. It includes
8 terms responsible for chirality, geometry and nearest neighbors interactions.
Returns:
A total Hamiltonian for the protein folding problem.
Raises:
InvalidSizeException: if chains of invalid/incompatible sizes provided.
InvalidSideChainException: if side chains on forbidden indices provided.
"""
side_chain = self._peptide.get_side_chain_hot_vector()
main_chain_len = len(self._peptide.get_main_chain)
if len(side_chain) != main_chain_len:
raise InvalidSizeException("side_chain_lens size not equal main_chain_len")
if side_chain[0] == 1 or side_chain[-1] == 1:
raise InvalidSideChainException(
"First and last main beads are not allowed to have a side chain. Nonempty "
"residue provided for an invalid side chain."
)
num_qubits = 4 * pow(main_chain_len - 1, 2)
full_id = _build_full_identity(num_qubits)
h_chiral = self._create_h_chiral()
if h_chiral != 0:
h_chiral = full_id ^ h_chiral
h_back = self._create_h_back()
if h_back != 0:
h_back = full_id ^ h_back
h_scsc = self._create_h_scsc() if self._penalty_parameters.penalty_1 else 0
h_bbbb = self._create_h_bbbb() if self._penalty_parameters.penalty_1 else 0
h_short = self._create_h_short()
if h_short != 0:
h_short = full_id ^ h_short
h_bbsc, h_scbb = (
self._create_h_bbsc_and_h_scbb() if self._penalty_parameters.penalty_1 else (0, 0)
)
h_total = h_chiral + h_back + h_short + h_bbbb + h_bbsc + h_scbb + h_scsc
return h_total.reduce()
def _create_turn_operators(self, lower_bead: BaseBead, upper_bead: BaseBead) -> OperatorBase:
"""
Creates a qubit operator for consecutive turns.
Args:
lower_bead: A bead with a smaller index in the chain.
upper_bead: A bead with a bigger index in the chain.
Returns:
A qubit operator for consecutive turns.
"""
(
lower_bead_indic_0,
lower_bead_indic_1,
lower_bead_indic_2,
lower_bead_indic_3,
) = lower_bead.indicator_functions
(
upper_bead_indic_0,
upper_bead_indic_1,
upper_bead_indic_2,
upper_bead_indic_3,
) = upper_bead.indicator_functions
turns_operator = _fix_qubits(
lower_bead_indic_0 @ upper_bead_indic_0
+ lower_bead_indic_1 @ upper_bead_indic_1
+ lower_bead_indic_2 @ upper_bead_indic_2
+ lower_bead_indic_3 @ upper_bead_indic_3,
self._has_side_chain_second_bead,
)
return turns_operator
def _create_h_back(self) -> Union[PauliSumOp, PauliOp]:
"""
Creates Hamiltonian that imposes the geometrical constraint wherein consecutive turns along
the same axis are penalized by a factor, penalty_back. Note, that the first two turns are
omitted (fixed in optimization) due to symmetry degeneracy.
Returns:
Contribution to Hamiltonian in symbolic notation that penalizes consecutive turns
along the same axis.
"""
main_chain = self._peptide.get_main_chain
penalty_back = self._penalty_parameters.penalty_back
h_back = 0
for i in range(len(main_chain) - 2):
h_back += penalty_back * self._create_turn_operators(main_chain[i], main_chain[i + 1])
h_back = _fix_qubits(h_back, self._has_side_chain_second_bead)
return h_back
def _create_h_chiral(self) -> Union[PauliSumOp, PauliOp]:
"""
        Creates a penalty/constraint term in the total Hamiltonian that imposes the correct
        chirality on the positions of all side chain beads. Note that the position of the side chain
bead at a location (i) is determined by the turn indicators at i - 1 and i. In the absence
of side chains, this function returns a value of 0.
Returns:
Hamiltonian term that imposes the right chirality.
"""
main_chain = self._peptide.get_main_chain
main_chain_len = len(main_chain)
h_chiral = 0
# 2 stands for 2 qubits per turn, another 2 stands for main and side qubit register
full_id = _build_full_identity(2 * 2 * (main_chain_len - 1))
for i in range(1, len(main_chain) + 1):
upper_main_bead = main_chain[i - 1]
if upper_main_bead.side_chain is None:
continue
upper_side_bead = upper_main_bead.side_chain[0]
lower_main_bead = main_chain[i - 2]
(
lower_main_bead_indic_0,
lower_main_bead_indic_1,
lower_main_bead_indic_2,
lower_main_bead_indic_3,
) = lower_main_bead.indicator_functions
(
upper_main_bead_indic_0,
upper_main_bead_indic_1,
upper_main_bead_indic_2,
upper_main_bead_indic_3,
) = upper_main_bead.indicator_functions
(
upper_side_bead_indic_0,
upper_side_bead_indic_1,
upper_side_bead_indic_2,
upper_side_bead_indic_3,
) = upper_side_bead.indicator_functions
turn_coeff = int((1 - (-1) ** i) / 2)
h_chiral += self._build_chiral_term(
full_id,
lower_main_bead_indic_1,
lower_main_bead_indic_2,
lower_main_bead_indic_3,
turn_coeff,
upper_main_bead_indic_1,
upper_main_bead_indic_2,
upper_main_bead_indic_3,
upper_side_bead_indic_0,
)
h_chiral += self._build_chiral_term(
full_id,
lower_main_bead_indic_0,
lower_main_bead_indic_3,
lower_main_bead_indic_2,
turn_coeff,
upper_main_bead_indic_0,
upper_main_bead_indic_3,
upper_main_bead_indic_2,
upper_side_bead_indic_1,
)
h_chiral += self._build_chiral_term(
full_id,
lower_main_bead_indic_0,
lower_main_bead_indic_1,
lower_main_bead_indic_3,
turn_coeff,
upper_main_bead_indic_0,
upper_main_bead_indic_1,
upper_main_bead_indic_3,
upper_side_bead_indic_2,
)
h_chiral += self._build_chiral_term(
full_id,
lower_main_bead_indic_0,
lower_main_bead_indic_2,
lower_main_bead_indic_1,
turn_coeff,
upper_main_bead_indic_0,
upper_main_bead_indic_2,
upper_main_bead_indic_1,
upper_side_bead_indic_3,
)
h_chiral = _fix_qubits(h_chiral, self._has_side_chain_second_bead)
return h_chiral
def _build_chiral_term(
self,
full_id,
lower_main_bead_indic_b,
lower_main_bead_indic_c,
lower_main_bead_indic_d,
turn_coeff,
upper_main_bead_indic_b,
upper_main_bead_indic_c,
upper_main_bead_indic_d,
upper_side_bead_indic_a,
):
return (
self._penalty_parameters.penalty_chiral
* (full_id - upper_side_bead_indic_a)
@ (
(1 - turn_coeff)
* (
lower_main_bead_indic_b @ upper_main_bead_indic_c
+ lower_main_bead_indic_c @ upper_main_bead_indic_d
+ lower_main_bead_indic_d @ upper_main_bead_indic_b
)
+ turn_coeff
* (
lower_main_bead_indic_c @ upper_main_bead_indic_b
+ lower_main_bead_indic_d @ upper_main_bead_indic_c
+ lower_main_bead_indic_b @ upper_main_bead_indic_d
)
)
)
def _create_h_bbbb(self) -> Union[PauliSumOp, PauliOp]:
"""
Creates Hamiltonian term corresponding to a 1st neighbor interaction between
main/backbone (BB) beads.
Returns:
Hamiltonian term corresponding to a 1st neighbor interaction between main/backbone (
BB) beads.
"""
penalty_1 = self._penalty_parameters.penalty_1
h_bbbb = 0
main_chain_len = len(self._peptide.get_main_chain)
for i in range(1, main_chain_len - 3):
for j in range(i + 5, main_chain_len + 1):
if (j - i) % 2 == 0:
continue
h_bbbb += (self._contact_map.lower_main_upper_main[i][j]) ^ (
self._distance_map._first_neighbor(
self._peptide, i, 0, j, 0, penalty_1, self._pair_energies
)
)
try:
h_bbbb += (self._contact_map.lower_main_upper_main[i][j]) ^ (
self._distance_map._second_neighbor(
self._peptide, i - 1, 0, j, 0, penalty_1, self._pair_energies
)
)
except (IndexError, KeyError):
pass
try:
h_bbbb += (self._contact_map.lower_main_upper_main[i][j]) ^ (
self._distance_map._second_neighbor(
self._peptide, i + 1, 0, j, 0, penalty_1, self._pair_energies
)
)
except (IndexError, KeyError):
pass
try:
h_bbbb += (self._contact_map.lower_main_upper_main[i][j]) ^ (
self._distance_map._second_neighbor(
self._peptide, i, 0, j - 1, 0, penalty_1, self._pair_energies
)
)
except (IndexError, KeyError):
pass
try:
h_bbbb += (self._contact_map.lower_main_upper_main[i][j]) ^ (
self._distance_map._second_neighbor(
self._peptide, i, 0, j + 1, 0, penalty_1, self._pair_energies
)
)
except (IndexError, KeyError):
pass
h_bbbb = _fix_qubits(h_bbbb, self._has_side_chain_second_bead)
return h_bbbb
def _create_h_bbsc_and_h_scbb(self) -> Union[PauliSumOp, PauliOp]:
"""
Creates Hamiltonian term corresponding to 1st neighbor interaction between
main/backbone (BB) and side chain (SC) beads. In the absence
of side chains, this function returns a value of 0.
Returns:
Tuple of Hamiltonian terms consisting of backbone and side chain interactions.
"""
penalty_1 = self._penalty_parameters.penalty_1
h_bbsc = 0
h_scbb = 0
main_chain_len = len(self._peptide.get_main_chain)
side_chain = self._peptide.get_side_chain_hot_vector()
for i in range(1, main_chain_len - 3):
for j in range(i + 4, main_chain_len + 1):
if (j - i) % 2 == 1:
continue
if side_chain[j - 1] == 1:
h_bbsc += self._contact_map.lower_main_upper_side[i][j] ^ (
self._distance_map._first_neighbor(
self._peptide, i, 0, j, 1, penalty_1, self._pair_energies
)
+ self._distance_map._second_neighbor(
self._peptide, i, 0, j, 0, penalty_1, self._pair_energies
)
)
try:
h_bbsc += self._contact_map.lower_side_upper_side[i][
j
] ^ self._distance_map._first_neighbor(
self._peptide, i, 1, j, 1, penalty_1, self._pair_energies
)
except (IndexError, KeyError, TypeError):
pass
try:
h_bbsc += self._contact_map.lower_main_upper_side[i][
j
] ^ self._distance_map._second_neighbor(
self._peptide, i + 1, 0, j, 1, penalty_1, self._pair_energies
)
except (IndexError, KeyError, TypeError):
pass
try:
h_bbsc += self._contact_map.lower_main_upper_side[i][
j
] ^ self._distance_map._second_neighbor(
self._peptide, i - 1, 0, j, 1, penalty_1, self._pair_energies
)
except (IndexError, KeyError, TypeError):
pass
if side_chain[i - 1] == 1:
h_scbb += self._contact_map.lower_side_upper_main[i][j] ^ (
self._distance_map._first_neighbor(
self._peptide, i, 1, j, 0, penalty_1, self._pair_energies
)
+ self._distance_map._second_neighbor(
self._peptide, i, 0, j, 0, penalty_1, self._pair_energies
)
)
try:
h_scbb += self._contact_map.lower_side_upper_main[i][
j
] ^ self._distance_map._second_neighbor(
self._peptide, i, 1, j, 1, penalty_1, self._pair_energies
)
except (IndexError, KeyError, TypeError):
pass
try:
h_scbb += self._contact_map.lower_side_upper_main[i][
j
] ^ self._distance_map._second_neighbor(
self._peptide, i, 1, j + 1, 0, penalty_1, self._pair_energies
)
except (IndexError, KeyError, TypeError):
pass
try:
h_scbb += self._contact_map.lower_side_upper_main[i][
j
] ^ self._distance_map._second_neighbor(
self._peptide, i, 1, j - 1, 0, penalty_1, self._pair_energies
)
except (IndexError, KeyError, TypeError):
pass
h_bbsc = _fix_qubits(h_bbsc, self._has_side_chain_second_bead)
h_scbb = _fix_qubits(h_scbb, self._has_side_chain_second_bead)
return h_bbsc, h_scbb
def _create_h_scsc(self) -> Union[PauliSumOp, PauliOp]:
"""
Creates Hamiltonian term corresponding to 1st neighbor interaction between
side chain (SC) beads. In the absence of side chains, this function
returns a value of 0.
Returns:
Hamiltonian term consisting of side chain pairwise interactions
"""
penalty_1 = self._penalty_parameters.penalty_1
h_scsc = 0
main_chain_len = len(self._peptide.get_main_chain)
side_chain = self._peptide.get_side_chain_hot_vector()
for i in range(1, main_chain_len - 3):
for j in range(i + 5, main_chain_len + 1):
if (j - i) % 2 == 0:
continue
if side_chain[i - 1] == 0 or side_chain[j - 1] == 0:
continue
h_scsc += self._contact_map.lower_side_upper_side[i][j] ^ (
self._distance_map._first_neighbor(
self._peptide, i, 1, j, 1, penalty_1, self._pair_energies
)
+ self._distance_map._second_neighbor(
self._peptide, i, 1, j, 0, penalty_1, self._pair_energies
)
+ self._distance_map._second_neighbor(
self._peptide, i, 0, j, 1, penalty_1, self._pair_energies
)
)
return _fix_qubits(h_scsc, self._has_side_chain_second_bead)
def _create_h_short(self) -> Union[PauliSumOp, PauliOp]:
"""
Creates Hamiltonian constituting interactions between beads that are no more than
4 beads apart. If no side chains are present, this function returns 0.
Returns:
Contribution to energetic Hamiltonian for interactions between beads that are no more
than 4 beads apart.
"""
main_chain_len = len(self._peptide.get_main_chain)
side_chain = self._peptide.get_side_chain_hot_vector()
h_short = 0
for i in range(1, main_chain_len - 2):
# checks interactions between beads no more than 4 beads apart
if side_chain[i - 1] == 1 and side_chain[i + 2] == 1:
op1 = self._create_turn_operators(
self._peptide.get_main_chain[i + 1],
self._peptide.get_main_chain[i - 1].side_chain[0],
)
op2 = self._create_turn_operators(
self._peptide.get_main_chain[i - 1],
self._peptide.get_main_chain[i + 2].side_chain[0],
)
coeff = float(
self._pair_energies[i][1][i + 3][1]
+ 0.1
* (self._pair_energies[i][1][i + 3][0] + self._pair_energies[i][0][i + 3][1])
)
composed = op1 @ op2
h_short += (coeff * composed).reduce()
h_short = _fix_qubits(h_short, self._has_side_chain_second_bead)
return h_short
|
the-stack_0_23316 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Jun 23 21:52:43 2021
@author: q
Goal : Create an SGD* Perceptron algorithm,
* Stochastic Gradient Descent
"""
# =============================================================================
# imports
# =============================================================================
# dataset generator
from sklearn.datasets import make_blobs
# train test split
from sklearn.model_selection import train_test_split
# data handling
import pandas as pd
import numpy as np
# data visualization
import matplotlib.pyplot as plt
import seaborn as sns
# =============================================================================
# program test
# =============================================================================
if __name__ == '__main__':
X, y = make_blobs(n_samples = 500,
centers = 2,
random_state = 0,
cluster_std = 0.8)
# use binary values as -1, 1
y = pd.Series(y).replace(0, -1).values
# visualize data
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.coolwarm)
plt.show()
# train test split
X_train, X_test, y_train, y_test = train_test_split(X, y,
train_size = 0.5,
random_state=0)
# perceptron sgd
# initialize weight vector to random values, same size as the feature vector
W = np.random.uniform(low = -1.0, high = 1.0, size = len(X_train[0]))
# initialize b to a random value
b = np.random.uniform(low = -1.0, high = 1.0, size = None)
T = 100
learning_rate = 0.01
# max iteration for convergence
for t in range(T):
# iterate the data loop
L = len(X_train)
"""Stochastic Gradient Descent"""
rows = [x for x in range(L)]
stochastic = [np.random.choice(rows, size = None, replace = True) for x in range(L)]
for i in stochastic:
# select the data point and label
X_, y_ = np.array([X_train[i][0], X_train[i][1]]), y_train[i]
# evaluate the decision boundary with signed distance
if np.sign(y_ * (np.dot(W.T, X_) + b)) < 0:
# update decision boundary
W = W + learning_rate * (X_ * y_)
b = b + learning_rate * y_  # keep the bias update on the same learning-rate scale as W
error = 0
L = len(X_test)
for i in range(L):
# select the data point and label
X_, y_ = np.array([X_test[i][0], X_test[i][1]]), y_test[i]
# evaluate the decision boundary with signed distance
if np.sign( y_ * (np.dot(W.T, X_) + b)) < 0:
# count errors
error += 1
print('Accuracy Score : ', 1 - (error/ L))
# calc and plot decision boundary
min_x1, max_x1 = X_train[:, 0].min(), X_train[:, 0].max()
n = 40
x1 = [x for x in np.linspace(start = min_x1, stop = max_x1, num = n)]
# calc decision boundary
slope = -W[0] / W[1]
intercept = -b / W[1]
decision_boundary = [slope * x + intercept for x in x1]
# visualize decision boundary
plt.plot(x1, decision_boundary, c = 'black', linestyle = 'dashed')
plt.scatter(X_test[:, 0], X_test[:, 1], c=y_test, cmap=plt.cm.coolwarm)
plt.show()
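# A minimal vectorized sketch of the same error rule used in the test loop
# above, assuming labels in {-1, +1} and the sign(W.x + b) decision function
# of this script:
def perceptron_error_rate(W, b, X, y):
    # a sample counts as an error when y * (W . x + b) < 0, exactly as in the loop above
    margins = y * (X @ W + b)
    return float(np.mean(margins < 0))
# e.g. 1 - perceptron_error_rate(W, b, X_test, y_test) reproduces the accuracy
# score printed above.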
|
the-stack_0_23317 | from credmon.CredentialMonitors.AbstractCredentialMonitor import AbstractCredentialMonitor
from credmon.utils import atomic_rename, api_endpoints
try:
from requests_oauthlib import OAuth2Session
except ImportError:
OAuth2Session = None
import os
import time
import json
import glob
import tempfile
import re
try:
import htcondor
except ImportError:
htcondor = None
class OAuthCredmon(AbstractCredentialMonitor):
use_token_metadata = True
def __init__(self, *args, **kw):
super(OAuthCredmon, self).__init__(*args, **kw)
def should_renew(self, username, token_name):
access_token_path = os.path.join(self.cred_dir, username, token_name + '.use')
metadata_path = os.path.join(self.cred_dir, username, token_name + '.meta')
# check if access token exists
if not os.path.exists(access_token_path):
return True
try:
with open(access_token_path, 'r') as f:
access_token = json.load(f)
except IOError as ie:
self.log.warning("Could not open access token %s: %s", access_token_path, str(ie))
return True
except ValueError as ve:
self.log.warning("The access token file at %s is invalid; could not parse as JSON: %s", access_token_path, str(ve))
return True
# load metadata to check if access token uses a refresh token
if self.use_token_metadata:
try:
with open(metadata_path, 'r') as f:
token_metadata = json.load(f)
except IOError as ie:
self.log.warning("Could not find metadata file %s: %s", metadata_path, ie)
except ValueError as ve:
self.log.warning("The metadata file at %s is invalid; could not parse as JSON: %s", metadata_path, str(ve))
else:
if 'use_refresh_token' in token_metadata:
if token_metadata['use_refresh_token'] == False:
return False
lifetime_fraction = api_endpoints.token_lifetime_fraction(token_metadata['token_url'])
else:
lifetime_fraction = 0.5
# compute token refresh time
create_time = os.path.getctime(access_token_path)
refresh_time = create_time + (float(access_token['expires_in']) * lifetime_fraction)
# check if token is past its refresh time
if time.time() > refresh_time:
return True
return False
def should_delete(self, username, token_name):
mark_path = os.path.join(self.cred_dir, username, token_name + '.mark')
# check if mark file exists
if os.path.exists(mark_path):
try:
mtime = os.stat(mark_path).st_mtime
except OSError as e:
self.log.error('Could not stat %s', mark_path)
return False
# if mark file is older than 24 hours (or CREDMON_OAUTH_TOKEN_LIFETIME if defined), delete tokens
self.log.debug('Mark file is %d seconds old', int(time.time() - mtime))
if htcondor is not None and 'CREDMON_OAUTH_TOKEN_LIFETIME' in htcondor.param:
if time.time() - mtime > int(htcondor.param['CREDMON_OAUTH_TOKEN_LIFETIME']):
return True
elif time.time() - mtime > 24*60*60:
return True
return False
def refresh_access_token(self, username, token_name):
if OAuth2Session is None:
raise ImportError("No module named OAuth2Session")
# load the refresh token
refresh_token_path = os.path.join(self.cred_dir, username, token_name + '.top')
try:
with open(refresh_token_path, 'r') as f:
refresh_token = json.load(f)
except IOError as ie:
self.log.error("Could not open refresh token %s: %s", refresh_token_path, str(ie))
return False
except ValueError as ve:
self.log.error("The format of the refresh token file %s is invalid; could not parse as JSON: %s", refresh_token_path, str(ve))
return False
# load metadata
metadata_path = os.path.join(self.cred_dir, username, token_name + '.meta')
try:
with open(metadata_path, 'r') as f:
token_metadata = json.load(f)
except IOError as ie:
self.log.error("Could not open metadata file %s: %s", metadata_path, str(ie))
return False
except ValueError as ve:
self.log.error("The metadata file at %s is invalid; could not parse as JSON: %s", metadata_path, str(ve))
return False
# refresh the token (provides both new refresh and access tokens)
oauth_client = OAuth2Session(token_metadata['client_id'], token = refresh_token)
new_token = oauth_client.refresh_token(token_metadata['token_url'],
client_id = token_metadata['client_id'],
client_secret = token_metadata['client_secret'])
try:
refresh_token = {u'refresh_token': new_token.pop('refresh_token')}
except KeyError:
self.log.error("No %s refresh token returned for %s", token_name, username)
return False
# write tokens to tmp files
(tmp_fd, tmp_refresh_token_path) = tempfile.mkstemp(dir = self.cred_dir)
with os.fdopen(tmp_fd, 'w') as f:
json.dump(refresh_token, f)
(tmp_fd, tmp_access_token_path) = tempfile.mkstemp(dir = self.cred_dir)
with os.fdopen(tmp_fd, 'w') as f:
json.dump(new_token, f)
# atomically move new tokens in place
access_token_path = os.path.join(self.cred_dir, username, token_name + '.use')
try:
atomic_rename(tmp_access_token_path, access_token_path)
atomic_rename(tmp_refresh_token_path, refresh_token_path)
except OSError as e:
self.log.error(e)
return False
else:
return True
def delete_tokens(self, username, token_name):
exts = ['.top', '.use', '.meta', '.mark']
base_path = os.path.join(self.cred_dir, username, token_name)
success = True
for ext in exts:
if os.path.exists(base_path + ext):
try:
os.unlink(base_path + ext)
except OSError as e:
self.log.debug('Could not remove %s: %s', base_path + ext, e.strerror)
success = False
else:
self.log.debug('Could not find %s', base_path + ext)
return success
def check_access_token(self, access_token_path):
(basename, token_filename) = os.path.split(access_token_path)
(cred_dir, username) = os.path.split(basename)
token_name = os.path.splitext(token_filename)[0] # strip .use
# OAuthCredmon only handles OAuth access tokens, which must have metadata files
metadata_path = os.path.join(self.cred_dir, username, token_name + '.meta')
if not os.path.exists(metadata_path):
self.log.debug('Skipping check of %s token files for user %s, no metadata found', token_name, username)
return
if self.should_delete(username, token_name):
self.log.info('%s tokens for user %s are marked for deletion', token_name, username)
success = self.delete_tokens(username, token_name)
if success:
self.log.info('Successfully deleted %s token files for user %s', token_name, username)
else:
self.log.error('Failed to delete all %s token files for user %s', token_name, username)
elif self.should_renew(username, token_name):
self.log.info('Refreshing %s tokens for user %s', token_name, username)
success = self.refresh_access_token(username, token_name)
if success:
self.log.info('Successfully refreshed %s tokens for user %s', token_name, username)
else:
self.log.error('Failed to refresh %s tokens for user %s', token_name, username)
def scan_tokens(self):
# loop over all access tokens in the cred_dir
access_token_files = glob.glob(os.path.join(self.cred_dir, '*', '*.use'))
for access_token_file in access_token_files:
self.check_access_token(access_token_file)
# also cleanup any stale key files
self.cleanup_key_files()
def cleanup_key_files(self):
# key filenames are hashes with str len 64
key_file_re = re.compile(r'^[a-f0-9]{64}$')
# loop over all possible key files in cred_dir
key_files = glob.glob(os.path.join(self.cred_dir, '?'*64))
for key_file in key_files:
if ((not key_file_re.match(os.path.basename(key_file)))
or os.path.isdir(key_file)):
continue
try:
ctime = os.stat(key_file).st_ctime
except OSError as os_error:
self.log.error('Could not stat key file %s: %s', key_file, os_error)
continue
# remove key files if over 12 hours old
if time.time() - ctime > 12*3600:
self.log.info('Removing stale key file %s', os.path.basename(key_file))
try:
os.unlink(key_file)
except OSError as os_error:
self.log.error('Could not remove key file %s: %s', key_file, os_error)
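# A minimal sketch of the refresh rule applied in should_renew() above: a token
# whose access-token file was created at `create_time` and that reports
# `expires_in` seconds is due for refresh once `lifetime_fraction` of that
# lifetime has elapsed (0.5 by default, or the endpoint-specific fraction from
# api_endpoints.token_lifetime_fraction). This helper is illustrative only.
def _illustrative_needs_refresh(create_time, expires_in, lifetime_fraction=0.5):
    return time.time() > create_time + float(expires_in) * lifetime_fraction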
|
the-stack_0_23318 | import cv2
import numpy as np
img = cv2.imread(r"..\lena.jpg")
rows, cols = img.shape[:2]
mapx = np.zeros(img.shape[:2], np.float32)
mapy = np.zeros(img.shape[:2], np.float32)
for i in range(rows):
for j in range(cols):
# i indexes rows and j indexes columns, so compare i against rows and j against cols
if 0.25 * rows < i < 0.75 * rows and 0.25 * cols < j < 0.75 * cols:
mapx.itemset((i, j), 2 * (j - cols * 0.25) + 0.5)
mapy.itemset((i, j), 2 * (i - rows * 0.25) + 0.5)
else:
mapx.itemset((i, j), 0)
mapy.itemset((i, j), 0)
rst = cv2.remap(img, mapx, mapy, cv2.INTER_LINEAR)
cv2.imshow("original", img)
cv2.imshow("result", rst)
cv2.waitKey()
cv2.destroyAllWindows()
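# A vectorized sketch of the same "zoom the central quarter" mapping built above,
# assuming the same img/rows/cols; index arrays avoid the per-pixel Python loop:
#     ys, xs = np.indices((rows, cols), dtype=np.float32)
#     inside = ((0.25 * rows < ys) & (ys < 0.75 * rows) &
#               (0.25 * cols < xs) & (xs < 0.75 * cols))
#     mapx_v = np.where(inside, 2 * (xs - cols * 0.25) + 0.5, 0).astype(np.float32)
#     mapy_v = np.where(inside, 2 * (ys - rows * 0.25) + 0.5, 0).astype(np.float32)
#     rst_v = cv2.remap(img, mapx_v, mapy_v, cv2.INTER_LINEAR)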
|
the-stack_0_23321 | # -*- coding: utf-8 -*-
# @Author: YangZhou
# @Date: 2017-06-16 18:34:55
# @Last Modified by: YangZhou
# @Last Modified time: 2017-10-29 18:20:33
from aces.graph import fig, setLegend, pl
import numpy as np
from aces.algorithm.kpoints import filter_along_direction as fad
from aces.io.shengbte import get_qpoints_full as gqf, get_gruneisen, get_omega
from aces.f import binmeanx
markers = ['^', 's', "8"]
colors = "k,r,b,g,purple".split(',')
import matplotlib
matplotlib.rcParams['ytick.minor.width'] = 1.5
matplotlib.rcParams['ytick.minor.size'] = 3
with fig('gruneisen.eps'):
fi, axes = pl.subplots(2, 3, sharex=True, sharey=True, figsize=(10, 6))
for j in range(2):
if j == 0:
vs = "2l1,2lhex,3l1".split(',')
if j == 1:
vs = "4l1,5l1,Silicene".split(',')
for i, v in enumerate(vs):
for s in ['z', 'a']:
ax = axes[j, i]
dir = "%s/0/SHENG" % v
if v == "2l1":
dir = "2l1/0/sheng.1/"
gru = get_gruneisen(dir)
omega = get_omega(dir)
qpoints_full, idx = gqf(dir)
omega_full = omega[idx]
gru_full = gru[idx]
dth = np.pi / 10
if s == "z":
phi = 0.0
else:
phi = np.pi / 2.0
fil = fad(qpoints_full, phi, dth)
q = np.c_[
omega_full[fil].flatten(),
gru_full[fil].flatten()]
q = np.nan_to_num(q)
x, y = binmeanx(q, [0, 20.49], 1.5)
mfc = [colors[i], 'w'][s == 'a']
ls = ['-', '-.'][s == 'a']
ax.plot(x, np.abs(y), ls=ls,
marker=markers[i],
markersize=9,
markeredgecolor=colors[i],
markerfacecolor=mfc,
color=colors[i],
label=v + s)
ax.set_xlim([0, 20.49])
setLegend(ax, fontsize=10)
fi.text(0.5, 0.04, 'Phonon Frequency(THz)', ha='center')
fi.text(0.05, 0.5, 'Gruneisen Parameter', va='center', rotation='vertical')
fi.subplots_adjust(
left=None,
bottom=None,
right=None,
top=None,
wspace=0,
hspace=0)
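# A rough sketch of the bin-and-average step performed by binmeanx above, under
# the assumption that it averages the second column of q inside fixed-width
# frequency windows of the first column (the exact aces.f implementation may
# differ):
#     edges = np.arange(0, 20.49, 1.5)
#     idx = np.digitize(q[:, 0], edges)
#     x = np.array([q[idx == k, 0].mean() for k in np.unique(idx)])
#     y = np.array([q[idx == k, 1].mean() for k in np.unique(idx)])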
|
the-stack_0_23322 | """
sentry.rules.conditions.minimum_level
~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
:copyright: (c) 2010-2014 by the Sentry Team, see AUTHORS for more details.
:license: BSD, see LICENSE for more details.
"""
from __future__ import absolute_import
from collections import OrderedDict
from django import forms
from sentry.constants import LOG_LEVELS, LOG_LEVELS_MAP
from sentry.rules.conditions.base import EventCondition
LEVEL_CHOICES = OrderedDict(
[("{0}".format(k), v) for k, v in sorted(LOG_LEVELS.items(), key=lambda x: x[0], reverse=True)]
)
class MatchType(object):
EQUAL = 'eq'
LESS_OR_EQUAL = 'lte'
GREATER_OR_EQUAL = 'gte'
MATCH_CHOICES = OrderedDict(
[
(MatchType.EQUAL, 'equal to'), (MatchType.LESS_OR_EQUAL, 'less than or equal to'),
(MatchType.GREATER_OR_EQUAL, 'greater than or equal to')
]
)
class LevelEventForm(forms.Form):
level = forms.ChoiceField(
choices=LEVEL_CHOICES.items(),
)
match = forms.ChoiceField(
choices=MATCH_CHOICES.items(),
)
class LevelCondition(EventCondition):
form_cls = LevelEventForm
label = 'An event\'s level is {match} {level}'
form_fields = {
'level': {'type': 'choice', 'choices': LEVEL_CHOICES.items()},
'match': {'type': 'choice', 'choices': MATCH_CHOICES.items()}
}
def passes(self, event, state, **kwargs):
desired_level = self.get_option('level')
desired_match = self.get_option('match')
if not (desired_level and desired_match):
return False
desired_level = int(desired_level)
# Fetch the event level from the tags since event.level is
# event.group.level which may have changed
try:
level = LOG_LEVELS_MAP[event.get_tag('level')]
except KeyError:
return False
if desired_match == MatchType.EQUAL:
return level == desired_level
elif desired_match == MatchType.GREATER_OR_EQUAL:
return level >= desired_level
elif desired_match == MatchType.LESS_OR_EQUAL:
return level <= desired_level
return False
def render_label(self):
data = {
'level': LEVEL_CHOICES[self.data['level']],
'match': MATCH_CHOICES[self.data['match']],
}
return self.label.format(**data)
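# A minimal sketch of how this condition is driven, assuming saved rule data in
# the shape produced by LevelEventForm (levels are stringified logging levels,
# e.g. '40' for error in LOG_LEVELS):
#     data = {'level': '40', 'match': MatchType.GREATER_OR_EQUAL}
# With that data, render_label() yields
# "An event's level is greater than or equal to error", and passes() returns
# True only for events whose 'level' tag maps to error or above.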
|
the-stack_0_23323 | # -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.db import models, migrations
class Migration(migrations.Migration):
dependencies = [
('cms', '__first__'),
('forms', '0001_initial'),
]
operations = [
migrations.CreateModel(
name='PluginForm',
fields=[
('cmsplugin_ptr', models.OneToOneField(parent_link=True, auto_created=True, primary_key=True, serialize=False, to='cms.CMSPlugin', on_delete=models.CASCADE)),
('form', models.ForeignKey(to='forms.Form', on_delete=models.CASCADE)),
],
options={
'abstract': False,
},
bases=('cms.cmsplugin',),
),
]
|
the-stack_0_23325 | # -*- coding: utf-8 -*-
# Author: Jianying LIU
from bs4 import BeautifulSoup
from MPAscraper import MPA_Scrapper
import re
import json
URL_ROOT = 'https://fr.wikipedia.org/wiki/'
def scrapy_langues_info(dico_langue):
langues_info = []
for id_lang, lang in dico_langue.items():
url = URL_ROOT + lang
print(f"Scrapy en cours: {url}")
driver = "None"
scrapper = MPA_Scrapper(url,driver)
html = scrapper.getHTMLText(url)
html_parser = BeautifulSoup(html, 'html.parser')
table = html_parser.find('table', {'class': 'infobox_v2'})
rows = table.select('tr')
for r in rows:
if r.th:
# print(r.th.text.strip())
if r.th.text.strip() == "Pays":
pays = r.td.text.strip()
pays_count = len(pays.split(","))
# langues_info[id_lang]["pays"] = pays
if r.th.text.strip() == "Nombre de locuteurs":
nbre_locuteurs = r.td.text.strip()
nbre_locuteurs = re.sub(r"\[.+\]", "", nbre_locuteurs)
chiffre_loc = re.sub(r"\(.+\)", "", nbre_locuteurs)
chiffre_loc = re.sub(r'millions.*$', '000000', chiffre_loc)
chiffre_loc = re.sub(r"[^0-9]", "", chiffre_loc)
chiffre_loc = re.sub(u"\u00a0", "", chiffre_loc)
chiffre_loc = int(chiffre_loc.strip())
# langues_info[id_lang]["nbre_locuteurs"] = nbre_locuteurs
langues_info.append({"id":id_lang, "url": url, "pays":pays, "pays_count":pays_count, "nbre_locuteurs":nbre_locuteurs, "chiffre": chiffre_loc})
return langues_info
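# Worked example of the speaker-count clean-up chain above, assuming an infobox
# value such as "environ 40 millions (2007)":
#     drop "(...)"               -> "environ 40 millions "
#     "millions..." -> "000000"  -> "environ 40 000000"
#     keep digits only           -> "40000000" -> int 40000000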
def main():
dico_langue = {'gb':'Ngiemboon', 'ful':'Peul', 'med':'Medumba', 'swh':'Swahili', 'yb':'Yemba',
'so':'Sonink%C3%A9_(langue)', 'bu':'Boulou_(langue)', 'gho':'Ghomala%CA%BC', 'mbo':'Mbo_(langue_du_Cameroun)',
'ma':'Mafa_(langue)', 'dl':'Douala_(langue)', 'bs':'Bassa_(langue_bantoue)', 'fe':'Nufi',
'et':'Eton_(langue_bantoue)', 'mu':'Moussey_(langue)', 'ok':'Oku_(langue)', 'ŋg':'Ngemba_(langue)'}
lang_info = scrapy_langues_info(dico_langue)
json_text = json.dumps(lang_info, indent=4)
with open(f"scrapying_results/langues/lang_info.json","w",encoding="utf8") as f:
f.write(json_text)
if __name__ == "__main__":
main()
|
the-stack_0_23327 | import bz2
import datetime
import json
import logging
import re
from concurrent.futures.thread import ThreadPoolExecutor
from pathlib import Path
import matplotlib.pyplot as plt
import metpy.calc as mpcalc
import numpy as np
import pytz
import requests
import requests.sessions
from flask import Flask, make_response
from flask_json import FlaskJSON
from google.cloud import storage
from metpy.plots import SkewT
from metpy.units import units
from opencensus.trace import config_integration
from requests.adapters import HTTPAdapter
from urllib3 import Retry
from opencensus.ext.stackdriver import trace_exporter as stackdriver_exporter
import opencensus.trace.tracer
import skewt.plot.config as config
app = Flask(__name__)
FlaskJSON(app)
logger = logging.getLogger(__name__)
logging.basicConfig(format='%(asctime)s %(message)s', level=logging.DEBUG)
# Metrics
exporter = stackdriver_exporter.StackdriverExporter()
tracer = opencensus.trace.tracer.Tracer(
exporter=exporter,
sampler=opencensus.trace.tracer.samplers.AlwaysOnSampler()
)
config_integration.trace_integrations(['requests'])
class WeatherModelSoundingMetaData:
def __init__(self, model_time, valid_time):
self.model_time = model_time
self.valid_time = valid_time
class WeatherModelSounding:
def __init__(self, latitude, longitude, p, T, QV, Td, U, V, HHL, metadata):
self.latitude = latitude
self.longitude = longitude
self.p = p
self.T = T
self.QV = QV
self.Td = Td
self.U = U
self.V = V
self.HHL = HHL
self.metadata = metadata
latitude_pretty = str(abs(round(latitude, 2)))
if latitude < 0:
latitude_pretty = latitude_pretty + "S"
else:
latitude_pretty = latitude_pretty + "N"
longitude_pretty = str(abs(round(longitude, 2)))
if longitude > 0:
longitude_pretty = longitude_pretty + "E"
else:
longitude_pretty = longitude_pretty + "W"
self.latitude_pretty = latitude_pretty
self.longitude_pretty = longitude_pretty
class SkewTResult:
def __init__(self, model_time, valid_time, plot_full, plot_detail):
self.model_time = model_time
self.valid_time = valid_time
self.plot_full = plot_full
self.plot_detail = plot_detail
def __json__(self):
return {
"model_time": self.model_time,
"valid_time": self.valid_time,
"plot_full": self.plot_full,
"plot_detail": self.plot_detail
}
def download_bz2(url, target_file, session=requests.sessions.Session()):
r = session.get(url)
r.raise_for_status()
decompressor = bz2.BZ2Decompressor()
with open(target_file, 'wb') as f:
for chunk in r.iter_content(chunk_size=8192):
if chunk:
f.write(decompressor.decompress(chunk))
def download_content():
Path("tmp").mkdir(parents=True, exist_ok=True)
download_bz2(config.content_log_url, "tmp/content.log")
def latest_run(model, valid_time):
with tracer.span(name="download_content"):
download_content()
pattern = re.compile("./%s" \
"/grib" \
"\\/(\d{2})" \
"/t" \
"/icon-eu_europe_regular-lat-lon_model-level_(\d{10})_(\d{3})_1_T.grib2.bz2" % model)
max_t: int = 0
result = None
with tracer.span(name="parse_content"):
for i, line in enumerate(open('tmp/content.log')):
for match in re.finditer(pattern, line):
matches = match.groups()
match_valid_at = datetime.datetime.strptime(matches[1], "%Y%m%d%H")
match_valid_at = pytz.timezone('UTC').localize(match_valid_at)
match_valid_at = match_valid_at + datetime.timedelta(hours=int(matches[2]))
delta_t = abs((match_valid_at - valid_time).total_seconds())
if delta_t <= 30 * 60 and int(matches[1]) > max_t:
result = matches
max_t = int(matches[1])
return result
class AllLevelData:
def __init__(self, data, model_time, valid_time):
self.data = data
self.model_time = model_time
self.valid_time = valid_time
def parameter_all_levels(model, run, parameter, latitude, longitude, level_type="model_level", session=requests.Session()):
with tracer.span(name="load_parameters") as span:
run_hour = run[0]
run_datetime = run[1]
timestep = int(run[2])
logging.info(f"Loading sounding for latitude={latitude} longitude={longitude} with "
f"run_hour={run_hour} run_datetime={run_datetime} timestep={timestep} "
f"level_type={level_type} and parameter={parameter}")
span.add_attribute("parameter", parameter)
span.add_attribute("run_hour", str(run_hour))
span.add_attribute("run_datetime", str(run_datetime))
span.add_attribute("timestep", str(timestep))
levels = np.floor(np.linspace(60, 0, config.level_workers)).astype(int).tolist()
urls = list()
for i in range(0, len(levels) - 1):
base = levels[i]
top = levels[i + 1] + 1
# example URL:
# https://nwp-sounding-mw5zsrftba-ew.a.run.app/48.21/16.37/06/2020030406/4/p
url = f"{config.sounding_api}" \
f"/{latitude}" \
f"/{longitude}" \
f"/{run_hour}" \
f"/{run_datetime}" \
f"/{timestep}" \
f"/{parameter}" \
f"?level_type={level_type}" \
f"&base={base}" \
f"&top={top}"
urls.append(url)
result = AllLevelData(data=np.empty(0), model_time=None, valid_time=None)
with ThreadPoolExecutor(max_workers=config.level_workers) as executor:
responses = list(executor.map(session.get, urls))
for response in responses:
response.raise_for_status()
json_result = json.loads(response.content)
result.data = np.append(result.data, np.array(json_result["data"]))
json_first = json.loads(responses[0].content)
result.model_time = np.datetime64(json_first["model_time"])
result.valid_time = np.datetime64(json_first["valid_time"])
return result
def find_closest_model_level(p, needle):
return np.abs(p.to("hPa") - needle.to("hPa")).argmin()
def full_level_height(HHL, idx):
return (HHL[idx] + HHL[idx + 1]) / 2
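# Small illustration of the two helpers above: find_closest_model_level returns
# the index of the model level whose pressure is nearest the requested one, and
# full_level_height averages the two half-level heights (HHL) that bracket it,
# e.g. (values assumed to be pint quantities / metres, as elsewhere in this app):
#     idx = find_closest_model_level(p * units.Pa, 850 * units("hPa"))
#     height_m = full_level_height(HHL, idx)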
def load_weather_model_sounding(latitude, longitude, valid_time):
with tracer.span(name="latest_run"):
model = "icon-eu"
run = latest_run(model, valid_time)
http_session = session()
with ThreadPoolExecutor(max_workers=config.parameter_all_levels_workers) as executor:
p_future = executor.submit(parameter_all_levels, model, run, "p", latitude, longitude, session=http_session)
T_future = executor.submit(parameter_all_levels, model, run, "T", latitude, longitude, session=http_session)
QV_future = executor.submit(parameter_all_levels, model, run, "QV", latitude, longitude, session=http_session)
U_future = executor.submit(parameter_all_levels, model, run, "U", latitude, longitude, session=http_session)
V_future = executor.submit(parameter_all_levels, model, run, "V", latitude, longitude, session=http_session)
HHL_future = executor.submit(parameter_all_levels, model, run, "HHL", latitude, longitude, "time_invariant", session=http_session)
# Pressure Pa
p_raw = p_future.result()
p = p_raw.data
# Temperature K
T = T_future.result().data
# Specific Humidty kg/kg
QV = QV_future.result().data
# Dewpoint K
Td = mpcalc.dewpoint_from_specific_humidity(QV * units("kg/kg"), T * units.K, p * units.Pa)
# Wind m/s
U = U_future.result().data
V = V_future.result().data
# Height above MSL for model level
HHL = HHL_future.result().data
meta_data = WeatherModelSoundingMetaData(p_raw.model_time, p_raw.valid_time)
return WeatherModelSounding(latitude, longitude, p, T, QV, Td, U, V, HHL, meta_data)
def session():
http_session = requests.Session()
retry = Retry(total=config.sounding_api_retries, read=config.sounding_api_retries,
connect=config.sounding_api_retries,
backoff_factor=1.5, status_forcelist=(429, 500, 502, 504),
respect_retry_after_header=True)
http_adapter = HTTPAdapter(max_retries=retry, pool_connections=1000, pool_maxsize=1000)
http_session.mount('http://', http_adapter)
http_session.mount('https://', http_adapter)
return http_session
def plot_skewt_icon(sounding, parcel=None, base=1000, top=100, skew=45):
model_time = np.datetime_as_string(sounding.metadata.model_time, unit='m')
valid_time = np.datetime_as_string(sounding.metadata.valid_time, unit='m')
top_idx = find_closest_model_level(sounding.p * units.Pa, top * units("hPa"))
fig = plt.figure(figsize=(11, 11), constrained_layout=True)
skew = SkewT(fig, rotation=skew)
skew.plot(sounding.p * units.Pa, sounding.T * units.K, 'r')
skew.plot(sounding.p * units.Pa, sounding.Td, 'b')
skew.plot_barbs(sounding.p[:top_idx] * units.Pa, sounding.U[:top_idx] * units.mps,
sounding.V[:top_idx] * units.mps, plot_units=units.knot, alpha=0.6, xloc=1.13, x_clip_radius=0.3)
if parcel == "surface-based":
prof = mpcalc.parcel_profile(sounding.p * units.Pa, sounding.T[0] * units.K, sounding.Td[0]).to('degC')
skew.plot(sounding.p * units.Pa, prof, 'y', linewidth=2)
# Add the relevant special lines
skew.plot_dry_adiabats()
skew.plot_moist_adiabats()
skew.plot_mixing_lines()
skew.plot(sounding.p * units.Pa, np.zeros(len(sounding.p)) * units.degC, "#03d3fc", linewidth=1)
skew.ax.set_ylim(base, top)
plt.title(f"Model run: {model_time}Z", loc='left')
plt.title(f"Valid time: {valid_time}Z", fontweight='bold', loc='right')
plt.xlabel("Temperature [°C]")
plt.ylabel("Pressure [hPa]")
fig.suptitle(f"ICON-EU Model for {sounding.latitude_pretty}, {sounding.longitude_pretty}", fontsize=14)
ax1 = plt.gca()
ax2 = ax1.twinx() # instantiate a second axes that shares the same x-axis
color = '#333333'
ax2.set_ylabel('Geometric Altitude [kft]', color=color) # we already handled the x-label with ax1
ax2_data = (sounding.p * units.Pa).to('hPa')
ax2.plot(np.zeros(len(ax2_data)), ax2_data, color=color, alpha=0.0)
ax2.tick_params(axis='y', labelcolor=color)
ax2.set_yscale('log')
ax2.set_ylim((base, top))
ticks = np.linspace(base, top, num=10)
ideal_ticks = np.geomspace(base, top, 20)
real_tick_idxs = [find_closest_model_level(sounding.p * units.Pa, p_level * units("hPa")) for p_level in
ideal_ticks]
ticks = (sounding.p * units.Pa).to("hPa")[real_tick_idxs]
full_levels = [full_level_height(sounding.HHL, idx) for idx in real_tick_idxs]
tick_labels = np.around((full_levels * units.m).m_as("kft"), decimals=1)
ax2.set_yticks(ticks)
ax2.set_yticklabels(tick_labels)
ax2.minorticks_off()
return fig
@app.route("/<float:latitude>/<float(signed=True):longitude>/<valid_at>")
def skewt(latitude, longitude, valid_at):
with tracer.span(name="skewt") as span:
span.add_attribute("latitude", str(latitude))
span.add_attribute("longitude", str(longitude))
span.add_attribute("valid_at", str(valid_at))
valid_at_parsed = datetime.datetime.strptime(valid_at, "%Y%m%d%H")
valid_at_parsed = pytz.timezone('UTC').localize(valid_at_parsed)
with tracer.span(name="sounding"):
sounding = load_weather_model_sounding(latitude, longitude, valid_at_parsed)
with tracer.span(name="plotting"):
model_time = str(np.datetime_as_string(sounding.metadata.model_time))
valid_time = str(np.datetime_as_string(sounding.metadata.valid_time))
model_time_for_file_name = str(np.datetime_as_string(sounding.metadata.model_time, unit='m')).replace(":", "_")
valid_time_for_file_name = str(np.datetime_as_string(sounding.metadata.valid_time, unit='m')).replace(":", "_")
full_plot = plot_skewt_icon(sounding=sounding, parcel="surface-based")
full_plot_filename = f"plot_{sounding.latitude_pretty}_{sounding.longitude_pretty}_" \
f"{model_time_for_file_name}_{valid_time_for_file_name}_full.png"
full_plot.savefig(full_plot_filename)
detail_plot = plot_skewt_icon(sounding=sounding, parcel="surface-based", base=1000, top=500, skew=15)
detail_plot_filename = f"plot_{sounding.latitude_pretty}_{sounding.longitude_pretty}_" \
f"{model_time_for_file_name}_{valid_time_for_file_name}_detail.png"
detail_plot.savefig(detail_plot_filename)
with tracer.span(name="cloud_upload"):
# Google Cloud Upload
storage_client = storage.Client()
bucket = storage_client.bucket(config.bucket_name)
blob_full = bucket.blob(full_plot_filename)
blob_detail = bucket.blob(detail_plot_filename)
with ThreadPoolExecutor(max_workers=2) as executor:
executor.submit(blob_full.upload_from_filename, full_plot_filename)
executor.submit(blob_detail.upload_from_filename, detail_plot_filename)
result = json.dumps(SkewTResult(model_time, valid_time,
config.bucket_public_url + full_plot_filename,
config.bucket_public_url + detail_plot_filename).__dict__)
response = make_response(result)
response.mimetype = 'application/json'
return response
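# A sketch of how the route above is called, assuming a local Flask run on the
# default port; <valid_at> uses the "%Y%m%d%H" format parsed in skewt():
#     GET http://localhost:5000/48.21/16.37/2021060112
# The JSON response carries model_time, valid_time and the public bucket URLs
# of the full and detail Skew-T plots (see SkewTResult above).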
if __name__ == "__main__":
app.run()
|
the-stack_0_23328 | # Copyright lowRISC contributors.
# Licensed under the Apache License, Version 2.0, see LICENSE for details.
# SPDX-License-Identifier: Apache-2.0
# TODO(drewmacrae) this should be in rules_cc
# pending resolution of https://github.com/bazelbuild/rules_cc/issues/75
load("//rules:bugfix.bzl", "find_cc_toolchain")
"""Rules to build OpenTitan for the RiscV target"""
OPENTITAN_CPU = "@bazel_embedded//constraints/cpu:riscv32"
OPENTITAN_PLATFORM = "@bazel_embedded//platforms:opentitan_rv32imc"
_targets_compatible_with = {
OPENTITAN_PLATFORM: [OPENTITAN_CPU],
}
def _platforms_transition_impl(settings, attr):
return {"//command_line_option:platforms": attr.platform}
_platforms_transition = transition(
implementation = _platforms_transition_impl,
inputs = [],
outputs = ["//command_line_option:platforms"],
)
def _obj_transform(ctx):
cc_toolchain = find_cc_toolchain(ctx)
outputs = []
for src in ctx.files.srcs:
binary = ctx.actions.declare_file("{}.{}".format(src.basename, ctx.attr.suffix))
outputs.append(binary)
ctx.actions.run(
outputs = [binary],
inputs = [src] + cc_toolchain.all_files.to_list(),
arguments = [
"--output-target",
ctx.attr.format,
src.path,
binary.path,
],
executable = cc_toolchain.objcopy_executable,
)
return [DefaultInfo(files = depset(outputs), data_runfiles = ctx.runfiles(files = outputs))]
obj_transform = rule(
implementation = _obj_transform,
cfg = _platforms_transition,
attrs = {
"srcs": attr.label_list(allow_files = True),
"suffix": attr.string(default = "bin"),
"format": attr.string(default = "binary"),
"platform": attr.string(default = OPENTITAN_PLATFORM),
"_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
"_allowlist_function_transition": attr.label(
default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
),
},
toolchains = ["@rules_cc//cc:toolchain_type"],
)
def _elf_to_disassembly(ctx):
cc_toolchain = find_cc_toolchain(ctx)
outputs = []
for src in ctx.files.srcs:
disassembly = ctx.actions.declare_file("{}.dis".format(src.basename))
outputs.append(disassembly)
ctx.actions.run_shell(
outputs = [disassembly],
inputs = [src] + cc_toolchain.all_files.to_list(),
arguments = [
cc_toolchain.objdump_executable,
src.path,
disassembly.path,
],
command = "$1 --disassemble --headers --line-numbers --source $2 > $3",
)
return [DefaultInfo(files = depset(outputs), data_runfiles = ctx.runfiles(files = outputs))]
elf_to_disassembly = rule(
implementation = _elf_to_disassembly,
cfg = _platforms_transition,
attrs = {
"srcs": attr.label_list(allow_files = True),
"platform": attr.string(default = OPENTITAN_PLATFORM),
"_cc_toolchain": attr.label(default = Label("@bazel_tools//tools/cpp:current_cc_toolchain")),
"_allowlist_function_transition": attr.label(
default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
),
},
toolchains = ["@rules_cc//cc:toolchain_type"],
incompatible_use_toolchain_transition = True,
)
def _elf_to_scrambled(ctx):
outputs = []
for src in ctx.files.srcs:
scrambled = ctx.actions.declare_file("{}.scr.40.vmem".format(src.basename))
outputs.append(scrambled)
ctx.actions.run(
outputs = [scrambled],
inputs = [
src,
ctx.files._tool[0],
ctx.files._config[0],
],
arguments = [
ctx.files._config[0].path,
src.path,
scrambled.path,
],
executable = ctx.files._tool[0].path,
)
return [DefaultInfo(files = depset(outputs), data_runfiles = ctx.runfiles(files = outputs))]
elf_to_scrambled = rule(
implementation = _elf_to_scrambled,
cfg = _platforms_transition,
attrs = {
"srcs": attr.label_list(allow_files = True),
"platform": attr.string(default = OPENTITAN_PLATFORM),
"_tool": attr.label(default = "//hw/ip/rom_ctrl/util:scramble_image.py", allow_files = True),
"_config": attr.label(default = "//hw/top_earlgrey:data/autogen/top_earlgrey.gen.hjson", allow_files = True),
"_allowlist_function_transition": attr.label(
default = "@bazel_tools//tools/allowlists/function_transition_allowlist",
),
},
)
def opentitan_binary(
name,
platform = OPENTITAN_PLATFORM,
per_device_deps = {
"verilator": ["//sw/device/lib/arch:sim_verilator"],
"dv": ["//sw/device/lib/arch:sim_dv"],
"fpga_nexysvideo": ["//sw/device/lib/arch:fpga_nexysvideo"],
"cw310": ["//sw/device/lib/arch:fpga_cw310"],
},
output_bin = True,
output_disassembly = True,
output_scrambled = False,
**kwargs):
"""A helper macro for generating OpenTitan binary artifacts.
This macro is mostly a wrapper around cc_binary, but creates artifacts
for each of the keys in `per_device_deps`. The actual artifacts
created are an ELF file, a BIN file, the disassembly and the scrambled
ROM image. Each of these output targets performs a bazel transition to
the RV32I toolchain to build the target under the correct compiler.
Args:
@param name: The name of this rule.
@param platform: The target platform for the artifacts.
@param per_device_deps: The deps for each of the execution environments.
@param output_bin: Whether or not to emit a BIN file.
@param output_disassembly: Whether or not to emit a disassembly file.
@param output_scrambled: Whether or not to emit a SCR file.
@param **kwargs: Arguments to forward to `cc_binary`.
Emits rules:
For each device in per_device_deps entry:
cc_binary named: name_device
obj_transform named: name_device_elf
optionally:
obj_transform named: name_device_bin
elf_to_dissassembly named: name_device_dis
elf_to_scrambled named: name_device_scr
filegroup named: name
with all the generated rules
"""
copts = kwargs.pop("copts", []) + [
"-nostdlib",
"-ffreestanding",
]
linkopts = kwargs.pop("linkopts", []) + [
"-nostartfiles",
"-nostdlib",
]
deps = kwargs.pop("deps", [])
targets = []
for (device, dev_deps) in per_device_deps.items():
devname = "{}_{}".format(name, device)
native.cc_binary(
name = devname,
deps = deps + dev_deps,
target_compatible_with = _targets_compatible_with[platform],
copts = copts,
linkopts = linkopts,
**kwargs
)
targets.append(":" + devname + "_elf")
obj_transform(
name = devname + "_elf",
srcs = [devname],
format = "elf32-little",
suffix = "elf",
platform = platform,
)
if output_bin:
targets.append(":" + devname + "_bin")
obj_transform(
name = devname + "_bin",
srcs = [devname],
platform = platform,
)
if output_disassembly:
targets.append(":" + devname + "_dis")
elf_to_disassembly(
name = devname + "_dis",
srcs = [devname],
platform = platform,
)
if output_scrambled:
targets.append(":" + devname + "_scr")
elf_to_scrambled(
name = devname + "_scr",
srcs = [devname],
platform = platform,
)
native.filegroup(
name = name,
srcs = targets,
)
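# A minimal sketch of calling the macro above from a BUILD file; the load path,
# target name and dependency label below are placeholders, not guaranteed
# OpenTitan targets:
#     load("//rules:opentitan.bzl", "opentitan_binary")
#     opentitan_binary(
#         name = "hello_world",
#         srcs = ["hello_world.c"],
#         deps = ["//sw/device/lib:example_dep"],
#     )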
|
the-stack_0_23329 | # sql/sqltypes.py
# Copyright (C) 2005-2022 the SQLAlchemy authors and contributors
# <see AUTHORS file>
#
# This module is part of SQLAlchemy and is released under
# the MIT License: https://www.opensource.org/licenses/mit-license.php
"""SQL specific types.
"""
from __future__ import annotations
import collections.abc as collections_abc
import datetime as dt
import decimal
import enum
import json
import pickle
from typing import Any
from typing import cast
from typing import Dict
from typing import List
from typing import Optional
from typing import overload
from typing import Sequence
from typing import Tuple
from typing import Type
from typing import TYPE_CHECKING
from typing import TypeVar
from typing import Union
from . import coercions
from . import elements
from . import operators
from . import roles
from . import type_api
from .base import NO_ARG
from .base import SchemaEventTarget
from .cache_key import HasCacheKey
from .elements import _NONE_NAME
from .elements import quoted_name
from .elements import Slice
from .elements import TypeCoerce as type_coerce # noqa
from .type_api import Emulated
from .type_api import NativeForEmulated # noqa
from .type_api import to_instance
from .type_api import TypeDecorator
from .type_api import TypeEngine
from .type_api import TypeEngineMixin
from .type_api import Variant # noqa
from .visitors import InternalTraversal
from .. import event
from .. import exc
from .. import inspection
from .. import util
from ..engine import processors
from ..util import langhelpers
from ..util import OrderedDict
from ..util.typing import Literal
if TYPE_CHECKING:
from .operators import OperatorType
from .type_api import _BindProcessorType
from .type_api import _ComparatorFactory
from .type_api import _ResultProcessorType
from ..engine.interfaces import Dialect
_T = TypeVar("_T", bound="Any")
_CT = TypeVar("_CT", bound=Any)
_TE = TypeVar("_TE", bound="TypeEngine[Any]")
class HasExpressionLookup(TypeEngineMixin):
"""Mixin expression adaptations based on lookup tables.
These rules are currently used by the numeric, integer and date types
which have detailed cross-expression coercion rules.
"""
@property
def _expression_adaptations(self):
raise NotImplementedError()
class Comparator(TypeEngine.Comparator[_CT]):
_blank_dict = util.EMPTY_DICT
def _adapt_expression(
self,
op: OperatorType,
other_comparator: TypeEngine.Comparator[Any],
) -> Tuple[OperatorType, TypeEngine[Any]]:
othertype = other_comparator.type._type_affinity
if TYPE_CHECKING:
assert isinstance(self.type, HasExpressionLookup)
lookup = self.type._expression_adaptations.get(
op, self._blank_dict
).get(othertype, self.type)
if lookup is othertype:
return (op, other_comparator.type)
elif lookup is self.type._type_affinity:
return (op, self.type)
else:
return (op, to_instance(lookup))
comparator_factory: _ComparatorFactory[Any] = Comparator
class Concatenable(TypeEngineMixin):
"""A mixin that marks a type as supporting 'concatenation',
typically strings."""
class Comparator(TypeEngine.Comparator[_T]):
def _adapt_expression(
self,
op: OperatorType,
other_comparator: TypeEngine.Comparator[Any],
) -> Tuple[OperatorType, TypeEngine[Any]]:
if op is operators.add and isinstance(
other_comparator,
(Concatenable.Comparator, NullType.Comparator),
):
return operators.concat_op, self.expr.type
else:
return super(Concatenable.Comparator, self)._adapt_expression(
op, other_comparator
)
comparator_factory: _ComparatorFactory[Any] = Comparator
class Indexable(TypeEngineMixin):
"""A mixin that marks a type as supporting indexing operations,
such as array or JSON structures.
.. versionadded:: 1.1.0
"""
class Comparator(TypeEngine.Comparator[_T]):
def _setup_getitem(self, index):
raise NotImplementedError()
def __getitem__(self, index):
(
adjusted_op,
adjusted_right_expr,
result_type,
) = self._setup_getitem(index)
return self.operate(
adjusted_op, adjusted_right_expr, result_type=result_type
)
comparator_factory = Comparator
class String(Concatenable, TypeEngine[str]):
"""The base for all string and character types.
In SQL, corresponds to VARCHAR.
The `length` field is usually required when the `String` type is
used within a CREATE TABLE statement, as VARCHAR requires a length
on most databases.
"""
__visit_name__ = "string"
def __init__(
# note pylance appears to require the "self" type in a constructor
# for the _T type to be correctly recognized when we send the
# class as the argument, e.g. `column("somecol", String)`
self,
length=None,
collation=None,
):
"""
Create a string-holding type.
:param length: optional, a length for the column for use in
DDL and CAST expressions. May be safely omitted if no ``CREATE
TABLE`` will be issued. Certain databases may require a
``length`` for use in DDL, and will raise an exception when
the ``CREATE TABLE`` DDL is issued if a ``VARCHAR``
with no length is included. Whether the value is
interpreted as bytes or characters is database specific.
:param collation: Optional, a column-level collation for
use in DDL and CAST expressions. Renders using the
COLLATE keyword supported by SQLite, MySQL, and PostgreSQL.
E.g.::
>>> from sqlalchemy import cast, select, String
>>> print(select(cast('some string', String(collation='utf8'))))
SELECT CAST(:param_1 AS VARCHAR COLLATE utf8) AS anon_1
.. note::
In most cases, the :class:`.Unicode` or :class:`.UnicodeText`
datatypes should be used for a :class:`_schema.Column` that expects
to store non-ascii data. These datatypes will ensure that the
correct types are used on the database.
"""
self.length = length
self.collation = collation
def _resolve_for_literal(self, value):
# I was SO PROUD of my regex trick, but we don't need it.
# re.search(r"[^\u0000-\u007F]", value)
if value.isascii():
return _STRING
else:
return _UNICODE
def literal_processor(self, dialect):
def process(value):
value = value.replace("'", "''")
if dialect.identifier_preparer._double_percents:
value = value.replace("%", "%%")
return "'%s'" % value
return process
def bind_processor(self, dialect):
return None
def result_processor(self, dialect, coltype):
return None
@property
def python_type(self):
return str
def get_dbapi_type(self, dbapi):
return dbapi.STRING
class Text(String):
"""A variably sized string type.
In SQL, usually corresponds to CLOB or TEXT. In general, TEXT objects
do not have a length; while some databases will accept a length
argument here, it will be rejected by others.
"""
__visit_name__ = "text"
class Unicode(String):
"""A variable length Unicode string type.
The :class:`.Unicode` type is a :class:`.String` subclass that assumes
input and output strings that may contain non-ASCII characters, and for
some backends implies an underlying column type that is explicitly
supporting of non-ASCII data, such as ``NVARCHAR`` on Oracle and SQL
Server. This will impact the output of ``CREATE TABLE`` statements and
``CAST`` functions at the dialect level.
The character encoding used by the :class:`.Unicode` type that is used to
transmit and receive data to the database is usually determined by the
DBAPI itself. All modern DBAPIs accommodate non-ASCII strings but may have
different methods of managing database encodings; if necessary, this
encoding should be configured as detailed in the notes for the target DBAPI
in the :ref:`dialect_toplevel` section.
In modern SQLAlchemy, use of the :class:`.Unicode` datatype does not
imply any encoding/decoding behavior within SQLAlchemy itself. In Python
3, all string objects are inherently Unicode capable, and SQLAlchemy
does not produce bytestring objects nor does it accommodate a DBAPI that
does not return Python Unicode objects in result sets for string values.
.. warning:: Some database backends, particularly SQL Server with pyodbc,
are known to have undesirable behaviors regarding data that is noted
as being of ``NVARCHAR`` type as opposed to ``VARCHAR``, including
datatype mismatch errors and non-use of indexes. See the section
on :meth:`.DialectEvents.do_setinputsizes` for background on working
around unicode character issues for backends like SQL Server with
pyodbc as well as cx_Oracle.
.. seealso::
:class:`.UnicodeText` - unlengthed textual counterpart
to :class:`.Unicode`.
:meth:`.DialectEvents.do_setinputsizes`
"""
__visit_name__ = "unicode"
def __init__(self, length=None, **kwargs):
"""
Create a :class:`.Unicode` object.
Parameters are the same as that of :class:`.String`.
"""
super(Unicode, self).__init__(length=length, **kwargs)
class UnicodeText(Text):
"""An unbounded-length Unicode string type.
See :class:`.Unicode` for details on the unicode
behavior of this object.
Like :class:`.Unicode`, usage the :class:`.UnicodeText` type implies a
unicode-capable type being used on the backend, such as
``NCLOB``, ``NTEXT``.
"""
__visit_name__ = "unicode_text"
def __init__(self, length=None, **kwargs):
"""
Create a Unicode-converting Text type.
Parameters are the same as that of :class:`_expression.TextClause`.
"""
super(UnicodeText, self).__init__(length=length, **kwargs)
class Integer(HasExpressionLookup, TypeEngine[int]):
"""A type for ``int`` integers."""
__visit_name__ = "integer"
if TYPE_CHECKING:
@util.ro_memoized_property
def _type_affinity(self) -> Type[Integer]:
...
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
@property
def python_type(self):
return int
def literal_processor(self, dialect):
def process(value):
return str(int(value))
return process
@util.memoized_property
def _expression_adaptations(self):
return {
operators.add: {
Date: Date,
Integer: self.__class__,
Numeric: Numeric,
},
operators.mul: {
Interval: Interval,
Integer: self.__class__,
Numeric: Numeric,
},
operators.truediv: {Integer: Numeric, Numeric: Numeric},
operators.floordiv: {Integer: self.__class__, Numeric: Numeric},
operators.sub: {Integer: self.__class__, Numeric: Numeric},
}
class SmallInteger(Integer):
"""A type for smaller ``int`` integers.
Typically generates a ``SMALLINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = "small_integer"
class BigInteger(Integer):
"""A type for bigger ``int`` integers.
Typically generates a ``BIGINT`` in DDL, and otherwise acts like
a normal :class:`.Integer` on the Python side.
"""
__visit_name__ = "big_integer"
_N = TypeVar("_N", bound=Union[decimal.Decimal, float])
class Numeric(HasExpressionLookup, TypeEngine[_N]):
"""A type for fixed precision numbers, such as ``NUMERIC`` or ``DECIMAL``.
This type returns Python ``decimal.Decimal`` objects by default, unless
the :paramref:`.Numeric.asdecimal` flag is set to False, in which case
they are coerced to Python ``float`` objects.
.. note::
The :class:`.Numeric` type is designed to receive data from a database
type that is explicitly known to be a decimal type
(e.g. ``DECIMAL``, ``NUMERIC``, others) and not a floating point
type (e.g. ``FLOAT``, ``REAL``, others).
If the database column on the server is in fact a floating-point
type, such as ``FLOAT`` or ``REAL``, use the :class:`.Float`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
.. note::
The Python ``decimal.Decimal`` class is generally slow
performing; cPython 3.3 has now switched to use the `cdecimal
<https://pypi.org/project/cdecimal/>`_ library natively. For
older Python versions, the ``cdecimal`` library can be patched
into any application where it will replace the ``decimal``
library fully, however this needs to be applied globally and
before any other modules have been imported, as follows::
import sys
import cdecimal
sys.modules["decimal"] = cdecimal
Note that the ``cdecimal`` and ``decimal`` libraries are **not
compatible with each other**, so patching ``cdecimal`` at the
global level is the only way it can be used effectively with
various DBAPIs that hardcode to import the ``decimal`` library.
"""
__visit_name__ = "numeric"
_default_decimal_return_scale = 10
def __init__(
self,
precision=None,
scale=None,
decimal_return_scale=None,
asdecimal=True,
):
"""
Construct a Numeric.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``.
:param scale: the numeric scale for use in DDL ``CREATE TABLE``.
:param asdecimal: default True. Return whether or not
values should be sent as Python Decimal objects, or
as floats. Different DBAPIs send one or the other based on
datatypes - the Numeric type will ensure that return values
are one or the other across DBAPIs consistently.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Types which
do include an explicit ".scale" value, such as the base
:class:`.Numeric` as well as the MySQL float types, will use the
value of ".scale" as the default for decimal_return_scale, if not
otherwise specified.
.. versionadded:: 0.9.0
When using the ``Numeric`` type, care should be taken to ensure
that the asdecimal setting is appropriate for the DBAPI in use -
when Numeric applies a conversion from Decimal->float or float->
Decimal, this conversion incurs an additional performance overhead
for all result columns received.
DBAPIs that return Decimal natively (e.g. psycopg2) will have
better accuracy and higher performance with a setting of ``True``,
as the native translation to Decimal reduces the amount of floating-
point issues at play, and the Numeric type itself doesn't need
to apply any further conversions. However, another DBAPI which
returns floats natively *will* incur an additional conversion
overhead, and is still subject to floating point data loss - in
which case ``asdecimal=False`` will at least remove the extra
conversion overhead.
"""
self.precision = precision
self.scale = scale
self.decimal_return_scale = decimal_return_scale
self.asdecimal = asdecimal
@property
def _effective_decimal_return_scale(self):
if self.decimal_return_scale is not None:
return self.decimal_return_scale
elif getattr(self, "scale", None) is not None:
return self.scale
else:
return self._default_decimal_return_scale
def get_dbapi_type(self, dbapi):
return dbapi.NUMBER
def literal_processor(self, dialect):
def process(value):
return str(value)
return process
@property
def python_type(self):
if self.asdecimal:
return decimal.Decimal
else:
return float
def bind_processor(self, dialect):
if dialect.supports_native_decimal:
return None
else:
return processors.to_float
def result_processor(self, dialect, coltype):
if self.asdecimal:
if dialect.supports_native_decimal:
# we're a "numeric", DBAPI will give us Decimal directly
return None
else:
# we're a "numeric", DBAPI returns floats, convert.
return processors.to_decimal_processor_factory(
decimal.Decimal,
self.scale
if self.scale is not None
else self._default_decimal_return_scale,
)
else:
if dialect.supports_native_decimal:
return processors.to_float
else:
return None
@util.memoized_property
def _expression_adaptations(self):
return {
operators.mul: {
Interval: Interval,
Numeric: self.__class__,
Integer: self.__class__,
},
operators.truediv: {
Numeric: self.__class__,
Integer: self.__class__,
},
operators.add: {Numeric: self.__class__, Integer: self.__class__},
operators.sub: {Numeric: self.__class__, Integer: self.__class__},
}
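# A short usage sketch of the ``asdecimal`` flag documented above (illustrative
# only, not part of this module):
#
#     from sqlalchemy import Column, Numeric
#     Column("price", Numeric(10, 2))                   # values return as decimal.Decimal
#     Column("price", Numeric(10, 2, asdecimal=False))  # values return as float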
class Float(Numeric[_N]):
"""Type representing floating point types, such as ``FLOAT`` or ``REAL``.
This type returns Python ``float`` objects by default, unless the
:paramref:`.Float.asdecimal` flag is set to True, in which case they
are coerced to ``decimal.Decimal`` objects.
.. note::
The :class:`.Float` type is designed to receive data from a database
type that is explicitly known to be a floating point type
(e.g. ``FLOAT``, ``REAL``, others)
and not a decimal type (e.g. ``DECIMAL``, ``NUMERIC``, others).
If the database column on the server is in fact a Numeric
type, such as ``DECIMAL`` or ``NUMERIC``, use the :class:`.Numeric`
type or a subclass, otherwise numeric coercion between
``float``/``Decimal`` may or may not function as expected.
"""
__visit_name__ = "float"
scale = None
@overload
def __init__(
self: Float[float],
precision: Optional[int] = ...,
asdecimal: Literal[False] = ...,
decimal_return_scale: Optional[int] = ...,
):
...
@overload
def __init__(
self: Float[decimal.Decimal],
precision: Optional[int] = ...,
asdecimal: Literal[True] = ...,
decimal_return_scale: Optional[int] = ...,
):
...
def __init__(
self: Float[_N],
precision: Optional[int] = None,
asdecimal: bool = False,
decimal_return_scale: Optional[int] = None,
):
r"""
Construct a Float.
:param precision: the numeric precision for use in DDL ``CREATE
TABLE``. Backends **should** attempt to ensure this precision
indicates a number of digits for the generic
:class:`_sqltypes.Float` datatype.
.. note:: For the Oracle backend, the
:paramref:`_sqltypes.Float.precision` parameter is not accepted
when rendering DDL, as Oracle does not support float precision
specified as a number of decimal places. Instead, use the
Oracle-specific :class:`_oracle.FLOAT` datatype and specify the
:paramref:`_oracle.FLOAT.binary_precision` parameter. This is new
in version 2.0 of SQLAlchemy.
To create a database agnostic :class:`_types.Float` that
separately specifies binary precision for Oracle, use
:meth:`_types.TypeEngine.with_variant` as follows::
from sqlalchemy import Column
from sqlalchemy import Float
from sqlalchemy.dialects import oracle
Column(
"float_data",
Float(5).with_variant(oracle.FLOAT(binary_precision=16), "oracle")
)
:param asdecimal: the same flag as that of :class:`.Numeric`, but
defaults to ``False``. Note that setting this flag to ``True``
results in floating point conversion.
:param decimal_return_scale: Default scale to use when converting
from floats to Python decimals. Floating point values will typically
be much longer due to decimal inaccuracy, and most floating point
database types don't have a notion of "scale", so by default the
float type looks for the first ten decimal places when converting.
Specifying this value will override that length. Note that the
MySQL float types, which do include "scale", will use "scale"
as the default for decimal_return_scale, if not otherwise specified.
.. versionadded:: 0.9.0
""" # noqa: E501
self.precision = precision
self.asdecimal = asdecimal
self.decimal_return_scale = decimal_return_scale
def result_processor(self, dialect, coltype):
if self.asdecimal:
return processors.to_decimal_processor_factory(
decimal.Decimal, self._effective_decimal_return_scale
)
elif dialect.supports_native_decimal:
return processors.to_float
else:
return None
class Double(Float[_N]):
"""A type for double ``FLOAT`` floating point types.
Typically generates a ``DOUBLE`` or ``DOUBLE_PRECISION`` in DDL,
and otherwise acts like a normal :class:`.Float` on the Python
side.
.. versionadded:: 2.0
"""
__visit_name__ = "double"
class DateTime(HasExpressionLookup, TypeEngine[dt.datetime]):
"""A type for ``datetime.datetime()`` objects.
Date and time types return objects from the Python ``datetime``
module. Most DBAPIs have built in support for the datetime
module, with the noted exception of SQLite. In the case of
SQLite, date and time types are stored as strings which are then
converted back to datetime objects when rows are returned.
For the time representation within the datetime type, some
backends include additional options, such as timezone support and
fractional seconds support. For fractional seconds, use the
dialect-specific datatype, such as :class:`.mysql.TIME`. For
timezone support, use at least the :class:`_types.TIMESTAMP` datatype,
if not the dialect-specific datatype object.
"""
__visit_name__ = "datetime"
def __init__(self, timezone=False):
"""Construct a new :class:`.DateTime`.
:param timezone: boolean. Indicates that the datetime type should
enable timezone support, if available on the
**base date/time-holding type only**. It is recommended
to make use of the :class:`_types.TIMESTAMP` datatype directly when
using this flag, as some databases include separate generic
date/time-holding types distinct from the timezone-capable
TIMESTAMP datatype, such as Oracle.
"""
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
def _resolve_for_literal(self, value):
with_timezone = value.tzinfo is not None
if with_timezone and not self.timezone:
return DATETIME_TIMEZONE
else:
return self
@property
def python_type(self):
return dt.datetime
@util.memoized_property
def _expression_adaptations(self):
# Based on https://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {Interval: self.__class__},
operators.sub: {Interval: self.__class__, DateTime: Interval},
}
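# Illustrative usage sketch (commented out, not part of this module): the
# adaptation map above means that subtracting two DateTime expressions yields
# an Interval-typed expression, while adding an Interval to a DateTime stays
# DateTime. Table and column names are arbitrary.
#
#   from sqlalchemy import Column, DateTime, Integer, MetaData, Table
#
#   events = Table(
#       "events",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column("started_at", DateTime(timezone=True)),
#       Column("finished_at", DateTime(timezone=True)),
#   )
#   duration = events.c.finished_at - events.c.started_at  # Interval-typed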
class Date(HasExpressionLookup, TypeEngine[dt.date]):
"""A type for ``datetime.date()`` objects."""
__visit_name__ = "date"
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.date
@util.memoized_property
def _expression_adaptations(self):
# Based on https://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {
Integer: self.__class__,
Interval: DateTime,
Time: DateTime,
},
operators.sub: {
# date - integer = date
Integer: self.__class__,
# date - date = integer.
Date: Integer,
Interval: DateTime,
# date - datetime = interval,
# this one is not in the PG docs
# but works
DateTime: Interval,
},
}
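# Illustrative usage sketch (commented out, not part of this module): per the
# adaptation map above, subtracting two Date columns produces an Integer-typed
# expression and adding an Integer to a Date stays Date. Names are arbitrary.
#
#   from sqlalchemy import Column, Date, Integer, MetaData, Table
#
#   bookings = Table(
#       "bookings",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column("check_in", Date),
#       Column("check_out", Date),
#   )
#   nights = bookings.c.check_out - bookings.c.check_in  # Integer-typed
#   next_day = bookings.c.check_in + 1                   # still Date-typed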
class Time(HasExpressionLookup, TypeEngine[dt.time]):
"""A type for ``datetime.time()`` objects."""
__visit_name__ = "time"
def __init__(self, timezone=False):
self.timezone = timezone
def get_dbapi_type(self, dbapi):
return dbapi.DATETIME
@property
def python_type(self):
return dt.time
def _resolve_for_literal(self, value):
with_timezone = value.tzinfo is not None
if with_timezone and not self.timezone:
return TIME_TIMEZONE
else:
return self
@util.memoized_property
def _expression_adaptations(self):
# Based on https://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {Date: DateTime, Interval: self.__class__},
operators.sub: {Time: Interval, Interval: self.__class__},
}
class _Binary(TypeEngine[bytes]):
"""Define base behavior for binary types."""
def __init__(self, length=None):
self.length = length
def literal_processor(self, dialect):
def process(value):
# TODO: this is useless for real world scenarios; implement
# real binary literals
value = value.decode(
dialect._legacy_binary_type_literal_encoding
).replace("'", "''")
return "'%s'" % value
return process
@property
def python_type(self):
return bytes
# Python 3 - sqlite3 doesn't need the `Binary` conversion
# here, though pg8000 does to indicate "bytea"
def bind_processor(self, dialect):
if dialect.dbapi is None:
return None
DBAPIBinary = dialect.dbapi.Binary
def process(value):
if value is not None:
return DBAPIBinary(value)
else:
return None
return process
# Python 3 has native bytes() type
# both sqlite3 and pg8000 seem to return it,
# psycopg2 as of 2.5 returns 'memoryview'
def result_processor(self, dialect, coltype):
def process(value):
if value is not None:
value = bytes(value)
return value
return process
def coerce_compared_value(self, op, value):
"""See :meth:`.TypeEngine.coerce_compared_value` for a description."""
if isinstance(value, str):
return self
else:
return super(_Binary, self).coerce_compared_value(op, value)
def get_dbapi_type(self, dbapi):
return dbapi.BINARY
class LargeBinary(_Binary):
"""A type for large binary byte data.
The :class:`.LargeBinary` type corresponds to a large and/or unlengthed
binary type for the target platform, such as BLOB on MySQL and BYTEA for
PostgreSQL. It also handles the necessary conversions for the DBAPI.
"""
__visit_name__ = "large_binary"
def __init__(self, length=None):
"""
Construct a LargeBinary type.
:param length: optional, a length for the column for use in
DDL statements, for those binary types that accept a length,
such as the MySQL BLOB type.
"""
_Binary.__init__(self, length=length)
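# Illustrative usage sketch (commented out, not part of this module): a
# LargeBinary column accepts Python bytes on the way in and returns bytes on
# the way out; the optional length only matters for backends whose binary DDL
# accepts one (e.g. MySQL BLOB). Names are arbitrary.
#
#   from sqlalchemy import Column, Integer, LargeBinary, MetaData, Table
#
#   attachments = Table(
#       "attachments",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column("payload", LargeBinary(length=2**24)),
#   )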
class SchemaType(SchemaEventTarget, TypeEngineMixin):
"""Add capabilities to a type which allow for schema-level DDL to be
associated with a type.
Supports types that must be explicitly created/dropped (i.e. PG ENUM type)
as well as types that are complemented by table or schema level
constraints, triggers, and other rules.
:class:`.SchemaType` classes can also be targets for the
:meth:`.DDLEvents.before_parent_attach` and
:meth:`.DDLEvents.after_parent_attach` events, where the events fire off
surrounding the association of the type object with a parent
:class:`_schema.Column`.
.. seealso::
:class:`.Enum`
:class:`.Boolean`
"""
_use_schema_map = True
name: Optional[str]
def __init__(
self,
name=None,
schema=None,
metadata=None,
inherit_schema=False,
quote=None,
_create_events=True,
):
if name is not None:
self.name = quoted_name(name, quote)
else:
self.name = None
self.schema = schema
self.metadata = metadata
self.inherit_schema = inherit_schema
self._create_events = _create_events
if _create_events and self.metadata:
event.listen(
self.metadata,
"before_create",
util.portable_instancemethod(self._on_metadata_create),
)
event.listen(
self.metadata,
"after_drop",
util.portable_instancemethod(self._on_metadata_drop),
)
def _set_parent(self, column, **kw):
# set parent hook is when this type is associated with a column.
# Column calls it for all SchemaEventTarget instances, either the
# base type and/or variants in _variant_mapping.
# we want to register a second hook to trigger when that column is
# associated with a table. in that event, we and all of our variants
# may want to set up some state on the table such as a CheckConstraint
# that will conditionally render at DDL render time.
# the base SchemaType also sets up events for
# on_table/metadata_create/drop in this method, which is used by
# "native" types with a separate CREATE/DROP e.g. Postgresql.ENUM
column._on_table_attach(util.portable_instancemethod(self._set_table))
def _variant_mapping_for_set_table(self, column):
if column.type._variant_mapping:
variant_mapping = dict(column.type._variant_mapping)
variant_mapping["_default"] = column.type
else:
variant_mapping = None
return variant_mapping
def _set_table(self, column, table):
if self.inherit_schema:
self.schema = table.schema
elif self.metadata and self.schema is None and self.metadata.schema:
self.schema = self.metadata.schema
if not self._create_events:
return
variant_mapping = self._variant_mapping_for_set_table(column)
event.listen(
table,
"before_create",
util.portable_instancemethod(
self._on_table_create, {"variant_mapping": variant_mapping}
),
)
event.listen(
table,
"after_drop",
util.portable_instancemethod(
self._on_table_drop, {"variant_mapping": variant_mapping}
),
)
if self.metadata is None:
# if SchemaType were created w/ a metadata argument, these
# events would already have been associated with that metadata
# and would preclude an association with table.metadata
event.listen(
table.metadata,
"before_create",
util.portable_instancemethod(
self._on_metadata_create,
{"variant_mapping": variant_mapping},
),
)
event.listen(
table.metadata,
"after_drop",
util.portable_instancemethod(
self._on_metadata_drop,
{"variant_mapping": variant_mapping},
),
)
def copy(self, **kw):
return self.adapt(
cast("Type[TypeEngine[Any]]", self.__class__),
_create_events=True,
)
@overload
def adapt(self, cls: Type[_TE], **kw: Any) -> _TE:
...
@overload
def adapt(self, cls: Type[TypeEngineMixin], **kw: Any) -> TypeEngine[Any]:
...
def adapt(
self, cls: Type[Union[TypeEngine[Any], TypeEngineMixin]], **kw: Any
) -> TypeEngine[Any]:
kw.setdefault("_create_events", False)
return super().adapt(cls, **kw)
def create(self, bind, checkfirst=False):
"""Issue CREATE DDL for this type, if applicable."""
t = self.dialect_impl(bind.dialect)
if isinstance(t, SchemaType) and t.__class__ is not self.__class__:
t.create(bind, checkfirst=checkfirst)
def drop(self, bind, checkfirst=False):
"""Issue DROP DDL for this type, if applicable."""
t = self.dialect_impl(bind.dialect)
if isinstance(t, SchemaType) and t.__class__ is not self.__class__:
t.drop(bind, checkfirst=checkfirst)
def _on_table_create(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if isinstance(t, SchemaType) and t.__class__ is not self.__class__:
t._on_table_create(target, bind, **kw)
def _on_table_drop(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if isinstance(t, SchemaType) and t.__class__ is not self.__class__:
t._on_table_drop(target, bind, **kw)
def _on_metadata_create(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if isinstance(t, SchemaType) and t.__class__ is not self.__class__:
t._on_metadata_create(target, bind, **kw)
def _on_metadata_drop(self, target, bind, **kw):
if not self._is_impl_for_variant(bind.dialect, kw):
return
t = self.dialect_impl(bind.dialect)
if isinstance(t, SchemaType) and t.__class__ is not self.__class__:
t._on_metadata_drop(target, bind, **kw)
def _is_impl_for_variant(self, dialect, kw):
variant_mapping = kw.pop("variant_mapping", None)
if not variant_mapping:
return True
# for types that have _variant_mapping, all the impls in the map
# that are SchemaEventTarget subclasses get set up as event holders.
# this is so that constructs that need
# to be associated with the Table at dialect-agnostic time etc. like
# CheckConstraints can be set up with that table. they then add
# to these constraints a DDL check_rule that among other things
# will check this _is_impl_for_variant() method to determine when
# the dialect is known that we are part of the table's DDL sequence.
# since PostgreSQL is the only DB that has ARRAY this can only
# be integration tested by PG-specific tests
def _we_are_the_impl(typ):
return (
typ is self or isinstance(typ, ARRAY) and typ.item_type is self
)
if dialect.name in variant_mapping and _we_are_the_impl(
variant_mapping[dialect.name]
):
return True
elif dialect.name not in variant_mapping:
return _we_are_the_impl(variant_mapping["_default"])
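# Illustrative usage sketch (commented out, not part of this module): a
# SchemaType such as Enum can be bound to a MetaData collection so that its
# CREATE / DROP is emitted with metadata.create_all() / drop_all() rather than
# per-table. Names are arbitrary.
#
#   from sqlalchemy import Column, Enum, Integer, MetaData, Table
#
#   metadata = MetaData()
#   status_type = Enum(
#       "new", "open", "closed", name="ticket_status", metadata=metadata
#   )
#   tickets = Table(
#       "tickets",
#       metadata,
#       Column("id", Integer, primary_key=True),
#       Column("status", status_type),
#   )
#   # metadata.create_all(engine) emits CREATE TYPE on PostgreSQL before
#   # CREATE TABLE; drop_all() drops the type after the table.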
class Enum(String, SchemaType, Emulated, TypeEngine[Union[str, enum.Enum]]):
"""Generic Enum Type.
The :class:`.Enum` type provides a set of possible string values
which the column is constrained towards.
The :class:`.Enum` type will make use of the backend's native "ENUM"
type if one is available; otherwise, it uses a VARCHAR datatype.
An option also exists to automatically produce a CHECK constraint
when the VARCHAR (so called "non-native") variant is produced;
see the :paramref:`.Enum.create_constraint` flag.
The :class:`.Enum` type also provides in-Python validation of string
values during both read and write operations. When reading a value
from the database in a result set, the string value is always checked
against the list of possible values and a ``LookupError`` is raised
if no match is found. When passing a value to the database as a
plain string within a SQL statement, if the
:paramref:`.Enum.validate_strings` parameter is
set to True, a ``LookupError`` is raised for any string value that's
not located in the given list of possible values; note that this
impacts usage of LIKE expressions with enumerated values (an unusual
use case).
.. versionchanged:: 1.1 the :class:`.Enum` type now provides in-Python
validation of input values as well as on data being returned by
the database.
The source of enumerated values may be a list of string values, or
alternatively a PEP-435-compliant enumerated class. For the purposes
of the :class:`.Enum` datatype, this class need only provide a
``__members__`` attribute.
When using an enumerated class, the enumerated objects are used
both for input and output, rather than strings as is the case with
a plain-string enumerated type::
import enum
class MyEnum(enum.Enum):
one = 1
two = 2
three = 3
t = Table(
'data', MetaData(),
Column('value', Enum(MyEnum))
)
connection.execute(t.insert(), {"value": MyEnum.two})
assert connection.scalar(t.select()) is MyEnum.two
Above, the string names of each element, e.g. "one", "two", "three",
are persisted to the database; the values of the Python Enum, here
indicated as integers, are **not** used; the value of each enum can
therefore be any kind of Python object whether or not it is persistable.
In order to persist the values and not the names, the
:paramref:`.Enum.values_callable` parameter may be used. The value of
this parameter is a user-supplied callable, which is intended to be used
with a PEP-435-compliant enumerated class and returns a list of string
values to be persisted. For a simple enumeration that uses string values,
a callable such as ``lambda x: [e.value for e in x]`` is sufficient.
.. versionadded:: 1.1 - support for PEP-435-style enumerated
classes.
.. seealso::
:class:`_postgresql.ENUM` - PostgreSQL-specific type,
which has additional functionality.
:class:`.mysql.ENUM` - MySQL-specific type
"""
__visit_name__ = "enum"
def __init__(self, *enums, **kw):
r"""Construct an enum.
Keyword arguments which don't apply to a specific backend are ignored
by that backend.
:param \*enums: either exactly one PEP-435 compliant enumerated type
or one or more string labels.
.. versionadded:: 1.1 a PEP-435 style enumerated class may be
passed.
:param create_constraint: defaults to False. When creating a
non-native enumerated type, also build a CHECK constraint on the
database against the valid values.
.. note:: it is strongly recommended that the CHECK constraint
have an explicit name in order to support schema-management
concerns. This can be established either by setting the
:paramref:`.Enum.name` parameter or by setting up an
appropriate naming convention; see
:ref:`constraint_naming_conventions` for background.
.. versionchanged:: 1.4 - this flag now defaults to False, meaning
no CHECK constraint is generated for a non-native enumerated
type.
:param metadata: Associate this type directly with a ``MetaData``
object. For types that exist on the target database as an
independent schema construct (PostgreSQL), this type will be
created and dropped within ``create_all()`` and ``drop_all()``
operations. If the type is not associated with any ``MetaData``
object, it will associate itself with each ``Table`` in which it is
used, and will be created when any of those individual tables are
created, after a check is performed for its existence. The type is
only dropped when ``drop_all()`` is called for that ``Table``
object's metadata, however.
The value of the :paramref:`_schema.MetaData.schema` parameter of
the :class:`_schema.MetaData` object, if set, will be used as the
default value of the :paramref:`_types.Enum.schema` on this object
if an explicit value is not otherwise supplied.
.. versionchanged:: 1.4.12 :class:`_types.Enum` inherits the
:paramref:`_schema.MetaData.schema` parameter of the
:class:`_schema.MetaData` object if present, when passed using
the :paramref:`_types.Enum.metadata` parameter.
:param name: The name of this type. This is required for PostgreSQL
and any future supported database which requires an explicitly
named type, or an explicitly named constraint in order to generate
the type and/or a table that uses it. If a PEP-435 enumerated
class was used, its name (converted to lower case) is used by
default.
:param native_enum: Use the database's native ENUM type when
available. Defaults to True. When False, uses VARCHAR + check
constraint for all backends. When False, the VARCHAR length can be
controlled with :paramref:`.Enum.length`; currently "length" is
ignored if native_enum=True.
:param length: Allows specifying a custom length for the VARCHAR
when :paramref:`.Enum.native_enum` is False. By default it uses the
length of the longest value.
.. versionadded:: 1.3.16
:param schema: Schema name of this type. For types that exist on the
target database as an independent schema construct (PostgreSQL),
this parameter specifies the named schema in which the type is
present.
If not present, the schema name will be taken from the
:class:`_schema.MetaData` collection if passed as
:paramref:`_types.Enum.metadata`, for a :class:`_schema.MetaData`
that includes the :paramref:`_schema.MetaData.schema` parameter.
.. versionchanged:: 1.4.12 :class:`_types.Enum` inherits the
:paramref:`_schema.MetaData.schema` parameter of the
:class:`_schema.MetaData` object if present, when passed using
the :paramref:`_types.Enum.metadata` parameter.
Otherwise, if the :paramref:`_types.Enum.inherit_schema` flag is set
to ``True``, the schema will be inherited from the associated
:class:`_schema.Table` object if any; when
:paramref:`_types.Enum.inherit_schema` is at its default of
``False``, the owning table's schema is **not** used.
:param quote: Set explicit quoting preferences for the type's name.
:param inherit_schema: When ``True``, the "schema" from the owning
:class:`_schema.Table`
will be copied to the "schema" attribute of this
:class:`.Enum`, replacing whatever value was passed for the
``schema`` attribute. This also takes effect when using the
:meth:`_schema.Table.to_metadata` operation.
:param validate_strings: when True, string values that are being
passed to the database in a SQL statement will be checked
for validity against the list of enumerated values. Unrecognized
values will result in a ``LookupError`` being raised.
.. versionadded:: 1.1.0b2
:param values_callable: A callable which will be passed the PEP-435
compliant enumerated type, which should then return a list of string
values to be persisted. This allows for alternate usages such as
using the string value of an enum to be persisted to the database
instead of its name.
.. versionadded:: 1.2.3
:param sort_key_function: a Python callable which may be used as the
"key" argument in the Python ``sorted()`` built-in. The SQLAlchemy
ORM requires that primary key columns which are mapped must
be sortable in some way. When using an unsortable enumeration
object such as a Python 3 ``Enum`` object, this parameter may be
used to set a default sort key function for the objects. By
default, the database value of the enumeration is used as the
sorting function.
.. versionadded:: 1.3.8
:param omit_aliases: A boolean that when true will remove aliases from
pep 435 enums. defaults to ``True``.
.. versionchanged:: 2.0 This parameter now defaults to True.
"""
self._enum_init(enums, kw)
@property
def _enums_argument(self):
if self.enum_class is not None:
return [self.enum_class]
else:
return self.enums
def _enum_init(self, enums, kw):
"""internal init for :class:`.Enum` and subclasses.
friendly init helper used by subclasses to remove
all the Enum-specific keyword arguments from kw. Allows all
other arguments in kw to pass through.
"""
self.native_enum = kw.pop("native_enum", True)
self.create_constraint = kw.pop("create_constraint", False)
self.values_callable = kw.pop("values_callable", None)
self._sort_key_function = kw.pop("sort_key_function", NO_ARG)
length_arg = kw.pop("length", NO_ARG)
self._omit_aliases = kw.pop("omit_aliases", True)
_disable_warnings = kw.pop("_disable_warnings", False)
values, objects = self._parse_into_values(enums, kw)
self._setup_for_values(values, objects, kw)
self.validate_strings = kw.pop("validate_strings", False)
if self.enums:
self._default_length = length = max(len(x) for x in self.enums)
else:
self._default_length = length = 0
if length_arg is not NO_ARG:
if self.native_enum:
if not _disable_warnings:
util.warn(
"Enum 'length' argument is currently ignored unless "
"native_enum is specified as False, including for DDL "
"that renders VARCHAR in any case. This may change "
"in a future release."
)
else:
if not _disable_warnings and length_arg < length:
raise ValueError(
"When provided, length must be larger or equal"
" than the length of the longest enum value. %s < %s"
% (length_arg, length)
)
length = length_arg
self._valid_lookup[None] = self._object_lookup[None] = None
super(Enum, self).__init__(length=length)
if self.enum_class:
kw.setdefault("name", self.enum_class.__name__.lower())
SchemaType.__init__(
self,
name=kw.pop("name", None),
schema=kw.pop("schema", None),
metadata=kw.pop("metadata", None),
inherit_schema=kw.pop("inherit_schema", False),
quote=kw.pop("quote", None),
_create_events=kw.pop("_create_events", True),
)
def _parse_into_values(self, enums, kw):
if not enums and "_enums" in kw:
enums = kw.pop("_enums")
if len(enums) == 1 and hasattr(enums[0], "__members__"):
self.enum_class = enums[0]
_members = self.enum_class.__members__
if self._omit_aliases is True:
# remove aliases
members = OrderedDict(
(n, v) for n, v in _members.items() if v.name == n
)
else:
members = _members
if self.values_callable:
values = self.values_callable(self.enum_class)
else:
values = list(members)
objects = [members[k] for k in members]
return values, objects
else:
self.enum_class = None
return enums, enums
def _setup_for_values(self, values, objects, kw):
self.enums = list(values)
self._valid_lookup = dict(zip(reversed(objects), reversed(values)))
self._object_lookup = dict(zip(values, objects))
self._valid_lookup.update(
[
(value, self._valid_lookup[self._object_lookup[value]])
for value in values
]
)
@property
def sort_key_function(self):
if self._sort_key_function is NO_ARG:
return self._db_value_for_elem
else:
return self._sort_key_function
@property
def native(self):
return self.native_enum
def _db_value_for_elem(self, elem):
try:
return self._valid_lookup[elem]
except KeyError as err:
# for unknown string values, we return as is. While we can
# validate these if we wanted, that does not allow for lesser-used
# end-user use cases, such as using a LIKE comparison with an enum,
# or for an application that wishes to apply string tests to an
# ENUM (see [ticket:3725]). While we can decide to differentiate
# here between an INSERT statement and a criteria used in a SELECT,
# for now we're staying conservative w/ behavioral changes (perhaps
# someone has a trigger that handles strings on INSERT)
if not self.validate_strings and isinstance(elem, str):
return elem
else:
raise LookupError(
"'%s' is not among the defined enum values. "
"Enum name: %s. Possible values: %s"
% (
elem,
self.name,
langhelpers.repr_tuple_names(self.enums),
)
) from err
class Comparator(String.Comparator[str]):
type: String
def _adapt_expression(
self,
op: OperatorType,
other_comparator: TypeEngine.Comparator[Any],
) -> Tuple[OperatorType, TypeEngine[Any]]:
op, typ = super(Enum.Comparator, self)._adapt_expression(
op, other_comparator
)
if op is operators.concat_op:
typ = String(self.type.length)
return op, typ
comparator_factory = Comparator
def _object_value_for_elem(self, elem):
try:
return self._object_lookup[elem]
except KeyError as err:
raise LookupError(
"'%s' is not among the defined enum values. "
"Enum name: %s. Possible values: %s"
% (
elem,
self.name,
langhelpers.repr_tuple_names(self.enums),
)
) from err
def __repr__(self):
return util.generic_repr(
self,
additional_kw=[
("native_enum", True),
("create_constraint", False),
("length", self._default_length),
],
to_inspect=[Enum, SchemaType],
)
def as_generic(self, allow_nulltype=False):
if hasattr(self, "enums"):
args = self.enums
else:
raise NotImplementedError(
"TypeEngine.as_generic() heuristic "
"is undefined for types that inherit Enum but do not have "
"an `enums` attribute."
)
return util.constructor_copy(
self, self._generic_type_affinity, *args, _disable_warnings=True
)
def adapt_to_emulated(self, impltype, **kw):
kw.setdefault("validate_strings", self.validate_strings)
kw.setdefault("name", self.name)
kw["_disable_warnings"] = True
kw.setdefault("schema", self.schema)
kw.setdefault("inherit_schema", self.inherit_schema)
kw.setdefault("metadata", self.metadata)
kw.setdefault("_create_events", False)
kw.setdefault("native_enum", self.native_enum)
kw.setdefault("values_callable", self.values_callable)
kw.setdefault("create_constraint", self.create_constraint)
kw.setdefault("length", self.length)
kw.setdefault("omit_aliases", self._omit_aliases)
assert "_enums" in kw
return impltype(**kw)
def adapt(self, impltype, **kw):
kw["_enums"] = self._enums_argument
kw["_disable_warnings"] = True
return super(Enum, self).adapt(impltype, **kw)
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not self.native_enum or not compiler.dialect.supports_native_enum
)
@util.preload_module("sqlalchemy.sql.schema")
def _set_table(self, column, table):
schema = util.preloaded.sql_schema
SchemaType._set_table(self, column, table)
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, String()).in_(self.enums),
name=_NONE_NAME if self.name is None else self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint,
{"variant_mapping": variant_mapping},
),
_type_bound=True,
)
assert e.table is table
def literal_processor(self, dialect):
parent_processor = super(Enum, self).literal_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
if parent_processor:
value = parent_processor(value)
return value
return process
def bind_processor(self, dialect):
parent_processor = super(Enum, self).bind_processor(dialect)
def process(value):
value = self._db_value_for_elem(value)
if parent_processor:
value = parent_processor(value)
return value
return process
def result_processor(self, dialect, coltype):
parent_processor = super(Enum, self).result_processor(dialect, coltype)
def process(value):
if parent_processor:
value = parent_processor(value)
value = self._object_value_for_elem(value)
return value
return process
def copy(self, **kw):
return SchemaType.copy(self, **kw)
@property
def python_type(self):
if self.enum_class:
return self.enum_class
else:
return super(Enum, self).python_type
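# Illustrative usage sketch (commented out, not part of this module):
# persisting the *values* of a PEP-435 enumeration instead of its member
# names, via values_callable, as described in the Enum docstring above.
# Names are arbitrary.
#
#   import enum
#   from sqlalchemy import Column, Enum, Integer, MetaData, Table
#
#   class OrderState(enum.Enum):
#       pending = "P"
#       shipped = "S"
#
#   orders = Table(
#       "orders",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column(
#           "state",
#           Enum(OrderState, values_callable=lambda e: [m.value for m in e]),
#       ),
#   )
#   # "P" / "S" are stored in the database; OrderState members come back out.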
class PickleType(TypeDecorator[object]):
"""Holds Python objects, which are serialized using pickle.
PickleType builds upon the Binary type to apply Python's
``pickle.dumps()`` to incoming objects, and ``pickle.loads()`` on
the way out, allowing any pickleable Python object to be stored as
a serialized binary field.
To allow ORM change events to propagate for elements associated
with :class:`.PickleType`, see :ref:`mutable_toplevel`.
"""
impl = LargeBinary
cache_ok = True
def __init__(
self,
protocol=pickle.HIGHEST_PROTOCOL,
pickler=None,
comparator=None,
impl=None,
):
"""
Construct a PickleType.
:param protocol: defaults to ``pickle.HIGHEST_PROTOCOL``.
:param pickler: defaults to pickle. May be any object with
pickle-compatible ``dumps`` and ``loads`` methods.
:param comparator: a 2-arg callable predicate used
to compare values of this type. If left as ``None``,
the Python "equals" operator is used to compare values.
:param impl: A binary-storing :class:`_types.TypeEngine` class or
instance to use in place of the default :class:`_types.LargeBinary`.
For example the :class:`_mysql.LONGBLOB` class may be more effective
when using MySQL.
.. versionadded:: 1.4.20
"""
self.protocol = protocol
self.pickler = pickler or pickle
self.comparator = comparator
super(PickleType, self).__init__()
if impl:
self.impl = to_instance(impl)
def __reduce__(self):
return PickleType, (self.protocol, None, self.comparator)
def bind_processor(self, dialect):
impl_processor = self.impl_instance.bind_processor(dialect)
dumps = self.pickler.dumps
protocol = self.protocol
if impl_processor:
fixed_impl_processor = impl_processor
def process(value):
if value is not None:
value = dumps(value, protocol)
return fixed_impl_processor(value)
else:
def process(value):
if value is not None:
value = dumps(value, protocol)
return value
return process
def result_processor(self, dialect, coltype):
impl_processor = self.impl_instance.result_processor(dialect, coltype)
loads = self.pickler.loads
if impl_processor:
fixed_impl_processor = impl_processor
def process(value):
value = fixed_impl_processor(value)
if value is None:
return None
return loads(value)
else:
def process(value):
if value is None:
return None
return loads(value)
return process
def compare_values(self, x, y):
if self.comparator:
return self.comparator(x, y)
else:
return x == y
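# Illustrative usage sketch (commented out, not part of this module):
# PickleType stores arbitrary picklable Python objects in a LargeBinary (or
# supplied impl) column. Names are arbitrary.
#
#   from sqlalchemy import Column, Integer, MetaData, PickleType, Table
#
#   jobs = Table(
#       "jobs",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column("options", PickleType()),
#   )
#   # connection.execute(jobs.insert(), {"options": {"retries": 3}})
#   # the dict is pickled on the way in and unpickled on the way out.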
class Boolean(SchemaType, Emulated, TypeEngine[bool]):
"""A bool datatype.
:class:`.Boolean` typically uses BOOLEAN or SMALLINT on the DDL side,
and on the Python side deals in ``True`` or ``False``.
The :class:`.Boolean` datatype currently has two levels of assertion
that the values persisted are simple true/false values. For all
backends, only the Python values ``None``, ``True``, ``False``, ``1``
or ``0`` are accepted as parameter values. For those backends that
don't support a "native boolean" datatype, an option exists to
also create a CHECK constraint on the target column.
.. versionchanged:: 1.2 the :class:`.Boolean` datatype now asserts that
incoming Python values are already in pure boolean form.
"""
__visit_name__ = "boolean"
native = True
def __init__(
self,
create_constraint=False,
name=None,
_create_events=True,
):
"""Construct a Boolean.
:param create_constraint: defaults to False. If the boolean
is generated as an int/smallint, also create a CHECK constraint
on the table that ensures 1 or 0 as a value.
.. note:: it is strongly recommended that the CHECK constraint
have an explicit name in order to support schema-management
concerns. This can be established either by setting the
:paramref:`.Boolean.name` parameter or by setting up an
appropriate naming convention; see
:ref:`constraint_naming_conventions` for background.
.. versionchanged:: 1.4 - this flag now defaults to False, meaning
no CHECK constraint is generated for a non-native boolean
type.
:param name: if a CHECK constraint is generated, specify
the name of the constraint.
"""
self.create_constraint = create_constraint
self.name = name
self._create_events = _create_events
def _should_create_constraint(self, compiler, **kw):
if not self._is_impl_for_variant(compiler.dialect, kw):
return False
return (
not compiler.dialect.supports_native_boolean
and compiler.dialect.non_native_boolean_check_constraint
)
@util.preload_module("sqlalchemy.sql.schema")
def _set_table(self, column, table):
schema = util.preloaded.sql_schema
if not self.create_constraint:
return
variant_mapping = self._variant_mapping_for_set_table(column)
e = schema.CheckConstraint(
type_coerce(column, self).in_([0, 1]),
name=_NONE_NAME if self.name is None else self.name,
_create_rule=util.portable_instancemethod(
self._should_create_constraint,
{"variant_mapping": variant_mapping},
),
_type_bound=True,
)
assert e.table is table
@property
def python_type(self):
return bool
_strict_bools = frozenset([None, True, False])
def _strict_as_bool(self, value):
if value not in self._strict_bools:
if not isinstance(value, int):
raise TypeError("Not a boolean value: %r" % (value,))
else:
raise ValueError(
"Value %r is not None, True, or False" % (value,)
)
return value
def literal_processor(self, dialect):
compiler = dialect.statement_compiler(dialect, None)
true = compiler.visit_true(None)
false = compiler.visit_false(None)
def process(value):
return true if self._strict_as_bool(value) else false
return process
def bind_processor(self, dialect):
_strict_as_bool = self._strict_as_bool
_coerce: Union[Type[bool], Type[int]]
if dialect.supports_native_boolean:
_coerce = bool
else:
_coerce = int
def process(value):
value = _strict_as_bool(value)
if value is not None:
value = _coerce(value)
return value
return process
def result_processor(self, dialect, coltype):
if dialect.supports_native_boolean:
return None
else:
return processors.int_to_boolean
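# Illustrative usage sketch (commented out, not part of this module): on a
# backend without a native boolean type, create_constraint=True adds a named
# CHECK constraint limiting the column to 0/1, per the docstring above. Names
# are arbitrary.
#
#   from sqlalchemy import Boolean, Column, Integer, MetaData, Table
#
#   flags = Table(
#       "flags",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column(
#           "is_active",
#           Boolean(create_constraint=True, name="ck_flags_is_active"),
#       ),
#   )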
class _AbstractInterval(HasExpressionLookup, TypeEngine[dt.timedelta]):
@util.memoized_property
def _expression_adaptations(self):
# Based on https://www.postgresql.org/docs/current/\
# static/functions-datetime.html.
return {
operators.add: {
Date: DateTime,
Interval: self.__class__,
DateTime: DateTime,
Time: Time,
},
operators.sub: {Interval: self.__class__},
operators.mul: {Numeric: self.__class__},
operators.truediv: {Numeric: self.__class__},
}
@util.ro_non_memoized_property
def _type_affinity(self) -> Type[Interval]:
return Interval
class Interval(Emulated, _AbstractInterval, TypeDecorator[dt.timedelta]):
"""A type for ``datetime.timedelta()`` objects.
The Interval type deals with ``datetime.timedelta`` objects. In
PostgreSQL, the native ``INTERVAL`` type is used; for others, the
value is stored as a datetime which is relative to the "epoch"
(Jan. 1, 1970).
Note that the ``Interval`` type does not currently provide date arithmetic
operations on platforms which do not support interval types natively. Such
operations usually require transformation of both sides of the expression
(such as, conversion of both sides into integer epoch values first) which
currently is a manual procedure (such as via
:attr:`~sqlalchemy.sql.expression.func`).
"""
impl = DateTime
epoch = dt.datetime.utcfromtimestamp(0)
cache_ok = True
def __init__(self, native=True, second_precision=None, day_precision=None):
"""Construct an Interval object.
:param native: when True, use the actual
INTERVAL type provided by the database, if
supported (currently PostgreSQL, Oracle).
Otherwise, represent the interval data as
an epoch value regardless.
:param second_precision: For native interval types
which support a "fractional seconds precision" parameter,
i.e. Oracle and PostgreSQL
:param day_precision: for native interval types which
support a "day precision" parameter, i.e. Oracle.
"""
super(Interval, self).__init__()
self.native = native
self.second_precision = second_precision
self.day_precision = day_precision
class Comparator(
TypeDecorator.Comparator[_CT],
_AbstractInterval.Comparator[_CT],
):
pass
comparator_factory = Comparator
@property
def python_type(self):
return dt.timedelta
def adapt_to_emulated(self, impltype, **kw):
return _AbstractInterval.adapt(self, impltype, **kw)
def coerce_compared_value(self, op, value):
return self.impl_instance.coerce_compared_value(op, value)
def bind_processor(
self, dialect: Dialect
) -> _BindProcessorType[dt.timedelta]:
if TYPE_CHECKING:
assert isinstance(self.impl_instance, DateTime)
impl_processor = self.impl_instance.bind_processor(dialect)
epoch = self.epoch
if impl_processor:
fixed_impl_processor = impl_processor
def process(
value: Optional[dt.timedelta],
) -> Any:
if value is not None:
dt_value = epoch + value
else:
dt_value = None
return fixed_impl_processor(dt_value)
else:
def process(
value: Optional[dt.timedelta],
) -> Any:
if value is not None:
dt_value = epoch + value
else:
dt_value = None
return dt_value
return process
def result_processor(
self, dialect: Dialect, coltype: Any
) -> _ResultProcessorType[dt.timedelta]:
if TYPE_CHECKING:
assert isinstance(self.impl_instance, DateTime)
impl_processor = self.impl_instance.result_processor(dialect, coltype)
epoch = self.epoch
if impl_processor:
fixed_impl_processor = impl_processor
def process(value: Any) -> Optional[dt.timedelta]:
dt_value = fixed_impl_processor(value)
if dt_value is None:
return None
return dt_value - epoch
else:
def process(value: Any) -> Optional[dt.timedelta]:
if value is None:
return None
return value - epoch # type: ignore
return process
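# Illustrative usage sketch (commented out, not part of this module): on
# backends without a native INTERVAL type, the processors above store a
# timedelta as epoch + delta (a DateTime) and subtract the epoch again when
# reading. Names are arbitrary.
#
#   import datetime
#   from sqlalchemy import Column, Integer, Interval, MetaData, Table
#
#   tasks = Table(
#       "tasks",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column("time_spent", Interval()),
#   )
#   # connection.execute(tasks.insert(),
#   #                    {"time_spent": datetime.timedelta(hours=2)})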
class JSON(Indexable, TypeEngine[Any]):
"""Represent a SQL JSON type.
.. note:: :class:`_types.JSON`
is provided as a facade for vendor-specific
JSON types. Since it supports JSON SQL operations, it only
works on backends that have an actual JSON type, currently:
* PostgreSQL - see :class:`sqlalchemy.dialects.postgresql.JSON` and
:class:`sqlalchemy.dialects.postgresql.JSONB` for backend-specific
notes
* MySQL - see
:class:`sqlalchemy.dialects.mysql.JSON` for backend-specific notes
* SQLite as of version 3.9 - see
:class:`sqlalchemy.dialects.sqlite.JSON` for backend-specific notes
* Microsoft SQL Server 2016 and later - see
:class:`sqlalchemy.dialects.mssql.JSON` for backend-specific notes
:class:`_types.JSON` is part of the Core in support of the growing
popularity of native JSON datatypes.
The :class:`_types.JSON` type stores arbitrary JSON format data, e.g.::
data_table = Table('data_table', metadata,
Column('id', Integer, primary_key=True),
Column('data', JSON)
)
with engine.connect() as conn:
conn.execute(
data_table.insert(),
{"data": {"key1": "value1", "key2": "value2"}}
)
**JSON-Specific Expression Operators**
The :class:`_types.JSON`
datatype provides these additional SQL operations:
* Keyed index operations::
data_table.c.data['some key']
* Integer index operations::
data_table.c.data[3]
* Path index operations::
data_table.c.data[('key_1', 'key_2', 5, ..., 'key_n')]
* Data casters for specific JSON element types, subsequent to an index
or path operation being invoked::
data_table.c.data["some key"].as_integer()
.. versionadded:: 1.3.11
Additional operations may be available from the dialect-specific versions
of :class:`_types.JSON`, such as
:class:`sqlalchemy.dialects.postgresql.JSON` and
:class:`sqlalchemy.dialects.postgresql.JSONB` which both offer additional
PostgreSQL-specific operations.
**Casting JSON Elements to Other Types**
Index operations, i.e. those invoked by calling upon the expression using
the Python bracket operator as in ``some_column['some key']``, return an
expression object whose type defaults to :class:`_types.JSON` by default,
so that
further JSON-oriented instructions may be called upon the result type.
However, it is likely more common that an index operation is expected
to return a specific scalar element, such as a string or integer. In
order to provide access to these elements in a backend-agnostic way,
a series of data casters are provided:
* :meth:`.JSON.Comparator.as_string` - return the element as a string
* :meth:`.JSON.Comparator.as_boolean` - return the element as a boolean
* :meth:`.JSON.Comparator.as_float` - return the element as a float
* :meth:`.JSON.Comparator.as_integer` - return the element as an integer
These data casters are implemented by supporting dialects in order to
assure that comparisons to the above types will work as expected, such as::
# integer comparison
data_table.c.data["some_integer_key"].as_integer() == 5
# boolean comparison
data_table.c.data["some_boolean"].as_boolean() == True
.. versionadded:: 1.3.11 Added type-specific casters for the basic JSON
data element types.
.. note::
The data caster functions are new in version 1.3.11, and supersede
the previous documented approaches of using CAST; for reference,
this looked like::
from sqlalchemy import cast, type_coerce
from sqlalchemy import String, JSON
cast(
data_table.c.data['some_key'], String
) == type_coerce(55, JSON)
The above case now works directly as::
data_table.c.data['some_key'].as_integer() == 5
For details on the previous comparison approach within the 1.3.x
series, see the documentation for SQLAlchemy 1.2 or the included HTML
files in the doc/ directory of the version's distribution.
**Detecting Changes in JSON columns when using the ORM**
The :class:`_types.JSON` type, when used with the SQLAlchemy ORM, does not
detect in-place mutations to the structure. In order to detect these, the
:mod:`sqlalchemy.ext.mutable` extension must be used. This extension will
allow "in-place" changes to the datastructure to produce events which
will be detected by the unit of work. See the example at :class:`.HSTORE`
for a simple example involving a dictionary.
**Support for JSON null vs. SQL NULL**
When working with NULL values, the :class:`_types.JSON` type recommends the
use of two specific constants in order to differentiate between a column
that evaluates to SQL NULL, e.g. no value, vs. the JSON-encoded string of
``"null"``. To insert or select against a value that is SQL NULL, use the
constant :func:`.null`. This symbol may be passed as a parameter value
specifically when using the :class:`_types.JSON` datatype, which contains
special logic that interprets this symbol to mean that the column value
should be SQL NULL as opposed to JSON ``"null"``::
from sqlalchemy import null
conn.execute(table.insert(), {"json_value": null()})
To insert or select against a value that is JSON ``"null"``, use the
constant :attr:`_types.JSON.NULL`::
conn.execute(table.insert(), {"json_value": JSON.NULL})
The :class:`_types.JSON` type supports a flag
:paramref:`_types.JSON.none_as_null` which when set to True will result
in the Python constant ``None`` evaluating to the value of SQL
NULL, and when set to False results in the Python constant
``None`` evaluating to the value of JSON ``"null"``. The Python
value ``None`` may be used in conjunction with either
:attr:`_types.JSON.NULL` or :func:`.null` in order to indicate NULL
values, but care must be taken as to the value of the
:paramref:`_types.JSON.none_as_null` in these cases.
**Customizing the JSON Serializer**
The JSON serializer and deserializer used by :class:`_types.JSON`
defaults to
Python's ``json.dumps`` and ``json.loads`` functions; in the case of the
psycopg2 dialect, psycopg2 may be using its own custom loader function.
In order to affect the serializer / deserializer, they are currently
configurable at the :func:`_sa.create_engine` level via the
:paramref:`_sa.create_engine.json_serializer` and
:paramref:`_sa.create_engine.json_deserializer` parameters. For example,
to turn off ``ensure_ascii``::
engine = create_engine(
"sqlite://",
json_serializer=lambda obj: json.dumps(obj, ensure_ascii=False))
.. versionchanged:: 1.3.7
SQLite dialect's ``json_serializer`` and ``json_deserializer``
parameters renamed from ``_json_serializer`` and
``_json_deserializer``.
.. seealso::
:class:`sqlalchemy.dialects.postgresql.JSON`
:class:`sqlalchemy.dialects.postgresql.JSONB`
:class:`sqlalchemy.dialects.mysql.JSON`
:class:`sqlalchemy.dialects.sqlite.JSON`
.. versionadded:: 1.1
"""
__visit_name__ = "JSON"
hashable = False
NULL = util.symbol("JSON_NULL")
"""Describe the json value of NULL.
This value is used to force the JSON value of ``"null"`` to be
used as the value. A value of Python ``None`` will be recognized
either as SQL NULL or JSON ``"null"``, based on the setting
of the :paramref:`_types.JSON.none_as_null` flag; the
:attr:`_types.JSON.NULL`
constant can be used to always resolve to JSON ``"null"`` regardless
of this setting. This is in contrast to the :func:`_expression.null`
construct,
which always resolves to SQL NULL. E.g.::
from sqlalchemy import null
from sqlalchemy.dialects.postgresql import JSON
# will *always* insert SQL NULL
obj1 = MyObject(json_value=null())
# will *always* insert JSON string "null"
obj2 = MyObject(json_value=JSON.NULL)
session.add_all([obj1, obj2])
session.commit()
In order to set JSON NULL as a default value for a column, the most
transparent method is to use :func:`_expression.text`::
Table(
'my_table', metadata,
Column('json_data', JSON, default=text("'null'"))
)
While it is possible to use :attr:`_types.JSON.NULL` in this context, the
:attr:`_types.JSON.NULL` value will be returned as the value of the
column,
which in the context of the ORM or other repurposing of the default
value, may not be desirable. Using a SQL expression means the value
will be re-fetched from the database within the context of retrieving
generated defaults.
"""
def __init__(self, none_as_null=False):
"""Construct a :class:`_types.JSON` type.
:param none_as_null=False: if True, persist the value ``None`` as a
SQL NULL value, not the JSON encoding of ``null``. Note that when this
flag is False, the :func:`.null` construct can still be used to
persist a NULL value, which may be passed directly as a parameter
value that is specially interpreted by the :class:`_types.JSON` type
as SQL NULL::
from sqlalchemy import null
conn.execute(table.insert(), {"data": null()})
.. note::
:paramref:`_types.JSON.none_as_null` does **not** apply to the
values passed to :paramref:`_schema.Column.default` and
:paramref:`_schema.Column.server_default`; a value of ``None``
passed for these parameters means "no default present".
Additionally, when used in SQL comparison expressions, the
Python value ``None`` continues to refer to SQL null, and not
JSON NULL. The :paramref:`_types.JSON.none_as_null` flag refers
explicitly to the **persistence** of the value within an
INSERT or UPDATE statement. The :attr:`_types.JSON.NULL`
value should be used for SQL expressions that wish to compare to
JSON null.
.. seealso::
:attr:`.types.JSON.NULL`
"""
self.none_as_null = none_as_null
class JSONElementType(TypeEngine[Any]):
"""Common function for index / path elements in a JSON expression."""
_integer = Integer()
_string = String()
def string_bind_processor(self, dialect):
return self._string._cached_bind_processor(dialect)
def string_literal_processor(self, dialect):
return self._string._cached_literal_processor(dialect)
def bind_processor(self, dialect):
int_processor = self._integer._cached_bind_processor(dialect)
string_processor = self.string_bind_processor(dialect)
def process(value):
if int_processor and isinstance(value, int):
value = int_processor(value)
elif string_processor and isinstance(value, str):
value = string_processor(value)
return value
return process
def literal_processor(self, dialect):
int_processor = self._integer._cached_literal_processor(dialect)
string_processor = self.string_literal_processor(dialect)
def process(value):
if int_processor and isinstance(value, int):
value = int_processor(value)
elif string_processor and isinstance(value, str):
value = string_processor(value)
return value
return process
class JSONIndexType(JSONElementType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONIntIndexType(JSONIndexType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONStrIndexType(JSONIndexType):
"""Placeholder for the datatype of a JSON index value.
This allows execution-time processing of JSON index values
for special syntaxes.
"""
class JSONPathType(JSONElementType):
"""Placeholder type for JSON path operations.
This allows execution-time processing of a path-based
index value into a specific SQL syntax.
"""
class Comparator(Indexable.Comparator[_T], Concatenable.Comparator[_T]):
"""Define comparison operations for :class:`_types.JSON`."""
def _setup_getitem(self, index):
if not isinstance(index, str) and isinstance(
index, collections_abc.Sequence
):
index = coercions.expect(
roles.BinaryElementRole,
index,
expr=self.expr,
operator=operators.json_path_getitem_op,
bindparam_type=JSON.JSONPathType,
)
operator = operators.json_path_getitem_op
else:
index = coercions.expect(
roles.BinaryElementRole,
index,
expr=self.expr,
operator=operators.json_getitem_op,
bindparam_type=JSON.JSONIntIndexType
if isinstance(index, int)
else JSON.JSONStrIndexType,
)
operator = operators.json_getitem_op
return operator, index, self.type
def as_boolean(self):
"""Cast an indexed value as boolean.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_boolean()
).where(
mytable.c.json_column['some_data'].as_boolean() == True
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Boolean(), "as_boolean")
def as_string(self):
"""Cast an indexed value as string.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_string()
).where(
mytable.c.json_column['some_data'].as_string() ==
'some string'
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(String(), "as_string")
def as_integer(self):
"""Cast an indexed value as integer.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_integer()
).where(
mytable.c.json_column['some_data'].as_integer() == 5
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Integer(), "as_integer")
def as_float(self):
"""Cast an indexed value as float.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_float()
).where(
mytable.c.json_column['some_data'].as_float() == 29.75
)
.. versionadded:: 1.3.11
"""
return self._binary_w_type(Float(), "as_float")
def as_numeric(self, precision, scale, asdecimal=True):
"""Cast an indexed value as numeric/decimal.
e.g.::
stmt = select(
mytable.c.json_column['some_data'].as_numeric(10, 6)
).where(
mytable.c.
json_column['some_data'].as_numeric(10, 6) == 29.75
)
.. versionadded:: 1.4.0b2
"""
return self._binary_w_type(
Numeric(precision, scale, asdecimal=asdecimal), "as_numeric"
)
def as_json(self):
"""Cast an indexed value as JSON.
e.g.::
stmt = select(mytable.c.json_column['some_data'].as_json())
This is typically the default behavior of indexed elements in any
case.
Note that comparison of full JSON structures may not be
supported by all backends.
.. versionadded:: 1.3.11
"""
return self.expr
def _binary_w_type(self, typ, method_name):
if not isinstance(
self.expr, elements.BinaryExpression
) or self.expr.operator not in (
operators.json_getitem_op,
operators.json_path_getitem_op,
):
raise exc.InvalidRequestError(
"The JSON cast operator JSON.%s() only works with a JSON "
"index expression e.g. col['q'].%s()"
% (method_name, method_name)
)
expr = self.expr._clone()
expr.type = typ
return expr
comparator_factory = Comparator
@property
def python_type(self):
return dict
@property # type: ignore # mypy property bug
def should_evaluate_none(self):
"""Alias of :attr:`_types.JSON.none_as_null`"""
return not self.none_as_null
@should_evaluate_none.setter
def should_evaluate_none(self, value):
self.none_as_null = not value
@util.memoized_property
def _str_impl(self):
return String()
def _make_bind_processor(self, string_process, json_serializer):
if string_process:
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, elements.Null) or (
value is None and self.none_as_null
):
return None
serialized = json_serializer(value)
return string_process(serialized)
else:
def process(value):
if value is self.NULL:
value = None
elif isinstance(value, elements.Null) or (
value is None and self.none_as_null
):
return None
return json_serializer(value)
return process
def bind_processor(self, dialect):
string_process = self._str_impl.bind_processor(dialect)
json_serializer = dialect._json_serializer or json.dumps
return self._make_bind_processor(string_process, json_serializer)
def result_processor(self, dialect, coltype):
string_process = self._str_impl.result_processor(dialect, coltype)
json_deserializer = dialect._json_deserializer or json.loads
def process(value):
if value is None:
return None
if string_process:
value = string_process(value)
return json_deserializer(value)
return process
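# Illustrative usage sketch (commented out, not part of this module):
# combining the JSON indexing operators and data casters documented above.
# Names are arbitrary.
#
#   from sqlalchemy import JSON, Column, Integer, MetaData, Table, select
#
#   documents = Table(
#       "documents",
#       MetaData(),
#       Column("id", Integer, primary_key=True),
#       Column("data", JSON(none_as_null=True)),
#   )
#   stmt = select(documents.c.id).where(
#       documents.c.data["priority"].as_integer() > 3
#   )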
class ARRAY(
SchemaEventTarget, Indexable, Concatenable, TypeEngine[Sequence[Any]]
):
"""Represent a SQL Array type.
.. note:: This type serves as the basis for all ARRAY operations.
However, currently **only the PostgreSQL backend has support for SQL
arrays in SQLAlchemy**. It is recommended to use the PostgreSQL-specific
:class:`sqlalchemy.dialects.postgresql.ARRAY` type directly when using
ARRAY types with PostgreSQL, as it provides additional operators
specific to that backend.
:class:`_types.ARRAY` is part of the Core in support of various SQL
standard functions such as :class:`_functions.array_agg`
which explicitly involve
arrays; however, with the exception of the PostgreSQL backend and possibly
some third-party dialects, no other SQLAlchemy built-in dialect has support
for this type.
An :class:`_types.ARRAY` type is constructed given the "type"
of element::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer))
)
The above type represents an N-dimensional array,
meaning a supporting backend such as PostgreSQL will interpret values
with any number of dimensions automatically. To produce an INSERT
construct that passes in a 1-dimensional array of integers::
connection.execute(
mytable.insert(),
{"data": [1,2,3]}
)
The :class:`_types.ARRAY` type can be constructed given a fixed number
of dimensions::
mytable = Table("mytable", metadata,
Column("data", ARRAY(Integer, dimensions=2))
)
Sending a number of dimensions is optional, but recommended if the
datatype is to represent arrays of more than one dimension. This number
is used:
* When emitting the type declaration itself to the database, e.g.
``INTEGER[][]``
* When translating Python values to database values, and vice versa, e.g.
an ARRAY of :class:`.Unicode` objects uses this number to efficiently
access the string values inside of array structures without resorting
to per-row type inspection
* When used with the Python ``getitem`` accessor, the number of dimensions
serves to define the kind of type that the ``[]`` operator should
return, e.g. for an ARRAY of INTEGER with two dimensions::
>>> expr = table.c.column[5] # returns ARRAY(Integer, dimensions=1)
>>> expr = expr[6] # returns Integer
For 1-dimensional arrays, an :class:`_types.ARRAY` instance with no
dimension parameter will generally assume single-dimensional behaviors.
SQL expressions of type :class:`_types.ARRAY` have support for "index" and
"slice" behavior. The Python ``[]`` operator works normally here, given
integer indexes or slices. Arrays default to 1-based indexing.
The operator produces binary expression
constructs which will produce the appropriate SQL, both for
SELECT statements::
select(mytable.c.data[5], mytable.c.data[2:7])
as well as UPDATE statements when the :meth:`_expression.Update.values`
method
is used::
mytable.update().values({
mytable.c.data[5]: 7,
mytable.c.data[2:7]: [1, 2, 3]
})
The :class:`_types.ARRAY` type also provides for the operators
:meth:`.types.ARRAY.Comparator.any` and
:meth:`.types.ARRAY.Comparator.all`. The PostgreSQL-specific version of
:class:`_types.ARRAY` also provides additional operators.
.. versionadded:: 1.1.0
.. seealso::
:class:`sqlalchemy.dialects.postgresql.ARRAY`
"""
__visit_name__ = "ARRAY"
_is_array = True
zero_indexes = False
"""If True, Python zero-based indexes should be interpreted as one-based
on the SQL expression side."""
class Comparator(Indexable.Comparator[_T], Concatenable.Comparator[_T]):
"""Define comparison operations for :class:`_types.ARRAY`.
More operators are available on the dialect-specific form
of this type. See :class:`.postgresql.ARRAY.Comparator`.
"""
def _setup_getitem(self, index):
arr_type = cast(ARRAY, self.type)
if isinstance(index, slice):
return_type = arr_type
if arr_type.zero_indexes:
index = slice(index.start + 1, index.stop + 1, index.step)
slice_ = Slice(
index.start, index.stop, index.step, _name=self.expr.key
)
return operators.getitem, slice_, return_type
else:
if arr_type.zero_indexes:
index += 1
if arr_type.dimensions is None or arr_type.dimensions == 1:
return_type = arr_type.item_type
else:
adapt_kw = {"dimensions": arr_type.dimensions - 1}
return_type = arr_type.adapt(
arr_type.__class__, **adapt_kw
)
return operators.getitem, index, return_type
def contains(self, *arg, **kw):
raise NotImplementedError(
"ARRAY.contains() not implemented for the base "
"ARRAY type; please use the dialect-specific ARRAY type"
)
@util.preload_module("sqlalchemy.sql.elements")
def any(self, other, operator=None):
"""Return ``other operator ANY (array)`` clause.
.. note:: This method is an :class:`_types.ARRAY` - specific
construct that is now superseded by the :func:`_sql.any_`
function, which features a different calling style. The
:func:`_sql.any_` function is also mirrored at the method level
via the :meth:`_sql.ColumnOperators.any_` method.
Usage of array-specific :meth:`_types.ARRAY.Comparator.any`
is as follows::
from sqlalchemy.sql import operators
conn.execute(
select(table.c.data).where(
table.c.data.any(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:func:`_expression.any_`
:meth:`.types.ARRAY.Comparator.all`
"""
elements = util.preloaded.sql_elements
operator = operator if operator else operators.eq
# send plain BinaryExpression so that negate remains at None,
# leading to NOT expr for negation.
return elements.BinaryExpression(
coercions.expect(roles.ExpressionElementRole, other),
elements.CollectionAggregate._create_any(self.expr),
operator,
)
@util.preload_module("sqlalchemy.sql.elements")
def all(self, other, operator=None):
"""Return ``other operator ALL (array)`` clause.
.. note:: This method is an :class:`_types.ARRAY` - specific
construct that is now superseded by the :func:`_sql.all_`
function, which features a different calling style. The
:func:`_sql.all_` function is also mirrored at the method level
via the :meth:`_sql.ColumnOperators.all_` method.
Usage of array-specific :meth:`_types.ARRAY.Comparator.all`
is as follows::
from sqlalchemy.sql import operators
conn.execute(
select(table.c.data).where(
table.c.data.all(7, operator=operators.lt)
)
)
:param other: expression to be compared
:param operator: an operator object from the
:mod:`sqlalchemy.sql.operators`
package, defaults to :func:`.operators.eq`.
.. seealso::
:func:`_expression.all_`
:meth:`.types.ARRAY.Comparator.any`
"""
elements = util.preloaded.sql_elements
operator = operator if operator else operators.eq
# send plain BinaryExpression so that negate remains at None,
# leading to NOT expr for negation.
return elements.BinaryExpression(
coercions.expect(roles.ExpressionElementRole, other),
elements.CollectionAggregate._create_all(self.expr),
operator,
)
comparator_factory = Comparator
def __init__(
self, item_type, as_tuple=False, dimensions=None, zero_indexes=False
):
"""Construct an :class:`_types.ARRAY`.
E.g.::
Column('myarray', ARRAY(Integer))
Arguments are:
:param item_type: The data type of items of this array. Note that
dimensionality is irrelevant here, so multi-dimensional arrays like
``INTEGER[][]`` are constructed as ``ARRAY(Integer)``, not as
``ARRAY(ARRAY(Integer))`` or such.
:param as_tuple=False: Specify whether return results
should be converted to tuples from lists. This parameter is
not generally needed as a Python list corresponds well
to a SQL array.
:param dimensions: if non-None, the ARRAY will assume a fixed
number of dimensions. This impacts how the array is declared
on the database, how it goes about interpreting Python and
result values, as well as how expression behavior in conjunction
with the "getitem" operator works. See the description at
:class:`_types.ARRAY` for additional detail.
:param zero_indexes=False: when True, index values will be converted
between Python zero-based and SQL one-based indexes, e.g.
a value of one will be added to all index values before passing
to the database.
"""
if isinstance(item_type, ARRAY):
raise ValueError(
"Do not nest ARRAY types; ARRAY(basetype) "
"handles multi-dimensional arrays of basetype"
)
if isinstance(item_type, type):
item_type = item_type()
self.item_type = item_type
self.as_tuple = as_tuple
self.dimensions = dimensions
self.zero_indexes = zero_indexes
@property
def hashable(self):
return self.as_tuple
@property
def python_type(self):
return list
def compare_values(self, x, y):
return x == y
def _set_parent(self, column, outer=False, **kw):
"""Support SchemaEventTarget"""
if not outer and isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent(column, **kw)
def _set_parent_with_dispatch(self, parent):
"""Support SchemaEventTarget"""
super(ARRAY, self)._set_parent_with_dispatch(parent, outer=True)
if isinstance(self.item_type, SchemaEventTarget):
self.item_type._set_parent_with_dispatch(parent)
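# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hedged example of the ARRAY type defined above; the table and
# column names are hypothetical, and imports are deferred to call time so the
# sketch stays inert at import.
def _example_array_usage():  # illustrative only
    from sqlalchemy import Column, MetaData, Table, select
    from sqlalchemy.sql import operators
    metadata = MetaData()
    mytable = Table("mytable", metadata, Column("data", ARRAY(Integer)))
    # Single-index and slice access (one-based on the SQL side by default):
    first_element = mytable.c.data[1]
    sub_array = mytable.c.data[2:7]
    # ANY comparison, mirroring the Comparator.any() docstring above:
    stmt = select(mytable.c.data).where(
        mytable.c.data.any(7, operator=operators.lt)
    )
    return first_element, sub_array, stmt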
class TupleType(TypeEngine[Tuple[Any, ...]]):
"""represent the composite type of a Tuple."""
_is_tuple_type = True
types: List[TypeEngine[Any]]
def __init__(self, *types):
self._fully_typed = NULLTYPE not in types
self.types = [
item_type() if isinstance(item_type, type) else item_type
for item_type in types
]
def _resolve_values_to_types(self, value: Any) -> TupleType:
if self._fully_typed:
return self
else:
return TupleType(
*[
_resolve_value_to_type(elem) if typ is NULLTYPE else typ
for typ, elem in zip(self.types, value)
]
)
def result_processor(self, dialect, coltype):
raise NotImplementedError(
"The tuple type does not support being fetched "
"as a column in a result row."
)
class REAL(Float[_N]):
"""The SQL REAL type.
.. seealso::
:class:`_types.Float` - documentation for the base type.
"""
__visit_name__ = "REAL"
class FLOAT(Float[_N]):
"""The SQL FLOAT type.
.. seealso::
:class:`_types.Float` - documentation for the base type.
"""
__visit_name__ = "FLOAT"
class DOUBLE(Double[_N]):
"""The SQL DOUBLE type.
.. versionadded:: 2.0
.. seealso::
:class:`_types.Double` - documentation for the base type.
"""
__visit_name__ = "DOUBLE"
class DOUBLE_PRECISION(Double[_N]):
"""The SQL DOUBLE PRECISION type.
.. versionadded:: 2.0
.. seealso::
:class:`_types.Double` - documentation for the base type.
"""
__visit_name__ = "DOUBLE_PRECISION"
class NUMERIC(Numeric[_N]):
"""The SQL NUMERIC type.
.. seealso::
:class:`_types.Numeric` - documentation for the base type.
"""
__visit_name__ = "NUMERIC"
class DECIMAL(Numeric[_N]):
"""The SQL DECIMAL type.
.. seealso::
:class:`_types.Numeric` - documentation for the base type.
"""
__visit_name__ = "DECIMAL"
class INTEGER(Integer):
"""The SQL INT or INTEGER type.
.. seealso::
:class:`_types.Integer` - documentation for the base type.
"""
__visit_name__ = "INTEGER"
INT = INTEGER
class SMALLINT(SmallInteger):
"""The SQL SMALLINT type.
.. seealso::
:class:`_types.SmallInteger` - documentation for the base type.
"""
__visit_name__ = "SMALLINT"
class BIGINT(BigInteger):
"""The SQL BIGINT type.
.. seealso::
:class:`_types.BigInteger` - documentation for the base type.
"""
__visit_name__ = "BIGINT"
class TIMESTAMP(DateTime):
"""The SQL TIMESTAMP type.
:class:`_types.TIMESTAMP` datatypes have support for timezone
storage on some backends, such as PostgreSQL and Oracle. Use the
:paramref:`~types.TIMESTAMP.timezone` argument in order to enable
"TIMESTAMP WITH TIMEZONE" for these backends.
"""
__visit_name__ = "TIMESTAMP"
def __init__(self, timezone=False):
"""Construct a new :class:`_types.TIMESTAMP`.
:param timezone: boolean. Indicates that the TIMESTAMP type should
enable timezone support, if available on the target database.
On a per-dialect basis, this is similar to "TIMESTAMP WITH TIMEZONE".
If the target database does not support timezones, this flag is
ignored.
"""
super(TIMESTAMP, self).__init__(timezone=timezone)
def get_dbapi_type(self, dbapi):
return dbapi.TIMESTAMP
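# Illustrative sketch (not part of the original module): a hypothetical column
# using the TIMESTAMP type above with timezone storage enabled; backends that
# lack timezone support simply ignore the flag.
def _example_timestamp_with_timezone():  # illustrative only
    from sqlalchemy import Column
    return Column("created_at", TIMESTAMP(timezone=True))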
class DATETIME(DateTime):
"""The SQL DATETIME type."""
__visit_name__ = "DATETIME"
class DATE(Date):
"""The SQL DATE type."""
__visit_name__ = "DATE"
class TIME(Time):
"""The SQL TIME type."""
__visit_name__ = "TIME"
class TEXT(Text):
"""The SQL TEXT type."""
__visit_name__ = "TEXT"
class CLOB(Text):
"""The CLOB type.
This type is found in Oracle and Informix.
"""
__visit_name__ = "CLOB"
class VARCHAR(String):
"""The SQL VARCHAR type."""
__visit_name__ = "VARCHAR"
class NVARCHAR(Unicode):
"""The SQL NVARCHAR type."""
__visit_name__ = "NVARCHAR"
class CHAR(String):
"""The SQL CHAR type."""
__visit_name__ = "CHAR"
class NCHAR(Unicode):
"""The SQL NCHAR type."""
__visit_name__ = "NCHAR"
class BLOB(LargeBinary):
"""The SQL BLOB type."""
__visit_name__ = "BLOB"
class BINARY(_Binary):
"""The SQL BINARY type."""
__visit_name__ = "BINARY"
class VARBINARY(_Binary):
"""The SQL VARBINARY type."""
__visit_name__ = "VARBINARY"
class BOOLEAN(Boolean):
"""The SQL BOOLEAN type."""
__visit_name__ = "BOOLEAN"
class NullType(TypeEngine[None]):
"""An unknown type.
:class:`.NullType` is used as a default type for those cases where
a type cannot be determined, including:
* During table reflection, when the type of a column is not recognized
by the :class:`.Dialect`
* When constructing SQL expressions using plain Python objects of
unknown types (e.g. ``somecolumn == my_special_object``)
* When a new :class:`_schema.Column` is created,
and the given type is passed
as ``None`` or is not passed at all.
The :class:`.NullType` can be used within SQL expression invocation
without issue; it just has no behavior either at the expression
construction level or at the bind-parameter/result processing level.
:class:`.NullType` will result in a :exc:`.CompileError` if the compiler
is asked to render the type itself, such as if it is used in a
:func:`.cast` operation or within a schema creation operation such as that
invoked by :meth:`_schema.MetaData.create_all` or the
:class:`.CreateTable`
construct.
"""
__visit_name__ = "null"
_isnull = True
def literal_processor(self, dialect):
def process(value):
raise exc.CompileError(
"Don't know how to render literal SQL value: %r" % (value,)
)
return process
class Comparator(TypeEngine.Comparator[_T]):
def _adapt_expression(
self,
op: OperatorType,
other_comparator: TypeEngine.Comparator[Any],
) -> Tuple[OperatorType, TypeEngine[Any]]:
if isinstance(
other_comparator, NullType.Comparator
) or not operators.is_commutative(op):
return op, self.expr.type
else:
return other_comparator._adapt_expression(op, self)
comparator_factory = Comparator
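# Illustrative sketch (not part of the original module): a column declared
# without a datatype falls back to NullType; it still participates in SQL
# expressions, but rendering the type itself (e.g. CREATE TABLE or CAST)
# raises CompileError, as described in the docstring above.
def _example_nulltype_fallback():  # illustrative only
    from sqlalchemy import Column, MetaData, Table
    t = Table("t", MetaData(), Column("data"))  # no type given -> NullType
    assert isinstance(t.c.data.type, NullType)
    return t.c.data == "x"  # still legal as an expression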
class TableValueType(HasCacheKey, TypeEngine[Any]):
"""Refers to a table value type."""
_is_table_value = True
_traverse_internals = [
("_elements", InternalTraversal.dp_clauseelement_list),
]
def __init__(self, *elements):
self._elements = [
coercions.expect(roles.StrAsPlainColumnRole, elem)
for elem in elements
]
class MatchType(Boolean):
"""Refers to the return type of the MATCH operator.
As the :meth:`.ColumnOperators.match` is probably the most open-ended
operator in generic SQLAlchemy Core, we can't assume the return type
at SQL evaluation time, as MySQL returns a floating point, not a boolean,
and other backends might do something different. So this type
acts as a placeholder, currently subclassing :class:`.Boolean`.
The type allows dialects to inject result-processing functionality
if needed, and on MySQL will return floating-point values.
.. versionadded:: 1.0.0
"""
NULLTYPE = NullType()
BOOLEANTYPE = Boolean()
STRINGTYPE = String()
INTEGERTYPE = Integer()
MATCHTYPE = MatchType()
TABLEVALUE = TableValueType()
DATETIME_TIMEZONE = DateTime(timezone=True)
TIME_TIMEZONE = Time(timezone=True)
_DATETIME = DateTime()
_TIME = Time()
_STRING = String()
_UNICODE = Unicode()
_type_map: Dict[Type[Any], TypeEngine[Any]] = {
int: Integer(),
float: Float(),
bool: BOOLEANTYPE,
decimal.Decimal: Numeric(),
dt.date: Date(),
dt.datetime: _DATETIME,
dt.time: _TIME,
dt.timedelta: Interval(),
type(None): NULLTYPE,
bytes: LargeBinary(),
str: _STRING,
}
_type_map_get = _type_map.get
def _resolve_value_to_type(value: Any) -> TypeEngine[Any]:
_result_type = _type_map_get(type(value), False)
if _result_type is False:
# use inspect() to detect SQLAlchemy built-in
# objects.
insp = inspection.inspect(value, False)
if (
insp is not None
and
# foil mock.Mock() and other impostors by ensuring
# the inspection target itself self-inspects
insp.__class__ in inspection._registrars
):
raise exc.ArgumentError(
"Object %r is not legal as a SQL literal value" % (value,)
)
return NULLTYPE
else:
return _result_type._resolve_for_literal( # type: ignore [union-attr]
value
)
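# Illustrative sketch (not part of the original module): how plain Python
# values resolve to types through _type_map / _resolve_value_to_type above.
def _example_resolve_value_to_type():  # illustrative only
    assert isinstance(_resolve_value_to_type(5), Integer)
    assert isinstance(_resolve_value_to_type("hello"), String)
    assert isinstance(_resolve_value_to_type(None), NullType)
    # Arbitrary unknown objects also fall back to NULLTYPE; objects that
    # self-inspect as SQLAlchemy constructs raise ArgumentError instead.
    assert isinstance(_resolve_value_to_type(object()), NullType)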
# back-assign to type_api
type_api.BOOLEANTYPE = BOOLEANTYPE
type_api.STRINGTYPE = STRINGTYPE
type_api.INTEGERTYPE = INTEGERTYPE
type_api.NULLTYPE = NULLTYPE
type_api.MATCHTYPE = MATCHTYPE
type_api.INDEXABLE = INDEXABLE = Indexable
type_api.TABLEVALUE = TABLEVALUE
type_api._resolve_value_to_type = _resolve_value_to_type
|
the-stack_0_23330 | """Test suite for wasabi."""
from unittest import TestCase
from os import getcwd, makedirs, environ, remove
from os.path import isfile, dirname
from random import randint
from functools import wraps
from metasub_utils.wasabi import WasabiBucket
def with_aws_credentials(func):
"""Make a default AWS credential file in the home dir."""
@wraps(func)
def decorated_function(self, *args, **kwargs):
cred_filename = environ['HOME'] + '/.aws/credentials'
if isfile(cred_filename):
return func(self, *args, **kwargs)
makedirs(dirname(cred_filename), exist_ok=True)
access_key, secret_key = environ['AWS_ACCESS_KEY'], environ['AWS_SECRET_ACCESS_KEY']
creds = f'[default]\naws_access_key_id={access_key}\naws_secret_access_key={secret_key}\n'
creds += f'[wasabi]\naws_access_key_id={access_key}\naws_secret_access_key={secret_key}\n'
with open(cred_filename, 'w') as cf:
cf.write(creds)
return func(self, *args, **kwargs)
return decorated_function
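# For reference (not part of the original test module): assuming AWS_ACCESS_KEY
# and AWS_SECRET_ACCESS_KEY are set, the decorator above writes a credentials
# file of roughly this shape:
#
#     [default]
#     aws_access_key_id=<access key>
#     aws_secret_access_key=<secret key>
#     [wasabi]
#     aws_access_key_id=<access key>
#     aws_secret_access_key=<secret key>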
class TestWasabi(TestCase):
"""Test suite for wasabi."""
@with_aws_credentials
def test_download_file(self):
bucket = WasabiBucket(profile_name='wasabi')
local_name = f'{getcwd()}/temp_{randint(0, 1000 * 1000)}'
bucket.download('Scripts/downloadem.sh', local_name, False)
bucket.close()
self.assertTrue(isfile(local_name))
self.assertTrue(len(open(local_name).read()) > 0)
remove(local_name)
@with_aws_credentials
def test_list_raw(self):
bucket = WasabiBucket(profile_name='wasabi')
raw_reads = bucket.list_raw(city_name='paris')
bucket.close()
self.assertTrue(raw_reads)
@with_aws_credentials
def test_list_from_project(self):
"""Test that we can filter sample names by project."""
bucket = WasabiBucket(profile_name='wasabi')
raw_reads = bucket.list_raw(project_name='tigress')
bucket.close()
self.assertEqual(len(raw_reads), 2 * 83)
@with_aws_credentials
def test_list_from_city_project(self):
"""Test that we can filter sample names by city and project."""
bucket = WasabiBucket(profile_name='wasabi')
raw_reads = bucket.list_raw(city_name='swansea', project_name='tigress')
bucket.close()
self.assertEqual(len(raw_reads), 2 * 6)
@with_aws_credentials
def test_list_contigs_from_city(self):
"""Test that we can filter sample names by city and project."""
bucket = WasabiBucket(profile_name='wasabi')
contigs = bucket.list_contigs(city_name='paris')
bucket.close()
self.assertEqual(len(contigs), 14)
@with_aws_credentials
def test_download_from_city_project(self):
"""Test that we do not get an obvious error on download."""
bucket = WasabiBucket(profile_name='wasabi')
bucket.download_raw(city_name='swansea', project_name='tigress')
bucket.close()
|
the-stack_0_23331 | import unittest
from os import path
from tests.helpers import load_json, TMP_FOLDER, DOC_FOLDER
from tools.schema import SchemaType, schema_pythonify, schema_compare
class TestSchema(unittest.TestCase):
def test_hd4(self):
data = load_json(path.join(DOC_FOLDER, 'HD4_02_05_00_00.json'))
p = SchemaType.parse(data)
self.assertEqual(p.schema_version, 2)
self.assertEqual(p.version, 4.0)
self.assertIsInstance(p.version, float)
self.assertIsInstance(p.schema_version, int)
self.assertEqual(len(p.commands.keys()), 64)
self.assertEqual(len(p.modes.keys()), 6)
def test_hd7(self):
data = load_json(path.join(DOC_FOLDER, 'HD7_01_01_51_00.json'))
p = SchemaType.parse(data)
self.assertEqual(p.schema_version, 4, 'expected schema_version=4')
self.assertEqual(p.version, 2, 'expected version=2')
self.assertEqual(len(p.commands.keys()), 88)
self.assertEqual(len(p.modes.keys()), 7)
@unittest.skip('Too chatty in console')
def test_hd4_hd7_comparison(self):
hd7 = SchemaType.parse(load_json(path.join(DOC_FOLDER, 'HD7_01_01_51_00.json')))
hd4 = SchemaType.parse(load_json(path.join(DOC_FOLDER, 'HD4_02_05_00_00.json')))
schema_compare(hd4, hd7, 'HD4', 'HD7')
def test_hd4_pythonify(self):
hd4 = SchemaType.parse(load_json(path.join(DOC_FOLDER, 'HD4_02_05_00_00.json')))
schema_pythonify(hd4, path.join(TMP_FOLDER, 'pythonify.py'))
if __name__ == '__main__':
unittest.main()
|
the-stack_0_23332 | """Hello world for Sapien.
Concepts:
- Engine and scene
- Renderer, viewer, lighting
- Run a simulation loop
Notes:
- For one process, you can only create one engine and one renderer.
"""
import sapien.core as sapien
from sapien.utils import Viewer
import numpy as np
def main():
engine = sapien.Engine() # Create a physical simulation engine
renderer = sapien.VulkanRenderer() # Create a Vulkan renderer
engine.set_renderer(renderer) # Bind the renderer and the engine
scene = engine.create_scene() # Create an instance of simulation world (aka scene)
scene.set_timestep(1 / 100.0) # Set the simulation frequency
# NOTE: How to build actors (rigid bodies) is elaborated in create_actors.py
scene.add_ground(altitude=0) # Add a ground
actor_builder = scene.create_actor_builder()
actor_builder.add_box_collision(half_size=[0.5, 0.5, 0.5])
actor_builder.add_box_visual(half_size=[0.5, 0.5, 0.5], color=[1., 0., 0.])
box = actor_builder.build(name='box') # Add a box
box.set_pose(sapien.Pose(p=[0, 0, 0.5]))
# Add some lights so that you can observe the scene
scene.set_ambient_light([0.5, 0.5, 0.5])
scene.add_directional_light([0, 1, -1], [0.5, 0.5, 0.5])
viewer = Viewer(renderer) # Create a viewer (window)
viewer.set_scene(scene) # Bind the viewer and the scene
# The coordinate frame in Sapien is: x(forward), y(left), z(upward)
# The principle axis of the camera is the x-axis
viewer.set_camera_xyz(x=-4, y=0, z=2)
# The rotation of the free camera is represented as [roll(x), pitch(-y), yaw(-z)]
# The camera now looks at the origin
viewer.set_camera_rpy(r=0, p=-np.arctan2(2, 4), y=0)
viewer.window.set_camera_parameters(near=0.05, far=100, fovy=1)
while not viewer.closed: # Press key q to quit
scene.step() # Simulate the world
scene.update_render() # Update the world to the renderer
viewer.render()
if __name__ == '__main__':
main()
|