{
  "source": "jmolmo/ansible-runner-service",
  "score": 2
}
#### File: ansible-runner-service/runner_service/utils.py
```python
import os
import shlex
import shutil
import socket
import getpass
from subprocess import Popen, PIPE
from threading import Timer
from cryptography.hazmat.backends import default_backend
from cryptography.hazmat.primitives.asymmetric import rsa
from cryptography.hazmat.primitives import serialization
from OpenSSL import crypto
from runner_service import configuration
import logging
logger = logging.getLogger(__name__)
class RunnerServiceError(Exception):
pass
def create_directory(dir_path):
""" Create directory if it doesn't exist """
if not os.path.exists(dir_path):
os.makedirs(dir_path)
def fread(file_path):
""" return the contents of the given file """
with open(file_path, 'r') as file_fd:
return file_fd.read().strip()
def create_self_signed_cert(cert_dir, cert_pfx):
"""
    Looks in cert_dir for the key files (using the cert_pfx name), and either
    returns them if they exist, or creates them if they're missing.
"""
cert_filename = os.path.join(cert_dir,
"{}.crt".format(cert_pfx))
key_filename = os.path.join(cert_dir,
"{}.key".format(cert_pfx))
logger.debug("Checking for the SSL keys in {}".format(cert_dir))
if os.path.exists(cert_filename) \
or os.path.exists(key_filename):
logger.info("Using existing SSL files in {}".format(cert_dir))
return (cert_filename, key_filename)
else:
logger.info("Existing SSL files not found in {}".format(cert_dir))
logger.info("Self-signed cert will be created - expiring in {} "
"years".format(configuration.settings.cert_expiration))
# create a key pair
k = crypto.PKey()
k.generate_key(crypto.TYPE_RSA, 2048)
# create a self-signed cert
cert = crypto.X509()
cert.get_subject().C = "US"
cert.get_subject().ST = "North Carolina"
        cert.get_subject().L = "Raleigh"
cert.get_subject().O = "Red Hat" # noqa: E741
cert.get_subject().OU = "Ansible"
cert.get_subject().CN = socket.gethostname()
cert.set_serial_number(1000)
cert.gmtime_adj_notBefore(0)
# define cert expiration period(years)
cert.gmtime_adj_notAfter(configuration.settings.cert_expiration * 365 * 24 * 60 * 60) # noqa
cert.set_issuer(cert.get_subject())
cert.set_pubkey(k)
cert.sign(k, 'sha512')
# create cert_dir if it doesn't exist
create_directory(cert_dir)
logger.debug("Writing crt file to {}".format(cert_filename))
        with open(cert_filename, "wt") as cert_fd:
cert_fd.write(crypto.dump_certificate(crypto.FILETYPE_PEM, cert).decode('utf-8')) # noqa
logger.debug("Writing key file to {}".format(key_filename))
        with open(key_filename, "wt") as key_fd:
key_fd.write(crypto.dump_privatekey(crypto.FILETYPE_PEM, k).decode('utf-8')) # noqa
return (cert_filename, key_filename)
def rm_r(path):
if not os.path.exists(path):
return
if os.path.isfile(path) or os.path.islink(path):
os.unlink(path)
else:
shutil.rmtree(path)
def ssh_create_key(ssh_dir, user=None):
if not user:
user = getpass.getuser()
prv_key = rsa.generate_private_key(
public_exponent=65537,
key_size=4096,
backend=default_backend())
pub_key = prv_key.public_key()
prv_file = os.path.join(ssh_dir, 'ssh_key')
pub_file = os.path.join(ssh_dir, 'ssh_key.pub')
# create ssh_dir if it doesn't exist
create_directory(ssh_dir)
# export the private key
try:
with open(prv_file, "wb") as f:
f.write(prv_key.private_bytes(
encoding=serialization.Encoding.PEM,
format=serialization.PrivateFormat.TraditionalOpenSSL,
encryption_algorithm=serialization.NoEncryption()))
except (OSError, IOError) as err:
msg = "Unable to write to private key to '{}': {}".format(ssh_dir, err)
logger.critical(msg)
raise RunnerServiceError(msg)
except Exception as err:
logger.critical("Unknown error writing private key: {}".format(err))
raise
else:
# python3 syntax
os.chmod(prv_file, 0o600)
logger.info("Created SSH private key @ '{}'".format(prv_file))
# export the public key
try:
with open(pub_file, "wb") as f:
f.write(pub_key.public_bytes(
encoding=serialization.Encoding.OpenSSH,
format=serialization.PublicFormat.OpenSSH))
except (OSError, IOError) as err:
msg = "Unable to write public ssh key to {}: {}".format(ssh_dir, err)
logger.critical(msg)
raise RunnerServiceError(msg)
except Exception as err:
logger.critical("Unknown error creating the public key "
"to {}: {}".format(ssh_dir, err))
raise
else:
# python3 syntax
os.chmod(pub_file, 0o600)
logger.info("Created SSH public key @ '{}'".format(pub_file))
class HostNotFound(Exception):
pass
class SSHNotAccessible(Exception):
pass
class SSHTimeout(Exception):
pass
class SSHIdentityFailure(Exception):
pass
class SSHAuthFailure(Exception):
pass
class SSHUnknownError(Exception):
pass
class SSHClient(object):
def __init__(self, user, host, identity, timeout=1, port=22):
self.user = user
self.port = port
self.host = host
self.timeout = timeout
self.identity_file = identity
def connect(self):
def timeout_handler():
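            # Runs on the Timer thread: killing the process is what unblocks
            # proc.communicate() below; the SSHTimeout raised here never
            # propagates to the caller.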
proc.kill()
raise SSHTimeout
socket.setdefaulttimeout(self.timeout)
s = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
try:
s.connect((self.host, self.port))
except socket.gaierror:
raise HostNotFound
except ConnectionRefusedError:
raise SSHNotAccessible
except socket.timeout:
raise SSHTimeout
else:
s.shutdown(socket.SHUT_RDWR)
s.close()
# Now try and use the identity file to passwordless ssh
cmd = ('ssh -o "StrictHostKeyChecking=no" '
'-o "IdentitiesOnly=yes" '
' -o "PasswordAuthentication=no" '
' -i {} '
'{}@{} python --version'.format(self.identity_file, self.user, self.host))
proc = Popen(shlex.split(cmd), stdout=PIPE, stderr=PIPE)
timer = Timer(self.timeout, timeout_handler)
try:
timer.start()
stdout, stderr = proc.communicate()
except Exception as e:
raise SSHUnknownError(e)
else:
if 'permission denied' in stderr.decode().lower():
raise SSHAuthFailure(stderr)
finally:
timer.cancel()
def ssh_connect_ok(host, user=None):
if not user:
if configuration.settings.target_user:
user = configuration.settings.target_user
else:
user = getpass.getuser()
priv_key = os.path.join(configuration.settings.playbooks_root_dir,
"env/ssh_key")
if not os.path.exists(priv_key):
return False, "FAILED:SSH key(s) missing from ansible-runner-service"
target = SSHClient(
user=user,
host=host,
identity=priv_key,
timeout=configuration.settings.ssh_timeout
)
try:
target.connect()
except HostNotFound:
return False, "NOCONN:SSH error - '{}' not found; check DNS or " \
"/etc/hosts".format(host)
except SSHNotAccessible:
return False, "NOCONN:SSH target '{}' not contactable; host offline" \
", port 22 blocked, sshd running?".format(host)
except SSHTimeout:
return False, "TIMEOUT:SSH timeout waiting for response from " \
"'{}'".format(host)
except SSHAuthFailure:
return False, "NOAUTH:SSH auth error - passwordless ssh not " \
"configured for '{}'".format(host)
else:
return True, "OK:SSH connection check to {} successful".format(host)
```
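For reference, here is a minimal sketch of how the helpers above might be exercised on their own. The temporary directory and the `ssh-rsa` check are illustrative assumptions; in the real service these functions are driven from the startup path and `configuration.settings`.

```python
# Hedged usage sketch for runner_service.utils - not part of the service itself.
import tempfile

from runner_service.utils import fread, ssh_create_key

with tempfile.TemporaryDirectory() as ssh_dir:
    # Writes ssh_key / ssh_key.pub into ssh_dir and chmods them to 0600.
    ssh_create_key(ssh_dir)
    pub_key = fread("{}/ssh_key.pub".format(ssh_dir))
    print(pub_key.split()[0])   # expected to be 'ssh-rsa'
```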
{
  "source": "jmolmo/kcli",
  "score": 2
}
#### File: kvirt/krpc/kcli_pb2_grpc.py
```python
import grpc
import kvirt.krpc.kcli_pb2 as kcli__pb2
class KcliStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.console = channel.unary_unary(
'/Kcli/console',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.cmd.FromString,
)
self.info = channel.unary_unary(
'/Kcli/info',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.vminfo.FromString,
)
self.list = channel.unary_unary(
'/Kcli/list',
request_serializer=kcli__pb2.client.SerializeToString,
response_deserializer=kcli__pb2.vmlist.FromString,
)
self.list_disks = channel.unary_unary(
'/Kcli/list_disks',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.diskslist.FromString,
)
self.list_flavors = channel.unary_unary(
'/Kcli/list_flavors',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.flavorslist.FromString,
)
self.list_images = channel.unary_unary(
'/Kcli/list_images',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.imageslist.FromString,
)
self.list_networks = channel.unary_unary(
'/Kcli/list_networks',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.networkslist.FromString,
)
self.list_isos = channel.unary_unary(
'/Kcli/list_isos',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.isoslist.FromString,
)
self.list_pools = channel.unary_unary(
'/Kcli/list_pools',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.poolslist.FromString,
)
self.list_subnets = channel.unary_unary(
'/Kcli/list_subnets',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.subnetslist.FromString,
)
self.restart = channel.unary_unary(
'/Kcli/restart',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.serial_console = channel.unary_unary(
'/Kcli/serial_console',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.cmd.FromString,
)
self.ssh = channel.unary_unary(
'/Kcli/ssh',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.sshcmd.FromString,
)
self.scp = channel.unary_unary(
'/Kcli/scp',
request_serializer=kcli__pb2.scpdetails.SerializeToString,
response_deserializer=kcli__pb2.sshcmd.FromString,
)
self.start = channel.unary_unary(
'/Kcli/start',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.stop = channel.unary_unary(
'/Kcli/stop',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete = channel.unary_unary(
'/Kcli/delete',
request_serializer=kcli__pb2.vm.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.get_lastvm = channel.unary_unary(
'/Kcli/get_lastvm',
request_serializer=kcli__pb2.client.SerializeToString,
response_deserializer=kcli__pb2.vm.FromString,
)
self.delete_image = channel.unary_unary(
'/Kcli/delete_image',
request_serializer=kcli__pb2.image.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.create_network = channel.unary_unary(
'/Kcli/create_network',
request_serializer=kcli__pb2.network.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_network = channel.unary_unary(
'/Kcli/delete_network',
request_serializer=kcli__pb2.network.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.create_pool = channel.unary_unary(
'/Kcli/create_pool',
request_serializer=kcli__pb2.pool.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_pool = channel.unary_unary(
'/Kcli/delete_pool',
request_serializer=kcli__pb2.pool.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
class KcliServicer(object):
"""Missing associated documentation comment in .proto file"""
def console(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def info(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_disks(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_flavors(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_images(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_networks(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_isos(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_pools(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_subnets(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def restart(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def serial_console(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ssh(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def scp(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def start(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def stop(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def get_lastvm(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_image(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def create_network(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_network(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def create_pool(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_pool(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KcliServicer_to_server(servicer, server):
rpc_method_handlers = {
'console': grpc.unary_unary_rpc_method_handler(
servicer.console,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.cmd.SerializeToString,
),
'info': grpc.unary_unary_rpc_method_handler(
servicer.info,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.vminfo.SerializeToString,
),
'list': grpc.unary_unary_rpc_method_handler(
servicer.list,
request_deserializer=kcli__pb2.client.FromString,
response_serializer=kcli__pb2.vmlist.SerializeToString,
),
'list_disks': grpc.unary_unary_rpc_method_handler(
servicer.list_disks,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.diskslist.SerializeToString,
),
'list_flavors': grpc.unary_unary_rpc_method_handler(
servicer.list_flavors,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.flavorslist.SerializeToString,
),
'list_images': grpc.unary_unary_rpc_method_handler(
servicer.list_images,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.imageslist.SerializeToString,
),
'list_networks': grpc.unary_unary_rpc_method_handler(
servicer.list_networks,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.networkslist.SerializeToString,
),
'list_isos': grpc.unary_unary_rpc_method_handler(
servicer.list_isos,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.isoslist.SerializeToString,
),
'list_pools': grpc.unary_unary_rpc_method_handler(
servicer.list_pools,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.poolslist.SerializeToString,
),
'list_subnets': grpc.unary_unary_rpc_method_handler(
servicer.list_subnets,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.subnetslist.SerializeToString,
),
'restart': grpc.unary_unary_rpc_method_handler(
servicer.restart,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'serial_console': grpc.unary_unary_rpc_method_handler(
servicer.serial_console,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.cmd.SerializeToString,
),
'ssh': grpc.unary_unary_rpc_method_handler(
servicer.ssh,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.sshcmd.SerializeToString,
),
'scp': grpc.unary_unary_rpc_method_handler(
servicer.scp,
request_deserializer=kcli__pb2.scpdetails.FromString,
response_serializer=kcli__pb2.sshcmd.SerializeToString,
),
'start': grpc.unary_unary_rpc_method_handler(
servicer.start,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'stop': grpc.unary_unary_rpc_method_handler(
servicer.stop,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete': grpc.unary_unary_rpc_method_handler(
servicer.delete,
request_deserializer=kcli__pb2.vm.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'get_lastvm': grpc.unary_unary_rpc_method_handler(
servicer.get_lastvm,
request_deserializer=kcli__pb2.client.FromString,
response_serializer=kcli__pb2.vm.SerializeToString,
),
'delete_image': grpc.unary_unary_rpc_method_handler(
servicer.delete_image,
request_deserializer=kcli__pb2.image.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'create_network': grpc.unary_unary_rpc_method_handler(
servicer.create_network,
request_deserializer=kcli__pb2.network.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_network': grpc.unary_unary_rpc_method_handler(
servicer.delete_network,
request_deserializer=kcli__pb2.network.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'create_pool': grpc.unary_unary_rpc_method_handler(
servicer.create_pool,
request_deserializer=kcli__pb2.pool.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_pool': grpc.unary_unary_rpc_method_handler(
servicer.delete_pool,
request_deserializer=kcli__pb2.pool.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Kcli', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
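# Hedged usage sketch (not part of the generated module): how the pieces above
# are typically wired together. The address, thread-pool size and the use of the
# bare KcliServicer base class (which answers every RPC with UNIMPLEMENTED) are
# assumptions for illustration; kcli itself registers its own servicer subclass.
def _example_kcli_roundtrip(address='127.0.0.1:50051'):
    from concurrent import futures
    # Server side: register a servicer and start listening.
    server = grpc.server(futures.ThreadPoolExecutor(max_workers=4))
    add_KcliServicer_to_server(KcliServicer(), server)
    server.add_insecure_port(address)
    server.start()
    # Client side: open a channel and call through the generated stub.
    with grpc.insecure_channel(address) as channel:
        stub = KcliStub(channel)
        try:
            stub.list_pools(kcli__pb2.empty())
        except grpc.RpcError as err:
            # The unmodified base servicer reports UNIMPLEMENTED for every method.
            assert err.code() == grpc.StatusCode.UNIMPLEMENTED
    server.stop(None)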
# This class is part of an EXPERIMENTAL API.
class Kcli(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def console(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/console',
kcli__pb2.vm.SerializeToString,
kcli__pb2.cmd.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def info(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/info',
kcli__pb2.vm.SerializeToString,
kcli__pb2.vminfo.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list',
kcli__pb2.client.SerializeToString,
kcli__pb2.vmlist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_disks(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_disks',
kcli__pb2.empty.SerializeToString,
kcli__pb2.diskslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_flavors(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_flavors',
kcli__pb2.empty.SerializeToString,
kcli__pb2.flavorslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_images(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_images',
kcli__pb2.empty.SerializeToString,
kcli__pb2.imageslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_networks(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_networks',
kcli__pb2.empty.SerializeToString,
kcli__pb2.networkslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_isos(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_isos',
kcli__pb2.empty.SerializeToString,
kcli__pb2.isoslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_pools(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_pools',
kcli__pb2.empty.SerializeToString,
kcli__pb2.poolslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_subnets(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/list_subnets',
kcli__pb2.empty.SerializeToString,
kcli__pb2.subnetslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def restart(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/restart',
kcli__pb2.vm.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def serial_console(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/serial_console',
kcli__pb2.vm.SerializeToString,
kcli__pb2.cmd.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def ssh(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/ssh',
kcli__pb2.vm.SerializeToString,
kcli__pb2.sshcmd.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def scp(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/scp',
kcli__pb2.scpdetails.SerializeToString,
kcli__pb2.sshcmd.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def start(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/start',
kcli__pb2.vm.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def stop(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/stop',
kcli__pb2.vm.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/delete',
kcli__pb2.vm.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def get_lastvm(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/get_lastvm',
kcli__pb2.client.SerializeToString,
kcli__pb2.vm.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_image(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/delete_image',
kcli__pb2.image.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def create_network(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/create_network',
kcli__pb2.network.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_network(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/delete_network',
kcli__pb2.network.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def create_pool(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/create_pool',
kcli__pb2.pool.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_pool(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kcli/delete_pool',
kcli__pb2.pool.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
class KconfigStub(object):
"""Missing associated documentation comment in .proto file"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.create_vm = channel.unary_unary(
'/Kconfig/create_vm',
request_serializer=kcli__pb2.vmprofile.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.get_config = channel.unary_unary(
'/Kconfig/get_config',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.config.FromString,
)
self.get_version = channel.unary_unary(
'/Kconfig/get_version',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.version.FromString,
)
self.create_host = channel.unary_unary(
'/Kconfig/create_host',
request_serializer=kcli__pb2.client.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_host = channel.unary_unary(
'/Kconfig/delete_host',
request_serializer=kcli__pb2.client.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_container = channel.unary_unary(
'/Kconfig/delete_container',
request_serializer=kcli__pb2.container.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_lb = channel.unary_unary(
'/Kconfig/delete_lb',
request_serializer=kcli__pb2.lb.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_kube = channel.unary_unary(
'/Kconfig/delete_kube',
request_serializer=kcli__pb2.kube.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_plan = channel.unary_unary(
'/Kconfig/delete_plan',
request_serializer=kcli__pb2.plan.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_profile = channel.unary_unary(
'/Kconfig/delete_profile',
request_serializer=kcli__pb2.profile.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.delete_repo = channel.unary_unary(
'/Kconfig/delete_repo',
request_serializer=kcli__pb2.repo.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.list_containers = channel.unary_unary(
'/Kconfig/list_containers',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.containerslist.FromString,
)
self.list_container_images = channel.unary_unary(
'/Kconfig/list_container_images',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.imageslist.FromString,
)
self.list_hosts = channel.unary_unary(
'/Kconfig/list_hosts',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.clientslist.FromString,
)
self.list_keywords = channel.unary_unary(
'/Kconfig/list_keywords',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.keywordslist.FromString,
)
self.list_kubes = channel.unary_unary(
'/Kconfig/list_kubes',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.kubeslist.FromString,
)
self.list_lbs = channel.unary_unary(
'/Kconfig/list_lbs',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.lbslist.FromString,
)
self.list_plans = channel.unary_unary(
'/Kconfig/list_plans',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.planslist.FromString,
)
self.list_profiles = channel.unary_unary(
'/Kconfig/list_profiles',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.profileslist.FromString,
)
self.list_products = channel.unary_unary(
'/Kconfig/list_products',
request_serializer=kcli__pb2.product.SerializeToString,
response_deserializer=kcli__pb2.productslist.FromString,
)
self.list_repos = channel.unary_unary(
'/Kconfig/list_repos',
request_serializer=kcli__pb2.empty.SerializeToString,
response_deserializer=kcli__pb2.reposlist.FromString,
)
self.restart_container = channel.unary_unary(
'/Kconfig/restart_container',
request_serializer=kcli__pb2.container.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.start_container = channel.unary_unary(
'/Kconfig/start_container',
request_serializer=kcli__pb2.container.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.stop_container = channel.unary_unary(
'/Kconfig/stop_container',
request_serializer=kcli__pb2.container.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.autostart_plan = channel.unary_unary(
'/Kconfig/autostart_plan',
request_serializer=kcli__pb2.plan.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.noautostart_plan = channel.unary_unary(
'/Kconfig/noautostart_plan',
request_serializer=kcli__pb2.plan.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.start_plan = channel.unary_unary(
'/Kconfig/start_plan',
request_serializer=kcli__pb2.plan.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.stop_plan = channel.unary_unary(
'/Kconfig/stop_plan',
request_serializer=kcli__pb2.plan.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
self.switch_host = channel.unary_unary(
'/Kconfig/switch_host',
request_serializer=kcli__pb2.client.SerializeToString,
response_deserializer=kcli__pb2.result.FromString,
)
class KconfigServicer(object):
"""Missing associated documentation comment in .proto file"""
def create_vm(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def get_config(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def get_version(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def create_host(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_host(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_container(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_lb(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_kube(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_plan(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_profile(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def delete_repo(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_containers(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_container_images(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_hosts(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_keywords(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_kubes(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_lbs(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_plans(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_profiles(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_products(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def list_repos(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def restart_container(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def start_container(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def stop_container(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def autostart_plan(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def noautostart_plan(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def start_plan(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def stop_plan(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def switch_host(self, request, context):
"""Missing associated documentation comment in .proto file"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_KconfigServicer_to_server(servicer, server):
rpc_method_handlers = {
'create_vm': grpc.unary_unary_rpc_method_handler(
servicer.create_vm,
request_deserializer=kcli__pb2.vmprofile.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'get_config': grpc.unary_unary_rpc_method_handler(
servicer.get_config,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.config.SerializeToString,
),
'get_version': grpc.unary_unary_rpc_method_handler(
servicer.get_version,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.version.SerializeToString,
),
'create_host': grpc.unary_unary_rpc_method_handler(
servicer.create_host,
request_deserializer=kcli__pb2.client.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_host': grpc.unary_unary_rpc_method_handler(
servicer.delete_host,
request_deserializer=kcli__pb2.client.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_container': grpc.unary_unary_rpc_method_handler(
servicer.delete_container,
request_deserializer=kcli__pb2.container.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_lb': grpc.unary_unary_rpc_method_handler(
servicer.delete_lb,
request_deserializer=kcli__pb2.lb.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_kube': grpc.unary_unary_rpc_method_handler(
servicer.delete_kube,
request_deserializer=kcli__pb2.kube.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_plan': grpc.unary_unary_rpc_method_handler(
servicer.delete_plan,
request_deserializer=kcli__pb2.plan.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_profile': grpc.unary_unary_rpc_method_handler(
servicer.delete_profile,
request_deserializer=kcli__pb2.profile.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'delete_repo': grpc.unary_unary_rpc_method_handler(
servicer.delete_repo,
request_deserializer=kcli__pb2.repo.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'list_containers': grpc.unary_unary_rpc_method_handler(
servicer.list_containers,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.containerslist.SerializeToString,
),
'list_container_images': grpc.unary_unary_rpc_method_handler(
servicer.list_container_images,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.imageslist.SerializeToString,
),
'list_hosts': grpc.unary_unary_rpc_method_handler(
servicer.list_hosts,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.clientslist.SerializeToString,
),
'list_keywords': grpc.unary_unary_rpc_method_handler(
servicer.list_keywords,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.keywordslist.SerializeToString,
),
'list_kubes': grpc.unary_unary_rpc_method_handler(
servicer.list_kubes,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.kubeslist.SerializeToString,
),
'list_lbs': grpc.unary_unary_rpc_method_handler(
servicer.list_lbs,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.lbslist.SerializeToString,
),
'list_plans': grpc.unary_unary_rpc_method_handler(
servicer.list_plans,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.planslist.SerializeToString,
),
'list_profiles': grpc.unary_unary_rpc_method_handler(
servicer.list_profiles,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.profileslist.SerializeToString,
),
'list_products': grpc.unary_unary_rpc_method_handler(
servicer.list_products,
request_deserializer=kcli__pb2.product.FromString,
response_serializer=kcli__pb2.productslist.SerializeToString,
),
'list_repos': grpc.unary_unary_rpc_method_handler(
servicer.list_repos,
request_deserializer=kcli__pb2.empty.FromString,
response_serializer=kcli__pb2.reposlist.SerializeToString,
),
'restart_container': grpc.unary_unary_rpc_method_handler(
servicer.restart_container,
request_deserializer=kcli__pb2.container.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'start_container': grpc.unary_unary_rpc_method_handler(
servicer.start_container,
request_deserializer=kcli__pb2.container.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'stop_container': grpc.unary_unary_rpc_method_handler(
servicer.stop_container,
request_deserializer=kcli__pb2.container.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'autostart_plan': grpc.unary_unary_rpc_method_handler(
servicer.autostart_plan,
request_deserializer=kcli__pb2.plan.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'noautostart_plan': grpc.unary_unary_rpc_method_handler(
servicer.noautostart_plan,
request_deserializer=kcli__pb2.plan.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'start_plan': grpc.unary_unary_rpc_method_handler(
servicer.start_plan,
request_deserializer=kcli__pb2.plan.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'stop_plan': grpc.unary_unary_rpc_method_handler(
servicer.stop_plan,
request_deserializer=kcli__pb2.plan.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
'switch_host': grpc.unary_unary_rpc_method_handler(
servicer.switch_host,
request_deserializer=kcli__pb2.client.FromString,
response_serializer=kcli__pb2.result.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'Kconfig', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Kconfig(object):
"""Missing associated documentation comment in .proto file"""
@staticmethod
def create_vm(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/create_vm',
kcli__pb2.vmprofile.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def get_config(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/get_config',
kcli__pb2.empty.SerializeToString,
kcli__pb2.config.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def get_version(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/get_version',
kcli__pb2.empty.SerializeToString,
kcli__pb2.version.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def create_host(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/create_host',
kcli__pb2.client.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_host(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_host',
kcli__pb2.client.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_container(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_container',
kcli__pb2.container.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_lb(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_lb',
kcli__pb2.lb.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_kube(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_kube',
kcli__pb2.kube.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_plan(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_plan',
kcli__pb2.plan.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_profile(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_profile',
kcli__pb2.profile.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def delete_repo(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/delete_repo',
kcli__pb2.repo.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_containers(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_containers',
kcli__pb2.empty.SerializeToString,
kcli__pb2.containerslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_container_images(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_container_images',
kcli__pb2.empty.SerializeToString,
kcli__pb2.imageslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_hosts(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_hosts',
kcli__pb2.empty.SerializeToString,
kcli__pb2.clientslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_keywords(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_keywords',
kcli__pb2.empty.SerializeToString,
kcli__pb2.keywordslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_kubes(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_kubes',
kcli__pb2.empty.SerializeToString,
kcli__pb2.kubeslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_lbs(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_lbs',
kcli__pb2.empty.SerializeToString,
kcli__pb2.lbslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_plans(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_plans',
kcli__pb2.empty.SerializeToString,
kcli__pb2.planslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_profiles(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_profiles',
kcli__pb2.empty.SerializeToString,
kcli__pb2.profileslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_products(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_products',
kcli__pb2.product.SerializeToString,
kcli__pb2.productslist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def list_repos(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/list_repos',
kcli__pb2.empty.SerializeToString,
kcli__pb2.reposlist.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def restart_container(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/restart_container',
kcli__pb2.container.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def start_container(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/start_container',
kcli__pb2.container.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def stop_container(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/stop_container',
kcli__pb2.container.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def autostart_plan(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/autostart_plan',
kcli__pb2.plan.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def noautostart_plan(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/noautostart_plan',
kcli__pb2.plan.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def start_plan(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/start_plan',
kcli__pb2.plan.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def stop_plan(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/stop_plan',
kcli__pb2.plan.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def switch_host(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/Kconfig/switch_host',
kcli__pb2.client.SerializeToString,
kcli__pb2.result.FromString,
options, channel_credentials,
call_credentials, compression, wait_for_ready, timeout, metadata)
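    # --- Illustrative note (added, not generated code) ---
    # These experimental helpers can be called without instantiating a stub.
    # A hypothetical example, assuming the enclosing service class is named
    # Kconfig and a kcli gRPC server is listening on localhost:50051 (both
    # assumptions, not taken from this file):
    #
    #   creds = grpc.experimental.insecure_channel_credentials()
    #   containers = Kconfig.list_containers(kcli__pb2.empty(),
    #                                        'localhost:50051',
    #                                        channel_credentials=creds)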
```
#### File: kcli/tests/test_kvirt.py
```python
import random
import string
import time
from kvirt.config import Kconfig
# from kvirt.defaults import TEMPLATES
class TestK:
@classmethod
def setup_class(self):
"""
"""
self.template = "centos7"
self.config = Kconfig()
self.k = self.config.k
name = "test-%s" % ''.join(random.choice(string.ascii_lowercase) for i in range(5))
self.poolpath = "/var/lib/libvirt/%s" % name
self.name = name
def test_list(self):
k = self.k
result = k.list()
assert result is not None
def test_create_pool(self):
k = self.k
k.create_pool(name=self.name, poolpath=self.poolpath)
assert True
def test_download_template(self):
config = self.config
result = config.handle_host(pool=self.name, templates=[self.template], download=True)
assert result["result"] == "success"
def test_create_network(self):
k = self.k
counter = random.randint(1, 254)
k.create_network(name=self.name, cidr='10.0.%s.0/24' % counter, dhcp=True)
assert True
def test_create_vm(self):
config = self.config
k = self.k
time.sleep(10)
result = config.create_vm(self.name, 'CentOS-7-x86_64-GenericCloud.qcow2', overrides={}, k=k)
assert result["result"] == "success"
# k.create(self.name, numcpus=1, memory=512, pool=self.name, nets=[self.name])
# status = k.status(self.name)
# print(status)
# assert status is not None
def test_add_disk(self):
k = self.k
k.add_disk(name=self.name, size=1, pool=self.name)
assert True
def test_stop_vm(self):
k = self.k
result = k.stop(self.name)
assert result["result"] == "success"
def test_start_vm(self):
k = self.k
result = k.start(self.name)
assert result["result"] == "success"
def test_delete_vm(self):
k = self.k
result = k.delete(self.name)
assert result["result"] == "success"
@classmethod
def teardown_class(self):
"""
"""
print("Cleaning stuff")
k = self.k
time.sleep(10)
k.delete_network(self.name)
# k.delete_image(TEMPLATES[self.template])
k.delete_pool(self.name, full=True)
``` |
{
"source": "jmolmo/managed-tenants-cli",
"score": 2
} |
#### File: core/tasks_loader/environment.py
```python
from managedtenants.data.environments import ENVIRONMENTS
class Environment:
def __init__(self, environment, args):
self.name = environment
self.ocm_api_insecure = args.ocm_api_insecure
if args.ocm_api:
self.ocm_api = args.ocm_api
else:
self.ocm_api = ENVIRONMENTS[self.name]["ocm_api"]
def __repr__(self):
return f"{self.__class__.__name__}({repr(self.name)})"
```
#### File: managedtenants/utils/quay_api.py
```python
import os
import requests
from sretoolbox.utils import retry
from sretoolbox.utils.logger import get_text_logger
class QuayAPIError(Exception):
"""Used when there are errors with the Quay API."""
def __init__(self, message, response):
super().__init__(message)
self.response = response
def retry_hook(exception):
"""Retries on 5xx QuayApiError and all other requests exceptions."""
if (
isinstance(exception, QuayAPIError)
and exception.response.status_code < 500
):
raise exception
# Ignore all other exceptions
# https://docs.python-requests.org/en/latest/api/#exceptions
class QuayAPI:
"""
Abstraction around the Quay.io API.
View swagger docs here: https://docs.quay.io/api/swagger/.
"""
def __init__(self, org="osd-addons", token=None, base_url="quay.io"):
"""
Creates a Quay API abstraction.
:param org: (optional) Name of your quay organization.
Default: 'osd-addons'
:param token: (optional) Quay OAuth Application token (no robot account)
            Default: value of env QUAY_APIKEY
:param base_url: (optional) Quay base API server url. Default: 'quay.io'
:raise ValueError: invalid empty token
"""
self.token = _get_token_or_fail(token)
self.org = org
self.headers = {
"Content-Type": "application/json",
"Authorization": f"Bearer {self.token}",
}
self.api_url = f"https://{base_url}/api/v1"
self.log = get_text_logger("app")
def ensure_repo(self, repo_name, dry_run=False):
"""
Validates that the required quay repository exists.
Robot accounts are configured to get automatic write access on new
repositories so we do not need to modify the permissions.
:return: true if repo exists or was created successfully
:rtype: bool
        :raise QuayApiError: the operation failed
"""
if dry_run:
return True
if not self.repo_exists(repo_name):
self.log.info(
"Creating Quay repository %s",
f"{self.org}/{repo_name}",
)
return self.repo_create(repo_name)
self.log.info(
"Quay repository %s already exists.",
f"{self.org}/{repo_name}",
)
return True
def repo_exists(self, repo_name):
"""
Checks if a repo exists.
:param repo_name: Name of the repository
:type repo_name: str
:return: response.status_code is 2XX
:rtype: bool
:raise QuayApiError: the operation failed
"""
url = f"{self.api_url}/repository/{self.org}/{repo_name}"
params = {
"includeTags": False,
"includeStats": False,
}
response = self._api(
method=requests.get, url=url, dont_raise_for=[404], params=params
)
return _is_200(response.status_code)
def repo_create(self, repo_name):
"""
Creates a public repository called repo_name.
:param repo_name: Name of the repository
:type repo_name: str
:return: response.status_code is 2XX
:rtype: bool
:raise QuayApiError: the operation fails
"""
url = f"{self.api_url}/repository"
params = {
"repo_kind": "image",
"namespace": self.org,
"visibility": "public",
"repository": repo_name,
"description": "",
}
response = self._api(requests.post, url, json=params)
return _is_200(response.status_code)
@retry(hook=retry_hook)
def _api(self, method, url, dont_raise_for=None, **kwargs):
dont_raise_for = [] if dont_raise_for is None else dont_raise_for
response = method(url, headers=self.headers, **kwargs)
# Don't raise for certain HTTP response code, e.g.: 404 not found.
if response.status_code not in dont_raise_for:
_raise_for_status(response, method, url, **kwargs)
self.log.info("JSON response: %s", response.json())
self.log.info("status_code: %s", response.status_code)
return response
def _is_200(status_code):
return 200 <= status_code < 300
def _get_token_or_fail(token):
res = token if token is not None else os.environ.get("QUAY_APIKEY")
if token == "":
raise ValueError("Invalid empty QUAY_APIKEY environment variable.")
return res
def _raise_for_status(response, method, url, **kwargs):
try:
response.raise_for_status()
except requests.exceptions.HTTPError as exception:
method = method.__name__.upper()
error_message = f"Error {method} {url}\n{exception}\n"
if kwargs.get("params"):
error_message += f"params: {kwargs['params']}\n"
if kwargs.get("json"):
error_message += f"json: {kwargs['json']}\n"
error_message += f"original error: {response.text}"
raise QuayAPIError(error_message, response)
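# --- Illustrative usage sketch (added; not part of the upstream module) ---
# Requires a real token in the QUAY_APIKEY environment variable; the
# repository name below is a hypothetical example.
if __name__ == "__main__":
    api = QuayAPI(org="osd-addons")
    print(api.repo_exists("example-addon"))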
```
#### File: tests/sss/test_deadmanssnitch.py
```python
import pytest
from tests.testutils.addon_helpers import ( # noqa: F401
addon_with_deadmanssnitch,
)
@pytest.mark.parametrize(
"addon_str",
[
"addon_with_deadmanssnitch",
],
)
def test_deadmansnitch(addon_str, request):
addon = request.getfixturevalue(addon_str)
expected_data = {
"metadata": {
"name": f"addon-{addon.metadata['id']}",
},
"spec": {
"clusterDeploymentSelector": {
"matchExpressions": [
{
"key": (
f"api.openshift.com/addon-{addon.metadata['id']}"
),
"operator": "In",
"values": ["true"],
}
]
},
"snitchNamePostFix": addon.metadata["deadmanssnitch"][
"snitchNamePostFix"
],
"tags": addon.metadata["deadmanssnitch"]["tags"],
"targetSecretRef": {
"name": f"{addon.metadata['id']}-deadmanssnitch",
"namespace": addon.metadata["targetNamespace"],
},
},
}
sss_walker = addon.sss.walker()
deadmanssnitch_obj = sss_walker["dms"]
assert deadmanssnitch_obj is not None
assert deadmanssnitch_obj.get("metadata") is not None
assert deadmanssnitch_obj.get("spec") is not None
metadata, spec = deadmanssnitch_obj.get("metadata"), deadmanssnitch_obj.get(
"spec"
)
assert metadata.get("name") == "addon-test-operator"
assert (
spec.get("clusterDeploymentSelector")
== expected_data["spec"]["clusterDeploymentSelector"]
)
assert (
spec.get("snitchNamePostFix")
== expected_data["spec"]["snitchNamePostFix"]
)
assert spec.get("tags") == expected_data["spec"]["tags"]
assert (
spec.get("targetSecretRef") == expected_data["spec"]["targetSecretRef"]
)
```
#### File: tests/utils/test_change_detector.py
```python
from pathlib import Path
import pytest
from managedtenants.utils.git import ChangeDetector
ROOT = Path("/addons")
@pytest.mark.parametrize(
"data",
[
{
"parents": set(
[
ROOT / "addon-one",
ROOT / "addon-two",
ROOT / "addon-three",
]
),
"children": set(
[
ROOT / "addon-one/some/file",
ROOT / "addon-one/another/file",
]
),
"expected": set([ROOT / "addon-one"]),
},
{
"parents": set(
[
ROOT / "addon-one",
ROOT / "addon-two",
ROOT / "addon-three",
]
),
"children": set(
[
ROOT / "addon-one/some/file",
ROOT / "addon-two/another/file",
ROOT / "addon-three/yippy",
]
),
"expected": set(
[
ROOT / "addon-one",
ROOT / "addon-two",
ROOT / "addon-three",
]
),
},
{
"parents": set(
[
ROOT / "addon-one",
ROOT / "addon-two",
ROOT / "addon-three",
]
),
"children": set(
[
ROOT / "addon-four/some/file",
ROOT / "addon-five/another/file",
ROOT / "addon-six/yippy",
]
),
"expected": set(),
},
],
)
def test_change_detector_intersect(data):
cd = ChangeDetector(addons_dir="unused")
got = cd._intersect(data["parents"], data["children"])
assert len(got) == len(data["expected"])
for e in data["expected"]:
assert e in got
``` |
{
"source": "jmolvr/alertAPI",
"score": 2
} |
#### File: alertAPI/alert/consumers.py
```python
from asgiref.sync import async_to_sync
from channels.layers import get_channel_layer
from channels.generic.websocket import AsyncJsonWebsocketConsumer
from alert.models import Alert
from alert.serializers import AlertSerializer
from django.db.models.signals import post_save, post_delete
from django.dispatch import receiver
class AlertConsumer(AsyncJsonWebsocketConsumer):
async def connect(self):
        group_name = AlertSerializer.get_group()  # just to keep the group naming standardized
self.room_name = self.scope['url_route']['kwargs']['room_name']
self.room_group_name = group_name + "_" + self.room_name
await self.channel_layer.group_add(
self.room_group_name,
self.channel_name
)
await self.accept()
async def disconnect(self, close_code):
await self.channel_layer.group_discard(
self.room_group_name,
self.channel_name
)
async def receive_json(self, text_data):
group_name = "alertas"
self.groups.append(group_name)
async def notify(self, event):
await self.send_json(event['content'])
@receiver([post_save, post_delete], sender=Alert)
def update_alerts(sender, instance, **kwargs):
alertas = Alert.objects.order_by('-created_at')
alertas_unsolved = alertas.filter(status=0).order_by("-created_at")
serializer = AlertSerializer(alertas, many=True)
serializer_unsolved = AlertSerializer(alertas_unsolved, many=True)
group_name = "alertas_todos"
group_name_unsolved = "alertas_unsolved"
channel_layer = get_channel_layer()
content = {
"type": "UPDATE_ALERTS",
"payload": serializer.data
}
content_unsolved = {
"type": "UPDATE_ALERTS",
"payload": serializer_unsolved.data
}
async_to_sync(channel_layer.group_send)(
group_name,
{
"type": "notify",
"content": content,
}
)
async_to_sync(channel_layer.group_send)(
group_name_unsolved,
{
"type": "notify",
"content": content_unsolved,
}
)
```
#### File: alertAPI/alert/serializers.py
```python
from rest_framework import serializers
from django.core.exceptions import ObjectDoesNotExist, ValidationError
from .models import Alert, Tipo, LocalUnifap
from user.serializers import UserSerializer
from channels.layers import get_channel_layer
class TipoCustomSerializer(serializers.ModelSerializer):
class Meta:
model = Tipo
exclude = []
class LocalUnifapSerializer(serializers.ModelSerializer):
class Meta:
model = LocalUnifap
exclude = []
class AlertSerializer(serializers.ModelSerializer):
tipo = TipoCustomSerializer(read_only=True)
local = LocalUnifapSerializer(read_only=True)
owner = UserSerializer(read_only=True)
class Meta:
model = Alert
exclude = []
def create(self, validated_data):
return Alert.objects.create(**validated_data)
def update(self, instance, validate_data):
request = self.context['request']
if request.user.is_admin:
instance.descricao = instance.descricao
instance.prazo = validate_data.get('prazo', instance.prazo)
instance.feedback = validate_data.get(
'feedback', instance.feedback)
else:
instance.descricao = validate_data.get(
'descricao', instance.descricao)
instance.status = validate_data.get('status', instance.status)
instance.save()
return instance
def to_internal_value(self, data):
request = self.context['request']
if request.method == "PUT":
return self.put_validate_data(request, data)
latitude = data.get('latitude')
longitude = data.get('longitude')
descricao = data.get('descricao')
local = data.get('local')
tipo = data.get('tipo')
image = data.get('image')
if latitude is None:
raise serializers.ValidationError({
'latitude': 'This field is required'
})
if longitude is None:
raise serializers.ValidationError({
'longitude': 'This field is required'
})
if not descricao:
raise serializers.ValidationError({
'descricao': 'This field is required'
})
if not local:
raise serializers.ValidationError({
'local': 'This field is required'
})
if not tipo:
raise serializers.ValidationError({
'tipo': 'This field is required'
})
try:
tipo = Tipo.objects.get(nome=tipo)
        except ObjectDoesNotExist:
raise serializers.ValidationError({
'tipo': 'Tipo {} não está registrado'.format(tipo)
})
try:
local = LocalUnifap.objects.get(nome=local)
        except ObjectDoesNotExist:
local = LocalUnifap(nome=local)
local.save()
return {
'latitude': latitude,
'longitude': longitude,
'descricao': descricao,
'tipo': tipo,
'local': local,
'image': image,
'owner': request.user,
}
def put_validate_data(self, request, data):
descricao = data.get("descricao")
prazo = data.get('prazo')
status = data.get('status')
feedback = data.get('feedback')
return({
'descricao': descricao,
'prazo': prazo,
'status': status,
'feedback': feedback
})
@staticmethod
def get_group():
return 'alertas'
``` |
{
"source": "jmonrods/pyeis",
"score": 3
} |
#### File: jmonrods/pyeis/arv.py
```python
import numpy as np
# Function for series circuit calculation
def s(*argv):
z=0
for arg in argv:
if (arg!=None):
z = z + arg
return z
# Function for parallel circuit calculation
def p(*argv):
z=0
for arg in argv:
if (arg!=None):
z = z + (1/arg)
z = (1/z)
return z
def impedance(freq,circ,comps,param):
#Replacing simbols with values in the circuit
for f in range(len(param)):
circ = circ.replace(comps[f],str(param[f]),1)
    # Calculate the equivalent impedance
Zeq = eval(circ)
#Store impedance parameters
sol = [freq,Zeq.real,Zeq.imag,abs(Zeq),np.angle(Zeq, deg=True)]
return sol
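# --- Illustrative usage sketch (added; values are placeholders for demonstration) ---
# A 100 ohm resistor in series with a 1 uF capacitor, evaluated at 1 kHz.
if __name__ == "__main__":
    f_demo = 1000.0
    z_cap = 1 / (complex(0, 1) * 2 * np.pi * f_demo * 1e-6)  # capacitor impedance at f_demo
    # impedance() returns [freq, Re(Z), Im(Z), |Z|, phase in degrees]
    print(impedance(f_demo, 's(R0,C0)', ['R0', 'C0'], [100, z_cap]))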
```
#### File: jmonrods/pyeis/datasim.py
```python
import numpy as np
import math
from arv import *
from replot2 import *
#===#Initial parameters to test the algorithm#===#
#circ = 's(R1,p(R1,C1),p(R1,C1),p(R1,C1))' #Circuit Configuration
#param = [30,30,60,30,60,30,60]
#freq = np.array([1.0000e+06, 7.9436e+05, 6.3100e+05, 5.0127e+05, 3.9814e+05, 3.1623e+05, 2.5119e+05, 1.9963e+05, 1.5850e+05, 1.2592e+05, 1.0002e+05, 7.9512e+04, 6.3105e+04, 5.0215e+04, 3.9902e+04, 3.1699e+04, 2.5137e+04, 1.9980e+04, 1.5879e+04, 1.2598e+04, 1.0020e+04, 7.9282e+03, 6.3079e+03, 5.0347e+03, 3.9931e+03, 3.1441e+03, 2.5195e+03, 2.0008e+03, 1.5807e+03, 1.2536e+03, 1.0007e+03, 7.9003e+02, 6.2881e+02, 5.0223e+02, 4.0015e+02, 3.1550e+02, 2.5202e+02, 2.0032e+02, 1.5801e+02, 1.2556e+02, 9.9734e+01, 7.9449e+01, 6.3345e+01, 4.9867e+01, 3.8422e+01, 3.1250e+01, 2.4934e+01, 2.0032e+01, 1.5625e+01, 1.2467e+01, 9.9734e+00, 7.9719e+00, 6.3516e+00, 5.0134e+00, 3.9860e+00, 3.1758e+00, 2.5161e+00, 1.9955e+00, 1.5879e+00, 1.2581e+00, 9.9777e-01, 7.9274e-01, 6.2903e-01, 4.9888e-01, 3.9860e-01, 3.1672e-01, 2.5148e-01, 1.9998e-01, 1.5879e-01, 1.2601e-01, 1.0016e-01])
#Function for impedance calculation
def zcalc(elem,p,f):
#Global parameters
w = 2*f*(math.pi)
ii = complex(0,1)
if (elem=='R'):
z = p[0]
elif (elem=='C'):
z = 1/(ii*w*p[0])
elif (elem=='L'):
z = ii*w*p[0]
elif (elem=='E'):
z = 1/(p[0]*(ii*w)**p[1])
else:
        raise ValueError('Unknown circuit element: %s' % elem)
return z
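# Examples (added for clarity): zcalc('R', [100], 1000) returns 100, while
# zcalc('C', [1e-6], 1000) returns the complex impedance 1/(1j*2*pi*1000*1e-6).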
def simulated (circ,param,freq):
#Simulated data matrix (list)
DataSim = []
#Creating Impedance Matrix
z = np.zeros((len(freq),len(param)),dtype=complex)
#print(z)
    # Characters to strip from the circuit string
delete = ['p','s','(',')',',']
element = circ
comp=[]
#Deleting the characters from the circuit string
for i in range(len(delete)):
element = element.replace(delete[i],'')
#Calculation for impedance for each element of the circuit
k=0 #index of element we are using
idd=0
for j in range(0,len(element),2):
        nlp = int(element[j+1])  # Number of parameters for this element
        actparam = param[0:nlp]  # Initial parameters for the current circuit element
        param = param[nlp:]  # Remove the parameters that have already been used
        # Calculate the impedance of the current element
for i in range(len(freq)):
z[i][k] = zcalc(element[j],actparam,freq[i])
#Updating index of the element
if idd!=circ.index(element[j+1]):
circ = circ.replace(element[j:j+2],element[j]+str(k),1)
idd=circ.index(element[j+1])
comp.append((element[j]+str(k)))
else:
novo = circ[idd+1:]
id2 = novo.index(element[j+1])
idd = idd + id2+1
circ = circ[:idd]+str(k)+circ[idd+1:]
comp.append((element[j]+str(k)))
k = k + 1
#print('circuito: ',circ)
    # Calculate simulated data for all the frequencies
    # Simulated data will be stored in the DataSim variable
for h in range(len(z)):
impe = impedance(freq[h],circ,comp,z[h])
DataSim.append(impe)
# print('\n'.join([''.join(['{:4}'.format(item) for item in row])
# for row in z]))
#print(z)
return np.array(DataSim)
#data = simulated (circ,param,freq)
#datat = [data]
#print(datat)
#replot2(datat,'Bode')
```
#### File: jmonrods/pyeis/eistoolbox.py
```python
from tkinter import *
import pandas as pd
import PIL.Image
from PIL import ImageTk
#from PIL import ImageTk,Image
from upfiles import *
from replot2 import *
from fitting_eng import *
#from radscirc import *
# GLOBAL VARIABLES ============================================================
circStr = "s(R1,p(R1,C1),p(R1,C1),p(R1,C1))"
param= '[100,100,1e-6,100,1e-6,100,1e-6]'
LB= "[0,0,0,0,0,0,0]"
HB= "[9999,9999,9999,9999,9999,9999,9999]"
# FUNCTION DEFINITIONS ========================================================
#Function to load files
def loadfiles():
#Declaring global variable
global DataM
#Code actions
DataM = upfiles()
dqty.set(len(DataM))
#print(len(DataM), "files loaded!")
#Optimizer Method Selection
def chmethod(selection):
#Declaring global variable
global Opmethod
#Assigning new value
Opmethod = selection
#Weight Selection
def selweight(selection):
#Declaring global variable
global weightsel
#Assigning new value
weightsel = selection
#Input data plotting
def inprint(selection):
replot2(DataM,selection)
#Function to convert a string list into a float list
def str2flt(a):
    # Characters to delete from the string
delete = ['[',']']
change = [',']
element = a
#Deleting the characters from the string
for i in range(len(delete)):
element = element.replace(delete[i],'')
#Changing the commas from the string
for i in range(len(change)):
element = element.replace(change[i],' ')
floats_list = []
for item in element.split():
floats_list.append(float(item))
return(floats_list)
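# Example (added for clarity): str2flt('[100,1e-6,100]') returns [100.0, 1e-06, 100.0]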
#Output data plotting
def outprint(selection):
#print(DataOut)
replot2(DataOut,selection)
#Circuit String configuration selection
def fcircuit():
loopc = Tk()
r = IntVar()
global circStr
global param
global LB
global HB
#Images for circuits
#c_image1 = ImageTk.PhotoImage(Image.open("images/c_ladder.png"))
#c_image2 = ImageTk.PhotoImage(Image.open("images/c_randles.png"))
#c_image3 = ImageTk.PhotoImage(Image.open("images/c_voigt2.png"))
#c_image4 = ImageTk.PhotoImage(Image.open("images/c_voigt3.png"))
#Label
def option(value):
#global variables
global circStr
global param
global LB
global HB
#Actions
if (value==1):
circStr= 's(R1,p(C1,s(R1,p(R1,C1))))'
param= '[100,1e-6,100,100,1e-6]'
LB= '[0,0,0,0,0]'
HB= '[9999,9999,9999,9999,9999]'
elif (value==2):
circStr= 's(R1,p(R1,C1))'
param= '[100,100,1e-6]'
LB= '[0,0,0]'
HB= '[9999,9999,9999]'
elif (value==3):
circStr= 's(R1,p(R1,C1),p(R1,C1))'
param= '[100,100,1e-6,100,1e-6]'
LB= '[0,0,0,0,0]'
HB= '[9999,9999,9999,9999,9999]'
elif (value==4):
circStr= 's(R1,p(R1,C1),p(R1,C1),p(R1,C1))'
param= '[100,100,1e-6,100,1e-6,100,1e-6]'
LB= '[0,0,0,0,0,0,0]'
HB= '[9999,9999,9999,9999,9999,9999,9999]'
ent_fitstring.delete(0,END) # sets default string for the circuit
ent_fitstring.insert(0,circStr) # sets default string for the circuit
ent_initparam.delete(0,END) # sets default initial parameters for the circuit
ent_initparam.insert(0,param) # sets default initial parameters for the circuit
ent_lowerlim.delete(0,END) # sets default Lower limits for the circuit
ent_lowerlim.insert(0,LB) # sets default Lower limits for the circuit
ent_upperlim.delete(0,END) # sets default Upper limits for the circuit
ent_upperlim.insert(0,HB) # sets default Upper limits for the circuit
#Buttons to select a circuit
#Radiobutton(loopc, image=c_image1, variable=r, value=1, command=lambda: option(1)).pack()
#Radiobutton(loopc, image=c_image2, variable=r, value=2, command=lambda: option(2)).pack()
#Radiobutton(loopc, image=c_image3, variable=r, value=3, command=lambda: option(3)).pack()
#Radiobutton(loopc, image=c_image4, variable=r, value=4, command=lambda: option(4)).pack()
Radiobutton(loopc, text="ladder", variable=r, value=1, command=lambda: option(1)).pack()
Radiobutton(loopc, text="randles", variable=r, value=2, command=lambda: option(2)).pack()
Radiobutton(loopc, text="voigt2", variable=r, value=3, command=lambda: option(3)).pack()
Radiobutton(loopc, text="voigt3", variable=r, value=4, command=lambda: option(4)).pack()
bnt = Button(loopc, text='Close Window', command= loopc.destroy).pack()
loopc.mainloop()
# Fitting routine and data processing
def printv():
global DataOut
#print(Opmethod)
#print(weightsel,'\n')
paramIni = str2flt(param)
LBi = str2flt(LB)
UBi = str2flt(HB)
Mxiter = ent_iterations.get()
[pbest,bcirc] = fitting_engine(DataM, circStr, paramIni, LBi, UBi, Opmethod, weightsel, Mxiter)
DataOut = bcirc
print('Q Data Out:',len(DataOut))
print(pbest)
# GRAPHICAL USER INTERFACE ELEMENTS ===========================================
# main window
eistoolbox = Tk()
# GLOBAL VARIABLE DEFINITIONS =================================================
dqty = IntVar()
DataM = []
weightsel = 'Unitary'
Opmethod = 'Powell'
#c_image1 = ImageTk.PhotoImage(PIL.Image.open("images/c_ladder.png"))
#c_image2 = ImageTk.PhotoImage(PIL.Image.open("images/c_randles.png"))
#c_image3 = ImageTk.PhotoImage(PIL.Image.open("images/c_voigt2.png"))
#c_image4 = ImageTk.PhotoImage(PIL.Image.open("images/c_voigt3.png"))
# title
title = Label(eistoolbox, text="eistoolbox")
title.pack()
# separator: input data management
txt_idm = Label(eistoolbox, text="Input Data Management")
txt_idm.pack()
# frame: load input data
frm_load = Frame(eistoolbox)
frm_load.pack()
btn_loadfiles = Button(eistoolbox, text="Add files...", command=loadfiles)
btn_loadfiles.pack(in_=frm_load, side=LEFT)
txt_nfilesloaded2 = Label(eistoolbox, text= "data files loaded")
txt_nfilesloaded2.pack(in_=frm_load, side=RIGHT)
txt_nfilesloaded = Label(eistoolbox, textvariable=str(dqty))
txt_nfilesloaded.pack(in_=frm_load, side=RIGHT)
# frame: input data plots
frm_inputplots = Frame(eistoolbox)
frm_inputplots.pack()
txt_plots1 = Label(eistoolbox, text="Plots:")
txt_plots1.pack(in_=frm_inputplots, side=LEFT)
btn_plotnyq1 = Button(eistoolbox, text="Nyq", command=lambda:inprint('Nyquist'))
btn_plotnyq1.pack(in_=frm_inputplots, side=LEFT)
btn_plotbode1 = Button(eistoolbox, text="Bod", command=lambda:inprint('Bode'))
btn_plotbode1.pack(in_=frm_inputplots, side=LEFT)
btn_plotreim1 = Button(eistoolbox, text="Re/Im", command=lambda:inprint('Re/Im'))
btn_plotreim1.pack(in_=frm_inputplots, side=LEFT)
# Some Space
txt_spc = Label(eistoolbox, text=" ")
txt_spc.pack()
# frame: circuit model configuration
txt_cmc = Label(eistoolbox, text="Circuit Model Configuration")
txt_cmc.pack()
#Select circuit model
frm_circs = Frame(eistoolbox)
frm_circs.pack()
txt_circs = Label(eistoolbox, text="Circuit Model:")
txt_circs.pack(in_=frm_circs, side=LEFT)
btn_circuit = Button(eistoolbox, text="Selection", command=fcircuit)#, command=lambda:inprint('Nyquist'))
btn_circuit.pack(in_=frm_circs, side=LEFT)
#Circuit parameter
frm_fitstring = Frame(eistoolbox)
frm_fitstring.pack()
txt_fitstring = Label(eistoolbox, text="Fitting string:")
txt_fitstring.pack(in_=frm_fitstring, side=LEFT)
ent_fitstring = Entry(eistoolbox, width=40)
ent_fitstring.pack(in_=frm_fitstring, side=RIGHT)
ent_fitstring.insert(0,circStr) # sets default string for the circuit
frm_initparam = Frame(eistoolbox)
frm_initparam.pack()
txt_initparam = Label(eistoolbox, text="Initial params:")
txt_initparam.pack(in_=frm_initparam, side=LEFT)
ent_initparam = Entry(eistoolbox, width=40)
ent_initparam.pack(in_=frm_initparam, side=RIGHT)
ent_initparam.insert(0,param) # sets default initial parameters for the circuit
frm_lowerlim = Frame(eistoolbox)
frm_lowerlim.pack()
txt_lowerlim = Label(eistoolbox, text="Lower limits:")
txt_lowerlim.pack(in_=frm_lowerlim, side=LEFT)
ent_lowerlim = Entry(eistoolbox, width=40)
ent_lowerlim.pack(in_=frm_lowerlim, side=RIGHT)
ent_lowerlim.insert(0,LB) # sets default Lower limits for the circuit
frm_upperlim = Frame(eistoolbox)
frm_upperlim.pack()
txt_upperlim = Label(eistoolbox, text="Upper limits:")
txt_upperlim.pack(in_=frm_upperlim, side=LEFT)
ent_upperlim = Entry(eistoolbox, width=40)
ent_upperlim.pack(in_=frm_upperlim, side=RIGHT)
ent_upperlim.insert(0,HB) # sets default Upper limits for the circuit
# Some Space
txt_spc2 = Label(eistoolbox, text=" ")
txt_spc2.pack()
# frame: fitting configuration
txt_fc = Label(eistoolbox, text="Fitting Configuration")
txt_fc.pack()
frm_algorithm = Frame(eistoolbox)
frm_algorithm.pack()
txt_algorithm = Label(eistoolbox, text="Algorithm:")
txt_algorithm.pack(in_=frm_algorithm, side=LEFT)
algorithm = StringVar(eistoolbox) # tk variable to store algorithm selection
choices_algorithm = {'Powell','Nelder-Mead','L-BFGS-B','SLSQP'} # available algorithms
algorithm.set('Powell') # sets the default option
opt_algorithm = OptionMenu(eistoolbox,algorithm,*choices_algorithm, command=chmethod) # the * unpacks the list
opt_algorithm.pack(in_=frm_algorithm, side=RIGHT)
frm_weighting = Frame(eistoolbox)
frm_weighting.pack()
txt_weighting = Label(eistoolbox, text="Weighting:")
txt_weighting.pack(in_=frm_weighting, side=LEFT)
weighting = StringVar(eistoolbox) # tk variable to store weighting selection
choices_weighting = {'Unitary','Proportional','Other'} # available algorithms
weighting.set('Unitary') # sets the default option
opt_weighting = OptionMenu(eistoolbox,weighting,*choices_weighting, command=selweight) # the * unpacks the list
opt_weighting.pack(in_=frm_weighting, side=RIGHT)
frm_iterations = Frame(eistoolbox)
frm_iterations.pack()
txt_iterations = Label(eistoolbox, text="Iterations:")
txt_iterations.pack(in_=frm_iterations, side=LEFT)
ent_iterations = Entry(eistoolbox, width=15)
ent_iterations.pack(in_=frm_iterations, side=RIGHT)
ent_iterations.insert(0,"1000") # sets default value to 1000
btn_fit = Button(eistoolbox, text="FIT", command=printv)
btn_fit.pack()
# Some Space
txt_spc3 = Label(eistoolbox, text=" ")
txt_spc3.pack()
# frame: output data plots
txt_odm = Label(eistoolbox, text="Output Data Management")
txt_odm.pack()
frm_outputplots = Frame(eistoolbox)
frm_outputplots.pack()
txt_plots2 = Label(eistoolbox, text="Plots:")
txt_plots2.pack(in_=frm_outputplots, side=LEFT)
btn_plotnyq2 = Button(eistoolbox, text="Nyq", command=lambda:outprint('Nyquist'))
btn_plotnyq2.pack(in_=frm_outputplots, side=LEFT)
btn_plotbode2 = Button(eistoolbox, text="Bod", command=lambda:outprint('Bode'))
btn_plotbode2.pack(in_=frm_outputplots, side=LEFT)
btn_plotreim2 = Button(eistoolbox, text="Re/Im", command=lambda:outprint('Re/Im'))
btn_plotreim2.pack(in_=frm_outputplots, side=LEFT)
# Version number
txt_version = Label(eistoolbox, text="eistoolbox - Version 1.2")
txt_version.pack()
# MAIN LOOP ===================================================================
eistoolbox.mainloop() # this is the main loop, waits for events to happen
```
#### File: jmonrods/pyeis/fitting_eng.py
```python
from datasim import *
from upfiles import *
import numpy as np
from scipy.optimize import minimize
#=====#Initial parameters to test the algorithm#=====#
##param0 = [100,100,1e-6,100,1e-6,100,1e-6]
##circ0 = 's(R1,p(R1,C1),p(R1,C1),p(R1,C1))'
##data0 = upfiles()
##LB1 = [5,5,0,5,0,5,0]
##UB1 = [300,300,10,300,10,300,10]
#Main Function for Fitting Engine
def fitting_engine(data, circ, param, LB, UB, opmeth, weith, maxiter):
    # List to store all the fitted impedances
bcirc=[]
#Getting the frequency from the experimental data
freq = data[0][:,0]
#Getting the real and imaginary component from the experimental data
dataE = np.array(data)
#print(len(dataE)) Quantity of CSV added
zrzi = np.array([dataE[:,:,1],dataE[:,:,2]])
(pbest)=curfit(param,circ,freq,zrzi,LB,UB,opmeth,weith,maxiter)
for t in range(len(zrzi[0])):
bcirc.append(simulated(circ,pbest[t],freq))
#print(bcirc)
return [pbest,bcirc]
#print(pbest,bcirc)
def curfit(param,circ,freq,zrzi,LB,UB,opmeth,weith,maxiter):
    # Replicate the initial parameters, one copy per experimental data set
nlist = []
for i in range(len(zrzi[0])):
nlist.append(param)
param = nlist
#counter to run the optimization on each csv file selected
k=0
#Distance function
#Returns the distance between the simulated and the experimental impedance
def distance(param):
#Variables to store all the distances
dist = 0
        # Apply the selected weighting
        if (weith=='Unitary'):  # Unitary weighting
            wreal = np.ones((zrzi[0].shape))
            wimag = np.ones((zrzi[0].shape))
        elif (weith=='Proportional'):  # Proportional weighting
            wreal = 1/((zrzi[0,:])**2)
            wimag = 1/((zrzi[1,:])**2)
        else:  # Unitary weighting as the default (arrays so wreal[k][j] indexing works)
            wreal = np.ones((zrzi[0].shape))
            wimag = np.ones((zrzi[0].shape))
#Get the simulated data from external function
dataS = simulated(circ,param,freq)
#Calculating distances for each data-frequency (on the current CSV file)
for j in range(len(freq)):
#Addition of all distances per each frequency data
dist = dist + ((wreal[k][j])*(zrzi[0][k][j]-dataS[j][1])**2 + (wimag[k][j])*(zrzi[1][k][j]-dataS[j][2])**2)
return dist
#distance(param) #call distance function
#Getting pairs of boundaries
bnd = []
for i in range(len(LB)):
bnd.append((LB[i],UB[i]))
#Doing optimization on each group of experimental data
for u in range(len(zrzi[0])):
print('Optimizing data: ',k+1)
        # Use the selected optimization method; param[u] is the initial guess
        # for the current data set and maxiter is the iteration limit from the GUI
        if (opmeth=='Nelder-Mead'):  # Optimization Method Nelder-Mead
            sol = minimize(distance, param[u], method='Nelder-Mead', options={'maxiter': int(maxiter)})
        elif (opmeth=='L-BFGS-B'):  # Optimization Method L-BFGS-B
            sol = minimize(distance, param[u], method='L-BFGS-B', bounds=bnd, options={'maxiter': int(maxiter)})
        elif (opmeth=='SLSQP'):  # Optimization Method SLSQP
            sol = minimize(distance, param[u], method='SLSQP', jac=None, options={'maxiter': int(maxiter)})
        elif (opmeth=='Powell'):  # Optimization Method Powell
            sol = minimize(distance, param[u], method='Powell', bounds=bnd, options={'maxiter': int(maxiter)})
        else:  # Powell Optimization Method as the default
            sol = minimize(distance, param[u], method='Powell', bounds=bnd, options={'maxiter': int(maxiter)})
k=k+1
param[u] = sol.x
print('Optimization ready')
#print(sol)
return (param)
#fitting_engine(data, circ, param, LB, UB, opmeth, weith, maxiter)
#fitting_engine(data0, circ0, param0, LB1, UB1, 'Powell', 'Proportional', 2)
#print(r)
```
#### File: jmonrods/pyeis/radscirc.py
```python
from tkinter import *
from PIL import ImageTk,Image
loopc = Tk()
#Images for circuits
c_image1 = ImageTk.PhotoImage(file="images/c_ladder.png")
c_image2 = ImageTk.PhotoImage(file="images/c_randles.png")
c_image3 = ImageTk.PhotoImage(file="images/c_voigt2.png")
c_image4 = ImageTk.PhotoImage(file="images/c_voigt3.png")
#Circuit Parameters
circStr='1'
param='1'
LB='1'
HB='1'
def fscircuit():
r = IntVar()
#Label
def option(value):
#global variables
global circStr
global param
global LB
global HB
#Actions
if (value==1):
circStr= 's(R1,p(C1,s(R1,p(R1,C1))))'
param= '[100,1e-6,100,100,1e-6]'
LB= '[0,0,0,0,0]'
HB= '[9999,9999,9999,9999,9999]'
elif (value==2):
circStr= 's(R1,p(R1,C1))'
param= '[100,100,1e-6]'
LB= '[0,0,0]'
HB= '[9999,9999,9999]'
elif (value==3):
circStr= 's(R1,p(R1,C1),p(R1,C1))'
param= '[100,100,1e-6,100,1e-6]'
LB= '[0,0,0,0,0]'
HB= '[9999,9999,9999,9999,9999]'
elif (value==4):
circStr= 's(R1,p(R1,C1),p(R1,C1),p(R1,C1))'
param= '[100,100,1e-6,100,1e-6,100,1e-6]'
LB= '[0,0,0,0,0,0,0]'
HB= '[9999,9999,9999,9999,9999,9999,9999]'
#print(circStr,'\n',param,'\n',LB,'\n',HB,'\n')
#Buttons to select a circuit
Radiobutton(loopc, image=c_image1, variable=r, value=1, command=lambda: option(1)).pack()
Radiobutton(loopc, image=c_image2, variable=r, value=2, command=lambda: option(2)).pack()
Radiobutton(loopc, image=c_image3, variable=r, value=3, command=lambda: option(3)).pack()
Radiobutton(loopc, image=c_image4, variable=r, value=4, command=lambda: option(4)).pack()
#Radiobutton(loopc, text="ladder", variable=r, value=1, command=lambda: option(1)).pack()
#Radiobutton(loopc, text="randles", variable=r, value=2, command=lambda: option(2)).pack()
#Radiobutton(loopc, text="voigt2", variable=r, value=3, command=lambda: option(3)).pack()
#Radiobutton(loopc, text="voigt3", variable=r, value=4, command=lambda: option(4)).pack()
bnt = Button(loopc, text='Close Window', command= loopc.destroy).pack()
loopc.mainloop()
return (circStr,param,LB,HB)
#print (circStr,param,LB,HB)
#(a, b, c, d)=fscircuit()
#print(a, b, c, d)
``` |
{
"source": "jmonsalverodilla/house_prices_regression_model",
"score": 3
} |
#### File: house_prices_regression_model/house_prices_regression_model/predict.py
```python
import typing as t
import numpy as np
import pandas as pd
from house_prices_regression_model import __version__ as VERSION
from house_prices_regression_model.processing.data_manager import load_pipeline
from house_prices_regression_model.config.core import load_config_file, SETTINGS_PATH
from house_prices_regression_model.processing.data_validation import validate_inputs
# Config files
config = load_config_file(SETTINGS_PATH)
PIPELINE_ARTIFACT_NAME = config["PIPELINE_ARTIFACT_NAME"]
pipeline_file_name = f"{PIPELINE_ARTIFACT_NAME}_v{VERSION}.pkl"
cb_pipe = load_pipeline(file_name=pipeline_file_name)
#Function
def make_prediction(*,input_data: t.Union[pd.DataFrame, dict],) -> list:
"""Make a prediction using a saved model pipeline."""
df = pd.DataFrame(input_data)
validated_df, error_dict = validate_inputs(input_data=df)
errors_list = list(error_dict.values())
results = {'model_output': None}
if error_dict == {}:
log_predictions = cb_pipe.predict(validated_df)
predictions = [np.exp(pred) for pred in log_predictions]
results['model_output'] = predictions
else:
results['model_output'] = 'Errors making prediction:' + ' '.join(map(str, errors_list))
return results
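# --- Illustrative usage sketch (added) ---
# Running this requires the trained pipeline artifact loaded above. The feature
# names and values are assumptions for illustration, based on the Kaggle-style
# house price columns used elsewhere in this project.
if __name__ == "__main__":
    sample = {
        "OverallQual": [7],
        "GrLivArea": [1710],
        "TotalBsmtSF": [856],
        "CentralAir": ["Y"],
        "FireplaceQu": ["Gd"],
        "BsmtFinSF1": [706],
        "LotArea": [8450],
        "GarageCars": [2],
        "YearBuilt": [2003],
        "KitchenQual": ["Gd"],
    }
    print(make_prediction(input_data=sample))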
```
#### File: house_prices_regression_model/house_prices_regression_model/train_pipeline.py
```python
import numpy as np
from pathlib import Path
#Module imports
from house_prices_regression_model.pipeline import cb_pipe
from house_prices_regression_model.config.core import load_config_file, DATASET_DIR, FILE_NAME_DATA_TRAIN, SETTINGS_PATH
from house_prices_regression_model.processing.data_manager import load_dataset, save_pipeline
# Config file
config = load_config_file(SETTINGS_PATH)
TARGET = config["TARGET"]
VARS_CAT = config["VARS_CAT"]
TRAIN_DATA_PATH = Path(f"{DATASET_DIR}/{FILE_NAME_DATA_TRAIN}")
#Run training
def run_training() -> None:
"""Train the model."""
# read training data
df = load_dataset(data_path=TRAIN_DATA_PATH)
df[TARGET] = np.log(df[TARGET])
# fit model
cb_params_fit = {'cb__cat_features': VARS_CAT}
cb_pipe.fit(X=df,
y=df[TARGET],
**cb_params_fit)
# persist trained model
save_pipeline(pipeline_to_persist=cb_pipe)
if __name__ == "__main__":
run_training()
``` |
{
"source": "jmonsalverodilla/Python-Microservices-Web-App",
"score": 3
} |
#### File: Python-Microservices-Web-App/backend/models.py
```python
from flask_sqlalchemy import SQLAlchemy
from datetime import datetime,timedelta
db = SQLAlchemy()
class Predictions(db.Model):
id = db.Column('ID',db.Integer, primary_key=True)
OverallQual = db.Column(db.Integer,nullable=False)
GrLivArea = db.Column(db.Integer, nullable=False)
TotalBsmtSF = db.Column(db.Integer, nullable=False)
CentralAir = db.Column(db.Text, nullable=False)
FireplaceQu = db.Column(db.Text, nullable=True)
BsmtFinSF1 = db.Column(db.Integer, nullable=False)
LotArea = db.Column(db.Integer, nullable=False)
GarageCars = db.Column(db.Integer, nullable=False)
YearBuilt = db.Column(db.Integer, nullable=True)
KitchenQual = db.Column(db.Text, nullable=True)
Prediction = db.Column(db.Text, nullable=True)
    Time = db.Column(db.DateTime, default=datetime.now)  # pass the callable (not datetime.now()), otherwise the timestamp is fixed at app start-up
def __repr__(self):
return '<ID %r>' % self.id
``` |
{
"source": "jmonsalverodilla/Recommender-Systems",
"score": 3
} |
#### File: jmonsalverodilla/Recommender-Systems/eda.py
```python
import streamlit as st
import ast
import plotly.graph_objects as go
import plotly.express as px
from plotly.subplots import make_subplots
##############################FUNCTIONS#####################################
#MOVIES
def main_movies(df_metadata_complete):
#Figure
df = df_metadata_complete.sort_values(by="revenue", ascending=False).head(10)
trace1 = go.Bar(x=df["title"],y=df["revenue"])
#Figure
df = df_metadata_complete.sort_values(by="vote_count",ascending=False).head(10)
trace2 = go.Bar(x=df["title"],y=df["vote_count"])
#Figure
fig = make_subplots(rows=1, cols=2, subplot_titles=("Movies with highest revenues",
"Movies with highest vote_count"))
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
fig.update_layout(barmode="stack").update_layout(title='<b>Movies</b>',autosize=False,width=900,height=400,showlegend=False)
return fig
#CORRELATION BETWEEN BUDGET AND REVENUE
def correlation(df_metadata_complete):
fig = px.scatter(df_metadata_complete, x="budget", y="revenue", trendline="ols",title="Correlation budget vs revenue")
fig.update_layout(title="<b>Correlation budget vs revenue</b>",autosize=False,width=900,height=400)
results = px.get_trendline_results(fig).px_fit_results.iloc[0].summary()
return fig,results
#PRODUCTION COMPANIES
def production_companies(df_metadata_complete):
df_metadata_complete['production_company_name'] = df_metadata_complete['production_companies'].fillna('[]').apply(ast.literal_eval).apply(lambda x: [i['name'] for i in x] if isinstance(x, list) else []).str[0].fillna('NA')
#Top 10 production companies
movies_per_company = df_metadata_complete[df_metadata_complete['production_company_name']!='NA'].groupby("production_company_name").agg({'title':'count'}).reset_index().rename(columns={'title':'number_of_movies'}).sort_values(by="number_of_movies",ascending=False).head(10)
revenue_per_company = df_metadata_complete[df_metadata_complete['production_company_name']!='NA'].groupby("production_company_name").agg({'revenue':'sum'}).reset_index().rename(columns={'revenue':'total_revenue'}).sort_values(by="total_revenue",ascending=False).head(10)
#Figure
fig = make_subplots(rows=1, cols=2,subplot_titles=("Number of movies (top 10 production companies)",
"Total revenue (top 10 production companies)"))
trace1 = go.Bar(x=movies_per_company["production_company_name"],y=movies_per_company["number_of_movies"])
trace2 = go.Bar(x=revenue_per_company["production_company_name"],y=revenue_per_company["total_revenue"])
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
fig.update_layout(barmode="stack").update_layout(title = "<b>Production companies</b>",autosize=False,width=900,height=400,showlegend=False)
return fig
#SPOKEN LANGUAGES
#Let's get the language
def spoken_languages(df_metadata_complete):
df_metadata_complete['language'] = df_metadata_complete['spoken_languages'].fillna('[]').apply(ast.literal_eval).apply(lambda x: [i['iso_639_1'] for i in x] if isinstance(x, list) else []).str[0].fillna('NA')
#Top 10 languages
languages = df_metadata_complete[df_metadata_complete['language']!='NA'].groupby("language").agg({'title':'count'}).reset_index().rename(columns={'title':'number_of_movies'}).sort_values(by="number_of_movies",ascending=False)
top_10_languages = languages[languages['language']!='en'].head(10)
#Figure
fig = go.Figure(data=[go.Pie(labels=top_10_languages['language'].tolist(),
values=top_10_languages['number_of_movies'].tolist(),
hole=.3)])
fig.update_layout(title="<b>Distribution of spoken languages (English not included)</b>",autosize=False,width=900,height=400)
return fig
#DIRECTORS WITH HIGHEST REVENUES
def directors_revenue(df_metadata_complete):
directors_revenue = df_metadata_complete.groupby('director')['revenue'].sum().reset_index().rename(columns={'revenue':'total_revenue'}).sort_values(by="total_revenue",ascending=False).head(10)
#Directors with highest number of movies
directors_movies = df_metadata_complete.groupby('director')['title'].count().reset_index().rename(columns={'title':'number_of_movies'}).sort_values(by="number_of_movies",ascending=False).head(10)
#Figure
fig = make_subplots(rows=1, cols=2,subplot_titles=("Top 10 directors with highest total revenues",
"Top 10 directors with highest number of movies"))
trace1 = go.Bar(x=directors_revenue["director"],y=directors_revenue["total_revenue"])
trace2 = go.Bar(x=directors_movies["director"],y=directors_movies["number_of_movies"])
fig.append_trace(trace1, 1, 1)
fig.append_trace(trace2, 1, 2)
fig.update_layout(title="<b>Directors</b>",autosize=False,width=900,height=400,showlegend=False)
return fig
# SETTING PAGE CONFIG TO WIDE MODE
#st.set_page_config(layout="wide")
#lottie_book = load_lottieurl('https://assets4.lottiefiles.com/temp/lf20_aKAfIn.json')
def load_page(df_metadata_complete):
###Streamlit app
_, row1, _ = st.beta_columns((0.01, 20, 0.01))
with row1:
st.markdown("<h1 style='text-align: center; color: black;'> 📊 Exploratory Data Analysis</h1>", unsafe_allow_html=True)
st.write('')
with st.beta_expander('View movies (sorted by revenue)'):
st.write(df_metadata_complete.drop(columns=['soup']))
fig = main_movies(df_metadata_complete)
st.plotly_chart(fig)
fig,results = correlation(df_metadata_complete)
st.plotly_chart(fig)
with st.beta_expander('View correlation results'):
st.write(results)
for i in range(3):
st.write(" ")
fig = production_companies(df_metadata_complete)
st.plotly_chart(fig)
fig = spoken_languages(df_metadata_complete)
st.plotly_chart(fig)
fig = directors_revenue(df_metadata_complete)
st.plotly_chart(fig)
```
#### File: Recommender-Systems/other_alternative/collaborative_filtering.py
```python
import pandas as pd
pd.options.display.max_colwidth = 1000
pd.set_option("display.max_columns",100)
pd.set_option("display.max_rows",100)
#Visualization libraries
import plotly.express as px
#Dashboarding
import streamlit as st
##########################Functions###############################
@st.cache(show_spinner=False)
def get_most_likely_items_similarity_dict(*,items,max_number_of_predictions,dict_similarity):
d = {}
for movie in items:
dict_movie = dict_similarity[movie]
values = list(dict_movie.values())
columns = list(dict_movie.keys())
df_movie = pd.DataFrame(data = [values], columns=columns)
d[movie] = df_movie
df_similarity_filtered = pd.concat(d.values(), ignore_index=True).fillna(0)
df_most_similar_items = df_similarity_filtered.sum(axis = 0).reset_index().rename(columns={'index':'title', 0:'similarity'}).sort_values(by="similarity",ascending=False)
fig = px.bar(df_most_similar_items.head(max_number_of_predictions), x="title",y="similarity",title="Recommended movies",
labels={'similarity': "Similarity"}, height=500)
return fig
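# Assumed shape of ``dict_similarity`` (added note): a nested mapping of
# movie title -> {other title: similarity score}, e.g.
# {"The Dark Knight Rises": {"Inception": 0.83, "Batman Begins": 0.79}}
# where the numeric values are illustrative only.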
###################################APP######################################
# SETTING PAGE CONFIG TO WIDE MODE
#st.set_page_config(layout="wide")
def load_page(dict_similarity_collaborative_filtering):
###Streamlit app
row1_spacer1, row1_1, row1_spacer2 = st.beta_columns((0.01, 3.2, 0.01))
with row1_1:
st.markdown("<h1 style='text-align: center; color: black;'> 🎬 Collaborative Filtering Movie Recommender</h1>", unsafe_allow_html=True)
st.image('./images/item_based_collaborative_filtering.jpg', use_column_width=True)
#if st.checkbox('view data'):
# st.subheader('Raw data')
# st.write(df_ratings_complete)
# st.write("\n")
# User search
st.markdown("## Select your favourite movie/movies in order to get recommendations")
selected_titles = st.multiselect(label="Selected movie/movies",
options=list(dict_similarity_collaborative_filtering.keys()),
default = ["The Dark Knight Rises"])
# Number of recommendations
number_of_recommendations = st.number_input("Number of recommended movies", value=10, step=1)
#################################ITEM-TO-ITEM COLLABORATIVE FILTERING USING COSINE SIMILARITIES##########################
if st.button("Get recommendations"):
if len(selected_titles) != 0:
fig = get_most_likely_items_similarity_dict(items=selected_titles,
max_number_of_predictions=number_of_recommendations,
dict_similarity=dict_similarity_collaborative_filtering)
st.plotly_chart(fig)
else:
st.write("You need to select at least a movie you liked in order to get recommendations")
#print(type(st.session_state))
#for var in st.session_state:
# print(var)
``` |
{
"source": "jmonson/aws_okta_keyman",
"score": 2
} |
#### File: aws_okta_keyman/aws_okta_keyman/main.py
```python
from __future__ import unicode_literals
import sys
from aws_okta_keyman.keyman import Keyman
def entry_point():
"""Zero-argument entry point for use with setuptools/distribute."""
keyman = Keyman(sys.argv)
raise SystemExit(keyman.main())
if __name__ == '__main__':
entry_point()
``` |
{
"source": "jmons/ramlwrap",
"score": 2
} |
#### File: ramlwrap/utils/validation.py
```python
import importlib
import inspect
import json
import logging
import sys
from jsonschema import validate
from jsonschema.exceptions import ValidationError
from django.conf import settings
from django.http.response import HttpResponse, HttpResponseNotAllowed, JsonResponse
from django.views.decorators.csrf import csrf_exempt
from . exceptions import FatalException
logger = logging.getLogger(__name__)
class ContentType:
"""Represents http content types."""
JSON = 'application/json'
def __init__(self):
"""Initialisation function."""
pass
class Endpoint:
"""
Endpoint that represents one url in the service. Each endpoint
contains Actions which represent a request method that the endpoint
supports.
"""
url = None
request_method_mapping = None
def __init__(self, url):
"""Initialisation function."""
self.url = url
self.request_method_mapping = {}
def parse_regex(self, regex_dict):
"""
Replace dynamic template in url with corresponding regex for a dynamic value
:param regex_dict: dictionary of dynamic id names to regex
e.g. {'dynamic_id': '(?P<dynamic_id>[a-zA-Z]+)'}
"""
for regex_key, regex in regex_dict.items():
string_to_replace = "{%s}" % regex_key
self.url = self.url.replace(string_to_replace, regex)
def add_action(self, request_method, action):
"""Add an action mapping for the given request method type.
:param request_method: http method type to map the action to.
:param action: the action to map to the request.
:returns: returns nothing.
"""
self.request_method_mapping[request_method] = action
@csrf_exempt
def serve(self, request, **dynamic_values):
"""Serve the request to the current endpoint. The validation and response
that is returned depends on the incoming request http method type.
:param request: incoming http request that must be served correctly.
:param dynamic_values: kwargs of dynamic id names against actual value to substitute into url
e.g. {'dynamic_id': 'aBc'}
:returns: returns the HttpResponse, content of which is created by the target function.
"""
if request.method in self.request_method_mapping:
action = self.request_method_mapping[request.method]
response = _validate_api(request, action, dynamic_values)
else:
response = HttpResponseNotAllowed(self.request_method_mapping.keys())
if isinstance(response, HttpResponse):
return response
else:
return HttpResponse(response)
class Action:
"""
Maps out the api definition associated with the parent Endpoint.
One of these will be created per http request method type.
"""
example = None
schema = None
target = None
query_parameter_checks = None
resp_content_type = None
requ_content_type = None
regex = None
def __init__(self):
"""Initialisation function."""
pass
def _validate_query_params(params, checks):
"""
Function to validate HTTP GET request params. If there are checks to be
performed then they will be; these will be items such as length and type
checks defined in the definition file.
:param params: incoming request parameters.
:param checks: dict of param to rule to validate with.
:raises ValidationError: raised when any query parameter fails any
of its checks defined in the checks param.
:returns: true if validated, otherwise raises an exception when fails.
"""
# If validation checks, check the params. If not, pass.
if checks:
for param in checks:
# If the expected param is in the query.
if param in params:
for check, rule in checks[param].items():
if rule is not None:
error_message = 'QueryParam [%s] failed validation check [%s]:[%s]' % (param, check, rule)
if check == 'minLength':
if len(params.get(param)) < rule:
raise ValidationError(error_message)
elif check == 'maxLength':
if len(params.get(param)) > rule:
raise ValidationError(error_message)
elif check == 'type':
if rule == 'number':
try:
float(params.get(param))
except ValueError:
raise ValidationError(error_message)
# If the require param isn't in the query.
elif checks[param]['required'] is True:
raise ValidationError('QueryParam [%s] failed validation check [Required]:[True]' % param)
return True
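# Illustrative shape of the ``checks`` argument (added note; parameter names are
# hypothetical): {"page": {"required": True, "type": "number",
#                          "minLength": None, "maxLength": None}}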
def _generate_example(action):
"""
This is used by both GET and POST when returning an example
"""
# The original method of generating straight from the example is bad
# because v2 parser now has an object, which also allows us to do the
# headers correctly
ret_data = action.example
# FIXME: not sure about this content thing
if action.resp_content_type == "application/json":
ret_data = json.dumps(action.example)
return HttpResponse(ret_data, content_type=action.resp_content_type)
def _validate_api(request, action, dynamic_values=None):
"""
Validate APIs content.
:param request: incoming http request.
:param action: action object containing data used to validate
and serve the request.
:param dynamic_values: dict of dynamic id names against actual values to substitute into url
e.g. {'dynamic_id': 'aBc'}
:returns: returns the HttpResponse generated by the action target.
"""
if action.query_parameter_checks:
# Following raises exception on fail or passes through.
_validate_query_params(request.GET, action.query_parameter_checks)
error_response = None
if request.body:
error_response = _validate_body(request, action)
if error_response:
response = error_response
else:
if action.target:
if dynamic_values:
# If there was a dynamic value, pass it through
response = action.target(request, **dynamic_values)
else:
response = action.target(request)
else:
response = _generate_example(action)
if not isinstance(response, HttpResponse):
# As we weren't given a HttpResponse, we need to create one
# and handle the data correctly.
if action.resp_content_type == ContentType.JSON:
response = HttpResponse(json.dumps(response), content_type="application/json")
else:
# FIXME: write more types in here
raise Exception("Unsuported response content type - contact @jmons for future feature request")
return response
def _validate_body(request, action):
error_response = None
# Grab the content-type coming in from the request
if "headers" in request.META:
request_content_type = request.META["headers"]["content-type"]
else:
request_content_type = request.META["CONTENT_TYPE"]
content_type_matched = False
# Set the actual content_type we are using in this request
action.requ_content_type = request_content_type
# Check the schema had content-types defined
if hasattr(action, 'request_content_type_options'):
for x in action.request_content_type_options:
# Check if the incoming content-type matches the allowed type in the schema and is JSON type
if x == request_content_type == str(ContentType.JSON):
content_type_matched = True
# If the expected request body is JSON, we need to load it.
if action.request_options[request_content_type]["schema"]:
# If there is any schema, we'll validate it.
try:
data = json.loads(request.body.decode('utf-8'))
validate(data, action.request_options[request_content_type]["schema"])
except Exception as e:
# Check the value is in settings, and that it is not None
if hasattr(settings,
'RAMLWRAP_VALIDATION_ERROR_HANDLER') and settings.RAMLWRAP_VALIDATION_ERROR_HANDLER:
error_response = _call_custom_handler(e, request, action)
else:
error_response = _validation_error_handler(e)
else:
# Otherwise just load it (no validation as no schema).
data = json.loads(request.body.decode('utf-8'))
break
# Incoming content type wasn't json but it does match one of the options in the raml so just decode it as is
elif x == request_content_type:
content_type_matched = True
try:
data = request.body.decode('utf-8')
except UnicodeDecodeError:
# Just send the body if it cannot be decoded
data = request.body
break
else:
# There were no content type options in the schema so just load the data
content_type_matched = True
try:
data = request.body.decode('utf-8')
except UnicodeDecodeError:
# Just send the body if it cannot be decoded
data = request.body
if not content_type_matched:
error_response = _validation_error_handler(ValidationError("Invalid Content Type for this request: {}".format(request_content_type), validator="invalid"))
if not error_response:
request.validated_data = data
return error_response
def _validation_error_handler(e):
"""
Default validation handler for when a ValidationError occurs.
    This behaviour can be overridden in the settings file.
:param e: exception raised that must be handled.
:returns: HttpResponse with status depending on the error.
ValidationError will return a 422 with json info on the cause.
Otherwise a FatalException is raised.
"""
if isinstance(e, ValidationError):
message = 'Validation failed. {}'.format(e.message)
error_response = {
'message': message,
'code': e.validator
}
logger.info(message)
error_resp = JsonResponse(error_response, status=422)
else:
raise FatalException('Malformed JSON in the request.', 400)
return error_resp
def _call_custom_handler(e, request, action):
"""
Dynamically import and call the custom validation error handler
defined by the user in the django settings file.
:param e: exception raised that must be handled.
:param request: incoming http request that must be served correctly.
:param action: action object containing data used to validate and serve the request.
:returns: response returned from the custom handler, given the exception.
"""
handler_full_path = settings.RAMLWRAP_VALIDATION_ERROR_HANDLER
handler_method = handler_full_path.split('.')[-1]
handler_class_path = '.'.join(handler_full_path.split('.')[0:-1])
handler = getattr(importlib.import_module(handler_class_path), handler_method)
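    # For example (hypothetical project layout), a settings entry such as
    #   RAMLWRAP_VALIDATION_ERROR_HANDLER = "myproject.utils.handlers.my_handler"
    # resolves to the module "myproject.utils.handlers" and the attribute "my_handler".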
if _num_arguments_to_pass(handler) == 3:
return handler(e, request, action)
else:
# Handle old versions that still only accept the exception
return handler(e)
def _num_arguments_to_pass(handler):
if sys.version_info[0] < 3:
# Python 2
args = inspect.getargspec(handler).args
return len(args)
else:
# Python 3
signature = inspect.signature(handler)
return len(signature.parameters)
```
#### File: RamlWrapTest/tests/test_validation.py
```python
import json
import os
import sys
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), "../../../")))
from ramlwrap.utils.validation import _validate_api, Action, ContentType, Endpoint
from django.conf import settings
from django.http.response import HttpResponse, HttpResponseNotAllowed
from django.test import TestCase, Client
from django.test.client import RequestFactory
from jsonschema.exceptions import ValidationError
def _mock_post_target(request, dynamic_value=None):
"""Return true if the validated data is correct."""
if request.validated_data == {"testkey": "testvalue"}:
valid = True
else:
valid = False
return valid
def _mock_post_target_json_resp(request, dynamic_value=None):
"""Mock function that returns some json for a post."""
return {"valid": True}
def _mock_get_target(request, dynamic_value=None):
"""Return true if the validated data is correct."""
valid = True
if not request.GET.get("param1") == "2":
valid = False
if not request.GET.get("param2") == "hello":
valid = False
return {"valid": valid}
class ValidationTestCase(TestCase):
"""TestCase for ramlwrap validation functionality."""
client = None
def setUp(self):
self.client = Client()
def test_raml_schema_validation(self):
"""Test that when schema is present, it is used to
validate the incoming request.
"""
response = self.client.post("/api", data="{}", content_type="application/json")
self.assertEquals(422, response.status_code)
self.assertEqual({"message": "Validation failed. 'data' is a required property", "code": "required"},
json.loads(response.content.decode('utf-8')))
def test_get_with_valid_params(self):
"""
Test that a get api with valid query params doesn't raise
an exception.
"""
self.client.get("/api/3", {"param2": "test5", "param3": 2})
def test_get_with_invalid_params(self):
"""
Test that a get api with invalid query params raises
a ValidationError.
"""
invalid_params = [
# param2 must be minLength 5.
{"param2": "test", "param3": 2},
# param3 must be a number.
{"param1": "1", "param2": "12345", "param3": "2sadasd"},
# param2 must be maxLength 10.
{"param1": "1", "param2": "12345678910", "param3": "2"},
# param2 is required.
{"param1": 1, "param3": 2}
]
for params in invalid_params:
with self.assertRaises(ValidationError):
self.client.get("/api/3", params)
def test_post_with_valid_params(self):
"""Test that a post api with valid query params doesn't raise
an exception.
"""
self.client.post("/api/3?param2=one2345¶m3=2")
def test_post_with_invalid_params(self):
"""Test that a post api with invalid query params raises
a ValidationError.
"""
invalid_params = [
# param2 must be minLength 5.
"param2=test¶m3=2",
# param3 must be a number.
"param1=1¶m2=12345¶m3=2sadasd",
# param2 must be maxLength 10.
"param1=1¶m2=12345678910¶m3=2",
# param2 is required.
"param1=1¶m3=2"
]
for params in invalid_params:
with self.assertRaises(ValidationError):
                self.client.post("/api/3?%s" % params)
def test_post_with_valid_content_types(self):
"""
Check that all content types defined in the raml file are valid
"""
valid_content_types = [
"application/json",
"application/x-www-form-urlencoded"
]
for content_type in valid_content_types:
response = self.client.post('/api/multi_content_type', data="{}", content_type=content_type)
self.assertEquals(response.status_code, 200)
def test_post_with_invalid_content_types(self):
"""
Check that making a request with a content type which doesn't match the one in the schema, fails
"""
        invalid_content_types = [
            "text/plain",
            "application/xml"
        ]
        for content_type in invalid_content_types:
response = self.client.post('/api/multi_content_type', data="{}", content_type=content_type)
self.assertEquals(response.status_code, 422)
def test_post_with_no_content_types(self):
"""
Check that making a request with a content type
but to a url which has no defined content types in the schema, passes
"""
# Raml file doesn't define content types, so content type validation doesn't occur
content_types = [
"application/json",
"application/x-www-form-urlencoded",
"text/plain",
"application/xml"
]
for content_type in content_types:
response = self.client.post('/api/no_content_type', data="{}", content_type=content_type)
self.assertEquals(response.status_code, 200)
def test_validation_handler(self):
"""
Test that given a custom validation handler path, it is called.
Test that if no handler is given, the default handler is used.
"""
# Test that the custom method is called and a response is returned.
settings.RAMLWRAP_VALIDATION_ERROR_HANDLER = "RamlWrapTest.utils.validation_handler.custom_validation_response"
response = self.client.post("/api", data="{}", content_type="application/json")
self.assertEquals(418, response.status_code)
# Test that the custom method is called and an exception is raised.
settings.RAMLWRAP_VALIDATION_ERROR_HANDLER = "RamlWrapTest.utils.validation_handler.custom_validation_exception"
with self.assertRaises(NotImplementedError):
response = self.client.post("/api", data="{}", content_type="application/json")
# Test that the default is called.
settings.RAMLWRAP_VALIDATION_ERROR_HANDLER = None
response = self.client.post("/api", data="{}", content_type="application/json")
self.assertEquals(422, response.status_code)
delattr(settings, "RAMLWRAP_VALIDATION_ERROR_HANDLER")
response = self.client.post("/api", data="{}", content_type="application/json")
self.assertEquals(422, response.status_code)
def test_validation_handler_with_request_action(self):
"""
Test that if the handler handles request and action, these are passed through
"""
endpoint = "/api"
# Test that the custom method is called and a response is returned.
settings.RAMLWRAP_VALIDATION_ERROR_HANDLER = "RamlWrapTest.utils.validation_handler.custom_validation_with_request_action"
response = self.client.post(endpoint, data="{}", content_type="application/json")
self.assertEquals(200, response.status_code)
expected_json_body = {
"path": endpoint,
"content_type": "application/json"
}
self.assertEqual(json.loads(response.content.decode("utf-8")), expected_json_body)
def test_no_schema_validation_passes_through(self):
"""Test that given an action with no schema and a request
with a json body, the body is passed through."""
action = Action()
action.resp_content_type = ContentType.JSON
action.target = _mock_post_target
request = RequestFactory().post(
path="api/4",
data=json.dumps({"testkey": "testvalue"}),
content_type="application/json")
self.assertTrue(_validate_api(request, action))
def test_validated_get_passes_through(self):
"""Test that a valid get request passes through
to the action and the correct response is returned."""
action = Action()
action.target = _mock_get_target
action.resp_content_type = ContentType.JSON
request = RequestFactory().get("api/3", {"param1": 2, "param2": "hello"})
resp = _validate_api(request, action)
self.assertTrue(resp.__class__ is HttpResponse)
self.assertEqual(resp.content.decode("utf-8"), json.dumps({"valid": True}))
def test_validated_post_passes_through(self):
"""Test that a valid post request passes through
to the action and the correct response is returned."""
action = Action()
action.target = _mock_post_target_json_resp
action.resp_content_type = ContentType.JSON
        request = RequestFactory().post("/api/3?param2=one2345&param3=2")
resp = _validate_api(request, action)
self.assertTrue(resp.__class__ is HttpResponse)
self.assertEqual(resp.content.decode("utf-8"), json.dumps({"valid": True}))
def test_unsupported_method_returns_not_allowed(self):
"""Test that when a request is made for an
unsupported method, a 405 is returned with correct list of permitted methods.
"""
endpoint = Endpoint("/api/3")
endpoint.request_method_mapping = {
"GET": {},
"POST": {}
}
request = RequestFactory().generic(
"/api/3",
"UNSUPPORTED_METHOD",
data=json.dumps({"testkey": "testvalue"}),
content_type="application/json")
resp = endpoint.serve(request)
self.assertTrue(resp.__class__ is HttpResponseNotAllowed)
allowed_methods = self._parse_allowed_methods(resp)
self.assertEqual(allowed_methods, ["GET", "POST"])
def test_unknown_method_returns_not_allowed(self):
"""Test that when a request is made for an unknown
method, a 405 is returned with correct list of permitted methods.
"""
endpoint = Endpoint("/api/3")
endpoint.request_method_mapping = {
"GET": {},
}
request = RequestFactory().post(
"/api/3",
data=json.dumps({"testkey": "testvalue"}),
content_type="application/json")
resp = endpoint.serve(request)
self.assertTrue(resp.__class__ is HttpResponseNotAllowed)
allowed_methods = self._parse_allowed_methods(resp)
self.assertEqual(allowed_methods, ["GET"])
def _parse_allowed_methods(self, resp):
allowed_methods_with_spacing = resp['Allow'].split(',')
allowed_methods = []
for method in allowed_methods_with_spacing:
allowed_methods.append(method.strip())
allowed_methods.sort()
return allowed_methods
```
#### File: RamlWrapTest/utils/validation_handler.py
```python
from django.http.response import HttpResponse
def custom_validation_response(e):
"""
Custom validation handler to override the default
and return a HttpResponse I'm a teapot code.
"""
return HttpResponse(status=418)
def custom_validation_exception(e):
"""
Custom validation handler to override the default
and raise an exception.
"""
raise NotImplementedError(e)
def custom_validation_with_request_action(e, request, action):
"""
    Custom validation handler to override the default
    and return the request path and content type instead of an error response.
"""
return {
"path": request.path,
"content_type": action.requ_content_type
}
``` |
{
"source": "jmontag43/udacity_quadcopter",
"score": 2
} |
#### File: jmontag43/udacity_quadcopter/agent.py
```python
import copy
import numpy as np
import random
from collections import namedtuple, deque
from keras import layers, models, optimizers
from keras import backend as K
class ReplayBuffer:
def __init__(self, buffer_size, batch_size):
self.memory = deque(maxlen=buffer_size)
self.batch_size = batch_size
self.experience = namedtuple("Experience",
field_names=["state", "action", "reward", "next_state", "done"])
def add(self, state, action, reward, next_state, done):
e = self.experience(state, action, reward, next_state, done)
self.memory.append(e)
    def sample(self, batch_size=None):
        """Randomly sample a batch of experiences (defaults to the configured batch size)."""
        return random.sample(self.memory, k=batch_size or self.batch_size)
def __len__(self):
return len(self.memory)
class Actor:
def __init__(self, state_size, action_size, action_low, action_high):
self.state_size = state_size
self.action_size = action_size
self.action_low = action_low
self.action_high = action_high
self.action_range = self.action_high - self.action_low
self.build_model()
def build_model(self):
input_states = layers.Input(shape=(self.state_size,), name='states')
layer_1 = layers.Dense(units=32, activation='relu')(input_states)
layer_2 = layers.Dense(units=64, activation='relu')(layer_1)
layer_3 = layers.Dense(units=32, activation='relu')(layer_2)
raw_actions = layers.Dense(units=self.action_size, activation='sigmoid',
name='raw_actions')(layer_3)
actions = layers.Lambda(lambda x: (x * self.action_range) + self.action_low,
name='actions')(raw_actions)
self.model = models.Model(inputs=input_states, outputs=actions)
gradients = layers.Input(shape=(self.action_size,))
loss = K.mean(-gradients * actions)
optimizer = optimizers.Adam()
updates_op = optimizer.get_updates(params=self.model.trainable_weights, loss=loss)
self.train_fn = K.function(
inputs=[self.model.input, gradients, K.learning_phase()],
outputs=[],
updates=updates_op)
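        # Note: the custom loss K.mean(-gradients * actions) implements the DDPG
        # actor update. `gradients` is fed with dQ/da from the critic, so
        # minimising this loss moves the policy's actions in the direction that
        # increases the critic's Q estimate.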
class Critic:
def __init__(self, state_size, action_size):
self.state_size = state_size
self.action_size = action_size
self.build_model()
def build_model(self):
states = layers.Input(shape=(self.state_size,), name='states')
actions = layers.Input(shape=(self.action_size,), name='actions')
net_states = layers.Dense(units=32, activation='relu')(states)
net_states = layers.Dense(units=64, activation='relu')(net_states)
net_actions = layers.Dense(units=32, activation='relu')(actions)
net_actions = layers.Dense(units=64, activation='relu')(net_actions)
net = layers.Add()([net_states, net_actions])
net = layers.Activation('relu')(net)
Q_values = layers.Dense(units=1, name='q_values')(net)
self.model = models.Model(inputs=[states, actions], outputs=Q_values)
optimizer = optimizers.Adam()
self.model.compile(optimizer=optimizer, loss='mse')
action_gradients = K.gradients(Q_values, actions)
self.get_action_gradients = K.function(
inputs=[*self.model.input, K.learning_phase()],
outputs=action_gradients)
class OUNoise:
def __init__(self, size, mu, theta, sigma):
self.mu = mu * np.ones(size)
self.theta = theta
self.sigma = sigma
self.reset()
def reset(self):
self.state = copy.copy(self.mu)
def sample(self):
x = self.state
dx = self.theta * (self.mu - x) + self.sigma * np.random.randn(len(x))
self.state = x + dx
return self.state
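        # The update above is a discretised Ornstein-Uhlenbeck process with unit
        # time step: dx = theta*(mu - x) + sigma*N(0, 1). It produces temporally
        # correlated exploration noise that is added to the actor's actions.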
class DDPG():
episode_rewards = []
total_reward = 0
def __init__(self, task):
self.task = task
self.state_size = task.state_size
self.action_size = task.action_size
self.action_low = task.action_low
self.action_high = task.action_high
self.actor_local = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
self.actor_target = Actor(self.state_size, self.action_size, self.action_low, self.action_high)
self.critic_local = Critic(self.state_size, self.action_size)
self.critic_target = Critic(self.state_size, self.action_size)
self.critic_target.model.set_weights(self.critic_local.model.get_weights())
self.actor_target.model.set_weights(self.actor_local.model.get_weights())
self.exploration_mu = 1.2
self.exploration_theta = 0.15
self.exploration_sigma = 0.3
self.noise = OUNoise(self.action_size, self.exploration_mu, self.exploration_theta, self.exploration_sigma)
self.buffer_size = 1000000
self.batch_size = 64
self.memory = ReplayBuffer(self.buffer_size, self.batch_size)
self.gamma = 0.99
self.tau = 0.001
self.episode_rewards = []
self.total_reward = 0
def reset_episode(self):
self.noise.reset()
state = self.task.reset()
self.last_state = state
return state
def update_rewards(self):
self.episode_rewards.append(self.total_reward)
self.total_reward = 0
def step(self, action, reward, next_state, done):
self.memory.add(self.last_state, action, reward, next_state, done)
self.total_reward += reward
if len(self.memory) > self.batch_size:
experiences = self.memory.sample()
self.learn(experiences)
self.last_state = next_state
def act(self, state):
state = np.reshape(state, [-1, self.state_size])
action = self.actor_local.model.predict(state)[0]
return list(action + self.noise.sample())
def learn(self, experiences):
states = np.vstack([e.state for e in experiences if e is not None])
actions = np.array([e.action for e in experiences if e is not None]).astype(np.float32).reshape(-1, self.action_size)
rewards = np.array([e.reward for e in experiences if e is not None]).astype(np.float32).reshape(-1, 1)
dones = np.array([e.done for e in experiences if e is not None]).astype(np.uint8).reshape(-1, 1)
next_states = np.vstack([e.next_state for e in experiences if e is not None])
actions_next = self.actor_target.model.predict_on_batch(next_states)
Q_targets_next = self.critic_target.model.predict_on_batch([next_states, actions_next])
Q_targets = rewards + self.gamma * Q_targets_next * (1 - dones)
self.critic_local.model.train_on_batch(x=[states, actions], y=Q_targets)
action_gradients = np.reshape(self.critic_local.get_action_gradients([states, actions, 0]), (-1, self.action_size))
self.actor_local.train_fn([states, action_gradients, 1])
self.soft_update(self.critic_local.model, self.critic_target.model)
self.soft_update(self.actor_local.model, self.actor_target.model)
def soft_update(self, local_model, target_model):
local_weights = np.array(local_model.get_weights())
target_weights = np.array(target_model.get_weights())
assert len(local_weights) == len(target_weights), "Local and target model parameters must have the same size"
new_weights = self.tau * local_weights + (1 - self.tau) * target_weights
target_model.set_weights(new_weights)
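# ------------------------------------------------------------------------------
# Illustrative sketch (not part of the original repository): one way this agent
# could be driven, assuming a `task` object that exposes reset() -> state and
# step(action) -> (next_state, reward, done), as in the Udacity quadcopter Task.
def example_training_loop(task, num_episodes=10):
    """Run a few episodes with a DDPG agent and return the per-episode rewards."""
    agent = DDPG(task)
    for _ in range(num_episodes):
        state = agent.reset_episode()
        done = False
        while not done:
            action = agent.act(state)
            next_state, reward, done = task.step(action)
            agent.step(action, reward, next_state, done)
            state = next_state
        agent.update_rewards()
    return agent.episode_rewards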
``` |
{
"source": "jmontalvo94/PINN_system_identification",
"score": 3
} |
#### File: PINN_system_identification/PINNs/run_system_identification.py
```python
import numpy as np
import time
from PINNs.create_example_parameters import create_example_parameters
from PINNs.create_data import create_data
from PINNs.PinnModel import PinnModel
def run_system_identification():
# load or create a file with all simulation parameters such that a simulation is repeatable
# to illustrate the working principle, examples for 1 and 4 buses are implemented
simulation_parameters = create_example_parameters(n_buses=4)
# at this point the training data are provided
# here we simulate a dataset based on the previously defined simulation parameters
x_training, y_training = create_data(simulation_parameters=simulation_parameters)
# creating the model including building it and setting the options for the optimiser, the loss function and the
# loss weights --> see PinnModel.py
model = PinnModel(simulation_parameters=simulation_parameters)
np.set_printoptions(precision=3)
print('Starting training')
total_start_time = time.time()
for n_epochs, batch_size in zip(simulation_parameters['training']['epoch_schedule'],
simulation_parameters['training']['batching_schedule']):
epoch_start_time = time.time()
model.fit(x_training,
y_training,
epochs=n_epochs,
batch_size=batch_size,
verbose=0,
shuffle=True)
epoch_end_time = time.time()
print(f'Trained for {n_epochs} epochs with batch size {batch_size} '
f'in {epoch_end_time - epoch_start_time:.2f} seconds.')
model.PinnLayer.print_relative_error()
total_end_time = time.time()
print(f'Total training time: {total_end_time - total_start_time:.1f} seconds')
if __name__ == "__main__":
run_system_identification()
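# Illustrative note (assumed structure, not taken from the original repository):
# the zip() in the training loop expects simulation_parameters['training'] to
# pair one epoch count with one batch size per training stage, e.g. roughly
#   'epoch_schedule':    [500, 1000, 2000],
#   'batching_schedule': [256, 1024, 4096],
# so each stage trains for n_epochs epochs with the matching batch size.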
``` |
{
"source": "jmontane/skill-ddg",
"score": 2
} |
#### File: jmontane/skill-ddg/__init__.py
```python
import requests
from mycroft.messagebus.message import Message
from mycroft.skills.core import intent_handler
from mycroft.configuration import LocalConf, USER_CONFIG
from mycroft.skills.common_query_skill import CommonQuerySkill, CQSMatchLevel
from adapt.intent import IntentBuilder
from google_trans_new import google_translator
from RAKEkeywords import Rake
import logging
from tempfile import gettempdir
from os.path import join, isfile, expanduser
from padatious import IntentContainer
logging.getLogger("urllib3.connectionpool").setLevel("INFO")
class DuckDuckGoSkill(CommonQuerySkill):
def __init__(self):
super().__init__()
self.translator = google_translator()
self.tx_cache = {} # avoid translating twice
self.duck_cache = {}
self.rake = Rake() # only english for now
# for usage in tell me more / follow up questions
self.idx = 0
self.results = []
self.image = None
# subparser, intents just for this skill
# not part of main intent service
intent_cache = expanduser(self.config_core['padatious']['intent_cache'])
self.intents = IntentContainer(intent_cache)
def initialize(self):
self.load_intents()
# check for conflicting skills just in case
# done after all skills loaded to ensure proper shutdown
self.add_event("mycroft.skills.initialized",
self.blacklist_default_skill)
def load_intents(self):
# TODO intents for other infobox fields
for intent in ["who", "birthdate"]:
path = self.find_resource(intent + '.intent', "locale")
if path:
self.intents.load_intent(intent, path)
self.intents.train(single_thread=True)
def get_intro_message(self):
# blacklist conflicting skills on install
self.blacklist_default_skill()
def blacklist_default_skill(self):
# load the current list of already blacklisted skills
blacklist = self.config_core["skills"]["blacklisted_skills"]
# check the folder name (skill_id) of the skill you want to replace
skill_id = "mycroft-fallback-duck-duck-go.mycroftai"
# add the skill to the blacklist
if skill_id not in blacklist:
self.log.debug("Blacklisting official mycroft skill")
blacklist.append(skill_id)
# load the user config file (~/.mycroft/mycroft.conf)
conf = LocalConf(USER_CONFIG)
if "skills" not in conf:
conf["skills"] = {}
# update the blacklist field
conf["skills"]["blacklisted_skills"] = blacklist
# save the user config file
conf.store()
# tell the intent service to unload the skill in case it was loaded already
# this should avoid the need to restart
self.bus.emit(Message("detach_skill", {"skill_id": skill_id}))
def stop(self):
self.gui.release()
# intents
@intent_handler("search_duck.intent")
def handle_search(self, message):
query = message.data["query"]
summary = self.ask_the_duck(query)
if summary:
self.speak_result()
else:
answer, _, _ = self.parse_subintents(query)
if answer:
                self.speak(answer)
else:
self.speak_dialog("no_answer")
@intent_handler(IntentBuilder("DuckMore").require("More").
require("DuckKnows"))
def handle_tell_more(self, message):
""" Follow up query handler, "tell me more"."""
query = message.data["DuckKnows"]
data, related_queries = self.get_infobox(query)
# TODO maybe do something with the infobox data ?
self.speak_result()
# common query
def parse_subintents(self, utt):
# Get response from intents, this is a subparser that will handle
# queries about the infobox returned by duckduckgo
# eg. when was {person} born
match = self.intents.calc_intent(utt)
level = CQSMatchLevel.CATEGORY
data = match.matches
intent = match.name
score = match.conf
data["intent"] = intent
data["score"] = score
query = utt
if score > 0.8:
level = CQSMatchLevel.EXACT
elif score > 0.5:
level = CQSMatchLevel.CATEGORY
elif score > 0.3:
level = CQSMatchLevel.GENERAL
else:
intent = None
self.log.debug("DuckDuckGo Intent: " + str(intent))
if "person" in data:
query = data["person"]
summary = self.ask_the_duck(query)
answer = summary
if summary:
answer = self.results[0]
infobox, related_queries = self.get_infobox(query)
self.log.debug("DuckDuckGo infobox: " + str(infobox))
data["infobox"] = infobox
data["related_queries"] = related_queries
if intent == "birthdate":
answer = infobox.get("born")
data["query"] = query
data["answer"] = answer
data["image"] = self.image
if not answer:
level = CQSMatchLevel.GENERAL
return answer, level, data
def CQS_match_query_phrase(self, utt):
self.log.debug("DuckDuckGo query: " + utt)
answer, match, data = self.parse_subintents(utt)
if answer:
self.idx += 1
return (utt, match, answer, data)
# extract most relevant keyword
utt = self.translate(utt, "en", self.lang)
keywords = self.rake.extract_keywords(utt)
self.log.debug("Extracted keywords: " + str(keywords))
# TODO better selection / merging of top keywords with same
# confidence??
for kw in keywords:
query = kw[0]
self.log.debug("Selected keyword: " + query)
summary = self.ask_the_duck(query, translate=False)
if summary:
self.idx += 1
return (utt, CQSMatchLevel.GENERAL, self.results[0],
{'query': query, 'answer': self.results[0],
"keywords": keywords, "image": self.image})
def CQS_action(self, phrase, data):
""" If selected show gui """
self.display_ddg(data["answer"], data["image"])
# duck duck go api
def ask_the_duck(self, query, translate=True):
if translate:
# Automatic translation to English
utt = self.translate(query, "en", self.lang)
else:
utt = query
# cache so we dont hit the api twice for the same query
if query not in self.duck_cache:
self.duck_cache[query] = requests.get("https://api.duckduckgo.com",
params={"format": "json",
"q": utt}).json()
data = self.duck_cache[query]
# GUI
self.gui.clear() # clear previous answer just in case
title = data.get("Heading")
self.image = data.get("Image", "")
# summary
summary = data.get("AbstractText")
if not summary:
return None
self.log.debug("DuckDuckGo answer: " + summary)
# context for follow up questions
# TODO intents for this, with this context intents can look up all data
self.set_context("DuckKnows", query)
self.idx = 0
self.results = summary.split(". ")
return summary
def display_ddg(self, summary, image):
if image.startswith("/"):
image = "https://duckduckgo.com" + image
self.gui['summary'] = summary
self.gui['imgLink'] = image
self.gui.show_page("DuckDelegate.qml", override_idle=60)
def speak_result(self):
if self.idx + 1 > len(self.results):
# TODO ask user if he wants to hear about related topics
self.speak_dialog("thats all")
self.remove_context("ddg")
self.idx = 0
else:
if self.image:
self.display_ddg(self.results[self.idx], self.image)
self.speak(self.results[self.idx])
self.idx += 1
def get_infobox(self, query):
if query not in self.duck_cache:
self.ask_the_duck(query)
data = self.duck_cache[query]
# info
related_topics = [t.get("Text") for t in data.get("RelatedTopics", [])]
infobox = {}
infodict = data.get("Infobox") or {}
for entry in infodict.get("content", []):
k = entry["label"].lower().strip()
infobox[k] = entry["value"]
return infobox, related_topics
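        # Illustrative shapes (inferred only from how the values are used above):
        # `infobox` is a flat dict keyed by lower-cased infobox labels, e.g. it
        # may contain {"born": "..."} for a person query, while `related_topics`
        # is a list of short text snippets.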
def translate(self, utterance, lang_tgt=None, lang_src="en"):
lang_tgt = lang_tgt or self.lang
# if langs are the same do nothing
if not lang_tgt.startswith(lang_src):
if lang_tgt not in self.tx_cache:
self.tx_cache[lang_tgt] = {}
# if translated before, dont translate again
if utterance in self.tx_cache[lang_tgt]:
# get previous translated value
translated_utt = self.tx_cache[lang_tgt][utterance]
else:
# translate this utterance
translated_utt = self.translator.translate(utterance,
lang_tgt=lang_tgt,
lang_src=lang_src).strip()
# save the translation if we need it again
self.tx_cache[lang_tgt][utterance] = translated_utt
self.log.debug("translated {src} -- {tgt}".format(src=utterance,
tgt=translated_utt))
else:
translated_utt = utterance.strip()
return translated_utt
def create_skill():
return DuckDuckGoSkill()
``` |
{
"source": "jmontgom10/Mimir_pyPol",
"score": 2
} |
#### File: Mimir_pyPol/diagnostics/computeGradientCorrelations.py
```python
import os
import sys
import glob
import numpy as np
from astropy.io import ascii
from astropy.table import Table as Table
from astropy.table import Column as Column
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import detect_threshold, detect_sources
from scipy.ndimage.filters import median_filter, gaussian_filter
from photutils import Background2D
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
sys.path.insert(0, 'C:\\Users\\Jordan\\Libraries\\python\\Mimir_pyPol')
from Mimir_header_handler import Mimir_header_handler
# This is the location of all PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced\\201611\\notPreFlattened'
# Build the path to the S3_Astrometry files
S3_dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
# This is the location of the (pre-computed) star masks for the B-images
maskDir = os.path.join(pyPol_data, 'Masks')
starMaskDir = os.path.join(maskDir, 'starMasks')
# Build the path to the supersky directory
hwpImagesDir = os.path.join(pyPol_data, 'bkgFreeHWPimages')
if (not os.path.isdir(hwpImagesDir)):
os.mkdir(hwpImagesDir, 0o755)
# Read in the indexFile data and select the filenames
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
# Read in the kokopelli mask
from astropy.io import fits
kokopelliHDUlist = fits.open('..\\kokopelliMask.fits')
kokopelliMask = (kokopelliHDUlist[0].data > 0)
# Read in the 2MASS masks
TMASSdir = "..\\2MASSimages"
# Set the instrument to 2MASS
ai.set_instrument('2MASS')
# Read in all the 2MASS images and store them in a dictionary for quick reference
TMASS_Hfiles = np.array(glob.glob(os.path.join(TMASSdir, '*H_mask.fits')))
TMASS_Kfiles = np.array(glob.glob(os.path.join(TMASSdir, '*Ks_mask.fits')))
# Read in the 2MASS images
TMASS_HmaskList = [ai.reduced.ReducedScience.read(f) for f in TMASS_Hfiles]
TMASS_KmaskList = [ai.reduced.ReducedScience.read(f) for f in TMASS_Kfiles]
# Parse the targets for each file
TMASS_Htargets = [os.path.basename(f).split('_')[0] for f in TMASS_Hfiles]
TMASS_Ktargets = [os.path.basename(f).split('_')[0] for f in TMASS_Kfiles]
# Store these masks in a dictionary
TMASS_HimgDict = dict(zip(
TMASS_Htargets,
TMASS_HmaskList
))
TMASS_KimgDict = dict(zip(
TMASS_Ktargets,
TMASS_KmaskList
))
TMASS_masks = {
'H': TMASS_HimgDict,
'Ks': TMASS_KimgDict
}
# Set the instrument to Mimir
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('Mimir')
# Read in the flat images
flatDir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\BDP_Data\\201611\\10_Flat_Field_Images'
flatImgs = np.array([
ai.reduced.ReducedScience.read(f)
for f in glob.glob(os.path.join(flatDir, '*.fits'))
])
# Get the flat filter from the flat list
flatFilters = np.array([
flat.filter
for flat in flatImgs
])
# Get the HWPs fromm the flat list
HWPstepList = np.array([
0, 33, 67, 100,
133, 167, 200, 233,
267, 300, 333, 367,
400, 433, 467, 500
])
HWPlist = np.arange(16, dtype=int) + 1
IPPAlist = np.array(4*[0, 45, 90, 135])
HWPstep_to_HWP = dict(zip(HWPstepList, HWPlist))
flatHWPs = np.array([
HWPstep_to_HWP[flat.header['HWP']]
for flat in flatImgs
])
# # Define a (very) quick plane-fitting function
# def fitPlaneSVD(XYZ):
# """Solves for thebest fitting plane to the provided (x,y,z) points"""
# [rows,cols] = XYZ.shape
# # Set up constraint equations of the form AB = 0,
# # where B is a column vector of the plane coefficients
# # in the form b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
# p = (np.ones((rows,1)))
# AB = np.hstack([XYZ,p])
# [u, d, v] = np.linalg.svd(AB,0)
# B = v[3,:]; # Solution is last column of v.
# nn = np.linalg.norm(B[0:3])
# B = B / nn
# # return B[0:3]
# return B
# Define a plane fitting function for use within this method only
def planeFit(points):
"""
p, n = planeFit(points)
Given an array, points, of shape (d,...)
representing points in d-dimensional space,
fit an d-dimensional plane to the points.
Return a point, p, on the plane (the point-cloud centroid),
and the normal, n.
"""
    points = np.reshape(points, (np.shape(points)[0], -1)) # Collapse trailing dimensions
assert points.shape[0] <= points.shape[1], "There are only {} points in {} dimensions.".format(points.shape[1], points.shape[0])
ctr = points.mean(axis=1)
x = points - ctr[:,np.newaxis]
M = np.dot(x, x.T) # Could also use np.cov(x) here.
return ctr, np.linalg.svd(M)[0][:,-1]
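# Usage note (illustrative): with `point, normal = planeFit(xyz)`, the fitted
# plane can be evaluated on a grid as
#   z = point[2] - (normal[0]/normal[2])*(x - point[0])
#                - (normal[1]/normal[2])*(y - point[1])
# which is how the gradient plane is reconstructed from (point, normalVec) below.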
# Define a quick mode function
def mode(array):
"""An estimate of the statistical mode of this array"""
# SUPER fast and sloppy mode estimate:
mean, median, std = sigma_clipped_stats(array)
quickModeEst = 3*median - 2*mean
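    # (Pearson's empirical relation, mode ~ 3*median - 2*mean, for moderately
    # skewed unimodal distributions.)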
# Compute an approximately 3-sigma range about this
modeRegion = quickModeEst + std*np.array([-1.5, +1.5])
# Now compute the number of bins to generate in this range
numBins = np.int(np.ceil(0.2*(np.max(modeRegion) - np.min(modeRegion))))
bins = np.linspace(modeRegion[0], modeRegion[1], numBins)
    # Loop through larger and larger binning until a unique solution is found
foundMode = False
while not foundMode:
# Generate a histogram of the flat field
hist, flatBins = np.histogram(array.flatten(), bins=bins)
# Locate the histogram maximum
maxInds = (np.where(hist == np.max(hist)))[0]
if maxInds.size == 1:
# Grab the index of the maximum value and shrink
maxInd = maxInds[0]
foundMode = True
else:
            # Shrink the NUMBER of bins to help find a unique maximum
numBins *= 0.9
bins = np.linspace(modeRegion[0], modeRegion[1], numBins)
# Estimate flatMode from histogram maximum
flatMode = np.mean(flatBins[maxInd:maxInd+2])
return flatMode
################################################################################
# Determine which parts of the fileIndex pertain to science images
useFiles = np.where(fileIndex['USE'] == 1)
# Cull the file index to only include files selected for use
fileIndex = fileIndex[useFiles]
# Group the fileIndex by...
# 1. FILTER
# 2. Night
# 3. Dither (pattern)
# 4. HWP Angle
# 5. ABBA value
# fileIndexByGroup = fileIndex.group_by(['FILTER', 'Night',
# 'Dither', 'HWP', 'ABBA'])
fileIndexByGroup = fileIndex.group_by(['GROUP_ID', 'HWP'])
gradientDict = {
'NGC2023':{
'H':{
'gx':[],
'gy':[],
'HWP':[]
},
'Ks':{
'gx':[],
'gy':[],
'HWP':[]
}
},
'NGC7023':{
'H':{
'gx':[],
'gy':[],
'HWP':[]
},
'Ks':{
'gx':[],
'gy':[],
'HWP':[]
}
},
'M78':{
'H':{
'gx':[],
'gy':[],
'HWP':[]
},
'Ks':{
'gx':[],
'gy':[],
'HWP':[]
}
}
}
# Loop through each grouping
for group in fileIndexByGroup.groups:
# Grab the current target information
thisGroupName = str(np.unique(group['OBJECT'].data)[0])
thisTarget = str(np.unique(group['TARGET'].data)[0])
thisGroupID = str(np.unique(group['GROUP_ID'].data)[0])
thisFilter = str(np.unique(group['FILTER'].data)[0])
thisHWP = str(np.unique(group['HWP'].data)[0])
# if thisFilter == 'H': continue
# Figure out which flat image to use
thisFlatInd = np.where(
np.logical_and(
flatFilters == thisFilter,
flatHWPs == int(thisHWP)
)
)
thisFlat = (flatImgs[thisFlatInd])[0]
# Find the 2MASS mask for this image
this2MASSmask = TMASS_masks[thisFilter][thisTarget]
numImgs = len(group)
print('\nProcessing {0} images for'.format(numImgs))
print('\tOBJECT : {0}'.format(thisGroupName))
print('\tFILTER : {0}'.format(thisFilter))
print('\tHWP : {0}'.format(thisHWP))
Ainds = np.where(group['AB'] == 'A')
Binds = np.where(group['AB'] == 'B')
Afiles = group[Ainds]['FILENAME']
BimgFiles = [os.path.join(S3_dir, f) for f in group[Binds]['FILENAME']]
BmaskFiles = [os.path.join(starMaskDir, f) for f in group[Binds]['FILENAME']]
# Catch the case where there are no B images to use (skip it!)
if len(BimgFiles) == 0: continue
# Think about what to do in the case of only one B image (skip it for now)
if len(BimgFiles) == 1: continue
# Quickly read in both B images
Bimgs = [ai.reduced.ReducedScience.read(f) for f in BimgFiles]
Btimes = np.array([img.julianDate for img in Bimgs])
B1ind = Btimes.argmin()
B2ind = Btimes.argmax()
# Read in both masks and create a combined mask
Bmasks = [ai.reduced.ReducedScience.read(f) for f in BmaskFiles]
combinedBmask = False
for Bmask in Bmasks:
combinedBmask = np.logical_or(combinedBmask, Bmask.data.astype(bool))
for Afile in Afiles:
# Build the output file
outFile = os.path.join(hwpImagesDir, Afile)
# Check if this file already exists
if os.path.isfile(outFile):
print('File {} already exists... skipping to next group'.format(os.path.basename(outFile)))
continue
# Read in this Aimg
Aimg = ai.reduced.ReducedScience.read(
os.path.join(S3_dir, Afile)
)
# Locate pixels in this frame within the 2MASS region
ny, nx = Aimg.shape
yy, xx = np.mgrid[0:ny, 0:nx]
RA, Dec = Aimg.wcs.wcs_pix2world(xx, yy, 0)
xx2, yy2 = this2MASSmask.wcs.wcs_world2pix(RA, Dec, 0)
xx2, yy2 = xx2.round().astype(int), yy2.round().astype(int)
        # Check if these pixels are outside the acceptable bounds and trim if necessary
goodXind = np.where(np.sum(xx2 > 1, axis=0) == ny)[0]
lf = np.min(goodXind)
goodXind = np.where(np.sum(xx2 < nx - 2, axis=0) == ny)[0]
rt = np.max(goodXind) + 1
goodYind = np.where(np.sum(yy2 > 0, axis=1) == nx)[0]
bt = np.min(goodYind)
goodYind = np.where(np.sum(yy2 < ny - 2, axis=1) == nx)[0]
tp = np.max(goodYind) + 1
yy2, xx2, = yy2[bt:tp, lf:rt], xx2[bt:tp, lf:rt]
# Locate which corresponding pixels fall in background regions
thisMask = np.zeros((ny, nx), dtype=bool)
thisMask[bt:tp, lf:rt] = this2MASSmask.data[yy2, xx2].astype(bool)
thisMask = np.logical_or(thisMask, Aimg.data < -1e5)
# Create the absolute mask of all A and B images
totalMask = np.logical_or(combinedBmask, thisMask)
maskedInds = np.where(totalMask)
unmaskedInds = np.where(np.logical_not(totalMask))
# Construct the linear time interpolation variable
c1 = (Aimg.julianDate - np.min(Btimes))/(np.max(Btimes) - np.min(Btimes))
# Construct the interpolated background image
        Aimg1 = Aimg - ((1 - c1)*Bimgs[B1ind] + c1*Bimgs[B2ind])
# Divide the *thermal-emission-free* image by the flat
Aimg1 = Aimg1/thisFlat
# Correct for the oversubtraction of airglow
# Find the mode of the unmasked pixels for A and B frames
Amode = mode(Aimg.data[unmaskedInds])
B1mode = mode(Bimgs[B1ind].data[unmaskedInds])
B2mode = mode(Bimgs[B2ind].data[unmaskedInds])
        # Interpolate the off-target sky mode to the time of the on-target frame
        BmodeAtAtime = ((1 - c1)*B1mode + c1*B2mode)
# Compute the difference between the apparent mode and the expected mode
AmodeDiff = Amode - BmodeAtAtime
# # Remove this 'oversubtraction' effect
# # (possible undersubtraction in same cases)
# Aimg1 = Aimg1 - (AmodeDiff*Aimg1.unit)
# Compute a grid of median values
yy, xx = np.mgrid[13+25:1014:50, 12+25:1013:50]
yyEdge, xxEdge = np.ogrid[13:1014:50, 12:1013:50]
yyCen, xxCen = np.ogrid[13+25:1014:50, 12+25:1013:50]
yyEdge, xxEdge = yyEdge.flatten(), xxEdge.flatten()
yyCen, xxCen = yyCen.flatten(), xxCen.flatten()
# Use 50-pixel wide bins starting at (xoff, yoff) = (13, 12)
yoff, xoff = 13, 12
dy, dx = 50, 50
# Loop through each grid location
maskedArr = Aimg1.data.copy()
maskedArr[maskedInds] = np.NaN
medianArr = np.zeros((20,20))
for ix, x1 in enumerate(xxCen):
for iy, y1 in enumerate(yyCen):
# Grab the patch for this zone
bt, tp = yyEdge[iy], yyEdge[iy+1]
lf, rt = xxEdge[ix], xxEdge[ix+1]
thisPatch = maskedArr[bt:tp, lf:rt]
# Check if there is enough data to do a reasonable median estimate
if np.sum(np.isfinite(thisPatch)) < (0.25*thisPatch.size):
medianArr[iy, ix] = np.NaN
else:
# Compute the median in this grid cell
medianArr[iy, ix] = np.nanmedian(thisPatch)
# Compute a plane-fit to this median filtered image
medianInds = np.where(np.isfinite(medianArr))
xyzPts = np.array([xx[medianInds], yy[medianInds], medianArr[medianInds]])
# gradientPlaneFit = fitPlaneSVD(XYZ)
# b(1)*X + b(2)*Y +b(3)*Z + b(4) = 0.
#
# gradientArr = (
# gradientPlaneFit[0]*xx +
# gradientPlaneFit[1]*yy +
# )
point, normalVec = planeFit(xyzPts)
# # Grab the airmasses
# Bairmass = [Bimg.airmass for Bimg in Bimgs]
# Store the gradient values
gradientDict[thisTarget][thisFilter]['gx'].append(-normalVec[0]/normalVec[2])
gradientDict[thisTarget][thisFilter]['gy'].append(-normalVec[1]/normalVec[2])
gradientDict[thisTarget][thisFilter]['HWP'].append(int(thisHWP))
# # Compute the value of the fited plane background
# gradientArr = (
# point[2] +
# (-normalVec[0]/normalVec[2])*(xx - point[0]) +
# (-normalVec[1]/normalVec[2])*(yy - point[1])
# )
#
# # Compute the residual array
# residArr = medianArr - gradientArr
#
# import matplotlib.pyplot as plt
# plt.ion()
# plt.figure()
# plt.imshow(medianArr, origin='lower', interpolation = 'nearest')
#
# plt.figure()
# plt.imshow(gradientArr, origin='lower', interpolation='nearest')
#
# plt.figure()
# plt.imshow(residArr, origin='lower', interpolation='nearest')
# import pdb; pdb.set_trace()
# plt.close('all')
# # Subtract the plane from the Aimg1
# ny, nx = Aimg1.shape
# yy, xx = np.mgrid[0:ny, 0:nx]
# gradientArr = (
# point[2] +
# (-normalVec[0]/normalVec[2])*(xx - point[0]) +
# (-normalVec[1]/normalVec[2])*(yy - point[1])
# )
# tmpData = Aimg1.data - gradientArr
# Aimg1.data = tmpData
#
# # Now that the "nearby star-scattered-light" has been subtracted...
# # Divide the *thermal-emission-free* image by the flat
# Aimg1 = Aimg1/thisFlat
#
# # Recompute the median of this subtracted array
# # Compute a grid of median values
# yy, xx = np.mgrid[13+25:1014:50, 12+25:1013:50]
# yyEdge, xxEdge = np.ogrid[13:1014:50, 12:1013:50]
# yyCen, xxCen = np.ogrid[13+25:1014:50, 12+25:1013:50]
#
# yyEdge, xxEdge = yyEdge.flatten(), xxEdge.flatten()
# yyCen, xxCen = yyCen.flatten(), xxCen.flatten()
#
# # Use 50-pixel wide bins starting at (xoff, yoff) = (13, 12)
# yoff, xoff = 13, 12
# dy, dx = 50, 50
#
# # Loop through each grid location
# maskedArr = Aimg1.data.copy()
# maskedArr[maskedInds] = np.NaN
# medianArr = np.zeros((20,20))
# for ix, x1 in enumerate(xxCen):
# for iy, y1 in enumerate(yyCen):
# # Grab the patch for this zone
# bt, tp = yyEdge[iy], yyEdge[iy+1]
# lf, rt = xxEdge[ix], xxEdge[ix+1]
# thisPatch = maskedArr[bt:tp, lf:rt]
#
# # Check if there is enough data to do a reasonable median estimate
# if np.sum(np.isfinite(thisPatch)) < (0.25*thisPatch.size):
# medianArr[iy, ix] = np.NaN
# else:
# # Compute the median in this grid cell
# medianArr[iy, ix] = np.nanmedian(thisPatch)
#
#
# plt.figure()
# plt.imshow(medianArr, origin='lower', interpolation = 'nearest')
import pdb; pdb.set_trace()
print('Done!')
```
#### File: Mimir_pyPol/oldCode/01_buildIndex.py
```python
import os
import sys
import time
import pdb
import numpy as np
import matplotlib.pyplot as plt
from astropy.table import Table, Column
import astropy.coordinates as coord
import astropy.units as u
from astropy.io import fits
from scipy import stats
# Add the AstroImage class
sys.path.append("C:\\Users\\Jordan\\Libraries\\python\\AstroImage")
from AstroImage import AstroImage
################################################################################
# Define a recursive file search which takes a parent directory and returns all
# the FILES (not DIRECTORIES) beneath that node.
def recursive_file_search(parentDir, exten='', fileList=[]):
# Query the elements in the directory
subNodes = os.listdir(parentDir)
# Loop through the nodes...
for node in subNodes:
# If this node is a directory,
thisPath = os.path.join(parentDir, node)
if os.path.isdir(thisPath):
# then drop down recurse the function
recursive_file_search(thisPath, exten, fileList)
else:
# otherwise test the extension,
# and append the node to the fileList
if len(exten) > 0:
# If an extension was defined,
# then test if this file is the right extension
exten1 = (exten[::-1]).upper()
if (thisPath[::-1][0:len(exten1)]).upper() == exten1:
fileList.append(thisPath)
else:
fileList.append(thisPath)
# Return the final list to the user
return fileList
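# Example usage (illustrative path): collect every FITS file beneath a node with
#   fitsFiles = recursive_file_search('/path/to/S3_Astrometry', exten='.fits')
# Note that the default fileList=[] is a shared mutable default, so repeated
# calls in the same session keep appending to the same list unless a fresh list
# is passed explicitly (e.g. fileList=[]).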
################################################################################
#Setup the path delimeter for this operating system
delim = os.path.sep
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of all PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced'
# Build the path to the S3_Astrometry files
S3dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_data'
# The Montgomery-Clemens reflection nebula project used the pattern
# A (on-target), B (off-target), B (off-target), A (on-target)
# To indicate a group with the opposite pattern
# A (off-target), B (on-target), B (on-target), A (off-target)
# simply include the name of the OBJECT header keyword
ABBAswap = []
# Construct a list of all the reduced BDP files (recursively suearching)
fileList = recursive_file_search(S3dir, exten='.fits')
#Sort the fileList
fileNums = [''.join((file.split(delim).pop().split('.'))[0:2]) for file in fileList]
fileNums = [file.split('_')[0] for file in fileNums]
sortInds = np.argsort(np.array(fileNums, dtype = np.int64))
fileList = [fileList[ind] for ind in sortInds]
#==============================================================================
# ***************************** INDEX *****************************************
# Build an index of the file type and binning, and write it to disk
#==============================================================================
# Check if a file index already exists... if it does then just read it in
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
# Record a list of HWP angles to be checked
HWPlist = np.array([34, 4556, 2261, 6784, 9011, 13534, 11306, 15761,
18056, 22511, 20284, 24806, 27034, 31556, 29261, 33784])
HWPlist.sort()
# Loop through each night and test for image type
print('\nCategorizing files by groups.\n')
startTime = time.time()
# Begin by initializing some arrays to store the image classifications
telRA = []
telDec = []
name = []
waveBand = []
HWPang = []
binType = []
expTime = []
night = []
fileCounter = 0
percentage = 0
#Loop through each file in the fileList variable
for file in fileList:
# Read in the image
tmpImg = AstroImage(file)
# Grab the RA and Dec from the header
# Parse the pointing for this file
tmpRA = coord.Angle(tmpImg.header['TELRA'], unit=u.hour)
tmpDec = coord.Angle(tmpImg.header['TELDEC'], unit=u.degree)
telRA.append(tmpRA.degree)
telDec.append(tmpDec.degree)
# Classify each file type and binning
tmpName = tmpImg.header['OBJECT']
if len(tmpName) < 1:
tmpName = 'blank'
name.append(tmpName)
# Parse the HWP number
tmpHWP = round(100*tmpImg.header['HWP_ANG'])
HWPdiff = np.abs(HWPlist - tmpHWP)
tmpHWP = (np.where(HWPdiff == np.min(HWPdiff)))[0][0] + 1
HWPang.append(tmpHWP)
# Parse the waveband
waveBand.append(tmpImg.header['FILTNME2'])
# Test the binning of this file
binTest = tmpImg.header['CRDELT*']
if binTest[0] == binTest[1]:
binType.append(int(binTest[0]))
# Grab the night of this observation
tmpNight = (tmpImg.header['DATE-OBS'])[0:10]
tmpNight = tmpNight.translate({ord(i):None for i in '-'})
night.append(tmpNight)
# Grab the exposure time of this observation
tmpExpTime = tmpImg.header['EXPTIME']
expTime.append(tmpExpTime)
# Count the files completed and print update progress message
fileCounter += 1
percentage1 = np.floor(fileCounter/len(fileList)*100)
if percentage1 != percentage:
print('completed {0:3g}%'.format(percentage1), end='\r')
percentage = percentage1
endTime = time.time()
numFiles = len(fileList)
print(('\n{0} File processing completed in {1:g} seconds'.
format(numFiles, (endTime -startTime))))
# Write the file index to disk
fileIndex = Table([fileList, telRA, telDec, name, waveBand, HWPang, binType,
expTime, night], names = ['Filename', 'RA', 'Dec', 'Name', 'Waveband',
'HWP', 'Binning', 'Exp Time', 'Night'])
fileIndex.add_column(Column(name='Use',
data=np.ones((numFiles)),
dtype=np.int),
index=0)
# Group by "Name"
groupFileIndex = fileIndex.group_by('Name')
# Grab the file-number orderd indices for the groupFileIndex
fileIndices = np.argsort(groupFileIndex['Filename'])
# Loop through each "Name" and assign it a "Target" value
targetList = []
ditherList = []
PPOLnameList = []
for group in groupFileIndex.groups:
# Select this groups properties
thisName = np.unique(group['Name'].data)
thisWaveband = np.unique(group['Waveband'].data)
thisExpTime = np.unique(group['Exp Time'].data)
# Test if the group name truely is unique
if len(thisName) == 1:
thisName = str(thisName[0])
else:
print('There is more than one name in this group!')
pdb.set_trace()
    # Test if the waveband truly is unique
if len(thisWaveband) == 1:
thisWaveband = str(thisWaveband[0])
else:
print('There is more than one waveband in this group!')
pdb.set_trace()
    # Test if the exposure time truly is unique
if len(thisExpTime) == 1:
thisExpTime = str(thisExpTime[0])
else:
print('There is more than one exposure time in this group!')
pdb.set_trace()
# Grab the file numbers for this group
thisGroupFileNums = []
for thisFile in group['Filename']:
# Parse the file number for this file
thisFileNum = os.path.basename(thisFile)
thisFileNum = thisFileNum.split('_')[0]
thisFileNum = int(''.join(thisFileNum.split('.')))
thisGroupFileNums.append(thisFileNum)
# Sort the file numbers from greatest to least
thisGroupFileNums = sorted(thisGroupFileNums)
# Grab the first and last image numbers
firstImg = str(min(thisGroupFileNums))
firstImg = firstImg[0:8] + '.' + firstImg[8:]
lastImg = str(max(thisGroupFileNums))
lastImg = lastImg[0:8] + '.' + lastImg[8:]
# Count the number of elements in this group
groupLen = len(group)
# Print diagnostic information
print('\nProcessing {0} images for'.format(groupLen))
print('\tGroup : {0}'.format(thisName))
print('\tWaveband : {0}'.format(thisWaveband))
print('\tExptime : {0}'.format(thisExpTime))
print('\tFirst Img : {0}'.format(firstImg))
print('\tLast Img : {0}'.format(lastImg))
print('')
# Add the "PPOLname"
# thisPPOLname = input('\nEnter the PPOL name for group "{0}": '.format(thisName))
#
# Add the "Target" column to the fileIndex
# thisTarget = input('\nEnter the target for group "{0}": '.format(thisName))
#
# Ask the user to supply the dither pattern for this group
# thisDitherEntered = False
# while not thisDitherEntered:
# # Have the user select option 1 or 2
# print('\nEnter the dither patttern for group "{0}": '.format(thisName))
# thisDither = input('[1: ABBA, 2: HEX]: ')
#
# # Test if the numbers 1 or 2 were entered
# try:
# thisDither = np.int(thisDither)
# if (thisDither == 1) or (thisDither == 2):
# # If so, then reassign as a string
# thisDither = ['ABBA', 'HEX'][(thisDither-1)]
# thisDitherEntered = True
# except:
# print('Response not recognized')
# Use the following code to skip over manual entry (comment out lines above)
thisPPOLname = thisName
thisTarget = (thisName.split('_'))[0]
thisDither = "ABBA"
# Add these elements to the target list
PPOLnameList.extend([thisPPOLname]*groupLen)
targetList.extend([thisTarget]*groupLen)
ditherList.extend([thisDither]*groupLen)
# Add the "PPOL name" "Target" and "Dither columns"
groupFileIndex.add_column(Column(name='Target',
data = np.array(targetList)),
index = 2)
groupFileIndex.add_column(Column(name='PPOL Name',
data = np.array(PPOLnameList)),
index = 3)
groupFileIndex.add_column(Column(name='Dither',
data = np.array(ditherList)),
index = 7)
# Re-sort by file-number
fileSortInds = np.argsort(groupFileIndex['Filename'])
fileIndex1 = groupFileIndex[fileSortInds]
#==============================================================================
# ************************** ABBA PARSER **************************************
# Loop through all the groups in the index and parse the ABBA dithers
#==============================================================================
ABBAlist = np.repeat('X', len(fileIndex1))
fileIndexByName = fileIndex1.group_by(['PPOL Name'])
ABBAlist = []
for key, group in zip(fileIndexByName.groups.keys, fileIndexByName.groups):
print('\nParsing ABBA values for PPOL group ', key['PPOL Name'])
# For each of each group file, we will need two pieces of information
# 1) The FILENUMBER (essentially the date plus the nightly file number)
# 2) The HWP_ANGLE (the rotation of the HWP)
# Using this information, we can parse which files are A vs. B
# For later reference, let's grab the group name
thisName = np.unique(group['Name'].data)
# Grab the file numbers for this group
thisGroupFileNums = [''.join((file.split(delim).pop().split('.'))[0:2])
for file in group['Filename'].data]
thisGroupFileNums = [int(file.split('_')[0]) for file in thisGroupFileNums]
thisGroupFileNums = np.array(thisGroupFileNums)
# Grab the HWP, RA, and Decs for this group
thisGroupHWPs = group['HWP'].data
thisGroupRAs = group['RA'].data
thisGroupDecs = group['Dec'].data
# Compute the incremental step for each image in the sequence
numIncr = thisGroupFileNums - np.roll(thisGroupFileNums, 1)
numIncr[0] = 1
# Check if the mean increment is about 2, and skip if it is...
    meanIncr = np.round(10.0*np.mean(numIncr))/10.0
skipGroup = False
if (meanIncr >= 1.85) and (meanIncr <= 2.15):
        print(key['PPOL Name'] + ' appears to have already been parsed')
skipGroup = True
if skipGroup: continue
# Find where the HWP changes
# This is not quite the right algorithm anymore...
HWPshifts = (thisGroupHWPs != np.roll(thisGroupHWPs, 1))
HWPshifts[0] = False
if np.sum(HWPshifts) < 0.5*len(group):
#****************************************************
# This group has the 16(ABBA) dither type.
#****************************************************
print('Dither type 16(ABBA)')
# Find places where the HWP change corresponds to a numIncr of 1
ABBArestart = np.logical_and(HWPshifts, (numIncr == 1))
if np.sum(ABBArestart) > 0:
# Check for the index of these ABBA restart points
ABBArestartInd = np.where(ABBArestart)
# Find the index of the first restart
firstRestartInd = np.min(ABBArestartInd)
# Find the amount of shift needed to coincide ABBAinds with ABBArestart
numSkips = round(np.sum(numIncr[0:firstRestartInd])) - firstRestartInd
ABBAshift = (64 - (firstRestartInd + numSkips)) % 4
ABBAinds = (thisGroupFileNums - thisGroupFileNums[0] + ABBAshift) % 4
else:
print('The HWP shifts are not well mapped, so a solution is not possible.')
pdb.set_trace()
# Setup the dither pattern array
if thisName in ABBAswap:
# Setup the reverse ABBA array
print('Using reverse ABBA values for this group')
ABBAarr = np.array(['B','A','A','B'])
else:
# Setup the normal ABBA array
ABBAarr = np.array(['A','B','B','A'])
# Grab the ABBA values for each file
thisGroupABBAs = ABBAarr[ABBAinds]
# Parse the indices for A images and B images
        Ainds = (np.where(thisGroupABBAs == 'A'))[0]
        Binds = (np.where(thisGroupABBAs == 'B'))[0]
else:
#****************************************************
# This group has the (16A, 16B, 16B, 16A) dither type.
#****************************************************
print('Dither type (16A, 16B, 16B, 16A)')
# Setup the group dither pattern array (16*A, 16*B, 16*B, 16*A)
As = np.repeat('A', 16)
Bs = np.repeat('B', 16)
if thisName in ABBAswap:
# Setup the reverse ABBA array
print('Using reverse ABBA values for this group')
ABBAarr = np.array([Bs, As, As, Bs]).flatten()
else:
# Setup the normal ABBA array
ABBAarr = np.array([As, Bs, Bs, As]).flatten()
# Figure out if any of the first images were dropped
HWPorder = np.array([1,3,2,4,5,7,6,8,9,11,10,12,13,15,14,16])
firstHWP = (np.where(HWPorder == thisGroupHWPs[0]))[0][0]
# Determine which ABBAinds to use
HWPdiff = np.abs(HWPlist - thisGroupHWPs[0])
firstHWPind = np.where(HWPdiff == np.min(HWPdiff))
ABBAinds = thisGroupFileNums - thisGroupFileNums[0] + firstHWP
# Grab the ABBA values for each file
thisGroupABBAs = ABBAarr[ABBAinds]
# Parse the indices for A images and B images
        Ainds = (np.where(thisGroupABBAs == 'A'))[0]
        Binds = (np.where(thisGroupABBAs == 'B'))[0]
# Double check that the pointing for each group is correct.
outliersPresent = True
while outliersPresent:
# Compute the median pointings for A and B dithers
A_medRA = np.median(thisGroupRAs[Ainds])
A_medDec = np.median(thisGroupDecs[Ainds])
B_medRA = np.median(thisGroupRAs[Binds])
B_medDec = np.median(thisGroupDecs[Binds])
# Compute the (RA, Dec) offsets from the median pointings
A_delRA = thisGroupRAs[Ainds] - A_medRA
A_delDec = thisGroupDecs[Ainds] - A_medDec
B_delRA = thisGroupRAs[Binds] - B_medRA
B_delDec = thisGroupDecs[Binds] - B_medDec
# Search for outliers in either RA **OR** Dec
# (more than 1 arcmin off median pointing).
A_RA_out = np.abs(A_delRA) > 1.0/60.0
A_Dec_out = np.abs(A_delDec) > 1.0/60.0
B_RA_out = np.abs(B_delRA) > 1.0/60.0
B_Dec_out = np.abs(B_delDec) > 1.0/60.0
# Set a flag to determine if there are still any outliers
outliersPresent = (np.sum(np.logical_or(A_RA_out, A_Dec_out)) +
np.sum(np.logical_or(B_RA_out, B_Dec_out)) > 0)
# If there **DO** still seem to be outliers present,
# then swap offending images between groups.
if outliersPresent:
print('Repairing pointing outliers')
pdb.set_trace()
# First identify offending images from each group
A_out = np.logical_or(A_RA_out, A_Dec_out)
B_out = np.logical_or(B_RA_out, B_Dec_out)
# Now identify which of the Aind and Binds need to be swapped
if np.sum(A_out) > 0:
AswapInds = Ainds[np.where(A_out)]
AkeepInds = Ainds[np.where(np.logical_not(A_out))]
if np.sum(B_out) > 0:
BswapInds = Binds[np.where(B_out)]
BkeepInds = Binds[np.where(np.logical_not(B_out))]
# Reconstruct the Ainds and Binds arrays
Ainds = np.concatenate([AkeepInds, BswapInds])
Binds = np.concatenate([BkeepInds, AswapInds])
# Sort the newly constructed Ainds and Binds arrays
AsortArr = np.argsort(Ainds)
Ainds = Ainds[AsortArr]
BsortArr = np.argsort(Binds)
Binds = Binds[BsortArr]
# Count the number of images in each group
AimgCount = len(Ainds)
BimgCount = len(Binds)
# *************************************
# Now that we have checked for errors,
# add these ABBA values to the ABBAlist
# *************************************
ABBAlist.extend(thisGroupABBAs)
# Now that we have the indices for A and B images for this group,
# we need to add them to the column to be added to the file index
fileIndexByName.add_column(Column(name='ABBA',
data=np.array(ABBAlist)),
index = 8)
# Re-sort by file-number
fileSortInds = np.argsort(fileIndexByName['Filename'])
fileIndex1 = fileIndexByName[fileSortInds]
#==============================================================================
# ********************* Write the file to disk ********************************
# Now that all the information for this dataset has been parsed,
# write the full index to disk.
#==============================================================================
print('')
print('***************************')
print('Writing final index to disk')
print('***************************')
fileIndex1.write(indexFile, format='csv')
```
#### File: Mimir_pyPol/oldCode/03a_build_HWP_bkgImages.py
```python
import os
import sys
import numpy as np
from astropy.io import ascii
from astropy.table import Table as Table
from astropy.table import Column as Column
from astropy.convolution import convolve, convolve_fft, Gaussian2DKernel
from astropy.stats import gaussian_fwhm_to_sigma, sigma_clipped_stats
from photutils import detect_threshold, detect_sources
from scipy.ndimage.filters import median_filter, gaussian_filter
# Add the AstroImage class
import astroimage as ai
# Add the header handler to the BaseImage class
from Mimir_header_handler import Mimir_header_handler
ai.reduced.ReducedScience.set_header_handler(Mimir_header_handler)
ai.set_instrument('mimir')
# This is the location of all PPOL reduction directory
PPOL_dir = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\PPOL_reduced\\201611'
# Build the path to the S3_Astrometry files
S3_dir = os.path.join(PPOL_dir, 'S3_Astrometry')
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\Mimir_data\\pyPol_Reduced\\201611\\'
# Build the path to the supersky directory
bkgImagesDir = os.path.join(pyPol_data, 'bkgImages')
if (not os.path.isdir(bkgImagesDir)):
os.mkdir(bkgImagesDir, 0o755)
# Read in the indexFile data and select the filenames
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='csv')
# Read in the kokopelli mask
from astropy.io import fits
kokopelliHDUlist = fits.open('kokopelliMask.fits')
kokopelliMask = (kokopelliHDUlist[0].data > 0)
################################################################################
# Define a function to locate even dim stars in the image
################################################################################
from scipy.signal import medfilt
def find_dim_stars(array):
# Perform a (3x3) median filter
medArr3 = medfilt(array, 3)
medArr9 = medfilt(array, 9)
# Compute array statistics
mean, median, stddev = sigma_clipped_stats(medArr3)
# Locate pixels more than 2-sigma above the local median
starPix = (medArr3 - medArr9)/stddev > 2
# Clean up the edge-effects (and kokopelli)
starPix[0:20, :] = False
starPix[-21:-1, :] = False
starPix[:, 0:20] = False
starPix[:, -21:-1] = False
starPix[kokopelliMask] = False
# Dialate the pixel mask
sigma = 4.0 * gaussian_fwhm_to_sigma # FWHM = 4.0
# Build a kernel for detecting pixels above the threshold
kernel = Gaussian2DKernel(sigma, x_size=9, y_size=9)
kernel.normalize()
starPix1 = convolve_fft(
starPix.astype(float),
kernel.array
)
starPix1 = (starPix1 > 0.01)
# Clean up the edge-effects
starPix1[0:20, :] = False
starPix1[-21:-1, :] = False
starPix1[:, 0:20] = False
starPix1[:, -21:-1] = False
# Expand a second time to be conservative
starPix11 = convolve_fft(
starPix1.astype(float),
kernel.array
)
return starPix11 > 0.01
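# The two convolve_fft passes above act as an inexpensive binary dilation:
# convolving the boolean star mask with a Gaussian kernel and re-thresholding
# at 0.01 grows each masked region by roughly the kernel footprint, so faint
# stellar wings are also excluded from the background estimate.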
################################################################################
# Determine which parts of the fileIndex pertain to science images
useFiles = np.where(fileIndex['USE'] == 1)
# Cull the file index to only include files selected for use
fileIndex = fileIndex[useFiles]
# Group the fileIndex by...
# 1. FILTER
# 2. Night
# 3. Dither (pattern)
# 4. HWP Angle
# 5. ABBA value
# fileIndexByGroup = fileIndex.group_by(['FILTER', 'Night',
# 'Dither', 'HWP', 'ABBA'])
fileIndexByGroup = fileIndex.group_by(['GROUP_ID', 'HWP', 'AB'])
# Loop through each grouping
for group in fileIndexByGroup.groups:
# Check if we're dealing with the A or B positions
thisABBA = str(np.unique(group['AB'].data)[0])
# Skip over the A images
if thisABBA == 'A': continue
# Grab the current target information
thisGroupName = str(np.unique(group['OBJECT'].data)[0])
thisGroupID = str(np.unique(group['GROUP_ID'].data)[0])
thisFilter = str(np.unique(group['FILTER'].data)[0])
thisHWP = str(np.unique(group['HWP'].data)[0])
# Test if this target-waveband-HWPang combo was previously processed
outFile = os.path.join(
bkgImagesDir,
'{}_G{}_HWP{}.fits'.format(thisGroupName, thisGroupID, thisHWP)
)
if os.path.isfile(outFile):
print('File ' + os.path.basename(outFile) +
' already exists... skipping to next group')
continue
numImgs = len(group)
print('\nProcessing {0} images for'.format(numImgs))
print('\tOBJECT : {0}'.format(thisGroupName))
print('\tFILTER : {0}'.format(thisFilter))
print('\tHWP : {0}'.format(thisHWP))
# Read in all the relevant images and backgrounds for constructing this HWP image
thisFileList = [os.path.join(S3_dir, f) for f in group['FILENAME']]
imgList = [ai.reduced.ReducedScience.read(file1) for file1 in thisFileList]
bkgList = [b for b in group['BACKGROUND']]
# Fill in all the stars (including the dim ones) with NaNs
cleanImgList = []
for img, bkg in zip(imgList, bkgList):
# Locate the pixels inside the very dim (small stars)
starPix = find_dim_stars(img.data)
# Locate the pixels with counts below -1e5
badPix = img.data < -1e5
# Build the combined mask
maskPix = np.logical_or(starPix, badPix)
# Divide by background level and fill the star pixels with nans
cleanImg = img.copy()
cleanArray = img.data.copy()
cleanArray /= bkg
cleanArray[maskPix] = np.nan
cleanImg.data = cleanArray
# Place the nan-filled array in the cleanImgList
cleanImgList.append(cleanImg)
# Test if this should be continued
if numImgs == 0:
print("Well that's odd... it shouldn't be possible to have zero images.")
import pdb; pdb.set_trace()
continue
if numImgs == 1:
print("Only one image found. Masking stars and inpainting")
# Inpaint the "star pixels"
superskyInpainter = ai.utilitywrappers.Inpainter(cleanImgList[0])
superskyImage = superskyInpainter.inpaint_nans()
elif numImgs >= 2:
# Construct an image stack of the off-target images
imageStack = ai.utilitywrappers.ImageStack(cleanImgList)
# Build a supersky image from these off-target images
superskyImage = imageStack.produce_supersky()
# Identify the "bad pixels" and inpaint them
badPix = superskyImage.data < 0.50
superskyInpainter = ai.utilitywrappers.Inpainter(superskyImage)
superskyImage2 = superskyInpainter.inpaint_nans(badPix)
# Should I compute and force one more normalization by the median?
# For now, yes...
_, median, _ = sigma_clipped_stats(superskyImage2.data)
superskyImage2 = superskyImage2/median
# Write the repaired image to disk
superskyImage2.write(outFile, dtype=np.float32)
print('Done!')
``` |
{
"source": "jmontgom10/pyPol",
"score": 3
} |
#### File: jmontgom10/pyPol/04_buildMasks.py
```python
import os
import sys
import numpy as np
from matplotlib import pyplot as plt
from astropy.table import Table, Column
from astropy.visualization import ZScaleInterval
# TODO: build a "MaskBuilder" class to manage all these variables and actions.
# Define the mask directory as a global variable
global maskDir
# Add the AstroImage class
import astroimage as ai
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# This is the location of all pyBDP data (index, calibration images, reduced...)
pyBDP_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyBDP_data\\201612'
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201612'
# The user can speed up the process by defining the "Target" values from
# the fileIndex to be considered for masking.
# Masks can only be produced for targets in this list.
targets = ['NGC2023', 'NGC7023']
# This is the location of the pyBDP processed Data
pyBDP_reducedDir = os.path.join(pyBDP_data, 'pyBDP_reduced_images')
# Setup new directory for polarimetry data
maskDir = os.path.join(pyPol_data, 'Masks')
if (not os.path.isdir(maskDir)):
os.mkdir(maskDir, 0o755)
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Determine which parts of the fileIndex pertain to on-target science images
useFiles = np.logical_and(
fileIndex['USE'] == 1,
fileIndex['DITHER_TYPE'] == 'ABBA'
)
useFiles = np.logical_and(
useFiles,
fileIndex['AB'] == 'A'
)
# Further restrict the selection to only include the selected targets
targetFiles = np.zeros((len(fileIndex),), dtype=bool)
for target in targets:
targetFiles = np.logical_or(
targetFiles,
fileIndex['TARGET'] == target
)
# Cull the fileIndex to ONLY include the specified targets
goodTargetRows = np.logical_and(useFiles, targetFiles)
targetRowInds = np.where(goodTargetRows)
fileIndex = fileIndex[targetRowInds]
#******************************************************************************
# Define the event handlers for clicking and keying on the image display
#******************************************************************************
def on_click(event):
global xList, yList, xx, yy
global fig, brushSize, axarr, maskImg, thisAxImg
x, y = event.xdata, event.ydata
# xList.append(x)
# yList.append(y)
# Compute distances from the click and update mask array
dist = np.sqrt((xx - x)**2 + (yy - y)**2)
maskInds = np.where(dist < brushSize*5)
if event.button == 1:
tmpData = maskImg.data
tmpData[maskInds] = 1
maskImg.data = tmpData
if (event.button == 2) or (event.button == 3):
tmpData = maskImg.data
tmpData[maskInds] = 0
maskImg.data = tmpData
# Update contour plot (clear old lines redo contouring)
axarr[1].collections = []
axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
# Update the display
fig.canvas.draw()
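# The handler above paints (left click) or erases (middle/right click) a
# circular region of radius 5*brushSize pixels around the cursor and then
# redraws the white mask contour on the centre panel.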
def on_key(event):
global fileList, targetList, fig, imgNum, brushSize
global maskDir, maskImg
global prevImg, thisImg, nextImg
global prevAxImg, thisAxImg, nextAxImg
global prevTarget, thisTarget, nextTarget
global prevMin, thisMin, nextMin
global prevMax, thisMax, nextMax
global prevLabel, thisLabel, nextLabel
# Handle brush sizing
if event.key == '1':
brushSize = 1
elif event.key == '2':
brushSize = 2
elif event.key == '3':
brushSize = 3
elif event.key == '4':
brushSize = 4
elif event.key == '5':
brushSize = 5
elif event.key == '6':
brushSize = 6
# Increment the image number
if event.key == 'right' or event.key == 'left':
if event.key == 'right':
#Advance to the next image
imgNum += 1
# Read in the new files
prevImg = thisImg
thisImg = nextImg
nextImg = ai.ReducedScience.read(fileList[(imgNum + 1) % len(fileList)])
# Update target info
prevTarget = thisTarget
thisTarget = nextTarget
nextTarget = targetList[(imgNum + 1) % len(fileList)]
# Build the image scaling intervals
zScaleGetter = ZScaleInterval()
# Compute new image display minima
prevMin = thisMin
thisMin = nextMin
nextMin, _ = zScaleGetter.get_limits(nextImg.data)
# Compute new image display maxima
prevMax = thisMax
thisMax = nextMax
_, nextMax = zScaleGetter.get_limits(nextImg.data)
if event.key == 'left':
#Move back to the previous image
imgNum -= 1
# Read in the new files
nextImg = thisImg
thisImg = prevImg
prevImg = ai.ReducedScience.read(fileList[(imgNum - 1) % len(fileList)])
# Update target info
nextTarget = thisTarget
thisTarget = prevTarget
prevTarget = targetList[(imgNum - 1) % len(fileList)]
# Build the image scaling intervals
zScaleGetter = ZScaleInterval()
# Compute new image display minima
nextMin = thisMin
thisMin = prevMin
prevMin, _ = zScaleGetter.get_limits(prevImg.data)
# Compute new image display maxima
nextMax = thisMax
thisMax = prevMax
_, prevMax = zScaleGetter.get_limits(prevImg.data)
#*******************************
# Update the displayed mask
#*******************************
# Check which mask files might be usable...
prevMaskFile = os.path.join(maskDir,
os.path.basename(prevImg.filename))
thisMaskFile = os.path.join(maskDir,
os.path.basename(thisImg.filename))
nextMaskFile = os.path.join(maskDir,
os.path.basename(nextImg.filename))
if os.path.isfile(thisMaskFile):
# If the mask for this file exists, use it
print('using this mask: ',os.path.basename(thisMaskFile))
maskImg = ai.ReducedScience.read(thisMaskFile)
elif os.path.isfile(prevMaskFile) and (prevTarget == thisTarget):
# Otherwise check for the mask for the previous file
print('using previous mask: ',os.path.basename(prevMaskFile))
maskImg = ai.ReducedScience.read(prevMaskFile)
elif os.path.isfile(nextMaskFile) and (nextTarget == thisTarget):
# Then check for the mask of the next file
print('using next mask: ',os.path.basename(nextMaskFile))
maskImg = ai.ReducedScience.read(nextMaskFile)
else:
# If none of those files exist, build a blank slate
# Build a mask template (0 = not masked, 1 = masked)
maskImg = thisImg.copy()
maskImg.filename = thisMaskFile
maskImg = maskImg.astype(np.int16)
# Make sure the uncertainty array is removed from the image
try:
del maskImg.uncertainty
except:
pass
# Update contour plot (clear old lines redo contouring)
axarr[1].collections = []
axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
# Reassign image display limits
prevAxImg.set_clim(vmin = prevMin, vmax = prevMax)
thisAxImg.set_clim(vmin = thisMin, vmax = thisMax)
nextAxImg.set_clim(vmin = nextMin, vmax = nextMax)
# Display the new images
prevAxImg.set_data(prevImg.data)
thisAxImg.set_data(thisImg.data)
nextAxImg.set_data(nextImg.data)
# Update the annotation
axList = fig.get_axes()
axList[1].set_title(os.path.basename(thisImg.filename))
prevStr = (str(prevImg.header['OBJECT']) + '\n' +
str(prevImg.header['FILTNME3'] + '\n' +
str(prevImg.header['POLPOS'])))
thisStr = (str(thisImg.header['OBJECT']) + '\n' +
str(thisImg.header['FILTNME3'] + '\n' +
str(thisImg.header['POLPOS'])))
nextStr = (str(nextImg.header['OBJECT']) + '\n' +
str(nextImg.header['FILTNME3'] + '\n' +
str(nextImg.header['POLPOS'])))
prevLabel.set_text(prevStr)
thisLabel.set_text(thisStr)
nextLabel.set_text(nextStr)
# Update the display
fig.canvas.draw()
# Save the generated mask
if event.key == 'enter':
# Make sure the header has the right values
maskImg.header = thisImg.header
# TODO: make sure the mask ONLY has what it needs
# i.e., remove uncertainty and convert to np.ubyte type.
# Write the mask to disk
maskBasename = os.path.basename(thisImg.filename)
maskFullname = os.path.join(maskDir, maskBasename)
print('Writing mask for file {}'.format(maskBasename))
maskImg.write(maskFullname, clobber=True)
# Clear out the mask values
if event.key == 'backspace':
# Clear out the mask array
maskImg.data = maskImg.data * np.byte(0)
# Update contour plot (clear old lines redo contouring)
axarr[1].collections = []
axarr[1].contour(xx, yy, maskImg.data, levels=[0.5], colors='white', alpha = 0.2)
# Update the display
fig.canvas.draw()
#******************************************************************************
#******************************************************************************
# This is the main script that will load in file names and prepare for plotting
#******************************************************************************
# Declare global variables
#global xList, yList
global xx, yy
global fileList, targetList, fig, imgNum, maskImg
global prevImg, thisImg, nextImg
global prevTarget, thisTarget, nextTarget
global prevAxImg, thisAxImg, nextAxImg
global prevMin, thisMin, nextMin
global prevMax, thisMax, nextMax
global prevLabel, thisLabel, nextLabel
xList = []
yList = []
imgNum = 0 # This number will be the FIRST image to be displayed center...
brushSize = 3 # (5xbrushSize pix) is the size of the region masked
#******************************************************************************
# This script will run the mask building step of the pyPol reduction
#******************************************************************************
# Group the fileIndex by...
# 1. Target
# 2. Waveband
# 3. Dither (pattern)
# 4. Polaroid Angle
fileIndexByTarget = fileIndex.group_by(
['TARGET', 'FILTER', 'POLPOS']
)
# Add the information to the fileList and targetList variables
fileList = fileIndexByTarget['FILENAME'].data.tolist()
targetList = fileIndexByTarget['TARGET'].data.tolist()
#*************************************
# Now prepare to plot the first images
#*************************************
# Read in an image for masking
prevImg = ai.ReducedScience.read(fileList[imgNum - 1])
thisImg = ai.ReducedScience.read(fileList[imgNum])
nextImg = ai.ReducedScience.read(fileList[imgNum + 1])
# Log the targets of the current panes
prevTarget = targetList[imgNum - 1]
thisTarget = targetList[imgNum]
nextTarget = targetList[imgNum + 1]
###
# For some reason the prevTarget, thisTarget, and nextTarget
# variables are not accessible from the event managers the way that
# prevImg, thisImg, and nextImg are.
# I have definitely declared them to be global variables...
# Perhaps they're getting treated as local variables
# because they are modified elsewhere???
# Test if a mask has already been generated for this image
maskFile = os.path.join(maskDir, os.path.basename(thisImg.filename))
if os.path.isfile(maskFile):
# If the mask file exists, use it
maskImg = ai.ReducedScience.read(maskFile)
else:
# If the mask file does not exist, build a blank slate
# Build a mask template (0 = not masked, 1 = masked)
maskImg = thisImg.copy()
maskImg.filename = maskFile
maskImg = maskImg.astype(np.int16)
# Generate 2D X and Y position maps
maskShape = maskImg.shape
grids = np.mgrid[0:maskShape[0], 0:maskShape[1]]
xx = grids[1]
yy = grids[0]
# Build the image displays
# Start by preparing a 1x3 plotting area
fig, axarr = plt.subplots(1, 3, sharey=True)
# Build the image scaling intervals
zScaleGetter = ZScaleInterval()
# Compute image count scaling
prevMin, prevMax = zScaleGetter.get_limits(prevImg.data)
thisMin, thisMax = zScaleGetter.get_limits(thisImg.data)
nextMin, nextMax = zScaleGetter.get_limits(nextImg.data)
# prevMin = np.median(prevImg.data) - 0.25*np.std(prevImg.data)
# prevMax = np.median(prevImg.data) + 2*np.std(prevImg.data)
# thisMin = np.median(thisImg.data) - 0.25*np.std(thisImg.data)
# thisMax = np.median(thisImg.data) + 2*np.std(thisImg.data)
# nextMin = np.median(nextImg.data) - 0.25*np.std(nextImg.data)
# nextMax = np.median(nextImg.data) + 2*np.std(nextImg.data)
# Populate each axis with its image
prevAxImg = prevImg.show(axes = axarr[0], cmap='viridis',
vmin = prevMin, vmax = prevMax, noShow = True)
thisAxImg = thisImg.show(axes = axarr[1], cmap='viridis',
vmin = thisMin, vmax = thisMax, noShow = True)
nextAxImg = nextImg.show(axes = axarr[2], cmap='viridis',
vmin = nextMin, vmax = nextMax, noShow = True)
# Add a contour of the mask array
maskContour = axarr[1].contour(xx, yy, maskImg.data,
levels=[0.5], origin='lower', colors='white', alpha = 0.2)
# Rescale the figure and setup the spacing between images
#fig.set_figheight(5.575, forward=True)
#fig.set_figwidth(17.0, forward=True)
fig.set_size_inches(17, 5.675, forward=True)
plt.subplots_adjust(left = 0.04, bottom = 0.04, right = 0.98, top = 0.96,
wspace = 0.02, hspace = 0.02)
# Add some figure annotation
thisTitle = axarr[1].set_title(os.path.basename(thisImg.filename))
prevStr = (str(prevImg.header['OBJECT']) + '\n' +
str(prevImg.header['FILTNME3'] + '\n' +
str(prevImg.header['POLPOS'])))
thisStr = (str(thisImg.header['OBJECT']) + '\n' +
str(thisImg.header['FILTNME3'] + '\n' +
str(thisImg.header['POLPOS'])))
nextStr = (str(nextImg.header['OBJECT']) + '\n' +
str(nextImg.header['FILTNME3'] + '\n' +
str(nextImg.header['POLPOS'])))
prevLabel = axarr[0].text(20, 875, prevStr,
color = 'white', size = 'medium')
thisLabel = axarr[1].text(20, 875, thisStr,
color = 'white', size = 'medium')
nextLabel = axarr[2].text(20, 875, nextStr,
color = 'white', size = 'medium')
thisShape = thisImg.shape
redLines = axarr[1].plot([thisShape[0]/2, thisShape[0]/2], [0, thisShape[1]],
'-r',
[0, thisShape[0]], [thisShape[1]/2, thisShape[1]/2],
'-r', alpha = 0.4)
#********************************************
#log this for future use!
#********************************************
# A more standard way to handle mouse clicks?
#xyList = fig.ginput(n=-1, timeout=-30, show_clicks=True,
# mouse_add=1, mouse_pop=3, mouse_stop=2)
#********************************************
# Connect the event manager...
cid1 = fig.canvas.mpl_connect('button_press_event',on_click)
cid2 = fig.canvas.mpl_connect('key_press_event', on_key)
# NOW show the image (without continuing execution)
# plt.ion()
plt.show()
# plt.ioff()
#
# pdb.set_trace()
# Disconnect the event manager and close the figure
fig.canvas.mpl_disconnect(cid1)
fig.canvas.mpl_disconnect(cid2)
# Close the plot
plt.close()
```
#### File: jmontgom10/pyPol/06b_polCalConstants.py
```python
import os
import sys
import copy
import pdb
# Scipy/numpy imports
import numpy as np
from scipy import odr
# Import statsmodels for robust linear regression
import statsmodels.api as smapi
# Astropy imports
from astropy.table import Table, Column, hstack, join
import astropy.units as u
from astropy.coordinates import SkyCoord
from astropy.stats import sigma_clipped_stats
from photutils import (centroid_com, aperture_photometry, CircularAperture,
CircularAnnulus)
# Import plotting utilities
from matplotlib import pyplot as plt
# Import the astroimage package
import astroimage as ai
# This script will compute the photometry of polarization standard stars
# and output a file containing the polarization position angle
# additive correction and the polarization efficiency of the PRISM instrument.
#==============================================================================
# *********************** CUSTOM USER CODE ************************************
# this is where the user specifies where the raw data is stored
# and some of the subdirectory structure to find the actual .FITS images
#==============================================================================
# Define how the font will appear in the plots
font = {'family': 'sans-serif',
'color': 'black',
'weight': 'normal',
'size': 14
}
# This is the location where all pyPol data will be saved
pyPol_data = 'C:\\Users\\Jordan\\FITS_data\\PRISM_data\\pyPol_data\\201612'
# This is the name of the file in which the calibration constants will be stored
polCalConstantsFile = os.path.join(pyPol_data, 'polCalConstants.csv')
# Read in the indexFile data and select the filenames
print('\nReading file index from disk')
indexFile = os.path.join(pyPol_data, 'reducedFileIndex.csv')
fileIndex = Table.read(indexFile, format='ascii.csv')
# Group the fileIndex by waveband
fileIndexByWaveband = fileIndex.group_by(['FILTER'])
# Retrieve the waveband values within the specified calibration data
wavebands = np.unique(fileIndexByWaveband['FILTER'])
# Initialize a table to store all the measured polarization calibration constants
calTable = Table(names=('FILTER', 'PE', 's_PE', 'PAsign', 'D_PA', 's_D_PA'),
dtype=('S1', 'f8', 'f8', 'i8', 'f8', 'f8'))
# Also initialize a dictionary to store ALL of the polarization data
allPolCalDict = {}
# Loop through each waveband and compute the calibration constants from the data
# available for that waveband.
for thisFilter in wavebands:
# Update the user on processing status
print('\nProcessing calibration data for')
print('Filter : {0}'.format(thisFilter))
# Define the polarization standard files
thisFilename = 'polStandardTable_{0}.csv'.format(thisFilter)
polTableFile = os.path.join(pyPol_data, thisFilename)
# Read in the polarization calibration data file
polCalTable = Table.read(polTableFile, format='ascii.csv')
###############
# Get PE value
###############
# # Grab the column names of the polarization measurements
# polStart = lambda s: s.startswith('P_' + thisFilter)
# polBool = list(map(polStart, polCalTable.keys()))
# polInds = np.where(polBool)
# polKeys = np.array(polCalTable.keys())[polInds]
# Initialize a dictionary to store all the calibration measurements
tmpDict1 = {
'value':[],
'uncert':[]}
tmpDict2 = {
'expected':copy.deepcopy(tmpDict1),
'measured':copy.deepcopy(tmpDict1)}
polCalDict = {
'P':copy.deepcopy(tmpDict2),
'PA':copy.deepcopy(tmpDict2)}
# Quickly build a list of calibration keys
calKeyList = ['_'.join([prefix, thisFilter])
for prefix in ['P', 'sP', 'PA', 'sPA']]
# Loop over each row in the calibration data table
for istandard, standard in enumerate(polCalTable):
# Grab the appropriate row for this standard (as a table object)
standardTable = polCalTable[np.array([istandard])]
# Trim off unnecessary rows before looping over what remains
standardTable.remove_columns(['Name', 'RA_1950', 'Dec_1950'])
# Now loop over the remaining keys and store their values
for key in standardTable.keys():
# Test if this is a calibration value
if key in calKeyList: continue
# Test if this value is masked
if standardTable[key].data.mask: continue
# If this is an unmasked, non-calibration value, then store it!
# Find out the proper calibration key for polCalTable
calKeyInd = np.where([key.startswith(k) for k in calKeyList])
thisCalKey = calKeyList[calKeyInd[0][0]]
# Begin by parsing which key we're dealing with
dictKey = (key.split('_'))[0]
if dictKey.endswith('A'):
dictKey = 'PA'
elif dictKey.endswith('P'):
dictKey = 'P'
else:
print('funky keys!')
pdb.set_trace()
# Parse whether this is a value or an uncertainty
if key.startswith('s'):
val_sig = 'uncert'
else:
val_sig = 'value'
# Store the expected value
try:
polCalDict[dictKey]['expected'][val_sig].append(
standardTable[thisCalKey].data.data[0])
except:
pdb.set_trace()
# Store the measured value
polCalDict[dictKey]['measured'][val_sig].append(
standardTable[key].data.data[0])
###################
# Identify Outliers
###################
# Grab the FULL set of expected and measured polarization values
expectedPol = np.array(polCalDict['P']['expected']['value'])
uncertInExpectedPol = np.array(polCalDict['P']['expected']['uncert'])
measuredPol = np.array(polCalDict['P']['measured']['value'])
uncertInMeasuredPol = np.array(polCalDict['P']['measured']['uncert'])
# Run a statsmodels linear regression and test for outliers
OLSmodel = smapi.OLS(
expectedPol,
measuredPol,
hasconst=False
)
OLSregression = OLSmodel.fit()
# Find the outliers
outlierTest = OLSregression.outlier_test()
outlierBool = [t[2] < 0.5 for t in outlierTest]
# Grab the FULL set of expected and measured position angle values
expectedPA = np.array(polCalDict['PA']['expected']['value'])
uncertInExpectedPA = np.array(polCalDict['PA']['expected']['uncert'])
measuredPA = np.array(polCalDict['PA']['measured']['value'])
uncertInMeasuredPA = np.array(polCalDict['PA']['measured']['uncert'])
# Run a statsmodels linear regression and test for outliers
OLSmodel = smapi.OLS(
expectedPA,
measuredPA,
hasconst=True
)
OLSregression = OLSmodel.fit()
# Find the outliers
outlierTest = OLSregression.outlier_test()
outlierBool = np.logical_or(
outlierBool,
[t[2] < 0.5 for t in outlierTest]
)
# Cull the list of Ps and PAs
goodInds = np.where(np.logical_not(outlierBool))
expectedPol = expectedPol[goodInds]
uncertInExpectedPol = uncertInExpectedPol[goodInds]
measuredPol = measuredPol[goodInds]
uncertInMeasuredPol = uncertInMeasuredPol[goodInds]
expectedPA = expectedPA[goodInds]
uncertInExpectedPA = uncertInExpectedPA[goodInds]
measuredPA = measuredPA[goodInds]
uncertInMeasuredPA = uncertInMeasuredPA[goodInds]
# TODO: print an update to the user on the polarization values culled
###############
# Get PE value
###############
# Close any remaining plots before proceeding to show the user the graphical
# summary of the calibration data.
plt.close('all')
# Define the model to be used in the fitting
def PE(slope, x):
return slope*x
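# The efficiency model above is a zero-intercept line, measured P = PE * cataloged P,
# so the single ODR parameter is the polarization-efficiency slope.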
# Set up ODR with the model and data.
PEmodel = odr.Model(PE)
data = odr.RealData(
expectedPol,
measuredPol,
sx=uncertInExpectedPol,
sy=uncertInMeasuredPol
)
# Initialize the full odr model object
odrObj = odr.ODR(data, PEmodel, beta0=[1.])
# Run the regression.
PEout = odrObj.run()
# Use the in-built pprint method to give us results.
print(thisFilter + '-band PE fitting results')
PEout.pprint()
print('\n\nGenerating P plot')
plt.ion()
fig = plt.figure()
ax = fig.add_subplot(1,1,1)
ax.errorbar(
polCalDict['P']['expected']['value'],
polCalDict['P']['measured']['value'],
xerr=polCalDict['P']['expected']['uncert'],
yerr=polCalDict['P']['measured']['uncert'],
ecolor='b', linestyle='None', marker=None)
xlim = ax.get_xlim()
ax.plot([0,max(xlim)], PE(PEout.beta[0], np.array([0,max(xlim)])), 'g')
plt.xlabel('Cataloged P [%]')
plt.ylabel('Measured P [%]')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = 0, xlim[1]
ylim = 0, ylim[1]
ax.set_xlim(xlim)
ax.set_ylim(ylim)
plt.title(thisFilter + '-band Polarization Efficiency')
#Compute where the annotation should be placed
ySpan = np.max(ylim) - np.min(ylim)
xSpan = np.max(xlim) - np.min(xlim)
xtxt = 0.1*xSpan + np.min(xlim)
ytxt = 0.9*ySpan + np.min(ylim)
plt.text(xtxt, ytxt, 'PE = {0:4.3g} +/- {1:4.3g}'.format(
PEout.beta[0], PEout.sd_beta[0]), fontdict=font)
import pdb; pdb.set_trace()
# Test if a polarization efficiency greater than one was retrieved...
if PEout.beta[0] > 1.0:
print('Polarization Efficiency greater than one detected.')
print('Forcing PE constant to be 1.0')
PEout.beta[0] = 1.0
###############
# Get PA offset
###############
# Fit a model to the PA1 vs. PA0 data
# Define the model to be used in the fitting
def deltaPA(B, x):
return B[0]*x + B[1]
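# In the two ODR passes below, the slope B[0] is first fit freely only to
# recover its sign, and is then held fixed (ifixb=[0,1]) so that only the
# additive PA offset B[1] is estimated.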
# Set up ODR with the model and data.
deltaPAmodel = odr.Model(deltaPA)
data = odr.RealData(
expectedPA,
measuredPA,
sx=uncertInExpectedPA,
sy=uncertInMeasuredPA
)
# On first pass, just figure out what the sign is
odrObj = odr.ODR(data, deltaPAmodel, beta0=[0.0, 90.0])
dPAout = odrObj.run()
PAsign = np.round(dPAout.beta[0])
# Build the proper fitter class with the slope fixed
odrObj = odr.ODR(data, deltaPAmodel, beta0=[PAsign, 90.0], ifixb=[0,1])
# Run the regression.
dPAout = odrObj.run()
# Use the in-built pprint method to give us results.
print(thisFilter + '-band delta PA fitting results')
dPAout.pprint()
# For ease of reference, convert the expected and measured values to arrays
PA0 = np.array(polCalDict['PA']['expected']['value'])
PA1 = np.array(polCalDict['PA']['measured']['value'])
# Apply the correction terms
dPAval = dPAout.beta[1]
PAcor = ((PAsign*(PA1 - dPAval)) + 720.0) % 180.0
# TODO
# Check if PAcor values are closer to the corresponding PA0 values
# by adding or subtracting 180
PA0 = np.array(polCalDict['PA']['expected']['value'])
PAminus = np.abs((PAcor - 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAminus) > 0:
PAcor[np.where(PAminus)] = PAcor[np.where(PAminus)] - 180
PAplus = np.abs((PAcor + 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAplus) > 0:
PAcor[np.where(PAplus)] = PAcor[np.where(PAplus)] + 180
# Do a final regression to plot-test if things are right
data = odr.RealData(
PA0,
PAcor,
sx=polCalDict['PA']['expected']['uncert'],
sy=polCalDict['PA']['measured']['uncert']
)
odrObj = odr.ODR(data, deltaPAmodel, beta0=[1.0, 0.0], ifixb=[0,1])
dPAcor = odrObj.run()
# Plot up the results
# PA measured vs. PA true
print('\n\nGenerating PA plot')
fig.delaxes(ax)
ax = fig.add_subplot(1,1,1)
#ax.errorbar(PA0_V, PA1_V, xerr=sPA0_V, yerr=sPA1_V,
# ecolor='b', linestyle='None', marker=None)
#ax.plot([0,max(PA0_V)], deltaPA(dPAout.beta, np.array([0,max(PA0_V)])), 'g')
ax.errorbar(PA0, PAcor,
xerr=polCalDict['PA']['expected']['uncert'],
yerr=polCalDict['PA']['measured']['uncert'],
ecolor='b', linestyle='None', marker=None)
xlim = ax.get_xlim()
ax.plot([0,max(xlim)], deltaPA(dPAcor.beta, np.array([0, max(xlim)])), 'g')
plt.xlabel('Cataloged PA [deg]')
plt.ylabel('Measured PA [deg]')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = 0, xlim[1]
ax.set_xlim(xlim)
plt.title(thisFilter + '-band PA offset')
#Compute where the annotation should be placed
ySpan = np.max(ylim) - np.min(ylim)
xSpan = np.max(xlim) - np.min(xlim)
xtxt = 0.1*xSpan + np.min(xlim)
ytxt = 0.9*ySpan + np.min(ylim)
plt.text(xtxt, ytxt, 'PA offset = {0:4.3g} +/- {1:4.3g}'.format(
dPAout.beta[1], dPAout.sd_beta[1]), fontdict=font)
pdb.set_trace()
# Now that all the calibration constants have been estimated and the results
# shown to the user (in theory for their sanity-test approval), store the
# final calibration data in the calTable variable
calTable.add_row([thisFilter, PEout.beta[0], PEout.sd_beta[0],
int(PAsign), dPAout.beta[1], dPAout.sd_beta[1]])
# Store a copy of polCalDict in allPolCalDict
allPolCalDict[thisFilter] = copy.deepcopy(polCalDict)
# Now double check if the PA offsets are agreeable. If not, keep them separate,
# but otherwise attempt to combine them...
#####################################################
# Check if a single deltaPA value is appropriate
#####################################################
# Extract the originally estimated dPA values from the table
dPAvalues = calTable['D_PA'].data
dPAsigmas = calTable['s_D_PA'].data
# Compute all possible pairwise differences in dPAs and their uncertainties
# (doubling the length-n shape tuple yields an (n, n) matrix shape)
D_dPAmatrix = np.zeros(2*dPAvalues.shape)
s_D_dPAmatrix = np.ones(2*dPAvalues.shape)
for i in range(len(dPAvalues)):
for j in range(len(dPAvalues)):
# Skip over trivial or redundant elements
if j <= i: continue
D_dPAmatrix[i,j] = np.abs(dPAvalues[i] - dPAvalues[j])
s_D_dPAmatrix[i,j] = np.sqrt(dPAsigmas[i]**2 + dPAsigmas[j]**2)
# Check if any two of these values are significantly different from each other
if (D_dPAmatrix/s_D_dPAmatrix > 3.0).any():
print('Some of these calibration constants are significantly different.')
print('Leave them as they are.')
else:
PA0 = []
PA1 = []
sPA0 = []
sPA1 = []
for key, val in allPolCalDict.items():
PA0.extend(val['PA']['expected']['value'])
PA1.extend(val['PA']['measured']['value'])
sPA0.extend(val['PA']['expected']['uncert'])
sPA1.extend(val['PA']['measured']['uncert'])
# Do a final regression to plot-test if things are right
data = odr.RealData(PA0, PA1, sx=sPA0, sy=sPA1)
# On first pass, just figure out what the sign is
odrObj = odr.ODR(data, deltaPAmodel, beta0=[0.0, 90.0])
dPAout = odrObj.run()
PAsign = np.round(dPAout.beta[0])
# Build the proper fitter class with the slope fixed
odrObj = odr.ODR(data, deltaPAmodel, beta0=[PAsign, 90.0], ifixb=[0,1])
# Run the regression.
dPAout = odrObj.run()
# Use the in-built pprint method to give us results.
print('Final delta PA fitting results')
dPAout.pprint()
# Apply the correction terms
dPAval = dPAout.beta[1]
PAcor = ((PAsign*(PA1 - dPAval)) + 720.0) % 180.0
# Check if the correct PAs need 180 added or subtracted.
PAminus = np.abs((PAcor - 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAminus) > 0:
PAcor[np.where(PAminus)] = PAcor[np.where(PAminus)] - 180
PAplus = np.abs((PAcor + 180) - PA0 ) < np.abs(PAcor - PA0)
if np.sum(PAplus) > 0:
PAcor[np.where(PAplus)] = PAcor[np.where(PAplus)] + 180
# # Save corrected values for possible future use
# PAcor_R = PAcor.copy()
# Do a final regression to plot-test if things are right
data = odr.RealData(PA0, PAcor, sx=sPA0, sy=sPA1)
odrObj = odr.ODR(data, deltaPAmodel, beta0=[1.0, 0.0], ifixb=[0,1])
dPAcor = odrObj.run()
# Plot up the results
# PA measured vs. PA true
print('\n\nGenerating PA plot')
fig.delaxes(ax)
ax = fig.add_subplot(1,1,1)
#ax.errorbar(PA0_R, PA1, xerr=sPA0_R, yerr=sPA1,
# ecolor='b', linestyle='None', marker=None)
#ax.plot([0,max(PA0_R)], deltaPA(dPAout.beta, np.array([0,max(PA0_R)])), 'g')
ax.errorbar(PA0, PAcor, xerr=sPA0, yerr=sPA1,
ecolor='b', linestyle='None', marker=None)
ax.plot([0,max(PA0)], deltaPA(dPAcor.beta, np.array([0, max(PA0)])), 'g')
plt.xlabel('Cataloged PA [deg]')
plt.ylabel('Measured PA [deg]')
xlim = ax.get_xlim()
ylim = ax.get_ylim()
xlim = 0, xlim[1]
ax.set_xlim(xlim)
plt.title('Final Combined PA offset')
#Compute where the annotation should be placed
ySpan = np.max(ylim) - np.min(ylim)
xSpan = np.max(xlim) - np.min(xlim)
xtxt = 0.1*xSpan + np.min(xlim)
ytxt = 0.9*ySpan + np.min(ylim)
plt.text(xtxt, ytxt, 'PA offset = {0:4.3g} +/- {1:4.3g}'.format(
dPAout.beta[1], dPAout.sd_beta[1]))
# Pause for a double check from the user
pdb.set_trace()
# User approves, close the plot and proceed
plt.close()
plt.ioff()
# Update the calibration table
calTable['D_PA'] = dPAout.beta[1]
calTable['s_D_PA'] = dPAout.sd_beta[1]
print('Writing calibration data to disk')
calTable.write(polCalConstantsFile, format='ascii.csv')
print('Calibration tasks completed!')
``` |
{
"source": "jmontoyac/disk-space",
"score": 3
} |
#### File: jmontoyac/disk-space/awsFunctions.py
```python
import boto3
from botocore.exceptions import NoCredentialsError
ACCESS_KEY = ''
SECRET_KEY = ''
def upload_to_aws(local_file, bucket, s3_file):
s3 = boto3.client('s3', aws_access_key_id=ACCESS_KEY,
aws_secret_access_key=SECRET_KEY)
try:
s3.upload_file(local_file, bucket, s3_file)
print("Upload Successful")
return True
except FileNotFoundError:
print("The file was not found")
return False
except NoCredentialsError:
print("Credentials not available")
return False
def getUsedSpace(aBucketName):
    s3 = boto3.resource('s3', aws_access_key_id=ACCESS_KEY,
                        aws_secret_access_key=SECRET_KEY)
    space = 0
    # Sum the size of every object in the requested bucket
    bucket = s3.Bucket(aBucketName)
    for key in bucket.objects.all():
        space = space + key.size
        # print(key.key)
    print('Used space in bucket ' + aBucketName +
          ' ' + str(space // (2 ** 20)) + ' Megabytes')
    return space
# Main
localFile = '/images/gotIt.jpg'
s3File = 'imagesTest/gotIt.jpg'
bucketName = 'voti-public'
#uploaded = upload_to_aws(localFile, bucketName, s3File)
usedSpace = getUsedSpace(bucketName)
```
#### File: jmontoyac/disk-space/getSpace.py
```python
import psutil
import os
from datetime import datetime
import rabbitFunctions as rabbit
import deleteFiles
# TODO Read value from voti.conf during installation through Ansible role
# warningLimit = ${BUCKET_WARNING_LIMIT}
warningLimit = 80.0 # Usage percentage to warn
def getDirectorySize(dir):
    # Walk the requested directory and accumulate the size of every file
    total_size = 0
    start_path = dir
    for path, dirs, files in os.walk(start_path):
        for f in files:
            fp = os.path.join(path, f)
            total_size += os.path.getsize(fp)
    size = total_size // (2**30)
    print("Directory size: " + str(size) + " GiB")
    return size
# psutil library disk info
# UNITS_MAPPING = [
# (1<<50, ' PB'),
# (1<<40, ' TB'),
# (1<<30, ' GB'),
# (1<<20, ' MB'),
# (1<<10, ' KB'),
def getDiskInfo():
diskInfo = psutil.disk_usage('/images')
percent_used = (diskInfo.used * 100 / diskInfo.total)
body_bucket = {
"bucket_id": "buck-001",
"date_time": str(datetime.now()),
"total_capacity": str(diskInfo.total // (2 ** 20)),
"total_used": str(diskInfo.used // (2 ** 20)),
"percentage_used": str(round(percent_used, 2))
}
return body_bucket
# Main
body = getDiskInfo()
if float(body["percentage_used"]) >= warningLimit:
print("Disk limit exceeded")
else:
print("Disk limit Not yet exceeded, time: " + str(datetime.now()))
# Send bucket data to Rabbit
rabbit.publish_to_rabbit('disk_info', body, 'rabbitmq')
deleteFiles.createTestData(10)
``` |
{
"source": "jmontp/prosthetic_adaptation",
"score": 3
} |
#### File: prosthetic_adaptation/kmodel/function_bases.py
```python
import numpy as np
class Basis:
def __init__(self, n, var_name):
self.n = n
self.var_name = var_name
#Subclasses need to implement this method
def evaluate(self,x):
    pass
#Subclasses must also implement the derivative
def evaluate_derivative(self,x,num_derivatives=1):
    pass
def evaluate_conditional(self,x,apply_derivative,num_derivatives=1):
if(apply_derivative == True):
return self.evaluate_derivative(x,num_derivatives)
else:
return self.evaluate(x)
##Define classes that will be used to calculate kronecker products in real time
#This will create a Polynomial Basis with n entries
# The variable name is also needed
class PolynomialBasis(Basis):
def __init__(self, n, var_name):
Basis.__init__(self,n,var_name)
self.size = n
#This function will evaluate the model at the given x value
def evaluate(self,x):
return np.polynomial.polynomial.polyvander(x, self.n-1)
#This will create a Polynomial Basis with n harmonic frequencies
# The variable name is also needed
class FourierBasis(Basis):
def __init__(self, n, var_name):
Basis.__init__(self, n, var_name)
self.size = 2*n+1
#This function will evaluate the model at the given x value
def evaluate(self,x):
x = x.reshape(-1,1)
#l is used to generate the coefficients of the series
l = np.arange(1,self.n+1).reshape(1,-1)
#Initialize everything as one to get
result = np.ones((x.shape[0],self.size))
result[:,1:self.n+1] = np.cos(2*np.pi*x @ l)
result[:,self.n+1:] = np.sin(2*np.pi*x @ l)
return result
class LegendreBasis(Basis):
"Legendre polynomials are on [-1,1]"
def __init__(self, n, var_name):
Basis.__init__(self, n, var_name)
self.size = n
def evaluate(self,x):
return np.polynomial.legendre.legvander(x, self.n-1)
class ChebyshevBasis(Basis):
"Chebyshev polynomials are on [-1,1]"
def __init__(self, n, var_name):
Basis.__init__(self, n, var_name)
self.size = n
def evaluate(self, x):
return np.polynomial.chebyshev.chebvander(x, self.n-1)
class HermiteBasis(Basis):
"Hermite polynomials are on [-inf,inf]"
def __init__(self, n, var_name):
Basis.__init__(self, n, var_name)
self.size = n
def evaluate(self,x):
return np.polynomial.hermite_e.hermevander(x, self.n-1)
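# Minimal usage sketch, assuming a 1-D phase array as input; the printed
# shapes follow from each basis' declared size attribute.
if __name__ == '__main__':
    phase = np.linspace(0.0, 1.0, 5)
    fourier = FourierBasis(2, 'phase')      # size = 2*n + 1 = 5
    poly = PolynomialBasis(3, 'phase')      # size = n = 3
    print(fourier.evaluate(phase).shape)    # (5, 5)
    print(poly.evaluate(phase).shape)       # (5, 3)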
``` |
{
"source": "jmontp/prosthetic-adaptation",
"score": 3
} |
#### File: prosthetic-adaptation/kmodel/function_bases.py
```python
import pandas as pd
import numpy as np
#import cupy as np
import math
import pickle
from os import path
from sklearn.decomposition import PCA
from functools import lru_cache
#--------------------------
#Need to create two objects:
#Basis object:
# basis(x): takes input and returns the value
# basis_name:
# basis size
# basis params
# variable name
#Dont use basis directly, its just a blueprint to what you need to implement
class Basis:
def __init__(self, n, var_name):
self.n = n
self.var_name = var_name
#Subclasses need to implement this method
def evaluate(self,x):
    pass
#Subclasses must also implement the derivative
def evaluate_derivative(self,x,num_derivatives=1):
    pass
def evaluate_conditional(self,x,apply_derivative,num_derivatives=1):
if(apply_derivative == True):
return self.evaluate_derivative(x,num_derivatives)
else:
return self.evaluate(x)
##Define classes that will be used to calculate kronecker products in real time
#This will create a Polynomial Basis with n entries
# The variable name is also needed
class PolynomialBasis(Basis):
def __init__(self, n, var_name):
Basis.__init__(self,n,var_name)
self.size = n
#This function will evaluate the model at the given x value
def evaluate(self,x):
#result = [math.pow(x,i) for i in range(0,self.n)]
#Power will evaluate elementwise by the power defined in the second
#argument. Arrange will generate a list from 0-n-1, therefore it will
#evaluate the power of each element
x_array = np.repeat(x,self.n,axis=1)
power_array = np.arange(self.n)
output = np.power(x_array,power_array)
return output
#This function will evaluate the derivative of the model at the given
# x value
#TODO: Unit test please, please please
def evaluate_derivative(self,x,num_derivatives=1):
if(num_derivatives == 0):
return self.evaluate(x)
if(num_derivatives < self.size):
x_array = np.repeat(x,self.size,axis=1)
coefficient_array = np.arange(self.n)
temp_array = np.arange(self.n)
for i in range(1,num_derivatives):
temp_array = temp_array-1
coefficient_array = coefficient_array*(temp_array)
#Generate power array
power_array = np.arange(-num_derivatives,self.size-num_derivatives)
#Set negative indices to zero
power_array = np.where(power_array<0,0,power_array)
return (np.power(x_array,power_array)*coefficient_array)
else:
return np.repeat(0,self.size)
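# For example, with n = 3 and num_derivatives = 1 the returned row per sample
# is [0, 1, 2x], i.e. the elementwise derivative of [1, x, x^2]; once
# num_derivatives >= size the row collapses to zeros.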
#This will create a Polynomial Basis with n harmonic frequencies
# The variable name is also needed
class FourierBasis(Basis):
def __init__(self, n, var_name):
Basis.__init__(self, n, var_name)
self.size = 2*n+1
#This function will evaluate the model at the given x value
def evaluate(self,x):
#l is used to generate the coefficients of the series
l = np.arange(1,self.n+1).reshape(1,-1)
#Initialize everything as one to get
result = np.ones((x.shape[0],self.size))
#Add the sine and cos part
result[:,1:self.n+1] = np.cos(2*np.pi*x @ l)
result[:,self.n+1:] = np.sin(2*np.pi*x @ l)
return result
#This function will evaluate the derivative of the model at the given
# x value
def evaluate_derivative(self,x,num_derivatives=1):
if (num_derivatives == 0):
return self.evaluate(x)
#l is used to generate the coefficients of the series
l = np.arange(1,self.n+1).reshape(1,-1)
#Initialize everything as one to get
result = np.zeros((x.shape[0],self.size))
#Add the sine and cos part
#https://www.wolframalpha.com/input/?i=d%5En+cos%282*pi*a*x%29%2Fdx%5En
result[:,1:self.n+1] = np.power((2*np.pi*l),num_derivatives)*np.cos(0.5*np.pi*(num_derivatives + 4*x @ l))
#https://www.wolframalpha.com/input/?i=d%5En+sin%282*pi*a*x%29%2Fdx%5En
result[:,self.n+1:] = np.power((2*np.pi*l),num_derivatives)*np.sin(0.5*np.pi*(num_derivatives + 4*x @ l))
return result
#Not really using this right now so keeping in the old format
class BernsteinBasis(Basis):
def __init__(self, n, var_name):
Basis.__init__(self, n, var_name)
self.size = n
def evaluate(self,x):
basis = [math.comb(self.n,i)*math.pow(x,i)*math.pow((1-x),(self.n-i)) for i in range(0,self.n+1)];
return np.array(basis)
def evaluate_derivative(self,x):
#raise NotImplementedError("Bernstein basis derivative not implemented")
pass
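# Minimal usage sketch, assuming a single-column input array as the
# evaluate/evaluate_derivative methods expect:
if __name__ == '__main__':
    phase = np.linspace(0.0, 1.0, 4).reshape(-1, 1)
    fourier = FourierBasis(2, 'phase')                    # size = 2*n + 1 = 5
    print(fourier.evaluate(phase).shape)                  # (4, 5)
    print(fourier.evaluate_derivative(phase, 1).shape)    # (4, 5)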
``` |
{
"source": "jmontp/Prosthetic_Adaptation",
"score": 3
} |
#### File: Prosthetic_Adaptation/kmodel/kronecker_model.py
```python
import numpy as np
import pandas as pd
import pickle
from sklearn.decomposition import PCA
#Relative Imports
from .context import math_utils
#Set test assert_pd for speed boost
math_utils.test_pd = False
#Model Object:
# The kronecker model will take in multiple models and then calculate the
# kronecker product of all of them in runtime
#--------------------------
class KroneckerModel:
def __init__(self, output_name,*funcs,subjects=None,num_gait_fingerprint=4):
self.funcs = funcs
#Calculate the size of the parameter array
#Additionally, pre-allocate arrays for kronecker products intermediaries
# to speed up results
self.output_name = output_name
self.order = []
size = 1
for func in funcs:
#Since we multiply left to right, the total size will be on the left
#and the size for the new row will be on the right
print((str(size), str(func.size)))
size = size * func.size
self.order.append(func.var_name)
self.size = size
self.num_states = len(funcs)
self.subjects = {}
self.one_left_out_subjects = {}
self.num_gait_fingerprint = num_gait_fingerprint
self.gait_fingerprint_names = ["gf"+str(i) for i in range(1,num_gait_fingerprint+1)]
#Todo: Add average pca coefficient
self.cross_model_personalization_map = None
self.cross_model_inter_subject_average = None
if(subjects is not None):
self.add_subject(subjects)
self.fit_subjects()
self.calculate_pmap(n=num_gait_fingerprint)
#Add a subject without running any fitting
def add_subject(self,subjects):
import os
print("CWD is: " + os.getcwd())
for subject,filename in subjects:
self.subjects[subject] = \
{'filename': filename, \
'dataframe': pd.read_parquet(filename, columns=[self.output_name,*self.order])
}
def evaluate_pandas(self, dataframe):
rows = dataframe.shape[0]
output = np.array(1).reshape(1,1,1)
for func in self.funcs:
#Knronecker product per state
#Get data
var_data = dataframe[func.var_name].values
var_data2 = var_data
intermediary_output = func.evaluate(var_data2)
output = (output[:,np.newaxis,:]*intermediary_output[:,:,np.newaxis]).reshape(rows,-1)
return output
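# The loop above forms a row-wise Kronecker product: each state's basis row
# is multiplied against every element of the row built so far, so the final
# regressor row for each sample has length equal to the product of the
# individual basis sizes (self.size).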
def evaluate_numpy(self,states):
rows = states.shape[1]
#Make sure we only have the amount of states that we want
phase_states = states[:self.num_gait_fingerprint,:]
output = np.array(1).reshape(1,1,1)
for func,state in zip(self.funcs,phase_states):
state_t = (state.T)[:,np.newaxis]
eval_value = func.evaluate(state_t).reshape(rows,-1,1)
output = (output[:,np.newaxis,:]*eval_value).reshape(rows,-1)
return output
def evaluate_gait_fingerprint_numpy(self,states):
phase_states = states[:self.num_states]
gait_fingerprints = states[self.num_states:]
xi = (self.personalization_map @ gait_fingerprints) + self.inter_subject_average_fit
row_vector = self.evaluate_numpy(phase_states)
return row_vector @ xi
def evaluate_gait_fingerprint_cross_model_numpy(self,states):
phase_states = states[:self.num_states]
gait_fingerprints = states[self.num_states:]
xi = (self.cross_model_personalization_map @ gait_fingerprints) + self.cross_model_inter_subject_average
row_vector = self.evaluate_numpy(phase_states)
return row_vector @ xi
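# In both gait-fingerprint evaluations above the subject-specific model
# coefficients are reconstructed as
#     xi = personalization_map @ gait_fingerprints + inter_subject_average_fit
# and the Kronecker regressor row is then applied to xi to get the output.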
def least_squares(self,dataframe,output,splits=50):
#Initialize matrices to build them up with
# low rank updates
RTR = np.zeros((self.size,self.size))
yTR = np.zeros((1,self.size))
RTy = np.zeros((self.size,1))
yTy = 0
num_rows = len(dataframe.index)
#Divide the dataset into chunks that fit in memory
# for the least squares calculations
for sub_dataframe in np.array_split(dataframe,splits):
#Get the regressor matrix for the chunk
R = self.evaluate_pandas(sub_dataframe)
#Get the expected output
y = sub_dataframe[output].values[:,np.newaxis]
#Calculate the rank update
RTR_ = R.T @ R
#print("RTR rank: {} RTR shape {}".format(np.linalg.matrix_rank(RTR,hermitian=True),RTR.shape))
#Add the low rank update to the accumulator matrices
RTR += RTR_
yTR += y.T @ R
RTy += R.T @ y
yTy += y.T @ y
#If the regressor is rank deficient, the least-squares solve will fail
try:
x = np.linalg.solve(RTR, RTy)
residual = np.sqrt((x.T @ RTR @ x - x.T @ RTy - yTR @ x + yTy)/num_rows)
result = (x, num_rows, residual, RTR, RTy, yTR, yTy)
except:
print('Singular RTR in optimal fit least squares')
x = 0
residual = float('inf')
result = (0, num_rows, residual, RTR, RTy, yTR, yTy)
return result
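# Note on the accumulation above: solving (sum_i R_i^T R_i) x = sum_i R_i^T y_i
# over the chunks gives the same x as a single least-squares fit on the full
# dataset, and the residual expression is the RMSE evaluated from the same
# accumulated quadratic terms without ever storing the full regressor matrix.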
def fit_subjects(self):
#Get the optimal least squares fit for every subject
for name,subject_dict in self.subjects.items():
print("Doing " + name)
print(subject_dict['filename'])
data = subject_dict['dataframe']
output = self.least_squares(data,self.output_name)
subject_dict['optimal_xi'] = output[0]
subject_dict['num_rows'] = output[1]
subject_dict['least_squares_info'] = output[3:]
def calculate_pmap(self,n):
"""
Calculate the personalization map
This function assumes that you already have calculated the optimal fits
for every subject.
Inputs:
n - The amount of gait fingerprints that the personalization map will have
Outputs:
Personalization map
Personalization map vanilla pca
Least-sqaure based gait fingerprint for every person
"""
########
# Vanilla pca implementation
########
#Get the number of subjects
num_subjects = len(self.subjects.values())
#Stack every subject's optimal fit coefficients into a matrix
XI = np.array([subject['optimal_xi'] for subject in self.subjects.values()]).reshape(num_subjects,-1)
#Get the mean fit coefficients across subjects
XI_mean = XI.mean(axis=0).reshape(1,-1)
#Get the deviations from the average matrix
XI_0 = XI - XI_mean
#Initialize PCA object for vanilla pca calculation
pca = PCA(n_components=min(num_subjects,XI_0.shape[1]))
pca.fit(XI_0)
########
# Our scaled pca implementation
########
#Calculate the scaled version of the pca
scaled_pca = self.scaled_pca_single_model()
#Select the components based on the amount of gait fingerprints
local_vanilla_pmap = (pca.components_[:n,:]).T
local_pmap = scaled_pca[:,:n]
#Calculate gait fingerprints for every individual
for subject_dict in self.subjects.values():
#Get least squares info
RTR, RTy, yTR, yTy = subject_dict['least_squares_info']
#Get scaled personalization map gait fingerprint
pmap = local_pmap
avg_fit = self.inter_subject_average_fit
#Reuse the least square usage of the personalization map
RTR_prime = (pmap.T) @ RTR @ pmap
RTy_prime = (pmap.T) @ RTy-(pmap.T) @ RTR @ avg_fit
subject_dict['gait_coefficients'] = np.linalg.solve(RTR_prime,RTy_prime)
#Get bad (naive pca) personalization map gait fingerprint
pmap = local_vanilla_pmap
avg_fit = self.inter_subject_average_fit
RTR_prime = (pmap.T) @ RTR @ pmap
RTy_prime = (pmap.T) @ RTy-(pmap.T) @ RTR @ avg_fit
subject_dict['gait_coefficients_vanilla_pca'] = np.linalg.solve(RTR_prime,RTy_prime)
#Get the personalization map with our rmse scaling
self.personalization_map = local_pmap
#Get the personalization map with normal pca
self.personalization_map_vanilla = local_vanilla_pmap
def scaled_pca_single_model(self, XI=None):
"""
Calculate the rmse scaled PCA from the paper
"""
#Get the number of subjects
num_subjects = len(self.subjects)
#Calculate the matrix of user fits
Ξ = np.array([subject['optimal_xi'] for subject in self.subjects.values()]).reshape(num_subjects,-1)
#Calculate the scaling factors
G_total = 0
N_total = 0
for name,subject_dict in self.subjects.items():
G_total += subject_dict['least_squares_info'][0]
N_total += subject_dict['num_rows']
#This is equation eq:inner_regressor in the paper!
G = G_total/N_total
#Get the personalization map and the pca info
personalization_map, pca_info = scaled_pca(Ξ,G)
self.inter_subject_average_fit = pca_info['inter_subject_average_fit']
return personalization_map
def add_left_out_subject(self,subjects):
for subject,filename in subjects:
self.one_left_out_subjects[subject] = \
{'filename': filename, \
'dataframe': pd.read_parquet(filename, columns=[self.output_name,*self.order]), \
'optimal_xi': [], \
'least_squares_info': [], \
'pca_axis': [], \
'pca_coefficients': [] \
}
subject_dict = self.one_left_out_subjects[subject]
print("One left out fit: " + subject)
data = subject_dict['dataframe']
pmap_scaled = self.personalization_map
pmap_vanilla = self.personalization_map_vanilla
output = self.least_squares(data,self.output_name)
subject_dict['optimal_xi'] = output[0]
subject_dict['num_rows'] = output[1]
subject_dict['least_squares_info'] = output[3:]
xi_avg = self.inter_subject_average_fit
RTR, RTy, yTR, yTy = subject_dict['least_squares_info']
RTR_prime = (pmap_scaled.T) @ RTR @ pmap_scaled
RTy_prime = (pmap_scaled.T) @ RTy-(pmap_scaled.T) @ RTR @ xi_avg
gf_scaled = np.linalg.solve(RTR_prime,RTy_prime)
subject_dict['gait_coefficients'] = (gf_scaled)
RTR, RTy, yTR, yTy = subject_dict['least_squares_info']
RTR_prime = (pmap_vanilla.T) @ RTR @ pmap_vanilla
RTy_prime = (pmap_vanilla.T) @ RTy-(pmap_vanilla.T) @ RTR @ xi_avg
gf_unscaled = np.linalg.solve(RTR_prime,RTy_prime)
subject_dict['gait_coefficients_unscaled'] = (gf_unscaled)
def __str__(self):
output = ''
for func in self.funcs:
func_type = type(func).__name__
if(func_type == 'Polynomial_Basis'):
basis_identifier = 'P'
elif (func_type == 'Fourier_Basis'):
basis_identifier = 'F'
elif (func_type == 'Bernstein_Basis'):
basis_identifier = 'B'
else:
raise TypeError("This is not a basis")
output += func.var_name + '-' + str(func.n)+ basis_identifier + '--'
return output
def get_order(self):
return self.order
def scaled_pca(Ξ,G):
math_utils.assert_pd(G, 'G in scaled pca')
#Diagonalize the matrix G as G = OVO
eig, O = np.linalg.eigh(G)
V = np.diagflat(eig)
#print("Gramian {}".format(G))
#Additionally, check that all the eigenvalues are positive
for e in eig:
#print("Eigenvalue: {}".format(e))
assert (e >= 0)
assert( e > 0) # pd
# Verify that it diagonalized correctly G = O (eig) O.T
assert(np.linalg.norm(G - O @ V @ O.T)< 1e-7 * np.linalg.norm(G)) # passes
#This is based on the equation in eq:Qdef
# Q G Q = I
Q = np.zeros((O.shape[0],O.shape[0]))
Qinv = np.zeros((O.shape[0],O.shape[0]))
for i in range(len(eig)):
Q += O[:,[i]] @ O[:,[i]].T * 1/np.sqrt(eig[i])
Qinv += O[:,[i]] @ O[:,[i]].T * np.sqrt(eig[i])
# Q = sum([O[:,[i]] @ O[:,[i]].T * 1/np.sqrt(eig[i]) for i in range(len(eig))])
# Qinv = sum([O[:,[i]] @ O[:,[i]].T * np.sqrt(eig[i]) for i in range(len(eig))])
#Change of basis conversions
def param_to_orthonormal(ξ):
return Qinv @ ξ
def param_from_orthonormal(ξ):
return Q @ ξ
def matrix_to_orthonormal(Ξ):
return Ξ @ Qinv
#Get the average coefficients
ξ_avg = np.mean(Ξ, axis=0)
#Save the intersubject average model
inter_subject_average_fit = ξ_avg[:,np.newaxis]
#Subtract the average coefficients
Ξ0 = Ξ - ξ_avg
##Todo: The PCA axes can also be obtained with PCA instead of eigenvalue
## decomposition
#Calculate the coefficients in the orthonormal space
Ξ0prime = matrix_to_orthonormal(Ξ0)
#Get the covariance matrix for this
Σ = Ξ0prime.T @ Ξ0prime / (Ξ0prime.shape[0]-1)
#Calculate the eigendecomposition of the covariance matrix
ψinverted, Uinverted = np.linalg.eigh(Σ)
#Eigenvalues are returned from smallest to biggest; reorder them from biggest to smallest
ψs = np.flip(ψinverted)
scaled_pca_eigenvalues = ψs
Ψ = np.diagflat(ψs)
#If we change the eigenvalues we also need to change the eigenvectors
U = np.flip(Uinverted, axis=1)
#Run tests to make sure that this is working
assert(np.linalg.norm(Σ - U @ Ψ @ U.T)< 1e-7 * np.linalg.norm(Σ)) # passes
for i in range(len(ψs)-1):
assert(ψs[i] > ψs[i+1])
#Define the number of principal axes that we want
η = Ξ.shape[1]
pca_axis_array = []
#Convert from the new basis back to the original basis vectors
for i in range (0,η):
pca_axis_array.append(param_from_orthonormal(U[:,i]*np.sqrt(ψs[i])))
scaled_pca_components = np.array(pca_axis_array).T
pca_info = {'all_components': scaled_pca_components,
'eigenvalues':scaled_pca_eigenvalues,
'inter_subject_average_fit':inter_subject_average_fit}
#Return the personalization map
return scaled_pca_components[:,:], pca_info
def calculate_cross_model_p_map(models):
"""
Calculate the gait fingerprint across different models
"""
#Get the number of models
num_models = len(models)
subjects = models[0].subjects.keys()
#Concatenate all the model fits
XI_list = [models[0].subjects[subject]['optimal_xi'] for subject in subjects]
for model in models[1:]:
for i,subject in enumerate(subjects):
XI_list[i] = np.concatenate((XI_list[i], model.subjects[subject]['optimal_xi']), axis=0)
#Convert the model lists into a 2D np array
XI = np.array(XI_list)
XI = XI.reshape(XI.shape[:2])
#Calculate the scaling coefficients
G_total = [0 for i in range(num_models)]
N_total = [0 for i in range(num_models)]
for i, model in enumerate(models):
for name,subject_dict in model.subjects.items():
G_total[i] += subject_dict['least_squares_info'][0]
N_total[i] += subject_dict['num_rows']
#This is equation eq:inner_regressor in the paper!
G_list = [G_total[i]/N_total[i] for i in range(num_models)]
#Make sure that everything is positive definite
for individual_G in G_list:
math_utils.assert_pd(individual_G, "Individual G in G_list")
#from https://stackoverflow.com/questions/42154606/python-numpy-how-to-construct-a-big-diagonal-arraymatrix-from-two-small-array
def diag_block_mat_boolindex(L):
shp = L[0].shape
mask = np.kron(np.eye(len(L)), np.ones(shp))==1
out = np.zeros(np.asarray(shp)*len(L),dtype=float)
out[mask] = np.concatenate(L).ravel()
return out
#Diagonalize the weights
G = diag_block_mat_boolindex(G_list)
personalization_map, pca_info = scaled_pca(XI, G)
avg_fit = pca_info['inter_subject_average_fit']
print(f'cross personalization_map {personalization_map.shape} avg_fit:{avg_fit.shape}')
#Get the number of gait fingerprints
num_gf = models[0].num_gait_fingerprint
#Assign each personalization map to the corresponding model
#Create even splits
split_personalization_map = []
split_average_fit = []
for i in range(len(models)):
pmap_size = int(personalization_map.shape[0]/num_models)
temp1 = personalization_map[i*pmap_size:(i+1)*pmap_size,:]
temp2 = avg_fit[i*pmap_size:(i+1)*pmap_size,:]
#Make sure you have the correct number of gait fingerprints
split_personalization_map.append(temp1[:,:num_gf])
split_average_fit.append(temp2[:,:num_gf])
#Todo
#set the model for each part
for i,mod in enumerate(models):
mod.cross_model_personalization_map = split_personalization_map[i]
mod.cross_model_inter_subject_average = split_average_fit[i]
#For every subject, calculate the cross model thing
for j,subject in enumerate(mod.subjects.keys()):
#For every model, add to the least square matrix
for i,mod in enumerate(models):
#Get least squares info
RTR, RTy, yTR, yTy = mod.subjects[subject]['least_squares_info']
pmap = mod.cross_model_personalization_map
avg_fit = mod.cross_model_inter_subject_average
print(f'j:{j} i:{i} RTR: {RTR.shape} RTy: {RTy.shape} pmap: {pmap.shape} avg_fit: {avg_fit.shape}')
RTR_prime = (pmap.T) @ RTR @ pmap
RTy_prime = (pmap.T) @ RTy - (pmap.T) @ RTR @ avg_fit
if i == 0:
RTR_prime_stack = RTR_prime
RTy_prime_stack = RTy_prime
else:
RTR_prime_stack += RTR_prime
RTy_prime_stack += RTy_prime
gait_fingerprint = np.linalg.solve(RTR_prime_stack,RTy_prime_stack)
for i,mod2 in enumerate(models):
mod2.subjects[subject]['cross_model_gait_coefficients_unscaled'] = gait_fingerprint
#Save the model so that you can use them later
def model_saver(model,filename):
with open(filename,'wb') as file:
pickle.dump(model,file)
#Load the model from a file
def model_loader(filename):
with open(filename,'rb') as file:
return pickle.load(file)
########################################################################################################################################################################
#//////////////////////////////////////////////////////////////////////////////////##//////////////////////////////////////////////////////////////////////////////////#
########################################################################################################################################################################
```
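The same reduced least-squares pattern (project the normal equations through a personalization map, then solve for the gait fingerprint) appears several times in the file above. A minimal, self-contained sketch with synthetic stand-in matrices (`R`, `y`, `P` and `xi_avg` below are made up for illustration, not taken from the repo):
```python
# Sketch of the reduced least-squares solve used above: given a regressor matrix R,
# measurements y, an average fit xi_avg and a personalization map P, the gait
# fingerprint g solves (P^T R^T R P) g = P^T R^T y - P^T R^T R xi_avg.
import numpy as np

rng = np.random.default_rng(0)
num_rows, num_params, num_gf = 200, 30, 4

R = rng.normal(size=(num_rows, num_params))                  # stand-in regressor matrix
xi_true = rng.normal(size=(num_params, 1))                   # stand-in subject fit
y = R @ xi_true + 0.01 * rng.normal(size=(num_rows, 1))      # noisy measurements

xi_avg = np.zeros((num_params, 1))                           # stand-in inter-subject average fit
P = np.linalg.qr(rng.normal(size=(num_params, num_gf)))[0]   # stand-in personalization map

RTR, RTy = R.T @ R, R.T @ y
RTR_prime = P.T @ RTR @ P
RTy_prime = P.T @ RTy - P.T @ RTR @ xi_avg
gait_fingerprint = np.linalg.solve(RTR_prime, RTy_prime)
print(gait_fingerprint.shape)   # (num_gf, 1)
```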
#### File: scripts/data_modification/flatten_dataport.py
```python
from os import remove
import h5py
import numpy as np
import pandas as pd
from pandas import DataFrame
from functools import lru_cache
def get_column_name(column_string_list, num_last_keys):
filter_strings = ['right', 'left']
filtered_list = filter(
lambda x: not x in filter_strings, column_string_list)
column_string = '_'.join(filtered_list)
return column_string
def get_end_points(d, out_dict, parent_key='', sep='/', num_last_keys=4):
for k, v in d.items():
new_key = parent_key + sep + k if parent_key else k
if isinstance(v, h5py._hl.group.Group):
get_end_points(v, out_dict, new_key, sep=sep)
# Where the magic happens when you reach an end point
else:
column_string_list = (parent_key+sep+k).split(sep)[-num_last_keys:]
column_string = get_column_name(column_string_list, num_last_keys)
if (column_string not in out_dict):
out_dict[column_string] = [[new_key], [v]]
else:
out_dict[column_string][0].append(new_key)
out_dict[column_string][1].append(v)
#Debug where the bad strides are in the datasets
def determine_zero_data_strides():
pass
#%%
file_name = '../local-storage/InclineExperiment.mat'
h5py_file = h5py.File(file_name)['Gaitcycle']
# Iterate through all the subjects, make a file per subject to keep it RAM bound
for subject in h5py_file.keys():
if subject != 'AB05':
continue
# Initialize variables for the subject
data = h5py_file[subject]
save_name = '../local-storage/test/dataport_flattened_partial_{}.parquet'.format(
subject)
# Store all the end points
columns_to_endpoint_list = {}
get_end_points(data, columns_to_endpoint_list)
for joint in columns_to_endpoint_list.keys():
joint_data_trial_name = zip(*columns_to_endpoint_list[joint])
total_datasets = len(columns_to_endpoint_list[joint][1])
sum = 0
bad_strides = 0
for num_dataset,trial_name_dataset in enumerate(joint_data_trial_name):
trial_name, dataset = trial_name_dataset
total_rows = dataset.shape[0]
for row_number, row in enumerate(dataset):
num_digits = np.count_nonzero(row)
if(num_digits == 0):
bad_strides += 1
if(row_number+1 != total_rows and "forceplate" not in joint):
print(subject + " " + joint + " dataset " + trial_name + " " + str(num_dataset) + "/" + str(total_datasets) + " bad row: " + str(row_number + 1) + "/" + str(total_rows))
sum += 1
#print(subject + " " + joint + " total bad strides = " + str(bad_strides) + " Total strides: " + str(sum))
# This is a helper function to determine where trials have different strides
def determine_different_strides():
pass
#%%
feature1 = 'jointangles_ankle_x'
feature2 = 'jointmoment_knee_x'
print("Comparing " + feature1 + " to " + feature2)
file_name = '../local-storage/InclineExperiment.mat'
h5py_file = h5py.File(file_name)['Gaitcycle']
bad_datasets = []
# Iterate through all the subjects, make a file per subject to keep it RAM bound
for subject in h5py_file.keys():
if(subject != "AB05"):
continue
# Initialize variables for the subject
data = h5py_file[subject]
# Store all the end points
columns_to_endpoint_list = {}
get_end_points(data, columns_to_endpoint_list)
#Get the data for the features that we want
data_feature1 = columns_to_endpoint_list[feature1]
data_feature2 = columns_to_endpoint_list[feature2]
bad_run_filter = lambda x: (x[:,:],[]) if (np.count_nonzero(x[-1,:]) or np.count_nonzero(x[-2,:]) == 0) else (x[:-1,:],[x.shape[0]-1])
#Update the dataset based on the filter implementation
data_feature1[1] = [bad_run_filter(x)[0] for x in data_feature1[1]]
data_feature2[1] = [bad_run_filter(x)[0] for x in data_feature2[1]]
#Create a mapping from trial to trial data shape
#Initialize to zero
trial_length_dict_feature1 = {
x.split('/')[0] + " " + x.split('/')[-3]: [] for x in data_feature1[0]}
trial_length_dict_feature2 = {
x.split('/')[0] + " " + x.split('/')[-3]: [] for x in data_feature2[0]}
trial_to_dataset = {}
#Initialize the dictionary taking into consideration left and right legs
for trial_long,data in zip(*data_feature1):
trial = trial_long.split('/')[0] + " " + trial_long.split('/')[-3]
trial_length_dict_feature1[trial].append(data)
for trial_long,data in zip(*data_feature2):
trial = trial_long.split('/')[0] + " " + trial_long.split('/')[-3]
trial_length_dict_feature2[trial].append(data)
sum_len1 = 0
sum_len2 = 0
#Verify each trial shape
for trial in trial_length_dict_feature1.keys():
trial_data_pair = zip(trial_length_dict_feature1[trial],
trial_length_dict_feature2[trial])
for single_data_trial1, single_data_trial2 in trial_data_pair:
len1 = single_data_trial1.shape[0]*single_data_trial1.shape[1]
len2 = single_data_trial2.shape[0]*single_data_trial2.shape[1]
if len1 != len2:
bad_datasets.append((single_data_trial1,single_data_trial2))
pass
print("!!!!!!!!!!!!!!!!! This trial does not match " + subject + " " + trial + " len1 " + str(len1) + " len2 " + str(len2))
else:
pass
print("Good " + subject + " " + trial + " len1 " + str(len1) + " len2 " + str(len2))
for dataset_pair in bad_datasets:
print(np.count_nonzero(np.array(dataset_pair[1]).flatten()[-150:]))
bad_datasets_np = [ (x[:,:], y[:,:]) for x,y in bad_datasets]
bad_datasets_np.insert(0,(feature1,feature2))
#Conclusion, there are datasets that have zero final stride inconsistently
# I found a way to identify them
# Need to implement into the dataset flattening technique
# I think they are all on the left leg
#%%
def quick_flatten_dataport():
pass
# %%
file_name = '../local-storage/InclineExperiment.mat'
h5py_file = h5py.File(file_name)['Gaitcycle']
# Iterate through all the subjects, make a file per subject to keep it RAM bound
for subject in h5py_file.keys():
#Uncomment if you want to debug a specific subject
# if subject != "AB05":
# continue
print("Flattening subject: " + subject)
# Initialize variables for the subject
data = h5py_file[subject]
save_name = '../local-storage/test/dataport_flattened_partial_{}.parquet'.format(
subject)
# Store all the end points
columns_to_endpoint_list = {}
get_end_points(data, columns_to_endpoint_list)
# This dictionary stores dataframes based on the amount of strides that
# they have
strides_to_dataframes_dict = {}
# Which column will be used to get information about each row
selected_column = 'jointangles_ankle_x'
# Main loop - process each potential column
for column_name, endpoint_list in columns_to_endpoint_list.items():
# If the endpoint name contains any of these, ignore the endpoint
if('subjectdetails' in column_name or
'cycles' in column_name or
'stepsout' in column_name or
'description' in column_name or
'mean' in column_name or
'std' in column_name):
#print(column_name + " " + str(len(endpoint_list[1])) + " (ignored)")
continue
# Else: this is a valid column
#print(column_name + " " + str(len(endpoint_list[1])))
# Filter the endpoint list for bad strides
#This version only removes elements at the end
bad_run_filter = lambda x: (x[:,:],[]) if (np.count_nonzero(x[-1,:]) or np.count_nonzero(x[-2,:]) == 0) else (x[:-1,:],[x.shape[0]-1])
#This version removes elements in the middle
# def bad_run_filter(trial_dataset):
# remove_list = []
# for row_index,row in enumerate(trial_dataset):
# num_digits = np.count_nonzero(row)
# if(num_digits == 0):
# remove_list.append(row_index)
# #Remove elements
# # if remove list is empty, nothing is deleted
# return np.delete(trial_dataset[:,:], remove_list, axis=0), remove_list
endpoint_list_filtered = [bad_run_filter(x)[0] for x in endpoint_list[1]]
# Get the data to add it to a dataframe
data_array = np.concatenate(endpoint_list_filtered, axis=0).flatten()
# Calculate how many strides are in the dataframe
len_arr = data_array.shape[0]
len_key = len_arr/150.0
# Add the key to the dataframe
try:
strides_to_dataframes_dict[len_key][column_name] = data_array
except KeyError:
strides_to_dataframes_dict[len_key] = DataFrame()
strides_to_dataframes_dict[len_key][column_name] = data_array
# All the dataframes have been created, add information about phase and task
# Helper functions to get time, ramp to append task information to dataframe
@lru_cache(maxsize=5)
def get_time(trial, leg):
return data[trial]['cycles'][leg]['time']
@lru_cache(maxsize=5)
def get_ramp(trial):
return data[data[trial]['description'][1][1]][0][0]
@lru_cache(maxsize=5)
def get_speed(trial):
return data[data[trial]['description'][1][0]][0][0]
# Iterate by row to get phase information
# Ugly but effective
# We need to use the unfiltered version to get the remove_list again
# This is used to filter the time column
endpoint_list = columns_to_endpoint_list[selected_column]
# Create lists to store all the phase dot and stride length information
trials = []
legs = []
phase_dot_list = []
stride_length_list = []
# Iterate by trial to get time, speed
for experiment_name, dataset in zip(*endpoint_list):
filtered_dataset, remove_list = bad_run_filter(dataset)
endpoint_split = experiment_name.split('/')
trial = endpoint_split[0]
leg = endpoint_split[-3]
trials.extend([trial]*filtered_dataset.shape[0]*filtered_dataset.shape[1])
legs.extend([leg]*filtered_dataset.shape[0]*filtered_dataset.shape[1])
time = get_time(trial, leg)
speed = get_speed(trial)
#Filter out times that are not being used since there is no data
time = np.delete(time,remove_list,axis=0)
time_delta = (time[:, -1]-time[:, 0])
phase_dot_list.append(np.repeat(1/time_delta, 150))
stride_length_list.append(np.repeat(speed*time_delta, 150))
# Get the corresponding dataframe to the selected column
df = None
for dataframe in strides_to_dataframes_dict.values():
if selected_column in dataframe.columns:
df = dataframe
# print(len(trials))
# print(df.shape[0])
df['ramp'] = [get_ramp(trial) for trial in trials]
df['speed'] = [get_speed(trial) for trial in trials]
df['trial'] = trials
# We don't want phase to reach one because 0=1 in terms of phase
phase = np.linspace(0, (1-1/150), 150)
df['leg'] = legs
df['phase'] = np.tile(phase, int(df.shape[0]/150))
df['phase_dot'] = np.concatenate(phase_dot_list, axis=0)
df['stride_length'] = np.concatenate(stride_length_list, axis=0)
print("Number of columns: " + str(len(df.columns)))
print("Columns: " + df.columns)
print("strides to length " + str([(strides,len(dataset.columns)) for strides, dataset in strides_to_dataframes_dict.items()]))
# Comment out to not save
df.to_parquet(save_name)
# Uncomment break to just get one person
#break
# %%
def add_global_shank_angle():
pass
# %%
# #Get the subjects
subjects = [
('AB10', '../local-storage/test/dataport_flattened_partial_AB10.parquet')]
for i in range(1, 10):
subjects.append(
('AB0'+str(i), '../local-storage/test/dataport_flattened_partial_AB0'+str(i)+'.parquet'))
for subject in subjects:
df = pd.read_parquet(subject[1])
print(df.columns)
# Create the shank angles based on foot and ankle
# df.drop(columns=['jointangle_shank_x','jointangle_shank_y','jointangle_shank_z'])
df['jointangles_shank_x'] = df['jointangles_foot_x'] + \
df['jointangles_ankle_x']
df['jointangles_shank_y'] = df['jointangles_foot_y'] + \
df['jointangles_ankle_y']
df['jointangles_shank_z'] = df['jointangles_foot_z'] + \
df['jointangles_ankle_z']
# Create the thigh angle based on pelvis and hip
df['jointangles_thigh_x'] = df['jointangles_pelvis_x'] + \
df['jointangles_hip_x']
df['jointangles_thigh_y'] = df['jointangles_pelvis_y'] + \
df['jointangles_hip_y']
df['jointangles_thigh_z'] = df['jointangles_pelvis_z'] + \
df['jointangles_hip_z']
# Calculate the derivative of the shank angle manually
shank_anles_cutoff = df['jointangles_shank_x'].values[:-1]
shank_angles_future = df['jointangles_shank_x'].values[1:]
phase_rate = df['phase_dot'].values[:-1]
measured_shank_derivative = (
shank_angles_future-shank_anles_cutoff)*(phase_rate)*150
measured_shank_derivative = np.append(measured_shank_derivative, 0)
df['jointangles_shank_dot_x'] = measured_shank_derivative
# Calculate the derivative of foot dot manually
foot_anles_cutoff = df['jointangles_foot_x'].values[:-1]
foot_angles_future = df['jointangles_foot_x'].values[1:]
measured_foot_derivative = (
foot_angles_future-foot_anles_cutoff)*(phase_rate)*150
measured_foot_derivative = np.append(measured_foot_derivative, 0)
df['jointangles_foot_dot_x'] = measured_foot_derivative
# Calculate the derivative of knee dot manually
anles_cutoff = df['jointangles_knee_x'].values[:-1]
angles_future = df['jointangles_knee_x'].values[1:]
measured_foot_derivative = (
angles_future-anles_cutoff)*(phase_rate)*150
measured_foot_derivative = np.append(measured_foot_derivative, 0)
df['jointangles_knee_dot_x'] = measured_foot_derivative
# Calculate the derivative of hip dot manually
anles_cutoff = df['jointangles_hip_x'].values[:-1]
angles_future = df['jointangles_hip_x'].values[1:]
measured_foot_derivative = (
angles_future-anles_cutoff)*(phase_rate)*150
measured_foot_derivative = np.append(measured_foot_derivative, 0)
df['jointangles_hip_dot_x'] = measured_foot_derivative
# Calculate the derivative of thigh dot manually
anles_cutoff = df['jointangles_thigh_x'].values[:-1]
angles_future = df['jointangles_thigh_x'].values[1:]
measured_foot_derivative = (
angles_future-anles_cutoff)*(phase_rate)*150
measured_foot_derivative = np.append(measured_foot_derivative, 0)
df['jointangles_thigh_dot_x'] = measured_foot_derivative
df.to_parquet(subject[1])
# %%
if __name__ == '__main__':
quick_flatten_dataport()
add_global_shank_angle()
#determine_different_strides()
#determine_zero_data_strides()
pass
# %%
```
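A small standalone sketch of how the phase columns above are assembled per trial; the stride times and treadmill speed below are made-up stand-ins:
```python
# Each stride is resampled to 150 points, so phase runs from 0 to 1-1/150,
# phase_dot is the stride rate 1/dt, and stride_length is speed*dt.
import numpy as np

points_per_stride = 150
speed = 1.2                                   # m/s, stand-in treadmill speed
time = np.array([[0.0, 1.1], [1.1, 2.3]])     # stand-in per-stride start/end times

time_delta = time[:, -1] - time[:, 0]         # stride durations
phase = np.linspace(0, 1 - 1 / points_per_stride, points_per_stride)
phase_col = np.tile(phase, len(time_delta))
phase_dot_col = np.repeat(1 / time_delta, points_per_stride)
stride_length_col = np.repeat(speed * time_delta, points_per_stride)
print(phase_col.shape, phase_dot_col.shape, stride_length_col.shape)
```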
#### File: scripts/tests/ekf_ekf_unit_test.py
```python
import copy
import numpy as np
import matplotlib.pyplot as plt
# This test also relies on Fourier_Basis, Polynomial_Basis, Kronecker_Model,
# model_saver, model_loader, Measurement_Model, Gait_Dynamic_Model and
# Extended_Kalman_Filter; those are expected to come from the repo's
# model-fitting/EKF modules (not imported in this excerpt).
################################################################################################
################################################################################################
#Unit testing
def ekf_unit_test():
pass
#%%
#Phase, Phase Dot, Step Length, Ramp, gait fingerprints
initial_state_dict = {'phase': [0],
'phase_dot': [1],
'step_length': [1],
'ramp': [0],
'gf1': [0],
'gf2': [0],
'gf3': [0],
'gf4': [0],
'gf5': [0]}
# initial_state = pd.DataFrame(initial_state_dict)
#Phase, Phase Dot, Step Length, Ramp, gait fingerprints
initial_state = np.array([[0.5,0.5,0.5,0.5,
0.5,0.5,0.5,0.5,0.5]]).T
train_models = False
if train_models == True:
#Determine the phase models
phase_model = Fourier_Basis(5,'phase')
phase_dot_model = Polynomial_Basis(3,'phase_dot')
step_length_model = Polynomial_Basis(3,'step_length')
ramp_model = Polynomial_Basis(3,'ramp')
# #Get the subjects
subjects = [('AB10','../local-storage/test/dataport_flattened_partial_AB10.parquet')]
for i in range(1,10):
subjects.append(('AB0'+str(i),'../local-storage/test/dataport_flattened_partial_AB0'+str(i)+'.parquet'))
model_foot = Kronecker_Model('jointangles_foot_x',phase_model,phase_dot_model,step_length_model,ramp_model,subjects=subjects,num_gait_fingerprint=4)
model_saver(model_foot,'foot_model.pickle')
model_shank = Kronecker_Model('jointangles_shank_x',phase_model,phase_dot_model,step_length_model,ramp_model,subjects=subjects,num_gait_fingerprint=4)
model_saver(model_shank,'shank_model.pickle')
model_foot_dot = copy.deepcopy(model_foot)
model_foot_dot.time_derivative = True
model_saver(model_foot_dot,'foot_dot_model.pickle')
model_shank_dot = copy.deepcopy(model_shank)
model_shank_dot.time_derivative = True
model_saver(model_shank_dot,'shank_dot_model.pickle')
else:
model_foot = model_loader('foot_model.pickle')
model_shank = model_loader('shank_model.pickle')
model_foot_dot = model_loader('foot_dot_model.pickle')
model_shank_dot = model_loader('shank_dot_model.pickle')
models = [model_foot,model_shank,model_foot_dot,model_shank_dot]
state_names = list(initial_state_dict.keys())
measurement_model = Measurement_Model(state_names,models)
num_states = len(state_names)
num_outputs = len(models)
initial_state_covariance = np.eye(num_states)*1e-7
R = np.eye(num_outputs)
Q = np.eye(num_states)
Q[4,4] *= 2
Q[5,5] *= 2
Q[6,6] *= 2
Q[7,7] *= 2
d_model = Gait_Dynamic_Model()
ekf = Extended_Kalman_Filter(initial_state,initial_state_covariance, d_model, Q, measurement_model, R)
time_step = 0.001
#Really just want to prove that we can do one iteration of this
#Don't really want to prove much more than this since we would need actual data for that
control_input_u = 0
sensor_measurements = np.array([[1,1,1,1]]).T
iterations = 100
state_history = np.zeros((iterations,len(initial_state)))
try:
for i in range(iterations):
state_history[i,:] = ekf.calculate_next_estimates(time_step, control_input_u, sensor_measurements)[0].T
except KeyboardInterrupt:
pass
print(state_history[:,0])
plt.plot(state_history[:,:])
plt.show()
def ekf_unit_test_simple_model():
#%%
#Mass spring system
#state = [x,xdot]
#state_dot = [xdot, xddot]
#state_k+1 = R*state, R = rotation matrix with det 1
#(you are not adding or subtracting energy from the system)
class SimpleDynamicModel():
def __init__(self):
#Rotation matrix to represent state dynamics
self.R = lambda theta: np.array([[np.cos(theta),np.sin(theta)],
[-np.sin(theta),np.cos(theta)]])
#Rotational velocity in radians per sec
self.omega = 2
def f_jacobean(self, current_state, time_step):
return self.R(self.omega*time_step)
def f_function(self, current_state, time_step):
return self.R(self.omega*time_step) @ current_state
class SimpleMeasurementModel():
def evaluate_h_func(self,current_state):
return np.eye(2) @ current_state
def evaluate_dh_func(self,current_state):
return np.eye(2)
#Setup simulation
initial_state = np.array([[0,1]]).T
initial_state_covariance = np.eye(2)*1e-7
d_model = SimpleDynamicModel()
measurement_model = SimpleMeasurementModel()
#Sensor noise
R = np.eye(2)
#Process noise
Q = np.eye(2)*1e-2
ekf = Extended_Kalman_Filter(initial_state,initial_state_covariance, d_model, Q, measurement_model, R)
actual_state = np.array([[1,0]]).T
#Setup time shenanigans
iterations = 1001
total_time = 10
iterator = np.linspace(0,total_time,iterations)
time_step = iterator[1] - iterator[0]
#Setup state history tracking
state_history = np.zeros((iterations,2*len(initial_state)))
for i,t in enumerate(iterator):
actual_state = d_model.f_function(actual_state, time_step)
predicted_state,_ = ekf.calculate_next_estimates(time_step, 0, actual_state)
state_history[i,:2] = predicted_state.T
state_history[i,2:] = actual_state.T
#%matplotlib qt
plt.plot(state_history)
plt.legend(["Predicted Position", "Predicted Velocity",
"Actual Position", "Actual Velocity"])
plt.show()
#%%
def profiling():
pass
#%%
import pstats
from pstats import SortKey
p = pstats.Stats('profile.txt')
p.sort_stats(SortKey.CUMULATIVE).print_stats(10)
#%%
if(__name__=='__main__'):
#ekf_unit_test()
ekf_unit_test_simple_model()
```
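The `Extended_Kalman_Filter` class exercised above is not included in this excerpt. A generic predict/update step written against the same duck-typed interfaces (`f_function`/`f_jacobean` and `evaluate_h_func`/`evaluate_dh_func`) might look like the sketch below; this illustrates the standard EKF equations and is not the repo's implementation:
```python
# Generic EKF predict/update sketch matching the interfaces used in the tests above.
import numpy as np

def ekf_step(x, P, dyn_model, meas_model, Q, R, z, dt):
    # Predict
    F = dyn_model.f_jacobean(x, dt)
    x_pred = dyn_model.f_function(x, dt)
    P_pred = F @ P @ F.T + Q
    # Update
    H = meas_model.evaluate_dh_func(x_pred)
    y = z - meas_model.evaluate_h_func(x_pred)     # innovation
    S = H @ P_pred @ H.T + R                       # innovation covariance
    K = P_pred @ H.T @ np.linalg.inv(S)            # Kalman gain
    x_new = x_pred + K @ y
    P_new = (np.eye(P.shape[0]) - K @ H) @ P_pred
    return x_new, P_new
```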
#### File: scripts/tests/ekf_measurement_model_unit_test.py
```python
def unit_test():
pass
#%%
import os,sys
PACKAGE_PARENT = '../model_fitting/'
SCRIPT_DIR = os.path.dirname(os.path.realpath(os.path.join(os.getcwd(), os.path.expanduser(__file__))))
new_path = os.path.normpath(os.path.join(SCRIPT_DIR, PACKAGE_PARENT))
print(new_path)
sys.path.insert(0,new_path)
from function_bases import Polynomial_Basis, Fourier_Basis
from kronecker_model import Kronecker_Model, model_saver, model_loader
train_models = False
if train_models == True:
#Determine the phase models
phase_model = Fourier_Basis(10,'phase')
phase_dot_model = Polynomial_Basis(1,'phase_dot')
step_length_model = Polynomial_Basis(3,'step_length')
ramp_model = Polynomial_Basis(4,'ramp')
# #Get the subjects
subjects = [('AB10','../local-storage/test/dataport_flattened_partial_AB10.parquet')]
for i in range(1,10):
subjects.append(('AB0'+str(i),'../local-storage/test/dataport_flattened_partial_AB0'+str(i)+'.parquet'))
model_foot = Kronecker_Model('jointangles_foot_x',phase_model,phase_dot_model,step_length_model,ramp_model,subjects=subjects,num_gait_fingerprint=5)
model_saver(model_foot,'foot_model.pickle')
model_shank = Kronecker_Model('jointangles_shank_x',phase_model,phase_dot_model,step_length_model,ramp_model,subjects=subjects,num_gait_fingerprint=5)
model_saver(model_shank,'shank_model.pickle')
model_foot_dot = Kronecker_Model('jointangles_foot_x',phase_model,phase_dot_model,step_length_model,ramp_model,subjects=subjects,num_gait_fingerprint=5,time_derivative=True)
model_saver(model_foot_dot,'foot_dot_model.pickle')
model_shank_dot = Kronecker_Model('jointangles_shank_x',phase_model,phase_dot_model,step_length_model,ramp_model,subjects=subjects,num_gait_fingerprint=5,time_derivative=True)
model_saver(model_shank_dot,'shank_dot_model.pickle')
else:
model_foot = model_loader('../model_fitting/foot_model.pickle')
model_shank = model_loader('../model_fitting/shank_model.pickle')
model_foot_dot = model_loader('../model_fitting/foot_dot_model.pickle')
model_shank_dot = model_loader('../model_fitting/shank_dot_model.pickle')
initial_state_dict = {'phase': [0],
'phase_dot': [1],
'step_length': [1],
'ramp': [0],
'gf1': [0],
'gf2': [0],
'gf3': [0],
'gf4': [0],
'gf5':[0]}
models = [model_foot,model_shank,model_foot_dot,model_shank_dot]
state_names = list(initial_state_dict.keys())
num_states = len(state_names)
num_models = len(models)
measurement_model = Measurement_Model(state_names,models)
state = (np.array([[1.0,1.0,1.0,1.0,
1.0,1.0,1.0,1.0,1.0]]).T)*0.5
manual_derivative = np.zeros((num_models,num_states))
for row in range(num_models):
for col in range(num_states):
state_plus_delta = state.copy()
delta = 1e-14
state_plus_delta[col,0] += delta
#print("State" + str(state))
#print("State +" + str(state_plus_delta))
f_state = measurement_model.evaluate_h_func(state)[row]
f_delta = measurement_model.evaluate_h_func(state_plus_delta)[row]
manual_derivative[row,col] = (f_delta-f_state)/(delta)
print("Manual" + str(manual_derivative))
print("Rank manual rank: {}".format(np.linalg.matrix_rank(manual_derivative)))
expected_derivative = measurement_model.evaluate_dh_func(state)
print("Expected" + str(expected_derivative))
print("Rank expected rank: {}".format(np.linalg.matrix_rank(expected_derivative)))
print("Difference" + str(manual_derivative - expected_derivative))
print("Norm of expected - manual: {}".format(np.linalg.norm(expected_derivative-manual_derivative)))
#%%
if __name__=='__main__':
unit_test()
``` |
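A reusable version of the finite-difference check performed manually above. Note that the perturbation of 1e-14 used in the loop is close to float64 round-off for state values near 0.5, so a step on the order of 1e-6 to 1e-8 is usually a safer default for forward differences; the toy measurement function below is a stand-in:
```python
# Forward-difference Jacobian check, in the spirit of the manual loop above.
import numpy as np

def numeric_jacobian(h, x, delta=1e-6):
    """Forward-difference Jacobian of h(x), where h maps (n,1) -> (m,1)."""
    fx = h(x)
    m, n = fx.shape[0], x.shape[0]
    J = np.zeros((m, n))
    for col in range(n):
        x_step = x.copy()
        x_step[col, 0] += delta
        J[:, col] = ((h(x_step) - fx) / delta).ravel()
    return J

# Example with a toy measurement function (a stand-in, not evaluate_h_func):
h = lambda x: np.array([[x[0, 0] * x[1, 0]], [np.sin(x[0, 0])]])
x0 = np.array([[0.5], [2.0]])
print(numeric_jacobian(h, x0))
```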
{
"source": "jmoo512/flask-boilerplate",
"score": 2
} |
#### File: flask-boilerplate/kbd/__init__.py
```python
import os
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
from flask_migrate import Migrate, MigrateCommand
from flask_cors import CORS
from config import Config
APP_ROOT = os.path.abspath(os.path.dirname(__file__))
APP_STATIC = os.path.join(APP_ROOT, 'static')
db = SQLAlchemy()
migrate = Migrate()
cors=CORS()
def create_app(config_class=Config):
app = Flask(__name__)
db.init_app(app)
migrate.init_app(app,db)
cors.init_app(app)
app.config.from_object(config_class)
with app.app_context():
#import blueprints
from kbd.core.views import core
#register blueprints
app.register_blueprint(core)
return app
#config switching
#if app.config['ENV']=='development':
# app.config.from_object('config.DevelopmentConfig')
#elif app.config['ENV']=='staging':
# app.config.from_object('config.StagingConfig')
#else:
# app.config.from_object('config.ProductionConfig')
``` |
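The `Config` class comes from a `config.py` module that is not part of this excerpt. A minimal, hypothetical entry point for the factory (the `run.py` name and the debug flag are assumptions, not part of the boilerplate shown):
```python
# run.py -- hypothetical entry point for the application factory above
from kbd import create_app

app = create_app()

if __name__ == "__main__":
    # Debug server for local development only
    app.run(debug=True)
```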
{
"source": "jmoon0830/web-scraping-challenge",
"score": 3
} |
#### File: jmoon0830/web-scraping-challenge/scrape_mars.py
```python
import requests
import pandas as pd
import json
from splinter import Browser
from splinter.exceptions import ElementDoesNotExist
from bs4 import BeautifulSoup as bs
import time
def scrape():
#part 1
executable_path = {'executable_path': 'C:\\Users\\jmoon\\gt\\gt-inc-data-pt-05-2020-u-c//chromedriver.exe'}
browser = Browser('chrome', **executable_path, headless=False)
url = "https://mars.nasa.gov/news/"
browser.visit(url)
time.sleep(10)
html = browser.html
soup = bs(html, 'html.parser')
results = soup.find_all('div',class_='list_text')
for result in results:
if result.find('a'):
try:
news_title = result.find('a').text
news_p = result.find('div',class_='article_teaser_body').text
except:
pass
#part 2
url2 = "https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars"
browser.visit(url2)
time.sleep(10)
html2 = browser.html
soup2 = bs(html2, 'lxml')
results2 = soup2.find_all('div',class_='carousel_items')
for result in results2:
image_url = result.find('article')['style'].strip('background-image: url();')
full_url = f"https://www.jpl.nasa.gov{image_url}"
featured_image_url = full_url.replace("'",'')
#part 3
url3 = "https://twitter.com/marswxreport?lang=en"
browser.visit(url3)
time.sleep(10)
html3 = browser.html
soup3 = bs(html3,'lxml')
results3 = soup3.find_all('div',class_="css-901oao r-hkyrab r-1qd0xha r-a023e6 r-16dba41 r-ad9z0x r-bcqeeo r-bnwqim r-qvutc0")
mars_weather = results3[0].find('span',class_='css-901oao css-16my406 r-1qd0xha r-ad9z0x r-bcqeeo r-qvutc0').text
#part 4
url4 = 'https://space-facts.com/mars/'
browser.visit(url4)
time.sleep(5)
tables = pd.read_html(url4)
df = tables[0]
df.columns = ['Fact','Stat']
mars_table = df.to_html()
#part 5
url_list = ['https://astrogeology.usgs.gov/search/map/Mars/Viking/cerberus_enhanced','https://astrogeology.usgs.gov/search/map/Mars/Viking/schiaparelli_enhanced','https://astrogeology.usgs.gov/search/map/Mars/Viking/syrtis_major_enhanced','https://astrogeology.usgs.gov/search/map/Mars/Viking/valles_marineris_enhanced']
hemisphere_image_urls = []
for url in url_list:
browser.visit(url)
html = browser.html
time.sleep(5)
soup = bs(html,'lxml')
results = soup.find_all('div',class_='downloads')
for result in results:
if result.find('a')['href']:
img_url = result.find('a')['href']
title = soup.find('h2',class_='title').text
hemisphere_image_urls.append({'img_url':img_url,'title':title})
mars_dictionary = {
"headline":news_title,
"article":news_p,
"image_url":featured_image_url,
"mars_tweet":mars_weather,
"mars_table":mars_table,
"hemisphere_urls":hemisphere_image_urls
}
browser.quit()
return mars_dictionary
``` |
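A small hypothetical driver for the scraper above, printing a couple of the keys the returned dictionary contains:
```python
# Hypothetical driver; scrape() returns the mars_dictionary built above.
import scrape_mars

if __name__ == "__main__":
    mars_data = scrape_mars.scrape()
    print(mars_data["headline"])
    print(len(mars_data["hemisphere_urls"]), "hemisphere images found")
```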
{
"source": "jmoon1506/pytaridx",
"score": 2
} |
#### File: pytaridx/pytaridx/main.py
```python
from argparse import ArgumentParser, RawTextHelpFormatter
import glob
from multiprocessing import Pool, Manager
import os
import sys
import pytaridx
def setup_parser():
parser = ArgumentParser(
prog="pytridx",
description="A CLI for managing pytaridx generated tar files.",
formatter_class=RawTextHelpFormatter,
)
subparsers = parser.add_subparsers(dest='subparser')
# Reindexing related command line utilities and arguments
reindex = subparsers.add_parser(
"reindex",
help="Tools for reindexing pytaridx generated files."
)
reindex.add_argument("-n", "--nprocesses", type=int, default=1,
help="Number of processes to use for reindexing [DEFAULT: %(default)d]"
)
reindex.add_argument("tarfiles", type=str, nargs="+",
help="Paths or globs of pytaridx IndexedTarFiles to reindex."
)
reindex.set_defaults(func=reindex_tarfiles)
return parser
def reindex_tarfiles(args):
pool = Pool(processes=args.nprocesses)
manager = Manager()
queue = manager.Queue()
for item in args.tarfiles:
_f_path = os.path.abspath(item)
if os.path.isfile(_f_path):
queue.put(item)
continue
for path in glob.glob(_f_path):
queue.put(path)
pool.map(process_reindex, [queue for i in range(args.nprocesses)])
print("Finished.")
def process_reindex(queue):
while not queue.empty():
_tar = os.path.abspath(queue.get())
print("Processing '%s'..." % (_tar))
try:
_tree = pytaridx.IndexedTarFile()
_tree.reindex(_tar)
except Exception as exep:
print("Failed to process '%s'." % (_tar))
print("Exception: %s" % (exep.msg))
continue
def main():
parser = setup_parser()
args = parser.parse_args()
rc = args.func(args)
sys.exit(rc)
```
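A quick way to exercise the argument parser above without touching real tar files is to pass an argv list directly; the archive names below are placeholders and the import assumes the package is installed:
```python
# Exercise the CLI parser above in isolation.
from pytaridx.main import setup_parser

parser = setup_parser()
args = parser.parse_args(["reindex", "-n", "2", "archive1.tar", "archive2.tar"])
print(args.nprocesses, args.tarfiles)   # -> 2 ['archive1.tar', 'archive2.tar']
```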
#### File: pytaridx/pytaridx/reindex.py
```python
import sys
import pytaridx
# ------------------------------------------------------------------------------
def main():
if len(sys.argv) <= 1:
ustring = "Usage: python reindex.py file1.tar [file2.tar file3.tar ...]"
ustring += "\n\n"
ustring += "This program (re-)creates indices for the tar files named on the command line.\n"
print(ustring)
else:
T = pytaridx.IndexedTarFile()
print(f"Creating new indixes for {len(sys.argv)-1} tar files.")
for x in sys.argv[1:]:
print(f"Creating index for file ({x})...")
T.reindex(x)
print("Finished.")
if __name__ == '__main__':
main()
# ------------------------------------------------------------------------------
``` |
{
"source": "jmooo/adventofcode",
"score": 4
} |
#### File: adventofcode/2015/day01.py
```python
with open('day01.input') as f:
input = list(f.read().rstrip())
def part1():
floor = 0
for command in input:
if command == "(":
floor += 1
else:
floor -= 1
return floor
def part2():
floor = 0
for index, command in enumerate(input):
if command == "(":
floor += 1
else:
floor -= 1
# enumerate is 0 indexed but the question is 1 indexed
if floor < 0:
return index+1
print(part1())
print(part2())
```
#### File: adventofcode/2016/day03.py
```python
with open('day03.input') as f:
data = [[int(x) for x in line.split()] for line in f]
def numvalid(triangles):
valid = 0
for x in triangles:
t = sorted(x)
if t[0] + t[1] > t[2]:
valid += 1
return valid
def part2():
i = 0
triangles = []
while i < len(data):
# zip is great for reformatting the input triangle lists!
newdata = zip(data[i], data[i+1], data[i+2])
for t in newdata:
triangles.append(t)
i = i+3
return numvalid(triangles)
print(numvalid(data))
print(part2())
```
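The `zip(data[i], data[i+1], data[i+2])` trick in part2 regroups three rows of numbers into column triples. A tiny standalone illustration:
```python
# zip over three rows yields column triples, which is exactly how part2 turns
# three input rows into three vertical triangles.
rows = [(101, 301, 501),
        (102, 302, 502),
        (103, 303, 503)]
print(list(zip(*rows)))   # [(101, 102, 103), (301, 302, 303), (501, 502, 503)]
```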
#### File: adventofcode/2017/day08.py
```python
with open('day08.input') as f:
# create a list of lists
# each line consists of 7 tokens in the format: e dec -568 if f != 0
# convert numbers to ints, using an lstrip trick to handle negatives
commands = [[int(x) if x.lstrip('-').isdigit() \
else x for x in line.strip().split(' ') if x != 'if'] for line in f]
def resolver(cmd, registers):
# unpack the command
reg, op, val = cmd
if op == '==':
return True if registers[reg] == val else False
elif op == '!=':
return True if registers[reg] != val else False
elif op == '>':
return True if registers[reg] > val else False
elif op == '>=':
return True if registers[reg] >= val else False
elif op == '<':
return True if registers[reg] < val else False
elif op == '<=':
return True if registers[reg] <= val else False
def solve():
# the first index of every command will be the register name
# initalize it with value 0
registers = { command[0]: 0 for command in commands }
highest = 0
for cmd in commands:
# check if the specified 'if' statement is True
if resolver(cmd[-3:], registers):
# if was True, execute command
reg, op, val = cmd[:3]
if op == 'inc':
registers[reg] += val
else:
registers[reg] -= val
# track highest value ever seen
if max(registers.values()) > highest:
highest = max(registers.values())
return max(registers.values()), highest
print(solve())
```
#### File: adventofcode/2017/day11.py
```python
with open('day11.input') as f:
data = [x for x in f.read().strip().split(',')]
class Wanderer():
def __init__(self, path, start_location=(0,0)):
self.path = path
self.location = start_location
self.max_distance_from_origin = 0
def wander(self):
# Follow your path where it may take you my child
for step in self.path:
self.hex_move(step)
# Did we wander farther from home than ever before?
if self.distance_from_origin() > self.max_distance_from_origin:
self.max_distance_from_origin = self.distance_from_origin()
def distance_from_origin(self):
return max((self.location[0]), abs(self.location[1]))
def hex_move(self, direction):
if direction == 'n':
self.location = (self.location[0], self.location[1]-1)
elif direction == 'ne':
self.location = (self.location[0]+1, self.location[1]-1)
elif direction == 'se':
self.location = (self.location[0]+1, self.location[1])
elif direction == 's':
self.location = (self.location[0], self.location[1]+1)
elif direction == 'sw':
self.location = (self.location[0]-1, self.location[1]+1)
elif direction == 'nw':
self.location = (self.location[0]-1, self.location[1])
def searchparty():
w = Wanderer(path=data)
w.wander()
print("part1: wandered to:", w.location, "steps from origin:", w.distance_from_origin())
print("part2: greatest distance:", w.max_distance_from_origin)
def example1():
cases = [(('ne','ne','ne'), 3),
(('ne','ne','sw','sw'), 0),
(('ne','ne','s','s'), 2),
(('se','sw','se','sw','sw'), 3)]
for path, answer in cases:
w = Wanderer(path=path)
w.wander()
print("example1: wandered to:", w.location, "steps from origin:", w.distance_from_origin())
assert answer == w.distance_from_origin()
example1()
searchparty()
```
#### File: adventofcode/2018/day01.py
```python
import itertools
with open('day01.input') as f:
data = [int(x) for x in f]
def part1():
return sum(data)
def part2():
current = 0
known_frequencies = {0}
for offset in itertools.cycle(data):
current = current + offset
if current in known_frequencies:
return current
else:
known_frequencies.add(current)
print(part1())
print(part2())
```
#### File: adventofcode/2018/day03.py
```python
import re
import numpy as np
# print big ol' grids
np.set_printoptions(threshold=np.nan)
# Parse each line into the format:
# [id, x, y, width, height]
with open('day03.input') as f:
data = []
for line in f:
data.append([int(x) for x in re.findall(r'\d+', line)])
def solve(gridsize):
# create fabric grid
grid = np.zeros(gridsize, np.int8)
# Build grid with number of claims for each square
for id, x, y, width, height in data:
for row in range(height):
for col in range(width):
grid[y+row][x+col] += 1
# Search grid for single claim that has only one claim for each square
claim = -1
for id, x, y, width, height in data:
# Take a slice of the grid
# first slice (y:y+height): selects your rows
# second slice (x:x+width): selects the columns within those rows
s = grid[y:y+height, x:x+width]
# If every cell grid-slice equals one (ie: a single claim on each cell)
# it will equal the area (width*height), and we've got our winner!
if s.sum() == width*height:
claim = id
break;
# Ignore squares that have 0 or 1 claims, that's fine it isn't double-booked!
return (np.count_nonzero(grid[grid > 1]), claim)
print(solve((1000,1000)))
``` |
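A standalone illustration of the NumPy slicing described in the comments above: `grid[y:y+h, x:x+w]` selects a claim's rectangle, so a sum equal to `h*w` means every covered square holds exactly one claim:
```python
# Demonstrate the 2D slice used to check a claim for overlaps.
import numpy as np

grid = np.zeros((8, 8), np.int8)
x, y, w, h = 2, 3, 4, 2
grid[y:y+h, x:x+w] += 1
print(grid[y:y+h, x:x+w].sum() == w * h)   # True -> no other claim overlaps this one
```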
{
"source": "jmooradi/local-data-api",
"score": 3
} |
#### File: local-data-api/local_data_api/exceptions.py
```python
from abc import ABC
from typing import Dict, Optional
from fastapi import HTTPException
class DataAPIException(HTTPException, ABC):
STATUS_CODE: int
def __init__(self, message: str, headers: Optional[Dict] = None):
self.message: str = message
super().__init__(status_code=self.STATUS_CODE, headers=headers)
@property
def code(self) -> str:
return self.__class__.__name__
class BadRequestException(DataAPIException):
STATUS_CODE = 400
class ForbiddenException(DataAPIException):
STATUS_CODE = 403
class InternalServerErrorException(DataAPIException):
def __init__(self, message: Optional[str] = None):
super().__init__(message or 'InternalServerError')
STATUS_CODE = 500
class NotFoundException(DataAPIException):
STATUS_CODE = 404
class ServiceUnavailableError(DataAPIException):
STATUS_CODE = 503
```
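A short sketch of how these exception classes behave on their own; the FastAPI handler wiring of the actual service is not shown here:
```python
# Each subclass reports its own class name as the error code and carries the
# HTTP status from STATUS_CODE.
from local_data_api.exceptions import BadRequestException

try:
    raise BadRequestException("Invalid SQL statement")
except BadRequestException as exc:
    print(exc.code, exc.status_code, exc.message)   # BadRequestException 400 Invalid SQL statement
```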
#### File: tests/test_resource/test_mysql.py
```python
from __future__ import annotations
from local_data_api.resources import MySQL
def test_create_connection_maker(mocker):
mock_connect = mocker.patch('local_data_api.resources.mysql.pymysql.connect')
connection_maker = MySQL.create_connection_maker(
host='127.0.0.1',
port=3306,
user_name='root',
password='<PASSWORD>',
engine_kwargs={'auto_commit': True},
)
connection_maker()
mock_connect.assert_called_once_with(
auto_commit=True, host='127.0.0.1', password='<PASSWORD>', port=3306, user='root'
)
mock_connect = mocker.patch('local_data_api.resources.mysql.pymysql.connect')
connection_maker = MySQL.create_connection_maker()
connection_maker()
mock_connect.assert_called_once_with()
``` |
{
"source": "jmoore001/JM_Custom_Maya_Tools",
"score": 3
} |
#### File: JM_Custom_Maya_Tools/Scripts/CreateProject.py
```python
import maya.cmds as cmds
import maya.mel as mel
import os
import sys
user = os.environ.get('USER')
path = 'C:/Users/' + user + '/Documents/maya/JM_Custom_Maya_Tools/Scripts'
if path not in sys.path:
sys.path.append(path)
import Edits
Edits.Edits()
class CreateProject(object):
def __init__(*args):
windowSize = (400, 180)
windowName = 'projectWindow'
windowTitle = 'Index Project Window'
def CloseWindow(*args):
if cmds.window(windowName, q = True, exists = True):
cmds.deleteUI(windowName, window = True)
CloseWindow()
def MakeFolder(*args):
name = cmds.textField(userInput, q = True, text = True)
chosenDirectory = cmds.button(directButton, q = True, l = True)
if chosenDirectory == 'Choose Directory':
cmds.warning('Must have a directory chosen to create project')
return
if len(name) == 0:
cmds.warning('Must choose a name for project')
return
fileList = cmds.getFileList(fld = chosenDirectory)
for f in fileList:
if f == name:
cmds.warning("Project already exists with name '{0}' in '{1}'".format(name, chosenDirectory))
return
projFolder = chosenDirectory + '/' + name
print(projFolder)
os.makedirs(projFolder)
def create_folder( directory ):
if not os.path.exists( directory ):
os.makedirs( directory )
maya_dir = projFolder
create_folder( maya_dir )
for file_rule in cmds.workspace(query=True, fileRuleList=True):
file_rule_dir = cmds.workspace(fileRuleEntry=file_rule)
maya_file_rule_dir = os.path.join( maya_dir, file_rule_dir)
create_folder( maya_file_rule_dir )
os.rmdir(projFolder + '/autosave')
os.rmdir(projFolder + '/clips')
os.rmdir(projFolder + '/movies')
os.rmdir(projFolder + '/sceneAssembly')
os.rmdir(projFolder + '/scripts')
os.rmdir(projFolder + '/sound')
os.rmdir(projFolder + '/Time Editor/Clip Exports')
os.rmdir(projFolder + '/Time Editor')
unityDir = projFolder + '/unity'
fbxDir = projFolder + '/fbx'
texturesDir = projFolder + '/textures'
os.makedirs( unityDir )
os.makedirs( fbxDir )
os.makedirs( texturesDir )
evalCommand = 'setProject \"' + maya_dir + '\"'
mel.eval(evalCommand)
cmds.warning("Project \'" + projFolder + "\' successfully created!")
CloseWindow()
def ChooseDirectory(*args):
directory = cmds.fileDialog2(ds = 2,fm = 2, cap = 'Choose Directory For Project', okc = 'Set')
if directory == None:
return
cleanDirect = str(directory)[2:-2]
cmds.button(directButton, edit = True, l = cleanDirect)
projWindow = cmds.window(windowName, t = windowTitle, widthHeight = windowSize, s = False)
parentLayout = cmds.rowColumnLayout(adjustableColumn = True)
cmds.text('Create Index AR Project')
cmds.separator(h = 10)
directButton = cmds.button(l = 'Choose Directory', c = ChooseDirectory)
cmds.separator(h = 10)
cmds.text('Choose Name for Project')
global userInput
userInput = cmds.textField()
cmds.separator(h = 10)
cmds.button(l = 'Create', c = MakeFolder)
cmds.button(l = 'Close', c = CloseWindow)
cmds.showWindow()
cmds.window(windowName, e = True, widthHeight = windowSize)
CreateProject()
```
#### File: JM_Custom_Maya_Tools/Scripts/JMCustomMarkingMenu.py
```python
import maya.cmds as cmds
import os
import sys
user = os.environ.get('USER')
path = 'C:/Users/{}/Documents/maya/JM_Custom_Maya_Tools/Scripts'.format(user)
if path not in sys.path:
sys.path.append(path)
class JMCustomToolsMarkingMenu(object):
def __init__(self):
self.RemoveOld()
self.Build()
def RemoveOld(self):
if cmds.popupMenu('JMCustomMarkingMenu', ex=True):
cmds.deleteUI('JMCustomMarkingMenu')
def Build(self):
customMenu = cmds.popupMenu('JMCustomMarkingMenu', ctl = True, alt = True, mm = True, b = 3, pmo = True, pmc = self.BuildMarkingMenu, p = "viewPanes")
def BuildMarkingMenu(self, menu, parent):
user = os.environ.get('USER')
path = 'C:/Users/{}/Documents/maya/JM_Custom_Maya_Tools/Scripts'.format(user)
if path not in sys.path:
sys.path.append(path)
iconFolder = 'C:/Users/{}/Documents/maya/JM_Custom_Maya_Tools/Icons'.format(user)
def LibraryCommand(*args):
import KitbashUI
KitbashUI.KitbashUI()
def AssignUVMatCommand(*args):
import AssignUVMaterials
AssignUVMaterials.ApplyUVsUI()
def ApplyUVsCommand(*args):
import applysameUVs
applysameUVs.ApplySameUVs()
def CurvesToPolyCommand(*args):
import curvestopoly
curvestopoly.CurvesToPolygons()
def GroupUVsCommand(*args):
import GroupUV
GroupUV.GroupByUVs()
def QCToolCommand(*args):
import QCTool
QCTool.QCUI()
def CreateProjectCommand(*args):
import CreateProject
CreateProject.CreateProject()
def ToolKitCommand(*args):
import InitilizeTools
InitilizeTools.CustomToolsJM()
cmds.menuItem(p=menu, l="Library", rp="S", i=iconFolder + '/KitbashUI.png',c = LibraryCommand)
cmds.menuItem(p=menu, l="Assign Materials By UVs", rp="W", i=iconFolder + '/AssignUVMaterials.png',c = AssignUVMatCommand)
cmds.menuItem(p=menu, l="Apply Same UVs", rp="E", i=iconFolder + '/applysameUVs.png',c = ApplyUVsCommand)
cmds.menuItem(p=menu, l="Curves To Geometry", rp="SE", i=iconFolder + '/curvestopoly.png',c = CurvesToPolyCommand)
cmds.menuItem(p=menu, l="Group By UVs", rp="SW", i=iconFolder + '/GroupUV.png',c = GroupUVsCommand)
cmds.menuItem(p=menu, l="Quality Control", rp="NW", i=iconFolder + '/QCTool.png',c = QCToolCommand)
cmds.menuItem(p=menu, l="Create Project", rp="NE", i=iconFolder + '/CreateProject.png',c = CreateProjectCommand)
cmds.menuItem(p=menu, l="Tool Kit", rp="N", i=iconFolder + '/CustomToolsIcon.png',c = ToolKitCommand)
JMCustomToolsMarkingMenu()
``` |
{
"source": "jmorais/covidbot",
"score": 3
} |
#### File: covidbot/local/alerts.py
```python
from random import choices
from typing import Callable
import humanize
from .covid import Covid
from .graph import Graph
from .image import Image
class Alerts(Covid, Graph, Image):
def __init__(self):
super().__init__()
@property
def chosen_data(self) -> Callable:
"""
Chooses at random with weighted distribution whether to get
data for the whole world or any specific country. We want
to post countries more.
"""
chosen: Callable = choices(
[
self.world_data,
self.random_country_data(),
self.random_country_graph(),
self.random_image(),
],
weights=[0.2, 0.5, 0.2, 0.1],
k=1,
)
return chosen[0]
def generate(self):
"""
Generates the alert.
Data for a given country looks like this:
{'country': 'Malta', 'cases': 21, 'todayCases': 3, 'deaths': 0, 'todayDeaths': 0, 'recovered': 2, 'critical': 0}
Data for the world looks like:
{'cases': 162386, 'deaths': 5984, 'recovered': 75967}
"""
data = self.chosen_data
if data.get("image"):
self.__image(data)
elif data.get("graph"):
self.__graph(data)
elif not data.get("country"):
self.__world(data)
elif data.get("cases") == 0:
self.__no_cases(data)
elif data.get("cases") == data.get("todayCases"):
self.__first_batch(data)
elif data.get("deaths") == data.get("todayDeaths") and data.get("deaths") != 0:
self.__first_deaths(data)
else:
self.__country(data)
def __image(self, data):
self.post(
f"Guidance from the World Health Organization (WHO)",
media_ids=[self.media_id],
)
def __graph(self, data):
cases = data["cases"]
country = data["country"]
self.post(
f"Evolution of number of cases for {country}, with a total confirmed of {humanize.intcomma(cases)}",
media_ids=[self.media_id],
)
def __world(self, data):
cases = data["cases"]
deaths = data["deaths"]
rate = round(deaths / cases * 100, 2)
self.post(
f"Latest worldwide COVID-19 data: {humanize.intcomma(cases)} cases, {humanize.intcomma(deaths)} deaths.\n\nA {rate}% fatality rate."
)
def __country(self, data):
cases = data["cases"]
deaths = data["deaths"]
rate = round(deaths / cases * 100, 2)
self.post(
f"Latest COVID-19 data for {data['country']}: {humanize.intcomma(cases)} case{'s' if cases > 1 else ''}, of those {humanize.intcomma(data['todayCases'])} today; {humanize.intcomma(deaths)} death{'s' if deaths > 1 else ''}, of those {humanize.intcomma(data['todayDeaths'])} today.\n\nA {rate}% fatality rate."
)
def __first_batch(self, data):
cases = data["cases"]
deaths = data["deaths"]
self.post(
f"First case{'s' if cases > 1 else ''} of COVID-19 confirmed in {data['country']}: {humanize.intcomma(cases)} case{'s' if cases > 1 else ''}, with {humanize.intcomma(deaths)} death{'s' if deaths > 1 else ''} reported."
)
def __first_deaths(self, data):
cases = data["cases"]
deaths = data["deaths"]
rate = round(deaths / cases * 100, 2)
self.post(
f"First death{'s' if cases > 1 else ''} by COVID-19 reported in {data['country']}: {humanize.intcomma(deaths)} {'people' if cases > 1 else 'person'} have died out of {humanize.intcomma(cases)} confirmed cases.\n\nA {rate}% fatality rate."
)
def __no_cases(self, data):
self.post(
f"Latest COVID-19 data: {data['country']} still reports no infections or deaths."
)
```
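`chosen_data` relies on `random.choices` with explicit weights. A standalone illustration of that weighted pick, using placeholder labels instead of the bound methods:
```python
# Weighted random selection as used by Alerts.chosen_data: countries get picked
# roughly half the time, the world summary and graphs less often.
from collections import Counter
from random import choices

options = ["world", "country", "graph", "image"]      # placeholder labels
picks = choices(options, weights=[0.2, 0.5, 0.2, 0.1], k=10_000)
print(Counter(picks))   # roughly 20% / 50% / 20% / 10%
```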
#### File: covidbot/local/graph.py
```python
from datetime import datetime
from typing import ClassVar
import numpy as np
import pandas as pd
from matplotlib import pyplot as plt
import matplotlib.dates as mdates
from .twitter import Twitter
from .utils import distribution
class Graph(Twitter):
cases_csv: ClassVar[
str
] = "https://raw.githubusercontent.com/CSSEGISandData/COVID-19/master/csse_covid_19_data/csse_covid_19_time_series/time_series_19-covid-Confirmed.csv"
def __init__(self):
super().__init__()
self.df = ( # Get the data in a format we want to work with
pd.read_csv(self.cases_csv)
.drop(columns=["Province/State", "Lat", "Long"])
.groupby("Country/Region")
.sum()
.replace(0, np.nan)
.dropna()
.rename(
columns=lambda x: datetime.strptime(x, "%m/%d/%y").strftime("%d%b%Y")
)
)
self.df = self.df.sort_values(by=self.df.columns[-1], ascending=False)
def random_country(self):
"""
Gets a country at random. Weights more highly for countries
with more cases, using a Pareto distribution.
"""
indexes = self.df.index
dist = distribution(len(indexes))
chosen = np.random.choice(indexes, 1, p=dist)[0]
data = self.df.loc[chosen, :]
return chosen, data
def make_graph(self, country, series):
"""
Generates graph for a given country, returns the total
number of cases. This method is doing two things at the
same time, which is less than ideal but this is where we
landed for now... may revisit later.
"""
plt.figure(figsize=(12, 6.75))
ax = plt.gca()
ax.set_yscale("log") # Use logarithmic scale for clarity
plt.style.use("seaborn-darkgrid") # This is a nice theme to use.
mx = max(map(int, list(series)))
plt.yticks(
[mx, mx // 10, mx // 100, mx // 500, mx // 2500],
[
int(round(mx, len(str(mx)) * -1 + 2)),
int(round(mx // 10, len(str(mx // 10)) * -1 + 2)),
int(round(mx // 100, len(str(mx // 100)) * -1 + 2)),
int(round(mx // 500, len(str(mx // 500)) * -1 + 2)),
int(round(mx // 2500, len(str(mx // 2500)) * -1 + 2)),
],
)
ax.xaxis.set_major_formatter(mdates.DateFormatter("%d%b%Y"))
plt.margins(0.02)
plt.title(f"COVID-19 cases: {country}")
fig = plt.figure()
fig.autofmt_xdate()
series.plot(marker="o")
plt.savefig("/tmp/plot.png", bbox_inches="tight")
return mx
def random_country_graph(self):
"""
Generates graph, returns data to create an alert.
"""
country, data = self.random_country()
cases_total = self.make_graph(country, data)
self.media_id = self.upload_image("/tmp/plot.png")
return {"graph": True, "cases": cases_total, "country": country}
``` |
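`distribution` is imported from `.utils`, which is not part of this excerpt; per the docstring it produces Pareto-style weights favouring the highest-case countries. The helper below is a plausible stand-in for illustration only, not the bot's actual implementation:
```python
# Hypothetical stand-in for utils.distribution(n): a normalised, heavy-tailed
# weight vector so that earlier (higher-case) rows are picked more often.
import numpy as np

def distribution(n, shape=1.5):
    raw = 1.0 / np.arange(1, n + 1) ** shape   # heavier weight for the first rows
    return raw / raw.sum()                     # must sum to 1 for np.random.choice

weights = distribution(5)
print(weights, weights.sum())
```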
{
"source": "jmorakuebler/tdp-pizza-api",
"score": 3
} |
#### File: app/pizzas/permissions.py
```python
from rest_framework import permissions
class StaffOrSuperuserPermission(permissions.BasePermission):
"""
Custom permission that grants access to authenticated users who are
staff or superusers.
"""
def has_permission(self, request, view):
return bool(
request.user and
request.user.is_authenticated and
(request.user.is_staff or request.user.is_superuser)
)
``` |
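Typical Django REST Framework usage of the permission class above; the viewset name and response body are assumptions:
```python
# Hypothetical viewset showing where StaffOrSuperuserPermission would be plugged in.
from rest_framework import viewsets
from rest_framework.response import Response

from app.pizzas.permissions import StaffOrSuperuserPermission


class PizzaAdminViewSet(viewsets.ViewSet):
    # Only authenticated staff or superuser accounts get past this check
    permission_classes = [StaffOrSuperuserPermission]

    def list(self, request):
        return Response({"detail": "staff-only content"})
```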
{
"source": "J-Moravec/CloneFinderAPI",
"score": 3
} |
#### File: clonefinder/alignments/FreqToMegaSeq.py
```python
from clonefinder.tsp_profiles.TumorSampleProfileList import TumorSampleProfileList
class FreqToMegaSeq:
"""
Generate a MEGA sequence alignment file from a TumorSampleProfile
A MEGA dna sequence alignment will be generated based on presence/absence
of SNVs in the tumor sample profile. A sequence is generated for each
tumor sample, where an 'A' represents absence of the SNV at a given site
and a 'T' represents presence of the SNV at that site.
"""
def __init__(self):
self._mega_seqs = []
def initialize(self, tumor_sample_profiles, remove_duplicates = True):
self._mega_seqs.append('#MEGA')
self._mega_seqs.append('!Title ' + tumor_sample_profiles.name + ';')
self._mega_seqs.append('!Format datatype=dna' + ';')
self._mega_seqs.append(' ')
self._mega_allseqs = []
num_sites = tumor_sample_profiles.num_read_counts()
hg19 = self._get_hg19_sequence(num_sites)
self._mega_seqs.append('#hg19')
self._mega_seqs.append(hg19)
for profile in tumor_sample_profiles:
name = profile.name
seqdata = profile.get_alignment_string()
self._mega_allseqs.append('#' + name)
self._mega_allseqs.append(seqdata)
if remove_duplicates == True:
if seqdata in self._mega_seqs:
continue
self._mega_seqs.append('#' + name)
self._mega_seqs.append(seqdata)
def _get_hg19_sequence(self, num_sites):
result = ''
if num_sites > 0:
index = 1
while index <= num_sites:
result = result + 'A'
index += 1
return result
def get_mega_alignment(self):
return self._mega_seqs
def get_mega_alignment_string(self):
result = ''
for item in self._mega_seqs:
result += item + "\n"
return result
def save_mega_alignment_to_file(self, filename):
destination = open(filename,'w')
destination.write(self.get_mega_alignment_string())
destination.close()
def get_mega_allalignment(self):
return self._mega_allseqs
```
#### File: clonefinder/config/FormatInput.py
```python
class FormatInput():
def add_low_quality_SNV_info(self,CNV_information,total_read, alt_read,total_read_cut,mutant_read_cut):
New_CNV_information={}
for tumor in CNV_information:
CNVinfo=CNV_information[tumor]
TotRead=total_read[tumor]
AltRead=alt_read[tumor]
Len=len(CNVinfo)
c=0
NewIn=[]
while c<Len:
In=CNVinfo[c]
if CNVinfo[c]=='normal':
if TotRead[c]<total_read_cut or (AltRead[c]!=0 and AltRead[c]<mutant_read_cut): In='Bad-normal'
NewIn.append(In)
c+=1
New_CNV_information[tumor]=NewIn
return New_CNV_information
def ccf2snv(self, ccf_file, read_coverage):
CCF=ccf_file
ReadCov=float(read_coverage)
Out=CCF[:-4]+'snv.txt'
OutCNV=CCF[:-4]+'snv-CNV.txt'
Tu2CCF=self.ListColStr(CCF)
TuOrder=[]
out=''
outCNV=''
for Tu in Tu2CCF:
TuOrder.append(Tu)
out+=Tu+':ref\t'+Tu+':alt\t'
outCNV+=Tu+'\t'
out=out[:-1]+'\n'
outCNV=outCNV[:-1]+'\n'
Len=len(Tu2CCF[Tu])
c=0
while c<Len:
for Tu in TuOrder:
Mut=int(ReadCov*float(Tu2CCF[Tu][c])/2)
Ref=int(ReadCov-Mut)
out+=str(Ref)+'\t'+str(Mut)+'\t'
outCNV+='normal\t'
out=out[:-1]+'\n'
outCNV=outCNV[:-1]+'\n'
c+=1
self.save_file(Out,out)
self.save_file(OutCNV,outCNV)
def ListColStr(self, File):
File=open(File,'r').readlines()
NameOrder,Name2Col=self.GetHead(File[0])
File=File[1:]
Tu2Freq={}
for Tu in NameOrder:
Tu2Freq[Tu]=[]
for i in File:
i=i.strip().split('\t')
for Tu in Name2Col:
Tu2Freq[Tu].append(i[Name2Col[Tu]])
return Tu2Freq
def GetHead(self, Head):
Head=Head.strip().split('\t')
Len=len(Head)
c=0
Name2Col={}
NameOrder=[]
while c<Len:
Name2Col[Head[c]]=c
NameOrder.append(Head[c])
c+=1
return NameOrder,Name2Col
def save_file(self, Out, out):
OutF=open(Out,'w')
OutF.write(out)
OutF.close()
def cnv2snv(self, Ta, CNV):
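        # Adjust each SNV's observed frequency using the per-site value from the
        # CNV file (divide by twice that value), then rewrite ref/alt read counts
        # at the original total coverage into a '*snv.txt' file.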
Out=Ta[:-4]+'snv.txt'
SOrder, Samp2CNVfreqIn ,SNVnum,AAA=self.ObsFreqTaHead(CNV)
Tu2TotRead ,Tu2SNVfre=self.GetTotRead(Ta)
out=open(CNV,'r').readlines()[0]
c=0
while c<SNVnum:
for S in SOrder:
OriFre=Tu2SNVfre[S][c]
MutCopyFra=Samp2CNVfreqIn[S][c]
AdjFre=OriFre/(MutCopyFra*2)
NewMut=int(round(AdjFre*Tu2TotRead[S][c],0))
NewRef=Tu2TotRead[S][c]-NewMut
out+=str(NewRef)+'\t'+str(NewMut)+'\t'
out=out[:-1]+'\n'
c+=1
self.save_file(Out,out)
def snv2snv(self, Ta, CNVmake):
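        # Rewrite the input read-count table with one 'sample:ref'/'sample:alt'
        # column pair per sample and, if requested, emit a companion
        # '*snv-CNV.txt' file marking every site as 'normal'.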
Out=Ta[:-4]+'snv.txt'
OutCNV=Ta[:-4]+'snv-CNV.txt'
Ta=open(Ta,'r').readlines()
SOrder, Samp2Col = self.GetHeadObsFreqTaHead(Ta[0].strip())
out=''
outCNV=''
for Sample in SOrder:
out+=Sample+':ref\t'+Sample+':alt\t'
outCNV+=Sample+'\t'
out=out[:-1]+'\n'
outCNV=outCNV[:-1]+'\n'
Ta=Ta[1:]
for i in Ta:
i=i.strip().split('\t')
for S in SOrder:
out+=i[Samp2Col[S+':ref']]+'\t'+i[Samp2Col[S+':alt']]+'\t'
outCNV+='normal\t'
out=out[:-1]+'\n'
outCNV=outCNV[:-1]+'\n'
self.save_file(Out,out)
if CNVmake=='withCNVfile': self.save_file(OutCNV,outCNV)
def ObsFreqTaHead(self, Freq):
Freq=open(Freq,'r').readlines()
SampOrder,Name2Col=self.GetHeadObsFreqTaHead(Freq[0])
Samp2FreqIn={}
Samp2TotRead={}
for Samp in SampOrder:
Samp2FreqIn[Samp]=[]
Samp2TotRead[Samp]=[]
Freq=Freq[1:]
SNVnum=0
for i in Freq:
i=i.strip().split('\t')
TMP={}
for Samp in SampOrder:
MutC=int(i[Name2Col[Samp+':alt']])
RefC=int(i[Name2Col[Samp+':ref']])
MutFreq=1.0*MutC/(MutC+RefC)
Tot=MutC+RefC
Samp2FreqIn[Samp].append(MutFreq)
Samp2TotRead[Samp].append(Tot)
SNVnum+=1
return SampOrder, Samp2FreqIn ,SNVnum,Samp2TotRead
def GetHeadObsFreqTaHead(self, Head):
Head=Head.strip().split('\t')
SampOrder=[]
Name2Col={}
c=0
Len=len(Head)
while c<Len:
i=Head[c]
if i.find(':')!=-1:
Samp=i.split(':')[0]
Code=Samp in SampOrder
if Code!=True:
SampOrder.append(Samp)
Name2Col[i]=c
c+=1
return SampOrder,Name2Col
def GetHeadObsFreqTaHead1(self, Head): #as a string
Head=Head.strip().split('\t')
SampOrder=[]
Name2Col={}
c=0
Len=len(Head)
while c<Len:
i=Head[c]
if i.find(':')!=-1:
Samp=i.split(':')[0].replace(' ','')
Code=Samp in SampOrder
if Code!=True:
SampOrder.append(Samp)
Name2Col[i.replace(' ','')]=c
c+=1
return SampOrder,Name2Col
def GetTotRead(self, Obs):
Obs=open(Obs,'r').readlines()
TuOrder,Tu2Col =self.GetHeadObsFreqTaHead1(Obs[0])
Tu2SNVfre={}
Tu2TotRead={}
Obs=Obs[1:]
for i in Obs:
i=i.strip().split('\t')
for Tu in TuOrder:
Mut=int(i[Tu2Col[Tu+':'+'alt']])
Tot=int(i[Tu2Col[Tu+':'+'ref']])+Mut
Fre=1.0*Mut/Tot
Code=Tu in Tu2SNVfre
if Code!=True:
Tu2SNVfre[Tu]=[]
Tu2TotRead[Tu]=[]
Tu2SNVfre[Tu].append(Fre)
Tu2TotRead[Tu].append(Tot)
return Tu2TotRead ,Tu2SNVfre
```
#### File: clonefinder/significance_test/cluster_test.py
```python
from clonefinder.alignments.MegaAlignment import MegaAlignment
import scipy
import os
import sys
class cluster_test():
def remove_insignificant_clones(self, v_obs, CloFre_clone, clone_seq_builder, Tu2CNV, Cut):
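        # For each tumor, order its clones by mutation count and compare, for
        # every clone pair, the observed SNV frequencies at shared vs. unique
        # sites with Welch's t-test; clones that are not significantly different
        # from a larger clone (p > Cut) are removed from that tumor.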
Align=MegaAlignment()
OutAncAll='SigTest.txt'
        outAncAll='tumor\tDescendant-Ancestor\tSNV posi\tType\tObsFre\n'
Clone_list, clone_seq_dic = Align.name2seq(clone_seq_builder)
new_clone_freq={}
new_clone_seq_dic={}
for tumor in v_obs:
CNV=Tu2CNV[tumor]
Clo2Fre=CloFre_clone['T-'+tumor]
ObsFre = v_obs[tumor]
clone_order=[]
MutNum2Clo={}
MutNum_ls=[]
for Clo in Clo2Fre:
if Clo2Fre[Clo]>0:
MutPosLs=Align.GetMutPos(clone_seq_dic['#'+Clo])
MutNum=len(MutPosLs)
if (MutNum in MutNum2Clo)!=True: MutNum2Clo[MutNum]=[]
MutNum2Clo[MutNum].append(Clo)
MutNum_ls.append(MutNum)
MutNum_ls=list(set(MutNum_ls))
MutNum_ls.sort(reverse=True)
for MutNum in MutNum_ls:
clone_order+=MutNum2Clo[MutNum]
CloNum=len(clone_order)
C1Max=CloNum-1
InsigLs=[]
C1=0
while C1<C1Max:
Clo1=clone_seq_dic['#'+clone_order[C1]]
num_sites=len(Clo1)
Min_num=0.01*num_sites
C2=C1+1
while C2<CloNum:
Clo2=clone_seq_dic['#'+clone_order[C2]]
Share=[]
Unique=[]
c=0
while c< num_sites:
if CNV[c]=='normal':
if Clo1[c]=='T' and Clo2[c]=='T':
Share.append(ObsFre[c])
outAncAll+=tumor+'\t'+clone_order[C1]+'-'+clone_order[C2]+'\t'+str(c)+'\tShare\t'+str(ObsFre[c])+'\n'
elif Clo1[c]=='T' and Clo2[c]=='A':
Unique.append(ObsFre[c])
outAncAll+=tumor+'\t'+clone_order[C1]+'-'+clone_order[C2]+'\t'+str(c)+'\tUnique\t'+str(ObsFre[c])+'\n'
c+=1
if (len(Share)<3 or len(Unique)<3) or (len(Share)<Min_num or len(Unique)<Min_num): P=1
else:
P=scipy.stats.ttest_ind(Share,Unique, equal_var = False)
P=P[-1]
if P>Cut:
if clone_order[C1].find('Clu')!=-1 and clone_order[C2].find('Clu')==-1: InsigLs.append(clone_order[C1])
else: InsigLs.append(clone_order[C2])
C2+=1
C1+=1
InsigLs=list(set(InsigLs))
if InsigLs!=[]: print('insignificant clones', tumor, InsigLs)
new_clone_fre_in={}
for Clo in Clo2Fre:
if Clo2Fre[Clo]>0 and InsigLs.count(Clo)==0:
new_clone_fre_in[Clo]=Clo2Fre[Clo]
new_clone_seq_dic['#'+Clo]=clone_seq_dic['#'+Clo]
new_clone_freq['T-'+tumor]= new_clone_fre_in
new_seq_builder=Align.UpMeg(new_clone_seq_dic,[])
return new_seq_builder, new_clone_freq
def remove_insignificant_clones_add(self, v_obs, CloFre_clone, clone_seq_builder, Tu2CNV, Cut):
Align=MegaAlignment()
OutAncAll='SigTest.txt'
        outAncAll='tumor\tDescendant-Ancestor\tSNV posi\tType\tObsFre\n'
Clone_list, clone_seq_dic = Align.name2seq(clone_seq_builder)
new_clone_freq={}
new_clone_seq_dic={}
for tumor in v_obs:
CNV=Tu2CNV[tumor]
Clo2Fre=CloFre_clone['T-'+tumor]
ObsFre = v_obs[tumor]
add_clone_freq={}
clone_order=[]
MutNum2Clo={}
MutNum_ls=[]
for Clo in Clo2Fre:
if Clo2Fre[Clo]>0:
MutPosLs=Align.GetMutPos(clone_seq_dic['#'+Clo])
MutNum=len(MutPosLs)
if (MutNum in MutNum2Clo)!=True: MutNum2Clo[MutNum]=[]
MutNum2Clo[MutNum].append(Clo)
MutNum_ls.append(MutNum)
MutNum_ls=list(set(MutNum_ls))
MutNum_ls.sort(reverse=True)
for MutNum in MutNum_ls:
clone_order+=MutNum2Clo[MutNum]
CloNum=len(clone_order)
C1Max=CloNum-1
InsigLs=[]
add_clone_freq[tumor]=[]
C1=0
while C1<C1Max:
Clo1=clone_seq_dic['#'+clone_order[C1]]
num_sites=len(Clo1)
Min_num=0.01*num_sites
C2=C1+1
while C2<CloNum:
Clo2=clone_seq_dic['#'+clone_order[C2]]
Share=[]
Unique=[]
c=0
while c< num_sites:
if CNV[c]=='normal':
if Clo1[c]=='T' and Clo2[c]=='T':
Share.append(ObsFre[c])
outAncAll+=tumor+'\t'+clone_order[C1]+'-'+clone_order[C2]+'\t'+str(c)+'\tShare\t'+str(ObsFre[c])+'\n'
elif Clo1[c]=='T' and Clo2[c]=='A':
Unique.append(ObsFre[c])
outAncAll+=tumor+'\t'+clone_order[C1]+'-'+clone_order[C2]+'\t'+str(c)+'\tUnique\t'+str(ObsFre[c])+'\n'
c+=1
if (len(Share)<3 or len(Unique)<3) or (len(Share)<Min_num or len(Unique)<Min_num): P=1
else:
P=scipy.stats.ttest_ind(Share,Unique, equal_var = False)
P=P[-1]
if P>Cut:
if clone_order[C1].find('Clu')!=-1 and clone_order[C2].find('Clu')==-1:
                            InsigLs.append(clone_order[C1])
else:
InsigLs.append(clone_order[C2])
C2+=1
C1+=1
InsigLs=list(set(InsigLs))
if InsigLs!=[]: print('insignificant clones', tumor, InsigLs)
new_clone_fre_in={}
for Clo in Clo2Fre:
New_cloe_hit=[]
if Clo2Fre[Clo]>0 and InsigLs.count(Clo)==0:
if (Clo in new_clone_fre_in)!=True: new_clone_fre_in[Clo]=0
new_clone_fre_in[Clo]+=Clo2Fre[Clo]
new_clone_seq_dic['#'+Clo]=clone_seq_dic['#'+Clo]
New_cloe_hit.append(Clo)
print(tumor, InsigLs, new_clone_fre_in)
for InsigClo in InsigLs:
Index=clone_order.index(InsigClo)
Cont='y'
while Cont=='y':
NextClo=clone_order[Index-1]
if InsigLs.count(NextClo)==0 and Clo2Fre[NextClo]>0:
Cont='n'
else: Index=Index-1
print(NextClo,InsigClo,Clo2Fre[InsigClo])
if (NextClo in new_clone_fre_in)!=True: new_clone_fre_in[NextClo]=0
CurTot= new_clone_fre_in[NextClo]+Clo2Fre[InsigClo]
if CurTot<=1: new_clone_fre_in[NextClo]+= Clo2Fre[InsigClo]
else: new_clone_fre_in[NextClo]=1.0
new_clone_freq[tumor]= new_clone_fre_in
print(new_clone_fre_in)
print(new_clone_freq)
new_seq_builder=Align.UpMeg(new_clone_seq_dic,[])
return new_seq_builder, new_clone_freq
``` |
{
"source": "J-Moravec/pairtree",
"score": 2
} |
#### File: pairtree/comparison/count_trees.py
```python
import argparse
import pickle
import numpy as np
from numba import njit
@njit
def count_trees(tau, phi, order, traversal):
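    # Enumerate trees by assigning a parent to each node in `order`, one node at
    # a time (DFS or BFS over partial trees). `tau[i, j] == 1` marks i as a
    # permitted parent of j, and `delta` tracks how much of each node's
    # frequency (phi) is still unallocated to children, so any assignment that
    # would drive it negative is pruned.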
assert traversal == 'dfs' or traversal == 'bfs'
K = len(tau)
expected_colsum = np.ones(K)
expected_colsum[0] = 0
first_partial = np.copy(tau)
np.fill_diagonal(first_partial, 0)
first_delta = np.copy(phi)
partial_trees = [(1, first_partial, first_delta)]
completed_trees = 0
while len(partial_trees) > 0:
if traversal == 'dfs':
to_resolve, P, delta = partial_trees.pop()
else:
to_resolve, P, delta = partial_trees.pop(0)
#to_resolve, P, delta = partial_trees[0]
#partial_trees = partial_trees[1:]
if to_resolve == K:
assert np.all(expected_colsum == np.sum(P, axis=0))
assert np.all(0 <= delta) and np.all(delta <= 1)
np.fill_diagonal(P, 1)
completed_trees += 1
continue
R = order[to_resolve]
parents = np.nonzero(P[:,R])[0]
for parent in parents:
P_prime = np.copy(P)
P_prime[:,R] = 0
P_prime[parent,R] = 1
if np.any(delta[parent] - phi[R] < 0):
continue
delta_prime = np.copy(delta)
delta_prime[parent] -= phi[R]
partial_trees.append((to_resolve + 1, P_prime, delta_prime))
return completed_trees
@njit
def make_order(phi):
phisum = np.sum(phi, axis=1)
order = np.argsort(-phisum)
assert order[0] == 0
return order
@njit
def make_tau(phi, order):
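    # tau[i, j] = 1 when clone i precedes j in `order` and i's frequency is >=
    # j's frequency in every sample, i.e. i is a candidate ancestor of j.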
K, S = phi.shape
tau = np.eye(K)
for I in range(K):
for J in range(I + 1, K):
I_prime = order[I]
J_prime = order[J]
assert not np.all(phi[I_prime] == phi[J_prime])
if np.all(phi[I_prime] >= phi[J_prime]):
tau[I_prime,J_prime] = 1
return tau
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('sim_data_fn')
args = parser.parse_args()
with open(args.sim_data_fn, 'rb') as dataf:
simdata = pickle.load(dataf)
phi, true_tree = simdata['phi'], simdata['adjm']
order = make_order(phi)
tau = make_tau(phi, order)
num_trees = count_trees(tau, phi, order, 'dfs')
print(args.sim_data_fn, num_trees)
main()
```
#### File: pairtree/comparison/extract_runtimes.py
```python
import argparse
import csv
import os
import re
MISSING = -1
def extract_runid(cmd):
match = re.search(r'(sim_[^\.]+)', cmd)
assert match is not None
return match.groups()[0]
def extract_runtimes(logfn):
runtimes = {}
with open(logfn) as F:
reader = csv.DictReader(F, delimiter='\t')
for line in reader:
runid = extract_runid(line['Command'])
runtime = float(line['JobRuntime'])
runtimes[runid] = runtime
return runtimes
def load_batches(batches):
results = {}
for logfn in batches:
batch = os.path.basename(logfn).split('.')[1]
results[batch] = extract_runtimes(logfn)
return results
def combine_batches(results):
runids = set([runid for batch in results.values() for runid in batch.keys()])
combined = {}
for K in runids:
combined[K] = {}
for batch_name, batch_results in results.items():
if K in batch_results:
combined[K][batch_name] = batch_results[K]
else:
combined[K][batch_name] = MISSING
return combined
def print_runtimes(methods, results):
print('runid', *methods, sep=',')
for runid in sorted(results.keys()):
times = [str(results[runid][M]) for M in methods]
print(runid, *times, sep=',')
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('batches', nargs='+')
args = parser.parse_args()
batches = load_batches(args.batches)
methods = sorted(batches.keys())
results = combine_batches(batches)
print_runtimes(methods, results)
if __name__ == '__main__':
main()
```
#### File: pairtree/comparison/impute_missing_mutphis.py
```python
import argparse
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', 'lib'))
import inputparser
import mutphi
import common
def sort_mutphi(mphi):
sorted_vids = common.sort_vids(mphi.vids)
mapping = [mphi.vids.index(V) for V in sorted_vids]
assert sorted_vids == [mphi.vids[idx] for idx in mapping]
sorted_logprobs = np.array([mphi.logprobs[idx] for idx in mapping])
return mutphi.Mutphi(
vids = sorted_vids,
assays = mphi.assays,
logprobs = sorted_logprobs,
)
def impute(ssmfn, params, mphi):
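    # Add clustered variants that are missing from the mutphi results, assigning
    # each one a uniform per-read log-probability of log(1 / total_reads) so
    # every variant in the clustering is scored.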
clustered = set([V for C in params['clusters'] for V in C])
mphi_vids = set(mphi.vids)
missing = list(clustered - mphi_vids)
if len(missing) == 0:
sys.exit()
variants = inputparser.load_ssms(ssmfn)
missing_reads = np.array([variants[V]['total_reads'] for V in missing]).astype(np.float)
assert np.all(missing_reads >= 1)
# Assign uniform probability based on total read count.
missing_logprobs = np.log(1 / missing_reads)
combined = mutphi.Mutphi(
vids = list(mphi.vids) + missing,
assays = mphi.assays,
logprobs = np.vstack((mphi.logprobs, missing_logprobs)),
)
return combined
def score(logprobs):
assert np.all(logprobs <= 0)
score = -np.sum(logprobs)
score /= logprobs.size
# Convert to bits.
score /= np.log(2)
return score
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('mutphi_fn')
args = parser.parse_args()
params = inputparser.load_params(args.params_fn)
orig_mphi = mutphi.load_mutphi(args.mutphi_fn)
mphi = impute(args.ssm_fn, params, orig_mphi)
mphi = sort_mutphi(mphi)
mutphi.write_mutphi(mphi, args.mutphi_fn)
old, new = score(orig_mphi.logprobs), score(mphi.logprobs)
#print('score_cmp', old, new, new - old, (new - old) > 0)
if __name__ == '__main__':
main()
```
#### File: comparison/lichee/convert_inputs.py
```python
import argparse
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import common
import inputparser
def write_snvs(variants, sampnames, garbage, snv_fn, normal_vaf=0.0):
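    # Write the LICHeE SNV table: one row per non-garbage variant, with a
    # synthetic 'Normal' sample (VAF = normal_vaf) prepended, and return each
    # variant's 1-based row index for use in the cluster file.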
sampnames = ['Normal'] + sampnames
snv_indices = {}
with open(snv_fn, 'w') as F:
print('#chr', 'position', 'description', *sampnames, sep='\t', file=F)
vids = common.sort_vids(variants.keys())
idx = 1
for vid in vids:
if vid in garbage:
continue
vaf = (variants[vid]['var_reads'] / variants[vid]['total_reads']).tolist()
vaf = [normal_vaf] + vaf
print('1', idx, vid, *vaf, sep='\t', file=F)
snv_indices[vid] = idx
idx += 1
return snv_indices
def extract_mat(variants, key):
mat = np.array([V[key] for V in variants])
return mat
def write_clusters(variants, clusters, snv_indices, cluster_fn, normal_vaf=0.0):
rows = []
for cluster in clusters:
cvars = [variants[V] for V in cluster]
var_reads = np.sum(extract_mat(cvars, 'var_reads'), axis=0)
total_reads = np.sum(extract_mat(cvars, 'total_reads'), axis=0)
cvaf = (var_reads / total_reads).tolist()
cvaf = [normal_vaf] + cvaf
sampmask = '0' + (len(cvaf) - 1)*'1'
snv_idxs = [str(snv_indices[V]) for V in common.sort_vids(cluster)]
rows.append([sampmask] + cvaf + [','.join(snv_idxs)])
with open(cluster_fn, 'w') as F:
for row in rows:
print(*row, sep='\t', file=F)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--uniform-proposal', action='store_true')
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('lichee_snv_fn')
parser.add_argument('lichee_cluster_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
params = inputparser.load_params(args.params_fn)
sampnames = params['samples']
clusters = params['clusters']
garbage = set(params['garbage'])
snv_indices = write_snvs(variants, sampnames, garbage, args.lichee_snv_fn)
write_clusters(variants, clusters, snv_indices, args.lichee_cluster_fn)
if __name__ == '__main__':
main()
```
#### File: comparison/neutree/make_mutrels.py
```python
import argparse
import numpy as np
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import evalutil
import neutree
def are_same_clusterings(clusterings):
# Tuple conversion probably isn't necessary, but it makes everything
# hashable, so it's probably a good idea.
_convert_to_tuples = lambda C: tuple([tuple(cluster) for cluster in C])
first_C = _convert_to_tuples(clusterings[0])
for C in clusterings[1:]:
if _convert_to_tuples(C) != first_C:
return False
return True
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('neutree_fn')
parser.add_argument('mutrel_fn')
args = parser.parse_args()
ntree = neutree.load(args.neutree_fn)
clusterings = ntree.clusterings
if are_same_clusterings(clusterings):
mrel = evalutil.make_mutrel_from_trees_and_single_clustering(ntree.structs, ntree.logscores, ntree.counts, clusterings[0])
else:
mrel = evalutil.make_mutrel_from_trees_and_unique_clusterings(ntree.structs, ntree.logscores, clusterings)
mrel = evalutil.add_garbage(mrel, ntree.garbage)
evalutil.save_sorted_mutrel(mrel, args.mutrel_fn)
if __name__ == '__main__':
main()
```
#### File: comparison/pairtree/eval_garbage.py
```python
import argparse
import json
import numpy as np
def sort_vids(vids):
return sorted(vids, key = lambda V: int(V[1:]))
def _load_params(fns):
params = {}
for name, fn in fns.items():
with open(fn) as F:
params[name] = json.load(F)
return params
def _parse_params(params):
truth_nongarb = set([vid for clust in params['truth']['clusters'] for vid in clust])
truth_garb = set(params['truth']['garbage'])
result_garb = set(params['result']['garbage'])
assert len(truth_nongarb & truth_garb) == 0
all_vids = sort_vids(truth_nongarb | truth_garb)
truth = np.array([vid in truth_garb for vid in all_vids])
result = np.array([vid in result_garb for vid in all_vids])
return (truth, result)
def _calc_metrics(truth, result):
notresult = np.logical_not(result)
nottruth = np.logical_not(truth)
mets = {
'tp': sum( truth & result),
'fp': sum(nottruth & result),
'tn': sum(nottruth & notresult),
'fn': sum( truth & notresult),
}
for K in mets.keys():
# Convert to native Python type from NumPy to permit JSON serialization.
# These will exist as a mix of native and NumPy types, so I need to allow
# for either.
mets[K] = getattr(mets[K], 'tolist', lambda: mets[K])()
return mets
def main():
parser = argparse.ArgumentParser(
description='HELLO',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--runid')
parser.add_argument('params_true_fn')
parser.add_argument('params_garbremoved_fn')
args = parser.parse_args()
params = _load_params({'truth': args.params_true_fn, 'result': args.params_garbremoved_fn,})
truth, result = _parse_params(params)
mets = _calc_metrics(truth, result)
print(json.dumps(mets))
if __name__ == '__main__':
main()
```
#### File: comparison/pairtree/fix_omegas.py
```python
import argparse
import numpy as np
import scipy.stats
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import inputparser
def _fix_omegas(ssms, print_bad=False):
# Note that SSMs are modified in place.
percentile = 1e-10
vaf_threshold = 0.5
# To flag only extreme cases, set this above 1.0 -- e.g., to 1.5.
phi_mle_threshold = 1.0
fixed_omega = 1.0
alpha0 = 0.5
beta0 = 0.5
bad = 0
total = 0
for vid, V in ssms.items():
vaf_alpha = alpha0 + V['var_reads']
vaf_beta = beta0 + V['ref_reads']
phi_mle = V['var_reads'] / (V['omega_v'] * V['total_reads'])
bad_omega = np.logical_and.reduce((
# Only flag variants that haven't already had `omega_v` adjusted.
np.isclose(0.5, V['omega_v']),
# Is the true VAF extremely unlikely to be less than 0.5?
scipy.stats.beta.cdf(vaf_threshold, vaf_alpha, vaf_beta) < percentile,
# Is the phi MLE likely to be too high?
phi_mle > phi_mle_threshold,
))
if print_bad and np.any(bad_omega):
print(np.vstack((
V['var_reads'][bad_omega],
V['total_reads'][bad_omega],
phi_mle[bad_omega],
)))
print('')
V['omega_v'][bad_omega] = fixed_omega
bad += np.sum(bad_omega)
total += len(bad_omega)
fixed_prop = bad / total
return fixed_prop
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('in_ssm_fn')
parser.add_argument('out_ssm_fn')
args = parser.parse_args()
np.set_printoptions(linewidth=400, precision=3, threshold=sys.maxsize, suppress=True)
np.seterr(divide='raise', invalid='raise', over='raise')
ssms = inputparser.load_ssms(args.in_ssm_fn)
fixed_prop = _fix_omegas(ssms, print_bad=False)
print('fixed_omegas=%s' % fixed_prop)
inputparser.write_ssms(ssms, args.out_ssm_fn)
if __name__ == '__main__':
main()
```
#### File: comparison/pairtree/subset_samples.py
```python
import argparse
import random
import json
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import inputparser
def _is_good(cand, last):
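    # Sample-selection heuristic: start with 'D', then 'R1', then alternate
    # between 'Diagnosis Xeno*' and 'Relapse Xeno*' samples.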
if last is None:
return 'D'
elif last == 'D':
return cand == 'R1'
elif last == 'R1':
return cand.startswith('Diagnosis Xeno')
elif last.startswith('Diagnosis Xeno'):
return cand.startswith('Relapse Xeno')
elif last.startswith('Relapse Xeno'):
return cand.startswith('Diagnosis Xeno')
else:
raise Exception('Unknown last choice: %s' % last)
def _select_subset(sampnames, C, must_include):
# Special-case the instance where the user wants every sample.
if len(sampnames) == C:
return list(sampnames)
assert set(must_include).issubset(set(sampnames))
# Duplicate.
subset = list(must_include)
candidates = set(sampnames) - set(subset)
last = subset[-1] if len(subset) > 0 else None
while len(subset) < C:
cands = list(candidates)
random.shuffle(cands)
for cand in cands:
if _is_good(cand, last):
subset.append(cand)
candidates.remove(cand)
last = cand
break
else:
raise Exception('Could not find any candidate after %s' % last)
return subset
def _select_samp_subsets(sampnames, counts, all_must_include=None):
subsets = []
if all_must_include is None:
all_must_include = []
for C in sorted(counts):
assert 0 < C <= len(sampnames)
must_include = subsets[-1] if len(subsets) > 0 else all_must_include
subsets.append(_select_subset(sampnames, C, must_include))
return subsets
def _filter_ssms(ssms, samp_idxs):
new_ssms = {}
for sidx, ssm in ssms.items():
# Duplicate so as to not modify original.
new_ssms[sidx] = dict(ssm)
for K in ('var_reads', 'ref_reads', 'total_reads', 'omega_v', 'vaf'):
new_ssms[sidx][K] = ssms[sidx][K][samp_idxs]
return new_ssms
def _find_idxs(sampnames, subset):
idxs = [sampnames.index(mem) for mem in subset]
return idxs
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--counts', required=True)
parser.add_argument('in_ssm_fn')
parser.add_argument('in_params_fn')
parser.add_argument('out_base')
args = parser.parse_args()
random.seed(1337)
counts = [int(C) for C in args.counts.split(',')]
assert len(counts) == len(set(counts))
ssms = inputparser.load_ssms(args.in_ssm_fn)
params = inputparser.load_params(args.in_params_fn)
sampnames = params['samples']
# Always include diagnosis sample, on assumption we're working with
# SJbALL022609 from Steph for the paper congraph figure.
subsets = _select_samp_subsets(sampnames, counts, all_must_include=['D'])
for subset in subsets:
idxs = _find_idxs(sampnames, subset)
new_ssms = _filter_ssms(ssms, idxs)
new_params = dict(params)
new_params['samples'] = subset
out_base = '%s_S%s' % (args.out_base, len(subset))
inputparser.write_ssms(new_ssms, out_base + '.ssm')
with open(out_base + '.params.json', 'w') as F:
json.dump(new_params, F)
if __name__ == '__main__':
main()
```
#### File: comparison/pastri/convert_inputs.py
```python
import sys
import os
import numpy as np
import argparse
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..', 'lib'))
import clustermaker
import inputparser
def extract_matrix(variants, key):
return np.array([variants[K][key] for K in sorted(variants.keys(), key = lambda vid: int(vid[1:]))])
def write_matrices(*matrices, outfn):
with open(outfn, 'w') as F:
for name, matrix in matrices:
print('> %s' % name, file=F)
print(matrix.shape, file=F)
for row in matrix:
print(*row, sep='\t', file=F)
# PASTRI's example matrices also have a blank trailing line, so mine will as well.
print('', file=F)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--uniform-proposal', action='store_true')
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
parser.add_argument('pastri_allele_counts_fn')
parser.add_argument('pastri_proposal_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
params = inputparser.load_params(args.params_fn)
clusters = params['clusters']
supervars = clustermaker.make_cluster_supervars(clusters, variants)
matrices = {
'var_reads': extract_matrix(supervars, 'var_reads'),
'total_reads': extract_matrix(supervars, 'total_reads'),
'alpha': extract_matrix(supervars, 'var_reads'),
'beta': extract_matrix(supervars, 'total_reads'),
}
if args.uniform_proposal:
matrices['alpha'][:] = 1
matrices['beta'][:] = 2
C_max = 15
matrices['alpha'] = matrices['alpha'][:C_max,]
matrices['beta'] = matrices['beta'][:C_max,]
write_matrices(('A', matrices['var_reads']), ('D', matrices['total_reads']), outfn = args.pastri_allele_counts_fn)
write_matrices(('Alpha', matrices['alpha']), ('Beta', matrices['beta']), outfn = args.pastri_proposal_fn)
if __name__ == '__main__':
main()
```
#### File: comparison/plotter/plot_entropy.py
```python
import pandas as pd
import argparse
import numpy as np
import plotly
import re
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import plotter
from plotter import MISSING
S_COLOURS = {S: plotter.hex2rgb(C) for S, C in {
1: '#ef553b',
3: '#636efa',
10: '#00cc96',
30: '#ab63fa',
100: '#ffa15a',
}.items()}
def partition(results, keys):
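    # Parse simulation parameters out of each run ID (tokens such as 'K10' or
    # 'S3'), attach them to `results` as new columns, and return the sorted
    # unique values observed for each requested key.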
keys = set(keys)
key_vals = {K: [] for K in keys}
for runid in results.runid:
        params = re.findall(r'[A-Z]\d+', runid)
for P in params:
key, val = P[0], int(P[1:])
if key not in keys:
continue
key_vals[key].append(val)
for K in key_vals.keys():
results[K] = key_vals[K]
unique = {K: sorted(set(V)) for K, V in key_vals.items()}
return (results, unique)
def plot(results, unique_keys, result_key, ytitle, shared_y=False, log_y=False):
K_vals = unique_keys['K']
S_vals = unique_keys['S']
fig = plotly.subplots.make_subplots(
rows = 1,
cols = len(K_vals),
subplot_titles = [plotter.pluralize(K, 'subclone') for K in K_vals],
shared_yaxes = shared_y,
x_title = 'Tissue samples',
y_title = ytitle,
)
min_y, max_y = np.inf, -np.inf
for Kidx, K in enumerate(K_vals):
for S in S_vals:
KS_rows = [row for idx, row in results.iterrows() if row['S'] == S and row['K'] == K]
if len(KS_rows) == 0:
continue
trace = {
'type': 'box',
'y': [row[result_key] for row in KS_rows],
'text': [row['runid'] for row in KS_rows],
'name': '%s' % S,
'marker': {
'outliercolor': plotter.format_colour(S_COLOURS[S], 0.5),
'color': plotter.format_colour(S_COLOURS[S], 0.5),
},
'line': {
'color': plotter.format_colour(S_COLOURS[S]),
},
}
#min_y = np.min([min_y, np.min(trace['y'])])
#max_y = np.max([max_y, np.max(trace['y'])])
#if log_y:
# trace['y'] = np.log10(trace['y'])
fig.add_trace(trace, row=1, col=Kidx+1)
fig.update_layout(
showlegend = False,
title_text = ytitle,
)
fig.update_xaxes(
tickangle = 0,
type = 'category',
)
if log_y:
fig.update_yaxes(type = 'log')
#floor, ceil = np.floor(np.log10(min_y)), np.ceil(np.log10(max_y)) + 1
#N = int(ceil - floor)
#tickvals = np.linspace(floor, ceil, num=(N+1)).astype(np.int)
#print(tickvals, floor, ceil)
#assert np.allclose(tickvals, tickvals.astype(np.int))
#fig.update_yaxes(
# tickmode = 'array',
# tickvals = tickvals,
# ticktext = ['%s' % T for T in tickvals],
#)
return fig
def main():
parser = argparse.ArgumentParser(
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--mutrels', dest='mutrel_fn')
parser.add_argument('entropy_fn')
parser.add_argument('plot_fn')
args = parser.parse_args()
results = pd.read_csv(args.entropy_fn)
if args.mutrel_fn is not None:
mrel_meth, mrel_fn = args.mutrel_fn.split('=', 1)
mrel = pd.read_csv(mrel_fn)
mrel = mrel.filter(('runid', mrel_meth))
mrel = mrel.rename(columns = {mrel_meth: 'mutrel'})
assert MISSING not in mrel['mutrel']
assert set(results['runid']) == set(mrel['runid'])
results = pd.merge(results, mrel, on='runid', how='outer')
results['H_trees_pairtree_3_minus_H_trees_truth'] = results['H_trees_pairtree_3'] - results['H_trees_truth']
results, unique_keys = partition(results, ('K', 'S'))
figs = {}
export_dims = {}
for name, title, shared_y, log_y in (
('true_trees', 'Trees consistent with<br>true lineage frequencies', False, True),
('jsd_parents_mean', 'Parent JSD between tree parents<br>and Pairtree parents (bits)', True, False),
('H_trees_pairtree_3_minus_H_trees_truth', 'Difference in tree entropy distribution<br>between Pairtree and truth (bits)', False, False),
('mutrel', 'Pairwise relation error (bits)', True, False),
):
figs[name] = plot(results, unique_keys, name, title, shared_y, log_y)
export_dims[name] = (700, 400)
figs['true_trees'].update_yaxes(rangemode = 'tozero')
#for idx, K in enumerate(unique_keys['K']):
# logmax = np.log10(np.max(results['true_trees'][results['K'] == K]))
# figs['true_trees'].update_yaxes(range = [-0.1, np.ceil(logmax)], row=1, col=idx+1)
plotter.write_figs(figs, args.plot_fn, export_dims)
if __name__ == '__main__':
main()
```
#### File: comparison/plotter/plot_single_vs_others.py
```python
import argparse
import numpy as np
import plotly.graph_objs as go
import sys
import os
sys.path.append(os.path.join(os.path.dirname(__file__), '..'))
import plotter
from plotter import MISSING
import plotly
def make_boxes(results, methods, single):
assert single in methods
others = plotter.sort_methods(set(methods) - set((single,)))
traces = []
for M in others:
points = [(row['runid'], row[M] - row[single]) for idx, row in results.iterrows() \
if MISSING not in (row[single], row[M])]
if len(points) == 0:
continue
runids, Y = zip(*points)
traces.append(go.Box(
y = Y,
text = runids,
name = '%s (%s runs)' % (plotter.HAPPY_METHOD_NAMES.get(M, M), len(points)),
boxpoints = 'all',
jitter = 0.3,
pointpos = 1.8,
))
assert len(traces) > 0
return traces
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('--template', default='seaborn')
parser.add_argument('--max-y')
parser.add_argument('--score-type', required=True, choices=('mutrel', 'mutphi', 'mutdistl1', 'mutdistl2'))
parser.add_argument('--baseline')
parser.add_argument('results_fn')
parser.add_argument('single_method')
parser.add_argument('plot_fn')
args = parser.parse_args()
results, methods = plotter.load_results(args.results_fn)
plot_type = 'box'
plotter.munge(results, methods, args.baseline, args.score_type, plot_type)
for key in ('K', 'S'):
results = plotter.augment(results, key)
boxes = make_boxes(results, methods, args.single_method)
figs = {
f'scores_{args.single_method}_vs_others': plotter.make_fig(
boxes,
args.template,
plotter.make_score_ytitle(args.score_type, args.plot_fn),
args.max_y,
log_y_axis = False,
layout_options = {
},
),
}
plotter.write_figs(figs, args.plot_fn)
if __name__ == '__main__':
main()
```
#### File: comparison/pwgs/count_clusters.py
```python
import argparse
import numpy as np
import sys
import os
sys.path += [
os.path.join(os.path.dirname(__file__), '..', '..', 'lib'),
os.path.expanduser('~/.apps/phylowgs')
]
from pwgsresults.result_loader import ResultLoader
import util
def count_clusters(results):
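    # Expected number of subclones: weight each sampled tree by the softmax of
    # its LLH and average the cluster counts (minus one for the root population).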
tidxs = np.array(sorted(results.tree_summary.keys()))
llhs = np.array([results.tree_summary[tidx]['llh'] for tidx in tidxs])
probs = util.softmax(llhs)
clusters = np.array([len(results.tree_summary[tidx]['populations']) for tidx in tidxs]) - 1
expected_clusters = np.sum(probs * clusters)
return expected_clusters
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('tree_summary',
help='JSON-formatted tree summaries')
parser.add_argument('mutation_list',
help='JSON-formatted list of mutations')
parser.add_argument('mutation_assignment',
help='JSON-formatted list of SSMs and CNVs assigned to each subclone')
args = parser.parse_args()
results = ResultLoader(args.tree_summary, args.mutation_list, args.mutation_assignment)
C = count_clusters(results)
print(C)
if __name__ == '__main__':
main()
```
#### File: comparison/sciclone/convert_outputs.py
```python
import argparse
import json
import csv
from collections import defaultdict
import os
import sys
sys.path.append(os.path.join(os.path.dirname(__file__), '..', '..'))
import inputparser
def convert_clusters(scresultsfn, varid_map):
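    # Map each SciClone row back to a variant ID via its 'chrom_pos' key and
    # group variants by SciClone cluster; rows whose cluster is 'NA' are
    # treated as garbage.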
clusters = defaultdict(list)
garbage = []
with open(scresultsfn) as F:
R = csv.DictReader(F, delimiter='\t')
for row in R:
chrom, pos, cluster = row['chr'], int(row['st']), row['cluster']
varid = varid_map['%s_%s' % (chrom, pos)]
if cluster == 'NA':
garbage.append(varid)
else:
cluster = int(cluster)
clusters[cluster].append(varid)
cids = sorted(clusters.keys())
assert set(cids) == set(range(1, len(cids) + 1))
clusters = [clusters[cid] for cid in cids]
return (clusters, garbage)
def build_variant_to_varid_map(variants):
varid_map = {'%s_%s' % (V['chrom'], V['pos']): int(V['id'][1:]) for V in variants.values()}
# Ensure no duplicate entries exist.
assert len(varid_map) == len(variants)
return varid_map
def add_missing_sex_variants_to_garbage(variants, clusters, garbage):
# I run SciClone without sex variants, since I don't know how to specify
    # the total number of loci according to their inputs -- maybe I need to make a
# quasi-CNA covering all of X and Y in males, but I looked into this and
# couldn't figure it out. As such, just mark all sex variants as garbage.
existing = set([V for C in clusters for V in C] + list(garbage))
vids = sorted([int(V[1:]) for V in variants.keys()])
for vid, var in variants.items():
vid = int(vid[1:])
if vid in existing:
continue
assert var['chrom'] in ('X', 'Y')
garbage.append(vid)
def write_results(clusters, garbage, params_fn_orig, params_fn_modified):
params = inputparser.load_params(params_fn_orig)
for K in ('clusters', 'garbage'):
if K in params:
del params[K]
params['clusters'] = clusters
params['garbage'] = garbage
with open(params_fn_modified, 'w') as F:
json.dump(params, F)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ssm_fn')
parser.add_argument('scresults_fn')
parser.add_argument('params_fn_orig')
parser.add_argument('params_fn_modified')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
varid_map = build_variant_to_varid_map(variants)
clusters, garbage = convert_clusters(args.scresults_fn, varid_map)
add_missing_sex_variants_to_garbage(variants, clusters, garbage)
write_results(clusters, garbage, args.params_fn_orig, args.params_fn_modified)
main()
```
#### File: pairtree/lib/lhmath_numba.py
```python
import ctypes
from numba.extending import get_cython_function_address
import numba
import binom
import numpy as np
from common import Models
from scipy import LowLevelCallable
import util
# TODO: Can I just replace this with `util.lbeta`? Would it be faster / less bullshit?
def _make_betainc():
addr = get_cython_function_address('scipy.special.cython_special', 'betainc')
betainc_type = ctypes.CFUNCTYPE(ctypes.c_double, ctypes.c_double, ctypes.c_double, ctypes.c_double)
func = betainc_type(addr)
return func
# Signature: betacdf(A, B, X)
betacdf = _make_betainc()
# Specifically for scalars.
@numba.njit
def _binom_logpmf(X, N, P):
val = binom.logpmf(
np.array([X], dtype=np.int64),
np.array([N], dtype=np.int64),
np.array([P], dtype=np.float64),
)
return val[0]
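# Bounds of the integral over the second variant's frequency (phi2), given phi1
# and the pairwise model being evaluated.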
@numba.njit
def _make_lower(phi1, midx):
if midx == Models.A_B:
return 0
elif midx == Models.B_A:
return phi1
elif midx == Models.diff_branches:
return 0
else:
return np.nan
@numba.njit
def _make_upper(phi1, midx):
if midx == Models.A_B:
return phi1
elif midx == Models.B_A:
return 1
elif midx == Models.diff_branches:
return 1 - phi1
else:
return np.nan
def _integral_separate_clusters(args):
phi1, V1_var_reads, V1_ref_reads, V1_omega, V2_var_reads, V2_ref_reads, V2_omega, midx, logsub = args
V1_total_reads = V1_var_reads + V1_ref_reads
logP = _binom_logpmf(
V1_var_reads,
V1_total_reads,
V1_omega * phi1,
)
upper = _make_upper(phi1, midx)
lower = _make_lower(phi1, midx)
A = V2_var_reads + 1
B = V2_ref_reads + 1
betainc_upper = betacdf(A, B, V2_omega * upper)
betainc_lower = betacdf(A, B, V2_omega * lower)
if util.isclose(betainc_upper, betainc_lower):
return 0
logP += np.log(betainc_upper - betainc_lower)
logP -= logsub
return np.exp(logP)
def _integral_same_cluster(args):
phi1, V1_var_reads, V1_ref_reads, V1_omega, V2_var_reads, V2_ref_reads, V2_omega, logsub = args
V1_total_reads = V1_var_reads + V1_ref_reads
V2_total_reads = V2_var_reads + V2_ref_reads
X = np.array([V1_var_reads, V2_var_reads])
N = np.array([V1_total_reads, V2_total_reads])
P = np.array([V1_omega * phi1, V2_omega * phi1])
B = binom.logpmf(X, N, P)
logP = np.log(np.sqrt(2)) + B[0] + B[1] - logsub
return np.exp(logP)
# See:
# https://stackoverflow.com/questions/51109429
# https://stackoverflow.com/questions/49683653
def _make_jitted_integrand(integrand):
jitted = numba.njit(integrand)
# This is the function that scipy.integrate.quad can call.
@numba.cfunc(numba.types.float64(numba.types.intc, numba.types.CPointer(numba.types.float64)))
def integrand_cfunc(N, XX):
vals = numba.carray(XX, N)
return jitted(vals)
return LowLevelCallable(integrand_cfunc.ctypes)
integral_separate_clusters = _make_jitted_integrand(_integral_separate_clusters)
integral_same_cluster = _make_jitted_integrand(_integral_same_cluster)
```
#### File: pairtree/lib/phi_fitter_projection.py
```python
import numpy as np
import common
import ctypes
import numpy.ctypeslib as npct
import subprocess
import os
import sys
MIN_VARIANCE = 1e-4
def _convert_adjm_to_adjlist(adjm):
adjm = np.copy(adjm)
assert np.all(np.diag(adjm) == 1)
# Make undirected.
adjm += adjm.T
np.fill_diagonal(adjm, 0)
assert np.all(np.logical_or(adjm == 0, adjm == 1))
adjl = []
for I, row in enumerate(adjm):
adjl.append(np.flatnonzero(row))
return adjl
def fit_etas(adj, superclusters, supervars):
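    # Compute per-sample estimates phi_hat = V / (omega * T) and a variance for
    # each, then project every sample's phi_hat onto the tree-constrained space
    # (via the projectppm code) to obtain the population frequencies eta.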
svids = common.extract_vids(supervars)
R = np.array([supervars[svid]['ref_reads'] for svid in svids])
V = np.array([supervars[svid]['var_reads'] for svid in svids])
T = R + V
omega = np.array([supervars[svid]['omega_v'] for svid in svids])
M, S = T.shape
phi_hat = V / (omega * T)
phi_hat = np.maximum(0, phi_hat)
phi_hat = np.minimum(1, phi_hat)
phi_hat = np.insert(phi_hat, 0, 1, axis=0)
# Make Quaid happy with `V_hat` and `T_hat`. I don't really understand why
# we're doing this, but I believe that, when `V = 0` and `T` is relatively
# small, this will result in `var_phi_hat` being larger than it would
# otherwise be. Without using `V_hat` and `T_hat`, `var_phi_hat` would be
# zero here, and so would be bumped up to the floor of 1e-8 below.
V_hat = V + 1
T_hat = T + 2
var_phi_hat = V_hat*(1 - V_hat/T_hat) / (T_hat*omega)**2
var_phi_hat = np.insert(var_phi_hat, 0, MIN_VARIANCE, axis=0)
var_phi_hat = np.maximum(MIN_VARIANCE, var_phi_hat)
assert var_phi_hat.shape == (M+1, S)
eta = np.zeros((M+1, S))
for sidx in range(S):
eta[:,sidx] = _fit_eta_S(adj, phi_hat[:,sidx], var_phi_hat[:,sidx])
assert not np.any(np.isnan(eta))
assert np.allclose(0, eta[eta < 0])
eta[eta < 0] = 0
return eta
def _fit_eta_S_nancheck(adj, phi_hat, var_phi_hat):
# sim_K100_S100_T50_M1000_G100_run1 revealed a weird issue where a particular
# call to _project_ppm() will return NaNs. This is reproducible insofar as
# running Pairtree again with the same seed ("1") will cause this failure at
# exactly the same point for exactly the same tree sample. In this instance,
# 101 of the 10,000 entries of the `eta` matrix will be NaN. However, calling
# _project_ppm() immediately after with the same inputs will work fine. Out
# of 705 simulations, each of which sampled 3000 trees (meaning >2M trees
# sampled), this was the only case where I saw this failure.
#
# To work around this, if the first call to _project_ppm() returns NaNs, make
# two additional attempts.
max_attempts = 3
for attempt in range(max_attempts):
eta = _fit_eta_S_ctypes(adj, phi_hat, var_phi_hat)
if not np.any(np.isnan(eta)):
return eta
print('eta contains NaN, retrying ...', file=sys.stderr)
raise Exception('eta still contains NaN after %s attempt(s)' % max_attempts)
def _project_ppm(adjm, phi_hat, var_phi_hat, root):
assert phi_hat.ndim == var_phi_hat.ndim == 1
inner_flag = 0
compute_eta = 1
M = len(phi_hat)
S = 1
eta = np.empty(M, dtype=np.double)
assert M >= 1
assert var_phi_hat.shape == (M,)
# This is called `gamma_init` in the C code from B&J.
gamma_init = var_phi_hat
phi_hat = phi_hat / gamma_init
adjl = _convert_adjm_to_adjlist(adjm)
deg = np.array([len(children) for children in adjl], dtype=np.short)
adjl_mat = np.zeros((M,M), dtype=np.short)
for rowidx, row in enumerate(adjl):
adjl_mat[rowidx,:len(row)] = row
# Method signature:
# realnumber tree_cost_projection(
# shortint inner_flag,
# shortint compute_M_flag,
# realnumber *M,
# shortint num_nodes,
# shortint T,
# realnumber *data,
# realnumber gamma_init[],
# shortint root_node,
# edge *tree,
# shortint *adjacency_mat,
# shortint *final_degrees,
# shortint *adj_list
# );
c_double_p = ctypes.POINTER(ctypes.c_double)
c_short_p = ctypes.POINTER(ctypes.c_short)
# Ensure arrays are C-contiguous.
eta = np.require(eta, requirements='C')
phi_hat = np.require(phi_hat, requirements='C')
gamma_init = np.require(gamma_init, requirements='C')
deg = np.require(deg, requirements='C')
adjl_mat = np.require(adjl_mat, requirements='C')
cost = _project_ppm.tree_cost_projection(
inner_flag,
compute_eta,
eta,
M,
S,
phi_hat,
gamma_init,
root,
None,
None,
deg,
adjl_mat,
)
return eta
def _init_project_ppm():
real_arr_1d = npct.ndpointer(dtype=np.float64, ndim=1, flags='C')
short_arr_1d = npct.ndpointer(dtype=ctypes.c_short, ndim=1, flags='C')
short_arr_2d = npct.ndpointer(dtype=ctypes.c_short, ndim=2, flags='C')
class Edge(ctypes.Structure):
_fields_ = [('first', ctypes.c_short), ('second', ctypes.c_short)]
c_edge_p = ctypes.POINTER(Edge)
c_short_p = ctypes.POINTER(ctypes.c_short)
lib_path = os.path.join(os.path.dirname(__file__), 'projectppm', 'bin', 'libprojectppm.so')
assert lib_path is not None, 'Could not find libprojectppm'
lib = ctypes.cdll.LoadLibrary(lib_path)
func = lib.tree_cost_projection
func.argtypes = [
ctypes.c_short,
ctypes.c_short,
real_arr_1d,
ctypes.c_short,
ctypes.c_short,
real_arr_1d,
real_arr_1d,
ctypes.c_short,
c_edge_p,
c_short_p,
short_arr_1d,
short_arr_2d,
]
func.restype = ctypes.c_double
_project_ppm.tree_cost_projection = func
_init_project_ppm()
def _fit_eta_S_ctypes(adj, phi_hat, var_phi_hat):
assert phi_hat.ndim == var_phi_hat.ndim == 1
M = len(phi_hat)
assert M >= 1
assert var_phi_hat.shape == (M,)
root = 0
eta = _project_ppm(adj, phi_hat, var_phi_hat, root)
return eta
def _prepare_subprocess_inputs(adjm, phi, prec_sqrt):
_arr2floatstr = lambda arr: ' '.join([f'{E:.10f}' for E in np.array(arr).flatten()])
_arr2intstr = lambda arr: ' '.join([f'{E:d}' for E in np.array(arr).flatten()])
assert phi.ndim == prec_sqrt.ndim == 1
M = len(phi)
assert prec_sqrt.shape == (M,)
assert M >= 1
root = 0
adjl = _convert_adjm_to_adjlist(adjm)
deg = [len(children) for children in adjl]
should_calc_eta = 1
calcphi_input = [
_arr2intstr((M, 1)),
_arr2floatstr(phi),
_arr2floatstr(prec_sqrt),
str(root),
_arr2intstr(deg),
]
calcphi_input += [_arr2intstr(row) for row in adjl]
calcphi_input += [
str(should_calc_eta),
'' # There will be a trailing newline, as B&J's code requires.
]
joined = '\n'.join(calcphi_input)
return joined
def _fit_eta_S_subprocess(adj, phi_hat_S, var_phi_hat_S):
prec_sqrt_S = 1 / np.sqrt(var_phi_hat_S)
calcphi_input = _prepare_subprocess_inputs(adj, phi_hat_S, prec_sqrt_S)
result = subprocess.run([os.path.join(os.path.dirname(__file__), 'projectppm', 'bin', 'projectppm')], input=calcphi_input, capture_output=True, encoding='UTF-8')
result.check_returncode()
lines = result.stdout.strip().split('\n')
assert len(lines) == 2
cost = float(lines[0])
eta_S = np.array([float(E) for E in lines[1].split(' ')])
assert len(eta_S) == len(phi_hat_S)
return eta_S
_fit_eta_S = _fit_eta_S_nancheck
```
#### File: pairtree/unused/test_lh.py
```python
import numpy as np
import lh
import util
import common
import sys
def create_vars():
variants = {
#'V1': {'var_reads': [18], 'total_reads': [100]},
#'V2': {'var_reads': [10], 'total_reads': [100]},
'V1': {'var_reads': [500], 'total_reads': [1000]},
'V2': {'var_reads': [100], 'total_reads': [1000]},
#'V1': {'var_reads': [1702], 'total_reads': [4069]},
#'V2': {'var_reads': [2500], 'total_reads': [19100]},
#'V1': {'var_reads': [0], 'total_reads': [200]},
#'V2': {'var_reads': [179], 'total_reads': [356]},
}
print(sorted(variants.items()))
S = 1
for vid, V in variants.items():
for K in ('var_reads', 'total_reads'):
V[K] = np.array(S*V[K]).astype(np.int)
V['id'] = vid
V['ref_reads'] = V['total_reads'] - V['var_reads']
V['vaf'] = V['var_reads'].astype(np.float) / V['total_reads']
V['omega_v'] = np.array(S*[0.5])
variants = {vid: common.convert_variant_dict_to_tuple(V) for vid, V in variants.items()}
return (variants['V1'], variants['V2'])
def main():
np.set_printoptions(linewidth=400, precision=3, threshold=sys.maxsize, suppress=True)
np.seterr(divide='raise', invalid='raise')
V1, V2 = create_vars()
estimators = (
# lh.calc_lh_quad will be slower than usual on its first invocation due to
# Numba JIT compilation. Don't be alarmed by seemingly poor runtime from it
# as a result.
lh.calc_lh_quad,
lh.calc_lh_mc_1D,
lh.calc_lh_mc_2D,
lh.calc_lh_mc_2D_dumb,
lh.calc_lh_grid,
)
max_estimator_len = max([len(M.__name__) for M in estimators])
for M in estimators:
M_name = M.__name__
M = util.time_exec(M)
evidence_per_sample = M(V1, V2)
evidence_per_sample[:,common.Models.garbage] = lh.calc_garbage(V1, V2)
evidence = np.sum(evidence_per_sample, axis=0)
print(
M_name.ljust(max_estimator_len),
'%.3f ms' % util.time_exec._ms,
evidence,
util.softmax(evidence),
sep='\t',
)
main()
```
#### File: pairtree/unused/test_phis.py
```python
import numpy as np
import argparse
import scipy.stats
import inputparser
import clustermaker
import phi_fitter
import common
MIN_FLOAT = np.finfo(np.float).min
def calc_binom_params(supervars):
svids = common.extract_vids(supervars)
V = np.array([supervars[svid]['var_reads'] for svid in svids])
R = np.array([supervars[svid]['ref_reads'] for svid in svids])
omega_v = np.array([supervars[svid]['omega_v'] for svid in svids])
assert np.all(omega_v == 0.5)
N = V + R
return (V, N, omega_v)
def _calc_llh_phi_binom(phi, supervars):
V, N, omega_v = calc_binom_params(supervars)
K, S = phi.shape
for arr in V, N, omega_v:
assert arr.shape == (K-1, S)
assert np.allclose(1, phi[0])
P = omega_v * phi[1:]
phi_llh = scipy.stats.binom.logpmf(V, N, P)
phi_llh = np.sum(phi_llh)
assert not np.isnan(phi_llh)
# Prevent LLH of -inf.
phi_llh = np.maximum(phi_llh, MIN_FLOAT)
return phi_llh
def calc_beta_params(supervars):
svids = common.extract_vids(supervars)
V = np.array([supervars[svid]['var_reads'] for svid in svids])
R = np.array([supervars[svid]['ref_reads'] for svid in svids])
omega_v = np.array([supervars[svid]['omega_v'] for svid in svids])
assert np.all(omega_v == 0.5)
# Since these are supervars, we can just take 2*V and disregard omega_v, since
# supervariants are always diploid (i.e., omega_v = 0.5).
alpha = 2*V + 1
# Must ensure beta is > 0.
beta = np.maximum(1, R - V + 1)
assert np.all(alpha > 0) and np.all(beta > 0)
return (alpha, beta)
def _calc_llh_phi_beta(phi, supervars):
alpha, beta = calc_beta_params(supervars)
K, S = phi.shape
assert alpha.shape == beta.shape == (K-1, S)
assert np.allclose(1, phi[0])
phi_llh = scipy.stats.beta.logpdf(phi[1:,:], alpha, beta)
phi_llh = np.sum(phi_llh)
# I had NaNs creep into my LLH when my alpha and beta params were invalid
# (i.e., when I had elements of beta that were <= 0).
assert not np.isnan(phi_llh)
# Prevent LLH of -inf.
phi_llh = np.maximum(phi_llh, MIN_FLOAT)
return phi_llh
def _adj2parents(adj):
adj = np.copy(adj)
np.fill_diagonal(adj, 0)
return np.argmax(adj[:,1:], axis=0)
def _parents2adj(parents):
M = len(parents) + 1
adjm = np.eye(M)
adjm[parents, range(1, M)] = 1
return adjm
def print_init(supervars, adj):
svids = common.extract_vids(supervars)
R = np.array([supervars[svid]['ref_reads'] for svid in svids])
V = np.array([supervars[svid]['var_reads'] for svid in svids])
T = R + V
omega = np.array([supervars[svid]['omega_v'] for svid in svids])
M, S = T.shape
phi_hat = V / (omega * T)
phi_hat = np.insert(phi_hat, 0, 1, axis=0)
print('parents', _adj2parents(adj))
print('V')
print(V)
print('T')
print(T)
print()
print_method('phi_hat', phi_hat, supervars)
print()
def print_method(method, phi, supervars):
llh_binom = _calc_llh_phi_binom(phi, supervars)
llh_beta = _calc_llh_phi_beta(phi, supervars)
print(f'{method} llh_binom={llh_binom:.3f} llh_beta={llh_beta:.3f}')
print(phi)
def main():
parser = argparse.ArgumentParser(
description='LOL HI THERE',
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('ssm_fn')
parser.add_argument('params_fn')
args = parser.parse_args()
variants = inputparser.load_ssms(args.ssm_fn)
params = inputparser.load_params(args.params_fn)
clusters = params['clusters']
supervars = clustermaker.make_cluster_supervars(clusters, variants)
superclusters = clustermaker.make_superclusters(supervars)
# Add empty initial cluster, which serves as tree root.
superclusters.insert(0, [])
M = len(superclusters)
iterations = 1000
parallel = 0
parents = [[0, 0, 0], [0, 1, 2]]
for P in parents:
adj = _parents2adj(P)
print_init(supervars, adj)
for method in ('projection', 'rprop', 'graddesc'):
phi, eta = phi_fitter._fit_phis(adj, superclusters, supervars, method, iterations, parallel)
# Sometimes the `projection` fitter will return zeros, which result in an
# LLH of -inf if the number of variant reads `V` is non-zero, since
# `Binom(X=V > 0, | N=V+R, p=0) = 0`. To avoid this, set a floor of 1e-6
# on phi values.
phi = np.maximum(1e-6, phi)
print_method(method, phi, supervars)
print()
if __name__ == '__main__':
main()
``` |
{
"source": "jmorenoamor/api-connect-code-quality",
"score": 2
} |
#### File: jmorenoamor/api-connect-code-quality/main.py
```python
import os
import sys
import oyaml as yaml
import json
class GithubAction():
def __init__(self):
pass
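    # Thin wrappers around GitHub Actions workflow commands written to stdout
    # (::debug::, ::warning::, ::error::, ::set-output).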
def gh_debug(self, message):
print(f"::debug::{message}")
def gh_warning(self, message):
print(f"::warning::{message}")
def gh_error(self, message):
print(f"::error::{message}")
def gh_status(self, parameter, value):
print(f"::set-output name={parameter}::{value}")
class APIConnectQualityCheck(GithubAction):
def __init__(self):
self.quality_errors = []
self.exceptions = {}
self.rules_ignored = False
def load_yaml(self, filename, encoding='utf-8'):
with open(filename, 'r', encoding=encoding) as file:
return yaml.safe_load(file)
def safeget(self, dct, *keys):
for key in keys:
try:
dct = dct[key]
except KeyError:
return None
return dct
def check(self, assertion, message, artifact, rule):
if not assertion:
if self.exceptions.get(rule, None):
self.gh_warning(f"{rule}: {artifact}: {message} - Ignorada por: {self.exceptions[rule]['reason']}")
self.rules_ignored = True
return "skipped"
else:
self.quality_errors.append(f"{rule}: {artifact}: {message}")
self.gh_warning(f"{rule}: {artifact}: {message}")
return "ko"
return "ok"
def check_product(self, product_path):
product = self.load_yaml(product_path)
product_name = product['info']['title']
        # Check that the product version consists only of major and minor
version = product['info']['version']
self.check(rule="P001",
assertion=len(version.split('.')) == 2,
artifact=product_name,
message=f"El código de versión '{version}' no es correcto.")
        # Check the subscription plans
self.check(rule="P002",
assertion=len(product['plans']) == 3,
artifact=product_name,
message=f"El número de planes de suscripción no es correcto.")
        # Check the product visibility
self.check(rule="P003",
assertion=self.safeget(product, 'visibility', 'view', 'type') == "public",
artifact=product_name,
message=f"La visibilidad del producto debe ser pública.")
        # Check the product subscription settings
self.check(rule="P004",
assertion=self.safeget(product, 'visibility', 'subscribe', 'type') == "authenticated",
artifact=product_name,
message=f"La suscripción´del producto debe ser solo para usuarios autenticados.")
        # Check the API reference format
for api_name in product['apis']:
api = product['apis'][api_name]
self.check(rule="P005",
assertion='name' not in api,
artifact=product_name,
message=f"El api {api_name} está referenciado por nombre.")
            # Strip the version number from the API name
clean_reference = api['$ref'].split('_')[0]
if not clean_reference.endswith(".yaml"):
clean_reference += ".yaml"
self.gh_debug(f"Cleaned {api['$ref']} to {clean_reference}")
api_path = os.path.join(os.path.dirname(product_path), clean_reference)
self.check(rule="P006",
assertion=os.path.exists(api_path),
artifact=product_name,
message=f"El API '{api_name}' referenciado no existe.")
self.check_api(api_path)
def check_api(self, api_path):
api = self.load_yaml(api_path)
api_name = f"{api['info']['title']} ({api['info']['x-ibm-name']})"
        # Check that the version consists only of major and minor
version = api['info']['version']
self.check(rule="A001",
assertion=len(version.split('.')) == 2,
artifact=api_name,
message=f"El código de versión '{version}' no es correcto.")
        # Check the security scheme
security_schema = {
"type": "apiKey",
"in": "header",
"name": "X-IBM-Client-Id"
}
client_id_header = self.safeget(api, 'securityDefinitions', 'clientIdHeader')
# self.gh_debug(json.dumps(client_id_header, indent=2))
self.check(rule="A002",
assertion=client_id_header is not None,
artifact=api_name,
message=f"El esquema de seguridad no está definido correctamente.")
self.check(rule="A003",
assertion=client_id_header == security_schema,
artifact=api_name,
message=f"El esquema de seguridad no está definido correctamente.")
        # Check the activity log
activity_schema = {
"success-content": "payload",
"error-content": "payload",
"enabled": True
}
activty_log = self.safeget(api, 'x-ibm-configuration', 'activity-log')
# self.gh_debug(json.dumps(activty_log, indent=2))
self.check(rule="A004",
assertion=activty_log is not None,
artifact=api_name,
message=f"El almacenamiento de actividad no está definido correctamente.")
self.check(rule="A005",
assertion=activty_log == activity_schema,
artifact=api_name,
message=f"El almacenamiento de actividad no está definido correctamente.")
        # Check the assembly policies
for policy in api['x-ibm-configuration']['assembly']:
self.check_assembly(api['x-ibm-configuration']['assembly'][policy])
def check_assembly(self, assembly):
for policy in assembly:
self.check_policy(policy)
def check_policy(self, policy):
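        # Each policy is a single-key dict of {policy_type: policy_body}. Recurse
        # into 'default' assemblies and 'switch' branches, and flag 'invoke'
        # policies that keep the incoming verb instead of setting it explicitly.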
policy_type = list(policy.keys())[0]
policy = policy[policy_type]
if policy_type != "default":
self.gh_debug(f"Checking policy {policy.get('title')}")
if policy_type == "gatewayscript":
pass
elif policy_type == "default":
self.check_assembly(policy)
elif policy_type == "switch":
for case in [p for p in policy['case'] if "condition" in p]:
self.check_assembly(case['execute'])
for case in [p for p in policy['case'] if "otherwise" in p]:
self.check_assembly(case['otherwise'])
elif policy_type == "invoke":
self.check(rule="A100",
assertion=policy.get('verb') != "keep",
artifact=policy.get('title'),
message="El verbo de las políticas invoke debe especificarse de forma explícita.")
def run(self):
product_path = os.getenv("INPUT_PRODUCT")
rules_path = os.getenv("INPUT_RULES")
if rules_path and os.path.exists(rules_path):
rules = self.load_yaml(rules_path)
self.exceptions = rules.get('exceptions', [])
else:
self.gh_error(f"No existe el fichero de reglas {rules_path}")
self.gh_status("result", "warning")
if not product_path or not os.path.exists(product_path):
self.gh_error(f"No existe el fichero de producto {product_path}")
self.gh_status("result", "error")
exit(99)
self.check_product(product_path)
if self.quality_errors:
self.gh_status("result", "error")
exit(99)
elif self.rules_ignored:
self.gh_status("result", "warning")
exit(0)
else:
self.gh_status("result", "ok")
exit(0)
if __name__ == "__main__":
action = APIConnectQualityCheck()
action.run()
``` |
{
"source": "jmorenoamor/pyapic",
"score": 2
} |
#### File: pyapic/pyapic/APIConnect.py
```python
import os
import sys
import oyaml as yaml
import json
import logging
import requests
import argparse
import traceback
import dateutil.parser
logger = logging.getLogger(__name__)
class APIConnectError(Exception):
def __init__(self, message):
super().__init__(message)
class APIConnect:
def __init__(self, manager, debug=False):
self.token = None
self.manager = manager
self.debug_requests = debug
self.verify_ssl = True
self.consumer_organization = False
def debug_response(self, response):
if self.debug_requests:
logger.debug(json.dumps(response.json(), indent=2))
def login(self, username, password, realm):
url = f'https://{self.manager}/api/token'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
}
payload = {
"username": username,
"password": password,
"realm": realm,
"client_id":"caa87d9a-8cd7-4686-8b6e-ee2cdc5ee267",
"client_secret":"3ecff363-7eb3-44be-9e07-6d4386c48b0b",
"grant_type":"password"
}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
self.token = response.json()['access_token']
return self.token
def keystores(self, organization):
url = f'https://{self.manager}/api/orgs/{organization}/keystores'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.get(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()['results']
    def create_keystore(self, organization, title, keystore, password=None, summary=None):
url = f'https://{self.manager}/api/orgs/{organization}/keystores'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
payload = {
"title": title,
"summary": summary,
"password": password,
"keystore": keystore,
}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def delete_keystore(self, organization, name):
url = f'https://{self.manager}/api/orgs/{organization}/keystores/{name}'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.delete(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return None
def tls_client_profiles(self, organization):
url = f'https://{self.manager}/api/orgs/{organization}/tls-client-profiles'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.get(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()['results']
def delete_tls_client_profile(self, organization, name):
url = f'https://{self.manager}/api/orgs/{organization}/tls-client-profiles?confirm={organization}'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.delete(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return None
def create_tls_client_profile(self, organization, title, keystore, summary=None):
url = f'https://{self.manager}/api/orgs/{organization}/tls-client-profiles'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
payload = {
"ciphers": [
"ECDHE_ECDSA_WITH_AES_256_GCM_SHA384",
"ECDHE_RSA_WITH_AES_256_GCM_SHA384",
"ECDHE_ECDSA_WITH_AES_256_CBC_SHA384",
"ECDHE_RSA_WITH_AES_256_CBC_SHA384",
"ECDHE_ECDSA_WITH_AES_256_CBC_SHA",
"ECDHE_RSA_WITH_AES_256_CBC_SHA",
"DHE_DSS_WITH_AES_256_GCM_SHA384",
"DHE_RSA_WITH_AES_256_GCM_SHA384",
"DHE_RSA_WITH_AES_256_CBC_SHA256",
"DHE_DSS_WITH_AES_256_CBC_SHA256",
"DHE_RSA_WITH_AES_256_CBC_SHA",
"DHE_DSS_WITH_AES_256_CBC_SHA",
"RSA_WITH_AES_256_GCM_SHA384",
"RSA_WITH_AES_256_CBC_SHA256",
"RSA_WITH_AES_256_CBC_SHA",
"ECDHE_ECDSA_WITH_AES_128_GCM_SHA256",
"ECDHE_RSA_WITH_AES_128_GCM_SHA256",
"ECDHE_ECDSA_WITH_AES_128_CBC_SHA256",
"ECDHE_RSA_WITH_AES_128_CBC_SHA256",
"ECDHE_ECDSA_WITH_AES_128_CBC_SHA",
"ECDHE_RSA_WITH_AES_128_CBC_SHA",
"DHE_DSS_WITH_AES_128_GCM_SHA256",
"DHE_RSA_WITH_AES_128_GCM_SHA256",
"DHE_RSA_WITH_AES_128_CBC_SHA256",
"DHE_DSS_WITH_AES_128_CBC_SHA256",
"DHE_RSA_WITH_AES_128_CBC_SHA",
"DHE_DSS_WITH_AES_128_CBC_SHA",
"RSA_WITH_AES_128_GCM_SHA256",
"RSA_WITH_AES_128_CBC_SHA256",
"RSA_WITH_AES_128_CBC_SHA"
],
"title": title,
"version": "1.0.0",
"summary": summary,
"insecure_server_connections": False,
"server_name_indication": True,
"keystore_url": keystore,
"protocols": [
"tls_v1.0",
"tls_v1.1",
"tls_v1.2"
]
}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def catalog_tls_client_profiles(self, organization, catalog):
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/configured-tls-client-profiles'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.get(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()['results']
def delete_configured_tls_client_profile(self, organization, catalog, name):
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/configured-tls-client-profiles/{name}?confirm={catalog}'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.delete(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return None
def add_tls_client_profile(self, organization, catalog, tls_client_profile):
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/configured-tls-client-profiles'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
payload = {
"tls_client_profile_url": tls_client_profile
}
response = requests.post(url, data=json.dumps(payload), headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def catalog_properties_create(self, organization, catalog, properties):
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/properties'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
payload = properties
response = requests.patch(url, data=json.dumps(payload), headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def catalog_properties(self, organization, catalog):
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/properties'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.get(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def catalog_products(self, organization, catalog):
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/products'
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.get(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def product_publish(self, organization, catalog, product, files, space=None):
if space:
url = f'https://{self.manager}/api/spaces/{organization}/{catalog}/{space}/publish'
else:
url = f'https://{self.manager}/api/catalogs/{organization}/{catalog}/publish'
headers = {
# 'Content-Type': 'multipart/form-data', ¡Do not set Content-Type!, requests will do it
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
response = requests.post(url, headers=headers, files=files, verify=self.verify_ssl, timeout=300)
self.debug_response(response)
response.raise_for_status()
return response.json()
def product_get(self, organization, catalog, product, version=None):
url = f"https://{self.manager}/api/catalogs/{organization}/{catalog}/products/{product}"
if version:
url += f"/{version}"
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
        response = requests.get(url, headers=headers, verify=self.verify_ssl)
self.debug_response(response)
response.raise_for_status()
return response.json()
def last_published(self, products):
# Get the last published product from a list
published = [p for p in products['results'] if p['state'] == "published"]
products = sorted(published, key=lambda x: dateutil.parser.isoparse(x['updated_at']), reverse=True)
return products[0] if products else None
def subscription_create(self, product, organization, catalog, application, plan, consumer_organization=None):
consumer_organization = consumer_organization or self.consumer_organization
if not consumer_organization:
raise APIConnectError("Consumer organization not specified")
url = f"https://{self.manager}/api/apps/{organization}/{catalog}/{consumer_organization}/{application}/subscriptions"
headers = {
'Content-Type': 'application/json',
"Accept": "application/json",
"Authorization": f"Bearer {self.token}"
}
payload = {
# "title": "{product} {application} {plan}",
"plan": plan,
"product_url": product
}
response = requests.post(url, headers=headers, data=json.dumps(payload), verify=self.verify_ssl, timeout=300)
self.debug_response(response)
response.raise_for_status()
return response.json()
    def get_subscriptions(self, organization, catalog, application, consumer_organization=None):
        consumer_organization = consumer_organization or self.consumer_organization
        if not consumer_organization:
            raise APIConnectError("Consumer organization not specified")
        url = f"https://{self.manager}/api/apps/{organization}/{catalog}/{consumer_organization}/{application}/subscriptions"
        headers = {
            'Content-Type': 'application/json',
            "Accept": "application/json",
            "Authorization": f"Bearer {self.token}"
        }
        response = requests.get(url, headers=headers, verify=self.verify_ssl)
        self.debug_response(response)
        response.raise_for_status()
        return response.json()
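# --- Usage sketch (added for illustration, not part of the original module) ---
# The manager host, credentials, realm and organization below are placeholder
# values and must be replaced for a real IBM API Connect instance.
if __name__ == "__main__":
    apic = APIConnect(manager="apic-manager.example.com", debug=True)
    apic.verify_ssl = False  # commonly disabled against self-signed test managers
    apic.login(username="admin", password="changeme", realm="admin/default-idp-1")
    for keystore in apic.keystores(organization="my-org"):
        print(keystore.get("name"), keystore.get("title"))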
``` |
{
"source": "jmorenobl/django-hattori",
"score": 3
} |
#### File: django-hattori/tests/models.py
```python
from django.db import models
class Person(models.Model):
first_name = models.CharField(max_length=100)
last_name = models.CharField(max_length=100)
class Meta:
verbose_name = 'Person'
verbose_name_plural = 'Persons'
def __str__(self):
return '{} {}'.format(self.first_name, self.last_name)
```
#### File: django-hattori/tests/test_command.py
```python
import pytest
from model_mommy import mommy
from model_mommy.recipe import seq
from tests.models import Person
from django.core.management import call_command
@pytest.mark.django_db
class TestCommand(object):
@staticmethod
def data(num_items=10):
return mommy.make(Person, first_name=seq('first_name-'), last_name=seq('last_name-'), _quantity=num_items)
def test_data_creation(self):
self.data()
assert Person.objects.count() == 10
last = Person.objects.last()
assert last.first_name == 'first_name-10'
@pytest.mark.parametrize('num_items', [
20000,
40000,
])
def test_simple_command(self, num_items):
self.data(num_items=num_items)
assert Person.objects.filter(first_name__startswith='first_name-').exists()
call_command('anonymize_db')
assert not Person.objects.filter(first_name__startswith='first_name-').exists()
``` |
{
"source": "jmorenov/BBOMO",
"score": 3
} |
#### File: jmorenov/BBOMO/ZDT6.py
```python
import math
def F1(x):
return 1 - math.exp(-4*x[0]) * math.pow(math.sin(6 * math.pi * x[0]), 6)
def F2(x):
    def g(x2m):
        # Same computation as before, without shadowing the built-in ``sum``.
        return 1 + 9 * math.pow(sum(x2m) / (len(x) - 1), 0.25)
def h(f1, g):
return 1 - (f1 / g) * (f1 / g)
x2m = x[1:len(x)]
return g(x2m) * h(F1(x), g(x2m))
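# Hedged example (added for illustration): evaluate both ZDT6 objectives on a
# 10-dimensional decision vector. With x[1:] all zero, g(x) equals 1 and the
# point lies on the Pareto-optimal front, so F2 reduces to 1 - F1(x)**2.
if __name__ == "__main__":
    x = [0.5] + [0.0] * 9
    print("F1 =", F1(x))
    print("F2 =", F2(x))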
``` |
{
"source": "jmorenov/RA-practica1-vrep",
"score": 3
} |
#### File: RA-practica1-vrep/code/reactivecontrollerv110.py
```python
import time
import numpy as np
import random
PROXIMITY_LIMIT_ALL = 0.1
PROXIMITY_LIMIT_FRONT = 0.7
VELOCITY_BASE = 1.0
VELOCITY_MAX = 2.0
VELOCITY_MIN = 0.5
TIME_STEP = 0.010
def controller(remoteConnection):
lspeed = +VELOCITY_BASE
rspeed = +VELOCITY_BASE
endSimulation = False
    while remoteConnection.isConnectionEstablished() and not endSimulation:
        proximitySonars = np.array(remoteConnection.readAllSensors(8))
        if proximitySonars.min() <= PROXIMITY_LIMIT_ALL:
            minProximityIndex = proximitySonars.argmin()
            if minProximityIndex == 3 or minProximityIndex == 4:
                remoteConnection.printMessage('Collision detected! Simulation ended')
                lspeed = 0.0
                rspeed = 0.0
                endSimulation = True  # stop the control loop after this iteration
else:
minProximityOrientation = remoteConnection.getSensorAngle(minProximityIndex + 1)
randomOrientation = random.uniform(-10, +10)
remoteConnection.setAngle(minProximityOrientation + randomOrientation)
elif proximitySonars[3] <= PROXIMITY_LIMIT_FRONT or proximitySonars[4] <= PROXIMITY_LIMIT_FRONT:
maxDistanceIndex = proximitySonars.argmax()
if (proximitySonars[maxDistanceIndex] == 1):
maxDistanceIndexes = np.where(proximitySonars == 1)[0]
maxDistanceIndex = random.randint(0, len(maxDistanceIndexes) - 1)
maxDistanceOrientation = remoteConnection.getSensorAngle(maxDistanceIndex + 1)
randomOrientation = random.uniform(0, maxDistanceOrientation)
remoteConnection.setAngle(randomOrientation)
else:
if lspeed < VELOCITY_MAX and rspeed < VELOCITY_MAX and proximitySonars.min() == 1:
lspeed += 0.1
rspeed += 0.1
elif lspeed > VELOCITY_MIN and rspeed > VELOCITY_MIN and proximitySonars.min() != 1:
lspeed -= 0.1
rspeed -= 0.1
remoteConnection.setLeftMotorVelocity(lspeed)
remoteConnection.setRightMotorVelocity(rspeed)
time.sleep(TIME_STEP)
``` |
{
"source": "jmorenov/RA-practica2-opencv",
"score": 3
} |
#### File: jmorenov/RA-practica2-opencv/images.py
```python
import cv2
import numpy as np
def get_hist(img):
hist = cv2.calcHist([img], [0], None, [256], [0, 256])
cv2.normalize(hist, hist, alpha=0, beta=1, norm_type=cv2.NORM_MINMAX, dtype=cv2.CV_32F)
return hist
def get_hists_of_images_node(node, frames_per_node):
directory = 'Frames/Nodo_' + str(node)
hists = []
for i in range(1, frames_per_node + 1):
imagePath = directory + '/imagen' + str(i) + '.jpg'
image = read(imagePath)
hist = get_hist(image)
hists.append(hist)
return hists
def load_frames(number_of_nodes, frames_per_node):
responses = []
data = []
for i in range(1, number_of_nodes + 1):
        print('Loading images: Node ' + str(i) + '...')
hists = get_hists_of_images_node(i, frames_per_node)
responses = np.concatenate((responses, [i] * frames_per_node))
data.extend(hists)
data = np.float32(data)
responses = np.float32(responses)
return data, responses
def read(image_path):
return cv2.imread(image_path, 0)
def rotate_image(image, angle):
(h, w) = image.shape[:2]
center = (w / 2, h / 2)
M = cv2.getRotationMatrix2D(center, angle, 1.0)
rotated = cv2.warpAffine(image, M, (w, h))
return rotated
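# Illustrative sketch only (added): it assumes the 'Frames/Nodo_1/imagen1.jpg'
# layout expected by get_hists_of_images_node and an OpenCV 3+ build for
# cv2.HISTCMP_CORREL. It rotates one frame and compares the two histograms.
if __name__ == '__main__':
    original = read('Frames/Nodo_1/imagen1.jpg')
    rotated = rotate_image(original, 45)
    similarity = cv2.compareHist(get_hist(original), get_hist(rotated), cv2.HISTCMP_CORREL)
    print('Histogram correlation: ' + str(similarity))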
``` |
{
"source": "jmorenov/RA-practica3-RTTStar",
"score": 3
} |
#### File: RA-practica3-RTTStar/code/main.py
```python
import rrt_planning
import map1 as map
show_animation = True
def main():
print("start RRT path planning")
rrt = rrt_planning.RRT(start=map.start, goal=map.goal,
randArea=map.randArea, obstacleList=map.obstacleList)
path = rrt.Planning(animation=show_animation)
# Draw final path
if show_animation:
rrt.DrawFinalGraph(path)
if __name__ == '__main__':
main()
```
#### File: RA-practica3-RTTStar/code/rrt_planning.py
```python
import matplotlib.pyplot as plt
import random
import math
import copy
show_animation = True
class RRT():
def __init__(self, start, goal, obstacleList,
randArea, expandDis=1.0, goalSampleRate=5, maxIter=500):
"""
        start: Start Position [x, y]
        goal: Goal Position [x, y]
        obstacleList: Obstacle Positions [[x, y, size], ...]
        randArea: Random Sampling Area [min, max]
"""
self.start = Node(start[0], start[1])
self.end = Node(goal[0], goal[1])
self.minrand = randArea[0]
self.maxrand = randArea[1]
self.expandDis = expandDis
self.goalSampleRate = goalSampleRate
self.maxIter = maxIter
self.obstacleList = obstacleList
def Planning(self, animation=True):
self.nodeList = [self.start]
while True:
# Random Sampling
if random.randint(0, 100) > self.goalSampleRate:
rnd = [random.uniform(self.minrand, self.maxrand), random.uniform(
self.minrand, self.maxrand)]
else:
rnd = [self.end.x, self.end.y]
# Find nearest node
nind = self.GetNearestListIndex(self.nodeList, rnd)
# expand tree
nearestNode = self.nodeList[nind]
theta = math.atan2(rnd[1] - nearestNode.y, rnd[0] - nearestNode.x)
newNode = copy.deepcopy(nearestNode)
newNode.x += self.expandDis * math.cos(theta)
newNode.y += self.expandDis * math.sin(theta)
newNode.parent = nind
if not self.__CollisionCheck(newNode, self.obstacleList):
continue
self.nodeList.append(newNode)
# check goal
dx = newNode.x - self.end.x
dy = newNode.y - self.end.y
d = math.sqrt(dx * dx + dy * dy)
if d <= self.expandDis:
print("Goal!!")
break
if animation:
self.DrawGraph(rnd)
path = [[self.end.x, self.end.y]]
lastIndex = len(self.nodeList) - 1
while self.nodeList[lastIndex].parent is not None:
node = self.nodeList[lastIndex]
path.append([node.x, node.y])
lastIndex = node.parent
path.append([self.start.x, self.start.y])
return path
def DrawFinalGraph(self, path):
self.DrawGraph()
plt.plot([x for (x, y) in path], [y for (x, y) in path], '-r')
plt.grid(True)
plt.show()
def DrawGraph(self, rnd=None):
plt.clf()
if rnd is not None:
plt.plot(rnd[0], rnd[1], "^k")
for node in self.nodeList:
if node.parent is not None:
plt.plot([node.x, self.nodeList[node.parent].x], [
node.y, self.nodeList[node.parent].y], "-g")
for (ox, oy, size) in self.obstacleList:
plt.plot(ox, oy, "ok", ms=30 * size)
plt.plot(self.start.x, self.start.y, "xr")
plt.plot(self.end.x, self.end.y, "xr")
plt.axis([-2, 15, -2, 15])
plt.grid(True)
plt.pause(0.01)
def GetNearestListIndex(self, nodeList, rnd):
dlist = [(node.x - rnd[0]) ** 2 + (node.y - rnd[1])
** 2 for node in nodeList]
minind = dlist.index(min(dlist))
return minind
def __CollisionCheck(self, node, obstacleList):
for (ox, oy, size) in obstacleList:
dx = ox - node.x
dy = oy - node.y
d = math.sqrt(dx * dx + dy * dy)
if d <= size:
return False # collision
return True # safe
class Node():
def __init__(self, x, y):
self.x = x
self.y = y
self.parent = None
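# Stand-alone usage sketch (added for illustration; code/main.py is the
# intended entry point and loads the map from a module instead).
# Obstacles follow the [x, y, size] convention used by __CollisionCheck.
if __name__ == '__main__':
    obstacles = [(5, 5, 1), (3, 6, 2), (7, 5, 2)]
    rrt = RRT(start=[0, 0], goal=[10, 10], randArea=[-2, 15], obstacleList=obstacles)
    found_path = rrt.Planning(animation=False)
    print("Path found with %d waypoints" % len(found_path))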
``` |
{
"source": "jmorgadov/clismo",
"score": 4
} |
#### File: clismo/automata/state.py
```python
from __future__ import annotations
from typing import Any, Callable, List
from clismo.automata.transition import Transition
class State:
"""
A state of an automata.
Attributes
----------
name : str
The name of the state.
transitions : List[Transition]
The transitions of the state.
"""
def __init__(self, name: str, on_visited: Callable = None) -> None:
self.name = name
self.transitions: List[Transition] = []
self.automata = None
self.on_visited = on_visited
def visited(self):
"""
Calls the ``on_visited`` callback if it is defined.
"""
if self.on_visited:
self.on_visited()
def substitute(self, other_state: State) -> State:
"""
Substitute the state with another one.
Parameters
----------
other_state : State
The other state.
Returns
-------
State
The substituted state.
"""
self.name = other_state.name
self.transitions = other_state.transitions
return self
def merge(self, other_state: State) -> State:
"""
Merge the state with another one.
Parameters
----------
other_state : State
The other state.
Returns
-------
State
The merged state.
"""
for trans in other_state.transitions:
to_state = self if trans.to_state is other_state else trans.to_state
new_t = Transition(
self,
to_state,
trans.condition,
trans.action,
trans.negated,
)
self.transitions.append(new_t)
return self
def next_state(self, cond: Any):
"""
Get the next state given a condition.
Parameters
----------
cond : Any
The condition.
Returns
-------
State
The next state.
"""
for trans in self.transitions:
if trans.check_condition(cond):
return trans.to_state
return None
def show(self) -> None:
"""
Show the state.
"""
print(self)
for trans in self.transitions:
print(f" {trans}")
def copy(self) -> State:
"""
Copy the state.
Returns
-------
State
The copied state.
"""
new_state = State(self.name)
new_state.transitions = self.transitions
return new_state
def __str__(self) -> str:
return self.name
def __repr__(self):
return str(self)
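# Minimal demonstration (added): only methods that do not depend on the
# Transition constructor are exercised here.
if __name__ == "__main__":
    q0, q1 = State("q0"), State("q1")
    q0.substitute(q1)   # q0 now takes q1's name and transitions
    clone = q0.copy()   # shallow copy sharing the transition list
    clone.show()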
```
#### File: clismo/builtin/cs_built_in_functions.py
```python
import builtins
import math
import random
from time import sleep
import clismo.exceptions as excpt
from clismo.lang.type import Instance, Type
__BUILTINS = {}
cs_float = Type.get("float")
cs_int = Type.get("int")
cs_str = Type.get("str")
cs_bool = Type.get("bool")
cs_none = Type.get("none")
cs_list = Type.get("list")
def builtin_func(func_name, return_type):
def deco(func):
__BUILTINS[func_name] = (func, return_type)
return func
return deco
def resolve(func_name):
return __BUILTINS.get(func_name, None)
# region Numeric Functions
@builtin_func("abs", cs_float)
def cs_abs(x: cs_float):
if x.type.subtype(cs_float):
return cs_float(builtins.abs(x.value))
raise TypeError("abs() argument must be a float")
@builtin_func("bin", cs_str)
def cs_bin(x: cs_int):
if x.type.subtype(cs_int):
return cs_str(builtins.bin(x.value))
raise TypeError("bin() argument must be an int")
@builtin_func("round", cs_float)
def cs_round(x, ndigits=None):
if x.type.subtype(cs_float):
if ndigits is None:
return cs_float(builtins.round(x.value))
if ndigits.type.subtype(cs_int):
return cs_float(builtins.round(x.value, ndigits.value))
raise TypeError("round() second argument must be an int")
raise TypeError("round() argument must be a float")
@builtin_func("rand", cs_float)
def cs_rand():
return cs_float(random.random())
@builtin_func("randint", cs_int)
def cs_randint(a, b):
if a.type.subtype(cs_int) and b.type.subtype(cs_int):
return cs_int(random.randint(a.value, b.value))
raise TypeError("randint() arguments must be int")
@builtin_func("norm", cs_float)
def cs_norm():
return cs_float(random.normalvariate(0, 1))
@builtin_func("sqrt", cs_float)
def cs_sqrt(x):
if x.type.subtype(cs_float):
return cs_float(math.sqrt(x.value))
raise TypeError("sqrt() argument must be a float")
@builtin_func("log", cs_float)
def log(x, base):
if x.type.subtype(cs_float):
if base.type.subtype(cs_int):
return cs_float(math.log(x.value, base.value))
raise TypeError("log() second argument must be an int")
raise TypeError("log() argument must be a float")
@builtin_func("log2", cs_float)
def log2(x):
if x.type.subtype(cs_float):
return cs_float(math.log2(x.value))
raise TypeError("log2() argument must be a float")
@builtin_func("exp", cs_float)
def exp(x):
if x.type.subtype(cs_float):
return cs_float(math.exp(x.value))
raise TypeError("exp() argument must be a float")
@builtin_func("ceil", cs_int)
def ceil(x):
if x.type.subtype(cs_int):
return cs_int(math.ceil(x.value))
raise TypeError("ceil() argument must be an int")
@builtin_func("floor", cs_int)
def floor(x):
if x.type.subtype(cs_int):
return cs_int(math.floor(x.value))
raise TypeError("floor() argument must be an int")
@builtin_func("sin", cs_float)
def sin(x):
if x.type.subtype(cs_float):
return cs_float(math.sin(x.value))
raise TypeError("sin() argument must be a float")
@builtin_func("cos", cs_float)
def cos(x):
if x.type.subtype(cs_float):
return cs_float(math.cos(x.value))
raise TypeError("cos() argument must be a float")
@builtin_func("tan", cs_float)
def tan(x):
if x.type.subtype(cs_float):
return cs_float(math.tan(x.value))
raise TypeError("tan() argument must be a float")
# endregion
# region List Functions
@builtin_func("len", cs_int)
def cs_len(x):
if x.type.subtype(cs_list):
return cs_int(len(x.value))
raise TypeError("len() argument must be a list")
@builtin_func("get_at", lambda x, i: Type.get(x.type_name[:-5]))
def cs_get_at(x, index):
if x.type.subtype(cs_list):
if index.type.subtype(cs_int):
return x.value[index.value]
raise TypeError("get_at() second argument must be an int")
raise TypeError("get_at() argument must be a list")
@builtin_func("set_at", cs_none)
def cs_set_at(x, index, obj):
if x.type.subtype(cs_list):
if index.type.subtype(cs_int):
x.value[index.value] = obj
return cs_none()
raise TypeError("set_at() second argument must be an int")
raise TypeError("set_at() argument must be a list")
@builtin_func("append", lambda x, o: x)
def cs_append(x, obj):
if x.type.subtype(cs_list):
x.value.append(obj)
return x
raise TypeError("append() argument must be a list")
@builtin_func("list", lambda x: Type.get(f"{x}_list"))
def cs_new_list(type_):
list_type = Type.get(f"{type_.value}_list")
return Instance(list_type, [])
# endregion
# region String Functions
@builtin_func("startswith", cs_bool)
def cs_startswith(x, y):
if x.type.subtype(cs_str) and y.type.subtype(cs_str):
        return cs_bool(x.value.startswith(y.value))
raise TypeError("startswith() arguments must be strings")
@builtin_func("isdigit", cs_bool)
def cs_isdigit(x):
if x.type.subtype(cs_str):
        return cs_bool(x.value.isdigit())
raise TypeError("isdigit() argument must be a str")
@builtin_func("lower", cs_str)
def cs_lower(x):
if x.type.subtype(cs_str):
        return cs_str(x.value.lower())
raise TypeError("lower() argument must be a str")
@builtin_func("capitalize", cs_str)
def cs_capitalize(x):
if x.type.subtype(cs_str):
        return cs_str(x.value.capitalize())
raise TypeError("capitalize() argument must be a str")
# endregion
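# Hedged usage sketch (added): resolve a builtin by name and call it with
# Clismo instances; ``resolve`` returns the (callable, return_type) pair
# registered by the ``builtin_func`` decorator above.
if __name__ == "__main__":
    func, ret_type = resolve("sqrt")
    result = func(cs_float(2.0))
    print(ret_type.type_name, result.value)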
```
#### File: clismo/clismo/cs_ast.py
```python
from __future__ import annotations
import enum
from typing import Any, List
from clismo.compiler.generic_ast import AST
class Operator(enum.Enum):
ADD = enum.auto()
SUB = enum.auto()
MUL = enum.auto()
DIV = enum.auto()
MOD = enum.auto()
POW = enum.auto()
AND = enum.auto()
OR = enum.auto()
LSHIFT = enum.auto()
RSHIFT = enum.auto()
BIT_AND = enum.auto()
BIT_XOR = enum.auto()
BIT_OR = enum.auto()
FLOORDIV = enum.auto()
EQ = enum.auto()
NOT_EQ = enum.auto()
LT = enum.auto()
LTE = enum.auto()
GT = enum.auto()
GTE = enum.auto()
NOT = enum.auto()
UADD = enum.auto()
USUB = enum.auto()
INVERT = enum.auto()
class Program(AST):
__slots__ = ("stmts",)
def __init__(self, stmts: List[ObjDef]):
self.stmts = stmts
class ObjDef(AST):
def __init__(
self,
obj_type: str,
name: str,
body: List[AST],
):
self.obj_type = obj_type
self.name = name
self.body = body
class ClientDef(ObjDef):
__slots__ = ("name", "body")
def __init__(self, name, body):
super().__init__("client", name, body)
class ServerDef(ObjDef):
__slots__ = ("name", "body")
def __init__(self, name, body):
super().__init__("server", name, body)
class StepDef(ObjDef):
__slots__ = ("name", "body")
def __init__(self, name, body):
super().__init__("step", name, body)
class SimulationDef(ObjDef):
__slots__ = ("name", "body")
def __init__(self, name, body):
super().__init__("simulation", name, body)
class Attr(AST):
__slots__ = ("name", "value")
def __init__(self, name: str, value: Expr):
self.name = name
self.value = value
class Function(AST):
__slots__ = ("name", "info", "body")
def __init__(self, name: str, info: List[Name], body: List[Stmt]):
self.name = name
self.info = info
self.body = body
class Stmt(AST):
pass
class Assign(Stmt):
__slots__ = ("name", "value", "decl")
def __init__(self, name: str, value: Expr, decl: bool = False):
self.name = name
self.value = value
self.decl = decl
class If(Stmt):
__slots__ = ("cond", "then", "els")
def __init__(self, cond: Expr, then: List[Stmt], els: List[Stmt] = None):
self.cond = cond
self.then = then
self.els = els or []
class Return(Stmt):
__slots__ = ("value",)
def __init__(self, value: Expr):
self.value = value
class Loop(Stmt):
__slots__ = ("target", "start", "end", "step", "body")
def __init__(
self,
target: str,
body: List[Stmt],
start: Expr = None,
end: Expr = None,
step: Expr = None,
):
self.target = target
self.body = body
self.start = start
self.end = end
self.step = step
class EndLoop(Stmt):
pass
class NextLoop(Stmt):
pass
class Expr(AST):
pass
class Call(Expr):
__slots__ = ("name", "args")
def __init__(self, name: str, args: List[Expr] = None):
self.name = name
self.args = args or []
class BinOp(Expr):
__slots__ = ("left", "op", "right")
def __init__(self, left: Expr, op, right: Expr):
self.left = left
self.op = op
self.right = right
class UnaryOp(Expr):
__slots__ = ("op", "expr")
def __init__(self, op, expr: Expr):
self.op = op
self.expr = expr
class ListExpr(Expr):
__slots__ = ("elements",)
def __init__(self, elements: List[Expr]):
self.elements = elements
class Name(Expr):
__slots__ = ("name",)
def __init__(self, name: str):
self.name = name
def show(self):
return f"Name: {self.name}"
class Constant(Expr):
__slots__ = ("value",)
def __init__(self, value: Any):
self.value = value
def show(self):
return f"Constant: {self.value}"
```
#### File: clismo/lang/type.py
```python
from __future__ import annotations
from typing import Any, Callable, Dict, List
class Type:
cs_types = {}
def __init__(self, type_name: str, parent: Type = None):
self.type_name = type_name
self.attributes: Dict[str, Any] = {}
self.parent = parent
Type.cs_types[type_name] = self
def add_attribute(self, attribute: str, default: Any = None):
self.attributes[attribute] = default
def method(self, method_name: str):
def method_wrapper(func):
self.add_attribute(method_name, func)
return func
return method_wrapper
    def get_attribute(self, attribute_name: str):
        # ``attributes`` maps attribute names to values, so look the name up
        # directly instead of iterating over the keys.
        return self.attributes.get(attribute_name, None)
def get_attr_dict(self):
all_attrs = {}
if self.parent:
all_attrs.update(self.parent.get_attr_dict())
all_attrs.update(self.attributes)
return all_attrs
def subtype(self, other: Type):
if self.type_name == "any":
return True
if not isinstance(other, tuple):
other = (other,)
for subtype in other:
if self.type_name == subtype.type_name:
return True
if self.parent is None:
return False
return self.parent.subtype(other)
def __call__(self, value):
return Instance(self, value)
@staticmethod
def new(type_name: str, *args, **kwargs):
if type_name not in Type.cs_types:
raise ValueError(f"{type_name} is not a valid Clismo type")
return Type.cs_types[type_name](*args, **kwargs)
@staticmethod
def get(type_name: str):
if type_name == "list":
return (
cs_list_of_float,
cs_list_of_int,
cs_list_of_str,
cs_list_of_bool,
cs_list_of_client,
cs_list_of_server,
cs_list_of_step,
cs_list_of_simulation,
)
if type_name not in Type.cs_types:
raise ValueError(f"{type_name} is not a valid Clismo type")
return Type.cs_types[type_name]
@staticmethod
def get_type(value):
if isinstance(value, str):
return Type.get("str")
if isinstance(value, bool):
return Type.get("bool")
if isinstance(value, int):
return Type.get("int")
if isinstance(value, float):
return Type.get("float")
if value is None:
return Type.get("none")
if isinstance(value, dict):
return Type.get("dict")
if isinstance(value, tuple):
return Type.get("tuple")
if callable(value):
return Type.get("function")
return value
@staticmethod
def resolve_type(value):
val_type = Type.get_type(value)
return val_type(value)
def __repr__(self):
return f"<Type {self.type_name}>"
def __str__(self):
return f"<Type {self.type_name}>"
class Instance:
def __init__(self, _type: Type, value):
self.type = _type
self.value = value
cs_object = Type("object")
cs_float = Type("float", cs_object)
cs_int = Type("int", cs_float)
cs_bool = Type("bool", cs_int)
cs_str = Type("str", cs_object)
cs_none = Type("none", cs_object)
cs_any = Type("any", cs_object)
cs_client = Type("client", cs_object)
cs_server = Type("server", cs_object)
cs_step = Type("step", cs_object)
cs_simulation = Type("simulation", cs_object)
cs_list_of_float = Type("float_list", cs_object)
cs_list_of_int = Type("int_list", cs_object)
cs_list_of_str = Type("str_list", cs_object)
cs_list_of_bool = Type("bool_list", cs_object)
cs_list_of_client = Type("client_list", cs_object)
cs_list_of_server = Type("server_list", cs_object)
cs_list_of_step = Type("step_list", cs_object)
cs_list_of_simulation = Type("simulation_list", cs_object)
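# Small demonstration (added for clarity): wrap plain Python values in Clismo
# instances and query the subtype relation used by the builtins.
if __name__ == "__main__":
    three = Type.resolve_type(3)
    print(three.type.type_name)                       # -> int
    print(three.type.subtype(Type.get("float")))      # int derives from float -> True
    print(Type.get("str").subtype(Type.get("int")))   # -> False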
```
#### File: clismo/sim/client_server_model.py
```python
from queue import PriorityQueue
from typing import Callable, List
class Server:
def __init__(self, func: Callable, cost: float = 1.0):
self.func = func
self.cost = cost
class ClientServerModel:
def __init__(
self,
arrival_func: Callable,
servers: List[Server],
config: List[int],
client_limit: int = None,
time_limit: float = None,
):
self.arrival_func = arrival_func
if len(config) != len(servers):
raise ValueError("config and servers must have same length")
for conf in config:
if conf <= 0:
raise ValueError("config values must be positive")
self.servers = servers
self.config = config
self.events = PriorityQueue()
self.servers_queue = [0] * len(servers)
self.servers_in_use = [0] * len(servers)
self.time = 0.0
self.clients = 0
if client_limit is None and time_limit is None:
raise ValueError("Either client_limit or time_limit must be specified")
self.client_limit = client_limit
self.time_limit = time_limit
def run(self):
self.time = 0
self.clients = 0
self.servers_queue = [0] * len(self.servers)
self.servers_in_use = [0] * len(self.servers)
while True:
if self.events.empty():
self.events.put((self.time + self.arrival_func(), 0))
time, server = self.events.get()
if self.time_limit is not None and time > self.time_limit:
break
self.time = time
if server > 0:
if self.servers_queue[server - 1]:
self.servers_queue[server - 1] -= 1
self.events.put(
(self.time + self.servers[server - 1].func(), server)
)
else:
self.servers_in_use[server - 1] -= 1
if server == len(self.servers):
self.clients += 1
if self.client_limit is not None and self.clients > self.client_limit:
break
continue
if server == 0:
self.events.put((self.time + self.arrival_func(), 0))
if self.servers_in_use[server] >= self.config[server]:
self.servers_queue[server] += 1
else:
self.servers_in_use[server] += 1
self.events.put((self.time + self.servers[server].func(), server + 1))
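# Hedged example (added): a single step served by two identical servers, with
# exponential inter-arrival and service times chosen purely for illustration.
if __name__ == "__main__":
    import random
    model = ClientServerModel(
        arrival_func=lambda: random.expovariate(1.0),            # ~1 arrival per time unit
        servers=[Server(func=lambda: random.expovariate(2.0))],  # mean service time 0.5
        config=[2],                                              # two parallel instances of the server
        time_limit=100.0,
    )
    model.run()
    print("Clients served:", model.clients)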
```
#### File: clismo/sim/server.py
```python
from clismo.sim.optimizable_obj import OptimizableObject
class Server(OptimizableObject):
def __init__(self, name, func, **attrs):
super().__init__()
self.name = name
self.func = func
self.attrs = attrs
self.in_use = False
def attend_client(self, client):
self.in_use = True
return self.func(self, client)
@staticmethod
def ghost():
return Server("ghost", lambda s, c: None)
def __lt__(self, other):
return self.name < other.name
def get(self):
return Server(self.name, self.func, **self.attrs)
def __repr__(self):
return self.name
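# Minimal illustration (added): a server whose service routine returns a fixed
# duration for every client.
if __name__ == "__main__":
    cashier = Server("cashier", lambda server, client: 2.5)
    print(cashier.attend_client(client=None), cashier.in_use)  # -> 2.5 True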
```
#### File: clismo/sim/simulation.py
```python
from queue import PriorityQueue
from clismo.sim.optimizable_obj import OptimizableObject
from clismo.sim.server import Server
class Simulation(OptimizableObject):
def __init__(self, name, steps, time_limit=None, client_limit=None):
super().__init__("steps", "client_types")
self.name = name
self.arrival_funcs = []
self.client_types = []
self.steps = steps
self.time = 0
self.clients = 0
self.added_clients = 0
self.events = PriorityQueue()
self.time_limit = time_limit
self.client_limit = client_limit
self.minimize_func = None
self.attrs = {}
def add_arrival_func(self, arrival_func, client):
self.arrival_funcs.append((arrival_func, client))
self.client_types.append(client)
    def __run_arrivals(self, type_=None):
for arrival_func, client_type in self.arrival_funcs:
if type_ is not None and client_type.name != type_:
continue
delta_time = arrival_func()
client = client_type.get()
self.events.put(
(self.time + delta_time, delta_time, client, Server.ghost(), 0)
)
self.added_clients += 1
def run(self, verbose=False):
if self.time_limit is None and self.client_limit is None:
raise ValueError("Either time_limit or client_limit must be specified")
if not self.arrival_funcs:
raise ValueError("No arrival functions specified")
self.time = 0
self.clients = 0
self.events = PriorityQueue()
for step in self.steps:
step.clients_queue = []
for i, server in enumerate(step.servers):
step.servers[i] = server.get()
while True:
if self.events.empty():
self.__run_arrivals()
time, delta_time, client, last_server, step = self.events.get()
if step < len(self.steps) and verbose:
print(
f"{round(time, 3):>10} {client.name} "
f"arrived at {self.steps[step].name}",
)
elif verbose:
print(
f"{round(time, 3):>10} {client.name} out of system",
)
if self.time_limit is not None and time > self.time_limit:
break
self.time = time
if step > 0:
last_server.in_use = False
client.on_server_out(last_server, delta_time)
event_time, server = self.steps[step - 1].next_client()
if event_time is not None:
self.events.put(
(self.time + event_time, event_time, client, server, step)
)
if step == len(self.steps):
self.clients += 1
if self.client_limit is not None and self.clients >= self.client_limit:
break
continue
if step == 0:
self.__run_arrivals(client.name)
event_time, server = self.steps[step].receive_client(client)
if event_time is not None:
self.events.put(
(self.time + event_time, event_time, client, server, step + 1)
)
if verbose:
print()
def __repr__(self):
return self.name
```
#### File: clismo/sim/step.py
```python
from random import choice
from clismo.sim.optimizable_obj import OptimizableObject
class Step(OptimizableObject):
def __init__(self, name, servers=None):
super().__init__("servers")
self.name = name
self.servers = servers or []
self.clients_queue = []
self.attrs = {}
def add_server(self, server):
self.servers.append(server)
def assign_client_to_server(self, client):
free_servers = [server for server in self.servers if not server.in_use]
rand_server = choice(free_servers)
rand_server.in_use = True
return rand_server.attend_client(client), rand_server
def receive_client(self, client):
if self.clients_queue or all(server.in_use for server in self.servers):
self.clients_queue.append(client)
return None, None
return self.assign_client_to_server(client)
def next_client(self):
if self.clients_queue:
client = self.clients_queue.pop(0)
return self.assign_client_to_server(client)
return None, None
def __repr__(self):
return self.name
```
#### File: clismo/tests/test_csre.py
```python
import pytest
from clismo.automata import Automata
from clismo import csre
def test_simple_check():
assert csre.check("a", "a")
assert csre.check("a", "b") == False
assert csre.check("a", "aa") == False
assert csre.check("ab", "ab")
assert csre.check("ab", "aab") == False
def test_star_op():
assert csre.check("a*", "")
assert csre.check("a*", "a")
assert csre.check("a*", "aa")
assert csre.check("a*b", "aaab")
assert csre.check("a*b", "aaa") == False
def test_or_op():
assert csre.check("a|b", "a")
assert csre.check("a|b", "b")
assert csre.check("a|b", "c") == False
assert csre.check("a|b|c", "c")
def test_escape_char():
assert csre.check(r"\(a", "a") == False
assert csre.check(r"\(a", "(a")
assert csre.check(r"a\*", "a*")
assert csre.check(r"a\*", "a") == False
assert csre.check(r"a\**", "a***")
assert csre.check(r"a\**", "a")
assert csre.check(r"a\\*", "a\\\\")
def test_special_chars():
assert csre.check(r"a..*b", "afoob")
assert csre.check(r"a.*b", "ab")
assert csre.check(r"a.*b", "afoob")
assert csre.check(r"a\sb", "a b")
assert csre.check(r"a\nb", "a\nb")
assert csre.check(r"a\tb", "a\tb")
assert csre.check(r"a\rb", "a\rb")
assert csre.check(r"a\a*b", "afoob")
assert csre.check(r"a\a*b", "aFoob") == False
assert csre.check(r"a\A*b", "aFOOb")
assert csre.check(r"a\A*b", "aFoob") == False
assert csre.check(r"a(\A|\a)*b", "aFoob")
assert csre.check(r"a\db", "a5b")
assert csre.check(r"a\d*b", "a5x4b") == False
assert csre.check(r"a\d*.\db", "a5x4b")
def test_combined_op():
assert csre.check("aa*|b*", "a")
assert csre.check("aa*|b*", "b")
assert csre.check("aa*|b*", "")
assert csre.check("aa*b*", "a")
assert csre.check("aa*b*", "b") == False
assert csre.check("aa*b*", "ab")
assert csre.check("aa*b*", "aab")
assert csre.check("(a|b)*", "aabbababa")
def test_negation():
assert csre.check(r"(^a)", "b")
assert csre.check(r"(^a)", "a") == False
assert csre.check(r"(^a)(^a)*", "bcdef")
assert csre.check(r"'((^')|(\\'))*(^\\)'", "'asfew'")
assert csre.check(r"'((^')|(\\'))*(^\\)'", "'ab\\'") == False
assert csre.check(r"'((^')|(\\'))*(^\\)'", "'asfew\\'a") == False
assert csre.check(r"'((^')|(\\'))*(^\\)'", "'asfew\\'a'")
assert csre.check(r"'((^')|(\\'))*(^\\)'", "'asfew' foo 'bar'") == False
assert csre.check(r"'((^')|(\\'))*(^\\)'", "'asfew\\' foo \\'bar'")
def test_match():
assert csre.match("a", "a")
assert csre.match("a", "b") is None
re_match = csre.match("a", "aaaa")
assert re_match
assert re_match.end == 1
re_match = csre.match(r"'((^')|(\\'))*(^\\)'", "'aaa'")
assert re_match
assert re_match.end == 5
re_match = csre.match(r"'((^')|(\\'))*(^\\)'", "'aaa' foo")
assert re_match
assert re_match.end == 5
re_match = csre.match(r"'((^')|(\\'))*(^\\)'", "'aaa' foo 'bar'")
assert re_match
assert re_match.end == 5
re_match = csre.match(r"'((^')|(\\'))*(^\\)'", "'aaa\\' foo \\'bar'")
assert re_match
assert re_match.end == 17
``` |
{
"source": "jmorgadov/liter",
"score": 2
} |
#### File: liter/liter/console.py
```python
import typer
from pathlib import Path
import subprocess
from liter.changelog import generate_changelogs
from liter.version import change_version
app = typer.Typer()
@app.command()
def changelog(start_in: str = typer.Option(default=None),
last: bool = typer.Option(default=False)):
generate_changelogs(start_in, last)
@app.command()
def version(vtype: str = typer.Argument(default='patch'),
force: bool = typer.Option(default=False)):
change_version(vtype, force)
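# Hedged usage note (added, not part of the original module): assuming the
# package exposes this Typer app as a console script named ``liter``, the
# commands above translate to invocations such as:
#   liter changelog --start-in v1.0.0
#   liter changelog --last
#   liter version minor --force
# Running this file directly requires calling the app explicitly:
if __name__ == "__main__":
    app()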
``` |
{
"source": "jmorgadov/nesim",
"score": 3
} |
#### File: nesim/devices/device.py
```python
import abc
from pathlib import Path
from typing import Dict
import logging
from nesim.devices.send_receiver import SendReceiver
from nesim.devices.cable import DuplexCableHead
class Device(metaclass=abc.ABCMeta):
"""
    Represents a device.
    Parameters
    ----------
    name : str
        Name of the device.
    ports : Dict[str, SendReceiver]
        Ports of the device.
        Each port is associated with a ``SendReceiver``. If the cable attached
        to the ``SendReceiver`` of a given port is ``None``, the port has no
        cable connected.
    Attributes
    ----------
    name : str
        Name of the device.
    ports : Dict[str, SendReceiver]
        Ports of the device.
        Each port is associated with a ``SendReceiver``. If the cable attached
        to the ``SendReceiver`` of a given port is ``None``, the port has no
        cable connected.
    logs : List[str]
        Device logs.
    sim_time : int
        Current simulation time.
        This value is updated on every call to the ``update`` function.
"""
def __init__(self, name: str, ports: Dict[str, SendReceiver]):
self.name = name
self.ports = ports
self.logs = []
self.sim_time = 0
@abc.abstractproperty
def is_active(self):
"""bool : Estado del dispositivo."""
def port_name(self, port: int):
"""
        Returns the name of a port given its number.
        Parameters
        ----------
        port : int
            Number of the port.
            This value must be greater than or equal to 1 and less than or
            equal to the total number of ports of the device.
"""
return f'{self.name}_{port}'
def port_number(self, port: str):
"""
        Returns the number of a port given its name.
        Parameters
        ----------
        port : str
            Name of the port, as produced by ``port_name`` (e.g. ``device_1``).
"""
return int(port.split('_')[-1])
def reset(self):
"""
        Runs at the beginning of every simulation cycle for each device.
"""
def update(self, time: int):
"""
        Runs on every simulation cycle for each device.
        Parameters
        ----------
        time : int
            Current simulation time.
"""
self.sim_time = time
@abc.abstractmethod
def connect(self, cable_head: DuplexCableHead, port_name: str):
"""
        Connects a given cable to a given port.
        Parameters
        ----------
        cable_head : DuplexCableHead
            One of the ends of the cable to connect.
        port_name : str
            Name of the port the cable will be connected to.
"""
def disconnect(self, port_name: str):
"""
        Disconnects a port of the device.
        Parameters
        ----------
        port_name : str
            Name of the port to disconnect.
"""
self.ports[port_name] = None
def log(self, time: int, msg: str, info: str = ''):
"""
        Writes a log entry for the device.
        The logs of each device are saved to separate files when the
        simulation finishes.
        Parameters
        ----------
        time : int
            Current simulation time.
        msg : str
            Message to store.
        info : str
            Additional information.
"""
log_msg = f'| {time: ^10} | {self.name: ^12} | {msg: ^14} | {info: <30} |'
self.logs.append(log_msg)
logging.info(log_msg)
def save_log(self, path: str = ''):
"""
        Saves the device logs to a given path.
        Parameters
        ----------
        path : str
            Path where the logs will be saved. (Defaults to the project root.)
"""
output_folder = Path(path)
output_folder.mkdir(parents=True, exist_ok=True)
output_path = output_folder / Path(f'{self.name}.txt')
with open(str(output_path), 'w+') as file:
header = f'| {"Time (ms)": ^10} | {"Device":^12} | {"Action" :^14} | {"Info": ^30} |'
file.write(f'{"-" * len(header)}\n')
file.write(f'{header}\n')
file.write(f'{"-" * len(header)}\n')
file.write('\n'.join(self.logs))
file.write(f'\n{"-" * len(header)}\n')
```
#### File: nesim/devices/frame_sender.py
```python
import abc
from typing import Dict, List
from nesim.devices.multiple_port_device import MultiplePortDevice
from nesim.frame import Frame
class FrameSender(MultiplePortDevice, metaclass=abc.ABCMeta):
"""
    Represents a device capable of sending frames.
    Attributes
    ----------
    mac_addrs: Dict[int, List[int]]
        Table containing the MAC address of each port.
"""
def __init__(self, name: str, ports_count: int, signal_time: int):
self.mac_addrs: Dict[int, List[int]] = {}
super().__init__(name, ports_count, signal_time)
    def send(self, data: List[int], package_size=None, port: int = 1):
"""
        Adds new data to be sent to the data list.
        Parameters
        ----------
        data : List[int]
            Data to be sent.
        package_size : int, optional
            Size of each package the data is split into (defaults to the
            whole data length).
        port : int
            Port used to send the data.
"""
if package_size is None:
package_size = len(data)
packages = []
while data:
packages.append(data[:package_size])
data = data[package_size:]
send_receiver = self.ports[self.port_name(port)]
send_receiver.send(packages)
def send_frame(self, mac: List[int], data: List[int], port: int = 1):
"""
        Sends a frame with the given data to a given MAC address.
        Parameters
        ----------
        mac : List[int]
            Destination MAC address.
        data : List[int]
            Frame payload to send.
        port : int
            Port used to send the frame.
"""
frame = Frame.build(mac, self.mac_addrs[port], data)
print(f'[{self.sim_time:>6}] {self.name + " - " + str(port):>18} send: {frame}')
self.send(frame.bit_data, port=port)
```
#### File: nesim/nesim/frame.py
```python
from __future__ import annotations
from typing import List
from random import randint, random
from nesim.ip import IP, IPPacket
from nesim import utils
from nesim.devices.error_detection import get_error_detection_data
from nesim.devices.utils import data_size, extend_to_byte_divisor, from_bit_data_to_hex, from_bit_data_to_number, from_number_to_bit_data, from_str_to_bin
class Frame():
def __init__(self, bit_data: List[int]) -> None:
self.is_valid = False
if len(bit_data) < 48:
return
self.to_mac = from_bit_data_to_number(bit_data[:16])
self.from_mac = from_bit_data_to_number(bit_data[16:32])
self.frame_data_size = from_bit_data_to_number(bit_data[32:40]) * 8
self.error_size = from_bit_data_to_number(bit_data[40:48]) * 8
total_size = self.frame_data_size + self.error_size
if len(bit_data) - 48 < total_size:
return
        # frame_data_size and error_size are already expressed in bits, so the
        # slices below must not multiply them by 8 again.
        top_data_pos = 48 + self.frame_data_size
        self.data = bit_data[48: top_data_pos]
        self.error_data = bit_data[top_data_pos: top_data_pos + self.error_size]
self.bit_data = bit_data
self.is_valid = True
self.additional_info = ''
if self.frame_data_size / 8 == 8:
arpq = from_str_to_bin('ARPQ')
ip = ''.join(map(str, self.data[32:64]))
mac_dest_str = ''.join(map(str, bit_data[:16]))
arpq_data = ''.join(map(str, self.data[:32]))
if arpq_data.endswith(arpq):
if mac_dest_str == '1'*16:
self.additional_info = f'(ARPQ) Who is {IP.from_bin(ip)} ?'
else:
self.additional_info = '(ARPQ) response'
def __str__(self) -> str:
from_mac = from_bit_data_to_hex(from_number_to_bit_data(self.from_mac, 16))
to_mac = from_bit_data_to_hex(from_number_to_bit_data(self.to_mac, 16))
data = from_bit_data_to_hex(self.data)
valid, packet = IPPacket.parse(self.data)
if valid:
data = str(packet)
return f'{from_mac} -> {to_mac} | {data} | {self.additional_info}'
def __repr__(self) -> str:
return str(self)
@staticmethod
def build(dest_mac: List[int], orig_mac: List[int], data: List[int]) -> Frame:
data = extend_to_byte_divisor(data)
e_size, e_data = get_error_detection_data(
data, utils.CONFIG['error_detection']
)
rand = random()
if rand < utils.CONFIG['error_prob']:
ind = randint(0, len(data) - 1)
data[ind] = (data[ind] + 1) % 2
size = data_size(data)
final_data = dest_mac + \
orig_mac + \
size + \
e_size + \
data + \
e_data
frame = Frame(final_data)
return frame
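# Illustration only (added): build a frame carrying a single data byte using
# the module-level defaults in ``nesim.utils.CONFIG``; the MAC values are
# arbitrary 16-bit addresses chosen for the example.
if __name__ == "__main__":
    dest_mac = from_number_to_bit_data(0x000A, 16)
    orig_mac = from_number_to_bit_data(0x0005, 16)
    frame = Frame.build(dest_mac, orig_mac, [1, 0, 1, 0, 1, 0, 1, 0])
    print(frame.is_valid, frame.to_mac, frame.from_mac)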
```
#### File: nesim/nesim/simulation.py
```python
from io import UnsupportedOperation
from nesim.devices.ip_packet_sender import IPPacketSender
from nesim.devices.router import Route, Router
from nesim.ip import IP
from typing import Dict, List
from nesim.devices.switch import Switch
from nesim.devices.hub import Hub
from nesim.devices import Device, Duplex, Host
import nesim.utils as utils
class NetSimulation():
"""
    Main class in charge of running a simulation.
    Parameters
    ----------
    output_path : str
        Path where the simulation logs will be saved when it finishes.
        (Defaults to ``output``.)
"""
def __init__(self, output_path: str = 'output'):
utils.check_config()
self.instructions = []
self.signal_time = utils.CONFIG['signal_time']
self.output_path = output_path
self.inst_index = 0
self.time = 0
self.pending_devices = []
self.port_to_device: Dict[str, Device] = {}
self.devices: Dict[str, Device] = {}
self.disconnected_devices: Dict[str, Device] = {}
self.hosts: Dict[str, Host] = {}
self.end_delay = self.signal_time
@property
def is_running(self):
"""
        bool : Whether the simulation is still running.
"""
device_sending = any([d.is_active for d in self.devices.values()])
running = self.instructions or device_sending
if not running:
self.end_delay -= 1
return self.end_delay > 0
def add_device(self, device: Device):
"""
        Adds a device to the simulation.
        Parameters
        ----------
        device : Device
            Device to add.
"""
if device.name in self.devices.keys():
raise ValueError(
f'The device name {device.name} is already taken.')
self.devices[device.name] = device
if isinstance(device, Host):
self.hosts[device.name] = device
for port in device.ports.keys():
self.port_to_device[port] = device
def connect(self, port1, port2):
"""
        Connects two ports with a cable.
        Parameters
        ----------
        port1, port2 : str
            Names of the ports to connect.
"""
if port1 not in self.port_to_device.keys():
raise ValueError(f'Unknown port {port1}')
if port2 not in self.port_to_device.keys():
raise ValueError(f'Unknown port {port2}')
dev1 = self.port_to_device[port1]
dev2 = self.port_to_device[port2]
if dev1.name in self.disconnected_devices.keys():
self.disconnected_devices.pop(dev1.name)
self.add_device(dev1)
if dev2.name in self.disconnected_devices.keys():
self.disconnected_devices.pop(dev2.name)
self.add_device(dev2)
is_simple = isinstance(dev1, Hub) or isinstance(dev2, Hub)
cab = Duplex(simple=is_simple)
dev1.sim_time = self.time
dev2.sim_time = self.time
self.port_to_device[port1].connect(cab.head_1, port1)
self.port_to_device[port2].connect(cab.head_2, port2)
def send(self, host_name: str, data: List[int],
package_size: int = 8):
"""
        Tells a host to send a given series of data.
        Parameters
        ----------
        host_name : str
            Name of the host that will send the data.
        data : List[int]
            Data to send.
        package_size : int
            Size of each package the data is split into.
"""
if host_name not in self.hosts.keys():
raise ValueError(f'Unknown host {host_name}')
self.hosts[host_name].send(data, package_size)
def send_frame(self, host_name: str, mac: List[int], data: List[int]):
"""
        Tells a host to send a given frame to a given MAC address.
        Parameters
        ----------
        host_name : str
            Name of the host that sends the data.
        mac : List[int]
            Destination MAC address.
        data : List[int]
            Frame to send.
"""
if host_name not in self.hosts.keys():
raise ValueError(f'Unknown host {host_name}')
self.hosts[host_name].send_frame(mac, data)
def send_ip_package(self, host_name: str, ip_dest: IP, data: List[int]):
"""
        Sends an IP packet to a given address.
        Parameters
        ----------
        host_name : str
            Host that sends the packet.
        ip_dest : IP
            Destination IP address.
        data : List[int]
            Data to send.
        Raises
        ------
        ValueError
            If the host does not exist.
"""
if host_name not in self.hosts.keys():
raise ValueError(f'Unknown host {host_name}')
self.hosts[host_name].send_by_ip(ip_dest, data)
def ping_to(self, host_name: str, ip_dest: IP):
"""
        Executes the ``ping`` instruction.
        Parameters
        ----------
        host_name : str
            Host that executes the action.
        ip_dest : IP
            Destination IP address.
        Raises
        ------
        ValueError
            If the host does not exist.
"""
if host_name not in self.hosts.keys():
raise ValueError(f'Unknown host {host_name}')
self.hosts[host_name].send_ping_to(ip_dest)
def route(self, device_name: str, action: str = 'reset',
route: Route = None):
"""
        Executes one of the route-related actions: ``add``, ``remove`` or
        ``reset``.
        Parameters
        ----------
        device_name : str
            Name of the device the action is executed on.
        action : str, optional
            Action to execute.
        route : Route, optional
            Route to add or remove.
"""
router: Router = self.devices[device_name]
if action == 'add':
router.add_route(route)
elif action == 'remove':
router.remove_route(route)
else:
router.reset_routes()
def disconnect(self, port: str):
"""
        Disconnects a port.
        Parameters
        ----------
        port : str
            Port to disconnect.
"""
if port not in self.port_to_device.keys():
raise ValueError(f'Unknown port {port}')
dev = self.port_to_device[port]
dev.disconnect(port)
if dev.name in self.hosts.keys():
self.hosts.pop(dev.name)
self.devices.pop(dev.name)
self.disconnected_devices[dev.name] = dev
return
if isinstance(dev, Hub):
for cable in dev.ports.values():
if cable is not None:
break
else:
self.devices.pop(dev.name)
self.disconnected_devices[dev.name] = dev
if isinstance(dev, Switch):
for send_receiver in dev.ports.values():
if send_receiver.cable_head is not None:
break
else:
self.devices.pop(dev.name)
self.disconnected_devices[dev.name] = dev
def start(self, instructions):
"""
        Starts the simulation given a list of instructions.
        Parameters
        ----------
        instructions : List[Instruction]
            List of instructions to execute during the simulation.
"""
self.instructions = instructions
self.time = 0
while self.is_running:
self.update()
for device in self.devices.values():
device.save_log(self.output_path)
def assign_mac_addres(self, device_name, mac, interface):
"""
        Assigns a MAC address to a device interface.
        Parameters
        ----------
        device_name : str
            Name of the device the MAC address is assigned to.
        mac : List[int]
            MAC address.
        interface : int
            Interface (port number) the MAC address is assigned to.
"""
self.devices[device_name].mac_addrs[interface] = mac
def assign_ip_addres(self, device_name, ip: IP, mask: IP, interface: int):
"""
        Assigns an IP address and mask to a device interface.
        Parameters
        ----------
        device_name : str
            Name of the device the IP address is assigned to.
        ip : IP
            IP address.
        mask : IP
            Subnet mask.
        interface : int
            Interface the address is assigned to.
"""
device: IPPacketSender = self.devices[device_name]
if not isinstance(device, IPPacketSender):
raise UnsupportedOperation(f'Can not set ip to {device_name}')
device.ips[interface] = ip
device.masks[interface] = mask
def update(self):
"""
        Runs one simulation cycle, updating the simulation state.
        This function runs once for every simulated millisecond.
"""
current_insts = []
while self.instructions and self.time == self.instructions[0].time:
current_insts.append(self.instructions.pop(0))
for instr in current_insts:
instr.execute(self)
for device in self.devices.values():
device.reset()
for host in self.hosts.values():
host.update(self.time)
for dev in self.devices.values():
if isinstance(dev, Switch) or type(dev) == Router:
dev.update(self.time)
for _ in range(len(self.devices)):
for device in self.devices.values():
if isinstance(device, Hub):
device.update(self.time)
for dev in self.devices.values():
if isinstance(dev, Switch) or type(dev) == Router:
dev.receive()
for host in self.hosts.values():
host.receive()
self.time += 1
```
#### File: nesim/nesim/utils.py
```python
from pathlib import Path
CONFIG = {
'signal_time' : 10,
'error_detection' : 'simple_hash',
'error_prob' : 0.001,
}
_CONFIG_FILE_NAME = 'config.txt'
def _set_config_val(key: str, value):
if key == 'signal_time':
CONFIG[key] = int(value)
if key == 'error_detection':
CONFIG[key] = value
if key == 'error_prob':
CONFIG[key] = float(value)
def check_config():
path = Path(_CONFIG_FILE_NAME)
if path.exists():
with open(_CONFIG_FILE_NAME, 'r') as file:
lines = file.readlines()
for line in lines:
key, value = line.split()
_set_config_val(key, value)
else:
with open(_CONFIG_FILE_NAME, 'w+') as file:
file.writelines([
'signal_time 10\n',
'error_detection simple_hash\n',
'error_prob 0.001',
])
``` |
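For reference, `check_config` expects a `config.txt` with one `key value` pair per line, mirroring the keys of `CONFIG`. A minimal sketch (the values here are illustrative):
```python
# Illustrative sketch: write a config.txt that check_config() can read back.
from pathlib import Path

from nesim.utils import CONFIG, check_config

Path("config.txt").write_text(
    "signal_time 20\n"
    "error_detection simple_hash\n"
    "error_prob 0.01\n"
)

check_config()
print(CONFIG)  # {'signal_time': 20, 'error_detection': 'simple_hash', 'error_prob': 0.01}
```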
{
"source": "jmorgadov/NumLab",
"score": 3
} |
#### File: numlab/automata/automata.py
```python
from __future__ import annotations
import logging
from typing import Any, Dict, Iterable, List, Set, Tuple, Union
from numlab.automata.state import State
from numlab.automata.transition import Transition
_ATMT_COUNT = 0
class Automata:
"""
An automata.
Parameters
----------
name : str
The name of the automata.
Attributes
----------
name : str
The name of the automata.
states : Dict[str, State]
The states of the automata.
start_states : List[State]
The start states of the automata.
end_states : List[State]
The end states of the automata.
"""
def __init__(self, name: str = None) -> None:
if name is None:
global _ATMT_COUNT
name = f"atmt_{_ATMT_COUNT}"
_ATMT_COUNT += 1
self.name = name
self.states: Dict[str, State] = {}
self.start_states: List[State] = []
self.end_states: List[State] = []
self._pos = 0
self._input = None
self._current_state: State = None
self._processes: List[Tuple[State, int]] = []
self._processes_idx: int = 0
def __getattr__(self, item: str) -> Any:
if item in self.states:
return self.states[item]
raise AttributeError(f"No attribute {item}")
@property
def alphabet(self) -> Set[Tuple[Any, bool]]:
"""
Get the alphabet of the automata.
Returns
-------
List[Any]
The alphabet of the automata.
"""
alphabet = set()
for state in self.states.values():
for transition in state.transitions:
if transition.is_epsilon:
continue
if isinstance(transition.condition, str):
alphabet.add(transition.condition)
else:
alphabet.update(transition.condition)
return alphabet
def concatenate(self, other: Automata, set_single: bool = False) -> Automata:
"""
Concatenate the automata with another one.
Parameters
----------
other : Automata
The other automata.
set_single : bool, optional
Whether to set the automata to have a single start and end state
when needed, by default False.
Returns
-------
Automata
The concatenated automata.
Raises
------
ValueError
If the current automata has multiple end states and ``set_single`` is
False.
ValueError
If the other automata has multiple start states and ``set_single`` is
False.
"""
if len(self.end_states) != 1:
if set_single:
self.set_single_end()
else:
raise ValueError(f"Automata {self.name} has multiple end states.")
if len(other.start_states) != 1:
if set_single:
other.set_single_start()
else:
raise ValueError(f"Automata {other.name} has multiple start states.")
other = other.flat()
other_first_state = other.start_state
other_last_state = other.end_state
self.end_state.merge(other_first_state)
if other_last_state == other_first_state:
other_last_state = self.end_state
for state in other.states.values():
for trans in state.transitions:
if trans.to_state is other_first_state:
trans.to_state = self.end_state
self.end_states = [other_last_state]
return self
@property
def pos(self) -> int:
"""Position of the automata on the input"""
return self._pos
@property
def start_state(self) -> State:
"""Get the start state of the automata."""
if len(self.start_states) == 1:
return self.start_states[0]
raise ValueError("The automata has multiple start states.")
@property
def end_state(self) -> State:
"""Get the end state of the automata."""
if len(self.end_states) == 1:
return self.end_states[0]
raise ValueError("The automata has multiple end states.")
def add_state(
self,
state: Union[str, State] = None,
start: bool = False,
end: bool = False,
name: str = None,
) -> State:
"""
Add a state to the automata.
Parameters
----------
state : Union[str, State]
The name of the state to add or the state itself.
start : bool
Whether the state is a start state.
end : bool
Whether the state is an end state.
Returns
-------
State
The added state.
"""
if state is None:
state = State(f"q{len(self.states)}")
if isinstance(state, str):
if state in self.states:
raise ValueError(f"State {state} already exists.")
state = State(state)
state.automata = self
name = name if name is not None else state.name
self.states[name] = state
if start:
self.start_states.append(state)
if end:
self.end_states.append(state)
return state
def add_transition(
self,
from_state: Union[str, State],
to_state: Union[str, State],
condition: Any = None,
action: int = None,
negated: bool = False,
) -> None:
"""
Add a transition to the automata.
Parameters
----------
from_state : Union[str, State]
The state from which the transition starts.
to_state : Union[str, State]
The state to which the transition goes.
condition : Any
The condition under which the transition is taken.
action : int
The action to perform when the transition is taken.
Raises
------
ValueError
If any of the states does not exist.
"""
if isinstance(from_state, str):
from_state = self.states.get(from_state, None)
if from_state is None:
raise ValueError(f"No state {from_state} defined.")
if isinstance(to_state, str):
to_state = self.states.get(to_state, None)
if to_state is None:
raise ValueError(f"No state {to_state} defined.")
if action is None:
action = 0 if condition is None else 1
transition = Transition(from_state, to_state, condition, action, negated)
from_state.transitions.append(transition)
return transition
def set_single_start(self) -> State:
"""
Set the automata to have a single start state.
Returns
-------
State
The start state.
"""
if len(self.start_states) == 1:
return self.start_states[0]
start_st = self.add_state(f"_start_{self.name}")
for state in self.start_states:
self.add_transition(start_st, state)
self.start_states = [start_st]
return start_st
def set_single_end(self) -> State:
"""
Set the automata to have a single end state.
Returns
-------
State
The end state.
"""
if len(self.end_states) == 1:
return self.end_states[0]
end_st = self.add_state(f"_end_{self.name}")
for state in self.end_states:
self.add_transition(state, end_st)
self.end_states = [end_st]
return end_st
def set_single_start_end(self) -> Tuple[State, State]:
"""
Set the automata to have a single start and end state.
Returns
-------
Tuple[State, State]
The start and end state.
"""
start_st = self.set_single_start()
end_st = self.set_single_end()
return start_st, end_st
def flat(self) -> Automata:
"""
Flatten the automata.
Returns
-------
Automata
The flattened automata.
"""
flat = Automata(self.name)
count = 0
visited_states = []
non_visited_states = self.start_states
while non_visited_states:
new_non_visited_states = []
for state in non_visited_states:
flat.add_state(
state,
state in self.start_states,
state in self.end_states,
name=f"q{count}",
)
state.name = f"q{count}"
count += 1
visited_states.append(state)
for transition in state.transitions:
to_state = transition.to_state
if (
to_state not in visited_states
and to_state not in new_non_visited_states
and to_state not in non_visited_states
):
new_non_visited_states.append(transition.to_state)
non_visited_states = new_non_visited_states
return flat
def show(self) -> None:
"""
Show the automata.
"""
# Inverse name states dict
inv_states = {v: k for k, v in self.states.items()}
for name, state in self.states.items():
print(name, f"Final: {state in self.end_states}")
for transition in state.transitions:
neg = "^" if transition.negated else ""
print(
f" ({neg}{transition.str_cond}) "
f"-> {inv_states[transition.to_state]}"
)
def _eps_closure_single(self, state: Union[str, State]) -> Set[State]:
"""
Compute the epsilon closure of a single state.
Parameters
----------
state : Union[str, State]
The state to compute the epsilon closure of.
Returns
-------
Set[State]
The epsilon closure of the state.
Raises
------
ValueError
If the state does not exist.
"""
if isinstance(state, str):
if state not in self.states:
raise ValueError(f"No state {state} defined.")
state = self.states[state]
visited = set()
non_vsited = [state]
while non_vsited:
new_non_vsited = []
for current_state in non_vsited:
visited.add(current_state)
for transition in current_state.transitions:
if transition.is_epsilon:
to_st = transition.to_state
if (
to_st not in visited
and to_st not in new_non_vsited
and to_st not in non_vsited
):
new_non_vsited.append(to_st)
non_vsited = new_non_vsited
return visited
def eps_closure(
self, state: Union[str, State, Iterable[str], Iterable[State]]
) -> Set[State]:
"""
Compute the epsilon closure of a state or a set of states.
Parameters
----------
state : Union[str, State, Iterable[str], Iterable[State]]
The state or a list of states.
Returns
-------
Set[State]
The epsilon closure of the state or a set of states.
Raises
------
ValueError
If any of the states does not exist.
"""
if isinstance(state, (str, State)):
return self._eps_closure_single(state)
whole_closure = set()
for current_state in state:
whole_closure.update(self._eps_closure_single(current_state))
return whole_closure
def _goto_single(self, state: Union[str, State], symbol: str) -> Set[State]:
"""
Compute the goto of a single state.
Parameters
----------
state : Union[str, State]
The state to compute the goto of.
symbol : str
The symbol to compute the goto of.
Returns
-------
Set[State]
The goto of the state.
Raises
------
ValueError
If the state does not exist.
"""
if isinstance(state, str):
if state not in self.states:
raise ValueError(f"No state {state} defined.")
state = self.states[state]
answer = set()
st_esp_closure = self.eps_closure(state)
for current_state in st_esp_closure:
for transition in current_state.transitions:
if not transition.is_epsilon and transition.check_condition(symbol):
answer.add(transition.to_state)
return answer
def goto(
self, state: Union[str, State, Iterable[str], Iterable[State]], symbol: str
) -> Set[State]:
"""
Compute the goto of a state or a set of states.
Parameters
----------
state : Union[str, State, Iterable[str], Iterable[State]]
The state or a list of states.
symbol : str
The symbol to compute the goto of.
Returns
-------
Set[State]
The goto of the state or a set of states.
Raises
------
ValueError
If any of the states does not exist.
"""
if isinstance(state, (str, State)):
return self._goto_single(state, symbol)
whole_goto = set()
for current_state in state:
whole_goto.update(self._goto_single(current_state, symbol))
return whole_goto
def to_dfa(self, dfa2nfa: bool = False) -> Union[Automata, Tuple[Automata, Dict]]:
"""
Convert the automata to a DFA.
Parameters
----------
dfa2nfa : bool
If True, the return value will be a tuple of the DFA and the dfa2nfa
dictionary, otherwise only the DFA will be returned. By default, False.
Returns
-------
Union[Automata, Tuple[Automata, Dict]]
The DFA.
"""
get_name = lambda states: "".join(sorted(x.name for x in states))
alphabet = self.alphabet
dfa = Automata(self.name)
start_state = self.eps_closure(self.start_states)
start_name = get_name(start_state)
        q_0 = dfa.add_state(
            start_name,
            start=True,
            end=any(s in self.end_states for s in start_state),
        )
dfa_to_nfa = {q_0: start_state}
visited = set()
non_visited = [q_0]
while non_visited:
new_non_visited = []
for current_state in non_visited:
if current_state in visited:
continue
visited.add(current_state)
for char in alphabet:
goto_states = self.goto(dfa_to_nfa[current_state], char)
if not goto_states:
continue
next_state = self.eps_closure(goto_states)
next_name = get_name(next_state)
if next_name not in dfa.states:
dfa_state = dfa.add_state(
next_name,
end=any(s in self.end_states for s in next_state),
)
dfa_to_nfa[dfa_state] = next_state
new_non_visited.append(dfa_state)
else:
dfa_state = dfa.states[next_name]
dfa.add_transition(current_state.name, next_name, char)
if next_state not in new_non_visited and next_state not in visited:
new_non_visited.append(dfa_state)
non_visited = new_non_visited
return dfa if not dfa2nfa else (dfa, dfa_to_nfa)
def run(
self,
input_: Iterable,
stop_at_end: bool = False,
) -> bool:
"""
Run the automata on the given input.
Parameters
----------
input_ : Iterable
The input to run the automata on.
stop_at_end : bool
Whether to stop the automata at the first end state encountered.
Returns
-------
bool
Whether the automata succeeded.
Raises
------
ValueError
If the automata has no start state.
"""
if not self.start_states:
raise ValueError("No start states defined.")
self._pos = 0
self._processes_idx = 0
self._input = input_
self._processes = [(st, self._pos) for st in self.start_states]
while self._processes:
stop = self._step()
if self._current_state in self.end_states:
if stop_at_end:
return True
if stop:
break
else:
return False
logging.debug(f"Final {self._processes_idx} {self._processes}")
return self._current_state in self.end_states
def _step(self):
self._current_state, self._pos = self._processes[self._processes_idx]
self._current_state.visited()
if self._pos > len(self._input):
self._processes.pop(self._processes_idx)
return False
new_processes = 0
logging.debug(f"{self._processes_idx} {self._processes}")
for transition in self._current_state.transitions:
if transition.is_epsilon or (
0 <= self._pos < len(self._input)
and transition.check_condition(self._input[self._pos])
):
run_state = (transition.to_state, self._pos + transition.action)
if new_processes == 0:
self._processes[self._processes_idx] = run_state
else:
self._processes.append(run_state)
new_processes += 1
if not new_processes:
self._processes.pop(self._processes_idx)
if self._processes:
self._processes_idx = (self._processes_idx + 1) % len(self._processes)
if self._pos >= len(self._input) or self._pos < 0:
return self._current_state in self.end_states
return False
```
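As a quick usage sketch of the API above (not part of the original file): build a small NFA for the language `a*b`, convert it to a DFA, and run it on a couple of inputs.
```python
# Usage sketch for the Automata class above: the language a*b.
from numlab.automata import Automata

nfa = Automata("a_star_b")
q_0 = nfa.add_state("q0", start=True)
q_1 = nfa.add_state("q1", end=True)
nfa.add_transition(q_0, q_0, "a")  # loop on 'a'
nfa.add_transition(q_0, q_1, "b")  # accept after a final 'b'

dfa = nfa.to_dfa()
print(dfa.run("aaab"))  # True
print(dfa.run("aa"))    # False (no trailing 'b')
```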
#### File: numlab/builtin/nl_float.py
```python
import numlab.exceptions as excpt
from numlab.lang.type import Instance, Type
nl_bool = Type.get("bool")
nl_str = Type.get("str")
nl_int = Type.get("int")
nl_float = Type.get("float")
@nl_float.method("__new__")
def nl__new__(value: float):
_inst = Instance(nl_float)
_inst.set("value", float(value))
return _inst
@nl_float.method("__bool__")
def nl__bool__(self: Instance):
return nl_bool(self.get("value") != 0)
@nl_float.method("__add__")
def nl__add__(self, other: Instance):
if other.type.subtype(nl_float):
return Type.resolve_type(self.get("value") + other.get("value"))
raise excpt.InvalidTypeError("Can't add float to non-float")
@nl_float.method("__iadd__")
def nl__iadd__(self, other: Instance):
if other.type.subtype(nl_float):
self.set("value", self.get("value") + other.get("value"))
return self
raise excpt.InvalidTypeError("Can't add float to non-float")
@nl_float.method("__sub__")
def nl__sub__(self, other: Instance):
if other.type.subtype(nl_float):
return Type.resolve_type(self.get("value") - other.get("value"))
raise excpt.InvalidTypeError("Can't subtract float from non-float")
@nl_float.method("__isub__")
def nl__isub__(self, other: Instance):
if other.type.subtype(nl_float):
self.set("value", self.get("value") - other.get("value"))
return self
raise excpt.InvalidTypeError("Can't subtract float from non-float")
@nl_float.method("__mul__")
def nl__mul__(self, other: Instance):
if other.type.subtype(nl_float):
return Type.resolve_type(self.get("value") * other.get("value"))
raise excpt.InvalidTypeError("Can't multiply float by non-float")
@nl_float.method("__imul__")
def nl__imul__(self, other: Instance):
if other.type.subtype(nl_float):
self.set("value", self.get("value") * other.get("value"))
return self
raise excpt.InvalidTypeError("Can't multiply float by non-float")
@nl_float.method("__pow__")
def nl__pow__(self, other: Instance):
if other.type.subtype(nl_int):
return Type.resolve_type(self.get("value") ** other.get("value"))
raise excpt.InvalidTypeError("Can't raise float to non-int")
@nl_float.method("__truediv__")
def nl__div__(self, other: Instance):
if other.type.subtype(nl_float):
return Type.resolve_type(self.get("value") / other.get("value"))
raise excpt.InvalidTypeError("Can't divide float by non-float")
@nl_float.method("__idiv__")
def nl__idiv__(self, other: Instance):
if other.type.subtype(nl_float):
self.set("value", self.get("value") / other.get("value"))
return self
raise excpt.InvalidTypeError("Can't divide float by non-float")
@nl_float.method("__eq__")
def nl__eq__(self, other: Instance):
if other.type.subtype(nl_float):
return nl_bool(self.get("value") == other.get("value"))
raise excpt.InvalidTypeError("Can't compare float to non-float")
@nl_float.method("__lt__")
def nl__lt__(self, other: Instance):
if other.type.subtype(nl_float):
return nl_bool(self.get("value") < other.get("value"))
raise excpt.InvalidTypeError("Can't compare float to non-float")
@nl_float.method("__gt__")
def nl__gt__(self, other: Instance):
if other.type.subtype(nl_float):
return nl_bool(self.get("value") > other.get("value"))
raise excpt.InvalidTypeError("Can't compare float to non-float")
@nl_float.method("__le__")
def nl__le__(self, other: Instance):
if other.type.subtype(nl_float):
return nl_bool(self.get("value") <= other.get("value"))
raise excpt.InvalidTypeError("Can't compare float to non-float")
@nl_float.method("__ge__")
def nl__ge__(self, other: Instance):
if other.type.subtype(nl_float):
return nl_bool(self.get("value") >= other.get("value"))
raise excpt.InvalidTypeError("Can't compare float to non-float")
@nl_float.method("__str__")
def nl__str__(self):
return nl_str(str(self.get("value")))
@nl_float.method("__repr__")
def nl__repr__(self):
return nl_str(str(self.get("value")))
@nl_float.method("__hash__")
def nl__hash__(self):
return hash(self.get("value"))
```
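A minimal sketch of how these registered methods are dispatched through the `Type`/`Instance` machinery defined in `numlab/lang/type.py` (shown later); it assumes that importing this module is enough to register the float methods.
```python
# Sketch only: dispatch float arithmetic through the Type/Instance API.
import numlab.builtin.nl_float  # noqa: F401  (registers the methods above)
from numlab.lang.type import Type

nl_float = Type.get("float")
a = nl_float(2.5)
b = nl_float(4.0)

total = a.get("__add__")(a, b)              # how the evaluator would call it
print(total.get("value"))                   # 6.5
print(a.get("__mul__")(a, b).get("value"))  # 10.0
```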
#### File: numlab/builtin/nl_func.py
```python
from numlab.lang.type import Instance, Type
nl_function = Type.get("function")
@nl_function.method('__new__')
def nl__new__(func):
_inst = Instance(nl_function)
_inst.set('func', func)
return _inst
@nl_function.method('__call__')
def nl__call__(self, *args, **kwargs):
return self.get("func")(*args, **kwargs)
```
#### File: numlab/builtin/nl_none.py
```python
from numlab.lang.type import Instance, Type
nl_bool = Type.get("bool")
nl_none = Type.get("none")
@nl_none.method("__new__")
def nl__new__():
_inst = Instance(nl_none)
return _inst
@nl_none.method("__bool__")
def nl__bool__(self: Instance):
return nl_bool(False)
```
#### File: compiler/parsers/lr1_parser.py
```python
import logging
from functools import lru_cache
from pathlib import Path
from typing import Any, Dict, List, Set, Tuple, Union
from numlab.compiler.generic_ast import AST
from numlab.compiler.grammar import (Grammar, NonTerminal, Production, Symbol,
Terminal)
from numlab.compiler.grammar_ops import calculate_first, calculate_follow
from numlab.compiler.parsers.lritem import LRItem
from numlab.compiler.parsers.parser import Parser
from numlab.compiler.tokenizer import Token
from numlab.exceptions import ParsingError
class LR1Table:
"""
This class represents the LR1 table.
"""
def __init__(self, grammar: Grammar, table_file: str = None):
"""
Initializes a new LR1 table.
Parameters
----------
grammar : Grammar
Grammar that will be used.
table_file : str, optional
File to load the table from.
            If the file does not exist, the table will be created and saved
            to this file.
If not given, the table will be generated.
"""
self.grammar = grammar
self._prepare_grammar()
self._symbols = {sym.name: sym for sym in grammar.symbols}
self._symbols["$"] = Terminal("$")
self._productions = {
prod.head_str: prod for _, prod in grammar.all_productions()
}
# [state, symbol] -> [next_state, production]
self._table: Dict[Tuple[int, str], Union[str, int, Production]] = {}
self._first = None
self._follow = None
self._states_by_id: Dict[int, List[LRItem]] = None
self._id_by_hashsum: Dict[int, int] = {}
self._closure_cache: Dict[int, List[LRItem]] = {}
self._goto_cache: Dict[Tuple[int, str], int] = {}
        self._lr_items: Dict[Tuple[Production, int, Terminal], LRItem] = None
self._item_prods: Dict[NonTerminal, Production] = None
if table_file is not None:
table_file_path = Path(table_file)
if table_file_path.exists():
self._load_table(table_file)
return
self._build_table(table_file)
def _load_table(self, table_file: str):
"""Loads the table from a file.
Parameters
----------
table_file : str
Path to the file.
"""
logging.info(f"Loading table from {table_file}")
with open(str(table_file), "r", encoding="utf-8") as table_f:
file_lines = table_f.readlines()
assert len(file_lines) % 3 == 0, "Invalid table file"
for i in range(0, len(file_lines), 3):
state = int(file_lines[i])
symbol = self._symbols[file_lines[i + 1].strip()]
str_t_val = file_lines[i + 2].strip()
t_val = str_t_val
if "->" in str_t_val:
is_eps = False
if str_t_val.endswith("->"):
is_eps = True
str_t_val += " EPS"
t_val = self._productions[str_t_val]
if is_eps:
item_prod = Production([])
item_prod._head = t_val.head
item_prod._builder = t_val._builder
t_val = item_prod
elif str_t_val.isnumeric():
t_val = int(str_t_val)
self._table[(state, symbol)] = t_val
def save_table(self, table_file: str):
"""Saves the table to a file.
Parameters
----------
table_file : str
Path to the file.
"""
logging.info(f"Saving table to {table_file}")
with open(table_file, "w", encoding="utf-8") as table_f:
for key, value in self._table.items():
state, symbol = key
t_val = "" if value is None else str(value)
if isinstance(value, Production):
t_val = value.head_str
table_f.write(f"{state}\n")
table_f.write(f"{symbol}\n")
table_f.write(f"{t_val}\n")
def _prepare_grammar(self):
logging.info("Preparing grammar (adding S')")
if "S`" not in self.grammar.exprs_dict:
non_ter_prod = Production([self.grammar.start])
non_ter = NonTerminal("S`", [non_ter_prod])
self.grammar.add_expr(non_ter)
self.grammar.start = non_ter
self.grammar.start.prod_0.set_builder(lambda s: s.ast)
def _extract_grammar_lr_items(self) -> List[LRItem]:
"""Extracts all LR items from the grammar."""
logging.info("Extracting all LR items")
lr_items = []
self._item_prods = {}
for _, prod in self.grammar.all_productions():
for dot_pos in range(len(prod.symbols) + 1):
item_prod = prod
is_eps = prod.is_eps
if is_eps:
item_prod = Production([])
item_prod._head = prod.head
item_prod._builder = prod._builder
slr_item = LRItem(item_prod, dot_pos)
lr_items.append(slr_item)
if prod.head not in self._item_prods:
self._item_prods[prod.head] = []
self._item_prods[prod.head.name].append(item_prod)
if is_eps:
break
logging.info(f"Found {len(lr_items)} LR items")
logging.debug("Exacted items in the LR automata:")
for lr_item in lr_items:
logging.debug(f" {lr_item}")
return lr_items
def _contained_in_first(self, terminal: Terminal, *symbols):
"""Checks if a terminal is contained in the first set of a set of symbols.
Parameters
----------
terminal : Terminal
Terminal to be checked.
symbols : Symbol
Symbols to be checked.
Returns
-------
bool
True if the terminal is contained in the first set of the symbols.
"""
for symbol in symbols:
if isinstance(symbol, Symbol) and symbol.is_terminal:
return symbol == terminal
if terminal in self._first[symbol]:
return True
if "EPS" not in self._first[symbol]:
break
return False
def _build_table(self, table_file: str = None):
self._first = calculate_first(self.grammar)
self._follow = calculate_follow(self.grammar, self._first)
items = self._extract_grammar_lr_items()
self._lr_items = {}
for item in items:
for follow in self._follow[item.prod.head]:
new_lr_item = LRItem(item.prod, item.dot_pos, follow)
self._lr_items[item.prod, item.dot_pos, follow] = new_lr_item
init_state = self._closure(
{
self._lr_items[
self.grammar.start_expr.prod_0,
0,
self._follow[self.grammar.start_expr][0],
]
}
)
self._states_by_id = {0: init_state}
logging.info("Building LR1 table")
lr1_table: Dict[Tuple[int, str], Union[str, int, Production]] = {}
current_state = 0
while current_state < len(self._states_by_id):
logging.info(f"Building state {current_state} of {len(self._states_by_id)}")
state = self._states_by_id[current_state]
for item in state:
if item.at_symbol is None:
val = "OK" if item.prod.head.name == "S`" else item.prod
table_key = (current_state, item.lah.name)
else:
val = self._goto(current_state, item.at_symbol)
table_key = (current_state, item.at_symbol.name)
cont_val = lr1_table.get(table_key, None)
if cont_val is not None and cont_val != val:
raise ValueError(
f"LR1 table already contains "
f"{table_key} -> {cont_val.__repr__()} *** {val.__repr__()}"
)
lr1_table[table_key] = val
current_state += 1
self._table = lr1_table
logging.info("LR1 table built")
if table_file is not None:
self.save_table(table_file)
def get_state_number(self, items: Set[LRItem]) -> int:
"""Returns the state number for a list of LR items.
Parameters
----------
items : List[LRItem]
List of LR items.
Returns
-------
int
State number.
"""
hashsum = self.items_hash(items)
if hashsum in self._id_by_hashsum:
return self._id_by_hashsum[hashsum]
number = len(self._states_by_id)
self._states_by_id[number] = items
self._id_by_hashsum[hashsum] = number
return number
def __getitem__(self, index):
return self._table.get(index, None)
def items_hash(self, items: Set[LRItem]) -> Any:
"""Returns a unique value for a list of LR items.
Parameters
----------
items : Set[LRItem]
Set of LR items.
Returns
-------
Any
Hash sum.
"""
# return "".join([item.__repr__() for item in items])
return sum(hash(item) for item in items)
@lru_cache(maxsize=None)
def _goto(self, state: int, symbol: Symbol) -> int:
"""Returns the state number for a state and a symbol.
Parameters
----------
state : int
State number.
symbol : Symbol
Symbol.
Returns
-------
int
State number.
"""
logging.debug(f"Goto({state}, {symbol})")
state_items = self._states_by_id[state]
filtered_items = {
self._lr_items[item.prod, item.dot_pos + 1, item.lah]
for item in state_items
if item.at_symbol == symbol
}
        closure = self._closure(filtered_items)
        return self.get_state_number(closure)
def _closure(self, items: Set[LRItem]) -> Set[LRItem]:
"""Returns the closure of a list of LR items.
Parameters
----------
items : List[LRItem]
List of LR items.
Returns
-------
List[LRItem]
Closure of the list of LR items.
"""
hashsum = self.items_hash(items)
if hashsum in self._closure_cache:
return self._closure_cache[hashsum]
logging.debug(f"Calculating closure of {items}")
closure = items
change = True
while change:
change = False
new = set()
for item in closure:
next_item = item.at_symbol
if next_item is None or next_item.is_terminal:
continue
lah = item.lah
rest = item.prod.symbols[item.dot_pos + 1 :]
rest.append(lah)
for prod in self._item_prods[next_item.name]:
for fol in self._follow[next_item]:
if not self._contained_in_first(fol, *rest):
continue
lr_item = self._lr_items[prod, 0, fol]
if lr_item not in closure:
new.add(lr_item)
change = True
closure.update(new)
self._closure_cache[hashsum] = closure
return closure
class LR1Parser(Parser):
"""LR1 Parser.
Parameters
----------
grammar : Grammar
Grammar to be used.
table_file : str
Path to the file containing the LR1 table.
If the file does not exist, it will be created.
If not specified, the table will be built automatically from
the grammar.
"""
def __init__(self, grammar: Grammar, table_file: str = None):
super().__init__(grammar)
self.lr1_table = LR1Table(grammar, table_file)
def save_table(self, table_file: str):
"""Saves the LR1 table."""
self.lr1_table.save_table(table_file)
def parse(self, tokens: List[Token]) -> AST:
logging.debug(f"Parsing {len(tokens)} tokens (LR1)")
table = self.lr1_table
stack: List[Tuple[Symbol, int]] = []
i = 0
while i < len(tokens):
token = tokens[i]
logging.info(f"----------------------------------------------------")
logging.info(f"Parsing token {token}. Stack: {stack}")
current_state = stack[-1][1] if stack else 0
table_val = table[current_state, token.token_type]
logging.info(
f"Table value: {table_val} at ({current_state}, {token.token_type})"
)
if table_val == "OK":
break
if isinstance(table_val, int):
logging.info(f"Making SHIFT action")
term = Terminal(token.token_type, value=token.lexem)
stack.append((term, table_val))
i += 1
elif isinstance(table_val, Production):
logging.info(f"Making REDUCE action")
reduce_prod = table_val
# Pop from stack the necessary items
items_needed = len(reduce_prod.symbols)
items = []
if items_needed != 0:
stack, items = stack[:-items_needed], stack[-items_needed:]
items = [item[0].ast for item in items]
# Apply reduction
new_head = reduce_prod.head.copy()
new_head.set_ast(reduce_prod.build_ast(items))
logging.info(f"Reduced to {new_head}")
# Check next state
left_state = stack[-1][1] if stack else 0
next_state = table[left_state, reduce_prod.head.name]
logging.info(
f"Next state GOTO({left_state},{reduce_prod.head.name})"
f" is {next_state}"
)
# Push to stack the new item
stack.append((new_head, next_state))
elif table_val is None:
raise ParsingError(f"Unexpected token", token)
if len(stack) != 1:
raise ValueError(f"Dirty stack at the end of the parsing. Stack: {stack}")
return stack[-1][0].ast
```
#### File: numlab/lang/context.py
```python
from __future__ import annotations
from typing import Any, Dict, List, Optional, Tuple
from numlab.lang.type import Instance, Type
class Context:
def __init__(self, parent: Context = None):
self.parent: Context = parent
self.symbols: Dict[str, Any] = {}
self.var_count: int = 0
def make_child(self) -> Context:
return Context(self)
def _is_countable(self, name, value):
return (
not isinstance(value, Type)
and isinstance(value, Instance)
and not value.type.subtype(Type.get("function"))
and name != "stats"
)
def define(self, name: str, value: Any):
current = self.symbols.get(name, None)
if current is not None and self._is_countable(name, current):
self.var_count -= 1
if self._is_countable(name, value):
self.var_count += 1
self.symbols[name] = value
def delete(self, name):
if name not in self.symbols:
if self.parent:
self.parent.delete(name)
else:
raise Exception(f"Undefined variable {name}")
return
val = self.symbols[name]
if self._is_countable(name, val):
self.var_count -= 1
if name in self.symbols:
del self.symbols[name]
def resolve(self, name: str) -> Optional[Any]:
if name in self.symbols:
return self.symbols[name]
if self.parent:
return self.parent.resolve(name)
return None
def count_vars(self) -> int:
count = self.var_count
if self.parent:
count += self.parent.count_vars()
return count
```
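A short sketch of the scoping behaviour implemented above: child contexts resolve names through their parent, while definitions never leak upward.
```python
# Sketch: nested contexts resolve names through their parents.
from numlab.lang.context import Context

globals_ctx = Context()
globals_ctx.define("x", 1)

locals_ctx = globals_ctx.make_child()
locals_ctx.define("y", 2)

print(locals_ctx.resolve("x"))   # 1  (found in the parent)
print(locals_ctx.resolve("y"))   # 2
print(globals_ctx.resolve("y"))  # None (children do not leak upward)
```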
#### File: numlab/lang/type.py
```python
from __future__ import annotations
from typing import Any, Callable, Dict, List
class Type:
nl_types = {}
def __init__(self, type_name: str, parent: Type = None):
self.type_name = type_name
self.attributes: Dict[str, Any] = {}
self.parent = parent
Type.nl_types[type_name] = self
def add_attribute(self, attribute: str, default: Any = None):
self.attributes[attribute] = default
def method(self, method_name: str):
def method_wrapper(func):
self.add_attribute(method_name, func)
return func
return method_wrapper
def get_attribute(self, attribute_name: str):
        # self.attributes maps attribute names to values, so a direct lookup
        # is enough (iterating the dict would only yield plain string keys).
        return self.attributes.get(attribute_name, None)
def get_attr_dict(self):
all_attrs = {}
if self.parent:
all_attrs.update(self.parent.get_attr_dict())
all_attrs.update(self.attributes)
return all_attrs
def subtype(self, other: Type):
if not isinstance(other, tuple):
other = (other,)
for subtype in other:
if self.type_name == subtype.type_name:
return True
if self.parent is None:
return False
return self.parent.subtype(other)
def __call__(self, *args, **kwargs):
return self.attributes["__new__"](*args, **kwargs)
@staticmethod
def new(type_name: str, *args, **kwargs):
if type_name not in Type.nl_types:
raise ValueError(f"{type_name} is not a valid NumLab type")
return Type.nl_types[type_name](*args, **kwargs)
@staticmethod
def get(type_name: str):
if type_name not in Type.nl_types:
raise ValueError(f"{type_name} is not a valid NumLab type")
return Type.nl_types[type_name]
@staticmethod
def get_type(value):
if isinstance(value, str):
return Type.get("str")
if isinstance(value, bool):
return Type.get("bool")
if isinstance(value, int):
return Type.get("int")
if isinstance(value, float):
return Type.get("float")
if value is None:
return Type.get("none")
if isinstance(value, list):
return Type.get("list")
if isinstance(value, dict):
return Type.get("dict")
if isinstance(value, tuple):
return Type.get("tuple")
if callable(value):
return Type.get("function")
return value
@staticmethod
def resolve_type(value):
val_type = Type.get_type(value)
return val_type(value)
class Instance:
def __init__(self, _type: Type):
self.type = _type
self._dict = self.type.get_attr_dict()
self._dict["__dict__"] = self._dict
def get(self, attr_name):
if attr_name == "__dict__":
return Type.get("dict")(self._dict)
if attr_name not in self._dict:
raise ValueError(f"{self.type.type_name} has no attribute {attr_name}")
return self._dict[attr_name]
def set(self, attr_name, value):
self._dict[attr_name] = value
def has_value(self):
return self.type.type_name in ["int", "float", "str", "bool"]
def get_value(self):
if self.has_value():
return self.get("__new__")(self.get("value"))
return self
def __iter__(self):
iterator = self.get("__iter__")(self)
while True:
try:
yield iterator.get("__next__")(iterator)
except StopIteration:
break
def __repr__(self):
return self.get("__repr__")(self).get("value")
nl_object = Type("object")
nl_float = Type("float", nl_object)
nl_int = Type("int", nl_float)
nl_bool = Type("bool", nl_int)
nl_str = Type("str", nl_object)
nl_dict = Type("dict", nl_object)
nl_list = Type("list", nl_object)
nl_tuple = Type("tuple", nl_object)
nl_set = Type("set", nl_object)
nl_slice = Type("slice", nl_object)
nl_function = Type("function", nl_object)
nl_generator = Type("generator", nl_object)
nl_none = Type("none", nl_object)
```
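A small sketch of defining a custom type with the decorator API above; the `point` type and its methods are illustrative, not part of the library.
```python
# Sketch: registering a new type and its methods, then instantiating it.
from numlab.lang.type import Instance, Type

nl_point = Type("point", Type.get("object"))

@nl_point.method("__new__")
def point_new(x, y):
    inst = Instance(nl_point)
    inst.set("x", x)
    inst.set("y", y)
    return inst

@nl_point.method("norm2")
def point_norm2(self):
    return self.get("x") ** 2 + self.get("y") ** 2

p = nl_point(3, 4)                         # Type.__call__ dispatches to __new__
print(p.get("norm2")(p))                   # 25
print(p.type.subtype(Type.get("object")))  # True
```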
#### File: NumLab/numlab/nl_ast.py
```python
from __future__ import annotations
import enum
from typing import Any, List
from numlab.compiler import AST
# pylint: disable=too-few-public-methods
# pylint: disable=missing-class-docstring
class ExprCtx(enum.Enum):
LOAD = enum.auto()
STORE = enum.auto()
DEL = enum.auto()
class Operator(enum.Enum):
ADD = enum.auto()
SUB = enum.auto()
MUL = enum.auto()
DIV = enum.auto()
MOD = enum.auto()
POW = enum.auto()
AND = enum.auto()
OR = enum.auto()
LSHIFT = enum.auto()
RSHIFT = enum.auto()
BIT_AND = enum.auto()
BIT_XOR = enum.auto()
BIT_OR = enum.auto()
FLOORDIV = enum.auto()
MATMUL = enum.auto()
class CmpOp(enum.Enum):
IN = enum.auto()
NOT_IN = enum.auto()
IS = enum.auto()
IS_NOT = enum.auto()
EQ = enum.auto()
NOT_EQ = enum.auto()
LT = enum.auto()
LTE = enum.auto()
GT = enum.auto()
GTE = enum.auto()
class UnaryOp(enum.Enum):
UADD = enum.auto()
USUB = enum.auto()
NOT = enum.auto()
INVERT = enum.auto()
class Program(AST):
__slots__ = ("stmts",)
def __init__(self, stmts: List[Stmt]):
self.stmts = stmts
class Stmt(AST):
pass
class FuncDefStmt(Stmt):
__slots__ = ("name", "args", "body", "decorators")
def __init__(
self,
name: str,
args: Args,
body: List[Stmt],
decorators: List[Expr] = None,
):
self.name = name
self.args = args
self.body = body
self.decorators = decorators or []
def add_decorators(self, decorators: List[Expr]) -> FuncDefStmt:
self.decorators = decorators
return self
class ClassDefStmt(Stmt):
__slots__ = ("name", "bases", "body", "decorators")
def __init__(
self,
name: str,
bases: List[Expr],
body: List[Stmt],
decorators: List[Expr] = None,
):
self.name = name
self.bases = bases
self.body = body
self.decorators = decorators or []
def add_decorators(self, decorators: List[Expr]) -> ClassDefStmt:
self.decorators = decorators
return self
class ReturnStmt(Stmt):
__slots__ = ("expr",)
def __init__(self, expr: Expr = None):
self.expr = expr
class DeleteStmt(Stmt):
__slots__ = ("targets",)
def __init__(self, targets: List[Expr]):
self.targets = targets
class AssignStmt(Stmt):
__slots__ = ("targets", "value")
def __init__(self, targets: List[Expr], value: Expr):
self.targets = targets
self.value = value
class AugAssignStmt(Stmt):
__slots__ = ("target", "op", "value")
def __init__(self, target: Expr, op: Operator, value: Expr):
self.target = target
self.op = op
self.value = value
def set_target(self, target: Expr) -> AugAssignStmt:
self.target = target
return self
class AnnAssignStmt(Stmt):
__slots__ = ("target", "annotation", "value")
def __init__(self, target: Expr, annotation: Expr, value: Expr):
self.target = target
self.annotation = annotation
self.value = value
def set_target(self, target: Expr) -> AnnAssignStmt:
self.target = target
return self
class ConfDefStmt(Stmt):
__slots__ = ("name", "base", "configs")
    def __init__(self, name: str, configs: List[ConfOption], base: str = None):
        self.name = name
        self.base = base
        self.configs = configs
class ConfOption(AST):
__slots__ = ("name", "value")
def __init__(self, name: str, value: Expr):
self.name = name
self.value = value
class Begsim(Stmt):
__slots__ = ("config",)
def __init__(self, config: Expr):
self.config = config
class Endsim(Stmt):
pass
class ResetStats(Stmt):
pass
class ForStmt(Stmt):
__slots__ = ("target", "iter_expr", "body", "orelse")
def __init__(
self, target: Expr, iter_expr: Expr, body: List[Stmt], orelse: List[Stmt] = None
):
self.target = target
self.iter_expr = iter_expr
self.body = body
self.orelse = orelse or []
class WhileStmt(Stmt):
__slots__ = ("test", "body", "orelse")
def __init__(self, test: Expr, body: List[Stmt], orelse: List[Stmt] = None):
self.test = test
self.body = body
self.orelse = orelse or []
class IfStmt(Stmt):
__slots__ = ("test", "body", "orelse")
def __init__(self, test: Expr, body: List[Stmt], orelse: List[Stmt] = None):
self.test = test
self.body = body
self.orelse = orelse or []
class WithStmt(Stmt):
__slots__ = ("items", "body")
def __init__(self, items: List[WithItem], body: List[Stmt]):
self.items = items
self.body = body
class WithItem(AST):
__slots__ = ("context_expr", "optional_vars")
def __init__(self, context_expr: Expr, optional_vars: List[Expr] = None):
self.context_expr = context_expr
self.optional_vars = optional_vars
class RaiseStmt(Stmt):
__slots__ = ("exc", "cause")
def __init__(self, exc: Expr = None, cause: Expr = None):
self.exc = exc
self.cause = cause
class TryStmt(Stmt):
__slots__ = ("body", "handlers", "orelse", "finalbody")
def __init__(
self,
body: List[Stmt],
handlers: List[ExceptHandler] = None,
orelse: List[Stmt] = None,
finalbody: List[Stmt] = None,
):
self.body = body
self.handlers = handlers
self.orelse = orelse
self.finalbody = finalbody
class ExceptHandler(AST):
__slots__ = ("hand_type", "name", "body")
def __init__(self, hand_type: Expr, name: Expr, body: List[Stmt]):
self.hand_type = hand_type
self.name = name
self.body = body
class AssertStmt(Stmt):
__slots__ = ("test", "msg")
def __init__(self, test: Expr, msg: Expr = None):
self.test = test
self.msg = msg
class GlobalStmt(Stmt):
__slots__ = ("names",)
def __init__(self, names: List[str]):
self.names = names
class NonlocalStmt(Stmt):
__slots__ = ("names",)
def __init__(self, names: List[str]):
self.names = names
class ExprStmt(Stmt):
__slots__ = ("expr",)
def __init__(self, expr: Expr):
self.expr = expr
class PassStmt(Stmt):
pass
class BreakStmt(Stmt):
pass
class ContinueStmt(Stmt):
pass
class Expr(AST):
pass
class BinOpExpr(Expr):
__slots__ = ("left", "op", "right")
def __init__(self, left: Expr, op: str, right: Expr):
self.op = op
self.left = left
self.right = right
class UnaryOpExpr(Expr):
__slots__ = ("op", "operand")
def __init__(self, op: UnaryOp, operand: Expr):
self.op = op
self.operand = operand
class LambdaExpr(Expr):
__slots__ = ("args", "body")
def __init__(self, args: Args, body: Expr):
self.args = args
self.body = body
class IfExpr(Expr):
__slots__ = ("test", "body", "orelse")
def __init__(self, test: Expr, body: Expr, orelse: Expr = None):
self.test = test
self.body = body
self.orelse = orelse
class DictExpr(Expr):
__slots__ = ("keys", "values")
def __init__(self, keys: List[Expr] = None, values: List[Expr] = None):
self.keys = keys or []
self.values = values or []
class SetExpr(Expr):
__slots__ = ("elts",)
def __init__(self, elts: List[Expr] = None):
self.elts = elts or []
class ListCompExpr(Expr):
__slots__ = ("elt", "generators")
def __init__(self, elt: Expr, generators: List[Comprehension]):
self.elt = elt
self.generators = generators
class SetCompExpr(Expr):
__slots__ = ("elt", "generators")
def __init__(self, elt: Expr, generators: List[Comprehension]):
self.elt = elt
self.generators = generators
class DictCompExpr(Expr):
__slots__ = ("key", "value", "generators")
def __init__(self, key: Expr, value: Expr, generators: List[Comprehension]):
self.key = key
self.value = value
self.generators = generators
class GeneratorExpr(Expr):
__slots__ = ("elt", "generators")
def __init__(self, elt: Expr, generators: List[Comprehension]):
self.elt = elt
self.generators = generators
def add_elt(self, elt: Expr) -> GeneratorExpr:
self.elt = elt
return self
class Comprehension(AST):
__slots__ = ("target", "comp_iter", "ifs")
def __init__(self, target: Expr, comp_iter: Expr, ifs: List[Expr] = None):
self.target = target
self.comp_iter = comp_iter
self.ifs = ifs or []
class YieldExpr(Expr):
__slots__ = ("value",)
def __init__(self, value: List[Expr] = None):
self.value = value
class YieldFromExpr(Expr):
__slots__ = ("value",)
def __init__(self, value: Expr):
self.value = value
class CompareExpr(Expr):
__slots__ = ("left", "ops", "comparators")
def __init__(self, left: Expr, ops: List[CmpOp], comparators: List[Expr]):
self.left = left
self.ops = ops
self.comparators = comparators
class CallExpr(Expr):
__slots__ = ("func", "args", "keywords")
def __init__(self, func: Expr, args: List[Expr], keywords: List[Keyword]):
self.func = func
self.args = args
self.keywords = keywords
@property
def value(self):
return self.func
@value.setter
def value(self, value):
self.func = value
class Keyword(AST):
__slots__ = ("arg", "value")
def __init__(self, arg: Expr, value: Expr):
self.arg = arg
self.value = value
class StarredExpr(Expr):
__slots__ = ("value", "ctx")
def __init__(self, value: Expr, ctx: ExprCtx = ExprCtx.LOAD):
self.value = value
self.ctx = ctx
class ConstantExpr(Expr):
__slots__ = ("value",)
def __init__(self, value: Any):
self.value = value
def show(self):
return f"ConstantExpr({self.value})"
class AttributeExpr(Expr):
__slots__ = ("value", "attr", "ctx")
def __init__(self, value: Expr, attr: str, ctx: ExprCtx = ExprCtx.LOAD):
self.value = value
self.attr = attr
self.ctx = ctx
def insert_name_at_start(self, name: str) -> AttributeExpr:
if isinstance(self.value, AttributeExpr):
self.value.insert_name_at_start(name)
elif isinstance(self.value, NameExpr):
new_name_val = NameExpr(name)
name_val = self.value.name_id
self.value = AttributeExpr(new_name_val, name_val)
return self
class SubscriptExpr(Expr):
__slots__ = ("value", "slice_expr", "ctx")
def __init__(self, value: Expr, slice_expr: SliceExpr, ctx: ExprCtx = ExprCtx.LOAD):
self.value = value
self.slice_expr = slice_expr
self.ctx = ctx
class NameExpr(Expr):
__slots__ = ("name_id", "ctx")
def __init__(self, name_id: str, ctx: ExprCtx = ExprCtx.LOAD):
self.name_id = name_id
self.ctx = ctx
def show(self):
return f"NameExpr('{self.name_id}', ctx={self.ctx})"
class ListExpr(Expr):
__slots__ = ("elts", "ctx")
def __init__(self, elts: List[Expr] = None, ctx: ExprCtx = ExprCtx.LOAD):
self.elts = elts or []
self.ctx = ctx
class TupleExpr(Expr):
__slots__ = ("elts", "ctx")
def __init__(self, elts: List[Expr] = None, ctx: ExprCtx = ExprCtx.LOAD):
self.elts = elts
self.ctx = ctx
class SliceExpr(Expr):
__slots__ = ("lower", "upper", "step")
def __init__(self, lower: Expr = None, upper: Expr = None, step: Expr = None):
self.lower = lower
self.upper = upper
self.step = step
class Args(AST):
__slots__ = ("args", "vararg", "kwarg")
def __init__(
self,
args: List[Arg] = None,
vararg=None,
kwarg=None,
):
self.args = args or []
self.vararg = vararg
self.kwarg = kwarg
class Arg(AST):
__slots__ = ("arg", "annotation", "default", "is_arg", "is_kwarg")
def __init__(
self,
arg: str,
annotation: Expr = None,
default: Expr = None,
is_arg: bool = False,
is_kwarg: bool = False,
):
self.arg = arg
self.annotation = annotation
self.default = default
self.is_arg = is_arg
self.is_kwarg = is_kwarg
def set_default(self, default: Expr) -> Arg:
self.default = default
return self
def set_arg(self, is_arg: bool) -> Arg:
self.is_arg = is_arg
return self
def set_kwarg(self, is_kwarg: bool) -> Arg:
self.is_kwarg = is_kwarg
return self
```
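As an illustration of the node classes above, this is how the statement `x = 1 + 2` could be built by hand (the parser builders normally do this):
```python
# Sketch: manually building the AST for `x = 1 + 2`.
from numlab.nl_ast import (AssignStmt, BinOpExpr, ConstantExpr, ExprCtx,
                           NameExpr, Operator, Program)

tree = Program(
    stmts=[
        AssignStmt(
            targets=[NameExpr("x", ctx=ExprCtx.STORE)],
            value=BinOpExpr(ConstantExpr(1), Operator.ADD, ConstantExpr(2)),
        )
    ]
)
print(tree.stmts[0].targets[0].show())  # NameExpr('x', ctx=ExprCtx.STORE)
```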
#### File: NumLab/numlab/nlre.py
```python
import logging
from numlab.automata import Automata
ASCII = list(map(chr, range(128)))
SPECIAL_CHARS = ["(", ")", "|", "*", "^"]
DIGITS = list("0123456789")
LOWER_CASE_CHARS = list("abcdefghijklmnopqrstuvwxyz")
UPPER_CASE_CHARS = list("ABCDEFGHIJKLMNOPQRSTUVWXYZ")
class RegexMatch:
"""
Represents the result of a match.
Parameters
----------
re_expr : str
The regular expression used to match the text.
text : str
The text to match against the regular expression.
pos : int
The position where the match ended.
Attributes
----------
re_expr : str
The regular expression used to match the text.
text : str
The text that was matched against the regular expression.
pos : int
The position where the match ended.
"""
def __init__(self, re_expr: str, text: str, end: int):
self.re_expr = re_expr
self.text = text
self.end = end
def __repr__(self):
return f"RegexMatch(matched:{self.text[:self.end]}; end={self.end})"
@property
def matched_text(self) -> str:
return self.text[: self.end]
class RegexPattern:
"""
Regular expression compiled to an automata.
Parameters
----------
re_expr : str
The regular expression to be converted.
Attributes
----------
re_expr : str
The regular expression.
atmt : Automata
The automata representing the regular expression.
Raises
------
ValueError
If the regular expression is not valid.
"""
def __init__(self, re_expr: str):
self.re_expr = re_expr
self.atmt = _build_automata(self.re_expr).flat().to_dfa()
def match(self, text: str) -> RegexMatch:
"""
Match the text against the regular expression.
Parameters
----------
text : str
The text to match against the regular expression.
Returns
-------
RegexMatch
The RegexMatch object containing the result of the match.
"""
last_pos = -1
def set_last_pos():
nonlocal last_pos
last_pos = self.atmt.pos
for state in self.atmt.end_states:
state.on_visited = set_last_pos
self.atmt.run(text)
if last_pos == -1:
return None
return RegexMatch(self.re_expr, text, last_pos)
def _find_matching_paren(text: str, start: int = 0) -> int:
count = 0
for i in range(start, len(text)):
if text[i] == "(":
count += 1
elif text[i] == ")":
count -= 1
if count == 0:
return i
raise ValueError("Unmatched parenthesis.")
def _get_basic_re_expr(re_expr: str) -> str:
"""
Converts a regular expression to a basic regular expression.
Parameters
----------
re_expr : str
Regular expression.
Returns
-------
str
Basic regular expression.
"""
return re_expr
def _apply_star_op(atmt: Automata) -> Automata:
"""
Applies the star operator to an automata.
Parameters
----------
atmt : Automata
Automata.
Returns
-------
Automata
Automata.
"""
#
# .---- < ----.
# / \
# - - > (q0) -- .. --> (qf) --> ((new_qf))
# \ /
# `-------- > ---------'
#
flat_atmt = atmt.flat()
q_0 = flat_atmt.start_state
q_f = flat_atmt.end_state
flat_atmt.states.pop(q_f.name)
for state in list(flat_atmt.states.values()):
for trans in state.transitions:
if trans.to_state == q_f:
trans.to_state = q_0
flat_atmt.end_states = [flat_atmt.start_state]
return flat_atmt
def _check_special_char(re_expr, index, atmt):
if index < 0 or index >= len(re_expr) or re_expr[index] != "*":
return atmt, False
atmt = _apply_star_op(atmt)
return atmt, True
def _process_single_char(re_expr: str, negated: bool) -> Automata:
"""
Processes a single character.
Parameters
----------
    re_expr : str
        Regular expression.
    negated : bool
        Whether the character condition is negated.
    Returns
    -------
    Tuple[Automata, int]
        The automata for the parsed character and the index of the next
        character to process in ``re_expr``.
"""
#
# - - > (q0) -- a --> ((q1))
#
escaped = False
if re_expr[0] == "\\":
re_expr = re_expr[1:]
logging.debug("Escaped character")
escaped = True
cond = re_expr[0]
logging.debug(f"Parsing {cond}")
if escaped:
if cond == "s":
cond = " "
elif cond == "n":
cond = "\n"
elif cond == "t":
cond = "\t"
elif cond == "r":
cond = "\r"
elif cond == "d":
cond = DIGITS
elif cond == "a":
cond = LOWER_CASE_CHARS
elif cond == "A":
cond = UPPER_CASE_CHARS
elif cond == ".":
cond = None
if negated:
cond = [c for c in ASCII if c not in cond]
new_atmt = Automata()
from_st = new_atmt.add_state("q0", start=True)
to_st = new_atmt.add_state("q1", end=True)
new_atmt.add_transition(from_st, to_st, cond, action=1)
logging.debug(f"Condition: {new_atmt.q0.transitions[0].str_cond}")
new_atmt, changed = _check_special_char(re_expr, 1, new_atmt)
logging.debug(f"Especial char found after {changed}")
new_index = 2 if changed else 1
new_index += 1 if escaped else 0
return new_atmt, new_index
def _process_group(re_expr: str, negated: bool) -> Automata:
"""
Processes a group.
Parameters
----------
    re_expr : str
        Regular expression.
    negated : bool
        Whether the group condition is negated.
    Returns
    -------
    Tuple[Automata, int]
        The automata for the parsed group and the index of the next
        character to process in ``re_expr``.
"""
logging.debug("Parsing group")
close_paren_index = _find_matching_paren(re_expr)
negated = re_expr[1] == "^"
start_index = 2 if negated else 1
new_atmt = _build_automata(re_expr[start_index:close_paren_index], negated=negated)
new_atmt, changed = _check_special_char(re_expr, close_paren_index + 1, new_atmt)
logging.debug(f"Especial char found after group {changed}")
new_index = close_paren_index + 2 if changed else close_paren_index + 1
return new_atmt, new_index
def _process_or_operator(a_atmt: Automata, b_atmt: Automata) -> Automata:
"""
Processes an or operator.
Parameters
----------
    a_atmt : Automata
        Automata for the left operand.
    b_atmt : Automata
        Automata for the right operand.
    Returns
    -------
    Automata
        Automata accepting the union of both operands.
"""
#
# .-- > -- (a0) --..--> (af) -- > --.
# / \
# - - > (new_q0) ((new_qf))
# \ /
# `-- > -- (b0) --..--> (bf) -- > --'
#
new_atmt = Automata()
new_atmt.add_state("q0", start=True)
new_atmt.add_state("qf", end=True)
new_atmt.add_transition(new_atmt.q0, a_atmt.start_state)
new_atmt.add_transition(new_atmt.q0, b_atmt.start_state)
new_atmt.add_transition(a_atmt.end_state, new_atmt.qf)
new_atmt.add_transition(b_atmt.end_state, new_atmt.qf)
return new_atmt
def _build_automata(
re_expr: str, last_atmt: Automata = None, negated: bool = False
) -> Automata:
"""
Builds an automata from a regular expression using the Thompson
construction algorithm.
Parameters
----------
re_expr : str
Regular expression.
Returns
-------
Automata
Automata.
"""
if re_expr == "":
if last_atmt:
return last_atmt
raise ValueError("Invalid regular expression.")
logging.debug(f"Building automata for {re_expr}")
# Parse a single character
if re_expr[0] not in SPECIAL_CHARS or re_expr[0] == "\\":
new_atmt, new_index = _process_single_char(re_expr, negated)
if last_atmt:
new_atmt = last_atmt.concatenate(new_atmt)
return _build_automata(re_expr[new_index:], new_atmt, negated)
# Parse a group
if re_expr[0] == "(":
new_atmt, new_index = _process_group(re_expr, negated)
if last_atmt:
new_atmt = last_atmt.concatenate(new_atmt)
return _build_automata(re_expr[new_index:], new_atmt, negated)
# Parse an or operator
if re_expr[0] == "|":
logging.debug("Parsing or operator")
if not last_atmt:
raise ValueError("Invalid regular expression.")
a_atmt = last_atmt
b_atmt = _build_automata(re_expr[1:], None, negated)
new_atmt = _process_or_operator(a_atmt, b_atmt)
return new_atmt
raise ValueError("Invalid regular expression {re_expr}")
def compile_patt(re_expr: str) -> RegexPattern:
"""
Compiles a regular expression into an automata.
Parameters
----------
re_expr : str
Regular expression.
Returns
-------
Automata
Automata.
"""
return RegexPattern(re_expr)
def check(re_expr: str, text: str) -> bool:
"""
Checks a regular expression against a text.
Parameters
----------
re_expr : str
Regular expression.
text : str
Text.
Returns
-------
bool
True if the regular expression matches the text, False otherwise.
"""
re_expr = _get_basic_re_expr(re_expr)
re_patt = _build_automata(re_expr).flat()
return re_patt.run(text)
def match(re_expr: str, text: str):
"""
Matches a regular expression against a text.
Parameters
----------
re_expr : str
Regular expression.
text : str
Text.
Returns
-------
    RegexMatch
        The match result, or None if the text does not match.
"""
re_expr = _get_basic_re_expr(re_expr)
re_patt = _build_automata(re_expr).flat().to_dfa()
last_pos = -1
def set_last_pos():
nonlocal last_pos
last_pos = re_patt.pos
for state in re_patt.end_states:
state.on_visited = set_last_pos
re_patt.run(text)
if last_pos == -1:
return None
return RegexMatch(re_expr, text, last_pos)
```
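A quick sketch of the module-level helpers above, using only the operators this engine implements (literals, groups, `|`, `*`, `^` negation and backslash escapes):
```python
# Sketch: compiling and matching with the small regex engine above.
from numlab import nlre

patt = nlre.compile_patt("(a|b)*c")
m = patt.match("abbac and more")
print(m.matched_text)                # abbac
print(nlre.check(r"\d\d*", "2023"))  # True
```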
#### File: NumLab/numlab/nl_tokenizer.py
```python
from typing import List
from numlab.compiler import Token, Tokenizer
tknz = Tokenizer()
tknz.add_pattern("NEWLINE", r"\n")
tknz.add_pattern("SPACE", r"( |\t)( |\t)*", lambda l: None)
tknz.add_pattern("COMMENT", r"#.*\n", lambda t: None)
# Augassing
tknz.add_pattern("+=", r"+=")
tknz.add_pattern("-=", r"-=")
tknz.add_pattern("*=", r"\*=")
tknz.add_pattern("@=", r"@=")
tknz.add_pattern("/=", r"/=")
tknz.add_pattern("%=", r"%=")
tknz.add_pattern("&=", r"&=")
tknz.add_pattern("|=", r"\|=")
tknz.add_pattern("^=", r"\^=")
tknz.add_pattern("<<=", r"<<=")
tknz.add_pattern(">>=", r">>=")
tknz.add_pattern("**=", r"\*\*=")
tknz.add_pattern("//=", r"//=")
# Operators
tknz.add_pattern("==", r"==")
tknz.add_pattern(">=", r">=")
tknz.add_pattern("<=", r"<=")
tknz.add_pattern("!=", r"!=")
tknz.add_pattern("**", r"\*\*")
tknz.add_pattern("<<", r"<<")
tknz.add_pattern(">>", r">>")
tknz.add_pattern("*", r"\*")
tknz.add_pattern("=", r"=")
tknz.add_pattern("|", r"\|")
tknz.add_pattern("^", r"\^")
tknz.add_pattern("&", r"&")
tknz.add_pattern("+", r"+")
tknz.add_pattern("-", r"-")
tknz.add_pattern("@", r"@")
tknz.add_pattern("/", r"/")
tknz.add_pattern("%", r"%")
tknz.add_pattern("//", r"//")
tknz.add_pattern("~", r"~")
tknz.add_pattern("<", r"<")
tknz.add_pattern(">", r">")
# Punctuation
tknz.add_pattern("(", r"\(")
tknz.add_pattern(")", r"\)")
tknz.add_pattern("{", r"{")
tknz.add_pattern("}", r"}")
tknz.add_pattern("[", r"[")
tknz.add_pattern("]", r"]")
tknz.add_pattern(";", r";")
tknz.add_pattern(",", r",")
tknz.add_pattern(".", r"\.")
tknz.add_pattern(":", r":")
# Keywords
tknz.add_keywords(
"True",
"False",
"None",
"and",
"or",
"not",
"if",
"else",
"elif",
"while",
"for",
"in",
"break",
"continue",
"return",
"def",
"class",
"try",
"except",
"finally",
"raise",
"import",
"from",
"as",
"is",
"del",
"pass",
"yield",
"assert",
"with",
"global",
"nonlocal",
"lambda",
"conf",
"begsim",
"endsim",
"resetstats",
)
# Special terminals
tknz.add_pattern("NAME", r"(\a|\A|_)(\a|\A|\d|_)*")
tknz.add_pattern("NUMBER", r"\d\d*|\d\d*\.\d\d*")
tknz.add_pattern(
"STRING", r"'((^')|(\\'))*(^\\)'|\"((^\")|(\\\"))*(^\\)\"", lambda t: t[1:-1]
)
@tknz.process_tokens
def process_tokens(tokens: List[Token]):
indent_tok = Token("INDENT", "INDENT")
dedent_tok = Token("DEDENT", "DEDENT")
indentations = [0]
new_tokens = []
check_indent = 0
for tok in tokens:
if tok.NEWLINE and check_indent == 0:
check_indent = 1
if not tok.NEWLINE and check_indent == 1:
check_indent = 2
if check_indent == 2:
new_indentation_size = tok.col
while new_indentation_size < indentations[-1]:
new_tokens.append(dedent_tok)
indentations.pop()
if new_indentation_size > indentations[-1]:
indentations.append(new_indentation_size)
new_tokens.append(indent_tok)
check_indent = 0
if not tok.NEWLINE or (new_tokens and not new_tokens[-1].NEWLINE):
new_tokens.append(tok)
if len(indentations) > 1:
for _ in range(len(indentations) - 1):
new_tokens.append(dedent_tok)
new_tokens.append(Token("NEWLINE", "\n"))
return new_tokens
```
#### File: numlab/optimization/fuzzy_opt_classifier.py
```python
import math
from typing import Callable
class FuzzyRule:
def __init__(self, name: str, functions: list, rule: Callable):
self.name = name
self.functions = functions
self.rule = rule
def apply(self, *input_values: tuple):
if len(input_values) != len(self.functions):
raise ValueError(
"The number of input values does not match the number of functions"
)
results = [self.functions[i](val) for i, val in enumerate(input_values)]
return self.rule(*results)
class FuzzyOptClassifier:
def __init__(self):
self.max_loop_depth = 0
self.changes = []
self.rules = [
FuzzyRule(
name="Poor change",
functions=[
lambda x: self._norm_bell_curve(0, 0.2, x),
lambda x: self._norm_bell_curve(0, 1, x),
],
rule=max,
),
FuzzyRule(
name="Acceptable change",
functions=[
lambda x: self._norm_bell_curve(0.5, 0.2, x),
lambda x: self._norm_bell_curve(0.5, 1, x),
],
rule=max,
),
FuzzyRule(
name="Good change",
functions=[
lambda x: self._norm_bell_curve(1, 0.2, x),
lambda x: self._norm_bell_curve(1, 1, x),
],
rule=max,
),
]
self.membership_val_centers = [0.1, 0.5, 1]
def add_change(self, category: float, loop_depth: float):
self.max_loop_depth = max(self.max_loop_depth, loop_depth)
self.changes.append((category, loop_depth))
def _loop_depth_val(self, loop_depth):
return min(3, loop_depth)
def _defuzzify(self, *values):
return sum(
[self.membership_val_centers[i] * values[i] for i in range(len(values))]
) / sum(values)
def _classify_single_change(self, category: float, loop_depth: float):
vals = [
self.rules[i].apply(category, self._loop_depth_val(loop_depth))
for i in range(len(self.rules))
]
return self._defuzzify(*vals)
def classify_changes(self):
if len(self.changes) == 0:
raise ValueError("No changes have been added")
return [self._classify_single_change(*vals) for vals in self.changes]
def classify_optimization_quality(self):
if len(self.changes) == 0:
raise ValueError("No changes have been added")
vals = self.classify_changes()
return max(vals)
def _bell_curve(self, mean: float, std: float, x: float):
return (1 / (std * math.sqrt(2 * math.pi))) * math.exp(
-((x - mean) ** 2) / (2 * std ** 2)
)
def _norm_bell_curve(self, mean: float, std: float, x: float):
return self._bell_curve(mean, std, x) / self._bell_curve(mean, std, mean)
```
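A hedged usage sketch of `FuzzyOptClassifier` above: `add_change` records a change category and the loop depth it occurred at (capped at 3 by `_loop_depth_val`), and the overall optimization quality is the maximum of the per-change fuzzy scores. The numbers below are made up for illustration and assume categories roughly in [0, 1].
```python
clf = FuzzyOptClassifier()
clf.add_change(0.9, 2)   # strong change inside a doubly nested loop (illustrative)
clf.add_change(0.3, 0)   # weak change at the top level (illustrative)
print(clf.classify_changes())               # defuzzified score per change
print(clf.classify_optimization_quality())  # max of the per-change scores
```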
#### File: NumLab/tests/test_parser.py
```python
import logging
import sys
from pathlib import Path
from typing import List
import pytest
from numlab.compiler import (AST, Grammar, LR1Parser, ParserManager, Symbol,
Tokenizer)
from numlab.exceptions import ParsingError
# Math ast
class Expr(AST):
def __init__(self, term: AST, expr: AST = None):
self.expr = expr
self.term = term
def eval(self):
if self.expr is None:
return self.term.eval()
else:
return self.expr.eval() + self.term.eval()
class Term(AST):
def __init__(self, factor: AST, term: AST = None):
self.term = term
self.factor = factor
def eval(self):
if self.term is None:
return self.factor.eval()
else:
return self.term.eval() * self.factor.eval()
class Factor(AST):
def __init__(self, value: int = None, expr: AST = None):
self.value = value
self.expr = expr
def eval(self):
if self.expr is None:
return self.value
else:
return self.expr.eval()
builders = {
"F -> i": lambda x: Factor(value=int(x.value)),
"F -> ( E )": lambda p1, x, p2: Factor(expr=x),
"T -> F": lambda x: Term(factor=x),
"T -> T * F": lambda x, t, y: Term(term=x, factor=y),
"E -> T": lambda x: Expr(term=x),
"E -> E + T": lambda x, p, y: Expr(expr=x, term=y),
}
@pytest.fixture
def grammar():
gm = Grammar.open("./tests/grammars/math_expr_lr.gm")
gm.assign_builders(builders)
return gm
@pytest.fixture
def tokenizer():
tokenizer = Tokenizer()
tokenizer.add_pattern("NEWLINE", r"( |\n)*\n\n*( |\n)*", lambda l: None)
tokenizer.add_pattern("SPACE", r"( |\t)( \t)*", lambda t: None)
tokenizer.add_pattern("i", r"\d\d*")
tokenizer.add_pattern("+", r"+")
tokenizer.add_pattern("*", r"\*")
tokenizer.add_pattern("(", r"\(")
tokenizer.add_pattern(")", r"\)")
return tokenizer
@pytest.fixture
def parser(grammar, tokenizer):
return ParserManager(grammar, tokenizer)
# Test parsing
def test_parse(parser: ParserManager):
ast = parser.parse("(1+2)*3")
assert ast.eval() == 9
ast = parser.parse("(1+2)*(3+4)")
assert ast.eval() == 21
with pytest.raises(ParsingError):
parser.parse("1+2+")
with pytest.raises(ParsingError):
parser.parse("1+2)")
with pytest.raises(ParsingError):
parser.parse("(")
with pytest.raises(ParsingError):
parser.parse("")
def test_parse_file(parser: ParserManager):
ast = parser.parse_file("./tests/grammars/math_file")
assert ast.eval() == 54
def test_save_and_load_lrtable(grammar, tokenizer):
table_file = Path("./tests/grammars/math_expr_lr_table")
if table_file.exists():
table_file.unlink()
parser = LR1Parser(grammar, str(table_file))
assert table_file.exists()
assert parser.lr1_table._first is not None
parser = LR1Parser(grammar, str(table_file))
assert parser.lr1_table._first is None
parser_man = ParserManager(grammar, tokenizer, parser)
assert parser_man.parse("1 + 2").eval() == 3
table_file.unlink()
```
#### File: NumLab/tests/test_tokenizer.py
```python
import logging
import re
import pytest
from numlab.compiler import Tokenizer
from numlab.exceptions import TokenizationError
@pytest.fixture
def tokenizer():
return Tokenizer()
def test_add_pattern(tokenizer: Tokenizer):
ttype = "TOKEN_TYPE"
patt = r"aa*"
tokenizer.add_pattern(ttype, patt)
assert ttype in tokenizer.token_patterns
assert tokenizer.token_patterns[ttype].re_expr == patt
def test_add_patterns(tokenizer: Tokenizer):
patterns = {
"AB": r"(a|b)(a|b)*",
"ABC": r"(a|b|c)(a|b|c)*",
"ABCD": r"(a|b|c|d)(a|b|c|d)*",
"SPACE": r"( |\t)",
}
tokenizer.add_patterns(**patterns)
for token_type, patt in patterns.items():
assert token_type in tokenizer.token_patterns
assert tokenizer.token_patterns[token_type].re_expr == patt
def test_add_existent_token_type(tokenizer: Tokenizer):
ttype = "AB"
patt = r"(a|b)(a|b)*"
with pytest.raises(TokenizationError):
tokenizer.add_pattern(ttype, patt)
tokenizer.add_pattern(ttype, patt)
def test_tokenizer(tokenizer: Tokenizer):
# logging.basicConfig(level=logging.DEBUG)
patterns = {
"AB": r"(a|b)(a|b)*",
"ABC": r"(a|b|c)(a|b|c)*",
"ABCD": r"(a|b|c|d)(a|b|c|d)*",
"SPACE": r"( |\t)",
}
for token_type, patt in patterns.items():
tokenizer.add_pattern(token_type, patt)
text = "ab cdaba"
tokens = tokenizer.tokenize(text)
types = ["AB", "SPACE", "ABC", "ABCD"]
assert len(tokens) == len(types)
for token, ttype in zip(tokens, types):
assert token.token_type == ttype
# Test wrong text
text = "123"
with pytest.raises(TokenizationError):
tokenizer.tokenize(text)
def test_wrong_pattern_order(tokenizer: Tokenizer):
ttype_1 = "ABC"
patt_1 = r"(a|b|c)(a|b|c)*"
ttype_2 = "AB"
patt_2 = r"(a|b)(a|b)*"
tokenizer.add_pattern(ttype_1, patt_1)
tokenizer.add_pattern(ttype_2, patt_2)
text = "ababbbb"
tokens = tokenizer.tokenize(text)
assert tokens[0].token_type != ttype_2
``` |
{
"source": "jmorganc/Asteroid-Hunting-Alert",
"score": 2
} |
#### File: Asteroid-Hunting-Alert/app/hunter.py
```python
import ephem
"""
Get an observer location
Find all asteroids in the next 48 hours visible from that location
Will also need to consider weather and light pollution
"""
class Hunter(ephem.Observer):
def __init__(self):
        super().__init__()  # initialize the underlying ephem.Observer
def something(self):
pass
```
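A hedged sketch of the visibility check described in the docstring above, using standard pyephem calls (`Observer`, `readdb`, `compute`, `.alt`). The orbital-elements line is the Juels-Holvorcem example that also appears commented out in `hunt.py`, and the coordinates are the Richardson defaults used there; neither is a real alert target.
```python
# Hedged sketch: is a given body above the horizon for an observer right now?
import ephem

obs = ephem.Observer()
obs.lon, obs.lat = '-96.743857', '32.950962'  # pyephem reads strings as degrees
body = ephem.readdb("C/2002 Y1 (Juels-Holvorcem),e,103.7816,"
                    "166.2194,128.8232,242.5695,0.0002609,0.99705756,0.0000,"
                    "04/13.2508/2003,2000,g 6.5,4.0")
body.compute(obs)
print(body.alt > 0)  # True when the body is currently above the horizon
```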
#### File: Asteroid-Hunting-Alert/app/hunt.py
```python
import bottle
import ephem
import pprint
"""
Static routes
"""
@bottle.route('/js/<filename>')
def static_js(filename):
return bottle.static_file(filename, root='./static/js')
@bottle.route('/css/<filename>')
def static_css(filename):
return bottle.static_file(filename, root='./static/css')
@bottle.route('/img/<filename>')
def static_img(filename):
return bottle.static_file(filename, root='./static/img')
@bottle.route('/fonts/<filename>')
def static_fonts(filename):
return bottle.static_file(filename, root='./static/fonts')
"""
Index
"""
@bottle.route('/')
def index():
# Default find for current location. For now, make this Richardson
lon = -96.743857
lat = 32.950962
location = (lon, lat)
observer = ephem.Observer()
    # pyephem treats bare floats as radians; pass strings so these are read as degrees
    observer.lon = str(lon)
    observer.lat = str(lat)
moon = ephem.Moon()
moon.compute(observer)
'''
yh = ephem.readdb("C/2002 Y1 (Juels-Holvorcem),e,103.7816," +
"166.2194,128.8232,242.5695,0.0002609,0.99705756,0.0000," +
"04/13.2508/2003,2000,g 6.5,4.0")
yh.compute('2003/4/11')
print(yh.name) >>> C/2002 Y1 (Juels-Holvorcem)
print("%s %s" % (yh.ra, yh.dec)) >>> 0:22:44.58 26:49:48.1
print("%s %s" % (ephem.constellation(yh), yh.mag)) >>> ('And', 'Andromeda') 5.96
'''
test_message = 'Just beginning work...'
return bottle.template(
'templates/index',
test_message=test_message,
location=location
)
"""
Run
"""
bottle.debug(True)
bottle.run(reloader=True)
``` |
{
"source": "JMorganUSU/PyDSS",
"score": 3
} |
#### File: src/app/HDF5.py
```python
import os
# Third party libraries
import pandas as pd
import h5py
class hdf5Writer:
    """ Class that handles writing simulation results to an
    HDF5 store.
    """
def __init__(self, log_dir, columnLength):
""" Constructor """
self.log_dir = log_dir
self.store = h5py.File(os.path.join(log_dir, 'groups_dict.hdf5'), 'w')
self.store_groups = {}
self.store_datasets = {}
self.row = {}
self.columnLength = columnLength
self.chunkRows = 24
self.step = 0
self.dfs = {}
    # Datasets are created lazily per object type; rows are buffered and flushed in chunks
def write(self, fed_name, currenttime, powerflow_output, index):
"""
        Writes the status of BES assets at a particular timestep to the
        HDF5 store.
        :param fed_name: name of BES federate
:param currenttime: simulation timestep
:param powerflow_output: Powerflow solver timestep output as a dict
"""
# Iterate through each object type
for obj_type in powerflow_output:
Data = pd.DataFrame(powerflow_output[obj_type], index=[self.step])
if obj_type not in self.row:
self.row[obj_type] = 0
self.store_groups[obj_type] = self.store.create_group(obj_type)
self.store_datasets[obj_type] = {}
for colName in powerflow_output[obj_type].keys():
self.store_datasets[obj_type][colName] = self.store_groups[obj_type].create_dataset(
colName,
shape=(self.columnLength, ),
maxshape=(None, ),
chunks=True,
compression="gzip",
compression_opts=4
)
if obj_type not in self.dfs:
self.dfs[obj_type] = Data
else:
if self.dfs[obj_type] is None:
self.dfs[obj_type] = Data
else:
self.dfs[obj_type] = self.dfs[obj_type].append(Data, ignore_index=True)
if self.step % self.chunkRows == self.chunkRows - 1:
si = int(self.step / self.chunkRows) * self.chunkRows
ei = si + self.chunkRows
for colName in powerflow_output[obj_type].keys():
self.store_datasets[obj_type][colName][si:ei] = self.dfs[obj_type][colName]
self.dfs[obj_type] = None
# Add object status data to a DataFrame
self.step += 1
def __del__(self):
self.store.flush()
self.store.close()
```
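A hedged usage sketch of `hdf5Writer` above; the log directory, federate name, and column names are placeholders. One row is buffered per call and a chunk is flushed to the HDF5 store every `chunkRows` (24) steps, so `columnLength` should match the total number of time steps to be written.
```python
import os

# Illustrative only: write 96 synthetic time steps for one object type.
os.makedirs("logs", exist_ok=True)
writer = hdf5Writer(log_dir="logs", columnLength=96)
for step in range(96):
    powerflow_output = {
        "Loads": {"load_1_kW": 3.2 + 0.1 * step, "load_2_kW": 1.1},
    }
    writer.write("federate_1", step, powerflow_output, index=step)
del writer  # __del__ flushes and closes the HDF5 store
```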
#### File: PyDSS/cli/reports.py
```python
import ast
import logging
import os
import sys
import json
import click
from PyDSS.pydss_project import PyDssProject
from PyDSS.loggers import setup_logging
from PyDSS.utils.utils import get_cli_string, make_human_readable_size
from PyDSS.common import SIMULATION_SETTINGS_FILENAME
from terminaltables import SingleTable
from os.path import normpath, basename
@click.argument(
"project-path",
)
@click.option(
"-l", "--list-reports",
help="List all reports for a given project path",
is_flag=True,
default=False,
show_default=True,
)
@click.option(
"-i", "--index",
help="View report by index (use -l flag to see list of available reports)",
default=0,
show_default=True,
)
@click.option(
"-s", "--scenario",
required=False,
help="PyDSS scenario name.",
)
@click.option(
"-r", "--report",
required=False,
help="PyDSS report name.",
)
@click.command()
def reports(project_path, list_reports=False, scenario=None, report=None, index=0):
"""Explore and print PyDSS reports."""
assert not (list_reports and index), "Both 'list' and 'index' options cannot be set to true at the same time"
assert os.path.exists(project_path), "The provided project path {} does not exist".format(project_path)
logsPath = os.path.join(project_path, "Logs")
    assert os.path.exists(logsPath), "No Logs folder in the provided project path: {}".format(project_path)
print(logsPath)
reportList = getAvailableReports(logsPath)
project = basename(normpath(project_path))
if list_reports:
Table = SingleTable(reportList, title="Available PyDSS reports")
print("")
print(Table.table)
elif index:
idx, projectName, ScenarioName, ReportName = reportList[index]
printReport(logsPath, projectName, ScenarioName, ReportName)
elif project:
for dx, projectName, ScenarioName, ReportName in reportList[1:]:
if projectName == project:
if scenario is None or scenario == ScenarioName:
if report is None or report == ReportName:
printReport(logsPath, projectName, ScenarioName, ReportName)
def printReport(logsPath, project, scenario, report):
fileName = "{}__{}__reports.log".format(project, scenario)
filePath = os.path.join(logsPath, fileName)
assert os.path.exists(filePath), "Report {} for project: {} / scenario: {} does not exist".format(
report, project, scenario
)
tableData = []
Keys = {}
with open(os.path.join(logsPath, fileName), "r") as f:
for l in f:
data = json.loads(l.strip())
if "Report" not in data:
print("Skipping {}. Not a valid PyDSS report.".format(fileName))
return None
elif data["Report"] == report:
if report not in Keys:
Keys[report] = list(data.keys())
Keys[report] = [x for x in Keys[report] if x != "Report"]
values = []
for k in Keys[report]:
values.append(data[k])
tableData.append(values)
tableData.insert(0, Keys[report])
Table = SingleTable(tableData, title="{} report (Project: {}, Scenario: {})".format(
report, project, scenario
))
print("")
print(Table.table)
return
def getAvailableReports(logsPath):
logFiles = list(filter(lambda x: '.log' in x, os.listdir(logsPath)))
reportFiles = [x for x in logFiles if "__reports" in x]
headings = ["#", "Project", "Scenario", "Report"]
reportList = [headings]
reportNumber = 0
for report in reportFiles:
project, scenario, _ = report.split("__")
print(reportNumber, project, scenario, report)
reportTypes = getReportTypes(logsPath, report)
        if reportTypes is not None:
            for reportType in reportTypes:
                reportNumber += 1
                reportList.append([reportNumber, project, scenario, reportType])
return reportList
def getReportTypes(logsPath, reportFile):
fileName = os.path.join(logsPath, reportFile)
print(fileName)
f = open(fileName, "r")
lines = f.readlines()
reportTypes = []
for l in lines:
data = json.loads(l.strip())
if "Report" not in data:
print("Skipping {}. Not a valid PyDSS report.".format(fileName))
return None
else:
if data["Report"] not in reportTypes:
reportTypes.append(data["Report"])
return reportTypes
```
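For reference, `printReport` and `getReportTypes` above expect every line of a `<project>__<scenario>__reports.log` file to be a standalone JSON object carrying at least a `Report` key; the remaining keys become the table columns. A hypothetical pair of lines (all field names besides `Report` are illustrative) would look like this.
```python
import json

# Hypothetical contents of Logs/proj__scenario1__reports.log, one JSON object per line.
lines = [
    '{"Report": "MyReport", "Element": "Line.line1", "Value": 0.97}',
    '{"Report": "MyReport", "Element": "Line.line2", "Value": 1.02}',
]
print([json.loads(l)["Report"] for l in lines])  # -> ['MyReport', 'MyReport']
```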
#### File: PyDSS/cli/run_server.py
```python
from PyDSS.api.server import pydss_server
from aiohttp import web
import logging
import click
logger = logging.getLogger(__name__)
@click.option(
"-p", "--port",
default=9090,
show_default=True,
help="Socket port for the server",
)
@click.command()
def serve(ip="127.0.0.1",port=9090):
"""Run a PyDSS RESTful API server."""
FORMAT = '%(asctime)s - %(levelname)s - %(message)s'
logging.basicConfig(level=logging.INFO, format=FORMAT)
pydss = pydss_server(ip, port)
web.run_app(pydss.app, port=port)
```
#### File: ProfileManager/hooks/h5.py
```python
from PyDSS.ProfileManager.base_definitions import BaseProfileManager, BaseProfile
from PyDSS.ProfileManager.common import PROFILE_TYPES
from PyDSS.exceptions import InvalidParameter
from PyDSS.common import DATE_FORMAT
from datetime import datetime
import pandas as pd
import numpy as np
import datetime
import h5py
import copy
import os
class ProfileManager(BaseProfileManager):
def __init__(self, sim_instance, solver, options, logger, **kwargs):
super(ProfileManager, self).__init__(sim_instance, solver, options, logger, **kwargs)
self.Objects = kwargs["objects"]
if os.path.exists(self.basepath):
self.logger.info("Loading existing h5 store")
self.store = h5py.File(self.basepath, "r+")
else:
self.logger.info("Creating new h5 store")
self.store = h5py.File(self.basepath, "w")
for profileGroup in PROFILE_TYPES.names():
self.store.create_group(profileGroup)
self.setup_profiles()
return
def setup_profiles(self):
self.Profiles = {}
for group, profileMap in self.mapping.items():
if group in self.store:
grp = self.store[group]
for profileName, mappingDict in profileMap.items():
if profileName in grp:
objects = {x['object']: self.Objects[x['object']] for x in mappingDict}
self.Profiles[f"{group}/{profileName}"] = Profile(
self.sim_instance,
grp[profileName],
objects,
self.solver,
mappingDict,
self.logger,
**self.kwargs
)
else:
self.logger.warning("Group {} \ data set {} not found in the h5 store".format(
group, profileName
))
else:
self.logger.warning("Group {} not found in the h5 store".format(group))
return
def create_dataset(self, dname, pType, data ,startTime, resolution, units, info):
grp = self.store[pType]
if dname not in grp:
dset = grp.create_dataset(
dname,
data=data,
shape=(len(data),),
maxshape=(None,),
chunks=True,
compression="gzip",
compression_opts=4,
shuffle=True
)
self.createMetadata(
dset, startTime, resolution, data, units, info
)
else:
self.logger.error('Dataset "{}" already exists in group "{}".'.format(dname, pType))
raise Exception('Dataset "{}" already exists in group "{}".'.format(dname, pType))
def add_from_arrays(self, data, name, pType, startTime, resolution, units="", info=""):
r, c = data.shape
if r > c:
for i in range(c):
d = data[:, i]
dname = name if i==0 else "{}_{}".format(name, i)
self.create_dataset(dname=dname, pType=pType, data=d, startTime=startTime, resolution=resolution,
units=units, info=info)
else:
for i in range(r):
d = data[i, :]
dname = name if i==0 else "{}_{}".format(name, i)
self.create_dataset(dname=dname, pType=pType, data=d, startTime=startTime, resolution=resolution,
units=units, info=info)
return
def add_profiles_from_csv(self, csv_file, name, pType, startTime, resolution_sec=900, units="",
info=""):
data = pd.read_csv(csv_file).values
self.add_profiles(data, name, pType, startTime, resolution_sec=resolution_sec, units=units, info=info)
def add_profiles(self, data, name, pType, startTime, resolution_sec=900, units="", info=""):
if type(startTime) is not datetime.datetime:
raise InvalidParameter("startTime should be a python datetime object")
if pType not in PROFILE_TYPES.names():
raise InvalidParameter("Valid values for pType are {}".format(PROFILE_TYPES.names()))
        if data is not None and len(data) > 0:
            self.add_from_arrays(data, name, pType, startTime, resolution_sec, units=units, info=info)
self.store.flush()
return
def createMetadata(self, dSet, startTime, resolution, data, units, info):
metadata = {
"sTime": str(startTime),
"eTime": str(startTime + datetime.timedelta(seconds=resolution*len(data))),
"resTime": resolution,
"npts": len(data),
"min": min(data),
"max": max(data),
"mean": np.mean(data),
"units": units,
"info": info,
}
for key, value in metadata.items():
if isinstance(value, str):
value = np.string_(value)
dSet.attrs[key] = value
return
def remove_profile(self, profile_type, profile_name):
return
def update(self):
results = {}
for profileaName, profileObj in self.Profiles.items():
result = profileObj.update()
results[profileaName] = result
return results
class Profile(BaseProfile):
DEFAULT_SETTINGS = {
"multiplier": 1,
"normalize": False,
"interpolate": False
}
def __init__(self, sim_instance, dataset, devices, solver, mapping_dict, logger, **kwargs):
super(Profile, self).__init__(sim_instance, dataset, devices, solver, mapping_dict, logger, **kwargs)
self.valueSettings = {x['object']: {**self.DEFAULT_SETTINGS, **x} for x in mapping_dict}
self.bufferSize = kwargs["bufferSize"]
self.buffer = np.zeros(self.bufferSize)
self.profile = dataset
self.neglectYear = kwargs["neglectYear"]
self.Objects = devices
self.attrs = self.profile.attrs
self.sTime = datetime.datetime.strptime(self.attrs["sTime"].decode(), DATE_FORMAT)
self.eTime = datetime.datetime.strptime(self.attrs["eTime"].decode(), '%Y-%m-%d %H:%M:%S.%f')
self.simRes = solver.GetStepSizeSec()
self.Time = copy.deepcopy(solver.GetDateTime())
return
def update_profile_settings(self):
return
def update(self, updateObjectProperties=True):
self.Time = copy.deepcopy(self.solver.GetDateTime())
if self.Time < self.sTime or self.Time > self.eTime:
value = 0
value1 = 0
else:
dT = (self.Time - self.sTime).total_seconds()
n = int(dT / self.attrs["resTime"])
value = self.profile[n]
dT2 = (self.Time - (self.sTime + datetime.timedelta(seconds=int(n * self.attrs["resTime"])))).total_seconds()
value1 = self.profile[n] + (self.profile[n+1] - self.profile[n]) * dT2 / self.attrs["resTime"]
if updateObjectProperties:
for objName, obj in self.Objects.items():
if self.valueSettings[objName]['interpolate']:
value = value1
mult = self.valueSettings[objName]['multiplier']
if self.valueSettings[objName]['normalize']:
valueF = value / self.attrs["max"] * mult
else:
valueF = value * mult
obj.SetParameter(self.attrs["units"].decode(), valueF)
return value
```
#### File: PyDSS/PyDSS/pydss_results.py
```python
from collections import defaultdict
from datetime import datetime
import json
import logging
import os
import re
from pathlib import Path
import h5py
import numpy as np
import pandas as pd
from PyDSS.common import DatasetPropertyType
from PyDSS.dataset_buffer import DatasetBuffer
from PyDSS.element_options import ElementOptions
from PyDSS.exceptions import InvalidParameter
from PyDSS.pydss_project import PyDssProject, RUN_SIMULATION_FILENAME
from PyDSS.reports.reports import Reports, REPORTS_DIR
from PyDSS.utils.dataframe_utils import read_dataframe, write_dataframe
from PyDSS.utils.utils import dump_data, load_data, make_json_serializable, \
make_timestamps
from PyDSS.value_storage import ValueStorageBase, get_dataset_property_type, \
get_time_step_path
logger = logging.getLogger(__name__)
class PyDssResults:
"""Interface to perform analysis on PyDSS output data."""
def __init__(
self, project_path=None, project=None, in_memory=False,
frequency=False, mode=False
):
"""Constructs PyDssResults object.
Parameters
----------
project_path : str | None
Load project from files in path
project : PyDssProject | None
Existing project object
in_memory : bool
If true, load all exported data into memory.
frequency : bool
If true, add frequency column to all dataframes.
mode : bool
If true, add mode column to all dataframes.
"""
options = ElementOptions()
if project_path is not None:
# TODO: handle old version?
self._project = PyDssProject.load_project(
project_path,
simulation_file=RUN_SIMULATION_FILENAME,
)
elif project is None:
raise InvalidParameter("project_path or project must be set")
else:
self._project = project
self._fs_intf = self._project.fs_interface
self._scenarios = []
filename = self._project.get_hdf_store_filename()
driver = "core" if in_memory else None
self._hdf_store = h5py.File(filename, "r", driver=driver)
if self._project.simulation_config.exports.export_results:
for name in self._project.list_scenario_names():
metadata = self._project.read_scenario_export_metadata(name)
scenario_result = PyDssScenarioResults(
name,
self.project_path,
self._hdf_store,
self._fs_intf,
metadata,
options,
frequency=frequency,
mode=mode,
)
self._scenarios.append(scenario_result)
def generate_reports(self):
"""Generate all reports specified in the configuration.
Returns
-------
list
list of report filenames
"""
return Reports.generate_reports(self)
def read_report(self, report_name):
"""Return the report data.
Parameters
----------
report_name : str
Returns
-------
str
"""
all_reports = Reports.get_all_reports()
if report_name not in all_reports:
raise InvalidParameter(f"invalid report name {report_name}")
report_cls = all_reports[report_name]
# This bypasses self._fs_intf because reports are always extracted.
reports_dir = os.path.join(self._project.project_path, REPORTS_DIR)
for filename in os.listdir(reports_dir):
name, ext = os.path.splitext(filename)
if name == os.path.splitext(report_cls.FILENAME)[0]:
path = os.path.join(reports_dir, filename)
if ext in (".json", ".toml"):
return load_data(path)
if ext in (".csv", ".h5"):
return read_dataframe(path)
raise InvalidParameter(f"did not find report {report_name} in {reports_dir}")
@property
def project(self):
"""Return the PyDssProject instance.
Returns
-------
PyDssProject
"""
return self._project
@property
def scenarios(self):
"""Return the PyDssScenarioResults instances for the project.
Returns
-------
list
list of PyDssScenarioResults
"""
return self._scenarios
def get_scenario(self, name):
"""Return the PyDssScenarioResults object for scenario with name.
Parameters
----------
name : str
Scenario name
Results
-------
PyDssScenarioResults
Raises
------
InvalidParameter
Raised if the scenario does not exist.
"""
for scenario in self._scenarios:
if name == scenario.name:
return scenario
raise InvalidParameter(f"scenario {name} does not exist")
@property
def hdf_store(self):
"""Return a handle to the HDF data store.
Returns
-------
h5py.File
"""
return self._hdf_store
@property
def project_path(self):
"""Return the path to the PyDSS project.
Returns
-------
str
"""
return self._project.project_path
def read_file(self, path):
"""Read a file from the PyDSS project.
Parameters
----------
path : str
Path to the file relative from the project directory.
Returns
-------
str
Contents of the file
"""
return self._fs_intf.read_file(path)
@property
def simulation_config(self):
"""Return the simulation configuration
Returns
-------
dict
"""
return self._project.simulation_config
class PyDssScenarioResults:
"""Contains results for one scenario."""
def __init__(
self, name, project_path, store, fs_intf, metadata, options,
frequency=False, mode=False
):
self._name = name
self._project_path = project_path
self._hdf_store = store
self._metadata = metadata or {}
self._options = options
self._fs_intf = fs_intf
self._elems_by_class = defaultdict(set)
self._elem_data_by_prop = defaultdict(dict)
self._elem_values_by_prop = defaultdict(dict)
self._elem_indices_by_prop = defaultdict(dict)
self._props_by_class = defaultdict(list)
self._elem_props = defaultdict(list)
self._column_ranges_per_elem = defaultdict(dict)
self._summed_elem_props = defaultdict(dict)
self._summed_elem_timeseries_props = defaultdict(list)
self._indices_df = None
self._add_frequency = frequency
self._add_mode = mode
self._data_format_version = self._hdf_store.attrs["version"]
if name not in self._hdf_store["Exports"]:
self._group = None
return
self._group = self._hdf_store[f"Exports/{name}"]
self._elem_classes = [
x for x in self._group if isinstance(self._group[x], h5py.Group)
]
self._parse_datasets()
def _parse_datasets(self):
for elem_class in self._elem_classes:
class_group = self._group[elem_class]
if "ElementProperties" in class_group:
prop_group = class_group["ElementProperties"]
for prop, dataset in prop_group.items():
dataset_property_type = get_dataset_property_type(dataset)
if dataset_property_type == DatasetPropertyType.TIME_STEP:
continue
if dataset_property_type == DatasetPropertyType.VALUE:
self._elem_values_by_prop[elem_class][prop] = []
prop_names = self._elem_values_by_prop
elif dataset_property_type in (
DatasetPropertyType.PER_TIME_POINT,
DatasetPropertyType.FILTERED,
):
self._elem_data_by_prop[elem_class][prop] = []
prop_names = self._elem_data_by_prop
else:
continue
self._props_by_class[elem_class].append(prop)
self._elem_indices_by_prop[elem_class][prop] = {}
names = DatasetBuffer.get_names(dataset)
self._column_ranges_per_elem[elem_class][prop] = \
DatasetBuffer.get_column_ranges(dataset)
for i, name in enumerate(names):
self._elems_by_class[elem_class].add(name)
prop_names[elem_class][prop].append(name)
self._elem_indices_by_prop[elem_class][prop][name] = i
self._elem_props[name].append(prop)
else:
self._elems_by_class[elem_class] = set()
summed_elem_props = self._group[elem_class].get("SummedElementProperties", [])
for prop in summed_elem_props:
dataset = self._group[elem_class]["SummedElementProperties"][prop]
dataset_property_type = get_dataset_property_type(dataset)
if dataset_property_type == DatasetPropertyType.VALUE:
df = DatasetBuffer.to_dataframe(dataset)
assert len(df) == 1
self._summed_elem_props[elem_class][prop] = {
x: df[x].values[0] for x in df.columns
}
elif dataset_property_type == DatasetPropertyType.PER_TIME_POINT:
self._summed_elem_timeseries_props[elem_class].append(prop)
@staticmethod
def get_name_from_column(column):
"""Return the element name from the dataframe column. The dataframe should have been
returned from this class.
Parameters
----------
column : str
Returns
-------
str
"""
fields = column.split(ValueStorageBase.DELIMITER)
assert len(fields) > 1
return fields[0]
@property
def name(self):
"""Return the name of the scenario.
Returns
-------
str
"""
return self._name
def export_data(self, path=None, fmt="csv", compress=False):
"""Export data to path.
Parameters
----------
path : str
Output directory; defaults to scenario exports path
fmt : str
Filer format type (csv, h5)
compress : bool
Compress data
"""
if path is None:
path = os.path.join(self._project_path, "Exports", self._name)
os.makedirs(path, exist_ok=True)
self._export_element_timeseries(path, fmt, compress)
self._export_element_values(path, fmt, compress)
self._export_summed_element_timeseries(path, fmt, compress)
self._export_summed_element_values(path, fmt, compress)
def _export_element_timeseries(self, path, fmt, compress):
for elem_class in self.list_element_classes():
for prop in self.list_element_properties(elem_class):
dataset = self._group[f"{elem_class}/ElementProperties/{prop}"]
prop_type = get_dataset_property_type(dataset)
if prop_type == DatasetPropertyType.FILTERED:
self._export_filtered_dataframes(elem_class, prop, path, fmt, compress)
else:
df = self.get_full_dataframe(elem_class, prop)
base = "__".join([elem_class, prop])
filename = os.path.join(path, base + "." + fmt.replace(".", ""))
write_dataframe(df, filename, compress=compress)
def _export_element_values(self, path, fmt, compress):
elem_prop_nums = defaultdict(dict)
for elem_class in self._elem_values_by_prop:
for prop in self._elem_values_by_prop[elem_class]:
dataset = self._group[f"{elem_class}/ElementProperties/{prop}"]
for name in self._elem_values_by_prop[elem_class][prop]:
col_range = self._get_element_column_range(elem_class, prop, name)
start = col_range[0]
length = col_range[1]
if length == 1:
val = dataset[:][0][start]
else:
val = dataset[:][0][start: start + length]
if prop not in elem_prop_nums[elem_class]:
elem_prop_nums[elem_class][prop] = {}
elem_prop_nums[elem_class][prop][name] = val
if elem_prop_nums:
filename = os.path.join(path, "element_property_values.json")
dump_data(elem_prop_nums, filename, indent=2, default=make_json_serializable)
logger.info("Exported data to %s", path)
def _export_filtered_dataframes(self, elem_class, prop, path, fmt, compress):
for name, df in self.get_filtered_dataframes(elem_class, prop).items():
if df.empty:
logger.debug("Skip empty dataframe %s %s %s", elem_class, prop, name)
continue
base = "__".join([elem_class, prop, name])
filename = os.path.join(path, base + "." + fmt.replace(".", ""))
write_dataframe(df, filename, compress=compress)
def _export_summed_element_timeseries(self, path, fmt, compress):
for elem_class in self._summed_elem_timeseries_props:
for prop in self._summed_elem_timeseries_props[elem_class]:
fields = prop.split(ValueStorageBase.DELIMITER)
if len(fields) == 1:
base = ValueStorageBase.DELIMITER.join([elem_class, prop])
else:
assert len(fields) == 2, fields
# This will be <elem_class>__<prop>__<group>
base = ValueStorageBase.DELIMITER.join([elem_class, prop])
filename = os.path.join(path, base + "." + fmt.replace(".", ""))
dataset = self._group[elem_class]["SummedElementProperties"][prop]
prop_type = get_dataset_property_type(dataset)
if prop_type == DatasetPropertyType.PER_TIME_POINT:
df = DatasetBuffer.to_dataframe(dataset)
self._finalize_dataframe(df, dataset)
write_dataframe(df, filename, compress=compress)
def _export_summed_element_values(self, path, fmt, compress):
filename = os.path.join(path, "summed_element_property_values.json")
dump_data(self._summed_elem_props, filename, default=make_json_serializable)
def get_dataframe(self, element_class, prop, element_name, real_only=False, abs_val=False, **kwargs):
"""Return the dataframe for an element.
Parameters
----------
element_class : str
prop : str
element_name : str
real_only : bool
If dtype of any column is complex, drop the imaginary component.
abs_val : bool
If dtype of any column is complex, compute its absolute value.
kwargs
Filter on options; values can be strings or regular expressions.
Returns
-------
pd.DataFrame
Raises
------
InvalidParameter
Raised if the element is not stored.
"""
if element_name not in self._elem_props:
raise InvalidParameter(f"element {element_name} is not stored")
dataset = self._group[f"{element_class}/ElementProperties/{prop}"]
prop_type = get_dataset_property_type(dataset)
if prop_type == DatasetPropertyType.PER_TIME_POINT:
return self._get_elem_prop_dataframe(
element_class, prop, element_name, dataset, real_only=real_only,
abs_val=abs_val, **kwargs
)
elif prop_type == DatasetPropertyType.FILTERED:
return self._get_filtered_dataframe(
element_class, prop, element_name, dataset, real_only=real_only,
abs_val=abs_val, **kwargs
)
assert False, str(prop_type)
def get_filtered_dataframes(self, element_class, prop, real_only=False, abs_val=False):
"""Return the dataframes for all elements.
Calling this is much more efficient than calling get_dataframe for each
element.
Parameters
----------
element_class : str
prop : str
element_name : str
real_only : bool
If dtype of any column is complex, drop the imaginary component.
abs_val : bool
If dtype of any column is complex, compute its absolute value.
Returns
-------
dict
key = str (name), val = pd.DataFrame
The dict will be empty if no data was stored.
"""
if prop not in self.list_element_properties(element_class):
logger.debug("%s/%s is not stored", element_class, prop)
return {}
dataset = self._group[f"{element_class}/ElementProperties/{prop}"]
columns = DatasetBuffer.get_columns(dataset)
names = DatasetBuffer.get_names(dataset)
length = dataset.attrs["length"]
indices_df = self._get_indices_df()
data_vals = dataset[:length]
elem_data = defaultdict(list)
elem_timestamps = defaultdict(list)
# The time_step_dataset has these columns:
# 1. time step index
# 2. element index
# Each row describes the source data in the dataset row.
path = dataset.attrs["time_step_path"]
assert length == self._hdf_store[path].attrs["length"]
time_step_data = self._hdf_store[path][:length]
for i in range(length):
ts_index = time_step_data[:, 0][i]
elem_index = time_step_data[:, 1][i]
# TODO DT: more than one column?
val = data_vals[i, 0]
if real_only:
val = val.real
elif abs_val:
val = abs(val)
elem_data[elem_index].append(val)
elem_timestamps[elem_index].append(indices_df.iloc[ts_index, 0])
dfs = {}
for elem_index, vals in elem_data.items():
elem_name = names[elem_index]
cols = self._fix_columns(elem_name, columns)
dfs[elem_name] = pd.DataFrame(
vals,
columns=cols,
index=elem_timestamps[elem_index],
)
return dfs
def get_full_dataframe(self, element_class, prop, real_only=False, abs_val=False, **kwargs):
"""Return a dataframe containing all data. The dataframe is copied.
Parameters
----------
element_class : str
prop : str
real_only : bool
If dtype of any column is complex, drop the imaginary component.
abs_val : bool
If dtype of any column is complex, compute its absolute value.
kwargs
Filter on options; values can be strings or regular expressions.
Returns
-------
pd.DataFrame
"""
if prop not in self.list_element_properties(element_class):
raise InvalidParameter(f"property {prop} is not stored")
dataset = self._group[f"{element_class}/ElementProperties/{prop}"]
df = DatasetBuffer.to_dataframe(dataset)
if kwargs:
options = self._check_options(element_class, prop, **kwargs)
names = self._elems_by_class.get(element_class, set())
columns = ValueStorageBase.get_columns(df, names, options, **kwargs)
columns = list(columns)
columns.sort()
df = df[columns]
self._finalize_dataframe(df, dataset, real_only=real_only, abs_val=abs_val)
return df
def get_summed_element_total(self, element_class, prop, group=None):
"""Return the total value for a summed element property.
Parameters
----------
element_class : str
prop : str
group : str | None
Specify a group name if sum_groups was assigned.
Returns
-------
dict
Raises
------
InvalidParameter
Raised if the element class is not stored.
"""
if group is not None:
prop = ValueStorageBase.DELIMITER.join((prop, group))
if element_class not in self._summed_elem_props:
raise InvalidParameter(f"{element_class} is not stored")
if prop not in self._summed_elem_props[element_class]:
raise InvalidParameter(f"{prop} is not stored")
return self._summed_elem_props[element_class][prop]
def get_element_property_value(self, element_class, prop, element_name):
"""Return the number stored for the element property."""
if element_class not in self._elem_values_by_prop:
raise InvalidParameter(f"{element_class} is not stored")
if prop not in self._elem_values_by_prop[element_class]:
raise InvalidParameter(f"{prop} is not stored")
if element_name not in self._elem_values_by_prop[element_class][prop]:
raise InvalidParameter(f"{element_name} is not stored")
dataset = self._group[f"{element_class}/ElementProperties/{prop}"]
col_range = self._get_element_column_range(element_class, prop, element_name)
start = col_range[0]
length = col_range[1]
if length == 1:
return dataset[:][0][start]
return dataset[:][0][start: start + length]
def get_option_values(self, element_class, prop, element_name):
"""Return the option values for the element property.
element_class : str
prop : str
element_name : str
Returns
-------
list
"""
df = self.get_dataframe(element_class, prop, element_name)
return ValueStorageBase.get_option_values(df, element_name)
def get_summed_element_dataframe(self, element_class, prop, real_only=False, abs_val=False, group=None):
"""Return the dataframe for a summed element property.
Parameters
----------
element_class : str
prop : str
group : str | None
Specify a group name if sum_groups was assigned.
real_only : bool
If dtype of any column is complex, drop the imaginary component.
abs_val : bool
If dtype of any column is complex, compute its absolute value.
Returns
-------
pd.DataFrame
Raises
------
InvalidParameter
Raised if the element class is not stored.
"""
if group is not None:
prop = ValueStorageBase.DELIMITER.join((prop, group))
if element_class not in self._summed_elem_timeseries_props:
raise InvalidParameter(f"{element_class} is not stored")
if prop not in self._summed_elem_timeseries_props[element_class]:
raise InvalidParameter(f"{prop} is not stored")
elem_group = self._group[element_class]["SummedElementProperties"]
dataset = elem_group[prop]
df = DatasetBuffer.to_dataframe(dataset)
self._add_indices_to_dataframe(df)
if real_only:
for column in df.columns:
if df[column].dtype == complex:
df[column] = [x.real for x in df[column]]
elif abs_val:
for column in df.columns:
if df[column].dtype == complex:
df[column] = [abs(x) for x in df[column]]
return df
def iterate_dataframes(self, element_class, prop, real_only=False, abs_val=False, **kwargs):
"""Returns a generator over the dataframes by element name.
Parameters
----------
element_class : str
prop : str
real_only : bool
If dtype of any column is complex, drop the imaginary component.
abs_val : bool
If dtype of any column is complex, compute its absolute value.
kwargs : dict
Filter on options; values can be strings or regular expressions.
Returns
-------
tuple
            Tuple containing the element name and a pd.DataFrame
"""
for name in self.list_element_names(element_class):
if prop in self._elem_props[name]:
df = self.get_dataframe(
element_class, prop, name, real_only=real_only, abs_val=abs_val, **kwargs
)
yield name, df
def iterate_element_property_values(self):
"""Return a generator over all element properties stored as values.
Yields
------
tuple
element_class, property, element_name, value
"""
for elem_class in self._elem_values_by_prop:
for prop in self._elem_values_by_prop[elem_class]:
for name in self._elem_values_by_prop[elem_class][prop]:
val = self.get_element_property_value(elem_class, prop, name)
yield elem_class, prop, name, val
def list_element_classes(self):
"""Return the element classes stored in the results.
Returns
-------
list
"""
return self._elem_classes[:]
def list_element_names(self, element_class, prop=None):
"""Return the element names for a property stored in the results.
Parameters
----------
element_class : str
prop : str
Returns
-------
list
"""
# TODO: prop is deprecated
return sorted(list(self._elems_by_class.get(element_class, [])))
def list_element_properties(self, element_class, element_name=None):
"""Return the properties stored in the results for a class.
Parameters
----------
element_class : str
element_name : str | None
If not None, list properties only for that name.
Returns
-------
list
"""
if element_class not in self._props_by_class:
return []
if element_name is None:
return sorted(list(self._props_by_class[element_class]))
return self._elem_props.get(element_name, [])
def list_element_value_names(self, element_class, prop):
if element_class not in self._elem_values_by_prop:
raise InvalidParameter(f"{element_class} is not stored")
if prop not in self._elem_values_by_prop[element_class]:
raise InvalidParameter(f"{element_class} / {prop} is not stored")
return sorted(self._elem_values_by_prop[element_class][prop])
def list_element_property_values(self, element_name):
nums = []
for elem_class in self._elem_prop_nums:
for prop in self._elem_prop_nums[elem_class]:
for name in self._elem_prop_nums[elem_class][prop]:
if name == element_name:
nums.append(prop)
return nums
def list_element_property_options(self, element_class, prop):
"""List the possible options for the element class and property.
Parameters
----------
element_class : str
prop : str
Returns
-------
list
"""
return self._options.list_options(element_class, prop)
def list_element_info_files(self):
"""Return the files describing the OpenDSS element objects.
Returns
-------
list
list of filenames (str)
"""
return self._metadata.get("element_info_files", [])
def list_summed_element_properties(self, element_class):
"""Return the properties stored for a class where the values are a sum
of all elements.
Parameters
----------
element_class : str
Returns
-------
list
Raises
------
InvalidParameter
Raised if the element_class is not stored.
"""
if element_class not in self._summed_elem_props:
raise InvalidParameter(f"class={element_class} is not stored")
return self._summed_elem_props[element_class]
def list_summed_element_time_series_properties(self, element_class):
"""Return the properties stored for a class where the values are a sum
of all elements.
Parameters
----------
element_class : str
Returns
-------
list
Raises
------
InvalidParameter
Raised if the element_class is not stored.
"""
if element_class not in self._summed_elem_timeseries_props:
raise InvalidParameter(f"class={element_class} is not stored")
return self._summed_elem_timeseries_props[element_class]
def read_element_info_file(self, filename):
"""Return the contents of file describing an OpenDSS element object.
Parameters
----------
filename : str
full path to a file (returned by list_element_info_files) or
an element class, like "Transformers"
Returns
-------
pd.DataFrame
"""
if "." not in filename:
actual = None
for _file in self.list_element_info_files():
basename = os.path.splitext(os.path.basename(_file))[0]
if basename.replace("Info", "") == filename:
actual = _file
if actual is None:
raise InvalidParameter(
f"element info file for {filename} is not stored"
)
filename = actual
return self._fs_intf.read_csv(filename)
def read_capacitor_changes(self):
"""Read the capacitor state changes from the OpenDSS event log.
Returns
-------
dict
Maps capacitor names to count of state changes.
"""
text = self.read_file(self._metadata.get("event_log", ""))
return _read_capacitor_changes(text)
def read_event_log(self):
"""Returns the event log for the scenario.
Returns
-------
list
list of dictionaries (one dict for each row in the file)
"""
text = self.read_file(self._metadata.get("event_log", ""))
return _read_event_log(text)
def read_pv_profiles(self):
"""Returns exported PV profiles for all PV systems.
Returns
-------
dict
"""
return self._fs_intf.read_scenario_pv_profiles(self._name)
def _check_options(self, element_class, prop, **kwargs):
"""Checks that kwargs are valid and returns available option names."""
for option in kwargs:
if not self._options.is_option_valid(element_class, prop, option):
raise InvalidParameter(
f"class={element_class} property={prop} option={option} is invalid"
)
return self._options.list_options(element_class, prop)
def read_feeder_head_info(self):
"""Read the feeder head information.
Returns
-------
dict
"""
return json.loads(self.read_file(f"Exports/{self._name}/FeederHeadInfo.json"))
def read_file(self, path):
"""Read a file from the PyDSS project.
Parameters
----------
path : str
Path to the file relative from the project directory.
Returns
-------
str
Contents of the file
"""
return self._fs_intf.read_file(path)
def _add_indices_to_dataframe(self, df):
indices_df = self._get_indices_df()
df["Timestamp"] = indices_df["Timestamp"]
if self._add_frequency:
df["Frequency"] = indices_df["Frequency"]
if self._add_mode:
df["Simulation Mode"] = indices_df["Simulation Mode"]
df.set_index("Timestamp", inplace=True)
def _finalize_dataframe(self, df, dataset, real_only=False, abs_val=False):
if df.empty:
return
dataset_property_type = get_dataset_property_type(dataset)
if dataset_property_type == DatasetPropertyType.FILTERED:
time_step_path = get_time_step_path(dataset)
time_step_dataset = self._hdf_store[time_step_path]
df["TimeStep"] = DatasetBuffer.to_datetime(time_step_dataset)
df.set_index("TimeStep", inplace=True)
else:
self._add_indices_to_dataframe(df)
if real_only:
for column in df.columns:
if df[column].dtype == complex:
df[column] = [x.real for x in df[column]]
elif abs_val:
for column in df.columns:
if df[column].dtype == complex:
df[column] = [abs(x) for x in df[column]]
@staticmethod
def _fix_columns(name, columns):
cols = []
for column in columns:
fields = column.split(ValueStorageBase.DELIMITER)
fields[0] = name
cols.append(ValueStorageBase.DELIMITER.join(fields))
return cols
def _get_elem_prop_dataframe(self, elem_class, prop, name, dataset, real_only=False, abs_val=False, **kwargs):
col_range = self._get_element_column_range(elem_class, prop, name)
df = DatasetBuffer.to_dataframe(dataset, column_range=col_range)
if kwargs:
options = self._check_options(elem_class, prop, **kwargs)
columns = ValueStorageBase.get_columns(df, name, options, **kwargs)
df = df[columns]
self._finalize_dataframe(df, dataset, real_only=real_only, abs_val=abs_val)
return df
def _get_element_column_range(self, elem_class, prop, name):
elem_index = self._elem_indices_by_prop[elem_class][prop][name]
col_range = self._column_ranges_per_elem[elem_class][prop][elem_index]
return col_range
def _get_filtered_dataframe(self, elem_class, prop, name, dataset,
real_only=False, abs_val=False, **kwargs):
indices_df = self._get_indices_df()
elem_index = self._elem_indices_by_prop[elem_class][prop][name]
length = dataset.attrs["length"]
data_vals = dataset[:length]
# The time_step_dataset has these columns:
# 1. time step index
# 2. element index
# Each row describes the source data in the dataset row.
path = dataset.attrs["time_step_path"]
time_step_data = self._hdf_store[path][:length]
assert length == self._hdf_store[path].attrs["length"]
data = []
timestamps = []
for i in range(length):
stored_elem_index = time_step_data[:, 1][i]
if stored_elem_index == elem_index:
ts_index = time_step_data[:, 0][i]
# TODO DT: more than one column?
val = data_vals[i, 0]
# TODO: profile this vs a df operation at end
if real_only:
val = val.real
elif abs_val:
val = abs(val)
data.append(val)
timestamps.append(indices_df.iloc[ts_index, 0])
columns = self._fix_columns(name, DatasetBuffer.get_columns(dataset))
return pd.DataFrame(data, columns=columns, index=timestamps)
def _get_indices_df(self):
if self._indices_df is None:
self._make_indices_df()
return self._indices_df
def _make_indices_df(self):
data = {
"Timestamp": make_timestamps(self._group["Timestamp"][:, 0])
}
if self._add_frequency:
data["Frequency"] = self._group["Frequency"][:, 0]
if self._add_mode:
data["Simulation Mode"] = self._group["Mode"][:, 0]
df = pd.DataFrame(data)
self._indices_df = df
def _read_capacitor_changes(event_log_text):
"""Read the capacitor state changes from an OpenDSS event log.
Parameters
----------
event_log_text : str
Text of event log
Returns
-------
dict
Maps capacitor names to count of state changes.
"""
capacitor_changes = {}
regex = re.compile(r"(Capacitor\.\w+)")
data = _read_event_log(event_log_text)
for row in data:
match = regex.search(row["Element"])
if match:
name = match.group(1)
if name not in capacitor_changes:
capacitor_changes[name] = 0
action = row["Action"].replace("*", "")
if action in ("OPENED", "CLOSED", "STEP UP"):
capacitor_changes[name] += 1
return capacitor_changes
def _read_event_log(event_log_text):
"""Return OpenDSS event log information.
Parameters
----------
event_log_text : str
Text of event log
Returns
-------
list
list of dictionaries (one dict for each row in the file)
"""
data = []
if not event_log_text:
return data
for line in event_log_text.split("\n"):
if line == "":
continue
tokens = [x.strip() for x in line.split(",")]
row = {}
for token in tokens:
name_and_value = [x.strip() for x in token.split("=")]
name = name_and_value[0]
value = name_and_value[1]
row[name] = value
data.append(row)
return data
```
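A hedged usage sketch of the results interface above. The project path, element class, property, and element name are placeholders; `list_element_classes` and `list_element_properties` can be used to discover what a given project actually stores.
```python
from PyDSS.pydss_results import PyDssResults

results = PyDssResults("path/to/pydss_project")   # hypothetical project path
scenario = results.scenarios[0]
print(scenario.list_element_classes())
# "Lines", "Currents" and "Line.line1" are hypothetical names.
df = scenario.get_dataframe("Lines", "Currents", "Line.line1", abs_val=True)
print(df.head())
```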
#### File: PyDSS/pyPostprocessor/pyPostprocess.py
```python
from os.path import dirname, basename, isfile
import glob
from PyDSS.pyPostprocessor import PostprocessScripts
modules = glob.glob(PostprocessScripts.__path__[0]+"/*.py")
pythonFiles = [basename(f)[:-3] for f in modules if isfile(f) and not f.endswith('__init__.py')]
POST_PROCESSES = {}
for file in pythonFiles:
exec('from PyDSS.pyPostprocessor.PostprocessScripts import {}'.format(file))
exec('POST_PROCESSES["{}"] = {}.{}'.format(file, file, file))
def Create(project, scenario, ppInfo, dssInstance, dssSolver, dssObjects, dssObjectsByClass, simulationSettings, Logger):
test = None
PostProcessorClass = None
ScriptName = ppInfo.script
assert (ScriptName in pythonFiles), \
f"Definition for '{ScriptName}' post process script not found. \n" \
"Please define the controller in PyDSS/pyPostprocessor/PostprocessScripts"
PostProcessor = POST_PROCESSES[ScriptName](
project,
scenario,
ppInfo,
dssInstance,
dssSolver,
dssObjects,
dssObjectsByClass,
simulationSettings,
Logger,
)
return PostProcessor
```
#### File: PyDSS/utils/pydss_utils.py
```python
import numpy as np
import math
def form_Yprim(values):
dimension = int(math.sqrt(len(values) / 2))
Yprim = np.array([[complex(0, 0)] * dimension] * dimension)
for ii in range(dimension):
for jj in range(dimension):
Yprim[ii][jj] = complex(values[dimension * ii * 2 + 2 * jj], values[dimension * ii * 2 + 2 * jj + 1])
return Yprim
def form_Yprim_2(values):
Ydim = int(math.sqrt(len(values) / 2))
Yreal = np.array(values[0::2]).reshape((Ydim, Ydim))
Yimag = np.array(values[1::2]).reshape((Ydim, Ydim))
Yprim = Yreal + 1j * Yimag
return Yprim
def get_Yprime_Matrix(dssObjects):
Elements = dssObjects["Lines"] + dssObjects["Transformers"]
nElements = len(Elements)
Ybranch_prim = np.array([[complex(0, 0)] * 2 * nElements] * 2 * nElements)
``` |
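A short worked example of the interleaved layout `form_Yprim_2` above expects: `values` stores the primitive admittance matrix row by row as real, imaginary pairs, so eight floats describe a 2x2 complex matrix.
```python
import numpy as np

from PyDSS.utils.pydss_utils import form_Yprim_2

# Row-major, interleaved real/imag pairs -> [[1+0.5j, 2+0j], [3-1j, 4+2.5j]]
values = [1.0, 0.5, 2.0, 0.0,
          3.0, -1.0, 4.0, 2.5]
Yprim = form_Yprim_2(values)
assert np.allclose(Yprim, np.array([[1 + 0.5j, 2 + 0.0j],
                                    [3 - 1.0j, 4 + 2.5j]]))
```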
{
"source": "jmorici/Homework-11",
"score": 3
} |
#### File: jmorici/Homework-11/app.py
```python
from flask import Flask, render_template, redirect
from flask_pymongo import PyMongo
import scrape_mars
app = Flask(__name__)
app.config["MONGO_URI"] = "mongodb://localhost:27017/myDatabase"
mongo = PyMongo(app)
@app.route("/")
def home():
mars = list(mongo.db.collection.find())[-1]
return render_template("index.html", mars=mars)
@app.route("/scrape")
def scrape():
mars = scrape_mars.scrape()
mars_dict = {
"news_title": mars["news_title"],
"news_p": mars["news_p"],
"featured_image_url": mars["featured_image_url"],
"mars_weather": mars["mars_weather"],
"table_html": mars["table_html"],
"hemisphere_image_urls": mars["hemisphere_image_urls"]
}
mongo.db.collection.insert_one(mars_dict)
return redirect("http://localhost:5000/", code=302)
if __name__ == "__main__":
app.run(debug=True)
```
#### File: jmorici/Homework-11/scrape_mars.py
```python
from bs4 import BeautifulSoup
from splinter import Browser
import pandas as pd
def scrape():
executable_path = {'executable_path': '/usr/local/bin/chromedriver'}
browser = Browser('chrome', **executable_path, headless=False)
url = 'https://mars.nasa.gov/news/?page=0&per_page=40&order=publish_date+desc%2Ccreated_at+desc&search=&category=19%2C165%2C184%2C204&blank_scope=Latest'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
latest_news = soup.find("li", class_="slide")
news_title = latest_news.find("h3").text
news_p = latest_news.find(class_="article_teaser_body").text
url = 'https://www.jpl.nasa.gov/spaceimages/?search=&category=Mars'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
soup.find(class_="main_feature").footer.a["data-fancybox-href"]
base_url = "https://www.jpl.nasa.gov"
style = soup.find(class_="main_feature").find(class_="carousel_items").article["style"]
featured_image_url = base_url + style.split("url")[1].strip(";(')")
url = 'https://twitter.com/marswxreport?lang=en'
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
mars_weather = soup.find("li", class_="js-stream-item").find("p", class_="tweet-text").text
url = 'https://space-facts.com/mars/'
table = pd.read_html(url)[0]
table.rename(columns={0:"metric", 1:"value"}, inplace=True)
table_html = table.to_html(index=False)
table_html = table_html.replace('\n', '')
table_html = table_html.replace("<table border=\"1\" class=\"dataframe\">", "").replace("</table>", "").strip()
url_parent = 'https://astrogeology.usgs.gov/search/results?q=hemisphere+enhanced&k1=target&v1=Mars'
browser.visit(url_parent)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
base_url = "https://astrogeology.usgs.gov"
links = [base_url + item.find(class_="description").a["href"] for item in soup.find_all("div", class_="item")]
hemisphere_image_urls = []
for url in links:
browser.visit(url)
html = browser.html
soup = BeautifulSoup(html, 'html.parser')
title = soup.find("div", class_="content").find("h2", class_="title").text.replace(" Enhanced", "")
img_url = base_url + soup.find("img", class_="wide-image")["src"]
hemisphere_image_urls.append({"title": title, "img_url": img_url})
browser.quit()
mars = {
"news_title": news_title,
"news_p": news_p,
"featured_image_url": featured_image_url,
"mars_weather": mars_weather,
"table_html": table_html,
"hemisphere_image_urls": hemisphere_image_urls
}
return mars
``` |
{
"source": "jmorim/mudpi-core",
"score": 3
} |
#### File: sensors/arduino/float_sensor.py
```python
import time
import datetime
import json
import redis
from .sensor import Sensor
from nanpy import (ArduinoApi, SerialManager)
import sys
sys.path.append('..')
import variables
default_connection = SerialManager(device='/dev/ttyUSB0')
#r = redis.Redis(host='127.0.0.1', port=6379)
class FloatSensor(Sensor):
def __init__(self, pin, name='FloatSensor', key=None, connection=default_connection):
super().__init__(pin, name=name, key=key, connection=connection)
return
def init_sensor(self):
# read data using pin specified pin
self.api.pinMode(self.pin, self.api.INPUT)
def read(self):
value = self.api.digitalRead(self.pin)
variables.r.set(self.key, value)
return value
def readRaw(self):
return self.read()
if __name__ == '__main__':
try:
        loop_count = 10
        sensor = FloatSensor(9)  # create the sensor once, outside the loop
        while (loop_count > 0):
            value = sensor.read()
            print('Float: ', value)
            loop_count -= 1
            time.sleep(3)
except KeyboardInterrupt:
pass
finally:
print('Float Sensor Closing...')
```
#### File: sensors/pi/soil_sensor.py
```python
import time
from .sensor import Sensor
from board import SCL, SDA
import busio
from adafruit_seesaw.seesaw import Seesaw
class SoilSensor(Sensor):
def __init__(self, pin, name = 'SoilSensor', key = None):
super().__init__(pin, name = name, key = key)
return
def init_sensor(self):
i2c_bus = busio.I2C(SCL, SDA)
ss = Seesaw(i2c_bus, addr = 0x36)
self.sensor = ss
return
def read(self):
moist = self.sensor.moisture_read()
temp = self.sensor.get_temp()
temp_f = round(temp * 9.0 / 5.0 + 32.0, 2)
print('Moisture:', moist)
print('Temperature:', temp_f)
#while True:
# # read moisture level through capacitive touch pad
# touch = ss.moisture_read()
#
# # read temperature from the temperature sensor
# temp = ss.get_temp()
#
# print("temp: " + str(temp) + " moisture: " + str(touch))
# time.sleep(1)
```
#### File: mudpi-core/triggers/control_trigger.py
```python
import time
import json
import redis
import sys
from .trigger import Trigger
sys.path.append('..')
import variables
class ControlTrigger(Trigger):
def __init__(self, main_thread_running, system_ready,\
name='ControlTrigger',key=None, source=None, thresholds=None,\
channel="controls", trigger_active=None, frequency='once',\
actions=[], group=None):
super().__init__(main_thread_running, system_ready, name=name, key=key,\
source=source, thresholds=thresholds,\
trigger_active=trigger_active, frequency=frequency,\
actions=actions, trigger_interval=0.5, group=group)
self.channel = channel.replace(" ", "_").lower() if channel is not None else "controls"
return
def init_trigger(self):
#Initialize the trigger here (i.e. set listeners or create cron jobs)
#Pubsub Listeners
self.pubsub = variables.r.pubsub()
self.pubsub.subscribe(**{self.channel: self.handleEvent})
pass
def check(self):
while self.main_thread_running.is_set():
if self.system_ready.is_set():
super().check()
self.pubsub.get_message()
# self.trigger_active.clear()
time.sleep(self.trigger_interval)
else:
time.sleep(2)
return
def handleEvent(self, message):
data = message['data']
if data is not None:
decoded_message = super().decodeEventData(data)
try:
if decoded_message['event'] == 'ControlUpdate':
control_value = self.parseControlData(decoded_message["data"])
if super().evaluateThresholds(control_value):
self.trigger_active.set()
if self.previous_state != self.trigger_active.is_set():
super().trigger(decoded_message['event'])
else:
if self.frequency == 'many':
super().trigger(decoded_message['event'])
else:
self.trigger_active.clear()
            except Exception:
print('Error During Trigger Actions {0}'.format(self.key))
self.previous_state = self.trigger_active.is_set()
def parseControlData(self, data):
parsed_data = data.get(self.source, None)
return parsed_data
def shutdown(self):
self.pubsub.close()
return
``` |
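The trigger above reacts to messages published on a Redis pub/sub channel. As a hedged sketch (inferred from `handleEvent` and `parseControlData`, not from MudPi documentation, and assuming `decodeEventData` JSON-decodes the message body), a matching `ControlUpdate` event could be published like this for a trigger built with `source='toggle_1'` and the default `controls` channel:
```python
# Hedged sketch: publish an event in the shape handleEvent() expects.
import json
import redis

r = redis.Redis(host='127.0.0.1', port=6379)
payload = {
    'event': 'ControlUpdate',          # handleEvent only acts on this event type
    'data': {'toggle_1': 1}            # parseControlData reads data[self.source]
}
r.publish('controls', json.dumps(payload))
```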
{
"source": "jmorlana/pixloc",
"score": 2
} |
#### File: pixloc/localization/model3d.py
```python
import logging
from collections import defaultdict
from typing import Dict, List, Optional
import numpy as np
from ..utils.colmap import read_model
from ..utils.quaternions import weighted_pose
logger = logging.getLogger(__name__)
class Model3D:
def __init__(self, path):
logger.info('Reading COLMAP model %s.', path)
self.cameras, self.dbs, self.points3D = read_model(path)
self.name2id = {i.name: i.id for i in self.dbs.values()}
def covisbility_filtering(self, dbids):
clusters = do_covisibility_clustering(dbids, self.dbs, self.points3D)
dbids = clusters[0]
return dbids
def pose_approximation(self, qname, dbids, global_descriptors, alpha=8):
"""Described in:
Benchmarking Image Retrieval for Visual Localization.
<NAME>, <NAME>, <NAME>,
<NAME>, <NAME>. 3DV 2020.
"""
dbs = [self.dbs[i] for i in dbids]
dbdescs = np.stack([global_descriptors[im.name] for im in dbs])
qdesc = global_descriptors[qname]
sim = dbdescs @ qdesc
weights = sim**alpha
weights /= weights.sum()
tvecs = [im.tvec for im in dbs]
qvecs = [im.qvec for im in dbs]
return weighted_pose(tvecs, qvecs, weights)
def get_dbid_to_p3dids(self, p3did_to_dbids):
"""Link the database images to selected 3D points."""
dbid_to_p3dids = defaultdict(list)
for p3id, obs_dbids in p3did_to_dbids.items():
for obs_dbid in obs_dbids:
dbid_to_p3dids[obs_dbid].append(p3id)
return dict(dbid_to_p3dids)
def get_p3did_to_dbids(self, dbids: List, loc: Optional[Dict] = None,
inliers: Optional[List] = None,
point_selection: str = 'all',
min_track_length: int = 3):
"""Return a dictionary mapping 3D point ids to their covisible dbids.
        This function can use hloc sfm logs to restrict the points to inliers,
        which can then be used to re-rank the reference images or to keep only
        points with a sufficient track length.
"""
p3did_to_dbids = defaultdict(set)
if point_selection == 'all':
for dbid in dbids:
p3dids = self.dbs[dbid].point3D_ids
for p3did in p3dids[p3dids != -1]:
p3did_to_dbids[p3did].add(dbid)
elif point_selection in ['inliers', 'matched']:
if loc is None:
                raise ValueError(f'"{point_selection}" point selection requires'
' localization logs.')
# The given SfM model must match the localization SfM model!
for (p3did, dbidxs), inlier in zip(loc["keypoint_index_to_db"][1],
inliers):
if inlier or point_selection == 'matched':
obs_dbids = set(loc["db"][dbidx] for dbidx in dbidxs)
obs_dbids &= set(dbids)
if len(obs_dbids) > 0:
p3did_to_dbids[p3did] |= obs_dbids
else:
raise ValueError(f"{point_selection} point selection not defined.")
# Filter unstable points (min track length)
p3did_to_dbids = {
i: v
for i, v in p3did_to_dbids.items()
if len(self.points3D[i].image_ids) >= min_track_length
}
return p3did_to_dbids
def rerank_and_filter_db_images(self, dbids: List, ninl_dbs: List,
num_dbs: int, min_matches_db: int = 0):
"""Re-rank the images by inlier count and filter invalid images."""
dbids = [dbids[i] for i in np.argsort(-ninl_dbs)
if ninl_dbs[i] > min_matches_db]
        # Keep only the top num_dbs matched images
dbids = dbids[:num_dbs]
return dbids
def get_db_inliers(self, loc: Dict, dbids: List, inliers: List):
"""Get the number of inliers for each db."""
inliers = loc["PnP_ret"]["inliers"]
dbids = loc["db"]
ninl_dbs = np.zeros(len(dbids))
for (_, dbidxs), inl in zip(loc["keypoint_index_to_db"][1], inliers):
if not inl:
continue
for dbidx in dbidxs:
ninl_dbs[dbidx] += 1
return ninl_dbs
def do_covisibility_clustering(frame_ids, all_images, points3D):
clusters = []
visited = set()
for frame_id in frame_ids:
# Check if already labeled
if frame_id in visited:
continue
# New component
clusters.append([])
queue = {frame_id}
while len(queue):
exploration_frame = queue.pop()
# Already part of the component
if exploration_frame in visited:
continue
visited.add(exploration_frame)
clusters[-1].append(exploration_frame)
observed = all_images[exploration_frame].point3D_ids
connected_frames = set(
j for i in observed if i != -1 for j in points3D[i].image_ids)
connected_frames &= set(frame_ids)
connected_frames -= visited
queue |= connected_frames
clusters = sorted(clusters, key=len, reverse=True)
return clusters
```
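A hedged usage sketch of the class above. The model path, image names, and descriptor dictionary are placeholders; the calls mirror only what is defined in this file and require a real COLMAP model on disk to run.
```python
# Hypothetical usage sketch; all paths and names are placeholders.
import numpy as np

model = Model3D('outputs/sfm_superpoint+superglue')
dbids = [model.name2id[n] for n in ('db/001.jpg', 'db/002.jpg', 'db/003.jpg')]

# Keep only the largest covisible cluster of retrieved images.
dbids = model.covisbility_filtering(dbids)

# Approximate the query pose as a similarity-weighted average of db poses
# (weights are descriptor similarities raised to the power alpha).
global_descriptors = {name: np.random.rand(1024) for name in
                      ['query.jpg', 'db/001.jpg', 'db/002.jpg', 'db/003.jpg']}
approx_pose = model.pose_approximation('query.jpg', dbids, global_descriptors)
```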
#### File: pixlib/datasets/view.py
```python
from pathlib import Path
import numpy as np
import cv2
# TODO: consider using PIL instead of OpenCV as it is heavy and only used here
import torch
from ..geometry import Camera, Pose
def numpy_image_to_torch(image):
"""Normalize the image tensor and reorder the dimensions."""
if image.ndim == 3:
image = image.transpose((2, 0, 1)) # HxWxC to CxHxW
elif image.ndim == 2:
image = image[None] # add channel axis
else:
raise ValueError(f'Not an image: {image.shape}')
return torch.from_numpy(image / 255.).float()
def read_image(path, grayscale=False):
mode = cv2.IMREAD_GRAYSCALE if grayscale else cv2.IMREAD_COLOR
image = cv2.imread(str(path), mode)
if image is None:
raise IOError(f'Could not read image at {path}.')
if not grayscale:
image = image[..., ::-1]
return image
def resize(image, size, fn=None, interp='linear'):
"""Resize an image to a fixed size, or according to max or min edge."""
h, w = image.shape[:2]
if isinstance(size, int):
scale = size / fn(h, w)
h_new, w_new = int(round(h*scale)), int(round(w*scale))
# TODO: we should probably recompute the scale like in the second case
scale = (scale, scale)
elif isinstance(size, (tuple, list)):
h_new, w_new = size
scale = (w_new / w, h_new / h)
else:
raise ValueError(f'Incorrect new size: {size}')
mode = {
'linear': cv2.INTER_LINEAR,
'cubic': cv2.INTER_CUBIC,
'nearest': cv2.INTER_NEAREST}[interp]
return cv2.resize(image, (w_new, h_new), interpolation=mode), scale
def crop(image, size, *, random=True, other=None, camera=None,
return_bbox=False, centroid=None):
"""Random or deterministic crop of an image, adjust depth and intrinsics.
"""
h, w = image.shape[:2]
h_new, w_new = (size, size) if isinstance(size, int) else size
if random:
top = np.random.randint(0, h - h_new + 1)
left = np.random.randint(0, w - w_new + 1)
elif centroid is not None:
x, y = centroid
top = np.clip(int(y) - h_new // 2, 0, h - h_new)
left = np.clip(int(x) - w_new // 2, 0, w - w_new)
else:
top = left = 0
image = image[top:top+h_new, left:left+w_new]
ret = [image]
if other is not None:
ret += [other[top:top+h_new, left:left+w_new]]
if camera is not None:
ret += [camera.crop((left, top), (w_new, h_new))]
if return_bbox:
ret += [(top, top+h_new, left, left+w_new)]
return ret
def zero_pad(size, *images):
ret = []
for image in images:
h, w = image.shape[:2]
padded = np.zeros((size, size)+image.shape[2:], dtype=image.dtype)
padded[:h, :w] = image
ret.append(padded)
return ret
def read_view(conf, image_path: Path, camera: Camera, T_w2cam: Pose,
p3D: np.ndarray, p3D_idxs: np.ndarray, *,
rotation=0, random=False):
img = read_image(image_path, conf.grayscale)
img = img.astype(np.float32)
name = image_path.name
# we assume that the pose and camera were already rotated during preprocess
if rotation != 0:
img = np.rot90(img, rotation)
if conf.resize:
scales = (1, 1)
if conf.resize_by == 'max':
img, scales = resize(img, conf.resize, fn=max)
elif (conf.resize_by == 'min' or
(conf.resize_by == 'min_if'
and min(*img.shape[:2]) < conf.resize)):
img, scales = resize(img, conf.resize, fn=min)
if scales != (1, 1):
camera = camera.scale(scales)
if conf.crop:
if conf.optimal_crop:
p2D, valid = camera.world2image(T_w2cam * p3D[p3D_idxs])
p2D = p2D[valid].numpy()
centroid = tuple(p2D.mean(0)) if len(p2D) > 0 else None
random = False
else:
centroid = None
img, camera, bbox = crop(
img, conf.crop, random=random,
camera=camera, return_bbox=True, centroid=centroid)
elif conf.pad:
img, = zero_pad(conf.pad, img)
# we purposefully do not update the image size in the camera object
data = {
'name': name,
'image': numpy_image_to_torch(img),
'camera': camera.float(),
'T_w2cam': T_w2cam.float(),
}
return data
```
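A short hedged sketch of the helpers above chained together; the image path is a placeholder.
```python
# Hypothetical sketch of the read/resize/pad/convert pipeline defined above.
import numpy as np

img = read_image('query.jpg')                 # uint8 RGB array (BGR flipped)
img = img.astype(np.float32)
img, scale = resize(img, 512, fn=max)         # longest edge resized to 512 px
img, = zero_pad(512, img)                     # pad to a square 512x512 canvas
tensor = numpy_image_to_torch(img)            # CxHxW float tensor in [0, 1]
```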
#### File: pixlib/models/base_optimizer.py
```python
import logging
from typing import Tuple, Dict, Optional
import torch
from torch import Tensor
from .base_model import BaseModel
from .utils import masked_mean
from ..geometry import Camera, Pose
from ..geometry.optimization import optimizer_step
from ..geometry.interpolation import Interpolator
from ..geometry.costs import DirectAbsoluteCost
from ..geometry import losses # noqa
from ...utils.tools import torchify
logger = logging.getLogger(__name__)
class BaseOptimizer(BaseModel):
default_conf = dict(
num_iters=100,
loss_fn='squared_loss',
jacobi_scaling=False,
normalize_features=False,
lambda_=0, # Gauss-Newton
interpolation=dict(
mode='linear',
pad=4,
),
grad_stop_criteria=1e-4,
dt_stop_criteria=5e-3, # in meters
dR_stop_criteria=5e-2, # in degrees
# deprecated entries
sqrt_diag_damping=False,
bound_confidence=True,
no_conditions=True,
verbose=False,
)
logging_fn = None
def _init(self, conf):
self.loss_fn = eval('losses.' + conf.loss_fn)
self.interpolator = Interpolator(**conf.interpolation)
self.cost_fn = DirectAbsoluteCost(self.interpolator,
normalize=conf.normalize_features)
assert conf.lambda_ >= 0.
# deprecated entries
assert not conf.sqrt_diag_damping
assert conf.bound_confidence
assert conf.no_conditions
assert not conf.verbose
def log(self, **args):
if self.logging_fn is not None:
self.logging_fn(**args)
def early_stop(self, **args):
stop = False
if not self.training and (args['i'] % 10) == 0:
T_delta, grad = args['T_delta'], args['grad']
grad_norm = torch.norm(grad.detach(), dim=-1)
small_grad = grad_norm < self.conf.grad_stop_criteria
dR, dt = T_delta.magnitude()
small_step = ((dt < self.conf.dt_stop_criteria)
& (dR < self.conf.dR_stop_criteria))
if torch.all(small_step | small_grad):
stop = True
return stop
def J_scaling(self, J: Tensor, J_scaling: Tensor, valid: Tensor):
if J_scaling is None:
J_norm = torch.norm(J.detach(), p=2, dim=(-2))
J_norm = masked_mean(J_norm, valid[..., None], -2)
J_scaling = 1 / (1 + J_norm)
J = J * J_scaling[..., None, None, :]
return J, J_scaling
def build_system(self, J: Tensor, res: Tensor, weights: Tensor):
grad = torch.einsum('...ndi,...nd->...ni', J, res) # ... x N x 6
grad = weights[..., None] * grad
grad = grad.sum(-2) # ... x 6
Hess = torch.einsum('...ijk,...ijl->...ikl', J, J) # ... x N x 6 x 6
Hess = weights[..., None, None] * Hess
        Hess = Hess.sum(-3)  # ... x 6 x 6
return grad, Hess
def _forward(self, data: Dict):
return self._run(
data['p3D'], data['F_ref'], data['F_q'], data['T_init'],
data['cam_q'], data['mask'], data.get('W_ref_q'))
@torchify
def run(self, *args, **kwargs):
return self._run(*args, **kwargs)
def _run(self, p3D: Tensor, F_ref: Tensor, F_query: Tensor,
T_init: Pose, camera: Camera, mask: Optional[Tensor] = None,
W_ref_query: Optional[Tuple[Tensor, Tensor]] = None):
T = T_init
J_scaling = None
if self.conf.normalize_features:
F_ref = torch.nn.functional.normalize(F_ref, dim=-1)
args = (camera, p3D, F_ref, F_query, W_ref_query)
failed = torch.full(T.shape, False, dtype=torch.bool, device=T.device)
for i in range(self.conf.num_iters):
res, valid, w_unc, _, J = self.cost_fn.residual_jacobian(T, *args)
if mask is not None:
valid &= mask
failed = failed | (valid.long().sum(-1) < 10) # too few points
# compute the cost and aggregate the weights
cost = (res**2).sum(-1)
cost, w_loss, _ = self.loss_fn(cost)
weights = w_loss * valid.float()
if w_unc is not None:
weights *= w_unc
if self.conf.jacobi_scaling:
J, J_scaling = self.J_scaling(J, J_scaling, valid)
# solve the linear system
g, H = self.build_system(J, res, weights)
delta = optimizer_step(g, H, self.conf.lambda_, mask=~failed)
if self.conf.jacobi_scaling:
delta = delta * J_scaling
# compute the pose update
dt, dw = delta.split([3, 3], dim=-1)
T_delta = Pose.from_aa(dw, dt)
T = T_delta @ T
self.log(i=i, T_init=T_init, T=T, T_delta=T_delta, cost=cost,
valid=valid, w_unc=w_unc, w_loss=w_loss, H=H, J=J)
if self.early_stop(i=i, T_delta=T_delta, grad=g, cost=cost):
break
if failed.any():
logger.debug('One batch element had too few valid points.')
return T, failed
def loss(self, pred, data):
raise NotImplementedError
def metrics(self, pred, data):
raise NotImplementedError
```
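The einsums in `build_system` assemble the weighted Gauss-Newton system: with residuals r_n, Jacobians J_n, and per-point weights w_n, the gradient is sum_n w_n J_n^T r_n and the approximate Hessian is sum_n w_n J_n^T J_n. A small standalone sketch (hypothetical shapes, independent of the model classes above) checks the contractions against an explicit loop:
```python
# Standalone sketch: verify the einsum contractions used in build_system.
import torch

N, D = 50, 32                               # points, feature dimension
J = torch.randn(N, D, 6, dtype=torch.float64)
res = torch.randn(N, D, dtype=torch.float64)
w = torch.rand(N, dtype=torch.float64)

grad = (w[:, None] * torch.einsum('ndi,nd->ni', J, res)).sum(0)        # sum_n w_n J_n^T r_n
Hess = (w[:, None, None] * torch.einsum('njk,njl->nkl', J, J)).sum(0)  # sum_n w_n J_n^T J_n

grad_ref = sum(w[n] * J[n].T @ res[n] for n in range(N))
Hess_ref = sum(w[n] * J[n].T @ J[n] for n in range(N))
assert torch.allclose(grad, grad_ref) and torch.allclose(Hess, Hess_ref)
```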
#### File: pixlib/models/classic_optimizer.py
```python
import logging
from typing import Tuple, Optional
import torch
from torch import Tensor
from .base_optimizer import BaseOptimizer
from .utils import masked_mean
from ..geometry import Camera, Pose
from ..geometry.optimization import optimizer_step
from ..geometry import losses # noqa
logger = logging.getLogger(__name__)
class ClassicOptimizer(BaseOptimizer):
default_conf = dict(
lambda_=1e-2,
lambda_max=1e4,
)
def _run(self, p3D: Tensor, F_ref: Tensor, F_query: Tensor,
T_init: Pose, camera: Camera, mask: Optional[Tensor] = None,
W_ref_query: Optional[Tuple[Tensor, Tensor]] = None):
T = T_init
J_scaling = None
if self.conf.normalize_features:
F_ref = torch.nn.functional.normalize(F_ref, dim=-1)
args = (camera, p3D, F_ref, F_query, W_ref_query)
failed = torch.full(T.shape, False, dtype=torch.bool, device=T.device)
lambda_ = torch.full_like(failed, self.conf.lambda_, dtype=T.dtype)
mult = torch.full_like(lambda_, 10)
recompute = True
# compute the initial cost
with torch.no_grad():
res, valid_i, w_unc_i = self.cost_fn.residuals(T_init, *args)[:3]
cost_i = self.loss_fn((res.detach()**2).sum(-1))[0]
if w_unc_i is not None:
cost_i *= w_unc_i.detach()
            if mask is not None:
                valid_i &= mask
cost_best = masked_mean(cost_i, valid_i, -1)
for i in range(self.conf.num_iters):
if recompute:
res, valid, w_unc, _, J = self.cost_fn.residual_jacobian(
T, *args)
if mask is not None:
valid &= mask
failed = failed | (valid.long().sum(-1) < 10) # too few points
cost = (res**2).sum(-1)
cost, w_loss, _ = self.loss_fn(cost)
weights = w_loss * valid.float()
if w_unc is not None:
weights *= w_unc
if self.conf.jacobi_scaling:
J, J_scaling = self.J_scaling(J, J_scaling, valid)
g, H = self.build_system(J, res, weights)
            delta = optimizer_step(g, H, lambda_.unsqueeze(-1), mask=~failed)
if self.conf.jacobi_scaling:
delta = delta * J_scaling
dt, dw = delta.split([3, 3], dim=-1)
T_delta = Pose.from_aa(dw, dt)
T_new = T_delta @ T
# compute the new cost and update if it decreased
with torch.no_grad():
                res = self.cost_fn.residuals(T_new, *args)[0]
cost_new = self.loss_fn((res**2).sum(-1))[0]
cost_new = masked_mean(cost_new, valid, -1)
accept = cost_new < cost_best
lambda_ = lambda_ * torch.where(accept, 1/mult, mult)
lambda_ = lambda_.clamp(max=self.conf.lambda_max, min=1e-7)
T = Pose(torch.where(accept[..., None], T_new._data, T._data))
cost_best = torch.where(accept, cost_new, cost_best)
recompute = accept.any()
self.log(i=i, T_init=T_init, T=T, T_delta=T_delta, cost=cost,
valid=valid, w_unc=w_unc, w_loss=w_loss, accept=accept,
lambda_=lambda_, H=H, J=J)
stop = self.early_stop(i=i, T_delta=T_delta, grad=g, cost=cost)
if self.conf.lambda_ == 0: # Gauss-Newton
stop |= (~recompute)
else: # LM saturates
stop |= bool(torch.all(lambda_ >= self.conf.lambda_max))
if stop:
break
if failed.any():
logger.debug('One batch element had too few valid points.')
return T, failed
```
#### File: pixlib/models/__init__.py
```python
from ..utils.tools import get_class
from .base_model import BaseModel
def get_model(name):
return get_class(name, __name__, BaseModel)
```
#### File: pixlib/models/utils.py
```python
import torch
def masked_mean(x, mask, dim):
mask = mask.float()
return (mask * x).sum(dim) / mask.sum(dim).clamp(min=1)
def checkpointed(cls, do=True):
'''Adapted from the DISK implementation of <NAME>.'''
assert issubclass(cls, torch.nn.Module)
class Checkpointed(cls):
def forward(self, *args, **kwargs):
super_fwd = super(Checkpointed, self).forward
if any((torch.is_tensor(a) and a.requires_grad) for a in args):
return torch.utils.checkpoint.checkpoint(
super_fwd, *args, **kwargs)
else:
return super_fwd(*args, **kwargs)
return Checkpointed if do else cls
```
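A tiny hedged example of `masked_mean`: it averages only the entries where the mask is true, and the clamp avoids division by zero when no entry is valid.
```python
# Sketch: masked_mean averages over the valid entries only.
import torch

x = torch.tensor([1., 2., 3., 4.])
mask = torch.tensor([True, False, True, False])
print(masked_mean(x, mask, dim=0))                              # tensor(2.) == (1 + 3) / 2
print(masked_mean(x, torch.zeros(4, dtype=torch.bool), dim=0))  # tensor(0.) thanks to the clamp
```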
#### File: pixloc/pixloc/run_RobotCar.py
```python
import pickle
from pathlib import Path
from . import set_logging_debug, logger
from .localization import RetrievalLocalizer, PoseLocalizer
from .utils.data import Paths, create_argparser, parse_paths, parse_conf
from .utils.io import write_pose_results, concat_results
default_paths = Paths(
query_images='images/',
reference_images='images/',
reference_sfm='sfm_superpoint+superglue/',
query_list='{condition}_queries_with_intrinsics.txt',
global_descriptors='robotcar_ov-ref_tf-netvlad.h5',
retrieval_pairs='pairs-query-netvlad10-percam-perloc.txt',
results='pixloc_RobotCar_{condition}.txt',
)
experiment = 'pixloc_cmu'
default_confs = {
'from_retrieval': {
'experiment': experiment,
'features': {},
'optimizer': {
'num_iters': 100,
'pad': 2,
},
'refinement': {
'num_dbs': 2,
'point_selection': 'all',
'normalize_descriptors': True,
'average_observations': False,
'filter_covisibility': False,
'do_pose_approximation': False,
},
},
'from_pose': {
'experiment': experiment,
'features': {},
'optimizer': {
'num_iters': 100,
'pad': 2,
},
'refinement': {
'num_dbs': 5,
'min_points_opt': 100,
'point_selection': 'inliers',
'normalize_descriptors': True,
'average_observations': False,
'layer_indices': [0, 1],
},
},
}
CONDITIONS = ['dawn', 'dusk', 'night', 'night-rain', 'overcast-summer',
'overcast-winter', 'rain', 'snow', 'sun']
def generate_query_list(paths, condition):
h, w = 1024, 1024
intrinsics_filename = 'intrinsics/{}_intrinsics.txt'
cameras = {}
for side in ['left', 'right', 'rear']:
with open(paths.dataset / intrinsics_filename.format(side), 'r') as f:
fx = f.readline().split()[1]
fy = f.readline().split()[1]
cx = f.readline().split()[1]
cy = f.readline().split()[1]
assert fx == fy
params = ['SIMPLE_RADIAL', w, h, fx, cx, cy, 0.0]
cameras[side] = [str(p) for p in params]
queries = sorted((paths.query_images / condition).glob('**/*.jpg'))
queries = [str(q.relative_to(paths.query_images)) for q in queries]
out = [[q] + cameras[Path(q).parent.name] for q in queries]
with open(paths.query_list, 'w') as f:
f.write('\n'.join(map(' '.join, out)))
def main():
parser = create_argparser('RobotCar')
parser.add_argument('--conditions', default=CONDITIONS, choices=CONDITIONS,
nargs='+')
args = parser.parse_intermixed_args()
set_logging_debug(args.verbose)
paths = parse_paths(args, default_paths)
conf = parse_conf(args, default_confs)
logger.info('Will evaluate %s conditions.', len(args.conditions))
all_results = []
for condition in args.conditions:
logger.info('Working on condition %s.', condition)
paths_cond = paths.interpolate(condition=condition)
all_results.append(paths_cond.results)
if paths_cond.results.exists():
continue
if not paths_cond.query_list.exists():
generate_query_list(paths_cond, condition)
if args.from_poses:
localizer = PoseLocalizer(paths_cond, conf)
else:
localizer = RetrievalLocalizer(paths_cond, conf)
poses, logs = localizer.run_batched(skip=args.skip)
write_pose_results(poses, paths_cond.results, prepend_camera_name=True)
with open(f'{paths_cond.results}_logs.pkl', 'wb') as f:
pickle.dump(logs, f)
output_path = concat_results(
all_results, args.conditions, paths.results, 'condition')
logger.info(
'Finished evaluating all conditions, you can now submit the file %s to'
' https://www.visuallocalization.net/submission/', output_path)
if __name__ == '__main__':
main()
```
#### File: pixloc/visualization/animation.py
```python
from pathlib import Path
from typing import Optional, List
import logging
import shutil
import json
import io
import base64
import cv2
import numpy as np
import matplotlib.pyplot as plt
from .viz_2d import save_plot
from ..localization import Model3D
from ..pixlib.geometry import Pose, Camera
from ..utils.quaternions import rotmat2qvec
logger = logging.getLogger(__name__)
try:
import ffmpeg
except ImportError:
logger.info('Cannot import ffmpeg.')
def subsample_steps(T_w2q: Pose, p2d_q: np.ndarray, mask_q: np.ndarray,
camera_size: np.ndarray, thresh_dt: float = 0.1,
thresh_px: float = 0.005) -> List[int]:
"""Subsample steps of the optimization based on camera or point
displacements. Main use case: compress an animation
but keep it smooth and interesting.
"""
mask = mask_q.any(0)
dp2ds = np.linalg.norm(np.diff(p2d_q, axis=0), axis=-1)
dp2ds = np.median(dp2ds[:, mask], 1)
dts = (T_w2q[:-1] @ T_w2q[1:].inv()).magnitude()[0].numpy()
assert len(dts) == len(dp2ds)
thresh_dp2 = camera_size.min()*thresh_px # from percent to pixel
num = len(dp2ds)
keep = []
count_dp2 = 0
count_dt = 0
for i, dp2 in enumerate(dp2ds):
count_dp2 += dp2
count_dt += dts[i]
if (i == 0 or i == (num-1)
or count_dp2 >= thresh_dp2 or count_dt >= thresh_dt):
count_dp2 = 0
count_dt = 0
keep.append(i)
return keep
class VideoWriter:
"""Write frames sequentially as images, create a video, and clean up."""
def __init__(self, tmp_dir: Path, ext='.jpg'):
self.tmp_dir = Path(tmp_dir)
self.ext = ext
self.count = 0
if self.tmp_dir.exists():
shutil.rmtree(self.tmp_dir)
self.tmp_dir.mkdir(parents=True)
def add_frame(self):
save_plot(self.tmp_dir / f'{self.count:0>5}{self.ext}')
plt.close()
self.count += 1
def to_video(self, out_path: Path, duration: Optional[float] = None,
fps: int = 5, crf: int = 23, verbose: bool = False):
assert self.count > 0
if duration is not None:
fps = self.count / duration
frames = self.tmp_dir / f'*{self.ext}'
logger.info('Running ffmpeg.')
(
ffmpeg
.input(frames, pattern_type='glob', framerate=fps)
.filter('crop', 'trunc(iw/2)*2', 'trunc(ih/2)*2')
.output(out_path, crf=crf, vcodec='libx264', pix_fmt='yuv420p')
.run(overwrite_output=True, quiet=not verbose)
)
shutil.rmtree(self.tmp_dir)
def display_video(path: Path):
from IPython.display import HTML
# prevent jupyter from caching the video file
data = io.open(path, 'r+b').read()
encoded = base64.b64encode(data).decode('ascii')
return HTML(f"""
<video width="100%" controls autoplay loop>
<source src="data:video/mp4;base64,{encoded}" type="video/mp4">
</video>
""")
def frustum_points(camera: Camera) -> np.ndarray:
"""Compute the corners of the frustum of a camera object."""
W, H = camera.size.numpy()
corners = np.array([[0, 0], [W, 0], [W, H], [0, H],
[0, 0], [W/2, -H/5], [W, 0]])
corners = (corners - camera.c.numpy()) / camera.f.numpy()
return corners
def copy_compress_image(source: Path, target: Path, quality: int = 50):
"""Read an image and write it to a low-quality jpeg."""
image = cv2.imread(str(source))
cv2.imwrite(str(target), image, [int(cv2.IMWRITE_JPEG_QUALITY), quality])
def format_json(x, decimals: int = 3):
"""Control the precision of numpy float arrays, convert boolean to int."""
if isinstance(x, np.ndarray):
if np.issubdtype(x.dtype, np.floating):
if x.shape != (4,): # qvec
x = np.round(x, decimals=decimals)
        elif x.dtype == np.bool_:
x = x.astype(int)
return x.tolist()
if isinstance(x, float):
return round(x, decimals)
if isinstance(x, dict):
return {k: format_json(v) for k, v in x.items()}
if isinstance(x, (list, tuple)):
return [format_json(v) for v in x]
return x
def create_viz_dump(assets: Path, paths: Path, cam_q: Camera, name_q: str,
T_w2q: Pose, mask_q: np.ndarray, p2d_q: np.ndarray,
ref_ids: List[int], model3d: Model3D, p3d_ids: np.ndarray,
tfm: np.ndarray = np.eye(3)):
assets.mkdir(parents=True, exist_ok=True)
dump = {
'p3d': {},
'T': {},
'camera': {},
'image': {},
'p2d': {},
}
p3d = np.stack([model3d.points3D[i].xyz for i in p3d_ids], 0)
dump['p3d']['colors'] = [model3d.points3D[i].rgb for i in p3d_ids]
dump['p3d']['xyz'] = p3d @ tfm.T
dump['T']['refs'] = []
dump['camera']['refs'] = []
dump['image']['refs'] = []
dump['p2d']['refs'] = []
for idx, ref_id in enumerate(ref_ids):
ref = model3d.dbs[ref_id]
cam_r = Camera.from_colmap(model3d.cameras[ref.camera_id])
T_w2r = Pose.from_colmap(ref)
qtvec = (rotmat2qvec(T_w2r.R.numpy() @ tfm.T), T_w2r.t.numpy())
dump['T']['refs'].append(qtvec)
dump['camera']['refs'].append(frustum_points(cam_r))
tmp_name = f'ref{idx}.jpg'
dump['image']['refs'].append(tmp_name)
copy_compress_image(
paths.reference_images / ref.name, assets / tmp_name)
p2d_, valid_ = cam_r.world2image(T_w2r * p3d)
p2d_ = p2d_[valid_ & mask_q.any(0)] / cam_r.size
dump['p2d']['refs'].append(p2d_.numpy())
qtvec_q = [(rotmat2qvec(T.R.numpy() @ tfm.T), T.t.numpy()) for T in T_w2q]
dump['T']['query'] = qtvec_q
dump['camera']['query'] = frustum_points(cam_q)
p2d_q_norm = [np.asarray(p[v]/cam_q.size) for p, v in zip(p2d_q, mask_q)]
dump['p2d']['query'] = p2d_q_norm[-1]
tmp_name = 'query.jpg'
dump['image']['query'] = tmp_name
copy_compress_image(paths.query_images / name_q, assets / tmp_name)
with open(assets / 'dump.json', 'w') as fid:
json.dump(format_json(dump), fid, separators=(',', ':'))
# We dump 2D points as a separate json because it is much heavier
# and thus slower to load.
dump_p2d = {
'query': p2d_q_norm,
'masks': np.asarray(mask_q),
}
with open(assets / 'dump_p2d.json', 'w') as fid:
json.dump(format_json(dump_p2d), fid, separators=(',', ':'))
```
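A hedged sketch of `VideoWriter`; it relies on `save_plot` from `viz_2d` saving the current matplotlib figure, and on the ffmpeg-python package plus an ffmpeg binary being available.
```python
# Hypothetical usage sketch for VideoWriter.
import numpy as np
import matplotlib.pyplot as plt

writer = VideoWriter('tmp_frames')               # temporary directory for the frames
x = np.linspace(0, 2 * np.pi, 200)
for i in range(20):
    plt.figure()
    plt.plot(np.sin(x + i / 5))
    writer.add_frame()                           # saves the current figure, then closes it
writer.to_video('demo.mp4', fps=10)              # encodes the frames and removes tmp_frames
```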
#### File: pixloc/visualization/viz_3d.py
```python
import plotly.graph_objects as go
import numpy as np
from ..pixlib.geometry.utils import to_homogeneous
def init_figure(height=800):
"""Initialize a 3D figure."""
fig = go.Figure()
fig.update_layout(
height=height,
scene_camera=dict(
eye=dict(x=0., y=-.1, z=-2), up=dict(x=0, y=-1., z=0)),
scene=dict(
xaxis=dict(showbackground=False),
yaxis=dict(showbackground=False),
aspectmode='data', dragmode='orbit'),
margin=dict(l=0, r=0, b=0, t=0, pad=0)) # noqa E741
return fig
def plot_points(fig, pts, color='rgba(255, 0, 0, 1)', ps=2):
"""Plot a set of 3D points."""
x, y, z = pts.T
tr = go.Scatter3d(
x=x, y=y, z=z, mode='markers', marker_size=ps,
marker_color=color, marker_line_width=.2)
fig.add_trace(tr)
def plot_camera(fig, R, t, K, color='rgb(0, 0, 255)'):
"""Plot a camera as a cone with camera frustum."""
x, y, z = t
u, v, w = R @ -np.array([0, 0, 1])
tr = go.Cone(
x=[x], y=[y], z=[z], u=[u], v=[v], w=[w], anchor='tip',
showscale=False, colorscale=[[0, color], [1, color]],
sizemode='absolute')
fig.add_trace(tr)
W, H = K[0, 2]*2, K[1, 2]*2
corners = np.array([[0, 0], [W, 0], [W, H], [0, H], [0, 0]])
corners = to_homogeneous(corners) @ np.linalg.inv(K).T
corners = (corners/2) @ R.T + t
x, y, z = corners.T
tr = go.Scatter3d(
x=x, y=y, z=z, line=dict(color='rgba(0, 0, 0, .5)'),
marker=dict(size=0.0001), showlegend=False)
fig.add_trace(tr)
def create_slider_animation(fig, traces):
"""Create a slider that animates a list of traces (e.g. 3D points)."""
slider = {'steps': []}
frames = []
fig.add_trace(traces[0])
idx = len(fig.data) - 1
for i, tr in enumerate(traces):
frames.append(go.Frame(name=str(i), traces=[idx], data=[tr]))
step = {"args": [
[str(i)],
{"frame": {"redraw": True},
"mode": "immediate"}],
"label": i,
"method": "animate"}
slider['steps'].append(step)
fig.frames = tuple(frames)
fig.layout.sliders = (slider,)
``` |
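A hedged usage sketch for the plotting helpers above; the point cloud, intrinsics, and pose are made up.
```python
# Hypothetical usage sketch for the 3D plotting helpers.
import numpy as np

fig = init_figure(height=600)
plot_points(fig, np.random.randn(500, 3), ps=2)

K = np.array([[500., 0., 320.],
              [0., 500., 240.],
              [0., 0., 1.]])
plot_camera(fig, R=np.eye(3), t=np.array([0., 0., -3.]), K=K)
fig.show()
```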
{
"source": "jmoro0408/OOP_Python",
"score": 3
} |
#### File: OOP_Python/Pumps/parameters.py
```python
import numpy as np
import matplotlib.pyplot as plt
from pathlib import Path
from datetime import datetime
# TODO - fix legend
# TODO - Combine system curve and pump curve plot. https://stackoverflow.com/questions/36204644/what-is-the-best-way-of-combining-two-independent-plots-with-matplotlib
# TODO - Add capability to provide custom AOR and POR points
# TODO - Add auto duty point based on system and pump curves
class Pump:
default_speeds = [90, 80, 70, 60, 50]
flow = None
head = None
def __init__(self, make, model, impeller=None, motor=None):
self.make = make
self.model = model
self.impeller = impeller
self.motor = motor
def __repr__(self):
return f"{self.make}, {self.model}"
def fullname(self):
"""Return the pump make and model
Returns:
str: make and model of pump
"""
return self.make + " " + self.model
def define_pumpcurve(self, flow: list, head: list):
"""assigns flow and head curves to the pump
Args:
flow (list): list of flows
head (list): list of head achieved at corresponding flows
"""
self.flow = flow
self.head = head
    def define_efficiency(self, efficiency: list, efficiency_flow: list = None):
        """Add an efficiency curve to the pump. By default this assumes the efficiency
        values correspond to the flows defined in the pump head/flow curve; this can
        be overridden by providing a separate efficiency_flow list to this method.
Args:
efficiency (list): pump efficiency list
efficiency_flow (list, optional): Flow corresponding to efficiency values. Defaults to None.
"""
self.efficiency = efficiency
self.efficiency_flow = self.flow
if efficiency_flow is not None:
self.efficiency_flow = efficiency_flow
def define_npshr(self, npshr: list, npshr_flow: list = None):
"""Add a net positive suction head required (npshr) to the pump.
        By default this assumes the npshr values correspond to the flows
        defined in the pump head/flow curve; this can be overridden by
        providing a separate npshr_flow list to this method.
Args:
npshr (list): npshr values
npshr_flow (list, optional): flow corresponding to npshr. If none, this
defaults to the flow provided in the flow/head curve. Defaults to None.
"""
self.npshr = npshr
self.npshr_flow = self.flow
if npshr_flow is not None:
self.npshr_flow = npshr_flow
    def BEP(self):
        """Return the best efficiency point for a given pump: the best efficiency (%),
        followed by the corresponding flow and head.
Returns:
tuple: BEP of the pump in (efficiency, flow, head)
"""
try:
_max_efficiency_index = self.efficiency.index(max(self.efficiency))
poly = self.generate_curve_equation(
self.efficiency_flow, self.head, deg=3
) # generating flow/head curve polynomial
_max_efficiency_head = poly(self.efficiency_flow[_max_efficiency_index])
best_efficiency_point = (
max(self.efficiency),
self.efficiency_flow[_max_efficiency_index],
_max_efficiency_head,
)
except AttributeError:
print("Error: Please assign efficiency before calculating the BEP")
return None
return best_efficiency_point
    def generate_affinity(self, new_speed: int):
        """Uses pump affinity laws to create new flow/head curves for a given speed.
This function expects the self.flow values to correspond to the pump at 100%.
Args:
new_speed (int): New pump speed to create flow/head values for
Returns:
(tuple): Tuple of two lists, containing reduced flow and reduced head values
"""
flow_multplier, head_multipler = self.affinity_ratio(
new_speed
) # assumes original pump curve is at 100% speed
reduced_flow = [flow * flow_multplier for flow in self.flow]
reduced_head = [head * head_multipler for head in self.head]
return reduced_flow, reduced_head
    def generate_speed_curves(self, speeds: list = None):
        """Generate multiple speed curves for a given list of speeds.
        Default speeds are [90, 80, 70, 60, 50]%, but these can be overridden with a
        provided list of speeds.
Args:
speeds (list, optional): List of speeds to create. If none provided,
speeds of [90,80,70,60,50]% are created. Defaults to None.
Returns:
dict: dictionary of speeds and corresponding head and flow.
dict has structure {speed: ([flow], [head])}
"""
if (isinstance(speeds, int)) or (isinstance(speeds, float)):
speeds = [speeds]
_speeds = self.default_speeds # typical % speeds
if speeds is not None:
_speeds = speeds
speed_curve_dict = {} # empty dict to hold our speed data.
for speed in _speeds:
flow, head = self.generate_affinity(new_speed=speed)
_temp_dict = {speed: (flow, head)}
speed_curve_dict.update(_temp_dict)
return speed_curve_dict
def POR(self):
"""creates upper and lower preferred operating points for a given pump speed.
        This assumes Hydraulic Institute guidance (lower = 70% BEP flow, upper = 120% BEP flow).
Returns:
tuple: coordinates of upper and lower POR.
POR_upper_flow, POR_upper_head, POR_lower_flow, POR_lower_head
"""
poly = self.generate_curve_equation(
self.flow, self.head, deg=3
) # generating flow/head curve polynomial
_, BEP_flow, BEP_head = self.BEP() # disregard the best efficiency (%)
POR_lower_flow = (
0.7 * BEP_flow
) # POR lower range is 70% of the BEP (Hydraulic Institute)
POR_lower_head = poly(POR_lower_flow)
POR_upper_flow = (
1.2 * BEP_flow
) # POR upper range is 120% of the BEP (Hydraulic Institute)
POR_upper_head = poly(POR_upper_flow)
POR_dict = {
"Upper Flow": POR_upper_flow,
"Upper Head": POR_upper_head,
"Lower Flow": POR_lower_flow,
"Lower Head": POR_lower_head,
}
return POR_dict
@staticmethod
def generate_curve_equation(x: list, y: list, deg=3):
"""returns a 1d poly object for a given x and y
Args:
x (list): x values to curve fit
y (list): y values to curve fit
deg (int, optional): degree of curve. Defaults to 3.
Returns:
[poly1d]: np.poly1d object of curve
"""
coeff = np.polyfit(x, y, deg)
poly = np.poly1d(coeff)
return poly
    def generate_speeds_BEP(self, speeds: list):
        """Generate BEPs for various speeds. The argument should be a list of speeds;
        if a single speed is preferred, it can be passed as an int, which is
        automatically mapped to a single-element list.
Args:
speeds (list) : list of speeds to create BEPs for.
Returns:
dict: dictionary holding all the speed BEP data with structure: {speed: (BEP flow, BEP head)}
"""
if (isinstance(speeds, int)) or (
isinstance(speeds, float)
): # allows single speed plotting
speeds = [speeds]
BEP_speeds_dict = {}
_, BEP_flow, BEP_head = self.BEP()
for speed in speeds:
flow_multiplier, head_multiplier = self.affinity_ratio(speed)
BEP_flow_speed = BEP_flow * flow_multiplier
BEP_head_speed = BEP_head * head_multiplier
_temp_dict = {speed: (BEP_flow_speed, BEP_head_speed)}
BEP_speeds_dict.update(_temp_dict)
return BEP_speeds_dict
    def generate_speeds_POR(self, speeds: list):
        """Generate PORs for various speeds. If a single speed is preferred it can be
        passed as an int, which is automatically mapped to a single-element list.
Args:
speeds (list): list of speeds for POR points to be created for.
Returns:
dict: dictionary of speeds with corresponding POR data points. Structure:
{Speed: (POR Flow - Upper, POR head - Upper, POR Flow - Lower, POR head - Lower)}
"""
if (isinstance(speeds, int)) or (
isinstance(speeds, float)
): # allows single speed plotting
speeds = [speeds]
POR_speeds_dict = {}
POR_dict = self.POR()
for speed in speeds:
flow_multiplier, head_multiplier = self.affinity_ratio(speed)
POR_flow_speed_upper = POR_dict["Upper Flow"] * flow_multiplier
POR_head_speed_upper = POR_dict["Upper Head"] * head_multiplier
POR_flow_speed_lower = POR_dict["Lower Flow"] * flow_multiplier
POR_head_speed_lower = POR_dict["Lower Head"] * head_multiplier
_temp_dict = {
speed: (
POR_flow_speed_upper,
POR_head_speed_upper,
POR_flow_speed_lower,
POR_head_speed_lower,
)
}
POR_speeds_dict.update(_temp_dict)
return POR_speeds_dict
def affinity_ratio(self, speed: int):
"""Uses affinity laws to create flow and head multipliers for a given speed.
Args:
speed (int): new speed the ratio is to be calculated for
Returns:
flow_multiplier, head_multiplier (int, int): multipliers for flow and head
"""
flow_multiplier = speed / 100
head_multiplier = (speed / 100) ** 2
return flow_multiplier, head_multiplier
def BEP_at_speed(self, speed, print_string=False):
best_efficiency, BEP_flow_100, BEP_head_100 = self.BEP()
flow_multiplier, head_multiplier = self.affinity_ratio(speed)
BEP_flow_speed = BEP_flow_100 * flow_multiplier
BEP_head_speed = BEP_head_100 * head_multiplier
if print_string:
            print(
                f"""The best efficiency at {speed}% speed is {round(best_efficiency, 2)}%, occurring at {round(BEP_flow_speed, 2)} L/s and {round(BEP_head_speed, 2)} m"""
            )
return best_efficiency, BEP_flow_speed, BEP_head_speed
#####-----------Plotting Functions------------######
def generate_plot(self, BEP=False, POR=False):
"""Plots the 100% speed pump curve, with optional best efficiency and preferred
operating point markers
Args:
BEP (bool, optional): Plot best efficiency point. Defaults to False.
POR (bool, optional): Plot preferred operating range. Defaults to False.
Returns:
matplotlib ax object: plot of the 100% pump curve
"""
self.fig, self.ax1 = plt.subplots()
self.ax1.plot(self.flow, self.head, label="100%")
self.ax1.set_xlabel("Flow (L/s)")
self.ax1.set_ylabel("Head (m)")
self.ax1.set_title(f"Pump Curve for {self.fullname()}")
if BEP:
_, BEP_flow, BEP_head = self.BEP()
self.ax1.plot(BEP_flow, BEP_head, marker="o", label="BEP")
if POR:
POR_dict = self.POR()
self.ax1.plot(
POR_dict["Upper Flow"],
POR_dict["Upper Head"],
marker="x",
color="r",
label="POR",
)
self.ax1.plot(
POR_dict["Lower Flow"], POR_dict["Lower Head"], marker="x", color="r"
)
return self
def add_npshr(self):
"""adds an npshr plot to the plot object.
        This method requires that the generate_plot method is called first.
Also requires that some npshr data has been assigned to the pump object
Raises:
AttributeError: Raises error if no NPSHr data has been assigned to the pump object
Returns:
NPSHr plot on ax figure.
"""
if not hasattr(self, "npshr_flow"):
raise AttributeError(
"Error: Please attribute NPSHr data with this pump object before attempting to plot NPSHr"
)
elif not hasattr(self, "ax1"):
raise AttributeError(
"Error: Please call generate_plot method before adding an NPSHr plot"
)
else:
self.ax1.plot(
self.npshr_flow,
self.npshr,
linestyle="-.",
color="coral",
label="NPSHr",
)
return self
def add_efficiency(self):
"""Plots pump efficiency on a secondary y axis
Returns:
matplotlib ax figure
"""
self.ax2 = self.ax1.twinx()
self.ax2.plot(
self.efficiency_flow,
self.efficiency,
linestyle="--",
color="b",
label="Efficiency (%)",
)
self.ax2.set_ylabel("Efficiency (%)")
return self
def plot_speeds(self, speeds=None, BEP=False, POR=False):
"""plots various speed curves.
If no speeds are passed the method plots "typical" speeds (90,80,70,60,50)%.
Args:
            speeds (list, optional): If None, the default speeds are plotted. Custom
                speeds should be passed as a list. Defaults to None.
BEP (Bool, optional): If True, BEP points are plotted for the given speeds. Defaults to False.
POR (Bool|Str, optional): Plotting method for POR. Accepts True, False, "marker", "line", or "fill".
                If True or "marker" - markers are plotted at the POR limits.
                If "line" - dashed lines are drawn; if "fill" - the POR region is shaded.
                If False - no POR is plotted.
                Defaults to False.
Returns:
matplotlib ax: ax object with new speed curves added
"""
plot_params_dict = {
"_marker": "x" if (POR == "marker") or (POR == True) else "None",
"_linestyle": "dashed" if POR == "line" else "None",
"_fill": True if POR == "fill" else False,
}
if speeds is None:
speed_dict = self.generate_speed_curves()
else:
speed_dict = self.generate_speed_curves(speeds)
for key, value in speed_dict.items():
self.ax1.plot(
value[0], value[1], label=str(key) + "%", alpha=0.2, color="tab:blue"
)
if BEP:
if speeds is None:
BEP_dict = self.generate_speeds_BEP(speeds=self.default_speeds)
else:
BEP_dict = self.generate_speeds_BEP(speeds)
for key, value in BEP_dict.items():
self.ax1.plot(value[0], value[1], marker="o", color="orange")
if POR:
if speeds is None:
POR_dict = self.generate_speeds_POR(speeds=self.default_speeds)
else:
POR_dict = self.generate_speeds_POR(speeds=speeds)
# grabbing the 100% POR points. Reqd to make the line meet the 100% speed curve
upper_flows = [self.POR()["Upper Flow"]]
upper_heads = [self.POR()["Upper Head"]]
lower_flows = [self.POR()["Lower Flow"]]
lower_heads = [self.POR()["Lower Head"]]
# grabbing POR points for other speeds
for key, value in POR_dict.items():
upper_flows.append(value[0])
upper_heads.append(value[1])
lower_flows.append(value[2])
lower_heads.append(value[3])
if POR == "fill":
self.ax1.fill(
np.append(upper_flows, lower_flows[::-1]),
np.append(upper_heads, lower_heads[::-1]),
color="red",
alpha=0.2,
linewidth=0,
)
# Filling gap between POR curve and 100% speed curve
# Getting the ranges of the POR flow and creating a linear array
POR_flows = np.linspace(
self.POR()["Upper Flow"], self.POR()["Lower Flow"], 50
)
# Getting the ranges of the POR head and creating a linear array
POR_heads = np.linspace(
self.POR()["Upper Head"], self.POR()["Lower Head"], 50
)
pump_curve_coeffs = self.generate_curve_equation(self.flow, self.head)
pump_flows = pump_curve_coeffs(POR_flows)
self.ax1.fill_between(
x=POR_flows,
y1=POR_heads,
y2=pump_flows,
color="red",
alpha=0.2,
linewidth=0,
)
return self
self.ax1.plot(
upper_flows,
upper_heads,
marker=plot_params_dict["_marker"],
linestyle=plot_params_dict["_linestyle"],
color="red",
)
self.ax1.plot(
lower_flows,
lower_heads,
marker=plot_params_dict["_marker"],
linestyle=plot_params_dict["_linestyle"],
color="red",
)
return self
def add_duty(self, duty_flow, duty_head, line=False):
"""add a marker or line for a given duty point.
Args:
duty_flow (float or int): flow at duty point
duty_head (float or int): head at duty point
line (bool): if True, plots a line at the duty flow instead of a marker. Defaults to False.
Returns:
matplotlib axes object: plot with duty point added
"""
if line:
self.ax1.vlines(
duty_flow,
ymax=max(self.head),
ymin=0,
linestyles="dotted",
colors="forestgreen",
label="Duty",
)
return self
self.ax1.plot(
duty_flow,
duty_head,
marker="+",
color="forestgreen",
label="Duty Point",
linestyle="None",
)
return self
def get_legends(self):
"""gathering all the legend labels from all plots into one legend object
Returns:
matplotlib fig legend object: single legend object for all ax labels
"""
lines_labels = [ax.get_legend_handles_labels() for ax in self.fig.axes]
lines, labels = [sum(lol, []) for lol in zip(*lines_labels)]
return self.fig.legend(
lines,
labels,
bbox_to_anchor=(1, 0),
loc="lower right",
bbox_transform=self.fig.transFigure,
)
def show_plot(self, grid=True, save=False, save_dir: str = None):
self.fig.tight_layout()
self.get_legends()
if grid:
self.ax1.grid(linestyle="dotted", alpha=0.35, color="grey")
plt.show()
now = datetime.now()
now = now.strftime("%d_%m_%Y__%H_%M_%S")
filename = Path(f"Output Plot_{now}.png") # saving with date and time appended
if save:
if save_dir is None:
save_dir = Path.cwd()
self.fig.savefig(fname=Path(save_dir / filename), format="png")
print(f"Image saved as {filename} at {save_dir}")
class SystemCurve(Pump):
def __init__(self, name, flow, head):
self.name = name
self.flow = flow
self.head = head
def plot(self, ax=None):
self.fig, self.ax1 = plt.subplots()
self.ax1.plot(self.flow, self.head, label="System Curve")
self.ax1.set_xlabel("Flow (L/s)")
self.ax1.set_ylabel("Head (m)")
self.ax1.set_title(f"System Curve for {self.name}")
return self
``` |
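A hedged usage sketch for the `Pump` class above; the curve data is invented purely for illustration. The affinity laws used in `affinity_ratio` scale flow linearly with speed and head with the square of speed.
```python
# Hypothetical usage sketch; all curve data below is made up.
flow = [0, 20, 40, 60, 80, 100]     # L/s
head = [50, 48, 44, 38, 30, 20]     # m
eff = [0, 35, 62, 78, 74, 60]       # %

pump = Pump("Acme", "VX-100")
pump.define_pumpcurve(flow, head)
pump.define_efficiency(eff)

best_eff, bep_flow, bep_head = pump.BEP()
print(f"BEP: {best_eff}% at {bep_flow} L/s and {round(bep_head, 1)} m")
print(pump.affinity_ratio(80))      # flow x0.8, head x0.64 at 80% speed

pump.generate_plot(BEP=True, POR=True).plot_speeds(POR="fill").show_plot()
```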
{
"source": "jmorris0x0/zipline",
"score": 2
} |
#### File: zipline/tests/test_history.py
```python
from unittest import TestCase
from nose_parameterized import parameterized
import numpy as np
import pandas as pd
from pandas.util.testing import assert_frame_equal
from zipline.history import history
from zipline.history.history_container import HistoryContainer
from zipline.protocol import BarData
import zipline.utils.factory as factory
from zipline import TradingAlgorithm
from zipline.finance.trading import SimulationParameters, TradingEnvironment
from zipline.sources import RandomWalkSource, DataFrameSource
from .history_cases import (
HISTORY_CONTAINER_TEST_CASES,
)
# Cases are over the July 4th holiday, to ensure use of trading calendar.
# March 2013
# Su Mo Tu We Th Fr Sa
# 1 2
# 3 4 5 6 7 8 9
# 10 11 12 13 14 15 16
# 17 18 19 20 21 22 23
# 24 25 26 27 28 29 30
# 31
# April 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30
#
# May 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
#
# June 2013
# Su Mo Tu We Th Fr Sa
# 1
# 2 3 4 5 6 7 8
# 9 10 11 12 13 14 15
# 16 17 18 19 20 21 22
# 23 24 25 26 27 28 29
# 30
# July 2013
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6
# 7 8 9 10 11 12 13
# 14 15 16 17 18 19 20
# 21 22 23 24 25 26 27
# 28 29 30 31
#
# Times to be converted via:
# pd.Timestamp('2013-07-05 9:31', tz='US/Eastern').tz_convert('UTC')},
INDEX_TEST_CASES_RAW = {
'week of daily data': {
'input': {'bar_count': 5,
'frequency': '1d',
'algo_dt': '2013-07-05 9:31AM'},
'expected': [
'2013-06-28 4:00PM',
'2013-07-01 4:00PM',
'2013-07-02 4:00PM',
'2013-07-03 1:00PM',
'2013-07-05 9:31AM',
]
},
'five minutes on july 5th open': {
'input': {'bar_count': 5,
'frequency': '1m',
'algo_dt': '2013-07-05 9:31AM'},
'expected': [
'2013-07-03 12:57PM',
'2013-07-03 12:58PM',
'2013-07-03 12:59PM',
'2013-07-03 1:00PM',
'2013-07-05 9:31AM',
]
},
}
def to_timestamp(dt_str):
return pd.Timestamp(dt_str, tz='US/Eastern').tz_convert('UTC')
def convert_cases(cases):
"""
Convert raw strings to values comparable with system data.
"""
cases = cases.copy()
for case in cases.values():
case['input']['algo_dt'] = to_timestamp(case['input']['algo_dt'])
case['expected'] = pd.DatetimeIndex([to_timestamp(dt_str) for dt_str
in case['expected']])
return cases
INDEX_TEST_CASES = convert_cases(INDEX_TEST_CASES_RAW)
def get_index_at_dt(case_input):
history_spec = history.HistorySpec(
case_input['bar_count'],
case_input['frequency'],
None,
False,
daily_at_midnight=False
)
return history.index_at_dt(history_spec, case_input['algo_dt'])
class TestHistoryIndex(TestCase):
@classmethod
def setUpClass(cls):
cls.environment = TradingEnvironment.instance()
@parameterized.expand(
[(name, case['input'], case['expected'])
for name, case in INDEX_TEST_CASES.items()]
)
def test_index_at_dt(self, name, case_input, expected):
history_index = get_index_at_dt(case_input)
history_series = pd.Series(index=history_index)
expected_series = pd.Series(index=expected)
pd.util.testing.assert_series_equal(history_series, expected_series)
class TestHistoryContainer(TestCase):
@classmethod
def setUpClass(cls):
cls.env = TradingEnvironment.instance()
def bar_data_dt(self, bar_data, require_unique=True):
"""
Get a dt to associate with the given BarData object.
If require_unique == True, throw an error if multiple unique dt's are
encountered. Otherwise, return the earliest dt encountered.
"""
dts = {sid_data['dt'] for sid_data in bar_data.values()}
if require_unique and len(dts) > 1:
self.fail("Multiple unique dts ({0}) in {1}".format(dts, bar_data))
return sorted(dts)[0]
@parameterized.expand(
[(name,
case['specs'],
case['sids'],
case['dt'],
case['updates'],
case['expected'])
for name, case in HISTORY_CONTAINER_TEST_CASES.items()]
)
def test_history_container(self,
name,
specs,
sids,
dt,
updates,
expected):
for spec in specs:
# Sanity check on test input.
self.assertEqual(len(expected[spec.key_str]), len(updates))
container = HistoryContainer(
{spec.key_str: spec for spec in specs}, sids, dt
)
for update_count, update in enumerate(updates):
bar_dt = self.bar_data_dt(update)
container.update(update, bar_dt)
for spec in specs:
pd.util.testing.assert_frame_equal(
container.get_history(spec, bar_dt),
expected[spec.key_str][update_count],
check_dtype=False,
check_column_type=True,
check_index_type=True,
check_frame_type=True,
)
def test_container_nans_and_daily_roll(self):
spec = history.HistorySpec(
bar_count=3,
frequency='1d',
field='price',
ffill=True,
daily_at_midnight=False
)
specs = {spec.key_str: spec}
initial_sids = [1, ]
initial_dt = pd.Timestamp(
'2013-06-28 9:31AM', tz='US/Eastern').tz_convert('UTC')
container = HistoryContainer(
specs, initial_sids, initial_dt)
bar_data = BarData()
container.update(bar_data, initial_dt)
        # There was no backfill (no db) and no first bar of data,
        # so all values should be NaNs.
prices = container.get_history(spec, initial_dt)
nan_values = np.isnan(prices[1])
self.assertTrue(all(nan_values), nan_values)
# Add data on bar two of first day.
second_bar_dt = pd.Timestamp(
'2013-06-28 9:32AM', tz='US/Eastern').tz_convert('UTC')
bar_data[1] = {
'price': 10,
'dt': second_bar_dt
}
container.update(bar_data, second_bar_dt)
prices = container.get_history(spec, second_bar_dt)
# Prices should be
# 1
# 2013-06-26 20:00:00+00:00 NaN
# 2013-06-27 20:00:00+00:00 NaN
# 2013-06-28 13:32:00+00:00 10
self.assertTrue(np.isnan(prices[1].ix[0]))
self.assertTrue(np.isnan(prices[1].ix[1]))
self.assertEqual(prices[1].ix[2], 10)
third_bar_dt = pd.Timestamp(
'2013-06-28 9:33AM', tz='US/Eastern').tz_convert('UTC')
del bar_data[1]
container.update(bar_data, third_bar_dt)
prices = container.get_history(spec, third_bar_dt)
# The one should be forward filled
# Prices should be
# 1
# 2013-06-26 20:00:00+00:00 NaN
# 2013-06-27 20:00:00+00:00 NaN
# 2013-06-28 13:33:00+00:00 10
self.assertEquals(prices[1][third_bar_dt], 10)
# Note that we did not fill in data at the close.
# There was a bug where a nan was being introduced because of the
# last value of 'raw' data was used, instead of a ffilled close price.
day_two_first_bar_dt = pd.Timestamp(
'2013-07-01 9:31AM', tz='US/Eastern').tz_convert('UTC')
bar_data[1] = {
'price': 20,
'dt': day_two_first_bar_dt
}
container.update(bar_data, day_two_first_bar_dt)
prices = container.get_history(spec, day_two_first_bar_dt)
# Prices Should Be
# 1
# 2013-06-27 20:00:00+00:00 nan
# 2013-06-28 20:00:00+00:00 10
# 2013-07-01 13:31:00+00:00 20
self.assertTrue(np.isnan(prices[1].ix[0]))
self.assertEqual(prices[1].ix[1], 10)
self.assertEqual(prices[1].ix[2], 20)
# Clear out the bar data
del bar_data[1]
day_three_first_bar_dt = pd.Timestamp(
'2013-07-02 9:31AM', tz='US/Eastern').tz_convert('UTC')
container.update(bar_data, day_three_first_bar_dt)
prices = container.get_history(spec, day_three_first_bar_dt)
# 1
# 2013-06-28 20:00:00+00:00 10
# 2013-07-01 20:00:00+00:00 20
# 2013-07-02 13:31:00+00:00 20
        self.assertEqual(prices[1].ix[0], 10)
        self.assertEqual(prices[1].ix[1], 20)
        self.assertEqual(prices[1].ix[2], 20)
day_four_first_bar_dt = pd.Timestamp(
'2013-07-03 9:31AM', tz='US/Eastern').tz_convert('UTC')
container.update(bar_data, day_four_first_bar_dt)
prices = container.get_history(spec, day_four_first_bar_dt)
# 1
# 2013-07-01 20:00:00+00:00 20
# 2013-07-02 20:00:00+00:00 20
# 2013-07-03 13:31:00+00:00 20
self.assertEqual(prices[1].ix[0], 20)
self.assertEqual(prices[1].ix[1], 20)
self.assertEqual(prices[1].ix[2], 20)
class TestHistoryAlgo(TestCase):
def setUp(self):
np.random.seed(123)
def test_history_daily(self):
bar_count = 3
algo_text = """
from zipline.api import history, add_history
from copy import deepcopy
def initialize(context):
add_history(bar_count={bar_count}, frequency='1d', field='price')
context.history_trace = []
def handle_data(context, data):
prices = history(bar_count={bar_count}, frequency='1d', field='price')
context.history_trace.append(deepcopy(prices))
""".format(bar_count=bar_count).strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-30', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
_, df = factory.create_test_df_source(sim_params)
df = df.astype(np.float64)
source = DataFrameSource(df, sids=[0])
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='daily',
sim_params=sim_params
)
output = test_algo.run(source)
self.assertIsNotNone(output)
history_trace = test_algo.history_trace
for i, received in enumerate(history_trace[bar_count - 1:]):
expected = df.iloc[i:i + bar_count]
assert_frame_equal(expected, received)
def test_basic_history(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=2, frequency='1d', field='price')
def handle_data(context, data):
prices = history(bar_count=2, frequency='1d', field='price')
prices['prices_times_two'] = prices[1] * 2
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
oldest_dt = pd.Timestamp(
'2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
# Random, depends on seed
self.assertEquals(139.36946942498648, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_basic_history_one_day(self):
algo_text = """
from zipline.api import history, add_history
def initialize(context):
add_history(bar_count=1, frequency='1d', field='price')
def handle_data(context, data):
prices = history(bar_count=1, frequency='1d', field='price')
context.last_prices = prices
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
# oldest and newest should be the same if there is only 1 bar
oldest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
# Random, depends on seed
self.assertEquals(180.15661995395106, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_basic_history_positional_args(self):
"""
Ensure that positional args work.
"""
algo_text = """
import copy
from zipline.api import history, add_history
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
context.last_prices = copy.deepcopy(prices)
""".strip()
# March 2006
# Su Mo Tu We Th Fr Sa
# 1 2 3 4
# 5 6 7 8 9 10 11
# 12 13 14 15 16 17 18
# 19 20 21 22 23 24 25
# 26 27 28 29 30 31
start = pd.Timestamp('2006-03-20', tz='UTC')
end = pd.Timestamp('2006-03-21', tz='UTC')
sim_params = factory.create_simulation_parameters(
start=start, end=end)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
self.assertIsNotNone(output)
last_prices = test_algo.last_prices[0]
oldest_dt = pd.Timestamp(
'2006-03-20 4:00 PM', tz='US/Eastern').tz_convert('UTC')
newest_dt = pd.Timestamp(
'2006-03-21 4:00 PM', tz='US/Eastern').tz_convert('UTC')
self.assertEquals(oldest_dt, last_prices.index[0])
self.assertEquals(newest_dt, last_prices.index[-1])
self.assertEquals(139.36946942498648, last_prices[oldest_dt])
self.assertEquals(180.15661995395106, last_prices[newest_dt])
def test_history_with_volume(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'volume')
def handle_data(context, data):
volume = history(3, '1d', 'volume')
record(current_volume=volume[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_volume'],
212218404.0)
def test_history_with_high(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'high')
def handle_data(context, data):
highs = history(3, '1d', 'high')
record(current_high=highs[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_high'],
139.5370641791925)
def test_history_with_low(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'low')
def handle_data(context, data):
lows = history(3, '1d', 'low')
record(current_low=lows[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_low'],
99.891436939669944)
def test_history_with_open(self):
algo_text = """
from zipline.api import history, add_history, record
def initialize(context):
add_history(3, '1d', 'open_price')
def handle_data(context, data):
opens = history(3, '1d', 'open_price')
record(current_open=opens[0].ix[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
np.testing.assert_equal(output.ix[0, 'current_open'],
99.991436939669939)
def test_history_passed_to_func(self):
"""
Had an issue where MagicMock was causing errors during validation
with rolling mean.
"""
algo_text = """
from zipline.api import history, add_history
import pandas as pd
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
pd.rolling_mean(prices, 2)
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
start = pd.Timestamp('2007-04-10', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='minute'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
# At this point, just ensure that there is no crash.
self.assertIsNotNone(output)
def test_history_passed_to_talib(self):
"""
Had an issue where MagicMock was causing errors during validation
with talib.
We don't officially support a talib integration, yet.
But using talib directly should work.
"""
algo_text = """
import talib
import numpy as np
from zipline.api import history, add_history, record
def initialize(context):
add_history(2, '1d', 'price')
def handle_data(context, data):
prices = history(2, '1d', 'price')
ma_result = talib.MA(np.asarray(prices[0]), timeperiod=2)
record(ma=ma_result[-1])
""".strip()
# April 2007
# Su Mo Tu We Th Fr Sa
# 1 2 3 4 5 6 7
# 8 9 10 11 12 13 14
# 15 16 17 18 19 20 21
# 22 23 24 25 26 27 28
# 29 30
# Eddie: this was set to 04-10 but I don't see how that makes
# sense as it does not generate enough data to get at -2 index
# below.
start = pd.Timestamp('2007-04-05', tz='UTC')
end = pd.Timestamp('2007-04-10', tz='UTC')
sim_params = SimulationParameters(
period_start=start,
period_end=end,
capital_base=float("1.0e5"),
data_frequency='minute',
emission_rate='daily'
)
test_algo = TradingAlgorithm(
script=algo_text,
data_frequency='minute',
sim_params=sim_params
)
source = RandomWalkSource(start=start,
end=end)
output = test_algo.run(source)
# At this point, just ensure that there is no crash.
self.assertIsNotNone(output)
recorded_ma = output.ix[-2, 'ma']
self.assertFalse(pd.isnull(recorded_ma))
# Depends on seed
np.testing.assert_almost_equal(recorded_ma,
159.76304468946876)
``` |
{
"source": "jmorrisnrel/engage",
"score": 2
} |
#### File: api/models/configuration.py
```python
from django.db import models
from django.contrib.humanize.templatetags.humanize import ordinal
from django.conf import settings
from django.forms.models import model_to_dict
from django.db.models import Q
from django.contrib.auth.models import User
from django.utils.html import mark_safe
from api.exceptions import ModelAccessException, ModelNotExistException
from api.models.utils import EngageManager
from api.models.calliope import Parameter, Run_Parameter, \
Abstract_Tech, Abstract_Tech_Param
from taskmeta.models import CeleryTask
import uuid
import logging
import pandas as pd
import numpy as np
import re
import os
import json
from copy import deepcopy
logging.basicConfig(
level=logging.INFO,
format="%(asctime)s - %(name)s - %(levelname)s - %(message)s"
)
logger = logging.getLogger(__name__)
CARRIER_IDS = [4, 5, 6, 23, 66, 67, 68, 69, 70, 71]
PRIMARY_CARRIER_IDS = [70, 71]
CARRIER_RATIOS_ID = 7
class Model(models.Model):
class Meta:
db_table = "model"
verbose_name_plural = "[0] Models"
ordering = ['name', '-snapshot_version']
objects = EngageManager()
objects_all = models.Manager()
uuid = models.UUIDField(default=uuid.uuid4, editable=False, unique=True)
name = models.CharField(max_length=200)
snapshot_version = models.IntegerField(blank=True, null=True)
snapshot_base = models.ForeignKey(
"self", on_delete=models.CASCADE, blank=True, null=True)
public = models.BooleanField(default=False)
is_uploading = models.BooleanField(default=False)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
if self.snapshot_version:
return '%s [v%s]' % (self.name, self.snapshot_version)
else:
return '%s' % (self.name)
@classmethod
def find_unique_name(cls, original_name):
""" Iterate a name with an integer suffix until unique name is found """
i = 0
non_unique_name = True
unique_model_name = original_name
while non_unique_name:
if i > 0:
unique_model_name = original_name + ' (' + str(i) + ')'
existing = cls.objects.filter(name__iexact=unique_model_name)
if existing:
i += 1
else:
non_unique_name = False
return unique_model_name
def handle_edit_access(self, user):
""" Requires Model Edit Permissions: 1
Used to verify a user's full access to a model """
permissions = self.get_user_permissions(user)
if permissions in [1]:
return True
raise ModelAccessException("NO ACCESS")
def handle_view_access(self, user):
""" Requires Model Access Permissions: 0 or 1
Used to verify a user's view access to a model """
permissions = self.get_user_permissions(user)
if permissions in [0, 1]:
return bool(permissions)
raise ModelAccessException("NO ACCESS")
def get_user_permissions(self, user):
""" Lookup the permissions that the user has on the given model
-1: No Access, 0: View Only, 1: Can Edit """
if self.public:
return 0
if user.is_authenticated is False:
return -1
# If snapshot, retrieve the base model
if self.snapshot_base is not None:
model = self.snapshot_base
is_snapshot = True
else:
model = self
is_snapshot = False
# Retrieve permissions
models = Model_User.objects.filter(user=user, model=model)
if len(models) > 0:
if is_snapshot:
return 0
else:
return int(models.first().can_edit)
else:
return -1
def notify_collaborators(self, user):
""" Update the notification badges displayed to the other collaborators
on a model """
model_users = Model_User.objects.filter(model=self).exclude(user=user)
for model_user in model_users:
model_user.notifications += 1
model_user.save()
@classmethod
def by_uuid(cls, model_uuid):
""" Get a requested model by its UUID """
model = cls.objects.filter(uuid=model_uuid).first()
if not model:
raise ModelNotExistException("Model is None.")
return model
def get_uuid(self):
""" Get the string formatted UUID of the given model """
return str(self.uuid)
@property
def locations(self):
""" Get all configured location objects """
return self.location_set.all()
@property
def technologies(self):
""" Get all configured technology objects """
return self.technology_set.all()
@property
def loc_techs(self):
""" Get all configured loc_tech objects """
return self.loc_tech_set.all()
@property
def scenarios(self):
""" Get all configured scenario objects """
return self.scenario_set.all()
@property
def scenario_loc_techs(self):
""" Get all configured scenario_loc_techs objects """
return self.scenario_loc_tech_set.all()
@property
def runs(self):
""" Get all configured run objects """
return self.run_set.all()
@property
def color_lookup(self):
params = Tech_Param.objects.filter(technology__in=self.technologies,
parameter__name='color')
return {c.technology_id: c.value for c in params}
def carrier_lookup(self, carrier_in=True):
names = Parameter.C_INS if carrier_in else Parameter.C_OUTS
params = Tech_Param.objects.filter(
technology__in=self.technologies, parameter__name__in=names)
carrier_ins = {}
for c in params:
if c.technology_id not in carrier_ins:
carrier_ins[c.technology_id] = c.value
else:
val = carrier_ins[c.technology_id]
carrier_ins[c.technology_id] = ', '.join([val, c.value])
return carrier_ins
@property
def carriers(self):
""" Get all configured carrier strings """
carriers = Tech_Param.objects.filter(
model=self,
parameter_id__in=CARRIER_IDS)
carriers = carriers.values_list('value', flat=True)
carriers_list = []
for carrier in carriers:
try:
carriers_list += json.loads(carrier)
except Exception:
carriers_list += [carrier]
return sorted(set(carriers_list))
@property
def favorites(self):
""" Get all configured favorited parameter ids """
return list(Model_Favorite.objects.filter(
model=self).values_list('parameter_id', flat=True))
def collaborators(self):
""" Get all model_user objects (collaborators) """
return Model_User.objects.filter(model=self)
def deprecate_runs(self, location_id=None,
technology_id=None, scenario_id=None):
""" When a user has made changes to the model configurations, deprecate
previous runs to inform all collaborators of these changes """
if location_id or technology_id:
if location_id:
loc_techs = self.loc_techs.filter(
Q(location_1_id=location_id) | Q(location_2_id=location_id))
if technology_id:
loc_techs = self.loc_techs.filter(technology_id=technology_id)
scenario_loc_techs = self.scenario_loc_techs.filter(loc_tech__in=loc_techs)
# Scenario IDs to Update
scenario_ids = scenario_loc_techs.values_list('scenario_id', flat=True)
elif scenario_id:
# Scenario IDs to Update
scenario_ids = [scenario_id]
else:
scenario_ids = []
self.runs.filter(scenario_id__in=scenario_ids).update(deprecated=True)
return True
def duplicate(self, dst_model, user):
""" Duplicate the given model as either a new editable model or
a new view-only snapshot. """
return DuplicateModelManager(self, dst_model, user).run()
class Model_User(models.Model):
class Meta:
db_table = "model_user"
verbose_name_plural = "[0] Model Users"
ordering = ['user__user_profile__organization']
objects = EngageManager()
objects_all = models.Manager()
model = models.ForeignKey(Model, on_delete=models.CASCADE)
user = models.ForeignKey(User, on_delete=models.CASCADE)
can_edit = models.BooleanField(default=False)
last_access = models.DateTimeField(auto_now=True, null=True)
notifications = models.IntegerField(default=0)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s - %s' % (self.model, self.user)
@classmethod
def update(cls, model, user, can_edit):
""" Update the access permissions of a model collaborator """
existing_collaborator = cls.objects.filter(
user=user, model=model)
if existing_collaborator:
could_edit = existing_collaborator.first().can_edit
if can_edit is None:
existing_collaborator.hard_delete()
message = 'Collaborator removed.'
elif can_edit != could_edit:
existing_collaborator.update(can_edit=can_edit)
message = 'Updated collaborator.'
else:
message = 'Already a collaborator.'
else:
if can_edit is None:
message = 'Not a collaborator.'
else:
cls.objects.create(user=user,
model=model,
can_edit=can_edit)
message = 'Added collaborator.'
return message
class Model_Comment(models.Model):
class Meta:
db_table = "model_comment"
verbose_name_plural = "[0] Model Comments"
ordering = ['-created']
objects = EngageManager()
objects_all = models.Manager()
user = models.ForeignKey(User, on_delete=models.CASCADE,
blank=True, null=True)
comment = models.TextField(blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
type = models.CharField(max_length=200, blank=True, null=True)
created = models.DateTimeField(auto_now_add=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s' % (self.comment)
def safe_comment(self):
""" Mark the stored comment as safe for html rendering """
return mark_safe(self.comment)
def icon(self):
""" Get the appropriate icon for the given comment type """
if self.type == 'add':
return mark_safe('<i class="fas fa-plus"></i>')
elif self.type == 'edit':
return mark_safe('<i class="far fa-edit"></i>')
elif self.type == 'delete':
return mark_safe('<i class="fas fa-trash"></i>')
elif self.type == 'comment':
return mark_safe('<i class="fas fa-comment"></i>')
elif self.type == 'version':
return mark_safe('<i class="far fa-clock"></i>')
else:
return ''
class Model_Favorite(models.Model):
class Meta:
db_table = "model_favorite"
verbose_name_plural = "[0] Model Favorites"
objects = EngageManager()
objects_all = models.Manager()
model = models.ForeignKey(Model, on_delete=models.CASCADE)
parameter = models.ForeignKey(Parameter, on_delete=models.CASCADE)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s - %s' % (self.model, self.parameter)
class User_File(models.Model):
class Meta:
db_table = "user_file"
verbose_name_plural = "[0] User File Uploads"
ordering = ['-created']
objects = EngageManager()
objects_all = models.Manager()
filename = models.FileField(upload_to='user_files/')
description = models.TextField(blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s' % (self.filename)
def simple_filename(self):
""" Get the filename without its full path """
return str(self.filename).split('/')[-1]
class DuplicateModelManager():
""" Class to duplicate a model as either snapshot or copy """
IGNORED_PARENTS = ['model', 'user', 'parameter', 'run_parameter',
'abstract_tech', 'abstract_tech_param',
'upload_task', 'build_task', 'run_task']
IGNORED_CHILDREN = ['model', 'model_user']
def __init__(self, src_model, dst_model, user):
self.model = src_model
self.dst_model = dst_model
self.user = user
self._change_dict = {}
self._table_set = []
self._children = []
for field in self.model._meta.get_fields():
if field.one_to_many:
self._change_dict[field.name] = {}
self._table_set += list(
getattr(self.model, '{}_set'.format(field.name)).all())
def run(self):
""" Master method for performing a model duplication and returning
the new model instance """
self._create_model()
self._create_children()
self._update_foreign_keys()
self._clean()
self._log_activity()
return self.new_model
def _create_model(self):
""" Create the new model as snapshot or copy """
new_model = deepcopy(self.model)
new_model.pk = self.dst_model.pk
new_model.uuid = self.dst_model.uuid
new_model.name = self.dst_model.name
new_model.snapshot_version = self.dst_model.snapshot_version
new_model.snapshot_base = self.dst_model.snapshot_base
new_model.public = False
new_model.is_uploading = True
new_model.save()
self.new_model = new_model
def _create_children(self):
""" Duplicate the children records for the new model """
for obj in self._table_set:
# Initiate the copy
key = obj._meta.label_lower.split('.')[1]
if key in self.IGNORED_CHILDREN:
continue
old_pk = obj.pk
obj.pk = None
obj.model_id = self.new_model.id
# Create copy; save created date if exists
if hasattr(obj, 'created'):
created = obj.created
obj.save()
# Restore created date if exists
if hasattr(obj, 'created'):
obj.created = created
obj.save()
# Record the old and new primary keys
new_pk = obj.pk
self._change_dict[key][old_pk] = new_pk
# Record the parent foreign keys
if 'parents' not in self._change_dict[key]:
parents = []
for field in obj._meta.get_fields():
if (field.many_to_one is True):
if (field.name not in self.IGNORED_PARENTS):
parents.append(field.name)
self._change_dict[key]['parents'] = parents
if len(self._change_dict[key]['parents']) > 0:
self._children.append(obj)
def _update_foreign_keys(self):
""" Update the new model's children with new foreign keys """
for obj in self._children:
key = obj._meta.label_lower.split('.')[1]
parents = self._change_dict[key]['parents']
for parent in parents:
try:
old_id = getattr(obj, '{}_id'.format(parent))
if old_id:
if parent in ['location_1', 'location_2']:
new_id = self._change_dict['location'][old_id]
else:
new_id = self._change_dict[parent][old_id]
setattr(obj, '{}_id'.format(parent), new_id)
except Exception as e:
logger.error("------FOREIGN KEY ERROR-------")
logger.error("{} | {}".format(key, obj.id))
logger.error("{} | {}".format(parent, old_id))
logger.error(e)
obj.save()
def _clean(self):
""" Clean up the new model """
# Unpublish from Cambium
self.new_model.run_set.all().update(published=False)
# Drop Comments and Model Users
if self.new_model.snapshot_base is None:
Model_Comment.objects.filter(model=self.new_model).hard_delete()
Model_User.objects.filter(model=self.new_model).hard_delete()
Model_User.objects.create(
user=self.user, model=self.new_model, can_edit=True)
def _log_activity(self):
""" Log both old and new models with comments """
username = self.user.get_full_name()
# New Model
link = '<a href="/{}/model/">{}</a>.'
if self.new_model.snapshot_base is None:
comment = '{} initiated this model from ' + link
else:
comment = '{} created this snapshot from ' + link
comment = comment.format(username, self.model.uuid, str(self.model))
Model_Comment.objects.create(
model=self.new_model, comment=comment, type="version")
# Old Model
if self.new_model.snapshot_base is not None:
comment = '{} created a snapshot: <a href="/{}/model/">{}</a>'
comment = comment.format(username, self.new_model.uuid, str(self.new_model))
Model_Comment.objects.create(
model=self.model, comment=comment, type="version")
class Timeseries_Meta(models.Model):
class Meta:
db_table = "timeseries_meta"
verbose_name_plural = "[3] Timeseries Meta"
ordering = ['-is_uploading', '-created']
objects = EngageManager()
objects_all = models.Manager()
name = models.CharField(max_length=200)
start_date = models.DateTimeField(null=True)
end_date = models.DateTimeField(null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
file_uuid = models.UUIDField(default=uuid.uuid4)
is_uploading = models.BooleanField(default=False)
failure = models.BooleanField(default=False)
message = models.TextField(blank=True, null=True)
original_filename = models.CharField(max_length=200, null=True, blank=True)
original_timestamp_col = models.IntegerField(null=True)
original_value_col = models.IntegerField(null=True)
created = models.DateTimeField(auto_now_add=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
upload_task = models.ForeignKey(
to=CeleryTask,
to_field="id",
related_name="timeseries_meta",
null=True,
on_delete=models.PROTECT,
default=None
)
def __str__(self):
if self.original_filename is None:
return self.name
else:
s = "%s - %s (%s column)" % (self.name,
self.original_filename,
ordinal(1 + self.original_value_col))
return s
def get_period(self):
""" Calculate the min/max dates of the given timeseries """
if None in [self.start_date, self.end_date]:
timeseries = self.get_timeseries()
self.start_date = timeseries.datetime.min()
self.end_date = timeseries.datetime.max()
self.save()
return (self.start_date.replace(tzinfo=None),
self.end_date.replace(tzinfo=None))
def get_timeseries(self):
""" Retrieve the data from the given timeseries """
directory = '{}/timeseries'.format(settings.DATA_STORAGE)
input_fname = '{}/{}.csv'.format(directory, self.file_uuid)
timeseries = pd.read_csv(input_fname, parse_dates=[0])
timeseries.index = pd.DatetimeIndex(timeseries.datetime).tz_localize(None)
return timeseries
@classmethod
def create_ts_8760(cls, model, name, values):
""" Creates an hourly (8760) timeseries for a full year
Using arbitrary year of 2019, and has no other significance """
# TODO: year as argument
start_date = "2019-01-01 00:00"
end_date = "2019-12-31 23:00"
dates = list(pd.date_range(start_date, end_date, freq="1h"))
timeseries = pd.DataFrame(np.array([dates, values]).T,
columns=['time', 'value'])
timeseries = timeseries.set_index('time')
timeseries.index.name = 'datetime'
meta = cls.objects.create(model=model, name=name,
start_date=start_date, end_date=end_date)
try:
directory = "{}/timeseries".format(settings.DATA_STORAGE)
os.makedirs(directory, exist_ok=True)
fname = "{}/{}.csv".format(directory, meta.file_uuid)
timeseries.to_csv(fname)
return True
except Exception:
meta.delete()
return False
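# Usage sketch: create_ts_8760 expects one value per hour of the arbitrary
# year 2019 (365 * 24 = 8760 values). Assuming an existing `model` instance,
# a flat availability profile could be registered like this:
#
#   values = [1.0] * 8760
#   ok = Timeseries_Meta.create_ts_8760(model, "flat-profile", values)
#   # ok is False when writing the CSV under DATA_STORAGE/timeseries fails,
#   # in which case the freshly created meta row is deleted again.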
class Technology(models.Model):
class Meta:
db_table = "technology"
verbose_name_plural = "[2] Technologies"
ordering = ['pretty_name']
objects = EngageManager()
objects_all = models.Manager()
abstract_tech = models.ForeignKey(Abstract_Tech, on_delete=models.CASCADE)
name = models.CharField(max_length=200)
pretty_name = models.CharField(max_length=200)
tag = models.CharField(max_length=200, blank=True, null=True)
pretty_tag = models.CharField(max_length=200, blank=True, null=True)
description = models.TextField(blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s' % (self.pretty_name)
@property
def calliope_name(self):
""" Get the calliope appropriate name for the given technology """
if self.tag:
return '{}-{}'.format(self.name, self.tag)
else:
return '{}'.format(self.name)
@property
def calliope_pretty_name(self):
if self.pretty_tag:
return '{} [{}]'.format(self.pretty_name, self.pretty_tag)
else:
return self.pretty_name
@property
def color(self):
""" Lookup the color from the technology's parameters """
p = Tech_Param.objects.filter(technology=self,
parameter__name='color').first()
return p.value if p else "white"
@property
def carrier_in(self):
""" Lookup the input carrier from the technology's parameters """
p = Tech_Param.objects.filter(technology=self,
parameter__name__in=Parameter.C_INS
).order_by('value')
return ','.join(list(p.values_list('value', flat=True)))
@property
def carrier_out(self):
""" Lookup the output carrier from the technology's parameters """
p = Tech_Param.objects.filter(technology=self,
parameter__name__in=Parameter.C_OUTS
).order_by('value')
return ','.join(list(p.values_list('value', flat=True)))
def to_dict(self):
d = {'model': self.__class__.__name__}
d.update(model_to_dict(self))
return d
def to_json(self):
d = self.to_dict()
j = json.dumps(d)
return j
def update_calliope_pretty_name(self):
tech_param = Tech_Param.objects.filter(
model_id=self.model_id,
technology_id=self.id,
parameter__name='name')
if len(tech_param) == 0:
Tech_Param.objects.create(
model_id=self.model_id,
technology_id=self.id,
parameter=Parameter.objects.get(name='name'),
value=self.calliope_pretty_name)
else:
tech_param.update(value=self.calliope_pretty_name)
def duplicate(self, model_id, pretty_name):
""" Duplicate and return a new technology instance """
new_tech = deepcopy(self)
new_tech.pk = None
new_tech.pretty_name = pretty_name
new_tech.name = ParamsManager.simplify_name(pretty_name)
new_tech.model_id = model_id
new_tech.save()
tech_params = Tech_Param.objects.filter(technology=self)
# Handle New & Existing Timeseries Meta
tmetas = {}
existing_tmetas = {}
for t in Timeseries_Meta.objects.filter(model_id=model_id):
existing_tmetas[t.name] = t
# Iterate copy over Technology's parameters
for tech_param in tech_params:
tech_param.pk = None
tech_param.technology_id = new_tech.id
tech_param.model_id = model_id
# Timeseries
tmeta = deepcopy(tech_param.timeseries_meta)
if tmeta is not None:
original_pk = tmeta.pk
if tmeta.name in existing_tmetas.keys():
# Timeseries already exists (by name)
tech_param.timeseries_meta = existing_tmetas[tmeta.name]
tech_param.value = existing_tmetas[tmeta.name].pk
elif original_pk in tmetas.keys():
# Timeseries was just copied in a previous iteration
tech_param.timeseries_meta = tmetas[original_pk]
tech_param.value = tmetas[original_pk].pk
else:
# Creating a new timeseries meta record
tmeta.pk = None
tmeta.model_id = model_id
tmeta.save()
tech_param.timeseries_meta = tmeta
tech_param.value = tmeta.pk
tmetas[original_pk] = tmeta
tech_param.save()
new_tech.update_calliope_pretty_name()
return new_tech
def update(self, form_data):
""" Update the Technology parameters stored in Tech_Param """
METHODS = ['essentials', 'add', 'edit', 'delete']
for method in METHODS:
if method in form_data.keys():
data = form_data[method]
getattr(Tech_Param, '_' + method)(self, data)
class Tech_Param(models.Model):
class Meta:
db_table = "tech_param"
verbose_name_plural = "[2] Technology Parameters"
objects = EngageManager()
objects_all = models.Manager()
technology = models.ForeignKey(Technology, on_delete=models.CASCADE)
year = models.IntegerField(default=0)
parameter = models.ForeignKey(Parameter, on_delete=models.CASCADE)
value = models.CharField(max_length=200, blank=True, null=True)
raw_value = models.CharField(max_length=200, blank=True, null=True)
timeseries = models.BooleanField(default=False)
timeseries_meta = models.ForeignKey(Timeseries_Meta,
on_delete=models.CASCADE,
blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
@classmethod
def _essentials(cls, technology, data):
""" Update a technologies essential parameters """
for key, value in data.items():
if key == 'tech_name':
if value:
technology.name = ParamsManager.simplify_name(value)
technology.pretty_name = value
elif key == 'tech_tag':
technology.tag = ParamsManager.simplify_name(value)
technology.pretty_tag = value
elif key == 'tech_description':
technology.description = value
elif key == 'cplus_carrier':
cls._cplus_carriers(technology, value, data['cplus_ratio'])
elif key == 'cplus_ratio':
continue
else:
cls.objects.filter(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key).hard_delete()
if value:
cls.objects.create(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key,
value=ParamsManager.clean_str_val(value))
technology.save()
technology.update_calliope_pretty_name()
@classmethod
def _cplus_carriers(cls, technology, carriers, ratios):
""" Update a technologies (Conversion Plus) carrier parameters """
ratios_dict = {}
for param_id in carriers.keys():
# Delete Old Parameter
cls.objects.filter(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id__in=[param_id, CARRIER_RATIOS_ID]).hard_delete()
# Create New Parameters
vals = [v for v in carriers[param_id] if v != '']
if vals:
val = vals[0] if len(vals) == 1 else json.dumps(vals)
cls.objects.create(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=param_id,
value=val)
name = Parameter.objects.get(id=param_id).name
# Update Ratios Dict
if name not in ratios_dict:
ratios_dict[name] = {}
for carrier, ratio in zip(carriers[param_id],
ratios[param_id]):
if carrier:
try:
val = float(ratio) if float(ratio) >= 0 else 1
except ValueError:
val = 1
ratios_dict[name][carrier] = val
# Update Ratios Parameter
ratios_val = json.dumps(ratios_dict)
cls.objects.create(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=CARRIER_RATIOS_ID,
value=ratios_val)
@classmethod
def _add(cls, technology, data):
""" Add a new parameter to a technology """
for key, value_dict in data.items():
if (('year' in value_dict) & ('value' in value_dict)):
years = value_dict['year']
values = value_dict['value']
num_records = np.min([len(years), len(values)])
new_objects = []
for i in range(num_records):
vals = str(values[i]).split('||')
new_objects.append(cls(
model_id=technology.model_id,
technology_id=technology.id,
year=years[i],
parameter_id=key,
value=ParamsManager.clean_str_val(vals[0]),
raw_value=vals[1] if len(vals) > 1 else vals[0]))
cls.objects.bulk_create(new_objects)
@classmethod
def _edit(cls, technology, data):
""" Edit a technology's parameters """
if 'parameter' in data:
for key, value in data['parameter'].items():
vals = str(value).split('||')
cls.objects.filter(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key).hard_delete()
cls.objects.create(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key,
value=ParamsManager.clean_str_val(vals[0]),
raw_value=vals[1] if len(vals) > 1 else vals[0])
if 'timeseries' in data:
for key, value in data['timeseries'].items():
cls.objects.filter(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key).hard_delete()
cls.objects.create(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key,
value=ParamsManager.clean_str_val(value),
timeseries_meta_id=value,
timeseries=True)
if 'parameter_instance' in data:
instance_items = data['parameter_instance'].items()
for key, value_dict in instance_items:
parameter_instance = cls.objects.filter(
model_id=technology.model_id,
id=key)
if 'value' in value_dict:
vals = str(value_dict['value']).split('||')
parameter_instance.update(
value=ParamsManager.clean_str_val(vals[0]),
raw_value=vals[1] if len(vals) > 1 else vals[0])
if 'year' in value_dict:
parameter_instance.update(year=value_dict['year'])
@classmethod
def _delete(cls, technology, data):
""" Delete a technology's parameters """
if 'parameter' in data:
for key, value in data['parameter'].items():
cls.objects.filter(
model_id=technology.model_id,
technology_id=technology.id,
parameter_id=key).hard_delete()
elif 'parameter_instance' in data:
instance_items = data['parameter_instance'].items()
for key, value in instance_items:
cls.objects.filter(
model_id=technology.model_id,
id=key).hard_delete()
class Location(models.Model):
class Meta:
db_table = "location"
verbose_name_plural = "[1] Locations"
ordering = ['pretty_name']
objects = EngageManager()
objects_all = models.Manager()
name = models.CharField(max_length=200)
pretty_name = models.CharField(max_length=200)
latitude = models.FloatField()
longitude = models.FloatField()
available_area = models.FloatField(blank=True, null=True)
description = models.TextField(blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s' % (self.pretty_name)
class Loc_Tech(models.Model):
class Meta:
db_table = "loc_tech"
verbose_name_plural = "[3] Location Technologies"
ordering = ['technology__abstract_tech__name',
'technology__pretty_name',
'location_1', 'location_2']
objects = EngageManager()
objects_all = models.Manager()
location_1 = models.ForeignKey(Location,
on_delete=models.CASCADE,
related_name="location_1")
location_2 = models.ForeignKey(Location,
on_delete=models.CASCADE,
related_name="location_2",
blank=True, null=True)
technology = models.ForeignKey(Technology, on_delete=models.CASCADE)
description = models.TextField(blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
if self.location_2:
return '%s <-> %s | %s [%s]' % (self.location_1, self.location_2,
self.technology,
self.technology.pretty_tag)
else:
return '%s | %s [%s]' % (self.location_1, self.technology,
self.technology.pretty_tag)
def update(self, form_data):
""" Update the Location Technology parameters
stored in Loc_Tech_Param """
METHODS = ['add', 'edit', 'delete']
for method in METHODS:
if method in form_data.keys():
data = form_data[method]
getattr(Loc_Tech_Param, '_' + method)(self, data)
# Remove system-wide parameters
sw = Loc_Tech_Param.objects.filter(parameter__is_systemwide=True)
sw.hard_delete()
class Loc_Tech_Param(models.Model):
class Meta:
db_table = "loc_tech_param"
verbose_name_plural = "[3] Location Technology Parameters"
objects = EngageManager()
objects_all = models.Manager()
loc_tech = models.ForeignKey(Loc_Tech, on_delete=models.CASCADE)
year = models.IntegerField(default=0)
parameter = models.ForeignKey(Parameter, on_delete=models.CASCADE)
value = models.CharField(max_length=200, blank=True, null=True)
raw_value = models.CharField(max_length=200, blank=True, null=True)
timeseries = models.BooleanField(default=False)
timeseries_meta = models.ForeignKey(Timeseries_Meta,
on_delete=models.CASCADE,
blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
@classmethod
def _add(cls, loc_tech, data):
""" Add a new parameter to a location technology """
for key, value_dict in data.items():
if (('year' in value_dict) & ('value' in value_dict)):
years = value_dict['year']
values = value_dict['value']
num_records = np.min([len(years), len(values)])
new_objects = []
for i in range(num_records):
vals = str(values[i]).split('||')
new_objects.append(cls(
model_id=loc_tech.model_id,
loc_tech_id=loc_tech.id,
year=years[i],
parameter_id=key,
value=ParamsManager.clean_str_val(vals[0]),
raw_value=vals[1] if len(vals) > 1 else vals[0]))
cls.objects.bulk_create(new_objects)
@classmethod
def _edit(cls, loc_tech, data):
""" Edit a location technology parameter """
if 'parameter' in data:
for key, value in data['parameter'].items():
vals = str(value).split('||')
cls.objects.filter(
model_id=loc_tech.model_id,
loc_tech_id=loc_tech.id,
parameter_id=key).hard_delete()
cls.objects.create(
model_id=loc_tech.model_id,
loc_tech_id=loc_tech.id,
parameter_id=key,
value=ParamsManager.clean_str_val(vals[0]),
raw_value=vals[1] if len(vals) > 1 else vals[0])
if 'timeseries' in data:
for key, value in data['timeseries'].items():
cls.objects.filter(
model_id=loc_tech.model_id,
loc_tech_id=loc_tech.id,
parameter_id=key).hard_delete()
cls.objects.create(
model_id=loc_tech.model_id,
loc_tech_id=loc_tech.id,
parameter_id=key,
value=ParamsManager.clean_str_val(value),
timeseries_meta_id=value,
timeseries=True)
if 'parameter_instance' in data:
instance_items = data['parameter_instance'].items()
for key, value_dict in instance_items:
parameter_instance = cls.objects.filter(
model_id=loc_tech.model_id,
id=key)
if 'value' in value_dict:
vals = str(value_dict['value']).split('||')
parameter_instance.update(
value=ParamsManager.clean_str_val(vals[0]),
raw_value=vals[1] if len(vals) > 1 else vals[0])
if 'year' in value_dict:
parameter_instance.update(year=value_dict['year'])
@classmethod
def _delete(cls, loc_tech, data):
""" Delete a location technology parameter """
if 'parameter' in data:
for key, value in data['parameter'].items():
cls.objects.filter(
model_id=loc_tech.model_id,
loc_tech_id=loc_tech.id,
parameter_id=key).hard_delete()
elif 'parameter_instance' in data:
instance_items = data['parameter_instance'].items()
for key, value in instance_items:
cls.objects.filter(
model_id=loc_tech.model_id,
id=key).hard_delete()
class Scenario(models.Model):
class Meta:
db_table = "scenario"
verbose_name_plural = "[4] Scenarios"
ordering = ['name']
objects = EngageManager()
objects_all = models.Manager()
name = models.CharField(max_length=200)
description = models.TextField(blank=True, null=True)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s' % (self.name)
def duplicate(self, name):
""" Duplicate and return a new scenario with a copy of the original's
configuration and settings instances """
# Create Scenario
new_scenario = deepcopy(self)
new_scenario.pk = None
new_scenario.name = name
new_scenario.save()
# Copy Parameters
scenario_params = Scenario_Param.objects.filter(scenario=self)
existing_param_ids = []
for scenario_param in scenario_params:
existing_param_ids.append(scenario_param.run_parameter_id)
scenario_param.pk = None
if scenario_param.run_parameter.name == "name":
scenario_param.value = "{}: {}".format(new_scenario.model.name,
name)
scenario_param.scenario_id = new_scenario.id
scenario_param.save()
# Copy Default Parameters
parameters = Run_Parameter.objects.all()
for param in parameters:
if param.id in existing_param_ids:
continue
if param.name == "name":
value = "{}: {}".format(new_scenario.model.name, name)
else:
value = ParamsManager.clean_str_val(param.default_value)
Scenario_Param.objects.create(
scenario=new_scenario, run_parameter=param,
value=value, model=new_scenario.model
)
# Copy Configuration
scenario_loc_techs = Scenario_Loc_Tech.objects.filter(scenario=self)
for scenario_loc_tech in scenario_loc_techs:
scenario_loc_tech.pk = None
scenario_loc_tech.scenario_id = new_scenario.id
scenario_loc_tech.save()
return new_scenario
def timeseries_precheck(self):
"""
Extracts timeseries data to verify and validate before a new run
"""
scenario_loc_techs = Scenario_Loc_Tech.objects.filter(scenario=self)
ts_params = {}
missing_ts = []
t_format = "%m/%d/%Y, %H:%M:%S"
for scenario_loc_tech in scenario_loc_techs:
loc_tech = scenario_loc_tech.loc_tech
technology = loc_tech.technology
t_params = Tech_Param.objects.filter(technology=technology,
timeseries=True)
lt_params = Loc_Tech_Param.objects.filter(loc_tech=loc_tech,
timeseries=True)
param_sets = [(t_params, 'technologies', technology),
(lt_params, 'loc_techs', loc_tech)]
for param_set in param_sets:
for param in param_set[0]:
if param.timeseries_meta:
period = param.timeseries_meta.get_period()
key = (str(loc_tech), str(param.parameter))
ts_params[key] = [t.strftime(t_format) for t in period]
else:
if param_set[1] == 'loc_techs':
loc_tech_id = loc_tech.id
else:
loc_tech_id = ''
missing_ts.append((param_set[1], technology.id,
loc_tech_id, str(param_set[2]),
str(param.parameter)))
ts_params = [list(k) + list(v) for k, v in ts_params.items()]
return json.dumps(ts_params), set(missing_ts)
class Scenario_Loc_Tech(models.Model):
class Meta:
db_table = "scenario_loc_tech"
verbose_name_plural = "[4] Scenario Location Technologies"
ordering = ['loc_tech__technology__name']
objects = EngageManager()
objects_all = models.Manager()
scenario = models.ForeignKey(Scenario, on_delete=models.CASCADE)
loc_tech = models.ForeignKey(Loc_Tech, on_delete=models.CASCADE)
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
def __str__(self):
return '%s' % (self.loc_tech)
class Scenario_Param(models.Model):
class Meta:
db_table = "scenario_param"
verbose_name_plural = "[4] Scenario Parameters"
ordering = ['run_parameter__pretty_name', 'year', 'id']
objects = EngageManager()
objects_all = models.Manager()
scenario = models.ForeignKey(Scenario, on_delete=models.CASCADE)
run_parameter = models.ForeignKey(Run_Parameter, on_delete=models.CASCADE)
year = models.IntegerField(default=0)
value = models.TextField()
model = models.ForeignKey(Model, on_delete=models.CASCADE)
created = models.DateTimeField(auto_now_add=True, null=True)
updated = models.DateTimeField(auto_now=True, null=True)
deleted = models.DateTimeField(default=None, editable=False, null=True)
@classmethod
def update(cls, scenario, form_data):
""" Update the Scenario parameters stored in Scenario_Param """
METHODS = ['add', 'edit', 'delete']
for method in METHODS:
if method in form_data.keys():
data = form_data[method]
getattr(cls, '_' + method)(scenario, data)
@classmethod
def _add(cls, scenario, data):
""" Add a new parameter to a scenario """
for p_id in data:
years = data[p_id]['years']
values = data[p_id]['values']
new_objects = []
for i in range(len(years)):
if values[i] != '':
new_objects.append(cls(
run_parameter_id=p_id,
model_id=scenario.model_id,
scenario_id=scenario.id,
year=cls.int_or_zero(years[i]),
value=ParamsManager.clean_str_val(values[i])))
cls.objects.bulk_create(new_objects)
@classmethod
def _edit(cls, scenario, data):
""" Edit a scenario parameter """
if 'year' in data.keys():
for key, val in data['year'].items():
param = cls.objects.filter(
model_id=scenario.model_id,
scenario_id=scenario.id,
id=key)
param.update(year=cls.int_or_zero(val))
if 'value' in data.keys():
for key, val in data['value'].items():
param = cls.objects.filter(
model_id=scenario.model_id,
scenario_id=scenario.id,
id=key)
param.update(value=ParamsManager.clean_str_val(val))
@classmethod
def _delete(cls, scenario, data):
""" Delete a scenario parameter """
for p_id in data:
cls.objects.filter(
model_id=scenario.model_id,
scenario_id=scenario.id,
id=p_id).hard_delete()
@staticmethod
def int_or_zero(val):
""" Force convert a value into an integer, and return 0 on failure """
try:
return int(val)
except ValueError:
return 0
class ParamsManager():
@classmethod
def all_tech_params(cls, tech):
""" Builds data for the parameters table UI: Tech Level """
p1, ids = cls.get_tech_params_dict('1_tech', tech.id)
p0, _ = cls.get_tech_params_dict('0_abstract', tech.id, ids)
essential_params, parameters = cls.parse_essentials(p1 + p0)
return essential_params, parameters
@classmethod
def all_loc_tech_params(cls, loc_tech):
""" Builds data for the parameters table UI: Loc Tech Level """
tech_id = loc_tech.technology_id
p2, ids = cls.get_tech_params_dict('2_loc_tech', loc_tech.id,
systemwide=False)
p1, ids = cls.get_tech_params_dict('1_tech', tech_id, ids,
systemwide=False)
p0, _ = cls.get_tech_params_dict('0_abstract', tech_id, ids,
systemwide=False)
_, parameters = cls.parse_essentials(p2 + p1 + p0)
return parameters
@staticmethod
def get_tech_params_dict(level, id, excl_ids=None, systemwide=True):
""" Builds data for the parameters table UI
Levels: 2_loc_tech, 1_tech, 0_abstract
            excl_ids: Parameter IDs to exclude from the return list
systemwide: include system-wide parameters
"""
data = []
if excl_ids is None:
excl_ids = []
new_excl_ids = excl_ids.copy()
values = ["id", "parameter__root",
"parameter__category", "parameter__category", "parameter_id",
"parameter__name", "parameter__pretty_name",
"parameter__description", "parameter__is_essential",
"parameter__is_carrier", "parameter__units", "parameter__choices",
"parameter__timeseries_enabled"]
# Get Params based on Level
if level == '0_abstract':
technology = Technology.objects.get(id=id)
params = Abstract_Tech_Param.objects.filter(
abstract_tech=technology.abstract_tech
).order_by('parameter__category', 'parameter__pretty_name')
values += ["default_value"]
elif level == '1_tech':
technology = Technology.objects.get(id=id)
params = Tech_Param.objects.filter(
technology_id=id
).order_by('parameter__category', 'parameter__pretty_name', 'year')
elif level == '2_loc_tech':
loc_tech = Loc_Tech.objects.get(id=id)
technology = loc_tech.technology
params = Loc_Tech_Param.objects.filter(
loc_tech_id=id
).order_by('parameter__category', 'parameter__pretty_name', 'year')
if level in ['1_tech', '2_loc_tech']:
values += ["year", "timeseries", "timeseries_meta_id",
"raw_value", "value"]
# System-Wide Handling
if systemwide is False:
params = params.filter(parameter__is_systemwide=False)
# Build Parameter Dictionary List
params = params.values(*values)
for param in params:
if (param["parameter_id"] in excl_ids):
continue
new_excl_ids.append(param["parameter_id"])
param_dict = {
'id': param["id"] if 'id' in param.keys() else 0,
'level': level,
'year': param["year"] if 'year' in param.keys() else 0,
'technology_id': technology.id,
'parameter_root': param["parameter__root"],
'parameter_category': param["parameter__category"],
'parameter_id': param["parameter_id"],
'parameter_name': param["parameter__name"],
'parameter_pretty_name': param["parameter__pretty_name"],
'parameter_description': param["parameter__description"],
'parameter_is_essential': param["parameter__is_essential"],
'parameter_is_carrier': param["parameter__is_carrier"],
'units': param["parameter__units"],
'placeholder': param["raw_value"] or param["value"] if "raw_value" in param.keys() else param["default_value"],
'choices': param["parameter__choices"],
'timeseries_enabled': param["parameter__timeseries_enabled"],
'timeseries': param["timeseries"] if 'timeseries' in param.keys() else False,
'timeseries_meta_id': param["timeseries_meta_id"] if 'timeseries_meta_id' in param.keys() else 0,
'value': param["value"] if "value" in param.keys() else param["default_value"]}
data.append(param_dict)
return data, list(set(new_excl_ids))
@staticmethod
def parse_essentials(parameters):
""" Parse out the essentials from the list returned from
get_tech_params_dict() """
p_df = pd.DataFrame(parameters)
essentials_mask = p_df.parameter_is_essential == True
# Parameters
non_essentials_ids = p_df[~essentials_mask].index
parameters = p_df.loc[non_essentials_ids].to_dict(orient='records')
# Essentials
essentials = {}
essentials_ids = p_df[essentials_mask].index
essential_params = p_df.loc[essentials_ids]
carrier_ratios = essential_params[essential_params.parameter_id == 7]
for _, row in essential_params.iterrows():
ratios_val = None
val = row.value
if row.parameter_id in CARRIER_IDS:
try:
val = json.loads(row.value)
except Exception:
val = [row.value]
try:
ratios = json.loads(carrier_ratios.value[0])
ratios_val = ratios[row.parameter_name]
except Exception:
pass
essentials[row.parameter_id] = {
'name': row.parameter_pretty_name,
'value': val,
'ratios': ratios_val,
'description': row.parameter_description,
}
return essentials, parameters
@staticmethod
def simplify_name(name):
simple_name = name.strip().replace(" ", "_")
simple_name = re.sub(r"\W+", "", simple_name)
return simple_name
@staticmethod
def clean_str_val(value):
value = str(value)
clean_value = value.replace(',', '')
try:
if '.' in clean_value:
return str(float(clean_value))
else:
return str(int(clean_value))
except ValueError:
return value
@staticmethod
def parse_carrier_name(carrier):
return carrier.split(" [")[0].strip()
@staticmethod
def parse_carrier_units(carrier):
try:
return re.search(r"\[([A-Za-z0-9_]+)\]", carrier).group(1)
except Exception:
return "kW"
@classmethod
def emission_categories(cls):
queryset = Parameter.objects.filter(category__contains="Emissions")
categories = sorted(list(set([param.category for param in queryset])))
return categories
@classmethod
def cost_classes(cls):
queryset = Parameter.objects.filter(category__contains="Emissions")
categories = {param.category: param.root for param in queryset}
categories['Costs'] = 'costs.monetary'
return categories
```
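The `ParamsManager` helpers at the end of this module are plain static methods with no database access, so their behaviour is easy to verify interactively. A minimal sketch, assuming a configured Django environment (for example inside `python manage.py shell`); the input strings are made-up examples:

```python
from api.models.configuration import ParamsManager

# clean_str_val strips thousands separators and normalises numerics to str(int/float)
ParamsManager.clean_str_val("1,000")         # -> "1000"
ParamsManager.clean_str_val("1,000.50")      # -> "1000.5"
ParamsManager.clean_str_val("not a number")  # -> "not a number" (returned unchanged)

# simplify_name turns spaces into underscores and drops non-word characters
ParamsManager.simplify_name("Wind Farm (East)")  # -> "Wind_Farm_East"

# Carrier labels of the form "Name [units]" split into name and units,
# with "kW" used as the fallback unit
ParamsManager.parse_carrier_name("Electricity [kWh]")   # -> "Electricity"
ParamsManager.parse_carrier_units("Electricity [kWh]")  # -> "kWh"
ParamsManager.parse_carrier_units("Electricity")        # -> "kW"
```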
#### File: api/models/utils.py
```python
from django.db import models
from datetime import datetime
import pytz
# ----- Override Base Django Classes: QuerySet, Manager
class EngageQuerySet(models.query.QuerySet):
"""
QuerySet whose delete() does not delete items, but instead marks the
rows as not active by setting the deleted timestamp field
"""
def delete(self):
self._cascade_mark_delete(self)
def hard_delete(self):
for model in self:
model.delete()
@classmethod
def _cascade_mark_delete(cls, query_in):
objects = list(query_in.all())
query_in.update(deleted=datetime.now(tz=pytz.UTC))
for obj in objects:
for field in obj._meta.get_fields():
if field.one_to_many:
attr = field.name
try:
query_out = getattr(obj, '{}'.format(attr))
except AttributeError:
query_out = getattr(obj, '{}_set'.format(attr))
cls._cascade_mark_delete(query_out)
class EngageManager(models.Manager):
"""
    Manager that returns an EngageQuerySet, so that delete() soft-deletes
    rows and the default queryset excludes anything already marked deleted.
"""
def get_queryset(self):
objects = EngageQuerySet(self.model, using=self._db)
return objects.filter(deleted=None)
```
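Taken together with `configuration.py`, this gives every model a soft-delete workflow: the default `objects` manager hides rows whose `deleted` timestamp is set, queryset `delete()` only stamps that field (cascading through one-to-many relations), and `hard_delete()` removes rows for real. A minimal sketch of that behaviour, assuming a Django shell and at least one existing `Location` row:

```python
from api.models.configuration import Location

loc = Location.objects.first()               # default manager: rows with deleted=None
Location.objects.filter(pk=loc.pk).delete()  # soft delete: stamps `deleted` and cascades

Location.objects.filter(pk=loc.pk).exists()      # False -> hidden by EngageManager
Location.objects_all.filter(pk=loc.pk).exists()  # True  -> row is still in the database

# Permanent removal goes through EngageQuerySet.hard_delete(), the pattern
# configuration.py itself uses, e.g.:
#   Tech_Param.objects.filter(technology_id=..., parameter_id=...).hard_delete()
```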
#### File: tests/test_models/test_calliope.py
```python
from django.test import TestCase
from api.models.calliope import (
Parameter,
Abstract_Tech,
Abstract_Tech_Param,
Run_Parameter,
)
class ParameterTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.param = Parameter.objects.create(
root="root",
category="public",
name="my_param_one",
pretty_name="MyParamOne",
timeseries_enabled=True,
choices=["c1", "c2"],
units="kWh",
)
def test_class_meta(self):
self.assertEqual(Parameter._meta.db_table, "parameter")
self.assertEqual(Parameter._meta.verbose_name_plural, "[Admin] Parameters")
def test_string_representation(self):
self.assertEqual(str(self.param), self.param.pretty_name)
class AbstractTechTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.abstract_tech = Abstract_Tech.objects.create(
name="my-abstract-tech",
pretty_name="MyAbstractTech",
image="my-abstract-tech.png",
)
def test_class_meta(self):
self.assertEqual(Abstract_Tech._meta.db_table, "abstract_tech")
self.assertEqual(
Abstract_Tech._meta.verbose_name_plural, "[Admin] Abstract Technologies"
)
def test_string_representation(self):
self.assertEqual(
str(self.abstract_tech),
f"{self.abstract_tech.pretty_name} ({self.abstract_tech.name})",
)
class AbstractTechParamTestCase(TestCase):
@classmethod
def setUpTestData(cls):
param = Parameter.objects.create(
root="root",
category="public",
name="my-param-one",
pretty_name="MyParamOne",
timeseries_enabled=True,
choices=["c1", "c2"],
units="kWh",
)
abstract_tech = Abstract_Tech.objects.create(
name="my-abstract-tech",
pretty_name="MyAbstractTech",
image="my-abstract-tech.png",
)
Abstract_Tech_Param.objects.create(
abstract_tech=abstract_tech,
parameter=param,
default_value="default-tech-param-value",
)
def test_class_meta(self):
self.assertEqual(Abstract_Tech_Param._meta.db_table, "abstract_tech_param")
self.assertEqual(
Abstract_Tech_Param._meta.verbose_name_plural,
"[Admin] Abstract Technology Parameters",
)
class RunParameterTestCase(TestCase):
@classmethod
def setUpTestData(cls):
cls.run_param = Run_Parameter.objects.create(
root="root",
name="my-run-parameter",
pretty_name="MyRunParameter",
user_visibility=True,
default_value="default-run-param-value",
choices=["c1", "c2"],
)
def test_class_meta(self):
self.assertEqual(Run_Parameter._meta.db_table, "run_parameter")
self.assertEqual(
Run_Parameter._meta.verbose_name_plural, "[Admin] Run Parameters"
)
def test_string_representation(self):
self.assertEqual(str(self.run_param), self.run_param.pretty_name)
```
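These tests only need Django's ORM and test database, so they can be driven either with `manage.py test` or programmatically via Django's test-runner API. A minimal sketch of the programmatic route; the settings module name and the dotted test label are assumptions based on the file paths shown above:

```python
import os
import django
from django.conf import settings
from django.test.utils import get_runner

# Assumed settings module -- substitute the project's real DJANGO_SETTINGS_MODULE.
os.environ.setdefault("DJANGO_SETTINGS_MODULE", "calliope_app.settings")
django.setup()

TestRunner = get_runner(settings)
failures = TestRunner(verbosity=2).run_tests(["tests.test_models.test_calliope"])
raise SystemExit(bool(failures))
```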
#### File: client/component_views/outputs.py
```python
from django.http import HttpResponse, JsonResponse, Http404
from django.shortcuts import render
from django.views.decorators.csrf import csrf_protect
from django.utils.html import mark_safe
from django.conf import settings
from api.models.configuration import Model
from api.models.outputs import Run, Cambium
from api.tasks import task_status
import os
import io
import json
from datetime import datetime
import pandas as pd
from collections import defaultdict
@csrf_protect
def run_dashboard(request):
"""
Retrieve the runs for a scenario
Parameters:
model_uuid (uuid): required
scenario_id (int): optional
Returns: HttpResponse
Example:
POST: /component/run_dashboard/
"""
model_uuid = request.POST['model_uuid']
scenario_id = request.POST.get('scenario_id', None)
if scenario_id is not None:
request.session['scenario_id'] = scenario_id
model = Model.by_uuid(model_uuid)
can_edit = model.handle_view_access(request.user)
runs = model.runs.filter(scenario_id=scenario_id)
# Check for any publication updates
for run in runs.filter(published=None):
Cambium.push_run(run)
context = {
"model": model,
"runs": runs,
"can_edit": can_edit,
"task_status": task_status,
"cambium_configured": bool(settings.CAMBIUM_API_KEY)
}
html = list(render(request, 'run_dashboard.html', context))[0]
payload = {
'html': html.decode('utf-8')}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def add_run_precheck(request):
"""
Retrieve the precheck for a new scenario run
Parameters:
model_uuid (uuid): required
scenario_id (int): required
Returns: HttpResponse
Example:
GET: /component/add_run_precheck/
"""
model_uuid = request.GET['model_uuid']
scenario_id = request.GET['scenario_id']
model = Model.by_uuid(model_uuid)
can_edit = model.handle_view_access(request.user)
scenario = model.scenarios.filter(id=scenario_id).first()
runs = model.runs.filter(scenario=scenario)
runs = runs.order_by('-created')
prev_dates = []
for run in runs:
start_date, end_date = run.subset_time.split(' to ')
s_date = datetime.strptime(start_date, '%Y-%m-%d')
e_date = datetime.strptime(end_date, '%Y-%m-%d')
n_days = (e_date - s_date).days + 1
days = ' (' + str(n_days) + ' days)'
s_date = '<b>' + s_date.strftime('%b. %-d, %Y') + '</b>'
e_date = '<b>' + e_date.strftime('%b. %-d, %Y') + '</b>'
formatted = s_date + ' to ' + e_date + days
prev_date = (start_date, end_date, mark_safe(formatted))
if prev_date not in prev_dates:
prev_dates.append(prev_date)
timeseries, missing_timeseries = scenario.timeseries_precheck()
context = {
"prev_dates": prev_dates[:4],
"model": model,
"timeseries": timeseries,
"missing_timseries": missing_timeseries,
"can_edit": can_edit
}
html = list(render(request, 'add_run_precheck.html', context))[0]
payload = {
'html': html.decode('utf-8')}
return HttpResponse(json.dumps(payload), content_type="application/json")
@csrf_protect
def show_logs(request):
"""
Retrieve the logs from a run path
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns: HttpResponse
Example:
POST: /component/show_logs/
"""
model_uuid = request.POST['model_uuid']
run_id = request.POST['run_id']
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
try:
run = Run.objects.get(id=run_id)
except Exception:
raise Http404
with open(run.logs_path) as f:
html = f.read()
try:
tb = run.run_task.traceback
html += tb.replace("\n", "<br>").replace(" ", "  ")
except Exception:
pass
return HttpResponse(html, content_type="text/html")
@csrf_protect
def plot_outputs(request):
"""
Retrieve the plots from a run path
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns: HttpResponse
Example:
POST: /component/plot_outputs/
"""
model_uuid = request.POST["model_uuid"]
run_id = request.POST["run_id"]
carrier = request.POST.get("carrier", None)
location = request.POST.get("location", None)
month = request.POST.get("month", None)
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
try:
run = Run.objects.get(id=run_id)
except Exception:
raise Http404
data = run.get_viz_data(carrier, location, month)
return JsonResponse(data)
@csrf_protect
def map_outputs(request):
"""
Retrieve the data for rendering the nodal network map
Parameters:
model_uuid (uuid): required
run_id (int): required
Returns: JsonResponse
Example:
POST: /component/map_outputs/
"""
model_uuid = request.POST['model_uuid']
run_id = request.POST['run_id']
start_date = pd.to_datetime(request.POST['start_date'])
end_date = pd.to_datetime(request.POST['end_date']) + pd.DateOffset(days=1)
model = Model.by_uuid(model_uuid)
model.handle_view_access(request.user)
run = model.runs.filter(id=run_id).first()
response = defaultdict(lambda: None)
if run is None:
response["message"] = "To request data, " \
"post a valid 'model_uuid' and 'run_id'"
else:
# Static
files = ["inputs_colors",
"inputs_names",
"inputs_inheritance",
"inputs_loc_coordinates",
"results_energy_cap"]
for file in files:
with open(os.path.join(run.outputs_path, file + ".csv")) as f:
response[file] = f.read()
# Variable
files = ["results_carrier_con",
"results_carrier_prod"]
month = None
if run.get_months():
month = start_date.month
month = '0' + str(month) if month < 10 else str(month)
ext = '_' + str(month) + '.csv' if month else '.csv'
for file in files:
df = pd.read_csv(os.path.join(run.outputs_path, file + ext),
header=0)
df.set_index('timesteps', inplace=True, drop=False)
df.index = pd.to_datetime(df.index)
df = df[(df.index >= start_date) & (df.index < end_date)]
s = io.StringIO()
df.to_csv(s, index=False)
response[file] = s.getvalue()
return JsonResponse(response)
```
#### File: client/views/outputs.py
```python
from django.conf import settings
from django.shortcuts import render
from django.contrib.auth.decorators import login_required
from django.http import HttpResponseRedirect
from django.urls import reverse
from api.models.engage import Help_Guide
from api.models.configuration import Model
import re
from pytz import common_timezones
def runs_view(request, model_uuid):
"""
View the "Runs" page
Parameters:
model_uuid (uuid): required
Returns: HttpResponse
Example:
http://0.0.0.0:8000/<model_uuid>/run
"""
model = Model.by_uuid(model_uuid)
try:
can_edit = model.handle_view_access(request.user)
except Exception:
return HttpResponseRedirect(reverse('home'))
scenarios = model.scenarios
session_scenario_id = request.session.get('scenario_id', None)
session_scenario = scenarios.filter(id=session_scenario_id).first()
context = {
"timezones": common_timezones,
"model": model,
"scenarios": scenarios,
"session_scenario": session_scenario,
"can_edit": can_edit,
"mapbox_token": settings.MAPBOX_TOKEN,
"cambium_url": settings.CAMBIUM_URL + '?project=' + str(model.uuid),
"help_content": Help_Guide.get_safe_html('runs'),
}
return render(request, "run.html", context)
@login_required
def add_runs_view(request, model_uuid, scenario_id):
"""
View the "Add Run" page
Parameters:
model_uuid (uuid): required
Returns: HttpResponse
Example:
http://0.0.0.0:8000/<model_uuid>/add_runs_view
"""
model = Model.by_uuid(model_uuid)
can_edit = model.handle_view_access(request.user)
context = {
"timezones": common_timezones,
"model": model,
"scenario": model.scenarios.get(id=scenario_id),
"can_edit": can_edit,
"help_content": Help_Guide.get_safe_html('add_run'),
}
return render(request, "add_run.html", context)
def map_viz_view(request, model_uuid, run_id):
""" Example:
http://0.0.0.0:8000/<model_uuid>/<run_id>/map_viz """
model = Model.by_uuid(model_uuid)
try:
can_edit = model.handle_view_access(request.user)
except Exception:
return HttpResponseRedirect(reverse('home'))
run = model.runs.filter(id=run_id).first()
subset_time = run.subset_time # 2005-01-01 to 2005-01-07
run_min_date, run_max_date = re.match(
r"^(\d{4}-\d{2}-\d{2}) to (\d{4}-\d{2}-\d{2})$", subset_time).groups()
context = {
"timezones": common_timezones,
"model": model,
"run": run,
"scenario": run.scenario,
"mapbox_token": settings.MAPBOX_TOKEN,
"can_edit": can_edit,
"run_min_date": run_min_date,
"run_max_date": run_max_date
}
return render(request, "map_viz.html", context)
```
#### File: calliope_app/commands/cli.py
```python
import sys
import click
from .solve_model import solve_model
@click.group()
def main():
"""Solving Engage models in commands"""
return 0
main.add_command(solve_model)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
```
#### File: calliope_app/taskmeta/admin.py
```python
from django.contrib import admin
from .models import CeleryTask
# Register your models here.
class CeleryTaskAdmin(admin.ModelAdmin):
def _date_start(self, obj):
if not obj.date_start:
return ""
return obj.date_start.strftime("%Y-%m-%d %H:%M:%S")
def _date_done(self, obj):
if not obj.date_done:
return ""
return obj.date_done.strftime("%Y-%m-%d %H:%M:%S")
list_display = ['id', 'task_id', 'status', '_date_start', '_date_done', 'result', 'traceback']
admin.site.register(CeleryTask, CeleryTaskAdmin)
``` |
{
"source": "jmorris-uk/openmc_workshop",
"score": 2
} |
#### File: tasks/task_9/lithium_enrichment_and_thickness_optimisation.py
```python
""" run with python3 simulate_sphere_model.py | tqdm >> /dev/null """
""" outputs results to a file called simulation_results.json """
__author__ = "<NAME>"
import openmc
import os
import json
import numpy as np
from numpy import random
import re
from tqdm import tqdm
from gp_tools import GpOptimiser
# from inference.gp_tools import GpOptimiser
from material_maker_functions import *
from numpy import sin, cos, linspace, array, meshgrid
import matplotlib.pyplot as plt
import matplotlib as mpl
import ghalton
def make_breeder_material(enrichment_fraction, breeder_material_name, temperature_in_C):
#density data from http://aries.ucsd.edu/LIB/PROPS/PANOS/matintro.html
natural_breeder_material = openmc.Material(2, "natural_breeder_material")
breeder_material = openmc.Material(1, breeder_material_name) # this is for enriched Li6
element_numbers = get_element_numbers(breeder_material_name)
elements = get_elements(breeder_material_name)
for e, en in zip(elements, element_numbers):
natural_breeder_material.add_element(e, en,'ao')
for e, en in zip(elements, element_numbers):
if e == 'Li':
breeder_material.add_nuclide('Li6', en * enrichment_fraction, 'ao')
breeder_material.add_nuclide('Li7', en * (1.0-enrichment_fraction), 'ao')
else:
breeder_material.add_element(e, en,'ao')
density_of_natural_material_at_temperature = find_density_of_natural_material_at_temperature(breeder_material_name,temperature_in_C,natural_breeder_material)
natural_breeder_material.set_density('g/cm3', density_of_natural_material_at_temperature)
atom_densities_dict = natural_breeder_material.get_nuclide_atom_densities()
atoms_per_barn_cm = sum([i[1] for i in atom_densities_dict.values()])
breeder_material.set_density('atom/b-cm',atoms_per_barn_cm)
return breeder_material
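# Illustrative call (argument values assumed, not taken from the task itself):
# make_breeder_material(0.6, 'Li', 500) builds a lithium material whose Li content
# is 60% Li6 / 40% Li7 by atom fraction, with its atom density taken from the
# natural-composition material evaluated at 500 C.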
def make_materials_geometry_tallies(v):
enrichment_fraction_list,thickness = v
batches = 2
inner_radius = 500
breeder_material_name = 'Li'
temperature_in_C = 500
if isinstance(enrichment_fraction_list,list):
enrichment_fraction = enrichment_fraction_list[0]
else:
enrichment_fraction = enrichment_fraction_list
print('simulating ',batches,enrichment_fraction,inner_radius,thickness,breeder_material_name)
#MATERIALS#
breeder_material = make_breeder_material(enrichment_fraction,breeder_material_name,temperature_in_C)
eurofer = make_eurofer()
mats = openmc.Materials([breeder_material, eurofer])
#GEOMETRY#
breeder_blanket_inner_surface = openmc.Sphere(R=inner_radius)
breeder_blanket_outer_surface = openmc.Sphere(R=inner_radius+thickness)
vessel_inner_surface = openmc.Sphere(R=inner_radius+thickness+10)
vessel_outer_surface = openmc.Sphere(R=inner_radius+thickness+20,boundary_type='vacuum')
breeder_blanket_region = -breeder_blanket_outer_surface & +breeder_blanket_inner_surface
breeder_blanket_cell = openmc.Cell(region=breeder_blanket_region)
breeder_blanket_cell.fill = breeder_material
breeder_blanket_cell.name = 'breeder_blanket'
inner_void_region = -breeder_blanket_inner_surface
inner_void_cell = openmc.Cell(region=inner_void_region)
inner_void_cell.name = 'inner_void'
vessel_region = +vessel_inner_surface & -vessel_outer_surface
vessel_cell = openmc.Cell(region=vessel_region)
vessel_cell.name = 'vessel'
vessel_cell.fill = eurofer
blanket_vessel_gap_region = -vessel_inner_surface & + breeder_blanket_outer_surface
blanket_vessel_gap_cell = openmc.Cell(region=blanket_vessel_gap_region)
blanket_vessel_gap_cell.name = 'blanket_vessel_gap'
universe = openmc.Universe(cells=[inner_void_cell,
breeder_blanket_cell,
blanket_vessel_gap_cell,
vessel_cell])
geom = openmc.Geometry(universe)
#SIMULATION SETTINGS#
sett = openmc.Settings()
# batches = 3 # this is parsed as an argument
sett.batches = batches
sett.inactive = 10
sett.particles = 500
sett.run_mode = 'fixed source'
source = openmc.Source()
source.space = openmc.stats.Point((0,0,0))
source.angle = openmc.stats.Isotropic()
source.energy = openmc.stats.Muir(e0=14080000.0, m_rat=5.0, kt=20000.0) #neutron energy = 14.08MeV, AMU for D + T = 5, temperature is 20KeV
sett.source = source
#TALLIES#
tallies = openmc.Tallies()
# define filters
cell_filter_breeder = openmc.CellFilter(breeder_blanket_cell)
cell_filter_vessel = openmc.CellFilter(vessel_cell)
particle_filter = openmc.ParticleFilter([1]) #1 is neutron, 2 is photon
surface_filter_rear_blanket = openmc.SurfaceFilter(breeder_blanket_outer_surface)
surface_filter_rear_vessel = openmc.SurfaceFilter(vessel_outer_surface)
energy_bins = openmc.mgxs.GROUP_STRUCTURES['VITAMIN-J-175']
energy_filter = openmc.EnergyFilter(energy_bins)
tally = openmc.Tally(name='TBR')
tally.filters = [cell_filter_breeder, particle_filter]
tally.scores = ['205']
tallies.append(tally)
tally = openmc.Tally(name='blanket_leakage')
tally.filters = [surface_filter_rear_blanket, particle_filter]
tally.scores = ['current']
tallies.append(tally)
tally = openmc.Tally(name='vessel_leakage')
tally.filters = [surface_filter_rear_vessel, particle_filter]
tally.scores = ['current']
tallies.append(tally)
tally = openmc.Tally(name='breeder_blanket_spectra')
tally.filters = [cell_filter_breeder, particle_filter, energy_filter]
tally.scores = ['flux']
tallies.append(tally)
tally = openmc.Tally(name='vacuum_vessel_spectra')
tally.filters = [cell_filter_vessel, particle_filter, energy_filter]
tally.scores = ['flux']
tallies.append(tally)
tally = openmc.Tally(name='DPA')
tally.filters = [cell_filter_vessel, particle_filter]
tally.scores = ['444']
tallies.append(tally)
#RUN OPENMC #
model = openmc.model.Model(geom, mats, sett, tallies)
model.run()
sp = openmc.StatePoint('statepoint.'+str(batches)+'.h5')
json_output = {'enrichment_fraction': enrichment_fraction,
'inner_radius': inner_radius,
'thickness': thickness,
'breeder_material_name': breeder_material_name,
'temperature_in_C': temperature_in_C}
tallies_to_retrieve = ['TBR', 'DPA', 'blanket_leakage', 'vessel_leakage']
for tally_name in tallies_to_retrieve:
tally = sp.get_tally(name=tally_name)
# for some reason the tally sum is a nested list
tally_result = tally.sum[0][0][0]/batches
# for some reason the tally std_dev is a nested list
tally_std_dev = tally.std_dev[0][0][0]/batches
json_output[tally_name] = {'value': tally_result,
'std_dev': tally_std_dev}
spectra_tallies_to_retrieve = ['breeder_blanket_spectra', 'vacuum_vessel_spectra']
for spectra_name in spectra_tallies_to_retrieve:
spectra_tally = sp.get_tally(name=spectra_name)
spectra_tally_result = [entry[0][0] for entry in spectra_tally.mean]
spectra_tally_std_dev = [entry[0][0]
for entry in spectra_tally.std_dev]
json_output[spectra_name] = {'value': spectra_tally_result,
'std_dev': spectra_tally_std_dev,
'energy_groups': list(energy_bins)}
return json_output
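# The returned dictionary (shape shown for illustration) looks roughly like:
# {'enrichment_fraction': ..., 'inner_radius': 500, 'thickness': ...,
#  'breeder_material_name': 'Li', 'temperature_in_C': 500,
#  'TBR': {'value': ..., 'std_dev': ...}, 'DPA': {...},
#  'blanket_leakage': {...}, 'vessel_leakage': {...},
#  'breeder_blanket_spectra': {'value': [...], 'std_dev': [...], 'energy_groups': [...]},
#  'vacuum_vessel_spectra': {...}}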
def example_plot_1d(GP):
M = 500
x_gp = linspace(*bounds[0],M)
mu, sig = GP(x_gp)
fig, (ax1, ax2, ax3) = plt.subplots(3, 1, gridspec_kw={'height_ratios': [1, 3, 1]}, figsize = (10,8))
plt.subplots_adjust(hspace=0)
ax1.plot(evaluations, max_values, marker = 'o', ls = 'solid', c = 'orange', label = 'optimum value', zorder = 5)
#ax1.plot([2,12], [max(y_func), max(y_func)], ls = 'dashed', label = 'actual max', c = 'black')
ax1.set_xlabel('Simulations')
ax1.set_xlim([0,len(evaluations)])
#ax1.set_ylim([max(y)-0.3, max(y_func)+0.3])
ax1.xaxis.set_label_position('top')
ax1.yaxis.set_label_position('right')
ax1.xaxis.tick_top()
ax1.set_yticks([])
ax1.legend(loc=4)
ax2.errorbar(GP.x, GP.y, marker='o', yerr=GP.y_err, linestyle='', c = 'green', label = 'Simulation (Halton sample selection)', zorder = 5)
if len(GP.y) > number_of_samples:
ax2.errorbar(GP.x[number_of_samples:], GP.y[number_of_samples:], marker='o', yerr=GP.y_err[number_of_samples:], linestyle='', c = 'red', label = 'Simulation (Gaussian process selection)', zorder = 6)
# ax2.plot(GP.x, GP.y, marker='o', c = 'red', label = 'observations', zorder = 5)
#ax2.plot(GP.x, GP.y, 'o', c = 'red', label = 'observations', zorder = 5)
#ax2.plot(x_gp, y_func, lw = 1.5, c = 'red', ls = 'dashed', label = 'actual function')
ax2.plot(x_gp, mu, lw = 2, c = 'blue', label = 'GP prediction')
ax2.fill_between(x_gp, (mu-2*sig), y2=(mu+2*sig), color = 'blue', alpha = 0.15, label = '95% confidence interval')
# ax2.set_ylim([min(mu-2*sig),max(mu+2*sig)])
ax2.set_ylim([0.8,2.0])
ax2.set_xlim(*GP.bounds)
ax2.set_ylabel('TBR')
ax2.set_xticks([])
ax2.legend(loc=2)
aq = array([abs(GP.expected_improvement(array([k]))) for k in x_gp])
ax3.plot(x_gp, 0.9*aq/max(aq), c = 'green', label = 'acquisition function')
ax3.set_yticks([])
ax3.set_xlabel('Li6 enrichment')
ax3.legend(loc=1)
print('plotting ',GP.x, GP.y, GP.y_err)
# plt.show()
plt.savefig(str(len(GP.y)).zfill(4)+'.png')
def example_plot_2d(GP):
fig, (ax1, ax2) = plt.subplots(2, 1, gridspec_kw={'height_ratios': [1, 3]}, figsize=(10, 8))
plt.subplots_adjust(hspace=0)
ax1.plot(evaluations, max_values, marker='o', ls='solid', c='orange', label='optimum value', zorder=5)
# z_func (the true objective evaluated over the mesh) is never computed in this
# script, so the 'actual max' reference line is disabled to avoid a NameError.
# ax1.plot([5, 30], [z_func.max(), z_func.max()], ls='dashed', label='actual max', c='black')
ax1.set_xlabel('function evaluations')
#ax1.set_xlim([5, 30])
#ax1.set_ylim([max(y) - 0.3, z_func.max() + 0.3])
#ax1.xaxis.set_label_position('top')
#ax1.yaxis.set_label_position('right')
#ax1.xaxis.tick_top()
#ax1.set_yticks([])
#ax1.legend(loc=4)
# ax2.contour(*mesh, z_func, 40)  # disabled: z_func is not available in this script
ax2.plot([i[0] for i in GP.x], [i[1] for i in GP.x], 'D', c='red', markeredgecolor='black')
plt.show()
os.system('rm *.png')
sequencer = ghalton.Halton(2)
number_of_samples = 10
x = sequencer.get(number_of_samples)
x = [[i[0],i[1] * 100] for i in x]
bounds = [(0.0,1.0),(0,100)]
N = 80
# Build the plotting mesh under separate names so the Halton samples held in x are not overwritten.
mesh_x = linspace(*bounds[0], N)
mesh_y = linspace(*bounds[1], N)
mesh = meshgrid(mesh_x, mesh_y)
y = []
y_errors = []
max_values = []
evaluations = []
all_results = []
for filename_counter, coords in enumerate(x):
results = make_materials_geometry_tallies(coords)  # the function expects a single (enrichment_fraction, thickness) pair
all_results.append(results)
y.append(results['TBR']['value'])
y_errors.append(results['TBR']['std_dev'] * 2)
print('x from HS',x[0:filename_counter+1])
print('y from HS',y)
print('y_errors from HS',y_errors)
print(bounds)
if filename_counter >0:
GP = GpOptimiser(x[0:filename_counter+1],y,y_err=y_errors,bounds=bounds)
max_values.append(max(GP.y))
evaluations.append(len(GP.y))
example_plot_2d(GP)
for i in range(number_of_samples,number_of_samples+10):
# plot the current state of the optimisation
# request the proposed evaluation
new_x = GP.search_for_maximum()[0]
# evaluate the new point
new_result = make_materials_geometry_tallies(new_x)
all_results.append(new_result)
new_y = new_result['TBR']['value']
new_y_error = new_result['TBR']['std_dev'] * 2
print('x from loop',new_x)
print('y from loop',new_y)
print('new_y_error from loop',new_y_error)
# update the gaussian process with the new information
GP.add_evaluation(new_x, new_y, new_y_err=new_y_error)
# track the optimum value for plotting
max_values.append(max(GP.y))
evaluations.append(len(GP.y))
example_plot_2d(GP)
os.system('convert *.png output.gif')
os.system('eog -f output.gif')
with open('simulation_results.json', 'w') as file_object:
json.dump(all_results, file_object, indent=2)
``` |
{
"source": "jmorrr/Hietalab_specific_Stitch",
"score": 3
} |
#### File: Hietalab_specific_Stitch/src/stitch_RUN.py
```python
import os
from tkinter import *
from tkinter import filedialog
import logging
import subprocess
class Stitch:
def __init__(self):
self.__py_file_dir = os.path.dirname(os.path.realpath(__file__))
self.__py_file = r"\stitch.py"
self.__py_file_loc = self.__py_file_dir + self.__py_file
self.__img_file = r"\stitch.gif"
self.__img_file_loc = self.__py_file_dir + self.__img_file
# Creates the structure for the GUI with the title
self.__window = Tk()
self.__window.title('Stitch')
# Creates label for select ImageJ.exe prompt
self.__s_ij_prompt = Label(self.__window,
text='Select ImageJ.exe file:') \
.grid(row=3, column=1)
# Creates the browse button for getting the ImageJ.exe path
Button(self.__window, text='Browse', command=self.retrieve_ijfolder) \
.grid(row=3, column=2)
# Creates the variable label for ImageJ path text
self.__imgj_path = StringVar()
self.__selectij = Label(self.__window, text=self.__imgj_path.get(),
bg='white', bd=2,
textvariable=self.__imgj_path, relief='sunken')
self.__selectij.grid(row=3, column=3, columnspan=3, sticky=W)
# Creates label for select folder prompt
self.__s_dir_prompt = Label(self.__window,
text='Select root folder:') \
.grid(row=5, column=1)
# Creates the browse button for getting the root folder
Button(self.__window, text='Browse', command=self.retrieve_rfolder) \
.grid(row=5, column=2)
# Creates the variable label for root folder text
self.__rfolder = StringVar()
self.__selectDir = Label(self.__window, text=self.__rfolder.get(),
bg='white', bd=2,
textvariable=self.__rfolder, relief='sunken')
self.__selectDir.grid(row=5, column=3, columnspan=3, sticky=W)
# Creates check button for using companion.ome file only for stitching
self.__cb_ome_v = IntVar()
self.__cb_ome_v.set(0)
self.__cb_ome = Checkbutton(self.__window,
text='Only use companion.ome file for stitching?',
variable=self.__cb_ome_v, command=self.ome_checked)
self.__cb_ome.grid(row=6, column=1, sticky=W, columnspan=3)
# Creates check button for fused_orig creation yes/no
self.__cb1_var1 = IntVar()
self.__cb1_var1.set(0)
self.__cb1 = Checkbutton(self.__window, text='Create stitched tiff using original positions?',
variable=self.__cb1_var1)
self.__cb1.grid(row=7, column=1, sticky=W, columnspan=3)
# Creates check button for imagej macro run yes/no
self.__cb2_var1 = IntVar()
self.__cb2_var1.set(0)
self.__cb2 = Checkbutton(self.__window,
text='Run imageJ macro?',
variable=self.__cb2_var1)
self.__cb2.grid(row=8, column=1, sticky=W)
# Creates the multiplier entry input field
self.__multi_prompt = Label(self.__window,
text='Enter positions multiplier (Use "." not ","):') \
.grid(row=9, column=1)
self.__multi_input = Entry(self.__window, width=5)
self.__multi_input.grid(row=9, column=2, padx=5, ipadx=5)
# Creates the label for errors in multiplier input
self.__multi_error = Label(self.__window, text='')
self.__multi_error.grid(row=10, column=1)
self.__multi_errortxt = 'Multiplier input must be greater than 0!'
# Creates label for select ImageJ.exe prompt
self.__magnification_vals = Label(self.__window,
text='Magnifications and multiplier values '
'(Aurox):\n '
'10x - 1.56\n '
'20x - 3.1\n '
'63x - 4.9462') \
.grid(row=11, column=1)
# Creates the run button for running the simulator
Button(self.__window, text='Run', command=self.stitch_away) \
.grid(row=12, column=1, sticky=E)
# Creates button for quitting the stitcher
Button(self.__window, text='Quit', command=self.quit_func) \
.grid(row=12, column=2, sticky=W)
# Adds the Stitch image
Img = PhotoImage(file=self.__img_file_loc)
Img = Img.subsample(5)
imglabel = Label(self.__window, image=Img)
imglabel.image = Img
imglabel.grid(row=6, column=4, rowspan=6)
def retrieve_ijfolder(self):
selected_path = filedialog.askopenfilename()
self.__imgj_path.set(selected_path)
def retrieve_rfolder(self):
selected_directory = filedialog.askdirectory()
self.__rfolder.set(selected_directory)
if not selected_directory == '':
logging.basicConfig(filename='%s/stitch.log' % selected_directory,
format='%(asctime)s %(levelname)-8s %(message)s',
level=logging.INFO, datefmt='%d-%m-%Y %H:%M:%S')
def ome_checked(self):
if self.__cb_ome_v.get() == 1:
self.__cb1.config(state=DISABLED)
self.__cb1_var1.set(0)
self.__multi_input.delete(0, "end")
self.__multi_input.config(state=DISABLED)
else:
self.__cb1.config(state=NORMAL)
self.__multi_input.config(state=NORMAL)
def prompt_creator(self):
prompt_items = [str(self.__rfolder.get()), str(self.__cb1_var1.get()),
str(self.__cb2_var1.get()),
str(self.__multi_input.get()),
str(self.__cb_ome_v.get())]
prompt = ' "root_dir_path=\'{p[0]}\',y_orig=\'{p[1]}\'' \
',y_macro=\'{p[2]}\',multiplier=\'{p[3]}\'' \
',ome_only=\'{p[4]}\'"'\
.format(p=prompt_items)
lab_prompt = self.__imgj_path.get() + " --ij2 --headless --console --run " + \
self.__py_file_loc + prompt
return lab_prompt
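# For illustration, the assembled headless command has the general form (paths assumed):
# <ImageJ.exe> --ij2 --headless --console --run <...>\stitch.py
# "root_dir_path='<root>',y_orig='0',y_macro='0',multiplier='1.56',ome_only='0'"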
def stitch_away(self):
lab_prompt = self.prompt_creator()
if self.__rfolder.get() == '' or self.__imgj_path.get() == '':
from tkinter import messagebox
messagebox.showinfo("Warning", "File directory or ImageJ path not"
" selected!")
else:
try:
if self.__cb_ome_v.get() == 1:
try:
self.__window.destroy()
subprocess.call(lab_prompt, shell=True)
except Exception as e:
logging.exception(str(e))
elif float(self.__multi_input.get()) > 0:
try:
self.__window.destroy()
subprocess.call(lab_prompt, shell=True)
except Exception as e:
logging.exception(str(e))
else:
self.__multi_error.configure(text=self.__multi_errortxt, fg='red')
except ValueError:
self.__multi_error.configure(text=self.__multi_errortxt, fg='red')
def quit_func(self):
self.__window.destroy()
def start(self):
self.__window.mainloop()
def main():
ui = Stitch()
ui.start()
main()
``` |
{
"source": "jmorse/dexter",
"score": 2
} |
#### File: dex/command/ParseCommand.py
```python
from collections import defaultdict
import imp
import inspect
import os
from dex.command.CommandBase import CommandBase
from dex.utils.Exceptions import CommandParseError
def _get_valid_commands():
"""Search the commands subdirectory for any classes which are subclasses of
CommandBase and return a dict in the form of {name: class}.
"""
try:
return _get_valid_commands.cached
except AttributeError:
commands_directory = os.path.join(
os.path.dirname(__file__), 'commands')
potential_modules = [
os.path.splitext(f)[0] for f in os.listdir(commands_directory)
]
commands = {}
for m in potential_modules:
try:
module_info = imp.find_module(m, [commands_directory])
module = imp.load_module(m, *module_info)
except ImportError:
continue
commands.update({
c[0]: c[1]
for c in inspect.getmembers(module, inspect.isclass)
if c[1] != CommandBase and issubclass(c[1], CommandBase)
})
_get_valid_commands.cached = commands
return commands
def get_command_object(commandIR):
"""Externally visible version of _safe_eval. Only returns the Command
object itself.
"""
valid_commands = _get_valid_commands()
# pylint: disable=eval-used
command = eval(commandIR.raw_text, valid_commands)
# pylint: enable=eval-used
command.path = commandIR.loc.path
command.lineno = commandIR.loc.lineno
return command
def _get_command_name(command_raw):
"""Return command name by splitting up DExTer command contained in
command_raw on the first opening parenthesis and further stripping
any potential leading or trailing whitespace.
"""
command_name = command_raw.split('(', 1)[0].rstrip()
return command_name
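# e.g. _get_command_name("DexExpectWatchValue('x', '1', on_line=3)")
# returns "DexExpectWatchValue" (example command text assumed).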
def _find_all_commands_in_file(path, file_lines, valid_commands):
commands = defaultdict(dict)
err = CommandParseError()
err.filename = path
for lineno, line in enumerate(file_lines):
lineno += 1 # Line numbering convention starts at 1.
err.lineno = lineno
err.src = line.rstrip()
for command in valid_commands:
column = line.rfind(command)
if column != -1:
break
else:
continue
to_eval = line[column:].rstrip()
try:
# pylint: disable=eval-used
command = eval(to_eval, valid_commands)
# pylint: enable=eval-used
command_name = _get_command_name(to_eval)
command.path = path
command.lineno = lineno
command.raw_text = to_eval
assert (path, lineno) not in commands[command_name], (
command_name, commands[command_name])
commands[command_name][path, lineno] = command
except SyntaxError as e:
err.info = str(e.msg)
err.caret = '{}<r>^</>'.format(' ' * (column + e.offset - 1))
raise err
except TypeError as e:
err.info = str(e).replace('__init__() ', '')
err.caret = '{}<r>{}</>'.format(' ' * (column),
'^' * (len(err.src) - column))
raise err
return dict(commands)
def find_all_commands(source_files):
commands = defaultdict(dict)
valid_commands = _get_valid_commands()
for source_file in source_files:
with open(source_file) as fp:
lines = fp.readlines()
file_commands = _find_all_commands_in_file(source_file, lines,
valid_commands)
for command_name in file_commands:
commands[command_name].update(file_commands[command_name])
return dict(commands)
```
#### File: dex/dextIR/StepIR.py
```python
from collections import OrderedDict
import json
from dex.dextIR.FrameIR import FrameIR
from dex.dextIR.LocIR import LocIR
from dex.dextIR.ValueIR import ValueIR
from dex.utils import create_named_enum
from dex.utils.serialize import SrField, SrObject
StopReason = create_named_enum('BREAKPOINT', 'STEP', 'PROGRAM_EXIT', 'ERROR',
'OTHER')
StepKind = create_named_enum('FUNC', 'FUNC_EXTERNAL', 'FUNC_UNKNOWN',
'FORWARD', 'SAME', 'BACKWARD', 'UNKNOWN')
# pylint: disable=no-member
class StepIR(SrObject):
sr_fields = [
SrField('step_index', int),
SrField(
'step_kind',
str,
required_in_init=False,
default_value=StepKind.UNKNOWN,
can_be_none=True,
possible_values=StepKind.possible_values),
SrField(
'stop_reason',
str,
possible_values=StopReason.possible_values,
can_be_none=True),
SrField('frames', FrameIR, list_of=True),
SrField(
'watches',
ValueIR,
dict_of=True,
required_in_init=False,
default_value=OrderedDict),
]
def __str__(self):
try:
frame = self.current_frame
frame_info = (frame.function, frame.loc.path, frame.loc.lineno,
frame.loc.column)
except AttributeError:
frame_info = (None, None, None, None)
watches = OrderedDict((w, self.watches[w].value) for w in self.watches)
step_info = (self.step_index, ) + frame_info + (
self.stop_reason, self.step_kind, watches)
return '{}{}'.format('. ' * (self.num_frames - 1),
json.dumps(step_info))
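# Illustrative rendering (values assumed): a step two frames deep prints as
# '. [4, "main", "test.cpp", 12, 8, "STEP", "FORWARD", {"x": "1"}]'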
@property
def num_frames(self):
return len(self.frames)
@property
def current_frame(self):
try:
return self.frames[0]
except IndexError:
return None
@property
def current_function(self):
try:
return self.current_frame.function
except AttributeError:
return None
@property
def current_location(self):
try:
return self.current_frame.loc
except AttributeError:
return LocIR(path=None, lineno=None, column=None)
```
#### File: tools/annotate_expected_values/Tool.py
```python
import os
from dex.dextIR.DextIR import importDextIR
from dex.dextIR.StepIR import StepKind
from dex.tools import ToolBase
from dex.utils.Exceptions import Error, ImportDextIRException
class ExpectedWatchValue(object):
def __init__(self, file_path, expression, start_value, line_seen):
self.file_path = file_path
self.expression = expression
self.values = [start_value]
self.last_line_seen = line_seen
self.from_line = line_seen
self.to_line = line_seen
self.last_value_seen = start_value
def __eq__(self, other):
return (self.file_path == other.file_path
and self.expression == other.expression
and self.from_line == other.from_line
and self.to_line == other.to_line)
def __hash__(self):
return hash((self.file_path, self.expression, self.from_line,
self.to_line))
def add_value(self, value, line_seen):
if line_seen < self.from_line:
self.from_line = line_seen
if line_seen > self.to_line:
self.to_line = line_seen
if self.last_value_seen == value:
return
self.values.append(value)
self.last_value_seen = value
self.last_line_seen = line_seen
def is_out_of_range(self, lineno):
return ((lineno < self.from_line - 1 or lineno > self.to_line + 1)
and abs(lineno - self.last_line_seen) > 2)
def __str__(self):
if self.from_line == self.to_line:
from_to = 'on_line={}'.format(self.from_line)
else:
from_to = 'from_line={}, to_line={}'.format(
self.from_line, self.to_line)
return ("// DexExpectWatchValue('{}', {}, {})".format(
self.expression, ', '.join(str(v) for v in self.values), from_to))
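# Example of the emitted annotation (expression and values assumed):
# // DexExpectWatchValue('i', '0', '1', '2', from_line=4, to_line=7)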
class ExpectedStepKind(object):
def __init__(self, name, start_count):
self.name = name
self.count = start_count
def increase_count(self):
self.count += 1
class Tool(ToolBase):
"""Given a JSON dextIR file, attempt to automatically provide automatic
DExTer command annotations based on that output. Typically, this would be
from an unoptimized build.
It is expected that these automatically generated annotations will need
some manual fix-ups to become generic enough to be useful, but it is hoped
that this tool will provide a starting point to speed up the overall
process.
"""
@property
def name(self):
return 'DExTer annotate expected values'
def add_tool_arguments(self, parser, defaults):
parser.description = Tool.__doc__
parser.add_argument(
'json_file',
metavar='dexter-json-file',
type=str,
help='dexter json file to read')
parser.add_argument(
'source_files',
metavar='source-file',
type=str,
nargs='+',
help='source files to annotate')
def handle_options(self, defaults):
options = self.context.options
options.json_file = os.path.abspath(options.json_file)
if not os.path.isfile(options.json_file):
raise Error('<d>could not find</> <r>"{}"</>'.format(
options.json_file))
options.source_files = [
os.path.normcase(os.path.abspath(sf))
for sf in options.source_files
]
for sf in options.source_files:
if not os.path.isfile(sf):
if os.path.exists(sf):
raise Error('"{}" <d>is not a valid file</>'.format(sf))
raise Error('<d>could not find file</> "{}"'.format(sf))
def go(self): # noqa
options = self.context.options
exp_values = set()
step_kinds = []
for step_kind in StepKind.possible_values:
step_kinds.append(ExpectedStepKind(step_kind, 0))
with open(options.json_file) as fp:
try:
step_collection = importDextIR(fp.read())
except ImportDextIRException as e:
raise Error(
'<d>could not import</> <r>"{}"</>: <d>{}</>'.format(
options.json_file, e))
for step in getattr(step_collection, 'steps'):
lineno = step.current_location.lineno
for step_kind in step_kinds:
if step_kind.name == step.step_kind:
step_kind.increase_count()
for value_info in step.watches.values():
if value_info.value is None:
continue
found_exp_val = False
is_pointer = value_info.value.startswith('0x')
for exp_value in exp_values:
if exp_value.expression == value_info.expression:
if exp_value.is_out_of_range(lineno):
exp_values.add(
ExpectedWatchValue(
step.current_location.path,
value_info.expression, "'{}'".format(
value_info.value), lineno))
found_exp_val = True
break
if not is_pointer:
exp_value.add_value(
"'{}'".format(value_info.value), lineno)
found_exp_val = True
break
if not found_exp_val:
exp_values.add(
ExpectedWatchValue(
step.current_location.path, value_info.expression,
"'{}'".format(value_info.value), lineno))
for source_file in options.source_files:
with open(source_file, 'a') as fp:
exp_values_trimmed = [
v for v in exp_values if v.file_path == source_file
]
if exp_values_trimmed:
fp.write('\n\n')
prev_from_line = -1
for exp_value in sorted(
exp_values_trimmed,
key=lambda v: (v.from_line, v.expression)):
if exp_value.from_line != prev_from_line:
fp.write('\n')
prev_from_line = exp_value.from_line
fp.write('\n{}'.format(exp_value))
with open(options.source_files[0], 'a') as fp:
if step_kinds:
fp.write('\n\n')
for step_kind in step_kinds:
fp.write("\n// DexExpectStepKind('{}', {})".format(
step_kind.name, step_kind.count))
return 0
```
#### File: tools/list_debuggers/Tool.py
```python
from dex.debugger.Debuggers import add_debugger_tool_arguments1
from dex.debugger.Debuggers import handle_debugger_tool_options1
from dex.debugger.Debuggers import Debuggers
from dex.tools import ToolBase
from dex.utils import Timer
from dex.utils.Exceptions import DebuggerException, Error
class Tool(ToolBase):
"""List all of the potential debuggers that DExTer knows about and whether
there is currently a valid interface available for them.
"""
@property
def name(self):
return 'DExTer list debuggers'
def add_tool_arguments(self, parser, defaults):
parser.description = Tool.__doc__
add_debugger_tool_arguments1(parser, defaults)
def handle_options(self, defaults):
handle_debugger_tool_options1(self.context, defaults)
def go(self):
with Timer('list debuggers'):
try:
Debuggers(self.context).list()
except DebuggerException as e:
raise Error(e)
return 0
``` |
{
"source": "jmorski/salesforce-challenge",
"score": 3
} |
#### File: jmorski/salesforce-challenge/main.py
```python
import sys
filename = "proga.dat"
packageList = []
class package:
def __init__(self,name):
self.name = name
self.dependents = []
self.isInstalled = False
#requested by the user
self.explicitlyInstalled = False
#needed by another package
self.implicitlyInstalled = False
def getName(self):
return self.name
def setName(self,name):
self.name = name
def addDependents(self,pkg):
self.dependents.append(pkg)
def getDependents(self):
return self.dependents
def isInstalled(self):
return self.isInstalled
def setInstalled(self,explicit,implicit):
if self.isInstalled:
return self.isInstalled
for x in self.getDependents():
x.setInstalled(False,True)
print "\t Installing %s" % self.getName()
self.isInstalled = True
self.explicitlyInstalled = explicit
self.implicitlyInstalled = implicit
def dependsOn(self,deppkg):
# True only if deppkg appears anywhere in this package's dependents.
for pkg in self.getDependents():
if pkg.getName() == deppkg.getName():
return True
return False
def canBeUninstalled(self, explicitRemoval):
if not explicitRemoval and self.explicitlyInstalled:
return False
# Removable only if no installed package still depends on it.
for pkg in packageList:
if pkg.isInstalled and pkg.dependsOn(self):
return False
return True
def uninstall(self):
print "\t Removing %s" % self.getName()
self.isInstalled = False
self.implicitlyInstalled = False
self.explicitlyInstalled = False
for pkg in self.getDependents():
if pkg.canBeUninstalled(False):
pkg.uninstall()
def getCreatePackage(name):
curpack = None
for pkg in packageList:
if pkg.getName() == name:
curpack = pkg
if curpack is None:
curpack = package(name)
packageList.append(curpack)
return curpack
def stripBlanksFromList(list):
list = [x for x in list if x != ""]
return list
def processLine(line):
commandparts = line.split(" ")
if commandparts[0] == "DEPEND":
commandparts = stripBlanksFromList(commandparts)
for part in commandparts:
if part != "DEPEND":
getCreatePackage(part)
currentpackage = getCreatePackage(commandparts[1])
for x in range(2,len(commandparts)):
currentpackage.addDependents(getCreatePackage(commandparts[x]))
elif commandparts[0] == "INSTALL":
commandparts = stripBlanksFromList(commandparts)
for part in commandparts:
if part != "INSTALL":
pkg = getCreatePackage(part)
if pkg.isInstalled:
print "\t %s is already installed" % pkg.getName()
pkg.setInstalled(True,False)
elif commandparts[0] == "REMOVE":
commandparts = stripBlanksFromList(commandparts)
for part in commandparts:
if part != "REMOVE":
pkg = getCreatePackage(part)
if not pkg.isInstalled:
print "\t %s is not installed" % pkg.name
return
else:
if pkg.canBeUninstalled(True):
pkg.uninstall()
else:
print "\t %s is still needed" % pkg.name
elif commandparts[0] == "LIST":
for pkg in packageList:
if pkg.isInstalled:
print "\t %s" %pkg.getName()
else:
print "Invalid Command"
sys.exit()
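# Illustrative proga.dat contents (package names assumed, one command per line):
# DEPEND TELNET TCPIP NETCARD
# INSTALL TELNET
# LIST
# REMOVE TELNET
# END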
def main():
try:
lines = tuple(open(filename,'r'))
except IOError:
print "Could not not open file %s ",filename
for line in lines:
line = line.rstrip()
if line == "END":
sys.exit(0)
print '%s' % line
processLine(line)
if __name__ == "__main__":
main()
``` |
{
"source": "jmortega/OSTrICa",
"score": 2
} |
#### File: Plugins/NortonSafeWeb/__init__.py
```python
import sys
import httplib
import string
import socket
import gzip
import re
import StringIO
import json
import ssl
from bs4 import BeautifulSoup
from ostrica.utilities.cfg import Config as cfg
extraction_type = [cfg.intelligence_type['ip'], cfg.intelligence_type['domain']]
enabled = True
version = 0.1
developer = '<NAME> <<EMAIL>>'
description = 'Plugin used to check if a domain or an ip is in SafeWeb'
visual_data = False
class NortonSafeWeb:
def __init__(self):
self.safeweb_host = 'safeweb.norton.com'
self.intelligence = {}
self.server_response = ''
pass
def __del__(self):
if cfg.DEBUG:
print 'cleanup NortonSafeWeb...'
self.intelligence = {}
def extract_intelligence(self):
if self.server_response.find('<b>WARNING</b>') != -1:
self.intelligence['safeweb'] = 'WARNING'
elif self.server_response.find('<b>SAFE</b>') != -1:
self.intelligence['safeweb'] = 'SAFE'
elif self.server_response.find('<b>UNTESTED</b>') != -1:
self.intelligence['safeweb'] = 'UNTESTED'
else:
self.intelligence['safeweb'] = ''
return True
def extract_server_info(self, data_to_analyze):
ssl_context = ssl._create_unverified_context()
query = '/report/show_mobile?name=%s' % (data_to_analyze)
hhandle = httplib.HTTPSConnection(self.safeweb_host, context=ssl_context, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('referer', 'https://safeweb.norton.com/rate_limit')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if response.status == 200:
self.server_response = response.read()
if self.extract_intelligence() != False:
return True
else:
return False
else:
return False
def run(intelligence, extraction_type):
if cfg.DEBUG:
print 'Running NortonSafeWeb() on %s' % intelligence
intel_collector = NortonSafeWeb()
if (extraction_type == cfg.intelligence_type['ip']) or (extraction_type == cfg.intelligence_type['domain']):
if intel_collector.extract_server_info(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
def extracted_information(extraction_type, intelligence_dictionary):
return {'extraction_type': extraction_type, 'intelligence_information':intelligence_dictionary}
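# Illustrative use (domain value assumed):
# run('example.com', cfg.intelligence_type['domain'])
# -> {'extraction_type': <domain type>, 'intelligence_information': {'safeweb': 'SAFE'}}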
def data_visualization(nodes, edges, json_data):
return nodes, edges
```
#### File: Plugins/SpyOnWeb/__init__.py
```python
import sys
import httplib
import string
import socket
import gzip
import re
import StringIO
import json
from bs4 import BeautifulSoup
from ostrica.utilities.cfg import Config as cfg
extraction_type = [cfg.intelligence_type['ip'], cfg.intelligence_type['domain']]
enabled = True
version = 0.1
developer = '<NAME> <<EMAIL>>'
description = 'Plugin used to collect information about domains related to IP, Google Adsense and Google Analytics IDs'
visual_data = False
class SpyOnWeb:
def __init__(self):
self.host = 'spyonweb.com'
self.intelligence = {}
self.server_response = ''
self.ip_address = ''
self.n_domains = ''
pass
def __del__(self):
if cfg.DEBUG:
print 'cleanup SpyOnWeb...'
self.intelligence = {}
def extract_ip_associated_to_url(self, soup):
url_to_ip = soup.findAll('h3', {'class':'panel-title'})
if len(url_to_ip) != 1:
return False
try:
self.ip_address = url_to_ip[0].contents[0].get_text()
pos = url_to_ip[0].contents[2].get_text().find(' ')
if pos != -1:
self.n_domains = url_to_ip[0].contents[2].get_text()[:pos]
else:
self.n_domains = ''
except:
return False
return True
def extract_associated_urls(self, soup):
associated_urls = []
same_ip_url = soup.findAll('div', {'class':'links'})
if len(same_ip_url) == 0:
return False
urls = same_ip_url[0].findAll('a')
if len(urls) == 0:
return False
for url in urls:
if url.get_text() != '':
associated_urls.append(url.get_text())
return associated_urls
def extract_urls(self, soup):
related_domains = []
all_available_ips = soup.findAll('div', {'class':'panel panel-default'})
for available_ip in all_available_ips:
if self.extract_ip_associated_to_url(available_ip) == False:
continue
associated_urls = self.extract_associated_urls(available_ip)
if associated_urls == False:
self.ip_address = ''
self.n_domains = ''
continue
if self.ip_address.startswith('pub-'):
related_domains.append({ 'GoogleAdsense': self.ip_address, 'url_details': (self.n_domains, associated_urls) })
elif self.ip_address.startswith('UA-'):
related_domains.append({ 'GoogleAnalytics': self.ip_address, 'url_details': (self.n_domains, associated_urls) })
else:
related_domains.append({ 'ip': self.ip_address, 'url_details': (self.n_domains, associated_urls) })
return related_domains
def extract_intelligence(self):
soup = BeautifulSoup(self.server_response, 'html.parser')
self.intelligence['associated_urls'] = self.extract_urls(soup)
pass
def extract_server_info(self, data_to_analyze):
query = '/%s' % (data_to_analyze)
hhandle = httplib.HTTPConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('referer', 'http://spyonweb.com')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if response.status == 200:
self.server_response = response.read()
if self.extract_intelligence() != False:
return True
else:
return False
else:
return False
def run(intelligence, extraction_type):
if cfg.DEBUG:
print 'Running SpyOnWeb() on %s' % intelligence
intel_collector = SpyOnWeb()
if (extraction_type == cfg.intelligence_type['ip']) or (extraction_type == cfg.intelligence_type['domain']):
if intel_collector.extract_server_info(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
def extracted_information(extraction_type, intelligence_dictionary):
return {'extraction_type': extraction_type, 'intelligence_information':intelligence_dictionary}
def data_visualization(nodes, edges, json_data):
if json_data['plugin_name'] == 'SpyOnWeb':
visual_report = SpyOnWebVisual(nodes, edges, json_data)
return visual_report.nodes, visual_report.edges
else:
return nodes, edges
class SpyOnWebVisual:
def __init__(self, ext_nodes, ext_edges, intel):
self.nodes = ext_nodes
self.edges = ext_edges
self.json_data = intel
self.visual_report_dictionary = {}
self.origin = ''
self.color = '#999966'
if self.parse_intelligence() != False:
self.parse_visual_data()
def parse_intelligence(self):
related_websites = {}
if self.json_data['intelligence'] is None:
return False
if 'associated_urls' in self.json_data['intelligence']['intelligence_information']:
for associated_urls in self.json_data['intelligence']['intelligence_information']['associated_urls']:
if 'url_details' in associated_urls and 'GoogleAdsense' in associated_urls:
related_websites['GoogleAdSense'] = (associated_urls['GoogleAdsense'], associated_urls['url_details'][1])
elif 'url_details' in associated_urls and 'ip' in associated_urls:
related_websites['ip'] = (associated_urls['ip'], associated_urls['url_details'][1])
elif 'url_details' in associated_urls and 'GoogleAnalytics' in associated_urls:
related_websites['GoogleAnalytics'] = (associated_urls['GoogleAnalytics'], associated_urls['url_details'][1])
if self.json_data['requested_intel'] not in self.visual_report_dictionary.keys():
self.visual_report_dictionary[self.json_data['requested_intel']] = {'SpyOnWeb': [{'related_websites': related_websites}]}
else:
self.visual_report_dictionary[self.json_data['requested_intel']].update({'SpyOnWeb': [{'related_websites': related_websites}]})
self.origin = self.json_data['requested_intel']
if self.origin not in self.edges.keys():
self.edges.setdefault(self.origin, [])
def parse_visual_data(self):
for intel in self.visual_report_dictionary[self.origin]['SpyOnWeb']:
for key, value in intel.iteritems():
if key == 'related_websites':
self._manage_spyonweb_relatedwebsites(value)
def _manage_spyonweb_relatedwebsites(self, sites):
for key, value in sites.iteritems():
if key == 'ip':
self._manage_associated_hosts_to_ip(value)
elif key == 'GoogleAdSense':
self._manage_associated_google_adsense_hosts(value)
if key == 'GoogleAnalytics':
self._manage_associated_google_analytics_hosts(value)
def _manage_associated_hosts_to_ip(self, hosts):
size = 30
ip = hosts[0]
for host in hosts[1]:
if host in self.nodes.keys():
self.nodes[host] = (self.nodes[host][0] + 5, self.nodes[host][1], self.nodes[host][2])
else:
self.nodes[host] = (size, self.color, 'associated domain')
if ip not in self.edges.keys():
self.edges.setdefault(ip, [])
self.edges[ip].append(host)
else:
self.edges[ip].append(host)
def _manage_associated_google_adsense_hosts(self, hosts):
size = 30
google_adsense_id = hosts[0]
if google_adsense_id in self.nodes.keys():
self.nodes[google_adsense_id] = (self.nodes[google_adsense_id][0] + 5, self.nodes[google_adsense_id][1], self.nodes[google_adsense_id][2])
else:
self.nodes[google_adsense_id] = (size, self.color, 'analytics associated domain')
for host in hosts[1]:
if host in self.nodes.keys():
self.nodes[host] = (self.nodes[host][0] + 5, self.nodes[host][1], self.nodes[host][2])
else:
self.nodes[host] = (size, self.color, 'adsense associated domain')
if google_adsense_id not in self.edges.keys():
self.edges.setdefault(google_adsense_id, [])
self.edges[google_adsense_id].append(host)
else:
self.edges[google_adsense_id].append(host)
def _manage_associated_google_analytics_hosts(self, hosts):
size = 30
google_analytics_id = hosts[0]
if google_analytics_id in self.nodes.keys():
self.nodes[google_analytics_id] = (self.nodes[google_analytics_id][0] + 5, self.nodes[google_analytics_id][1], self.nodes[google_analytics_id][2])
else:
self.nodes[google_analytics_id] = (size, self.color, 'analytics associated domain')
for host in hosts[1]:
if host in self.nodes.keys():
self.nodes[host] = (self.nodes[host][0] + 5, self.nodes[host][1], self.nodes[host][2])
else:
self.nodes[host] = (size, self.color, 'analytics associated domain')
if google_analytics_id not in self.edges.keys():
self.edges.setdefault(google_analytics_id, [])
self.edges[google_analytics_id].append(host)
else:
self.edges[google_analytics_id].append(host)
```
#### File: Plugins/TCPIPutils/__init__.py
```python
import httplib
import string
import socket
import gzip
import re
import StringIO
from bs4 import BeautifulSoup
from ostrica.utilities.cfg import Config as cfg
extraction_type = [cfg.intelligence_type['domain'], cfg.intelligence_type['asn']]
enabled = True
version = 0.1
developer = '<NAME> <<EMAIL>>'
description = 'Plugin used to collect information about domains or ASNs on TCPIPUtils'
visual_data = True
class TCPIPUtils(object):
def __init__(self):
self.host = 'www.utlsapi.com'
self.asn_host = 'www.tcpiputils.com'
self.version = '1.0'
self.extversion = '0.1'
self.intelligence = {}
pass
def __del__(self):
if cfg.DEBUG:
print 'cleanup TCPIPutils...'
self.intelligence = {}
def asn_information(self, asn):
query = '/browse/as/%s' % (asn)
hhandle = httplib.HTTPConnection(self.asn_host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
if response.getheader('Content-Encoding') == 'gzip':
content = StringIO.StringIO(response.read())
server_response = gzip.GzipFile(fileobj=content).read()
if (server_response.find('No valid IPv4 address found!') != 1):
self.extract_asn_intelligence(server_response)
return True
else:
return False
else:
return False
def domain_information(self, domain):
query = '/plugin.php?version=%s&type=ipv4info&hostname=%s&source=chromeext&extversion=%s' % (self.version, domain, self.extversion)
hhandle = httplib.HTTPSConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Accept', 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,*/*;q=0.8')
hhandle.putheader('Accept-Encoding', 'gzip, deflate, sdch')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
if response.getheader('Content-Encoding') == 'gzip':
content = StringIO.StringIO(response.read())
server_response = gzip.GzipFile(fileobj=content).read()
if (server_response.find('No valid IPv4 address found!') != 1):
self.extract_domain_intelligence(server_response)
return True
else:
return False
else:
return False
def extract_domain_intelligence(self, server_response):
ip_address = False
description = False
location = False
subnet = False
asn_number = False
soup = BeautifulSoup(server_response, 'html.parser')
all_tds = soup.findAll('td')
for td in all_tds:
if td.get_text() == unicode('IP address'):
ip_address = True
continue
elif td.get_text() == unicode('Description'):
description = True
continue
elif td.get_text() == unicode('Location'):
location = True
continue
elif td.get_text() == unicode('IP-range/subnet'):
subnet = True
continue
elif td.get_text() == unicode('ASN number'):
asn_number = True
continue
if ip_address == True:
if 'ip_address' not in self.intelligence.keys():
self.intelligence['ip_address'] = td.get_text()
ip_address = False
continue
elif description == True:
if 'description' not in self.intelligence.keys():
self.intelligence['description'] = td.get_text()
description = False
continue
elif location == True:
if 'location' not in self.intelligence.keys():
self.intelligence['location'] = td.get_text().replace(u'\xa0', '')
location = False
continue
elif subnet == True:
if 'subnet' not in self.intelligence.keys():
self.intelligence['subnet'] = td.contents[2]
self.intelligence['subnet_cidr'] = td.contents[0].get_text()
subnet = False
continue
elif asn_number == True:
if 'asn_number' not in self.intelligence.keys():
self.intelligence['asn_number'] = td.get_text()
asn_number = False
continue
if 'ip_address' not in self.intelligence.keys():
self.intelligence['ip_address'] = ''
if 'description' not in self.intelligence.keys():
self.intelligence['description'] = ''
if 'location' not in self.intelligence.keys():
self.intelligence['location'] = ''
if 'subnet' not in self.intelligence.keys():
self.intelligence['subnet'] = ''
if 'asn_number' not in self.intelligence.keys():
self.intelligence['asn_number'] = ''
if 'n_domains' not in self.intelligence.keys():
self.intelligence['n_domains'] = ''
if 'adult_domains' not in self.intelligence.keys():
self.intelligence['adult_domains'] = ''
if 'name_servers' not in self.intelligence.keys():
self.intelligence['name_servers'] = ''
if 'spam_hosts' not in self.intelligence.keys():
self.intelligence['spam_hosts'] = ''
if 'open_proxies' not in self.intelligence.keys():
self.intelligence['open_proxies'] = ''
if 'mail_servers' not in self.intelligence.keys():
self.intelligence['mail_servers'] = ''
def extract_mailservers_associated_to_asn(self, soup):
mail_servers = []
idx = 0
all_tds = soup.findAll('td')
while idx < len(all_tds):
if all_tds[idx].get_text() == unicode('See more items'):
idx += 1
continue
elif all_tds[idx].get_text().find(u'Note:') != -1:
break
mail_servers.append(all_tds[idx].get_text())
idx += 3
self.intelligence['mail_servers'] = mail_servers
def extract_domains_associated_to_asn(self, soup):
associated_domains = []
idx = 0
all_tds = soup.findAll('td')
while idx < len(all_tds):
if all_tds[idx].get_text() == unicode('See more items'):
idx += 1
continue
elif all_tds[idx].get_text().find(u'Note:') != -1:
break
domain_name = all_tds[idx].get_text()
idx += 1
ip_address = all_tds[idx].get_text()
idx += 1
associated_domains.append((domain_name, ip_address))
self.intelligence['associated_domains'] = associated_domains
def extract_asn_intelligence(self, server_response):
n_domains = False
adult_domains = False
name_servers = False
spam_hosts = False
open_proxies = False
mail_servers = False
soup = BeautifulSoup(server_response, 'html.parser')
if not soup.findAll(text=re.compile(r'No hosted mail servers found on')):
self.extract_mailservers_associated_to_asn(soup.findAll('table')[6]) # mail servers
if not soup.findAll(text=re.compile(r'No hosted domains found on')):
self.extract_domains_associated_to_asn(soup.findAll('table')[4]) # domains
all_tds = soup.findAll('td')
for td in all_tds:
if td.get_text() == unicode('Number of domains hosted'):
n_domains = True
continue
elif td.get_text() == unicode('Number of adult domains hosted'):
adult_domains = True
continue
elif td.get_text() == unicode('Number of name servers hosted'):
name_servers = True
continue
elif td.get_text() == unicode('Number of SPAM hosts hosted'):
spam_hosts = True
continue
elif td.get_text() == unicode('Number of open proxies hosted'):
open_proxies = True
continue
elif td.get_text() == unicode('Number of mail servers hosted'):
mail_servers = True
continue
if n_domains == True:
if 'n_domains' not in self.intelligence.keys():
self.intelligence['n_domains'] = td.get_text()
n_domains = False
continue
elif adult_domains == True:
if 'adult_domains' not in self.intelligence.keys():
self.intelligence['adult_domains'] = td.get_text()
adult_domains = False
continue
elif name_servers == True:
if 'name_servers' not in self.intelligence.keys():
self.intelligence['name_servers'] = td.get_text()
name_servers = False
continue
elif spam_hosts == True:
if 'spam_hosts' not in self.intelligence.keys():
self.intelligence['spam_hosts'] = td.get_text()
spam_hosts = False
continue
elif open_proxies == True:
if 'open_proxies' not in self.intelligence.keys():
self.intelligence['open_proxies'] = td.get_text()
open_proxies = False
continue
elif mail_servers == True:
if 'mail_servers' not in self.intelligence.keys():
self.intelligence['mail_servers'] = td.get_text()
mail_servers = False
continue
if 'ip_address' not in self.intelligence.keys():
self.intelligence['ip_address'] = ''
if 'description' not in self.intelligence.keys():
self.intelligence['description'] = ''
if 'location' not in self.intelligence.keys():
self.intelligence['location'] = ''
if 'subnet' not in self.intelligence.keys():
self.intelligence['subnet'] = ''
if 'asn_number' not in self.intelligence.keys():
self.intelligence['asn_number'] = ''
if 'n_domains' not in self.intelligence.keys():
self.intelligence['n_domains'] = ''
if 'adult_domains' not in self.intelligence.keys():
self.intelligence['adult_domains'] = ''
if 'name_servers' not in self.intelligence.keys():
self.intelligence['name_servers'] = ''
if 'spam_hosts' not in self.intelligence.keys():
self.intelligence['spam_hosts'] = ''
if 'open_proxies' not in self.intelligence.keys():
self.intelligence['open_proxies'] = ''
if 'mail_servers' not in self.intelligence.keys():
self.intelligence['mail_servers'] = ''
def run(intelligence, extraction_type):
if cfg.DEBUG:
print 'Running TCPIPUtils() on %s' % intelligence
intel_collector = TCPIPUtils()
if extraction_type == cfg.intelligence_type['domain']:
if intel_collector.domain_information(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
elif extraction_type == cfg.intelligence_type['asn']:
if intel_collector.asn_information(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
else:
return {}
def extracted_information(extraction_type, intelligence_dictionary):
return {'extraction_type': extraction_type, 'intelligence_information':intelligence_dictionary}
def data_visualization(nodes, edges, json_data):
if json_data['plugin_name'] == 'TCPIPutils':
visual_report = TCPIPutilsVisual(nodes, edges, json_data)
return visual_report.nodes, visual_report.edges
else:
return nodes, edges
class TCPIPutilsVisual:
def __init__(self, ext_nodes, ext_edges, intel):
self.nodes = ext_nodes
self.edges = ext_edges
self.json_data = intel
self.visual_report_dictionary = {}
self.origin = ''
self.color = '#bf00ff'
if self.parse_intelligence() != False:
self.parse_visual_data()
def parse_intelligence(self):
if self.json_data['intelligence'] is None:
return False
if 'asn_number' in self.json_data['intelligence']['intelligence_information']:
asn = self.json_data['intelligence']['intelligence_information']['asn_number']
else:
asn = ''
if 'ip_address' in self.json_data['intelligence']['intelligence_information']:
ip_address = self.json_data['intelligence']['intelligence_information']['ip_address']
else:
ip_address = ''
if self.json_data['requested_intel'] not in self.visual_report_dictionary.keys():
self.visual_report_dictionary[self.json_data['requested_intel']] = {'TCPIPutils': [{'asn': asn}, {'ip_address': ip_address}]}
else:
self.visual_report_dictionary[self.json_data['requested_intel']].update({'TCPIPutils': [{'asn': asn}, {'ip_address': ip_address}]})
self.origin = self.json_data['requested_intel']
if self.origin not in self.edges.keys():
self.edges.setdefault(self.origin, [])
def parse_visual_data(self):
for intel in self.visual_report_dictionary[self.origin]['TCPIPutils']:
for key, value in intel.iteritems():
if key == 'asn':
self._manage_tcpiputils_asn(value)
elif key == 'ip_address':
self._manage_tcpiputils_ip_address(value)
def _manage_tcpiputils_asn(self, asn):
size = 30
if asn in self.nodes.keys():
self.nodes[asn] = (self.nodes[asn][0] + 5, self.nodes[asn][1], 'asn')
else:
self.nodes[asn] = (size, self.color, 'asn')
if asn not in self.edges[self.origin]:
self.edges[self.origin].append(asn)
def _manage_tcpiputils_ip_address(self, ip):
size = 30
if ip in self.nodes.keys():
self.nodes[ip] = (self.nodes[ip][0] + 5, self.nodes[ip][1], 'ip')
else:
self.nodes[ip] = (size, self.color, 'ip')
if ip not in self.edges[self.origin]:
self.edges[self.origin].append(ip)
```
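For orientation, here is a minimal sketch of how an OSTrICa-style plugin like the one above is typically driven. The import paths and the example domain are assumptions for illustration; the framework normally calls the module-level `run()` with an indicator and an extraction type, then hands the result to `data_visualization()` to grow the node/edge graph.
```python
# Illustrative only -- assumes the plugin is importable as ostrica.Plugins.TCPIPutils
from ostrica.utilities.cfg import Config as cfg
from ostrica.Plugins.TCPIPutils import run, data_visualization

# collect intelligence for a domain (performs HTTP requests to tcpiputils.com)
intel = run('example.com', cfg.intelligence_type['domain'])

# fold the result into an (initially empty) visualization graph
nodes, edges = data_visualization({}, {}, {
    'plugin_name': 'TCPIPutils',
    'requested_intel': 'example.com',
    'intelligence': intel,
})
```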
#### File: Plugins/VT/__init__.py
```python
import traceback
import datetime
import httplib
import string
import socket
import sys
import os
import re
from bs4 import BeautifulSoup
from ostrica.utilities.cfg import Config as cfg
extraction_type = [cfg.intelligence_type['md5'], cfg.intelligence_type['sha256'],
cfg.intelligence_type['domain'], cfg.intelligence_type['ip']]
enabled = True
version = 0.1
developer = '<NAME> <<EMAIL>>'
description = 'Plugin used to collect information about domains, IPs, md5s, sha256s on VirusTotal'
visual_data = True
class VT:
host = "www.virustotal.com"
def __init__(self):
self.page_content = ''
self.av_results = {}
self.first_submission_date = 0
self.last_submission_date = 0
self.submitted_filenames = []
self.threat_traits = {}
self.file_details = {}
self.intelligence = {}
pass
def __del__(self):
if cfg.DEBUG:
print 'cleanup VirusTotal...'
self.intelligence = {}
@staticmethod
def extract_sha256(location_link):
        if location_link.find('/file/') == -1 or location_link.find('/analysis/') == -1:
return False
else:
file_pos = location_link.find('/file/')
analysis_pos = location_link.find('/analysis/')
return location_link[file_pos+6:analysis_pos]
def detection_to_dict(self, detections):
i = 0
while i < len(detections):
av_name = detections[i].get_text().replace('\n', '').strip()
detection = detections[i+1].get_text().replace('\n', '').strip()
if detection == '':
detection = 'Not detected'
update = detections[i+2].get_text().replace('\n', '').strip()
self.av_results[av_name] = (detection, update)
i += 3
return True
def get_av_result(self):
soup = BeautifulSoup(self.page_content, 'html.parser')
detection_table = soup.findAll('table', {'id':'antivirus-results'})
if len(detection_table) != 1:
return False
detections = detection_table[0].findAll('td')
if len(detections) != 0:
self.detection_to_dict(detections)
return True
else:
return False
def get_detections_by_md5(self, md5):
body = 'query=%s' % (md5)
hhandle = httplib.HTTPSConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('POST', '/en/search/')
hhandle.putheader('Host', 'www.virustotal.com')
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Cache-Control', 'max-age=0')
hhandle.putheader('Referer', 'https://www.virustotal.com/')
hhandle.putheader('Origin', 'https://www.virustotal.com')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Content-Type', 'application/x-www-form-urlencoded')
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.putheader('Content-Length', str(len(body)))
hhandle.endheaders()
hhandle.send(body)
response = hhandle.getresponse()
if (response.status == 302):
sha256hash = self.extract_sha256(response.getheader('Location'))
if (sha256hash == False):
return False
else:
return self.get_detections_by_sha256(sha256hash)
else:
return False
    def extract_intelligence(self):
self.intelligence['filenames'] = self.submitted_filenames
self.intelligence['first_submission_date'] = self.first_submission_date
self.intelligence['last_submission_date'] = self.last_submission_date
self.intelligence['av_results'] = self.av_results
self.intelligence['threat_behaviour'] = self.threat_traits
self.intelligence['file_details'] = self.file_details
def get_detections_by_sha256(self, sha256hash):
query = '/en/file/%s/analysis/' % (sha256hash)
hhandle = httplib.HTTPSConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Host', 'www.virustotal.com')
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Cache-Control', 'max-age=0')
hhandle.putheader('Referer', 'https://www.virustotal.com/')
hhandle.putheader('Origin', 'https://www.virustotal.com')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
self.page_content = response.read()
self.get_behaviour()
self.get_file_details()
self.get_av_result()
self.get_vt_metadata()
            self.extract_intelligence()
return True
else:
return False
def get_file_details(self):
soup = BeautifulSoup(self.page_content, 'html.parser')
file_details_information = soup.findAll('div', {'id':'file-details'})
if len(file_details_information) == 0 or len(file_details_information) > 2:
return False
file_details = file_details_information[0].findAll('h5')
for file_detail_info in file_details:
if file_detail_info.get_text().strip() == u'Risk summary':
self.extract_file_details('risk_summary', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Required permissions':
self.extract_file_details('required_permission', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Permission-related API calls':
self.extract_file_details('permission_related_api_calls', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Main Activity':
            self.extract_file_details('main_activity', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Activities':
self.extract_file_details('activities', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Services':
self.extract_file_details('services', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Activity-related intent filters':
self.extract_file_details('activity_related_intent_filters', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Application certificate information':
self.extract_file_details('application_certificate_information', file_detail_info.find_next('textarea'), 'textarea')
elif file_detail_info.get_text().strip() == u'Interesting strings':
self.extract_file_details('interesting_strings', file_detail_info.find_next('textarea'), 'textarea')
elif file_detail_info.get_text().strip() == u'Application bundle files':
self.extract_file_details('bundled_files', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Contained files':
self.extract_file_details('contained_files', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Receivers':
self.extract_file_details('receivers', file_detail_info.find_next('div'))
elif file_detail_info.get_text().strip() == u'Providers':
self.extract_file_details('providers', file_detail_info.find_next('div'))
def extract_file_details(self, typology, soup, tag_type=''):
file_detail_list = []
if tag_type == '':
details = soup.findAll('div', {'class':'enum'})
elif tag_type == 'textarea':
self.file_details[typology] = soup.get_text().strip()
return
for detail in details:
file_detail_list.append(detail.get_text().strip())
self.file_details[typology] = file_detail_list
def get_behaviour(self):
soup = BeautifulSoup(self.page_content, 'html.parser')
behavioural_information = soup.findAll('div', {'id':'behavioural-info'})
if len(behavioural_information) != 1:
return False
threat_actions = behavioural_information[0].findAll('h5')
for threat_action in threat_actions:
if threat_action.get_text().strip() == u'Opened files':
self.extract_behavioural_traits('opened_files', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Read files':
self.extract_behavioural_traits('read_files', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Created processes':
self.extract_behavioural_traits('created_processes', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Terminated processes':
self.extract_behavioural_traits('terminated_processes', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Opened mutexes':
self.extract_behavioural_traits('opened_mutexes', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Runtime DLLs':
self.extract_behavioural_traits('runtime_dlls', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Created mutexes':
self.extract_behavioural_traits('created_mutexes', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Deleted files':
self.extract_behavioural_traits('deleted_files', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Copied files':
self.extract_behavioural_traits('copied_files', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Additional details':
self.extract_behavioural_traits('additional_details', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Written files':
self.extract_behavioural_traits('written_files', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Contacted URLs':
self.extract_behavioural_traits('contacted_urls', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'TCP connections':
self.extract_behavioural_traits('tcp_connections', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'DNS requests':
self.extract_behavioural_traits('dns_requests', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'HTTP requests':
self.extract_behavioural_traits('http_requests', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Interesting calls':
self.extract_behavioural_traits('interesting_calls', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Started services':
self.extract_behavioural_traits('started_services', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Accessed files':
self.extract_behavioural_traits('accessed_files', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Started receivers':
self.extract_behavioural_traits('started_receivers', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Accessed URIs':
self.extract_behavioural_traits('accessed_uris', threat_action.find_next('div'))
elif threat_action.get_text().strip() == u'Permissions checked':
            self.extract_behavioural_traits('permissions_checked', threat_action.find_next('div'))
def extract_behavioural_traits(self, typology, soup):
trait_list = []
traits = soup.findAll('div', {'class':'enum'})
for trait in traits:
trait_list.append(trait.get_text().strip())
self.threat_traits[typology] = trait_list
def extract_submissions_date(self, date_to_convert, typology):
pos = date_to_convert.find('UTC')
if pos == -1:
return False
try:
if typology == 'first_submission':
#FIXME: TypeError: datetime.datetime(2015, 1, 18, 1, 42, 26) is not JSON serializable
#self.first_submission_date = datetime.datetime.strptime(date_to_convert[:pos].strip(), '%Y-%m-%d %H:%M:%S')
self.first_submission_date = date_to_convert[:pos].strip()
else:
#FIXME: TypeError: datetime.datetime(2015, 1, 18, 1, 42, 26) is not JSON serializable
#self.last_submission_date = datetime.datetime.strptime(date_to_convert[:pos].strip(), '%Y-%m-%d %H:%M:%S')
self.last_submission_date = date_to_convert[:pos].strip()
return True
except:
print traceback.print_exc()
return False
def extract_filenames(self, filenames):
filenames = filenames.split('\n')
for filename in filenames:
if filename.strip() != '':
# TODO: fix it. It is a quick hack around unicode filenames
filename = filename.encode('utf8', 'ignore').strip()
filename = re.sub(r'[^\x00-\x7f]',r'',filename)
self.submitted_filenames.append(filename)
def get_vt_metadata(self):
soup = BeautifulSoup(self.page_content, 'html.parser')
metadatas = soup.findAll('div', {'class':'enum'})
if len(metadatas) == 0:
return False
for metadata in metadatas:
if hasattr(metadata.span, 'get_text'):
if metadata.span.get_text() == u'First submission':
splitted_data = metadata.get_text().split('\n')
if len(splitted_data) == 4:
self.extract_submissions_date(splitted_data[2].strip(), 'first_submission')
elif metadata.span.get_text() == u'Last submission':
splitted_data = metadata.get_text().split('\n')
if len(splitted_data) == 4:
self.extract_submissions_date(splitted_data[2].strip(), 'last_submission')
elif hasattr(metadata.table, 'get_text'):
if metadata.table.get_text().find(u'File names') != -1:
filenames = soup.findAll('td', {'class':'field-value'})
if len(filenames) == 2:
self.extract_filenames(filenames[1].get_text())
class VTNetwork:
host = "www.virustotal.com"
def __init__(self):
self.domain_page_content = ''
self.ips_associated_to_domain = []
self.domains_associated_to_ip = []
self.detected_domains = []
self.detected_ips = []
self.AS = ''
self.ASN = 0
self.country = ''
        self.intelligence = {}
pass
def __del__(self):
if cfg.DEBUG:
print 'cleanup VirusTotal Network...'
self.intelligence = {}
def get_domain_intelligence(self, domain):
query = '/en/domain/%s/information/' % (domain)
hhandle = httplib.HTTPSConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Host', 'www.virustotal.com')
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Cache-Control', 'max-age=0')
hhandle.putheader('Referer', 'https://www.virustotal.com/')
hhandle.putheader('Origin', 'https://www.virustotal.com')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
self.domain_page_content = response.read()
self.extract_domain_intelligence()
self.extract_intelligence()
return True
else:
return False
def extract_intelligence(self):
self.intelligence['as'] = self.AS
self.intelligence['asn'] = self.ASN
self.intelligence['domains_associated_to_ip'] = self.domains_associated_to_ip
self.intelligence['ips_associated_to_domain'] = self.ips_associated_to_domain
self.intelligence['detected_domains'] = self.detected_domains
self.intelligence['detected_ips'] = self.detected_ips
def extract_domain_intelligence(self):
soup = BeautifulSoup(self.domain_page_content, 'html.parser')
self.extract_latest_detected_url('domain', soup)
intelligence = soup.findAll('div', {'class':'enum'})
if len(intelligence) == 0:
return False
for intel in intelligence:
if len(intel) == 3:
if len(intel.findAll('a')) == 1:
if intel.contents[0].strip() == '':
continue
#FIXME: TypeError: datetime.datetime(2015, 1, 18, 1, 42, 26) is not JSON serializable
#date_associated = datetime.datetime.strptime(intel.contents[0].strip(), '%Y-%m-%d')
date_associated = intel.contents[0].strip()
domain_associated = intel.contents[1].get_text()
self.ips_associated_to_domain.append((date_associated, domain_associated))
def get_ip_intelligence(self, ip_address):
query = '/en/ip-address/%s/information/' % (ip_address)
hhandle = httplib.HTTPSConnection(self.host, timeout=cfg.timeout)
hhandle.putrequest('GET', query)
hhandle.putheader('Host', 'www.virustotal.com')
hhandle.putheader('Connection', 'keep-alive')
hhandle.putheader('Cache-Control', 'max-age=0')
hhandle.putheader('Referer', 'https://www.virustotal.com/')
hhandle.putheader('Origin', 'https://www.virustotal.com')
hhandle.putheader('User-Agent', cfg.user_agent)
hhandle.putheader('Accept-Language', 'en-GB,en-US;q=0.8,en;q=0.6')
hhandle.endheaders()
response = hhandle.getresponse()
if (response.status == 200):
self.domain_page_content = response.read()
self.extract_ip_intelligence()
self.extract_intelligence()
return True
else:
return False
def extract_ip_intelligence(self):
soup = BeautifulSoup(self.domain_page_content, 'html.parser')
self.extract_latest_detected_url('ip', soup)
intelligence = soup.findAll('div', {'class':'enum'})
if len(intelligence) == 0:
return False
for intel in intelligence:
if hasattr(intel, 'div') and len(intel) == 7:
if len(intel.findAll('a')) != 0:
continue
if intel.div.get_text() == u'Country':
self.country = intel.contents[3].get_text().strip()
if hasattr(intel, 'div') and len(intel) == 7:
if len(intel.findAll('a')) != 0:
continue
if intel.div.get_text() == u'Autonomous System':
self.AS = intel.contents[3].get_text().strip()
pos = intel.contents[3].get_text().find('(')
if pos != -1:
self.ASN = int(intel.contents[3].get_text()[:pos].strip())
if len(intel) == 3:
if len(intel.findAll('a')) == 1:
#FIXME: TypeError: datetime.datetime(2015, 1, 18, 1, 42, 26) is not JSON serializable
#date_associated = datetime.datetime.strptime(intel.contents[0].strip(), '%Y-%m-%d')
date_associated = intel.contents[0].strip()
domain_associated = intel.contents[1].get_text()
self.domains_associated_to_ip.append((date_associated, domain_associated))
def extract_latest_detected_url(self, typology, soup):
detected_domains_information = soup.findAll('div', {'id':'detected-urls'})
detected_list = []
if len(detected_domains_information) != 1:
return False
detected_domains = detected_domains_information[0].findAll('div')
for detected_domain in detected_domains:
if len(detected_domain) == 7:
detection_rate = detected_domain.contents[1].get_text().strip()
detection_time = detected_domain.contents[3].get_text().strip()
detection_url = detected_domain.a.get_text().strip()
if typology == 'domain':
self.detected_domains.append((detection_rate, detection_time, detection_url))
elif typology == 'ip':
self.detected_ips.append((detection_rate, detection_time, detection_url))
def run(intelligence, extraction_type):
if cfg.DEBUG:
print 'Running VT() on %s' % intelligence
if extraction_type == cfg.intelligence_type['sha256']:
intel_collector = VT()
if intel_collector.get_detections_by_sha256(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
elif extraction_type == cfg.intelligence_type['md5']:
intel_collector = VT()
if intel_collector.get_detections_by_md5(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
elif extraction_type == cfg.intelligence_type['ip']:
intel_collector = VTNetwork()
if intel_collector.get_ip_intelligence(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
elif extraction_type == cfg.intelligence_type['domain']:
intel_collector = VTNetwork()
if intel_collector.get_domain_intelligence(intelligence) == True:
collected_intel = extracted_information(extraction_type, intel_collector.intelligence)
del intel_collector
return collected_intel
else:
return {}
def extracted_information(extraction_type, intelligence_dictionary):
return {'extraction_type': extraction_type, 'intelligence_information':intelligence_dictionary}
def data_visualization(nodes, edges, json_data):
if json_data['plugin_name'] == 'VT':
visual_report = VTVisual(nodes, edges, json_data)
return visual_report.nodes, visual_report.edges
else:
return nodes, edges
class VTVisual:
def __init__(self, ext_nodes, ext_edges, intel):
self.nodes = ext_nodes
self.edges = ext_edges
self.json_data = intel
self.visual_report_dictionary = {}
self.origin = ''
self.color = '#ff8000'
if self.parse_intelligence() != False:
self.parse_visual_data()
def parse_intelligence(self):
vt_filenames = ''
domains = []
ips_associated_to_domain = []
detected_ips = []
domains_associated_to_ip = []
http_requests = []
tcp_connections = []
mutexes = []
av_results = {}
if self.json_data['intelligence'] is None:
return False
if 'filenames' in self.json_data['intelligence']['intelligence_information']:
vt_filenames = self.json_data['intelligence']['intelligence_information']['filenames']
if 'detected_domains' in self.json_data['intelligence']['intelligence_information']:
for detected_domains in self.json_data['intelligence']['intelligence_information']['detected_domains']:
domains.append(detected_domains[2])
if 'ips_associated_to_domain' in self.json_data['intelligence']['intelligence_information']:
for related_ips in self.json_data['intelligence']['intelligence_information']['ips_associated_to_domain']:
ips_associated_to_domain.append(related_ips[1])
if 'domains_associated_to_ip' in self.json_data['intelligence']['intelligence_information']:
for domain_associated_to_ip in self.json_data['intelligence']['intelligence_information']['domains_associated_to_ip']:
domains_associated_to_ip.append(domain_associated_to_ip[1])
if 'detected_ips' in self.json_data['intelligence']['intelligence_information']:
for detected_ip in self.json_data['intelligence']['intelligence_information']['detected_ips']:
detected_ips.append(detected_ip[2])
if 'threat_behaviour' in self.json_data['intelligence']['intelligence_information']:
mutexes, http_requests, tcp_connections = self.parse_threat_behavior()
if 'av_results' in self.json_data['intelligence']['intelligence_information']:
av_results = self.json_data['intelligence']['intelligence_information']['av_results']
if self.json_data['requested_intel'] not in self.visual_report_dictionary.keys():
self.visual_report_dictionary[self.json_data['requested_intel']] = {'VT': [{'domains': domains},
{'ip_addresses': ips_associated_to_domain},
{'detected_ips': detected_ips},
{'http_requests': http_requests},
{'tcp_connections': tcp_connections},
{'mutexes': mutexes},
{'av_results': av_results},
{'filenames': vt_filenames}]}
else:
self.visual_report_dictionary[self.json_data['requested_intel']].update({'VT': [{'domains': domains},
{'ip_addresses': ips_associated_to_domain},
{'detected_ips': detected_ips},
{'http_requests': http_requests},
{'tcp_connections': tcp_connections},
{'mutexes': mutexes},
{'av_results': av_results},
{'filenames': vt_filenames}]})
self.origin = self.json_data['requested_intel']
if self.origin not in self.edges.keys():
self.edges.setdefault(self.origin, [])
def parse_threat_behavior(self):
http_requests = []
tcp_connections = []
mutexes = []
if 'http_requests' in self.json_data['intelligence']['intelligence_information']['threat_behaviour']:
for request in self.json_data['intelligence']['intelligence_information']['threat_behaviour']['http_requests']:
request_pos = request.find(' ')
if request_pos == -1:
continue
request_pos_end = request.find('\n', request_pos)
if request_pos_end == -1:
continue
http_requests.append(request[request_pos+1:request_pos_end])
if 'tcp_connections' in self.json_data['intelligence']['intelligence_information']['threat_behaviour']:
for tcp_connection in self.json_data['intelligence']['intelligence_information']['threat_behaviour']['tcp_connections']:
pos = tcp_connection.find(':')
if pos == -1:
continue
tcp_connections.append(tcp_connection[:pos])
if 'created_mutexes' in self.json_data['intelligence']['intelligence_information']['threat_behaviour']:
for mutex in self.json_data['intelligence']['intelligence_information']['threat_behaviour']['created_mutexes']:
pos = mutex.find(' ')
if pos == -1:
mutexes.append(mutex)
else:
mutexes.append(mutex[:pos])
return mutexes, http_requests, tcp_connections
def parse_visual_data(self):
for intel in self.visual_report_dictionary[self.origin]['VT']:
for key, value in intel.iteritems():
if key == 'domains':
self._manage_vt_domains(value)
elif key == 'ip_addresses':
self._manage_vt_ip_addresses(value)
elif key == 'detected_ips':
self._manage_vt_detected_domains(value)
elif key == 'filenames':
self._manage_vt_filenames(value)
elif key == 'http_requests':
self._manage_vt_http_requests(value)
elif key == 'tcp_connections':
self._manage_vt_tcp_connections(value)
elif key == 'mutexes':
self._manage_vt_mutexes(value)
elif key == 'av_results':
self._manage_av_results(value)
def _manage_vt_domains(self, domains):
size = 30
for domain in domains:
# FIXME: quick fix for issues related to the visualization module (eg.: when running on shortly.im)
domain = domain.replace('"', '')
if domain in self.nodes.keys():
self.nodes[domain] = (self.nodes[domain][0] + 5, self.nodes[domain][1], self.nodes[domain][2])
else:
self.nodes[domain] = (size, self.color, 'domain')
if domain not in self.edges[self.origin]:
self.edges[self.origin].append(domain)
def _manage_vt_ip_addresses(self, ips):
size = 30
for ip in ips:
# FIXME: quick fix for issues related to the visualization module (eg.: when running on 172.16.58.3)
ip = ip.replace('"', '')
if ip in self.nodes.keys():
self.nodes[ip] = (self.nodes[ip][0] + 5, self.nodes[ip][1], self.nodes[ip][2])
else:
self.nodes[ip] = (size, self.color, 'ip')
if ip not in self.edges[self.origin]:
self.edges[self.origin].append(ip)
def _manage_vt_detected_domains(self, domains):
size = 30
for domain in domains:
# FIXME: quick fix for issues related to the visualization module (eg.: when running on 172.16.58.3)
domain = domain.replace('"', '')
if domain in self.nodes.keys():
self.nodes[domain] = (self.nodes[domain][0] + 5, self.nodes[domain][1], self.nodes[domain][2])
else:
self.nodes[domain] = (size, self.color, 'detected_domain')
if domain not in self.edges[self.origin]:
self.edges[self.origin].append(domain)
def _manage_vt_filenames(self, filenames):
size = 30
for fn in filenames:
# FIXME: quick fix for issues related to the visualization module
fn = fn.replace('"', '')
if fn in self.nodes.keys():
self.nodes[fn] = (self.nodes[fn][0] + 5, self.nodes[fn][1], self.nodes[fn][2])
else:
self.nodes[fn] = (size, self.color, 'filename')
if fn not in self.edges[self.origin]:
self.edges[self.origin].append(fn)
def _manage_vt_http_requests(self, http_reqs):
size = 30
for http_request in http_reqs:
# FIXME: quick fix for issues related to the visualization module
http_request = http_request.replace('"', '')
if http_request in self.nodes.keys():
self.nodes[http_request] = (self.nodes[http_request][0] + 5, self.nodes[http_request][1], self.nodes[http_request][2])
else:
self.nodes[http_request] = (size, self.color, 'http_request')
if http_request not in self.edges[self.origin]:
self.edges[self.origin].append(http_request)
def _manage_vt_tcp_connections(self, tcps):
size = 30
for tcp in tcps:
# FIXME: quick fix for issues related to the visualization module
tcp = tcp.replace('"', '')
if tcp in self.nodes.keys():
self.nodes[tcp] = (self.nodes[tcp][0] + 5, self.nodes[tcp][1], self.nodes[tcp][2])
else:
self.nodes[tcp] = (size, self.color, 'tcp connection')
if tcp not in self.edges[self.origin]:
self.edges[self.origin].append(tcp)
def _manage_vt_mutexes(self, mutexes):
size = 30
for mutex in mutexes:
# FIXME: quick fix for issues related to the visualization module
mutex = mutex.replace('"', '')
if mutex in self.nodes.keys():
self.nodes[mutex] = (self.nodes[mutex][0] + 5, self.nodes[mutex][1], self.nodes[mutex][2])
else:
self.nodes[mutex] = (size, self.color, 'mutex')
if mutex not in self.edges[self.origin]:
self.edges[self.origin].append(mutex)
    def _manage_av_results(self, av_results):
        size = 30
        detection = ''
        for av_name, av_values in av_results.iteritems():
if av_name == 'Symantec' and av_values[0] != 'Not detected':
detection = av_values[0]
break
elif av_name == 'Microsoft' and av_values[0] != 'Not detected':
detection = av_values[0]
break
if av_values[0] != 'Not detected':
detection = av_values[0]
if detection == '':
return
if detection not in self.nodes.keys():
self.nodes[detection] = (size, self.color, 'detection')
if detection not in self.edges[self.origin]:
self.edges[self.origin].append(detection)
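# --- Usage sketch (added for illustration; not part of the original plugin) ---
# The framework is expected to call the module-level run() with an indicator and
# one of the supported extraction types, then pass the result to data_visualization():
#
#   intel = run('d41d8cd98f00b204e9800998ecf8427e', cfg.intelligence_type['md5'])
#   nodes, edges = data_visualization({}, {}, {'plugin_name': 'VT',
#                                              'requested_intel': '<requested hash>',
#                                              'intelligence': intel})
#
# The hash above is just the md5 of an empty file, used as a placeholder.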
``` |
{
"source": "jmorton-usgs/sat-stac",
"score": 2
} |
#### File: sat-stac/satstac/catalog.py
```python
import json
import os
from .version import __version__
from .thing import Thing, STACError
STAC_VERSION = os.getenv('STAC_VERSION', '1.0.0-beta.2')
class Catalog(Thing):
def __init__(self, data, root=None, **kwargs):
""" Initialize a catalog with a catalog file """
super(Catalog, self).__init__(data, **kwargs)
@property
def stac_version(self):
""" Get the STAC version of this catalog """
return self._data['stac_version']
@property
def description(self):
""" Get catalog description """
return self._data.get('description', '')
@classmethod
def create(cls, id='stac-catalog', title='A STAC Catalog',
description='A STAC Catalog', root=None, **kwargs):
""" Create new catalog """
kwargs.update({
'id': id,
'stac_version': STAC_VERSION,
'title': title,
'description': description,
'links': []
})
return cls(kwargs, root=root)
def children(self):
""" Get child links """
# TODO = should this be tested if Collection and return mix of Catalogs and Collections?
for l in self.links('child'):
yield Catalog.open(l)
def catalogs(self):
""" Recursive get all catalogs within this Catalog """
for cat in self.children():
for subcat in cat.children():
yield subcat
yield from subcat.catalogs()
yield cat
def collections(self):
""" Recursively get all collections within this Catalog """
for cat in self.children():
if 'extent' in cat._data.keys():
yield Collection.open(cat.filename)
# TODO - keep going? if other Collections can appear below a Collection
else:
yield from cat.collections()
def items(self):
""" Recursively get all items within this Catalog """
for item in self.links('item'):
yield Item.open(item)
for child in self.children():
yield from child.items()
def add_catalog(self, catalog, basename='catalog'):
""" Add a catalog to this catalog """
if self.filename is None:
raise STACError('Save catalog before adding sub-catalogs')
# add new catalog child link
child_link = '%s/%s.json' % (catalog.id, basename)
child_fname = os.path.join(self.path, child_link)
child_path = os.path.dirname(child_fname)
root_links = self.links('root')
root_link = root_links[0] if len(root_links) > 0 else self.filename
root_path = os.path.dirname(root_link)
self.add_link('child', child_link)
self.save()
# strip self, parent, child links from catalog and add new links
catalog.clean_hierarchy()
catalog.add_link('root', os.path.relpath(root_link, child_path))
catalog.add_link('parent', os.path.relpath(self.filename, child_path))
# create catalog file
catalog.save(filename=child_fname)
return self
def add_collection(self, catalog, basename='collection'):
""" Add a collection to this catalog """
return self.add_catalog(catalog, basename=basename)
# import and end of module prevents problems with circular dependencies.
# Catalogs use Items and Items use Collections (which are Catalogs)
from .item import Item
from .collection import Collection
```
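A short usage sketch of the Catalog API above. The paths are hypothetical, and it assumes the `Thing` base class provides `save()`, `links()` and `path` as in sat-stac (that class is not shown in this excerpt):
```python
from satstac import Catalog

cat = Catalog.create(id='my-catalog', description='Example catalog')
cat.save(filename='my-catalog/catalog.json')   # a catalog must be saved before children are added

child = Catalog.create(id='child-catalog')
cat.add_catalog(child)                         # writes my-catalog/child-catalog/catalog.json and links it

print([c.id for c in cat.children()])
```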
#### File: sat-stac/satstac/itemcollection.py
```python
import json
import os.path as op
import requests
from logging import getLogger
from .catalog import STAC_VERSION
from .collection import Collection
from .item import Item
from .thing import STACError
from .utils import terminal_calendar, get_s3_signed_url
logger = getLogger(__name__)
class ItemCollection(object):
""" A GeoJSON FeatureCollection of STAC Items with associated Collections """
def __init__(self, items, collections=[]):
""" Initialize with a list of Item objects """
self._collections = collections
self._items = items
# link Items to their Collections
cols = {c.id: c for c in self._collections}
for i in self._items:
# backwards compatible to STAC 0.6.0 where collection is in properties
col = i._data.get('collection', None)
if col is not None:
if col in cols:
i._collection = cols[col]
@classmethod
    def open_remote(cls, url, headers=None):
        """ Open remote file """
        resp = requests.get(url, headers=headers or {})
if resp.status_code == 200:
dat = resp.text
else:
raise STACError('Unable to open %s' % url)
return json.loads(dat)
@classmethod
def open(cls, filename):
""" Load an Items class from a GeoJSON FeatureCollection """
""" Open an existing JSON data file """
logger.debug('Opening %s' % filename)
if filename[0:5] == 'https':
try:
data = cls.open_remote(filename)
except STACError as err:
# try signed URL
url, headers = get_s3_signed_url(filename)
data = cls.open_remote(url, headers)
else:
if op.exists(filename):
data = open(filename).read()
data = json.loads(data)
else:
raise STACError('%s does not exist locally' % filename)
collections = [Collection(col) for col in data.get('collections', [])]
items = [Item(feature) for feature in data['features']]
return cls(items, collections=collections)
@classmethod
def load(cls, *args, **kwargs):
""" Load an Items class from a GeoJSON FeatureCollection """
logger.warning("ItemCollection.load() is deprecated, use ItemCollection.open()")
return cls.open(*args, **kwargs)
def __len__(self):
""" Number of scenes """
return len(self._items)
def __getitem__(self, index):
return self._items[index]
def dates(self):
""" Get sorted list of dates for all scenes """
return sorted(list(set([s.date for s in self._items])))
def collection(self, id):
""" Get collection records for this list of scenes """
cols = [c for c in self._collections if c.id == id]
if len(cols) == 1:
return cols[0]
else:
return None
def properties(self, key, date=None):
""" Set of values for 'key' property in Items, for specific date if provided """
if date is None:
return list(set([i[key] for i in self._items]))
else:
return list(set([i[key] for i in self._items if i.date == date]))
def summary(self, params=[]):
""" Print summary of all scenes """
if len(params) == 0:
params = ['date', 'id']
txt = 'Items (%s):\n' % len(self._items)
txt += ''.join(['{:<25} '.format(p) for p in params]) + '\n'
for s in self._items:
txt += ''.join(['{:<25} '.format(s.get_path('${%s}' % p)) for p in params]) + '\n'
return txt
def calendar(self, group='platform'):
""" Get calendar for dates """
date_labels = {}
for d in self.dates():
groups = self.properties(group, d)
if len(groups) > 1:
date_labels[d] = 'Multiple'
else:
date_labels[d] = groups[0]
return terminal_calendar(date_labels)
    def assets_definition(self):
        fields = ['Key', 'Title', 'Common Name(s)', 'Type']
        w = [12, 35, 20, 50]
        txt = ''
        for c in self._collections:
            txt += f"Collection: {c.id}\n"
            txt += ''.join([f"{fields[i]:{w[i]}}" for i in range(len(w))]) + '\n'
            for key in c._data['item_assets']:
                asset = c._data['item_assets'][key]
                if 'eo:bands' in asset:
                    bands = ', '.join([b.get('common_name', None) for b in asset['eo:bands'] if 'common_name' in b])
                else:
                    bands = ''
                vals = [key, asset['title'], bands, asset['type']]
                txt += ''.join([f"{vals[i]:{w[i]}}" for i in range(len(w))]) + '\n'
        return txt
def save(self, filename, **kwargs):
""" Save scene metadata """
with open(filename, 'w') as f:
f.write(json.dumps(self.geojson(**kwargs)))
def geojson(self, id='STAC', description='Single file STAC'):
""" Get Items as GeoJSON FeatureCollection """
features = [s._data for s in self._items]
geoj = {
'id': id,
'description': description,
'stac_version': STAC_VERSION,
'stac_extensions': ['single-file-stac'],
'type': 'FeatureCollection',
'features': features,
'collections': [c._data for c in self._collections],
'links': []
}
return geoj
def filter(self, key, values):
""" Filter scenes on key matching value """
items = []
for val in values:
items += list(filter(lambda x: x[key] == val, self._items))
self._items = items
def download_assets(self, *args, **kwargs):
filenames = []
for i in self._items:
fnames = i.download_assets(*args, **kwargs)
if len(fnames) > 0:
filenames.append(fnames)
return filenames
def download(self, *args, **kwargs):
""" Download all Items """
dls = []
for i in self._items:
fname = i.download(*args, **kwargs)
if fname is not None:
dls.append(fname)
return dls
```
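And a brief sketch of how the ItemCollection above is typically used; the file name and property key are placeholders:
```python
from satstac import ItemCollection

items = ItemCollection.open('items.json')      # single-file STAC / GeoJSON FeatureCollection
print(len(items), 'items on dates', items.dates())
print(items.summary(params=['date', 'id']))

items.filter('eo:cloud_cover', [0])            # keep only items whose property matches one of the values
items.save('filtered.json')
```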
#### File: sat-stac/test/test_catalog.py
```python
import json
import os
import unittest
import shutil
from satstac import __version__, Catalog, STACError, Item
testpath = os.path.dirname(__file__)
class Test(unittest.TestCase):
path = os.path.join(testpath, 'test-catalog')
@classmethod
def tearDownClass(cls):
""" Remove test files """
if os.path.exists(cls.path):
shutil.rmtree(cls.path)
@classmethod
def get_catalog(cls):
""" Open existing test catalog """
return Catalog.open(os.path.join(testpath, 'catalog/catalog.json'))
@classmethod
def create_catalog(cls, name):
path = os.path.join(cls.path, name)
return Catalog.create(path)
def test_init(self):
with open(os.path.join(testpath, 'catalog/catalog.json')) as f:
data = json.loads(f.read())
cat = Catalog(data)
assert(cat.id == 'stac-catalog')
def test_open(self):
""" Initialize Catalog with a file """
cat = self.get_catalog()
assert(len(cat._data.keys()) == 4)
assert(cat.id == 'stac-catalog')
assert(len(cat.links())==3)
def test_properties(self):
cat = self.get_catalog()
assert(cat.stac_version == '1.0.0-beta.1')
assert(cat.description == 'An example STAC catalog')
def test_create(self):
""" Create new catalog file """
cat = Catalog.create()
assert(cat.id == 'stac-catalog')
def test_create_with_keywords(self):
path = os.path.join(testpath, 'test-catalog', 'create_with_keywords')
desc = 'this is a catalog'
cat = Catalog.create(path, description=desc)
assert(cat.description == desc)
def test_links(self):
root = self.get_catalog()
child = [c for c in root.children()][0]
assert(child.parent().id == root.id)
def test_get_catalogs(self):
catalogs = [i for i in self.get_catalog().catalogs()]
assert(len(catalogs) == 4)
def test_get_collections(self):
collections = [i for i in self.get_catalog().collections()]
assert(len(collections) == 2)
assert(collections[0].id in ['landsat-8-l1', 'sentinel-s2-l1c'])
assert(collections[1].id in ['landsat-8-l1', 'sentinel-s2-l1c'])
def test_get_items(self):
items = [i for i in self.get_catalog().items()]
assert(len(items) == 2)
def test_add_catalog(self):
cat = Catalog.create(root='http://my.cat').save(os.path.join(self.path, 'catalog.json'))
col = Catalog.open(os.path.join(testpath, 'catalog/eo/landsat-8-l1/catalog.json'))
cat.add_catalog(col)
child = [c for c in cat.children()][0]
assert(child.id == col.id)
def test_add_catalog_without_saving(self):
cat = Catalog.create()
with self.assertRaises(STACError):
cat.add_catalog({})
```
#### File: sat-stac/test/test_itemcollection.py
```python
import os
import unittest
from satstac import ItemCollection, Item
from shutil import rmtree
testpath = os.path.dirname(__file__)
class Test(unittest.TestCase):
path = os.path.join(testpath, 'test-item')
@classmethod
def tearDownClass(cls):
""" Remove test files """
if os.path.exists(cls.path):
rmtree(cls.path)
def load_items(self):
return ItemCollection.load(os.path.join(testpath, 'items.json'))
def test_load(self):
""" Initialize Scenes with list of Scene objects """
items = self.load_items()
assert(len(items._collections) == 1)
assert(len(items) == 2)
assert(isinstance(items[0], Item))
def test_save(self):
""" Save items list """
items = self.load_items()
fname = os.path.join(testpath, 'save-test.json')
items.save(fname)
assert(os.path.exists(fname))
os.remove(fname)
assert(not os.path.exists(fname))
def test_collection(self):
""" Get a collection """
items = self.load_items()
col = items.collection('landsat-8-l1')
assert(col.id == 'landsat-8-l1')
def test_no_collection(self):
""" Attempt to get non-existent collection """
items = self.load_items()
col = items.collection('nosuchcollection')
assert(col is None)
def test_get_properties(self):
""" Get set of properties """
items = self.load_items()
p = items.properties('eo:platform')
assert(len(p) == 1)
assert(p[0] == 'landsat-8')
def test_print_items(self):
""" Print summary of items """
items = self.load_items()
print(items.summary())
def test_dates(self):
""" Get dates of all items """
items = self.load_items()
dates = items.dates()
assert(len(dates) == 1)
def test_text_calendar(self):
""" Get calendar """
items = self.load_items()
cal = items.calendar()
assert(len(cal) > 250)
def test_download_thumbnails(self):
""" Download all thumbnails """
items = self.load_items()
fnames = items.download(key='thumbnail')
for f in fnames:
assert(os.path.exists(f))
os.remove(f)
assert(not os.path.exists(f))
#shutil.rmtree(os.path.join(testpath, 'landsat-8-l1'))
def test_filter(self):
items = self.load_items()
items.filter('eo:cloud_cover', [100])
assert(len(items) == 1)
def test_download_assets(self):
""" Download multiple assets from all items """
items = self.load_items()
filenames = items.download_assets(keys=['MTL', 'ANG'], filename_template=self.path)
assert(len(filenames) == 2)
for fnames in filenames:
assert(len(fnames) == 2)
for f in fnames:
assert(os.path.exists(f))
def test_download(self):
""" Download a data file from all items """
items = self.load_items()
fnames = items.download(key='MTL', filename_template=self.path)
assert(len(fnames) == 2)
for f in fnames:
assert(os.path.exists(f))
``` |
{
"source": "jmorton-usgs/stackstac",
"score": 2
} |
#### File: stackstac/stackstac/stac_types.py
```python
from __future__ import annotations
"""
Compatibility methods for all the different ways of representing STAC catalogs in Python.
Because dicts and lists just are never enough to represent JSON data.
"""
from typing import (
Dict,
Iterator,
List,
Literal,
Optional,
Sequence,
Tuple,
TypedDict,
Union,
cast,
)
try:
from satstac import Item as SatstacItem
from satstac import ItemCollection as SatstacItemCollection
except ImportError:
class SatstacItem:
_data: ItemDict
class SatstacItemCollection:
def __iter__(self) -> Iterator[SatstacItem]:
...
try:
from pystac import Catalog as PystacCatalog
from pystac import Item as PystacItem
except ImportError:
class PystacItem:
def to_dict(self) -> ItemDict:
...
class PystacCatalog:
def get_all_items(self) -> Iterator[PystacItem]:
...
class EOBand(TypedDict, total=False):
name: str
common_name: str
description: str
center_wavelength: float
full_width_half_max: float
AssetDict = TypedDict(
"AssetDict",
{
"href": str,
"title": str,
"description": str,
"type": str,
"roles": List[str],
"proj:shape": Tuple[int, int],
"proj:transform": Union[
Tuple[int, int, int, int, int, int],
Tuple[int, int, int, int, int, int, int, int, int],
],
"eo:bands": EOBand,
"sar:polarizations": List[str],
},
total=False,
)
PropertiesDict = TypedDict(
"PropertiesDict",
{
"datetime": Optional[str],
"proj:epsg": Optional[int],
"proj:bbox": Tuple[float, float, float, float],
"proj:shape": Tuple[int, int],
"proj:transform": Union[
Tuple[int, int, int, int, int, int],
Tuple[int, int, int, int, int, int, int, int, int],
],
},
total=False,
)
class ItemDict(TypedDict):
stac_version: str
id: str
type: Literal["Feature"]
geometry: Optional[dict]
bbox: Tuple[float, float, float, float]
properties: PropertiesDict
assets: Dict[str, AssetDict]
stac_extensions: List[str]
collection: str
ItemSequence = Sequence[ItemDict]
ItemIsh = Union[SatstacItem, PystacItem, ItemDict]
ItemCollectionIsh = Union[SatstacItemCollection, PystacCatalog, ItemSequence]
def items_to_plain(items: Union[ItemCollectionIsh, ItemIsh]) -> ItemSequence:
"""
Convert something like a collection/Catalog of STAC items into a list of plain dicts
Currently works on ``satstac.ItemCollection``, ``pystac.Catalog`` (inefficiently),
and plain Python lists-of-dicts.
"""
if isinstance(items, dict):
# singleton item
return [items]
if isinstance(items, Sequence):
# slicing a satstac `ItemCollection` produces a list, not another `ItemCollection`,
# so having a `List[SatstacItem]` is quite possible
try:
return [item._data for item in cast(SatstacItemCollection, items)]
except AttributeError:
return items
if isinstance(items, SatstacItem):
return [items._data]
if isinstance(items, PystacItem):
# TODO this is wasteful.
return [items.to_dict()]
if isinstance(items, SatstacItemCollection):
return [item._data for item in items]
if isinstance(items, PystacCatalog):
# TODO this is wasteful. Instead of this `items_to_plain` function,
# switch to `get_items`, `get_properties`, `get_assets`, etc. style functions
# which can handle each object type, preventing the need for this sort of copying.
return [item.to_dict() for item in items.get_all_items()]
raise TypeError(f"Unrecognized STAC collection type {type(items)}: {items!r}")
``` |
{
"source": "jmosbacher/context-config",
"score": 3
} |
#### File: context-config/context_config/context_config.py
```python
from abc import ABC, abstractmethod
from intervaltree import IntervalTree, Interval
from collections.abc import Mapping, MutableMapping, Iterable
class BaseConfig(ABC):
def __init__(self, parent=None):
self.parent = parent
@abstractmethod
def lookup(self, key):
pass
@abstractmethod
def configure(self, key, value):
pass
@abstractmethod
def _keys(self):
pass
def __getitem__(self, key):
if isinstance(key, tuple):
if len(key)==1:
return self.lookup(key[0])
elif len(key)==2:
return self.lookup(key[0])[key[1]]
else:
return self.lookup(key[0])[key[1:]]
return self.lookup(key)
def __setitem__(self, key, value):
if isinstance(key, tuple):
if key[0] in self.keys():
if len(key)==1:
return self.configure(key[0], value)
elif len(key)==2:
self.lookup(key[0])[key[1]] = value
return
else:
self.lookup(key[0])[key[1:]] = value
return
elif self.parent is not None:
self.parent[key] = value
return
else:
raise KeyError(f"{key} has not been defined in this context.")
self.configure(key, value)
def __getattr__(self, key):
return self.__getitem__(key)
def subcontext(self, **attrs):
return self.__class__(self, **attrs)
def keys(self):
keys = set(self._keys())
if self.parent is not None:
keys.update(self.parent.keys())
return list(keys)
def items(self):
return [(k,self.lookup(k)) for k in self.keys()]
def __dir__(self):
return super().__dir__() + self.keys()
def __contains__(self, key):
return key in self.keys()
class DictConfig(BaseConfig):
def __init__(self, parent=None, attrs=None, **kwargs):
super().__init__(parent=parent, **kwargs)
if attrs is None:
attrs = {}
self._attrs = dict(attrs)
def lookup(self, key):
if key in self._attrs:
return self._attrs[key]
if self.parent is not None:
return self.parent.lookup(key)
raise KeyError(f"{key} has not been defined in this context.")
def configure(self, key, value):
self._attrs[key] = value
def _keys(self):
return self._attrs.keys()
class IntervalConfig(BaseConfig):
_tree: IntervalTree
@classmethod
def from_label_dict(cls, d):
ivs = [Interval(*map(int, k.split("-")), v) for k,v in d.items()]
        return cls(tree=IntervalTree(ivs))
def add_group(self, name, group):
self[name] = group
def key_to_label(self, key):
return f"{key[0]}-{key[1]}"
def label_to_key(self, label):
return tuple(map(int, label.split("-")))
def to_label_dict(self):
return {f"{iv.begin}-{iv.end}": iv.data for iv in sorted(self._tree)}
def to_dict(self):
return {(iv.begin,iv.end): iv.data for iv in sorted(self._tree)}
def __init__(self, parent=None, tree=None, **kwargs):
super().__init__(parent=parent)
if tree is None:
tree = IntervalTree()
if not isinstance(tree, IntervalTree):
raise TypeError("tree must be an instance of IntervalTree.")
self._tree = tree
def lookup(self, key):
if isinstance(key, str):
key = self.label_to_key(key)
if isinstance(key, int):
return self.value(key)
elif isinstance(key, tuple) and len(key)==2:
return self.overlap_content(*key)
elif isinstance(key, Iterable):
return self.values_at(key)
elif isinstance(key, slice):
start = key.start or self.start
stop = key.stop or self.end
if key.step is None:
return self.overlap(start, stop)
else:
return self.values_at(range(start, stop, key.step))
@property
def start(self):
return self._tree.begin()
@property
def end(self):
return self._tree.end()
def configure(self, key, value):
if isinstance(key, str):
key = self.label_to_key(key)
if isinstance(key, slice):
start, stop, step = key.start, key.stop, key.step
elif isinstance(key, tuple):
if len(key)==2:
start, stop = key
step = None
elif len(key)==3:
start, stop, step = key
else:
raise ValueError("Setting intervals with tuple must be \
of form (start, end) or (start, end, step)")
else:
raise TypeError("Wrong type. Setting intervals can only be done using a \
slice or tuple of (start, end) or (start, end, step)")
if start is None:
start = self.start
if stop is None:
stop = self.end
if step is None:
self.set_interval(start, stop, value)
else:
indices = list(range(start,stop,step))
for begin,end,val in zip(indices[:-1], indices[1:], value):
self.set_interval(begin, end, val)
    def delete(self, key):
        if isinstance(key, str):
            key = self.label_to_key(key)
        if isinstance(key, tuple) and len(key)==2:
            self._tree.chop(*key)
        elif isinstance(key, slice):
            self._tree.chop(key.start, key.stop)
        else:
            raise TypeError("Must pass a label, a tuple of (begin, end) or a slice.")
def _keys(self):
for iv in sorted(self._tree):
yield iv.begin, iv.end
def labels(self):
return map(self.key_to_label, self.keys())
def items(self):
for iv in sorted(self._tree):
yield (iv.begin,iv.end), iv.data
def values(self):
for iv in sorted(self._tree):
yield iv.data
def __iter__(self):
return self.keys()
def __len__(self):
return len(self._tree)
def __bool__(self):
return bool(len(self._tree))
def __contains__(self, key):
return bool(self[key])
def __getstate__(self):
return tuple(sorted([tuple(iv) for iv in self._tree]))
def __setstate__(self, d):
ivs = [Interval(*iv) for iv in d]
self._tree = IntervalTree(ivs)
def overlap(self, begin, end):
hits = sorted(self._tree.overlap(begin, end))
return [Interval(max(iv.begin, begin), min(iv.end, end), iv.data)
for iv in hits]
def overlap_content(self, begin, end):
hits = sorted(self._tree.overlap(begin, end))
if len(hits)==1:
return hits[0].data
return [hit.data for hit in hits]
def value(self, index):
hits = sorted(self._tree.at(index))
if hits:
return hits[0].data
raise KeyError(f"No value found at {index}")
def values_at(self, indices):
return [self.value(i) for i in indices]
def set_interval(self, begin, end, value):
self._tree.chop(begin, end)
self._tree.addi(begin, end, value)
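# Usage sketch (illustrative, not part of the original module):
#
#   cfg = DictConfig(attrs={'threshold': 5})
#   sub = cfg.subcontext()
#   sub['threshold']                 # -> 5, resolved through the parent context
#   sub.configure('mode', 'fast')    # defined only in the child context
#
#   iv = IntervalConfig.from_label_dict({'0-10': 'a', '10-20': 'b'})
#   iv[5]                            # -> 'a'
#   iv['5-15']                       # -> ['a', 'b'], contents of both overlapping intervals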
``` |
{
"source": "jmosbacher/eve-jwt",
"score": 2
} |
#### File: eve-jwt/eve_jwt/validation.py
```python
import requests
from authlib.jose import jwt, jwk, util, errors, JsonWebKey, KeySet
import authlib
import logging
logger = logging.getLogger(__name__)
class ValidatorBase:
def validate_token(self, token, issuer, method=None,
audiences=None, allowed_roles=None):
raise NotImplementedError
class AsymmetricKeyValidator(ValidatorBase):
def __init__(self, default_key=None, key_url=None,
scope_claim=None, roles_claim=None):
self.key_url = key_url
self.roles_claim = roles_claim
self.scope_claim = scope_claim
self._keyset = KeySet([])
def validate_token(self, token, issuer, method=None, audiences=None, allowed_roles=None):
key = self.get_key(token)
if not key:
return (False, None, None, None)
options = {}
if audiences:
if isinstance(audiences, str):
options["aud"] = {"essential": True, "value": audiences}
else:
options["aud"] = {"essential": True, "values": audiences}
else:
options["aud"] = {"essential": False, "values": []}
if issuer:
options["iss"] = {"essential": True, "value": issuer}
try:
claims = jwt.decode(token, key, claims_options=options)
claims.validate()
        except Exception:
return (False, None, None, None)
payload = dict(claims)
account_id = payload.get('sub') # Get account id
roles = None
# Check scope is configured and add append it to the roles
if self.scope_claim and payload.get(self.scope_claim):
scope = payload.get(self.scope_claim)
roles = scope.split(" ")
# If roles claim is defined, gather roles from the token
if self.roles_claim:
roles = payload.get(self.roles_claim, []) + (roles or [])
# Check roles if scope or role claim is set
if allowed_roles and roles is not None:
if not any(role in roles for role in allowed_roles):
return (False, payload, account_id, roles)
return (True, payload, account_id, roles)
def get_key(self, token):
kid = ""
try:
header_str = authlib.common.encoding.urlsafe_b64decode(token.split(".")[0].encode()).decode('utf-8')
header = authlib.common.encoding.json_loads(header_str)
kid = header["kid"]
return self._keyset.find_by_kid(kid)
except Exception as e:
logger.debug(str(e))
kid = ""
try:
self.fetch_keys()
return self._keyset.find_by_kid(kid)
except Exception as e:
logger.debug(str(e))
return False
def fetch_keys(self):
if not self.key_url:
return
response = requests.get(self.key_url)
if response.ok:
data = response.json()
self._keyset = JsonWebKey.import_key_set(data)
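# Usage sketch (illustrative): point the validator at a JWKS endpoint and hand it
# the raw bearer token. The URL, issuer and audience below are placeholders.
#
#   validator = AsymmetricKeyValidator(key_url='https://issuer.example.com/.well-known/jwks.json')
#   ok, claims, account_id, roles = validator.validate_token(
#       token, issuer='https://issuer.example.com/', audiences='my-api')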
``` |
{
"source": "jmosbacher/eve-panel",
"score": 2
} |
#### File: eve-panel/eve_panel/field.py
```python
import param
from eve.io.mongo.validation import Validator
from .types import COERCERS
SUPPORTED_SCHEMA_FIELDS = [
"type",
"schema",
"required",
"default",
"readonly",
"valueschema",
"keyschema",
"regex",
"minlength",
"maxlength",
"min",
"max",
"allowed",
"items",
"empty",
"nullable",
]
TYPE_MAPPING = {
"media": "binary",
}
def EveField(name, schema, klass):
if isinstance(klass, param.ClassSelector):
return klass
if not isinstance(klass, type):
return klass
schema = {k: v for k, v in schema.items() if k in SUPPORTED_SCHEMA_FIELDS}
if schema.get('type', 'string') in COERCERS:
schema["coerce"] = COERCERS[schema.get('type', 'string')]
if schema.get('type', 'string') in TYPE_MAPPING:
schema['type'] = TYPE_MAPPING[schema.get('type', 'string')]
# validator = Validator({"value": schema})
def _validate(self, val):
if self.allow_None and val is None:
return
if self.owner is None:
return
if self.name is None:
return
try:
if not self.validator.validate({"value": val}):
sep = "\n"
errors = [
f"Cannot set \'{self.owner.name}.{self.name}\' to \'{val}\' of type {type(val)}."
]
for k, v in self.validator.errors.items():
errors.append(f"{k} {v}")
if len(errors) <= 2:
sep = ". "
raise ValueError(" ".join(errors))
except ValueError:
raise
except Exception:
pass
params = {
# "_schema": schema,
"_validate": _validate,
"validator": Validator({"value": schema})
}
return type(f"Eve{name.title()}{klass.__name__}Field", (klass, ), params)
```
#### File: eve-panel/eve_panel/io.py
```python
import yaml
import json
import pandas as pd
def read_csv(f):
df = pd.read_csv(f).dropna(axis=1, how="all")
return df.to_dict(orient="records")
FILE_READERS = {
"json": json.load,
"yml": yaml.safe_load,
"yaml": yaml.safe_load,
"csv": read_csv,
}
def read_data_file(f, ext):
if ext in FILE_READERS:
data = FILE_READERS[ext](f)
if isinstance(data, list):
return data
elif isinstance(data, dict):
return [data]
return []
```
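As a quick illustration of the helper above (the file name is hypothetical and the import path is assumed to be `eve_panel.io`): the extension picks the parser and the result is always normalized to a list of records.
```python
from eve_panel.io import read_data_file  # assumed import path

with open("samples.csv") as f:           # hypothetical data file
    records = read_data_file(f, "csv")   # -> list of dicts, one per row
```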
#### File: eve-panel/eve_panel/types.py
```python
import param
from bson import objectid
import numpy as np
import pandas as pd
import base64
class CoerceClassSelector(param.ClassSelector):
def __set__(self, obj, val):
try:
val = self.class_(val)
except:
pass
super().__set__(obj, val)
def objectid_param(**kwargs):
return CoerceClassSelector(str, constant=True, **kwargs)
def bytes_param(**kwargs):
return param.ClassSelector(bytes, **kwargs)
def set_param(**kwargs):
return param.ClassSelector(set, **kwargs)
TYPE_MAPPING = {
"objectid": objectid_param,
"boolean": param.Boolean,
"binary": bytes_param,
"date": param.Date,
"datetime": param.Date,
"dict": param.Dict,
"float": param.Number,
"integer": param.Integer,
"list": param.List,
"number": param.Number,
"set": set_param,
"string": param.String,
"media": bytes_param,
}
DASK_TYPE_MAPPING = {
"objectid": str,
"boolean": bool,
"binary": bytes,
"date": np.datetime64,
"datetime": np.datetime64,
"dict": dict,
"float": float,
"integer": int,
"list": list,
"number": float,
"set": set,
"string": str,
"media": bytes,
}
def to_binary(x):
if isinstance(x, str):
x = str.encode(x)
return x
def base64_to_binary(x):
if isinstance(x, str):
x = base64.b64decode(x)
return x
COERCERS = {
"objectid": str,
"boolean": bool,
"binary": to_binary,
"date": pd.to_datetime,
"datetime": pd.to_datetime,
"dict": dict,
"float": float,
"integer": int,
"list": list,
"number": float,
"set": set,
"string": str,
"media": base64_to_binary,
}
```
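For illustration, the mapping above can be used to turn Eve schema types into `param` parameters on a Parameterized class; the field names below are made up and the import path is assumed:
```python
import param
from eve_panel.types import TYPE_MAPPING  # assumed import path

class Doc(param.Parameterized):
    run_id = TYPE_MAPPING["integer"](default=0)
    comment = TYPE_MAPPING["string"](default="")
```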
#### File: eve-panel/eve_panel/widgets.py
```python
import ast
import json
import base64
import panel as pn
import param
from eve.io.mongo.validation import Validator
from panel.widgets import LiteralInput
class LiteralSchemaInputBase(LiteralInput):
"""[summary]
Args:
LiteralInput ([type]): [description]
"""
def validate_schema(self, value):
return True
def _process_property_change(self, msg):
msg = super(LiteralSchemaInputBase, self)._process_property_change(msg)
if msg['value'] == self.value:
return msg
new_state = ''
if 'value' in msg:
value = msg.pop('value')
if not self.validate_schema(value):
new_state = ' (invalid)'
value = self.value
msg['value'] = value
msg['name'] = msg.get('title', self.name).replace(
self._state, '').replace(new_state, '') + new_state
self._state = new_state
self.param.trigger('name')
return msg
def LiteralSchemaInput(name, schema, type_=None):
validator = Validator({"value": schema})
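    # create a LiteralSchemaInputBase subclass whose validate_schema method closes over this validator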
def validate_schema(self, value):
return validator.validate({"value": value})
params = {
"validate_schema": validate_schema,
"type": type_,
}
return type(name + "InputWidget", (LiteralSchemaInputBase, ), params)
def PDFViewer(pdf, width=800, height=500):
if isinstance(pdf, bytes):
pdf = base64.b64encode(pdf).decode()
return pn.pane.HTML(f'<iframe width={width} height={height} src="data:application/pdf;base64,{pdf}" type="application/pdf"></iframe>',
width=1000,sizing_mode="stretch_both")
def PNGViewer(png, width=800, height=500):
if isinstance(png, bytes):
png = base64.b64encode(png).decode()
src = f"data:image/png;base64,{png}"
return pn.pane.HTML(f"<img src='{src}' width={width} height={height}></img>")
class FileInputPreview(pn.widgets.FileInput):
@param.depends('value')
def preview(self):
if not self.filename:
return pn.Column()
if self.filename.endswith(".png"):
return PNGViewer(self.value)
elif self.filename.endswith(".pdf"):
return PDFViewer(self.value)
def _repr_mimebundle_(self, include=None, exclude=None):
        return pn.Column(self, self.preview)._repr_mimebundle_(include, exclude)
WIDGET_MAPPING = {
"media": {"type": pn.widgets.FileInput, "align": "end"},
}
def get_widget(name, schema):
if schema.get('type', 'string') == "dict" and "schema" in schema:
return LiteralSchemaInput(name, schema, dict)
elif schema.get('type', 'string') == "list" and "schema" in schema:
return LiteralSchemaInput(name, schema, list)
else:
return WIDGET_MAPPING.get(schema.get('type', 'string'), None)
class Progress(param.Parameterized):
value = param.Integer(0)
total = param.Integer(100)
active = param.Boolean(False)
desc = param.String("Loading")
unit = param.String("iterations")
def __call__(self, **params):
self.param.set_param(**params)
return self
@param.depends("value", "total", "active")
def view(self):
perc = int(100*self.value/self.total)
text = f"{perc}% [{self.value}/{self.total} {self.unit}]"
ind = pn.indicators.Progress(value=perc, active=self.active, sizing_mode="stretch_width")
return pn.Row(self.desc, ind, text, sizing_mode="stretch_width")
def update(self, inc=1):
self.value = self.value + inc
def reset(self):
self.value = 0
```
{
"source": "jmosbacher/igit",
"score": 2
} |
#### File: igit/igit/remotes.py
```python
class Remote:
fetch: str
push: str
kwargs: dict
heads: dict
def fetch(self, branch):
pass
def push(self, commit):
pass
```
#### File: igit/igit/serializers.py
```python
import base64
import hashlib
import json
import pathlib
import pickle
from abc import ABC, abstractmethod, abstractstaticmethod
import dill
import msgpack
import msgpack_numpy as m
from pydantic import BaseModel
from zict import File, Func
m.patch()
SERIALIZERS = {}
class DataCorruptionError(KeyError):
pass
class EncoderMismatchError(TypeError):
pass
class ObjectPacket(BaseModel):
otype: str
key: str
content: str
serializer: str
class BaseObjectSerializer(ABC):
NAME: str
key: bytes
suffix: str = ''
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
SERIALIZERS[cls.NAME] = cls
@abstractstaticmethod
def serialize(obj):
pass
@abstractstaticmethod
def deserialize(data):
pass
@classmethod
def get_mapper(cls, fs_mapper):
if isinstance(fs_mapper, (str, pathlib.Path)):
fs_mapper = File(fs_mapper, mode='a')
mapper = Func(cls.serialize, cls.deserialize, fs_mapper)
return mapper
class NoopSerializer(BaseObjectSerializer):
NAME = None
@staticmethod
def serialize(obj):
return obj
@staticmethod
def deserialize(data):
return data
SERIALIZERS[""] = NoopSerializer
class JsonObjectSerializer(BaseObjectSerializer):
NAME = "json"
@staticmethod
def serialize(obj):
return json.dumps(obj).encode()
@staticmethod
def deserialize(data):
return json.loads(data)
class PickleObjectSerializer(BaseObjectSerializer):
NAME = "pickle"
suffix: str = '.pkl'
@staticmethod
def serialize(obj):
return pickle.dumps(obj)
@staticmethod
def deserialize(data):
return pickle.loads(data)
class DillObjectSerializer(BaseObjectSerializer):
NAME = "dill"
suffix: str = '.dill'
@staticmethod
def serialize(obj):
return dill.dumps(obj)
@staticmethod
def deserialize(data):
return dill.loads(data)
class MsgpackObjectSerializer(BaseObjectSerializer):
NAME = "msgpack"
suffix: str = '.msg'
@staticmethod
def serialize(obj):
return msgpack.dumps(obj)
@staticmethod
def deserialize(data):
return msgpack.loads(data)
class MsgpackDillObjectSerializer(BaseObjectSerializer):
NAME = "msgpack-dill"
@staticmethod
def serialize(obj):
try:
return msgpack.dumps(obj)
        except Exception:
return dill.dumps(obj)
@staticmethod
def deserialize(data):
try:
return msgpack.loads(data)
        except Exception:
return dill.loads(data)
class JsonDillObjectSerializer(BaseObjectSerializer):
NAME = "json-dill"
@staticmethod
def serialize(obj):
try:
return json.dumps(obj).encode()
        except Exception:
return dill.dumps(obj)
@staticmethod
def deserialize(data):
try:
return json.loads(data)
        except Exception:
return dill.loads(data)
```
#### File: igit/storage/content_addressable.py
```python
import sys
import typing as ty
from collections.abc import MutableMapping
from ..models import BaseObject, BlobRef, ObjectRef, TreeRef
from ..tokenize import tokenize
from ..trees import BaseTree
from .common import DataCorruptionError, ProxyStorage
class ContentAddressableStorage(ProxyStorage):
verify: bool
hash_func: ty.Callable
def __init__(
self,
d: MutableMapping,
verify=True,
):
self.d = d
self.verify = verify
def hash(self, obj) -> str:
return tokenize(obj)
def get_ref(self, key, obj):
size = sys.getsizeof(obj)
if isinstance(obj, BaseTree):
ref = TreeRef(key=key,
tree_class=obj.__class__.__name__,
size=size)
elif isinstance(obj, BaseObject):
otype = obj.otype
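            # look up the ObjectRef subclass registered for this otype; the for-else raises KeyError if none matches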
for class_ in ObjectRef.__subclasses__():
if class_.otype == otype:
ref = class_(key=key, size=size)
break
else:
raise KeyError(otype)
else:
ref = BlobRef(key=key, size=size)
return ref
def hash_object(self, obj, save=True, as_ref=True):
if isinstance(obj, BaseTree):
new_obj = obj.__class__()
for k, v in obj.items():
new_obj[k] = self.hash_object(v, save=save)
obj = new_obj
key = self.hash(obj)
if save and key not in self.d:
self.d[key] = obj
if as_ref:
key = self.get_ref(key, obj)
return key
def cat_object(self, key, deref=True, recursive=True):
obj = self.d[key]
if deref and hasattr(obj, 'deref'):
obj = obj.deref(self, recursive=recursive)
if self.verify:
key2 = self.hash_object(obj, save=False, as_ref=False)
if key2 != key:
raise DataCorruptionError(
f"Looks like data has been corrupted or\
a different serializer/encryption was used. key: {key}, hash: {key2}"
)
return obj
def get(self, key, default=None):
if key not in self.d:
return default
return self[key]
def fuzzy_get(self, key):
if key in self.d:
return self.d[key]
for k in self.d.keys():
if key in k:
return self.d[k]
raise KeyError(key)
def equal(self, *objs):
        return len({self.hash(obj) for obj in objs}) == 1
def consistent_hash(self, obj):
key1 = self.hash_object(obj, as_ref=False)
key2 = self.hash_object(self.cat_object(key1), as_ref=False)
return key1 == key2
```
#### File: igit/storage/model.py
```python
from .function import FunctionStorage
class PydanticModelStorage(FunctionStorage):
def __init__(self, d, model):
dump = lambda m: m.json().encode()
load = lambda data: model.parse_raw(data)
super().__init__(d, dump, load)
```
#### File: igit/igit/tokenize.py
```python
import datetime
import inspect
import os
import pickle
import threading
import uuid
from collections import OrderedDict
from concurrent.futures import Executor
from contextlib import contextmanager
from dataclasses import fields, is_dataclass
from functools import partial
from hashlib import md5
from numbers import Number
from operator import getitem
from typing import Iterator, Mapping, Set
from packaging.version import parse as parse_version
from tlz import curry, groupby, identity, merge
from tlz.functoolz import Compose
from .hashing import hash_buffer_hex
from .utils import Dispatch
def tokenize(*args, **kwargs):
"""Deterministic token
>>> tokenize([1, 2, '3'])
'<PASSWORD>'
>>> tokenize('Hello') == tokenize('Hello')
True
"""
if kwargs:
args = args + (kwargs, )
return md5(str(tuple(map(normalize_token, args))).encode()).hexdigest()
def are_equal(a, b):
return tokenize(a) == tokenize(b)
normalize_token = Dispatch()
normalize_token.register(
(
int,
float,
str,
bytes,
type(None),
type,
slice,
complex,
type(Ellipsis),
datetime.date,
),
identity,
)
@normalize_token.register(dict)
def normalize_dict(d):
return normalize_token(sorted(d.items(), key=str))
@normalize_token.register(OrderedDict)
def normalize_ordered_dict(d):
return type(d).__name__, normalize_token(list(d.items()))
@normalize_token.register(set)
def normalize_set(s):
return normalize_token(sorted(s, key=str))
@normalize_token.register((tuple, list))
def normalize_seq(seq):
def func(seq):
try:
return list(map(normalize_token, seq))
except RecursionError:
return str(uuid.uuid4())
return type(seq).__name__, func(seq)
@normalize_token.register(range)
def normalize_range(r):
return list(map(normalize_token, [r.start, r.stop, r.step]))
@normalize_token.register(object)
def normalize_object(o):
method = getattr(o, "__igit_tokenize__", None)
if method is not None:
return method()
method = getattr(o, "__dask_tokenize__", None)
if method is not None:
return method()
return normalize_function(o) if callable(o) else uuid.uuid4().hex
function_cache = {}
function_cache_lock = threading.Lock()
def normalize_function(func):
try:
return function_cache[func]
except KeyError:
result = _normalize_function(func)
if len(function_cache) >= 500: # clear half of cache if full
with function_cache_lock:
if len(function_cache) >= 500:
for k in list(function_cache)[::2]:
del function_cache[k]
function_cache[func] = result
return result
except TypeError: # not hashable
return _normalize_function(func)
def _normalize_function(func):
if isinstance(func, Compose):
first = getattr(func, "first", None)
funcs = reversed((first, ) + func.funcs) if first else func.funcs
return tuple(normalize_function(f) for f in funcs)
elif isinstance(func, (partial, curry)):
args = tuple(normalize_token(i) for i in func.args)
if func.keywords:
kws = tuple((k, normalize_token(v))
for k, v in sorted(func.keywords.items()))
else:
kws = None
return (normalize_function(func.func), args, kws)
else:
try:
result = pickle.dumps(func, protocol=0)
if b"__main__" not in result: # abort on dynamic functions
return result
except Exception:
pass
try:
import cloudpickle
return cloudpickle.dumps(func, protocol=0)
except Exception:
return str(func)
@normalize_token.register_lazy("pandas")
def register_pandas():
import pandas as pd
PANDAS_GT_130 = parse_version(pd.__version__) >= parse_version("1.3.0")
@normalize_token.register(pd.Index)
def normalize_index(ind):
values = ind.array
return [ind.name, normalize_token(values)]
@normalize_token.register(pd.MultiIndex)
def normalize_index(ind):
codes = ind.codes
return ([ind.name] + [normalize_token(x) for x in ind.levels] +
[normalize_token(x) for x in codes])
@normalize_token.register(pd.Categorical)
def normalize_categorical(cat):
return [normalize_token(cat.codes), normalize_token(cat.dtype)]
@normalize_token.register(pd.arrays.PeriodArray)
@normalize_token.register(pd.arrays.DatetimeArray)
@normalize_token.register(pd.arrays.TimedeltaArray)
def normalize_period_array(arr):
return [normalize_token(arr.asi8), normalize_token(arr.dtype)]
@normalize_token.register(pd.arrays.IntervalArray)
def normalize_interval_array(arr):
return [
normalize_token(arr.left),
normalize_token(arr.right),
normalize_token(arr.closed),
]
@normalize_token.register(pd.Series)
def normalize_series(s):
return [
s.name,
s.dtype,
normalize_token(s._values),
normalize_token(s.index),
]
@normalize_token.register(pd.DataFrame)
def normalize_dataframe(df):
mgr = df._data
if PANDAS_GT_130:
# for compat with ArrayManager, pandas 1.3.0 introduced a `.arrays`
# attribute that returns the column arrays/block arrays for both
# BlockManager and ArrayManager
data = list(mgr.arrays)
else:
data = [block.values for block in mgr.blocks]
data.extend([df.columns, df.index])
return list(map(normalize_token, data))
@normalize_token.register(pd.api.extensions.ExtensionArray)
def normalize_extension_array(arr):
import numpy as np
return normalize_token(np.asarray(arr))
# Dtypes
@normalize_token.register(pd.api.types.CategoricalDtype)
def normalize_categorical_dtype(dtype):
return [
normalize_token(dtype.categories),
normalize_token(dtype.ordered)
]
@normalize_token.register(pd.api.extensions.ExtensionDtype)
def normalize_period_dtype(dtype):
return normalize_token(dtype.name)
@normalize_token.register_lazy("numpy")
def register_numpy():
import numpy as np
@normalize_token.register(np.ndarray)
def normalize_array(x):
if not x.shape:
return (x.item(), x.dtype)
if hasattr(x, "mode") and getattr(x, "filename", None):
if hasattr(x.base, "ctypes"):
offset = (x.ctypes._as_parameter_.value -
x.base.ctypes._as_parameter_.value)
else:
offset = 0 # root memmap's have mmap object as base
if hasattr(
x, "offset"
): # offset numpy used while opening, and not the offset to the beginning of the file
offset += getattr(x, "offset")
return (
x.filename,
os.path.getmtime(x.filename),
x.dtype,
x.shape,
x.strides,
offset,
)
if x.dtype.hasobject:
try:
try:
# string fast-path
data = hash_buffer_hex("-".join(x.flat).encode(
encoding="utf-8", errors="surrogatepass"))
except UnicodeDecodeError:
# bytes fast-path
data = hash_buffer_hex(b"-".join(x.flat))
except (TypeError, UnicodeDecodeError):
try:
data = hash_buffer_hex(
pickle.dumps(x, pickle.HIGHEST_PROTOCOL))
except Exception:
# pickling not supported, use UUID4-based fallback
data = uuid.uuid4().hex
else:
try:
data = hash_buffer_hex(x.ravel(order="K").view("i1"))
except (BufferError, AttributeError, ValueError):
data = hash_buffer_hex(x.copy().ravel(order="K").view("i1"))
return (data, x.dtype, x.shape, x.strides)
@normalize_token.register(np.matrix)
def normalize_matrix(x):
return type(x).__name__, normalize_array(x.view(type=np.ndarray))
normalize_token.register(np.dtype, repr)
normalize_token.register(np.generic, repr)
@normalize_token.register(np.ufunc)
def normalize_ufunc(x):
try:
name = x.__name__
            if getattr(np, name) is x:
                return "np." + name
            return normalize_function(x)  # ufuncs that are not top-level numpy attributes
        except AttributeError:
            return normalize_function(x)
@normalize_token.register_lazy("scipy")
def register_scipy():
import scipy.sparse as sp
def normalize_sparse_matrix(x, attrs):
return (
type(x).__name__,
normalize_seq((normalize_token(getattr(x, key)) for key in attrs)),
)
for cls, attrs in [
(sp.dia_matrix, ("data", "offsets", "shape")),
(sp.bsr_matrix, ("data", "indices", "indptr", "blocksize", "shape")),
(sp.coo_matrix, ("data", "row", "col", "shape")),
(sp.csr_matrix, ("data", "indices", "indptr", "shape")),
(sp.csc_matrix, ("data", "indices", "indptr", "shape")),
(sp.lil_matrix, ("data", "rows", "shape")),
]:
normalize_token.register(cls,
partial(normalize_sparse_matrix, attrs=attrs))
@normalize_token.register(sp.dok_matrix)
def normalize_dok_matrix(x):
return type(x).__name__, normalize_token(sorted(x.items()))
```
#### File: igit/trees/base.py
```python
import re
import sys
from abc import ABC, abstractclassmethod, abstractmethod, abstractstaticmethod
from collections import UserDict, defaultdict
from collections.abc import Iterable, Mapping, MutableMapping
from copy import copy
from pydoc import locate
import fsspec
import numpy as np
from igit.tokenize import normalize_token, tokenize
from ..constants import TREECLASS_KEY
from ..diffs import Edit, Patch
from ..models import ObjectRef # , BlobRef, TreeRef, Commit, Tag
from ..utils import class_fullname, dict_to_treelib, equal
def camel_to_snake(name):
name = re.sub('(.)([A-Z][a-z]+)', r'\1_\2', name)
return re.sub('([a-z0-9])([A-Z])', r'\1_\2', name).lower()
class KeyTypeError(TypeError):
pass
class BaseTree(MutableMapping):
TREE_CLASSES = []
def __init_subclass__(cls, **kwargs):
super().__init_subclass__(**kwargs)
cls.TREE_CLASSES.append(cls)
method_name = "new_" + camel_to_snake(cls.__name__)
def method(self, name, *args, **kwargs):
self.add_tree(name, cls(*args, **kwargs))
return self[name]
method.__name__ = method_name
setattr(BaseTree, method_name, method)
@classmethod
def instance_from_dict(cls, d):
if not isinstance(d, Mapping):
raise TypeError(f"{d} is not a Mapping")
if TREECLASS_KEY not in d:
raise ValueError('Mapping is not a valid tree representation.')
cls = locate(d[TREECLASS_KEY])
return cls.from_dict(d)
def add_tree(self, name, tree):
if name in self:
raise KeyError(f"A value with the key {name} alread exists.")
self[name] = tree
def to_nested_dict(self, sort=False):
items = self.to_dict().items()
if sort:
items = sorted(items)
d = {}
for k, v in items:
if isinstance(v, BaseTree):
d[k] = v.to_nested_dict(sort=sort)
else:
d[k] = v
return d
def to_nested_label_dict(self) -> dict:
d = self.to_label_dict()
for k, v in d.items():
if isinstance(v, BaseTree):
d[k] = v.to_nested_label_dict()
return d
def to_paths_dict(self, sep='/') -> dict:
d = self.to_label_dict()
paths = {}
for k, v in d.items():
if isinstance(v, BaseTree):
for k2, v2 in v.to_paths_dict().items():
paths[k + sep + k2] = v2
else:
paths[k] = v
paths[TREECLASS_KEY] = class_fullname(self)
return paths
def sync(self, m: MutableMapping, sep='/'):
if hasattr(m, 'fs'):
sep = m.fs.sep
paths = self.to_paths_dict(sep=sep)
for k in m.keys():
if k.startswith('.'):
continue
if k not in paths:
del m[k]
for k, v in paths.items():
m[k] = v
return m
def persist(self, path, serializer="msgpack-dill"):
from igit.storage import ObjectStorage
m = fsspec.get_mapper(path)
store = ObjectStorage(
m,
serializer=serializer,
)
return self.sync(store)
@classmethod
def from_paths_dict(cls, d, sep='/'):
if TREECLASS_KEY in d:
cls = locate(d[TREECLASS_KEY])
tree = defaultdict(dict)
for k in d.keys():
if k.startswith('.'):
continue
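            # split off the first path component; anything after the separator belongs to a nested subtree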
label, _, rest = k.partition(sep)
if rest:
tree[label][rest] = d[k]
else:
tree[k] = d[k]
tree = dict(tree)
new_tree = {}
for k, v in tree.items():
if k.startswith('.'):
continue
if isinstance(v, dict) and TREECLASS_KEY in v:
new_tree[k] = BaseTree.from_paths_dict(v, sep=sep)
else:
new_tree[k] = v
return cls.from_label_dict(new_tree)
def to_echarts_series(self, name) -> dict:
d = self.to_label_dict()
children = []
for k, v in d.items():
if isinstance(v, BaseTree):
child = v.to_echarts_series(k)
else:
child = {"name": k, "value": str(v)}
children.append(child)
return {"name": name, "children": children}
def echarts_tree(self, label="Tree view"):
from ..visualizations import echarts_graph
import panel as pn
pn.extension('echarts')
echart = echarts_graph(self.to_echarts_series("root"), label)
echart_pane = pn.pane.ECharts(echart,
width=700,
height=400,
sizing_mode="stretch_both")
return echart_pane
def to_treelib(self, **kwargs):
d = self.to_nested_dict()
return dict_to_treelib(d, **kwargs)
def __repr__(self):
label = camel_to_snake(self.__class__.__name__)
return self.to_treelib(parent=label).show(stdout=False)
def _ipython_key_completions_(self):
return list(self.keys())
__str__ = __repr__
def hash_object(self, store, obj, otype="blob"):
if isinstance(obj, BaseTree):
obj = obj.to_merkle_tree(store)
otype = "tree"
return self._hash_object(store, obj, otype)
def to_merkle_tree(self, store):
tree = self.__class__()
for k, v in sorted(self.items()):
if isinstance(v, BaseTree):
v = v.to_merkle_tree(store)
tree[k] = store.hash_object(v)
return tree
def hash_tree(self, store):
return self.hash_object(store, self)
def deref(self, store, recursive=True):
d = {}
for k, v in self.items():
if recursive and hasattr(v, "deref"):
v = v.deref(store, recursive=recursive)
d[k] = v
return self.__class__.from_dict(d)
def _hash_object(self, store, obj, otype):
return store.hash_object(obj)
def iter_subtrees(self):
for k, v in self.items():
if isinstance(v, BaseTree):
yield k, v
elif isinstance(v, ObjectRef) and v.otype == "tree":
yield k, v
@property
def sub_trees(self):
return {k: v for k, v in self.iter_subtrees()}
    def __contains__(self, key):
return key in list(self.keys())
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return tokenize(self) == tokenize(other)
def _igit_hash_object_(self, odb):
return self.hash_tree(odb)
@abstractstaticmethod
def compatible_keys(keys: Iterable) -> bool:
pass
@abstractclassmethod
def from_dict(cls, d: Mapping):
pass
@abstractmethod
def to_dict(self) -> dict:
pass
@abstractclassmethod
def from_label_dict(cls, d):
pass
@abstractmethod
def to_label_dict(self) -> dict:
pass
@abstractmethod
def to_native(self):
pass
@abstractmethod
def diff(self, other):
pass
@abstractmethod
def filter_keys(self, pattern):
pass
def diff_edits(self, other):
diff = self.diff(other)
return get_edits(diff)
def apply_diff(self, diff):
result = self.__class__.from_dict(self.to_dict())
for k, v in diff.items():
if isinstance(v, Patch):
v.apply(k, result)
elif isinstance(v, BaseTree):
result[k] = result[k].apply_diff(v)
return result
def get_edits(diff):
edits = diff.__class__()
for k, v in diff.items():
if isinstance(v, BaseTree):
cs = get_edits(v)
if len(cs):
edits[k] = cs
elif isinstance(v, Edit):
edits[k] = v
return edits
@normalize_token.register(BaseTree)
def normalize_tree(tree):
return tuple((k, normalize_token(tree[k])) for k in sorted(tree.keys()))
```
#### File: igit/trees/configs.py
```python
from collections import defaultdict
from intervaltree.intervaltree import IntervalTree
from ..interval_utils import interval_dict_to_df
from .base import BaseTree
from .intervals import BaseIntervalTree, Interval
from .labels import LabelTree
class ConfigTree(LabelTree):
def __setitem__(self, key, value):
if isinstance(value,
BaseTree) and not isinstance(value, BaseIntervalTree):
raise TypeError("Config subtrees can only be of type IntervalTree")
super().__setitem__(key, value)
@property
def start(self):
return min([t.start for t in self.values() if len(t)])
@property
def end(self):
return max([t.end for t in self.values() if len(t)])
@staticmethod
def mergable(name, ivs, start, end):
return [
Interval(max(iv.begin, start), min(iv.end, end), (name, iv.data))
for iv in ivs
]
def boundaries(self, start=None, end=None, *keys):
if start is None:
start = self.start
if end is None:
end = self.end
if not keys:
keys = self._mapping.keys()
merged = []
for k in keys:
v = self._mapping[k]
if isinstance(v, BaseIntervalTree):
ivs = v.overlap(start, end)
else:
ivs = [Interval(start, end, v)]
merged.extend(self.mergable(k, ivs, start, end))
tree = IntervalTree(merged)
tree.split_overlaps()
cfg = {k: [] for k in keys}
for iv in tree:
cfg[iv.data[0]].append(Interval(iv.begin, iv.end, iv.data[1]))
cfg = {k: sorted(v) for k, v in cfg.items()}
return cfg
def boundaries_df(self, start=None, end=None, *keys):
if start is None:
start = self.start
if end is None:
end = self.end
return interval_dict_to_df(self.boundaries(start, end, *keys))
def split_on_boundaries(self, start=None, end=None, *keys):
if start is None:
start = self.start
if end is None:
end = self.end
config = defaultdict(dict)
cfg = self.boundaries(start, end, *keys)
for k, ivs in cfg.items():
for iv in ivs:
config[(
iv.begin,
iv.end,
)][k] = iv.data
return dict(config)
def show_boundaries(self,
start=None,
end=None,
*keys,
show_labels=True,
**kwargs):
import holoviews as hv
import panel as pn
pn.extension()
if start is None:
start = self.start
if end is None:
end = self.end
df = self.boundaries_df(start, end, *keys)
df["value_str"] = df.value.apply(lambda x: str(x))
opts = dict(color="value_str",
responsive=True,
cmap="Category20",
title="Interval boundaries",
height=len(df["parameter"].unique()) * 30 + 80,
line_width=30,
alpha=0.5,
tools=['hover'])
opts.update(**kwargs)
segments = hv.Segments(df, ["begin", "parameter", "end", "parameter"],
['value_str']).opts(**opts)
vline = hv.Overlay([
hv.VLine(x).opts(color="grey", line_width=1)
for x in df.end.unique()
])
range_view = segments
if show_labels:
df["label"] = df.value.apply(lambda x: str(x)[:8])
labels = hv.Labels(df, ["mid", "parameter"],
["label", 'value_str'])
range_view = range_view * labels
range_view = range_view * vline
range_selection = hv.Segments(
(start - 0.1 * (end - start), 'view', end + 0.1 *
(end - start), 'view', 'full range'),
["start", "parameter", "end", "parameter"],
)
range_selection.opts(
height=100,
yaxis=None,
default_tools=[],
responsive=True,
)
hv.plotting.links.RangeToolLink(range_selection, segments)
layout = (range_view + range_selection).cols(1)
layout = layout.opts(
hv.opts.Layout(shared_axes=False, merge_tools=False))
return pn.Column(layout, sizing_mode="stretch_both")
```
{
"source": "jmosbacher/instrosetta-python",
"score": 2
} |
#### File: instrosetta-python/instrosetta/client.py
```python
import grpc
class RpcClient:
stub_class = lambda channel : None
def __init__(self, addr="localhost:50052"):
self.addr = addr
self._channel = None
self._stub = None
def _single_rpc(self, method, request):
try:
resp = getattr(self._stub, method)(request)
return resp
except grpc.RpcError as e:
print(e)
# FIXME: log error/ raise exception.
def single_rpc(self, method, request):
if self._channel is None:
with self as s:
return s._single_rpc(method, request)
else:
return self._single_rpc(method, request)
def _streaming_rpc(self, method, request):
try:
for resp in getattr(self._stub, method)(request):
yield resp
except grpc.RpcError as e:
print(e)
# FIXME: log error/ raise exception.
#
def streaming_rpc(self, method, request):
if self._channel is None:
with self as s:
for resp in s._streaming_rpc(method, request):
yield resp
else:
for resp in self._streaming_rpc(method, request):
yield resp
def __enter__(self):
self._channel = grpc.insecure_channel(self.addr)
self._stub = self.stub_class(self._channel)
return self
def __exit__(self, exc_type, exc_value, traceback):
self._channel.close()
self._channel = None
self._stub = None
```
#### File: interfaces/data_aquisition/basic_daq_pb2_grpc.py
```python
import grpc
from instrosetta.interfaces.data_aquisition import basic_daq_pb2 as instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2
class BasicDaqStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.GetAvailbleDevices = channel.unary_stream(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/GetAvailbleDevices',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAvailbleDevicesRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAvailbleDevicesResponse.FromString,
)
self.Initialize = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/Initialize',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.InitializeRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.InitializeResponse.FromString,
)
self.Shutdown = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/Shutdown',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ShutdownRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ShutdownResponse.FromString,
)
self.ReadDigital = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/ReadDigital',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadDigitalRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadDigitalResponse.FromString,
)
self.ReadAnalog = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/ReadAnalog',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadAnalogRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadAnalogResponse.FromString,
)
self.WriteDigital = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/WriteDigital',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteDigitalRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteDigitalResponse.FromString,
)
self.WriteAnalog = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/WriteAnalog',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteAnalogRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteAnalogResponse.FromString,
)
self.GetDeviceDetails = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/GetDeviceDetails',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDeviceDetailsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDeviceDetailsResponse.FromString,
)
self.GetDigitalChannels = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/GetDigitalChannels',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDigitalChannelsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDigitalChannelsResponse.FromString,
)
self.GetAnalogChannels = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/GetAnalogChannels',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAnalogChannelsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAnalogChannelsResponse.FromString,
)
self.SetChannelMode = channel.unary_unary(
'/instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq/SetChannelMode',
request_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.SetChannelModeRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.SetChannelModeResponse.FromString,
)
class BasicDaqServicer(object):
# missing associated documentation comment in .proto file
pass
def GetAvailbleDevices(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Initialize(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Shutdown(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadDigital(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def ReadAnalog(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WriteDigital(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def WriteAnalog(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDeviceDetails(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetDigitalChannels(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetAnalogChannels(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetChannelMode(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_BasicDaqServicer_to_server(servicer, server):
rpc_method_handlers = {
'GetAvailbleDevices': grpc.unary_stream_rpc_method_handler(
servicer.GetAvailbleDevices,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAvailbleDevicesRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAvailbleDevicesResponse.SerializeToString,
),
'Initialize': grpc.unary_unary_rpc_method_handler(
servicer.Initialize,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.InitializeRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.InitializeResponse.SerializeToString,
),
'Shutdown': grpc.unary_unary_rpc_method_handler(
servicer.Shutdown,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ShutdownRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ShutdownResponse.SerializeToString,
),
'ReadDigital': grpc.unary_unary_rpc_method_handler(
servicer.ReadDigital,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadDigitalRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadDigitalResponse.SerializeToString,
),
'ReadAnalog': grpc.unary_unary_rpc_method_handler(
servicer.ReadAnalog,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadAnalogRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.ReadAnalogResponse.SerializeToString,
),
'WriteDigital': grpc.unary_unary_rpc_method_handler(
servicer.WriteDigital,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteDigitalRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteDigitalResponse.SerializeToString,
),
'WriteAnalog': grpc.unary_unary_rpc_method_handler(
servicer.WriteAnalog,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteAnalogRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.WriteAnalogResponse.SerializeToString,
),
'GetDeviceDetails': grpc.unary_unary_rpc_method_handler(
servicer.GetDeviceDetails,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDeviceDetailsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDeviceDetailsResponse.SerializeToString,
),
'GetDigitalChannels': grpc.unary_unary_rpc_method_handler(
servicer.GetDigitalChannels,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDigitalChannelsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetDigitalChannelsResponse.SerializeToString,
),
'GetAnalogChannels': grpc.unary_unary_rpc_method_handler(
servicer.GetAnalogChannels,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAnalogChannelsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.GetAnalogChannelsResponse.SerializeToString,
),
'SetChannelMode': grpc.unary_unary_rpc_method_handler(
servicer.SetChannelMode,
request_deserializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.SetChannelModeRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_data__aquisition_dot_basic__daq__pb2.SetChannelModeResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'instrosetta.interfaces.data_aquisition.basic_daq.v1.BasicDaq', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: interfaces/optomechanics/filter_wheel_pb2_grpc.py
```python
import grpc
from instrosetta.interfaces.optomechanics import filter_wheel_pb2 as instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2
class FilterWheelStub(object):
# missing associated documentation comment in .proto file
pass
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Initialize = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/Initialize',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeResponse.FromString,
)
self.Shutdown = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/Shutdown',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownResponse.FromString,
)
self.GetSpeedOptions = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSpeedOptions',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsResponse.FromString,
)
self.GetSpeed = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSpeed',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedResponse.FromString,
)
self.SetSpeed = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetSpeed',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedResponse.FromString,
)
self.GetSensorsOptions = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSensorsOptions',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsResponse.FromString,
)
self.GetSensors = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetSensors',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsResponse.FromString,
)
self.SetSensors = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetSensors',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsResponse.FromString,
)
self.GetFilterOptions = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetFilterOptions',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsResponse.FromString,
)
self.GetFilter = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetFilter',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterResponse.FromString,
)
self.SetFilter = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetFilter',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterResponse.FromString,
)
self.GetPositionOptions = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetPositionOptions',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsResponse.FromString,
)
self.GetPosition = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/GetPosition',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionResponse.FromString,
)
self.SetPosition = channel.unary_unary(
'/instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel/SetPosition',
request_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionRequest.SerializeToString,
response_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionResponse.FromString,
)
class FilterWheelServicer(object):
# missing associated documentation comment in .proto file
pass
def Initialize(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Shutdown(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSpeedOptions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSpeed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetSpeed(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSensorsOptions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetSensors(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetSensors(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFilterOptions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetFilter(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetFilter(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPositionOptions(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def GetPosition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def SetPosition(self, request, context):
# missing associated documentation comment in .proto file
pass
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_FilterWheelServicer_to_server(servicer, server):
rpc_method_handlers = {
'Initialize': grpc.unary_unary_rpc_method_handler(
servicer.Initialize,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.InitializeResponse.SerializeToString,
),
'Shutdown': grpc.unary_unary_rpc_method_handler(
servicer.Shutdown,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.ShutdownResponse.SerializeToString,
),
'GetSpeedOptions': grpc.unary_unary_rpc_method_handler(
servicer.GetSpeedOptions,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedOptionsResponse.SerializeToString,
),
'GetSpeed': grpc.unary_unary_rpc_method_handler(
servicer.GetSpeed,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSpeedResponse.SerializeToString,
),
'SetSpeed': grpc.unary_unary_rpc_method_handler(
servicer.SetSpeed,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSpeedResponse.SerializeToString,
),
'GetSensorsOptions': grpc.unary_unary_rpc_method_handler(
servicer.GetSensorsOptions,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsOptionsResponse.SerializeToString,
),
'GetSensors': grpc.unary_unary_rpc_method_handler(
servicer.GetSensors,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetSensorsResponse.SerializeToString,
),
'SetSensors': grpc.unary_unary_rpc_method_handler(
servicer.SetSensors,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetSensorsResponse.SerializeToString,
),
'GetFilterOptions': grpc.unary_unary_rpc_method_handler(
servicer.GetFilterOptions,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterOptionsResponse.SerializeToString,
),
'GetFilter': grpc.unary_unary_rpc_method_handler(
servicer.GetFilter,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetFilterResponse.SerializeToString,
),
'SetFilter': grpc.unary_unary_rpc_method_handler(
servicer.SetFilter,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetFilterResponse.SerializeToString,
),
'GetPositionOptions': grpc.unary_unary_rpc_method_handler(
servicer.GetPositionOptions,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionOptionsResponse.SerializeToString,
),
'GetPosition': grpc.unary_unary_rpc_method_handler(
servicer.GetPosition,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.GetPositionResponse.SerializeToString,
),
'SetPosition': grpc.unary_unary_rpc_method_handler(
servicer.SetPosition,
request_deserializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionRequest.FromString,
response_serializer=instrosetta_dot_interfaces_dot_optomechanics_dot_filter__wheel__pb2.SetPositionResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'instrosetta.interfaces.optomechanics.filter_wheel.v1.FilterWheel', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
```
#### File: servers/debugging/echo_servicer.py
```python
from concurrent import futures
import time
import math
import grpc
from instrosetta.interfaces.debugging import echo_pb2
from instrosetta.interfaces.debugging import echo_pb2_grpc
_ONE_DAY_IN_SECONDS = 60 * 60 * 24
class EchoServicer(echo_pb2_grpc.EchoServiceServicer):
def Echo(self, request, context):
return echo_pb2.EchoResponse(message=request.message)
def bind(self, server):
echo_pb2_grpc.add_EchoServiceServicer_to_server(self, server)
```
{
"source": "jmosbacher/intake-historian",
"score": 3
} |
#### File: intake-historian/intake_xehistorian/historian_stream.py
```python
import streamz.dataframe as sdf
import streamz
import time
import tornado
from tornado import gen
import sys
import requests
import getpass
import pandas as pd
import hvplot.streamz
import hvplot.pandas
from tornado.ioloop import IOLoop
from tornado.httpclient import AsyncHTTPClient
try:
import urllib.parse as urlparse
except ImportError:
import urlparse
from xecatalog.drivers.historian_api import HistorianAuth
class HistorianStream(sdf.DataFrame):
""" A streaming dataframe of SC data
Parameters
----------
url:
name:
freq: timedelta
The time interval between records
interval: timedelta
The time interval between new dataframes, should be significantly
larger than freq
Example
-------
>>> source = HistorianStream(freq=1)
"""
def __init__(self, parameters, url, auth={}, frequency=100,
dask=False, start=False, timeout=2):
if dask:
from streamz.dask import DaskStream
source = DaskStream()
self.loop = source.loop
else:
source = streamz.Source(asynchronous=False)
self.loop = IOLoop.current()
self.source = source
self.url = url
self.parameters = [(param, "") if isinstance(param, str) else (param[0], param[1]) for param in parameters]
self.frequency = frequency
self.continue_ = [True]
self.timeout = timeout
        auth_kwargs = auth.copy()  # copy so the caller's dict (and the mutable default) is never mutated
        auth_url = auth_kwargs.pop("url", urlparse.urlunsplit(urlparse.urlsplit(url)[:2]+("Login","","")))
        self.auth = HistorianAuth(auth_url, **auth_kwargs)
example = self.make_df(tuple([(time.time(), name, float("nan"), unit) for name, unit in self.parameters]))
stream = self.source.unique().map(self.make_df)
super(sdf.DataFrame, self).__init__(stream, example=example)
self.http_client = AsyncHTTPClient()
if start:
self.start()
def start(self):
self.auth()
self.loop.add_callback(self._read_value_cb())
@staticmethod
def make_df(datas):
data = [{"name":name, 'timestampseconds': ts, 'value': value, "unit":unit} for ts, name, value, unit in datas]
return pd.DataFrame(data)
def _read_value_cb(self):
@gen.coroutine
def cb():
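            # poll each parameter once per cycle until stop() clears the flag; failed requests emit NaN placeholders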
while self.continue_[0]:
yield gen.sleep(self.frequency)
datas = []
for name, unit in self.parameters:
try:
resp = yield self.http_client.fetch(self.url+"?name="+name,
headers=self.auth(),validate_cert=False,
request_timeout=self.timeout,)
data = tornado.escape.json_decode(resp.body)[0]
# print(data)
data = (data["timestampseconds"], name, data['value'], unit)
except Exception as e:
# print(e)
data = (time.time(), name, float("nan"), "")
datas.append(data)
yield self.source.emit(tuple(datas))
return cb
def __del__(self):
self.stop()
def stop(self):
self.continue_[0] = False
```
{
"source": "jmosbacher/ng-opcua",
"score": 2
} |
#### File: ng-opcua/ng_opcserver/client_cli.py
```python
import sys
import click
import ng_client
@click.command()
@click.option("--duration", default=10, help="Poll duration.")
@click.option("--frequency", default=1, help="Poll frequency.")
@click.option("--debug", is_flag=True, default=False, help="Debug.")
def main(duration, frequency, debug):
ng_client.run_poll_status(duration, frequency, debug)
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
```
{
"source": "jmosbacher/rframe",
"score": 2
} |
#### File: rframe/interfaces/mongo.py
```python
import datetime
import pymongo
import numbers
from functools import singledispatch
from itertools import product
from typing import List, Union
from loguru import logger
import pandas as pd
from pydantic import BaseModel
from pymongo.collection import Collection
from ..types import Interval
from ..indexes import Index, InterpolatingIndex, IntervalIndex, MultiIndex
from ..utils import singledispatchmethod
from .base import BaseDataQuery, DatasourceInterface
query_precendence = {
Index: 1,
IntervalIndex: 2,
InterpolatingIndex: 3,
}
class MultiMongoAggregation(BaseDataQuery):
aggregations: list
def __init__(self, collection: Collection, aggregations: list):
if not isinstance(collection, Collection):
raise TypeError(
f"collection must be a pymongo Collection, got {type(collection)}."
)
self.collection = collection
self.aggregations = aggregations
def execute(self, limit=None, skip=None):
logger.debug("Executing multi mongo aggregation.")
results = []
for agg in self.aggregations:
            results.extend(agg.execute(limit=limit, skip=skip))  # each aggregation already holds its collection
skip = 0 if skip is None else skip
if limit is not None:
return results[skip : limit + skip]
return results
def logical_or(self, other: "MultiMongoAggregation"):
if isinstance(other, MultiMongoAggregation):
extra = other.aggregations
else:
extra = [other]
return MultiMongoAggregation(self.collection, self.aggregations + extra)
def __or__(self, other):
return self.logical_or(other)
def __add__(self, other):
return self.logical_or(other)
class MongoAggregation(BaseDataQuery):
pipeline: list
def __init__(self, index, labels, collection: Collection, pipeline: list):
if not isinstance(collection, Collection):
raise TypeError(
f"collection must be a pymongo Collection, got {type(collection)}."
)
self.index = index
self.labels = labels
self.collection = collection
self.pipeline = pipeline
@property
def docs_per_label(self):
n = 1
if isinstance(self.index, MultiIndex):
for index in self.index.indexes:
if self.labels.get(index.name, None) is not None:
n *= index.DOCS_PER_LABEL
elif self.labels.get(self.index.name, None) is not None:
n *= self.index.DOCS_PER_LABEL
return n
def execute(self, limit: int = None, skip: int = None, sort=None):
return list(self.iter(limit=limit, skip=skip, sort=sort))
def iter(self, limit=None, skip=None, sort=None):
pipeline = list(self.pipeline)
if sort is not None:
sort = [sort] if isinstance(sort, str) else sort
if isinstance(sort, list):
sort_arg = {field: 1 for field in sort}
elif isinstance(sort, dict):
sort_arg = sort
else:
raise TypeError(f"sort must be a list or dict, got {type(sort)}.")
pipeline = [{"$sort": sort_arg}] + pipeline
if isinstance(skip, int):
raw_skip = skip * self.docs_per_label
pipeline.append({"$skip": raw_skip})
if isinstance(limit, int):
raw_limit = limit * self.docs_per_label
raw_limit = int(raw_limit)
pipeline.append({"$limit": raw_limit})
pipeline.append({"$project": {"_id": 0}})
logger.debug(f"Executing mongo aggregation: {pipeline}.")
# docs = list(self.collection.aggregate(pipeline, allowDiskUse=True))
collected = 0
limit = limit if limit is not None else float("inf")
docs = []
for doc in self.collection.aggregate(pipeline, allowDiskUse=True):
docs.append(doc)
if len(docs) >= self.docs_per_label:
docs = self.index.reduce(docs, self.labels)
for doc in docs:
yield from_mongo(doc)
collected += 1
if collected >= limit:
return
docs = []
if len(docs) and collected < limit:
docs = self.index.reduce(docs, self.labels)
for doc in docs:
yield from_mongo(doc)
collected += 1
if collected >= limit:
return
def unique(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
results = {}
for field in fields:
pipeline = list(self.pipeline)
pipeline.append(
{
"$group": {
"_id": "$" + field,
"first": {"$first": "$" + field},
}
}
)
results[field] = [
doc["first"]
for doc in self.collection.aggregate(pipeline, allowDiskUse=True)
]
results = from_mongo(results)
if len(fields) == 1:
return results[fields[0]]
return results
def max(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
results = {}
for field in fields:
pipeline = list(self.pipeline)
pipeline.append({"$sort": {field: -1}})
pipeline.append({"$limit": 1})
pipeline.append({"$project": {"_id": 0}})
try:
results[field] = next(
self.collection.aggregate(pipeline, allowDiskUse=True)
)[field]
except (StopIteration, KeyError):
results[field] = None
results = from_mongo(results)
if len(fields) == 1:
return results[fields[0]]
return results
def min(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
results = {}
for field in fields:
pipeline = list(self.pipeline)
pipeline.append({"$sort": {field: 1}})
pipeline.append({"$limit": 1})
pipeline.append({"$project": {"_id": 0}})
try:
results[field] = next(
self.collection.aggregate(pipeline, allowDiskUse=True)
)[field]
except (StopIteration, KeyError):
results[field] = None
results = from_mongo(results)
if len(fields) == 1:
return results[fields[0]]
return results
def count(self):
pipeline = list(self.pipeline)
pipeline.append({"$count": "count"})
try:
result = next(self.collection.aggregate(pipeline, allowDiskUse=True))
except StopIteration:
return 0
return result.get("count", 0)
def logical_and(self, other):
index = MultiIndex(self.index, other.index)
labels = dict(self.labels, **other.labels)
return MongoAggregation(
index, labels, self.collection, self.pipeline + other.pipeline
)
def logical_or(self, other):
if isinstance(other, MongoAggregation):
return MultiMongoAggregation(self.collection, [self, other])
if isinstance(other, MultiMongoAggregation):
return other + self
def __add__(self, other):
return self.logical_or(other)
def __and__(self, other):
return self.logical_and(other)
def __mul__(self, other):
return self.logical_and(other)
@DatasourceInterface.register_interface(pymongo.collection.Collection)
class MongoInterface(DatasourceInterface):
@classmethod
def from_url(
cls, source: str, database: str = None, collection: str = None, **kwargs
):
if source.startswith("mongodb"):
if database is None:
raise ValueError("database must be specified")
if collection is None:
raise ValueError("collection must be specified")
source = pymongo.MongoClient(source)[database][collection]
return cls(source)
raise NotImplementedError
@singledispatchmethod
def compile_query(self, index, label):
raise NotImplementedError(
f"{self.__class__.__name__} does not support {type(index)} indexes."
)
def simple_multi_query(self, index, labels):
pipeline = []
indexes = sorted(index.indexes, key=lambda x: query_precendence[type(x)])
for idx in indexes:
label = labels[idx.name]
if label is None:
continue
others = [name for name in index.names if name != idx.name]
agg = self.compile_query(idx, label, others=others)
pipeline.extend(agg.pipeline)
return MongoAggregation(index, labels, self.source, pipeline)
def product_multi_query(self, index, labels):
labels = [label if isinstance(label, list) else [label] for label in labels]
aggs = []
for label_vals in product(*labels):
agg = self.simple_multi_query(index.indexes, label_vals)
aggs.append(agg)
return MultiMongoAggregation(self.source, aggs)
@compile_query.register(list)
@compile_query.register(tuple)
@compile_query.register(MultiIndex)
def multi_query(self, index, labels):
logger.debug(
"Building mongo multi-query for index: " f"{index} with labels: {labels}"
)
if not isinstance(index, MultiIndex):
index = MultiIndex(*index)
return self.simple_multi_query(index, labels)
@compile_query.register(Index)
@compile_query.register(str)
def get_simple_query(self, index, label, others=None):
logger.debug(
"Building mongo simple-query for index: " f"{index} with label: {label}"
)
name = index.name if isinstance(index, Index) else index
label = to_mongo(label)
if isinstance(label, slice):
# support basic slicing, this will only work
# for values that are comparable with the
# $gt/$lt operators
start = label.start
stop = label.stop
step = label.step
if step is None:
label = {}
if start is not None:
label["$gte"] = start
if stop is not None:
label["$lt"] = stop
if not label:
label = None
else:
label = list(range(start, stop, step))
match = {name: label}
if isinstance(label, list):
# support querying multiple values
# in the same request
match = {name: {"$in": label}}
elif isinstance(label, dict):
match = {f"{name}.{k}": v for k, v in label.items()}
pipeline = []
if label is not None:
pipeline.append({"$match": match})
labels = {name: label}
return MongoAggregation(index, labels, self.source, pipeline)
@compile_query.register(InterpolatingIndex)
def build_interpolation_query(self, index, label, others=None):
"""For interpolation we match the values directly before and after
the value of interest. For each value we take the closest document on either side.
"""
logger.debug(
"Building mongo interpolating-query for index: "
f"{index} with label: {label}"
)
label = to_mongo(label)
if label is None:
labels = {index.name: label}
return MongoAggregation(index, labels, self.source, [])
limit = 1 if others is None else others
if not isinstance(label, list):
pipelines = dict(
before=mongo_before_query(index.name, label, limit=limit),
after=mongo_after_query(index.name, label, limit=limit),
)
pipeline = merge_pipelines(pipelines)
labels = {index.name: label}
return MongoAggregation(index, labels, self.source, pipeline)
pipelines = {
f"agg{i}": mongo_closest_query(index.name, value)
for i, value in enumerate(label)
}
pipeline = merge_pipelines(pipelines)
labels = {index.name: label}
return MongoAggregation(index, labels, self.source, pipeline)
@compile_query.register(IntervalIndex)
def build_interval_query(self, index, label, others=None):
"""Query overlaping documents with given interval, supports multiple
intervals as well as zero length intervals (left==right)
multiple overlap queries are joined with the $or operator
"""
logger.debug(
"Building mongo interval-query for index: " f"{index} with label: {label}"
)
if isinstance(label, list):
intervals = label
else:
intervals = [label]
intervals = to_mongo(intervals)
queries = []
for interval in intervals:
if interval is None:
continue
if isinstance(interval, tuple) and all([i is None for i in interval]):
continue
query = mongo_overlap_query(index, interval)
if query:
queries.append(query)
if queries:
pipeline = [
{
"$match": {
# support querying for multiple values
# in a single pipeline
"$or": queries,
}
},
]
else:
pipeline = []
labels = {index.name: intervals}
return MongoAggregation(index, labels, self.source, pipeline)
def insert(self, doc):
"""We want the client logic to be agnostic to
whether the value being replaced is actually stored in the DB or
was inferred from e.g interpolation.
The find_one_and_replace(upsert=True) logic is the best match
for the behavior we want even though it wasts an insert operation
when a document already exists.
FIXME: Maybe we can optimize this with an pipeline to
avoid replacing existing documents with a copy.
"""
from rframe.schema import InsertionError
index = to_mongo(doc.index_labels)
try:
doc = self.source.find_one_and_update(
index,
{"$set": to_mongo(doc.dict())},
projection={"_id": False},
upsert=True,
return_document=pymongo.ReturnDocument.AFTER,
)
return doc
except Exception as e:
raise InsertionError(f"Mongodb has rejected this insertion:\n {e} ")
update = insert
def ensure_index(self, names, order=pymongo.ASCENDING):
self.source.ensure_index([(name, order) for name in names])
def delete(self, doc):
index = to_mongo(doc.index_labels)
return self.source.delete_one(index)
def initdb(self, schema):
index_names = list(schema.get_index_fields())
self.ensure_index(index_names)
def mongo_overlap_query(index, interval):
"""Builds a single overlap query
Intervals with one side equal to null are treated as extending to infinity in
that direction.
    Supports closed or open intervals as well as infinite intervals.
    Overlap definition:
    The two intervals (L,R) and (l,r) overlap iff L<r and l<R.
    The operator < is replaced with <= when the interval is closed on that side.
    If L/l are None they are treated as -inf,
    and if R/r are None they are treated as inf.
"""
# Set the appropriate operators depending on if the interval
# is closed on one side or both
closed = getattr(index, "closed", "right")
gt_op = "$gte" if closed == "both" else "$gt"
lt_op = "$lte" if closed == "both" else "$lt"
# handle different kinds of interval definitions
if isinstance(interval, tuple):
left, right = interval
elif isinstance(interval, dict):
left, right = interval["left"], interval["right"]
elif isinstance(interval, slice):
left, right = interval.start, interval.stop
elif hasattr(interval, "left") and hasattr(interval, "right"):
left, right = interval.left, interval.right
else:
left = right = interval
# Some conditions may not apply if the query interval is None
# on one or both sides
conditions = []
if left is not None:
conditions.append(
{
"$or": [
# if the right side of the queried interval is
# None, treat it as inf
{f"{index.name}.right": None},
{f"{index.name}.right": {gt_op: left}},
]
}
)
if right is not None:
conditions.append(
{
"$or": [
{f"{index.name}.left": None},
{f"{index.name}.left": {lt_op: right}},
]
}
)
if conditions:
return {
"$and": conditions,
}
else:
return {}
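# --- Editor's sketch (hypothetical, not part of the original module) ---
# Shows the raw filter mongo_overlap_query builds for a plain (left, right)
# tuple; fake_index is a SimpleNamespace stand-in for a real IntervalIndex.
def _example_overlap_filter():
    from types import SimpleNamespace
    fake_index = SimpleNamespace(name="run")
    # Documents whose 'run' interval overlaps (10, 20):
    # {'$and': [{'$or': [{'run.right': None}, {'run.right': {'$gt': 10}}]},
    #           {'$or': [{'run.left': None}, {'run.left': {'$lt': 20}}]}]}
    return mongo_overlap_query(fake_index, (10, 20))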
def mongo_before_query(name, value, limit=1):
if isinstance(limit, list):
return mongo_grouped_before_query(name, value, limit)
return [
{"$match": {f"{name}": {"$lte": value}}},
{"$sort": {f"{name}": -1}},
{"$limit": limit},
]
def mongo_after_query(name, value, limit=1):
if isinstance(limit, list):
return mongo_grouped_after_query(name, value, limit)
return [
{"$match": {f"{name}": {"$gt": value}}},
{"$sort": {f"{name}": 1}},
{"$limit": limit},
]
def mongo_grouped_before_query(name, value, groups):
return [
{"$match": {f"{name}": {"$lte": value}}},
{"$sort": {f"{name}": -1}},
{"$group": {"_id": [f"${grp}" for grp in groups], "doc": {"$first": "$$ROOT"}}},
{
# make the documents the new root, discarding the groupby value
"$replaceRoot": {"newRoot": "$doc"},
},
]
def mongo_grouped_after_query(name, value, groups):
return [
{"$match": {f"{name}": {"$gt": value}}},
{"$sort": {f"{name}": 1}},
{"$group": {"_id": [f"${grp}" for grp in groups], "doc": {"$first": "$$ROOT"}}},
{
# make the documents the new root, discarding the groupby value
"$replaceRoot": {"newRoot": "$doc"},
},
]
def mongo_closest_query(name, value):
return [
{
"$addFields": {
# Add a field splitting the documents into
# before and after the value of interest
"_after": {"$gte": [f"${name}", value]},
# Add a field with the distance to the value of interest
"_diff": {"$abs": {"$subtract": [value, f"${name}"]}},
}
},
{
# sort in ascending order by distance
"$sort": {"_diff": 1},
},
{
# first group by whether document is before or after the value
# the take the first document in each group
"$group": {
"_id": "$_after",
"doc": {"$first": "$$ROOT"},
}
},
{
# make the documents the new root, discarding the groupby value
"$replaceRoot": {"newRoot": "$doc"},
},
{
# drop the extra fields, they are no longer needed
"$project": {"_diff": 0, "_after": 0},
},
]
def merge_pipelines(pipelines):
pipeline = [
{
# support multiple independent aggregations
# using the facet feature
"$facet": pipelines,
},
# Combine results of all aggregations
{
"$project": {
"union": {
"$setUnion": [f"${name}" for name in pipelines],
}
}
},
# we just want a single list of documents
{"$unwind": "$union"},
# move list of documents to the root of the result
# so we just get a nice list of documents
{"$replaceRoot": {"newRoot": "$union"}},
]
return pipeline
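# --- Editor's sketch (hypothetical, not part of the original module) ---
# Illustrates how an interpolation lookup is assembled for a scalar label: the
# closest document at or below the value ("before") and the closest one above
# it ("after") are fetched in one $facet and merged into a single list.
def _example_interpolation_pipeline():
    # mongo_before_query("time", 5) ->
    #   [{"$match": {"time": {"$lte": 5}}}, {"$sort": {"time": -1}}, {"$limit": 1}]
    # mongo_after_query("time", 5) ->
    #   [{"$match": {"time": {"$gt": 5}}}, {"$sort": {"time": 1}}, {"$limit": 1}]
    return merge_pipelines({
        "before": mongo_before_query("time", 5),
        "after": mongo_after_query("time", 5),
    })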
@singledispatch
def to_mongo(obj):
return obj
@to_mongo.register(dict)
def to_mongo_dict(obj: dict):
return {k: to_mongo(v) for k, v in obj.items()}
@to_mongo.register(list)
def to_mongo_list(obj):
return [to_mongo(v) for v in obj]
@to_mongo.register(tuple)
def to_mongo_tuple(obj):
return tuple(to_mongo(v) for v in obj)
@to_mongo.register(BaseModel)
def to_mongo_interval(obj):
return to_mongo(obj.dict())
@to_mongo.register(pd.DataFrame)
def to_mongo_df(df):
return to_mongo(df.to_dict(orient="records"))
@to_mongo.register(datetime.datetime)
def to_mongo_datetime(obj):
# mongodb datetime has millisecond resolution
return obj.replace(microsecond=int(obj.microsecond / 1000) * 1000)
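# Editor's sketch (hypothetical, not part of the original module): demonstrates
# the millisecond truncation applied before datetimes are handed to MongoDB.
def _example_datetime_truncation():
    dt = datetime.datetime(2022, 1, 1, microsecond=123_456)
    return to_mongo(dt)  # -> datetime.datetime(2022, 1, 1, 0, 0, 0, 123000)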
@to_mongo.register(datetime.timedelta)
def to_mongo_timedelta(obj):
# mongodb datetime has millisecond resolution
seconds = int(obj.total_seconds() * 1e3) / 1e3
return datetime.timedelta(seconds=seconds)
@to_mongo.register(pd.Timestamp)
def to_mongo_timestamp(obj):
return to_mongo(obj.to_pydatetime())
@to_mongo.register(pd.Timedelta)
def to_mongo_pdtimedelta(obj):
return to_mongo(obj.to_pytimedelta())
@to_mongo.register(numbers.Integral)
def to_mongo_int(obj):
return int(obj)
@to_mongo.register(numbers.Real)
def to_mongo_float(obj):
return float(obj)
@singledispatch
def from_mongo(obj):
return obj
@from_mongo.register(list)
def from_mongo_list(obj):
return [from_mongo(v) for v in obj]
@from_mongo.register(tuple)
def from_mongo_tuple(obj):
return tuple(from_mongo(v) for v in obj)
@from_mongo.register(dict)
def from_mongo_dict(obj):
if len(obj) == 2 and "left" in obj and "right" in obj:
return Interval[obj["left"], obj["right"]]
return {k: from_mongo(v) for k, v in obj.items()}
```
#### File: rframe/interfaces/pandas.py
```python
from functools import singledispatch
import numbers
from loguru import logger
from datetime import datetime
from typing import Any, Dict, List, Union
import numpy as np
import pandas as pd
from ..indexes import Index, InterpolatingIndex, IntervalIndex, MultiIndex
from ..utils import singledispatchmethod
from .base import BaseDataQuery, DatasourceInterface
from ..types import Interval
class PandasBaseQuery(BaseDataQuery):
def __init__(self, index, df, column: str, label: Any) -> None:
self.index = index
self.df = df
self.column = column
self.label = label
@property
def labels(self):
return {self.column: self.label}
def apply_selection(self, df):
raise NotImplementedError
def execute(self, limit: int = None, skip: int = None, sort=None):
logger.debug("Applying pandas dataframe selection")
if not len(self.df):
return []
if sort is None:
df = self.df
else:
if isinstance(sort, str):
sort = [sort]
elif isinstance(sort, dict):
sort = list(sort)
df = self.df.sort_values(sort)
df = self.apply_selection(df)
if df.index.names or df.index.name:
df = df.reset_index()
if limit is not None:
start = skip * self.index.DOCS_PER_LABEL if skip is not None else 0
limit = start + limit * self.index.DOCS_PER_LABEL
df = df.iloc[start:limit]
docs = df.to_dict(orient="records")
docs = self.index.reduce(docs, self.labels)
logger.debug(f"Done. Found {len(docs)} documents.")
docs = from_pandas(docs)
return docs
def min(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
df = self.apply_selection(self.df)
results = {}
for field in fields:
if field in df.index.names:
df = df.reset_index()
results[field] = df[field].min()
if len(fields) == 1:
results = results[fields[0]]
results = from_pandas(results)
return results
def max(self, fields: Union[str, List[str]]):
if isinstance(fields, str):
fields = [fields]
df = self.apply_selection(self.df)
results = {}
for field in fields:
if field in df.index.names:
df = df.reset_index()
results[field] = df[field].max()
if len(fields) == 1:
results = results[fields[0]]
results = from_pandas(results)
return results
def unique(self, fields: Union[str, List[str]]) -> Union[List[Any], Dict[str,List[Any]]]:
if isinstance(fields, str):
fields = [fields]
df = self.apply_selection(self.df)
results = {}
for field in fields:
if field in df.index.names:
df = df.reset_index()
results[field] = list(df[field].unique())
if len(fields) == 1:
results = results[fields[0]]
results = from_pandas(results)
return results
def count(self):
df = self.apply_selection(self.df)
return len(df)
class PandasSimpleQuery(PandasBaseQuery):
def apply_selection(self, df):
if self.label is None:
return df
if self.column in df.index.names:
df = df.reset_index()
if self.column not in df.columns:
raise KeyError(self.column)
label = self.label
if isinstance(label, slice):
if label.step is None:
ge = df[self.column] >= label.start
lt = df[self.column] < label.stop
                mask = ge & lt
else:
label = list(range(label.start, label.stop, label.step))
if isinstance(label, list):
mask = df[self.column].isin(label)
else:
mask = df[self.column] == label
return df.loc[mask]
class PandasIntervalQuery(PandasBaseQuery):
def apply_selection(self, df):
if self.label is None:
return df
if self.column in df.index.names:
df = df.reset_index()
if self.column not in df.columns:
raise KeyError(self.column)
df = df.set_index(self.column)
interval = self.label
if isinstance(interval, tuple):
left, right = interval
elif isinstance(interval, dict):
left, right = interval["left"], interval["right"]
elif isinstance(interval, slice):
left, right = interval.start, interval.stop
elif hasattr(interval, "left") and hasattr(interval, "right"):
left, right = interval.left, interval.right
else:
left = right = interval
if isinstance(left, datetime):
left = pd.to_datetime(left)
if isinstance(right, datetime):
right = pd.to_datetime(right)
interval = pd.Interval(left, right)
return df[df.index.overlaps(interval)]
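# --- Editor's sketch (hypothetical, not part of the original module) ---
# Rows are kept when their stored interval overlaps the requested one.
# Passing index=None is fine here because apply_selection does not use it.
def _example_interval_selection():
    df = pd.DataFrame(
        {"value": [1, 2]},
        index=pd.IntervalIndex.from_tuples([(0, 5), (5, 10)], name="run"),
    )
    query = PandasIntervalQuery(index=None, df=df, column="run", label=(4, 6))
    return query.apply_selection(df)  # both rows overlap the interval (4, 6)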
class PandasInterpolationQuery(PandasBaseQuery):
def apply_selection(self, df, limit=1):
if self.label is None:
return df
if self.column in df.index.names:
df = df.reset_index()
if self.column not in df.columns:
raise KeyError(self.column)
rows = []
# select all values before requested values
idx_column = df[self.column]
before = df[idx_column <= self.label]
if len(before):
# if there are values after `value`, we find the closest one
before = before.sort_values(self.column, ascending=False).head(limit)
rows.append(before)
# select all values after requested values
after = df[idx_column > self.label]
if len(after):
# same as before
after = after.sort_values(self.column, ascending=True).head(limit)
rows.append(after)
if not rows:
return df.head(0)
return pd.concat(rows)
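# --- Editor's sketch (hypothetical, not part of the original module) ---
# Keeps the closest row on each side of the requested value (here 2.5), which
# the index can then interpolate between; index=None is unused by apply_selection.
def _example_interpolation_selection():
    df = pd.DataFrame({"x": [0.0, 1.0, 2.0, 3.0], "y": [0, 10, 20, 30]})
    query = PandasInterpolationQuery(index=None, df=df, column="x", label=2.5)
    return query.apply_selection(df)  # the rows with x == 2.0 and x == 3.0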
class PandasMultiQuery(PandasBaseQuery):
def __init__(self, index, df, queries: List[PandasBaseQuery]) -> None:
self.index = index
self.df = df
self.queries = queries
@property
def labels(self):
return {query.column: query.label for query in self.queries}
def apply_selection(self, df):
if len(self.queries) == 1:
return self.queries[0].apply_selection(df)
for query in self.queries:
if isinstance(query, PandasInterpolationQuery):
selections = []
others = [q.column for q in self.queries if q is not query]
if not others:
df = query.apply_selection(df)
continue
for _, pdf in df.groupby(others):
selection = query.apply_selection(pdf).reset_index()
selections.append(selection)
selections = [s for s in selections if len(s)]
if not selections:
df = df.head(0)
elif len(selections) == 1:
df = selections[0]
else:
df = pd.concat(selections)
else:
df = query.apply_selection(df)
return df
@DatasourceInterface.register_interface(pd.DataFrame)
class PandasInterface(DatasourceInterface):
@classmethod
def from_url(cls, url: str, **kwargs):
if url.endswith(".csv"):
df = pd.read_csv(url, **kwargs)
return cls(df)
elif url.endswith(".pq"):
df = pd.read_parquet(url, **kwargs)
return cls(df)
elif url.endswith(".pkl"):
df = pd.read_pickle(url, **kwargs)
return cls(df)
raise NotImplementedError
@singledispatchmethod
def compile_query(self, index, label):
raise NotImplementedError(
f"{self.__class__.__name__} does not support {type(index)} indexes."
)
@compile_query.register(Index)
def simple_query(self, index, label):
return PandasSimpleQuery(index, self.source, index.name, label)
@compile_query.register(IntervalIndex)
def interval_query(self, index, label):
return PandasIntervalQuery(index, self.source, index.name, label)
@compile_query.register(InterpolatingIndex)
def interpolating_query(self, index, label):
return PandasInterpolationQuery(index, self.source, index.name, label)
@compile_query.register(list)
@compile_query.register(tuple)
@compile_query.register(MultiIndex)
def multi_query(self, index, labels):
if not isinstance(index, MultiIndex):
index = MultiIndex(*index)
queries = [self.compile_query(idx, labels[idx.name]) for idx in index.indexes]
return PandasMultiQuery(index, self.source, queries)
def insert(self, doc):
index = doc.index_labels_tuple
index = to_pandas(index)
if len(index) == 1:
index = index[0]
self.source.loc[index, :] = doc.column_values
update = insert
def delete(self, doc):
index = doc.index_labels_tuple
index = to_pandas(index)
if len(index) == 1:
index = index[0]
return self.source.drop(index=index, inplace=True)
@singledispatch
def to_pandas(obj):
return obj
@to_pandas.register(datetime)
def to_pandas_datetime(obj):
return pd.to_datetime(obj)
@to_pandas.register(dict)
def to_pandas_dict(obj: dict):
if len(obj) == 2 and "left" in obj and "right" in obj:
left, right = to_pandas(obj["left"]), to_pandas(obj["right"])
return pd.Interval(left, right)
return {k: to_pandas(v) for k, v in obj.items()}
@to_pandas.register(list)
def to_pandas_list(obj):
return [to_pandas(v) for v in obj]
@to_pandas.register(tuple)
def to_pandas_tuple(obj):
return tuple(to_pandas(v) for v in obj)
@to_pandas.register(Interval)
def to_pandas_interval(obj):
left, right = to_pandas(obj.left), to_pandas(obj.right)
return pd.Interval(left, right)
@singledispatch
def from_pandas(obj):
return obj
@from_pandas.register(pd.DataFrame)
def from_pandas_df(df):
return from_pandas(df.to_dict(orient="records"))
@from_pandas.register(pd.Series)
def from_pandas_series(obj):
return from_pandas(obj.to_dict())
@from_pandas.register(pd.Interval)
def from_pandas_interval(obj):
left, right = from_pandas(obj.left), from_pandas(obj.right)
return Interval[left, right]
@from_pandas.register(list)
def from_pandas_list(obj):
return [from_pandas(v) for v in obj]
@from_pandas.register(tuple)
def from_pandas_tuple(obj):
return tuple(from_pandas(v) for v in obj)
@from_pandas.register(dict)
def from_pandas_dict(obj):
return {k: from_pandas(v) for k, v in obj.items()}
@from_pandas.register(pd.Timestamp)
def from_pandas_timestamp(obj):
return obj.to_pydatetime()
@from_pandas.register(pd.Timedelta)
def from_pandas_timedelta(obj):
return obj.to_pytimedelta()
@from_pandas.register(numbers.Integral)
def from_pandas_int(obj):
return int(obj)
@from_pandas.register(numbers.Real)
def from_pandas_float(obj):
return float(obj)
```
#### File: rframe/interfaces/rest.py
```python
from typing import List, Union
from loguru import logger
from .base import BaseDataQuery, DatasourceInterface
from ..indexes import Index, InterpolatingIndex, IntervalIndex, MultiIndex
from ..utils import singledispatchmethod
from ..rest_client import BaseRestClient, RestClient
class RestQuery(BaseDataQuery):
client: BaseRestClient
params: dict
def __init__(self, client: BaseRestClient, params=None):
self.client = client
self.params = params if params is not None else {}
def execute(self, limit: int = None, skip: int = None, sort=None):
logger.debug(
f"Executing rest api query with skip={skip}, sort={sort} and limit={limit}"
)
return self.client.query(limit=limit, skip=skip, sort=sort, **self.params)
def unique(self, fields: Union[str, List[str]]):
return self.client.unique(fields, **self.params)
def max(self, fields: Union[str, List[str]]):
return self.client.max(fields, **self.params)
def min(self, fields: Union[str, List[str]]):
return self.client.min(fields, **self.params)
def count(self):
return self.client.count(**self.params)
def serializable_interval(interval):
if isinstance(interval, list):
return [serializable_interval(iv) for iv in interval]
if isinstance(interval, tuple):
left, right = interval
elif isinstance(interval, dict):
left, right = interval["left"], interval["right"]
elif isinstance(interval, slice):
left, right = interval.start, interval.stop
elif hasattr(interval, "left") and hasattr(interval, "right"):
left, right = interval.left, interval.right
else:
left = right = interval
if left is None and right is None:
return None
interval = {"left": left, "right": right}
return interval
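# Editor's sketch (hypothetical, not part of the original module): the different
# interval spellings all collapse to the same JSON-friendly mapping.
def _example_serializable_interval():
    assert serializable_interval((1, 5)) == {"left": 1, "right": 5}
    assert serializable_interval(slice(1, 5)) == {"left": 1, "right": 5}
    assert serializable_interval(None) is None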
@DatasourceInterface.register_interface(BaseRestClient)
class RestInterface(DatasourceInterface):
@classmethod
def from_url(cls, url: str, headers=None, **kwargs):
if url.startswith("http://") or url.startswith("https://"):
client = RestClient(url, headers)
return cls(client)
raise NotImplementedError
@singledispatchmethod
def compile_query(self, index, label):
raise NotImplementedError(
f"{self.__class__.__name__} does not support {type(index)} indexes."
)
@compile_query.register(InterpolatingIndex)
@compile_query.register(Index)
def simple_query(self, index: Union[Index, InterpolatingIndex], label):
return RestQuery(self.source, {index.name: label})
@compile_query.register(IntervalIndex)
def interval_query(self, index: IntervalIndex, interval):
interval = serializable_interval(interval)
return RestQuery(self.source, {index.name: interval})
@compile_query.register(list)
@compile_query.register(tuple)
@compile_query.register(MultiIndex)
def multi_query(self, indexes, labels):
if isinstance(indexes, MultiIndex):
indexes = indexes.indexes
labels = labels.values()
params = {}
for idx, label in zip(indexes, labels):
query = self.compile_query(idx, label)
if idx.name in query.params:
params[idx.name] = query.params[idx.name]
return RestQuery(self.source, params)
def insert(self, doc):
logger.debug(f"REST api backend inserting document {doc}")
return self.source.insert(doc)
update = insert
def delete(self, doc):
logger.debug(f"REST api backend deleting document {doc}")
return self.source.delete(doc)
```
#### File: rframe/rframe/types.py
```python
import datetime
from typing import ClassVar, Literal, Mapping, Optional, TypeVar, Union
import pydantic
from pydantic import BaseModel, root_validator, ValidationError
LabelType = Union[int, str, datetime.datetime]
# allow up to 8 byte integers
MIN_INTEGER = 0
MAX_INTEGER = int(2**63 - 1)
MIN_INTEGER_DELTA = 1
# Must fit in 64 bit uint with ns resolution
MIN_DATETIME = datetime.datetime(1677, 9, 22, 0, 0)
MAX_DATETIME = datetime.datetime(2232, 1, 1, 0, 0)
# Will be truncated by mongodb date type
MIN_TIMEDELTA = datetime.timedelta(microseconds=1000)
MAX_TIMEDELTA = datetime.timedelta(days=106751)
class Interval(BaseModel):
class Config:
validate_assignment = True
frozen = True
_min: ClassVar = None
_max: ClassVar = None
_resolution: ClassVar = None
left: LabelType
right: Optional[LabelType] = None
# closed: Literal['left','right','both'] = 'right'
@classmethod
def __get_validators__(cls):
yield cls.validate_field
@classmethod
def _validate_boundary(cls, v):
if v is None:
raise TypeError("Interval boundary cannot be None.")
if v < cls._min:
raise ValueError(f"{cls} boundary must be larger than {cls._min}.")
if v > cls._max:
raise ValueError(f"{cls} boundary must be less than {cls._max}.")
@classmethod
def validate_field(cls, v, field):
if isinstance(v, cls):
return v
if isinstance(v, tuple):
left, right = v
elif isinstance(v, Mapping):
left = v.get("left", None)
right = v.get("right", None)
elif hasattr(v, "left") and hasattr(v, "left"):
left = v.left
right = v.right
else:
left, right = v, v
if right is None:
right = cls._max
return cls(left=left, right=right)
def __class_getitem__(cls, type_):
if isinstance(type_, tuple):
left, right = type_
if isinstance(left, int):
return IntegerInterval(left=left, right=right)
else:
return TimeInterval(left=left, right=right)
if not isinstance(type_, type):
type_ = type(type_)
if issubclass(type_, int):
return IntegerInterval
if issubclass(type_, datetime.datetime):
return TimeInterval
raise TypeError(type_)
@root_validator
def check_non_zero_length(cls, values):
left, right = values.get("left"), values.get("right")
cls._validate_boundary(left)
cls._validate_boundary(right)
if left > right:
raise ValueError("Interval left must be less than right.")
# FIXME: maybe left, right = right, left
if (right - left) < cls._resolution:
left = left - cls._resolution
values["left"] = left
values["right"] = right
return values
def overlaps(self, other):
return self.left < other.right and self.right > other.left
def __lt__(self, other: "Interval"):
if not isinstance(other, self.__class__):
raise NotImplementedError
if self.right is None:
return False
return self.right < other.left
def __le__(self, other: "Interval"):
if not isinstance(other, self.__class__):
raise NotImplementedError
if self.right is None:
return False
return self.right <= other.left
def __eq__(self, other):
if not isinstance(other, self.__class__):
return False
return (self.left == other.left) and (self.right == other.right)
def __gt__(self, other: "Interval"):
if not isinstance(other, self.__class__):
raise NotImplementedError
if other.right is None:
return False
return self.left > other.right
def __ge__(self, other: "Interval"):
if not isinstance(other, self.__class__):
raise NotImplementedError
if other.right is None:
return False
return self.left >= other.right
def __len__(self):
return self.right - self.left
def clone(self, left=None, right=None):
return self.__class__(left=left or self.left, right=right or self.right)
class IntegerInterval(Interval):
_resolution = 1
_min = MIN_INTEGER
_max = MAX_INTEGER
left: int = pydantic.Field(ge=MIN_INTEGER, lt=MAX_INTEGER - _resolution)
right: int = pydantic.Field(default=MAX_INTEGER, ge=_resolution, lt=MAX_INTEGER)
class TimeInterval(Interval):
_resolution = MIN_TIMEDELTA
_min = MIN_DATETIME
_max = MAX_DATETIME
left: datetime.datetime
right: datetime.datetime = MAX_DATETIME
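# --- Editor's sketch (hypothetical, not part of the original module) ---
# Interval[...] dispatches on the boundary type: ints give an IntegerInterval,
# datetimes give a TimeInterval. Intervals expose a simple overlap test.
def _example_interval_usage():
    a = Interval[0, 10]   # -> IntegerInterval(left=0, right=10)
    b = Interval[5, 20]
    return a.overlaps(b)  # -> True, since 0 < 20 and 10 > 5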
```
#### File: rframe/tests/test_utils.py
```python
import hypothesis.strategies as st
@st.composite
def non_overlapping_interval_lists(draw, elements=st.datetimes(), min_size=2):
elem = draw(st.lists(elements, unique=True, min_size=min_size * 2).map(sorted))
return list(zip(elem[:-1:2], elem[1::2]))
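# Editor's note (illustrative, not part of the original file): with sorted draws
# [t0, t1, t2, t3] the strategy above yields the disjoint pairs
# [(t0, t1), (t2, t3)], so the generated intervals never overlap.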
@st.composite
def non_overlapping_interval_ranges(draw, elements=st.datetimes(), min_size=2):
elem = draw(st.lists(elements, unique=True, min_size=min_size).map(sorted))
return list(zip(elem[:-1], elem[1:]))
``` |
{
"source": "jmosbacher/straxen",
"score": 2
} |
#### File: straxen/plugins/double_scatter.py
```python
import numpy as np
import strax
export, __all__ = strax.exporter()
@export
class DistinctChannels(strax.LoopPlugin):
"""
    Compute the number of PMTs that contribute to the
    alt_s1 but not to the main S1.
"""
__version__ = '0.1.1'
depends_on = ('event_basics', 'peaks')
loop_over = 'events'
dtype = [
('alt_s1_distinct_channels',
np.int32,
'Number of PMTs contributing to the secondary S1 '
'that do not contribute to the main S1'), ] + strax.time_fields
def compute_loop(self, event, peaks):
if event['alt_s1_index'] == -1:
n_distinct = 0
else:
s1_a = peaks[event['s1_index']]
s1_b = peaks[event['alt_s1_index']]
s1_a_peaks = np.nonzero((s1_a['area_per_channel']>0)*1)
s1_b_peaks = np.nonzero((s1_b['area_per_channel']>0)*1)
n_distinct=0
for channel in range(len(s1_b_peaks[0])):
if s1_b_peaks[0][channel] not in s1_a_peaks[0]:
n_distinct += 1
return dict(alt_s1_distinct_channels=n_distinct,
time=event['time'],
endtime=event['endtime'])
@export
class EventInfoDouble(strax.MergeOnlyPlugin):
"""
Alternate version of event_info for Kr and other double scatter
analyses:
- Uses a different naming convention:
s1 -> s1_a, alt_s1 -> s1_b, and similarly for s2s;
- Adds s1_b_distinct_channels, which can be tricky to compute
(since it requires going back to peaks)
"""
__version__ = '0.1.2'
depends_on = ['event_info', 'distinct_channels']
save_when = strax.SaveWhen.EXPLICIT
@staticmethod
def rename_field(orig_name):
special_cases = {'cs1': 'cs1_a',
'alt_cs1': 'cs1_b',
'alt_s1_delay': 'ds_s1_dt',
'cs2': 'cs2_a',
'alt_cs2': 'cs2_b',
'alt_s2_delay': 'ds_s2_dt',
'cs1_wo_timecorr': 'cs1_a_wo_timecorr',
'alt_cs1_wo_timecorr': 'cs1_b_wo_timecorr',
'cs2_wo_elifecorr': 'cs2_a_wo_elifecorr',
'alt_cs2_wo_elifecorr': 'cs2_b_wo_elifecorr',
'cs2_wo_timecorr': 'cs2_a_wo_timecorr',
'alt_cs2_wo_timecorr': 'cs2_b_wo_timecorr',
'cs2_area_fraction_top': 'cs2_a_area_fraction_top',
'alt_cs2_area_fraction_top': 'cs2_b_area_fraction_top',
'cs2_bottom': 'cs2_a_bottom',
'alt_cs2_bottom': 'cs2_b_bottom'}
if orig_name in special_cases:
return special_cases[orig_name]
name = orig_name
for s_i in [1, 2]:
if name.startswith(f's{s_i}'):
name = name.replace(f's{s_i}', f's{s_i}_a')
if name.startswith(f'alt_s{s_i}'):
name = name.replace(f'alt_s{s_i}', f's{s_i}_b')
return name
def infer_dtype(self):
self.input_dtype = (
strax.unpack_dtype(self.deps['event_info'].dtype)
+ [strax.unpack_dtype(self.deps['distinct_channels'].dtype)[0]])
return [
((comment, self.rename_field(name)), dt)
for (comment, name), dt in self.input_dtype]
def compute(self, events):
result = np.zeros(len(events), dtype=self.dtype)
for (_, name), _ in self.input_dtype:
result[self.rename_field(name)] = events[name]
return result
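# --- Editor's sketch (hypothetical, not part of straxen) ---
# The renaming convention in action: main S1/S2 fields become *_a and the
# alternate-peak fields become *_b.
def _example_rename_field():
    assert EventInfoDouble.rename_field('s1_area') == 's1_a_area'
    assert EventInfoDouble.rename_field('alt_s2_area') == 's2_b_area'
    assert EventInfoDouble.rename_field('alt_cs1') == 'cs1_b'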
```
#### File: straxen/plugins/pulse_processing.py
```python
from immutabledict import immutabledict
import numba
import numpy as np
import strax
import straxen
from straxen.get_corrections import is_cmt_option
export, __all__ = strax.exporter()
__all__ += ['NO_PULSE_COUNTS']
# These are also needed in peaklets, since hitfinding is repeated
HITFINDER_OPTIONS = tuple([
strax.Option(
'hit_min_amplitude', track=True, infer_type=False,
default=('hit_thresholds_tpc', 'ONLINE', True),
help='Minimum hit amplitude in ADC counts above baseline. '
             'Specify as a tuple of length n_tpc_pmts, or a number, '
             'or a string like "pmt_commissioning_initial" which means calling '
             'hitfinder_thresholds.py, '
             'or a tuple like (correction=str, version=str, nT=boolean), '
             'which means we are using cmt.'
)])
HITFINDER_OPTIONS_he = tuple([
strax.Option(
'hit_min_amplitude_he',
default=('hit_thresholds_he', 'ONLINE', True), track=True, infer_type=False,
help='Minimum hit amplitude in ADC counts above baseline. '
             'Specify as a tuple of length n_tpc_pmts, or a number, '
             'or a string like "pmt_commissioning_initial" which means calling '
             'hitfinder_thresholds.py, '
             'or a tuple like (correction=str, version=str, nT=boolean), '
             'which means we are using cmt.'
)])
HE_PREAMBLE = """High energy channels: attenuated signals of the top PMT-array\n"""
@export
@strax.takes_config(
strax.Option('hev_gain_model',
default=('disabled', None), infer_type=False,
help='PMT gain model used in the software high-energy veto.'
'Specify as (model_type, model_config)'),
strax.Option(
'baseline_samples',
default=40, infer_type=False,
help='Number of samples to use at the start of the pulse to determine '
'the baseline'),
# Tail veto options
strax.Option(
'tail_veto_threshold',
default=0, infer_type=False,
help=("Minimum peakarea in PE to trigger tail veto."
"Set to None, 0 or False to disable veto.")),
strax.Option(
'tail_veto_duration',
default=int(3e6), infer_type=False,
help="Time in ns to veto after large peaks"),
strax.Option(
'tail_veto_resolution',
default=int(1e3), infer_type=False,
help="Time resolution in ns for pass-veto waveform summation"),
strax.Option(
'tail_veto_pass_fraction',
default=0.05, infer_type=False,
help="Pass veto if maximum amplitude above max * fraction"),
strax.Option(
'tail_veto_pass_extend',
default=3, infer_type=False,
help="Extend pass veto by this many samples (tail_veto_resolution!)"),
strax.Option(
'max_veto_value',
default=None, infer_type=False,
help="Optionally pass a HE peak that exceeds this absolute area. "
"(if performing a hard veto, can keep a few statistics.)"),
# PMT pulse processing options
strax.Option(
'pmt_pulse_filter',
default=None, infer_type=False,
help='Linear filter to apply to pulses, will be normalized.'),
strax.Option(
'save_outside_hits',
default=(3, 20), infer_type=False,
help='Save (left, right) samples besides hits; cut the rest'),
strax.Option(
'n_tpc_pmts', type=int,
help='Number of TPC PMTs'),
strax.Option(
'check_raw_record_overlaps',
default=True, track=False, infer_type=False,
help='Crash if any of the pulses in raw_records overlap with others '
'in the same channel'),
strax.Option(
'allow_sloppy_chunking',
default=False, track=False, infer_type=False,
help=('Use a default baseline for incorrectly chunked fragments. '
'This is a kludge for improperly converted XENON1T data.')),
*HITFINDER_OPTIONS)
class PulseProcessing(strax.Plugin):
"""
Split raw_records into:
- (tpc) records
- aqmon_records
- pulse_counts
For TPC records, apply basic processing:
1. Flip, baseline, and integrate the waveform
2. Apply software HE veto after high-energy peaks.
3. Find hits, apply linear filter, and zero outside hits.
pulse_counts holds some average information for the individual PMT
channels for each chunk of raw_records. This includes e.g.
number of recorded pulses, lone_pulses (pulses which do not
overlap with any other pulse), or mean values of baseline and
    baseline rms per channel.
"""
__version__ = '0.2.3'
parallel = 'process'
rechunk_on_save = immutabledict(
records=False,
veto_regions=True,
pulse_counts=True)
compressor = 'zstd'
depends_on = 'raw_records'
provides = ('records', 'veto_regions', 'pulse_counts')
data_kind = {k: k for k in provides}
save_when = immutabledict(
records=strax.SaveWhen.TARGET,
veto_regions=strax.SaveWhen.TARGET,
pulse_counts=strax.SaveWhen.ALWAYS,
)
def infer_dtype(self):
# Get record_length from the plugin making raw_records
self.record_length = strax.record_length_from_dtype(
self.deps['raw_records'].dtype_for('raw_records'))
dtype = dict()
for p in self.provides:
if 'records' in p:
dtype[p] = strax.record_dtype(self.record_length)
dtype['veto_regions'] = strax.hit_dtype
dtype['pulse_counts'] = pulse_count_dtype(self.config['n_tpc_pmts'])
return dtype
def setup(self):
self.hev_enabled = (
(self.config['hev_gain_model'][0] != 'disabled')
and self.config['tail_veto_threshold'])
if self.hev_enabled:
self.to_pe = straxen.get_correction_from_cmt(self.run_id,
self.config['hev_gain_model'])
# Check config of `hit_min_amplitude` and define hit thresholds
# if cmt config
if is_cmt_option(self.config['hit_min_amplitude']):
self.hit_thresholds = straxen.get_correction_from_cmt(self.run_id,
self.config['hit_min_amplitude'])
# if hitfinder_thresholds config
elif isinstance(self.config['hit_min_amplitude'], str):
self.hit_thresholds = straxen.hit_min_amplitude(
self.config['hit_min_amplitude'])
else: # int or array
self.hit_thresholds = self.config['hit_min_amplitude']
def compute(self, raw_records, start, end):
if self.config['check_raw_record_overlaps']:
check_overlaps(raw_records, n_channels=3000)
# Throw away any non-TPC records; this should only happen for XENON1T
# converted data
raw_records = raw_records[
raw_records['channel'] < self.config['n_tpc_pmts']]
# Convert everything to the records data type -- adds extra fields.
r = strax.raw_to_records(raw_records)
del raw_records
# Do not trust in DAQ + strax.baseline to leave the
# out-of-bounds samples to zero.
strax.zero_out_of_bounds(r)
strax.baseline(r,
baseline_samples=self.config['baseline_samples'],
allow_sloppy_chunking=self.config['allow_sloppy_chunking'],
flip=True)
strax.integrate(r)
pulse_counts = count_pulses(r, self.config['n_tpc_pmts'])
pulse_counts['time'] = start
pulse_counts['endtime'] = end
if len(r) and self.hev_enabled:
r, r_vetoed, veto_regions = software_he_veto(
r, self.to_pe, end,
area_threshold=self.config['tail_veto_threshold'],
veto_length=self.config['tail_veto_duration'],
veto_res=self.config['tail_veto_resolution'],
pass_veto_extend=self.config['tail_veto_pass_extend'],
pass_veto_fraction=self.config['tail_veto_pass_fraction'],
max_veto_value=self.config['max_veto_value'])
# In the future, we'll probably want to sum the waveforms
# inside the vetoed regions, so we can still save the "peaks".
del r_vetoed
else:
veto_regions = np.zeros(0, dtype=strax.hit_dtype)
if len(r):
# Find hits
            # -- before filtering, since this messes with the S/N
hits = strax.find_hits(r, min_amplitude=self.hit_thresholds)
if self.config['pmt_pulse_filter']:
# Filter to concentrate the PMT pulses
strax.filter_records(
r, np.array(self.config['pmt_pulse_filter']))
le, re = self.config['save_outside_hits']
r = strax.cut_outside_hits(r, hits,
left_extension=le,
right_extension=re)
# Probably overkill, but just to be sure...
strax.zero_out_of_bounds(r)
return dict(records=r,
pulse_counts=pulse_counts,
veto_regions=veto_regions)
@export
@strax.takes_config(
strax.Option('n_he_pmts', track=False, default=752, infer_type=False,
help="Maximum channel of the he channels"),
strax.Option('record_length', default=110, track=False, type=int,
help="Number of samples per raw_record"),
*HITFINDER_OPTIONS_he)
class PulseProcessingHighEnergy(PulseProcessing):
__doc__ = HE_PREAMBLE + PulseProcessing.__doc__
__version__ = '0.0.1'
provides = ('records_he', 'pulse_counts_he')
data_kind = {k: k for k in provides}
rechunk_on_save = immutabledict(
records_he=False,
pulse_counts_he=True)
depends_on = 'raw_records_he'
compressor = 'zstd'
child_plugin = True
save_when = strax.SaveWhen.TARGET
def infer_dtype(self):
dtype = dict()
dtype['records_he'] = strax.record_dtype(self.config["record_length"])
dtype['pulse_counts_he'] = pulse_count_dtype(self.config['n_he_pmts'])
return dtype
def setup(self):
self.hev_enabled = False
self.config['n_tpc_pmts'] = self.config['n_he_pmts']
# Check config of `hit_min_amplitude` and define hit thresholds
# if cmt config
if is_cmt_option(self.config['hit_min_amplitude_he']):
self.hit_thresholds = straxen.get_correction_from_cmt(self.run_id,
self.config['hit_min_amplitude_he'])
# if hitfinder_thresholds config
elif isinstance(self.config['hit_min_amplitude_he'], str):
self.hit_thresholds = straxen.hit_min_amplitude(
self.config['hit_min_amplitude_he'])
else: # int or array
self.hit_thresholds = self.config['hit_min_amplitude_he']
def compute(self, raw_records_he, start, end):
result = super().compute(raw_records_he, start, end)
return dict(records_he=result['records'],
pulse_counts_he=result['pulse_counts'])
##
# Software HE Veto
##
@export
def software_he_veto(records, to_pe, chunk_end,
area_threshold=int(1e5),
veto_length=int(3e6),
veto_res=int(1e3),
pass_veto_fraction=0.01,
pass_veto_extend=3,
max_veto_value=None):
"""Veto veto_length (time in ns) after peaks larger than
area_threshold (in PE).
Further large peaks inside the veto regions are still passed:
We sum the waveform inside the veto region (with time resolution
    veto_res in ns) and pass regions within pass_veto_extend samples
    of any sample whose amplitude is above pass_veto_fraction times the maximum.
:returns: (preserved records, vetoed records, veto intervals).
:param records: PMT records
:param to_pe: ADC to PE conversion factors for the channels in records.
:param chunk_end: Endtime of chunk to set as maximum ceiling for the veto period
:param area_threshold: Minimum peak area to trigger the veto.
Note we use a much rougher clustering than in later processing.
:param veto_length: Time in ns to veto after the peak
:param veto_res: Resolution of the sum waveform inside the veto region.
Do not make too large without increasing integer type in some strax
dtypes...
:param pass_veto_fraction: fraction of maximum sum waveform amplitude to
trigger veto passing of further peaks
:param pass_veto_extend: samples to extend (left and right) the pass veto
regions.
:param max_veto_value: if not None, pass peaks that exceed this area
no matter what.
"""
veto_res = int(veto_res)
if veto_res > np.iinfo(np.int16).max:
raise ValueError("Veto resolution does not fit 16-bit int")
veto_length = np.ceil(veto_length / veto_res).astype(np.int64) * veto_res
veto_n = int(veto_length / veto_res) + 1
# 1. Find large peaks in the data.
# This will actually return big agglomerations of peaks and their tails
peaks = strax.find_peaks(
records, to_pe,
gap_threshold=1,
left_extension=0,
right_extension=0,
min_channels=100,
min_area=area_threshold,
result_dtype=strax.peak_dtype(n_channels=len(to_pe),
n_sum_wv_samples=veto_n))
# 2a. Set 'candidate regions' at these peaks. These should:
# - Have a fixed maximum length (else we can't use the strax hitfinder on them)
# - Never extend beyond the current chunk
# - Do not overlap
veto_start = peaks['time']
veto_end = np.clip(peaks['time'] + veto_length,
None,
chunk_end)
veto_end[:-1] = np.clip(veto_end[:-1], None, veto_start[1:])
# 2b. Convert these into strax record-like objects
# Note the waveform is float32 though (it's a summed waveform)
regions = np.zeros(
len(veto_start),
dtype=strax.interval_dtype + [
("data", (np.float32, veto_n)),
("baseline", np.float32),
("baseline_rms", np.float32),
("reduction_level", np.int64),
("record_i", np.int64),
("pulse_length", np.int64),
])
regions['time'] = veto_start
regions['length'] = (veto_end - veto_start) // veto_n
regions['pulse_length'] = veto_n
regions['dt'] = veto_res
if not len(regions):
# No veto anywhere in this data
return records, records[:0], np.zeros(0, strax.hit_dtype)
    # 3. Find pass_veto regions with big peaks inside the veto regions.
# For this we compute a rough sum waveform (at low resolution,
# without looping over the pulse data)
rough_sum(regions, records, to_pe, veto_n, veto_res)
if max_veto_value is not None:
pass_veto = strax.find_hits(regions, min_amplitude=max_veto_value)
else:
regions['data'] /= np.max(regions['data'], axis=1)[:, np.newaxis]
pass_veto = strax.find_hits(regions, min_amplitude=pass_veto_fraction)
# 4. Extend these by a few samples and inverse to find veto regions
regions['data'] = 1
regions = strax.cut_outside_hits(
regions,
pass_veto,
left_extension=pass_veto_extend,
right_extension=pass_veto_extend)
regions['data'] = 1 - regions['data']
veto = strax.find_hits(regions, min_amplitude=1)
# Do not remove very tiny regions
veto = veto[veto['length'] > 2 * pass_veto_extend]
# 5. Apply the veto and return results
veto_mask = strax.fully_contained_in(records, veto) == -1
return tuple(list(mask_and_not(records, veto_mask)) + [veto])
@numba.njit(cache=True, nogil=True)
def rough_sum(regions, records, to_pe, n, dt):
"""Compute ultra-rough sum waveforms for regions, assuming:
- every record is a single peak at its first sample
- all regions have the same length and dt
    and probably not caring too much about boundaries
"""
if not len(regions) or not len(records):
return
# dt and n are passed explicitly to avoid overflows/wraparounds
# related to the small dt integer type
peak_i = 0
r_i = 0
while (peak_i <= len(regions) - 1) and (r_i <= len(records) - 1):
p = regions[peak_i]
l = p['time']
r = l + n * dt
while True:
if r_i > len(records) - 1:
# Scan ahead until records contribute
break
t = records[r_i]['time']
if t >= r:
break
if t >= l:
index = int((t - l) // dt)
regions[peak_i]['data'][index] += (
records[r_i]['area'] * to_pe[records[r_i]['channel']])
r_i += 1
peak_i += 1
##
# Pulse counting
##
@export
def pulse_count_dtype(n_channels):
# NB: don't use the dt/length interval dtype, integer types are too small
# to contain these huge chunk-wide intervals
return [
(('Start time of the chunk', 'time'), np.int64),
(('End time of the chunk', 'endtime'), np.int64),
(('Number of pulses', 'pulse_count'),
(np.int64, n_channels)),
(('Number of lone pulses', 'lone_pulse_count'),
(np.int64, n_channels)),
(('Integral of all pulses in ADC_count x samples', 'pulse_area'),
(np.int64, n_channels)),
(('Integral of lone pulses in ADC_count x samples', 'lone_pulse_area'),
(np.int64, n_channels)),
(('Average baseline', 'baseline_mean'),
(np.int16, n_channels)),
(('Average baseline rms', 'baseline_rms_mean'),
(np.float32, n_channels)),
]
def count_pulses(records, n_channels):
"""Return array with one element, with pulse count info from records"""
if len(records):
result = np.zeros(1, dtype=pulse_count_dtype(n_channels))
_count_pulses(records, n_channels, result)
return result
return np.zeros(0, dtype=pulse_count_dtype(n_channels))
NO_PULSE_COUNTS = -9999 # Special value required by average_baseline in case counts = 0
@numba.njit(cache=True, nogil=True)
def _count_pulses(records, n_channels, result):
count = np.zeros(n_channels, dtype=np.int64)
lone_count = np.zeros(n_channels, dtype=np.int64)
area = np.zeros(n_channels, dtype=np.int64)
lone_area = np.zeros(n_channels, dtype=np.int64)
last_end_seen = 0
next_start = 0
# Array of booleans to track whether we are currently in a lone pulse
# in each channel
in_lone_pulse = np.zeros(n_channels, dtype=np.bool_)
baseline_buffer = np.zeros(n_channels, dtype=np.float64)
baseline_rms_buffer = np.zeros(n_channels, dtype=np.float64)
for r_i, r in enumerate(records):
if r_i != len(records) - 1:
next_start = records[r_i + 1]['time']
ch = r['channel']
if ch >= n_channels:
print('Channel:', ch)
raise RuntimeError("Out of bounds channel in get_counts!")
area[ch] += r['area'] # <-- Summing total area in channel
if r['record_i'] == 0:
count[ch] += 1
baseline_buffer[ch] += r['baseline']
baseline_rms_buffer[ch] += r['baseline_rms']
if (r['time'] > last_end_seen
and r['time'] + r['pulse_length'] * r['dt'] < next_start):
# This is a lone pulse
lone_count[ch] += 1
in_lone_pulse[ch] = True
lone_area[ch] += r['area']
else:
in_lone_pulse[ch] = False
last_end_seen = max(last_end_seen,
r['time'] + r['pulse_length'] * r['dt'])
elif in_lone_pulse[ch]:
# This is a subsequent fragment of a lone pulse
lone_area[ch] += r['area']
res = result[0]
res['pulse_count'][:] = count[:]
res['lone_pulse_count'][:] = lone_count[:]
res['pulse_area'][:] = area[:]
res['lone_pulse_area'][:] = lone_area[:]
means = (baseline_buffer/count)
means[np.isnan(means)] = NO_PULSE_COUNTS
res['baseline_mean'][:] = means[:]
res['baseline_rms_mean'][:] = (baseline_rms_buffer/count)[:]
##
# Misc
##
@export
@numba.njit(cache=True, nogil=True)
def mask_and_not(x, mask):
return x[mask], x[~mask]
@export
@numba.njit(cache=True, nogil=True)
def channel_split(rr, first_other_ch):
"""Return """
return mask_and_not(rr, rr['channel'] < first_other_ch)
@export
def check_overlaps(records, n_channels):
"""Raise a ValueError if any of the pulses in records overlap
Assumes records is already sorted by time.
"""
last_end = np.zeros(n_channels, dtype=np.int64)
channel, time = _check_overlaps(records, last_end)
if channel != -9999:
raise ValueError(
f"Bad data! In channel {channel}, a pulse starts at {time}, "
f"BEFORE the previous pulse in that same channel ended "
f"(at {last_end[channel]})")
@numba.njit(cache=True, nogil=True)
def _check_overlaps(records, last_end):
for r in records:
if r['time'] < last_end[r['channel']]:
return r['channel'], r['time']
last_end[r['channel']] = strax.endtime(r)
return -9999, -9999
```
#### File: tests/storage/test_rucio_remote.py
```python
import unittest
import straxen
import os
import strax
import shutil
class TestRucioRemote(unittest.TestCase):
"""
Test loading data from the rucio remote frontend
"""
def setUp(self) -> None:
self.run_id = '009104'
self.staging_dir = './test_rucio_remote'
def get_context(self, download_heavy: bool) -> strax.Context:
os.makedirs(self.staging_dir, exist_ok=True)
context = straxen.contexts.xenonnt_online(
output_folder=os.path.join(self.staging_dir, 'output'),
include_rucio_remote=True,
download_heavy=download_heavy,
_rucio_path=self.staging_dir,
_raw_path=os.path.join(self.staging_dir, 'raw'),
_database_init=False,
_processed_path=os.path.join(self.staging_dir, 'processed'),
)
return context
def tearDown(self):
if os.path.exists(self.staging_dir):
shutil.rmtree(self.staging_dir)
@unittest.skipIf(not straxen.HAVE_ADMIX, "Admix is not installed")
def test_download_no_heavy(self):
st = self.get_context(download_heavy=False)
with self.assertRaises(strax.DataNotAvailable):
rr = self.try_load(st, 'raw_records')
assert False, len(rr)
@unittest.skipIf(not straxen.HAVE_ADMIX, "Admix is not installed")
def test_download_with_heavy(self):
st = self.get_context(download_heavy=True)
rr = self.try_load(st, 'raw_records')
assert len(rr)
@unittest.skipIf(not straxen.HAVE_ADMIX, "Admix is not installed")
def test_download_with_heavy_and_high_level(self):
st = self.get_context(download_heavy=True)
pc = self.try_load(st, 'pulse_counts')
assert len(pc)
def test_did_to_dirname(self):
"""Simple formatting test of straxen.rucio_remote.did_to_dirname"""
did = 'xnt_038697:raw_records_aqmon-rfzvpzj4mf'
assert 'xnt_' not in straxen.rucio_remote.did_to_dirname(did)
with self.assertRaises(RuntimeError):
straxen.rucio_remote.did_to_dirname('a-b-c')
def try_load(self, st: strax.Context, target: str):
try:
rr = st.get_array(self.run_id, target)
except strax.DataNotAvailable as data_error:
message = (f'Could not find '
f'{st.key_for(self.run_id, target)} '
f'with the following frontends\n')
for sf in st.storage:
message += f'\t{sf}\n'
raise strax.DataNotAvailable(message) from data_error
return rr
def check_empty_context(self, context):
for sf in context.storage:
assert not context._is_stored_in_sf(self.run_id, 'raw_records', sf), sf
```
#### File: straxen/tests/test_contexts.py
```python
from straxen.contexts import xenon1t_dali, xenon1t_led, fake_daq, demo
from straxen.contexts import xenonnt_led, xenonnt_online, xenonnt
import straxen
import tempfile
import os
import unittest
##
# XENONnT
##
def test_xenonnt_online():
st = xenonnt_online(_database_init=False)
st.search_field('time')
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_xenonnt_online_with_online_frontend():
st = xenonnt_online(include_online_monitor=True)
for sf in st.storage:
if 'OnlineMonitor' == sf.__class__.__name__:
break
else:
raise ValueError(f"Online monitor not in {st.storage}")
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_xenonnt_online_rucio_local():
st = xenonnt_online(include_rucio_local=True, _rucio_local_path='./test')
for sf in st.storage:
if 'RucioLocalFrontend' == sf.__class__.__name__:
break
else:
raise ValueError(f"Online monitor not in {st.storage}")
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_xennonnt():
st = xenonnt(_database_init=False)
st.search_field('time')
def test_xenonnt_led():
st = xenonnt_led(_database_init=False)
st.search_field('time')
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_nt_is_nt_online():
# Test that nT and nT online are the same
st_online = xenonnt_online(_database_init=False)
st = xenonnt(_database_init=False)
for plugin in st._plugin_class_registry.keys():
print(f'Checking {plugin}')
nt_key = st.key_for('0', plugin)
nt_online_key = st_online.key_for('0', plugin)
assert str(nt_key) == str(nt_online_key)
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_offline():
"""
Let's try and see which CMT versions are compatible with this straxen
version
"""
cmt = straxen.CorrectionsManagementServices()
cmt_versions = list(cmt.global_versions)[::-1]
print(cmt_versions)
success_for = []
for global_version in cmt_versions:
try:
xenonnt(global_version)
success_for.append(global_version)
except straxen.CMTVersionError:
pass
print(f'This straxen version works with {success_for} but is '
f'incompatible with {set(cmt_versions)-set(success_for)}')
test = unittest.TestCase()
# We should always work for one offline and the online version
test.assertTrue(len(success_for) >= 2)
##
# XENON1T
##
def test_xenon1t_dali():
st = xenon1t_dali()
st.search_field('time')
def test_demo():
"""
Test the demo context. Since we download the folder to the current
working directory, make sure we are in a tempfolder where we
can write the data to
"""
with tempfile.TemporaryDirectory() as temp_dir:
try:
print("Temporary directory is ", temp_dir)
os.chdir(temp_dir)
st = demo()
st.search_field('time')
# On windows, you cannot delete the current process'
# working directory, so we have to chdir out first.
finally:
os.chdir('..')
def test_fake_daq():
st = fake_daq()
st.search_field('time')
def test_xenon1t_led():
st = xenon1t_led()
st.search_field('time')
##
# WFSim
##
# Simulation contexts are only tested when special flags are set
@unittest.skipIf('ALLOW_WFSIM_TEST' not in os.environ,
"if you want test wfsim context do `export 'ALLOW_WFSIM_TEST'=1`")
class TestSimContextNT(unittest.TestCase):
@staticmethod
def context(*args, **kwargs):
kwargs.setdefault('cmt_version', 'global_ONLINE')
return straxen.contexts.xenonnt_simulation(*args, **kwargs)
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_nt_sim_context_main(self):
st = self.context(cmt_run_id_sim='008000')
st.search_field('time')
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_nt_sim_context_alt(self):
"""Some examples of how to run with a custom WFSim context"""
self.context(cmt_run_id_sim='008000', cmt_run_id_proc='008001')
self.context(cmt_run_id_sim='008000',
cmt_option_overwrite_sim={'elife': 1e6})
self.context(cmt_run_id_sim='008000',
overwrite_fax_file_sim={'elife': 1e6})
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_nt_diverging_context_options(self):
"""
Test diverging options. Idea is that you can use different
settings for processing and generating data, should have been
handled by RawRecordsFromWFsim but is now hacked into the
xenonnt_simulation context
Just to show how convoluted this syntax for the
xenonnt_simulation context / CMT is...
"""
self.context(cmt_run_id_sim='008000',
cmt_option_overwrite_sim={'elife': ('elife_constant', 1e6, True)},
cmt_option_overwrite_proc={'elife': ('elife_constant', 1e5, True)},
overwrite_from_fax_file_proc=True,
overwrite_from_fax_file_sim=True,
_config_overlap={'electron_lifetime_liquid': 'elife'},
)
def test_nt_sim_context_bad_inits(self):
with self.assertRaises(RuntimeError):
self.context(cmt_run_id_sim=None, cmt_run_id_proc=None,)
@unittest.skipIf('ALLOW_WFSIM_TEST' not in os.environ,
"if you want test wfsim context do `export 'ALLOW_WFSIM_TEST'=1`")
def test_sim_context():
st = straxen.contexts.xenon1t_simulation()
st.search_field('time')
@unittest.skipIf(not straxen.utilix_is_configured(), "No db access, cannot test!")
def test_offline():
st = xenonnt('latest')
st.provided_dtypes()
``` |
{
"source": "jmosbacher/xeauth",
"score": 2
} |
#### File: xeauth/xeauth/cli.py
```python
import sys
import xeauth
import click
@click.command()
def main():
"""Console script for xeauth."""
click.echo("Replace this message by putting your code into "
"xeauth.cli.main")
click.echo("See click documentation at https://click.palletsprojects.com/")
return 0
@click.command()
def login():
xeauth.cli_login()
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
```
#### File: xeauth/xeauth/user_credentials.py
```python
import param
import httpx
import time
from .settings import config
from .tokens import XeToken
class UserCredentialsAuth(param.Parameterized):
AUTH_URL = param.String(config.OAUTH_DOMAIN.rstrip('/')+'/token')
audience = param.String(config.DEFAULT_AUDIENCE)
scope = param.String(config.DEFAULT_SCOPE)
client_id = param.String(config.DEFAULT_CLIENT_ID)
headers = param.Dict({'content-type': 'application/x-www-form-urlencoded'})
def login(self, username, password, audience=None, scope=None):
if scope is None:
scope = self.scope
if audience is None:
audience = self.audience
data = dict(
grant_type='password',
username=username,
password=password,
audience=audience,
scope=scope,
client_id=self.client_id,
# client_secret='<KEY>',
)
r = httpx.post(self.AUTH_URL, data=data, headers=self.headers)
r.raise_for_status()
kwargs = r.json()
kwargs['expires'] = time.time() + kwargs.pop('expires_in')
return XeToken(client_id=self.client_id, **kwargs)
```
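A minimal usage sketch for the class above, assuming the default OAuth settings in `xeauth.settings.config` point at a reachable password-grant endpoint; the username and password shown are placeholders, and `login` returns an `XeToken` built from the token response:
```python
# Illustrative only: credentials are placeholders, attribute names assumed from the token response.
from xeauth.user_credentials import UserCredentialsAuth

auth = UserCredentialsAuth()
token = auth.login(username="alice", password="not-a-real-password")
print(token.access_token)  # access_token is forwarded from the OAuth response
```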
#### File: xeauth/xeauth/xeauth.py
```python
import param
import panel as pn
import secrets
import httpx
import webbrowser
import time
import getpass
from xeauth.settings import config
from .oauth import XeAuthSession, NotebookSession
from .user_credentials import UserCredentialsAuth
from .certificates import certs
def user_login(username=None, password=None, **kwargs):
if username is None:
username = input('Username: ')
if password is None:
password = <PASSWORD>("Password: ")
auth = UserCredentialsAuth(**kwargs)
return auth.login(username=username, password=password)
def login(client_id=config.DEFAULT_CLIENT_ID, scopes=[], audience=config.DEFAULT_AUDIENCE,
notify_email=None, open_browser=False, print_url=False, **kwargs):
if isinstance(scopes, str):
scopes = scopes.split(" ")
scopes = list(scopes)
session = XeAuthSession(client_id=client_id, scopes=scopes, audience=audience, notify_email=notify_email, **kwargs)
# return session
return session.login(open_browser=open_browser, print_url=print_url)
def notebook_login(client_id=config.DEFAULT_CLIENT_ID, scopes=[],
audience=config.DEFAULT_AUDIENCE, notify_email=None, open_browser=True):
pn.extension()
if isinstance(scopes, str):
scopes = scopes.split(" ")
scopes = list(scopes)
session = NotebookSession(client_id=client_id, scopes=scopes, audience=audience, notify_email=notify_email)
session.login_requested(None)
if open_browser:
session.authorize()
return session
def cli_login(client_id=config.DEFAULT_CLIENT_ID, scopes=[],
audience=config.DEFAULT_AUDIENCE, notify_email=None):
if isinstance(scopes, str):
scopes = scopes.split(" ")
scopes = list(scopes)
session = login(client_id=client_id, scopes=scopes, audience=audience, notify_email=notify_email, print_url=True)
print("logged in as:")
print(session.profile)
print(f"Access token: {session.access_token}")
print(f"ID token: {session.id_token}")
def validate_claims(token, **claims):
return certs.validate_claims(token, **claims)
``` |
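A hedged sketch of the interactive flow defined above, assuming a local browser is available and that `login`/`cli_login` are re-exported at package level (as they are used elsewhere in this dump via `xeauth.login`):
```python
# Sketch only: the printed authorization URL must be opened and approved by a real user.
import xeauth

session = xeauth.login(open_browser=True)   # or xeauth.cli_login() from a terminal
print(session.access_token)                 # attribute name taken from cli_login() above
```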
{
"source": "jmosbacher/xepmts-server",
"score": 2
} |
#### File: xepmts-server/xepmts_server/cli.py
```python
import sys
import click
from xepmts_server import make_app
from werkzeug.serving import run_simple
from flask.cli import with_appcontext
@click.group()
def main():
"""Console script for xepmts_endpoints."""
click.echo("Replace this message by putting your code into "
"xepmts_endpoints.cli.main")
click.echo("See click documentation at https://click.palletsprojects.com/")
return 0
@main.command()
@with_appcontext
def run(address='0.0.0.0', port=5000):
app = make_app()
run_simple(address, port, app,
use_reloader=False, use_debugger=False, use_evalex=False)
@main.command()
@with_appcontext
def debug(address='0.0.0.0', port=5000):
app = make_app(debug=True)
run_simple(address, port, app,
use_reloader=True, use_debugger=True, use_evalex=True)
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
```
#### File: xepmts-server/xepmts_server/__init__.py
```python
from . import v1
from . import v2
from .server import run_simple, make_app, create_app
from .utils import add_server_spec_endpoint
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.1.12'
VERSIONS = {
"v1": v1,
"v2": v2,
}
DEFAULT_VERSION = "v2"
def get_server(version, server_spec=False, **kwargs):
app = VERSIONS[version].app.make_app(**kwargs)
if server_spec:
app = add_server_spec_endpoint(app)
return app
def default_server():
return get_server(DEFAULT_VERSION)
```
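A short sketch of serving one API version with the helpers above (assumes the Eve/Flask dependencies used by `make_app` are installed; host and port are arbitrary):
```python
# Sketch: serve the v2 app locally for development.
from werkzeug.serving import run_simple
from xepmts_server import get_server

app = get_server("v2", server_spec=True)
run_simple("127.0.0.1", 5000, app, use_reloader=False)
```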
#### File: xepmts-server/xepmts_server/server.py
```python
import os
from flask import Flask
from threading import Lock
from werkzeug.wsgi import pop_path_info, peek_path_info
from werkzeug.serving import run_simple as _run_simple
from .v1.app import make_app as make_v1_app
from .v1.auth import XenonTokenAuth
from .v2.app import make_app as make_v2_app
from .admin.app import make_app as make_admin_app
from .utils import PathDispatcher, PathMakerDispatcher, add_server_spec_endpoint
import xepmts_server
from eve_jwt import JWTAuth
APP_MAKERS = {
'v1': lambda: make_v1_app(auth=XenonTokenAuth, swagger=True),
'v2': lambda: make_v2_app(auth=JWTAuth, swagger=True)
}
PRODUCTION_CONFIGS = {
'v1': dict(auth=XenonTokenAuth, swagger=True),
'v2': dict(auth=JWTAuth, swagger=True)
}
DEBUG_CONFIGS = {
'v1': dict(swagger=True),
'v2': dict(swagger=True),
}
def create_app():
from eve_jwt import JWTAuth
from flask_swagger_ui import get_swaggerui_blueprint
v1 = make_v1_app(auth=XenonTokenAuth, swagger=True)
v2 = make_v2_app(auth=JWTAuth, swagger=True)
app_versions = {
"v1": v1,
"v2": v2,
}
app = Flask(__name__)
# @app.route("/")
# def hello():
# return "You have reached the PMT db."
app.config['SWAGGER_INFO'] = {
'title': 'XENON PMT API',
'version': '1.0',
'description': 'API for the XENON PMT database',
'termsOfService': 'https://opensource.org/ToS',
'contact': {
'name': '<NAME>',
'url': 'https://pmts.xen<EMAIL>',
"email": "<EMAIL>"
},
'license': {
'name': 'BSD',
'url': 'https://github.com/nicolaiarocci/eve-swagger/blob/master/LICENSE',
},
'schemes': ['http', 'https'],
}
config = {
'app_name': "PMT Database API",
"urls": [{"name": f"Xenon PMT Database {v.capitalize()}", "url": f"/{v}/api-docs" } for v in app_versions]
}
API_URL = '/v2/api-docs'
SWAGGER_URL = os.getenv('SWAGGER_URL_PREFIX', '')
SWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config=config,
)
app.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)
application = PathDispatcher(app,
app_versions)
return application
def settings_dict(module):
return {k: getattr(module, k) for k in dir(module) if k.isupper()}
def make_app(debug=False, overides={}, export_metrics=True, healthcheck=True):
from eve_jwt import JWTAuth
from flask_swagger_ui import get_swaggerui_blueprint
# if versions is None:
# versions = xepmts_server.VERSIONS
admin_auth = JWTAuth
if debug:
admin_auth = None
admin = make_admin_app(auth=admin_auth, swagger=True)
static_apps = {"admin": admin}
if debug:
configs = DEBUG_CONFIGS
else:
configs = PRODUCTION_CONFIGS
app_configs = {}
with admin.app_context():
for version, config_kwargs in configs.items():
kwargs = dict(config_kwargs)
kwargs.update(overides)
vmodule = getattr(xepmts_server, version)
settings = kwargs.get('settings', settings_dict(vmodule.settings))
endpoints = admin.data.driver.db[f'{version}_endpoints'].find()
endpoints = {endpoint.pop('name'): endpoint for endpoint in endpoints}
if endpoints:
print(f'endpoints for version {version} taken from database.')
settings['DOMAIN'] = endpoints
kwargs['settings'] = settings
kwargs['healthcheck'] = healthcheck
kwargs['export_metrics'] = export_metrics
app_configs[version] = kwargs
app = Flask(__name__)
app.config['SWAGGER_INFO'] = {
'title': 'XENON PMT API',
'version': '1.0',
'description': 'API for the XENON PMT database',
'termsOfService': 'https://opensource.org/ToS',
'contact': {
'name': '<NAME>',
'url': 'https://pmts.xenonnt.org',
"email": "<EMAIL>"
},
'license': {
'name': 'BSD',
'url': 'https://github.com/nicolaiarocci/eve-swagger/blob/master/LICENSE',
},
'schemes': ['http', 'https'],
}
swagger_config = {
'app_name': "PMT Database API",
"urls": [{"name": f"Xenon PMT Database {v.capitalize()}", "url": f"/{v}/api-docs" } for v in list(app_configs)+list(static_apps)]
}
API_URL = '/v2/api-docs'
SWAGGER_URL = os.getenv('SWAGGER_URL_PREFIX', '')
SWAGGERUI_BLUEPRINT = get_swaggerui_blueprint(
SWAGGER_URL,
API_URL,
config=swagger_config,
)
app.register_blueprint(SWAGGERUI_BLUEPRINT, url_prefix=SWAGGER_URL)
if export_metrics:
from prometheus_flask_exporter import PrometheusMetrics
PrometheusMetrics(app)
application = PathMakerDispatcher(app,
static_apps=static_apps,
app_configs=app_configs)
return application
def run_simple(address='0.0.0.0', port=5000, debug=True, reload=True, evalex=True):
app = make_app(debug=debug)
_run_simple(address, port, app,
use_reloader=debug, use_debugger=reload, use_evalex=evalex)
if __name__ == '__main__':
run_simple()
```
#### File: xepmts_server/v2/app.py
```python
import os
from xepmts_server.v2.domain import get_domain
from xepmts_server.v2 import settings
from xepmts_server.utils import clean_dict
# from eve_jwt import JWTAuth
SETTINGS_FILE = settings.__file__
def make_app(settings=SETTINGS_FILE, swagger=False,
export_metrics=False, healthcheck=True, auth=None, **kwargs):
from eve import Eve
app = Eve(settings=settings, auth=auth, **kwargs)
if swagger:
# from eve_swagger import swagger as swagger_blueprint
from eve_swagger import get_swagger_blueprint
swagger_blueprint = get_swagger_blueprint()
app.register_blueprint(swagger_blueprint, url_prefix=f'/{app.config["API_VERSION"]}')
app.config['SWAGGER_INFO'] = {
'title': 'XENON PMT API',
'version': '2.0',
'description': 'API for the XENON PMT database',
'termsOfService': 'https://opensource.org/ToS',
'contact': {
'name': '<NAME>',
'url': 'https://pmts.xenonnt.org',
"email": "<EMAIL>"
},
'license': {
'name': 'BSD',
'url': 'https://github.com/nicolaiarocci/eve-swagger/blob/master/LICENSE',
},
'schemes': ['http', 'https'],
}
if export_metrics:
from prometheus_flask_exporter import PrometheusMetrics
PrometheusMetrics(app, path=f'/{app.config["API_VERSION"]}/metrics')
if healthcheck:
from eve_healthcheck import EveHealthCheck
hc = EveHealthCheck(app, '/healthcheck')
@app.route(f'/{app.config["API_VERSION"]}/endpoints')
def endpoints():
return clean_dict(app.config['DOMAIN'])
return app
def make_local_app(**kwargs):
import eve
app = eve.Eve(settings=SETTINGS_FILE, **kwargs)
return app
def list_roles():
domain = get_domain()
roles = set()
for resource in domain.values():
roles.update(resource["allowed_read_roles"])
roles.update(resource["allowed_item_read_roles"])
roles.update(resource["allowed_write_roles"])
roles.update(resource["allowed_item_write_roles"])
roles = list(roles)
roles.sort(key=lambda x: x.split(":")[-1])
return roles
``` |
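A hedged sketch of building the version-2 app directly from this module (assumes the bundled v2 settings and whatever MongoDB backend they configure; `list_roles` only needs the domain definition and no database):
```python
# Sketch: unauthenticated local app plus the role names derived from the v2 domain.
from xepmts_server.v2.app import make_local_app, list_roles

print(list_roles())             # roles collected from the v2 domain definition
app = make_local_app()          # Eve app using the bundled v2 settings, no auth
app.run(port=5000, debug=True)  # Eve subclasses Flask, so the dev server is available
```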
{
"source": "jmosbacher/xepmts",
"score": 2
} |
#### File: xepmts/xepmts/cli.py
```python
import sys
import os
import click
import xepmts
@click.group()
def main():
"""Console script for xepmts."""
return 0
if __name__ == "__main__":
sys.exit(main()) # pragma: no cover
```
#### File: xepmts/db/endpoints.py
```python
import httpx
import logging
log = logging.getLogger(__name__)
def clean_nones(d):
return {k:v for k,v in d.items() if v is not None}
def get_endpoints(url, timeout=25, client=None):
if client is None:
client = httpx.Client()
log.info(f"Attempting to read endpoints from {url}")
r = client.get(url, timeout=timeout)
if not r.is_error:
        log.info('Endpoints read successfully from server.')
return {k: clean_nones(v) for k,v in r.json().items()}
def default_endpoints():
import xepmts_endpoints
return xepmts_endpoints.get_endpoints()
```
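A hedged example of fetching the endpoint definitions with the helper above; the URL is a placeholder, and `get_endpoints` returns a dict of endpoint definitions (with `None` values stripped) only for non-error responses:
```python
# Placeholder URL; substitute the endpoints URL of a deployed xepmts server.
from xepmts.db.endpoints import get_endpoints

endpoints = get_endpoints("https://example.org/v2/endpoints", timeout=10)
if endpoints:
    print(sorted(endpoints))   # resource names defined by the server
```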
#### File: xepmts/xepmts/__init__.py
```python
__author__ = """<NAME>"""
__email__ = '<EMAIL>'
__version__ = '0.5.7'
from getpass import getpass
import xeauth
from xepmts.db.client import default_client, get_admin_client
from xepmts.db.client import get_client as _get_client
from . import streams
from .settings import config
def login(version='v2',
username=None,
password=<PASSWORD>,
token=None,
scopes=["openid profile email offline_access read:all"],
auth_kwargs={},
**kwargs):
auth_kwargs = dict(auth_kwargs)
scope = " ".join(scopes)
audience = auth_kwargs.pop('audience', config.OAUTH_AUDIENCE)
if token is not None:
xetoken = xeauth.tokens.XeToken(access_token=token)
elif username is not None:
xetoken = xeauth.user_login(username=username, password=password, scope=scope, audience=audience, **auth_kwargs )
elif version=='v2':
xetoken = xeauth.login(scopes=scope, audience=audience, **auth_kwargs)
else:
        token = getpass('API Token: ')
xetoken = xeauth.tokens.XeToken(access_token=token)
try:
if xetoken.expired:
xetoken.refresh_tokens()
except:
pass
return _get_client(version, xetoken=xetoken, **kwargs)
get_client = login
def settings(**kwargs):
from eve_panel import settings as panel_settings
if not kwargs:
return dir(panel_settings)
else:
for k,v in kwargs.items():
setattr(panel_settings, k, v)
def extension():
import eve_panel
eve_panel.extension()
notebook = extension
```
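A minimal login sketch based on the function above: with `version='v2'` it starts the interactive xeauth flow, while passing an existing token skips the flow entirely (the token string below is a placeholder):
```python
# Sketch: obtain a client either interactively or from a pre-issued token.
import xepmts

client = xepmts.login(version='v2')                        # opens the xeauth browser flow
# client = xepmts.login(token="<existing-access-token>")   # placeholder token, no browser needed
```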
#### File: xepmts/streams/daq.py
```python
import panel as pn
import param
import numpy as np
import pandas as pd
import httpx
import logging
try:
import holoviews as hv
colormaps = hv.plotting.list_cmaps()
import hvplot.streamz
import streamz
from streamz.dataframe import DataFrame as sDataFrame
except:
colormaps = ["Plasma"]
from concurrent.futures import ThreadPoolExecutor
from panel.io.server import unlocked
from tornado.ioloop import IOLoop
executor = ThreadPoolExecutor(max_workers=20)
logger = logging.getLogger(__name__)
class DAQStreamz(param.Parameterized):
api_user = param.String()
api_key = param.String()
rate_columns = param.List(["array", "signal_channel", "time", "sector","detector",
"position_x", "position_y", "rate"], constant=True)
reader_info_columns = param.List(['time', 'reader', 'host', 'rate',
'status', 'mode', 'buffer_size'], constant=True)
reader_names = param.List(list(range(7)))
xaxis = param.Selector(["position_x", "sector"], default="position_x")
yaxis = param.Selector(["position_y", "array"], default="position_y")
colormap = param.Selector(colormaps, default="Plasma", )
groupby = param.Selector(["array", "detector"], default="array")
loading = param.Boolean(False, precedence=-1)
_sources = param.Dict(default=None, precedence=-1)
_rates = param.Parameter(default=None, precedence=-1)
_readers = param.Parameter(default=None, precedence=-1)
rates_base_info = param.DataFrame(default=None, precedence=-1)
readers_base_info = param.DataFrame(default=None, precedence=-1)
def reset_streams(self):
self._rates = None
self._readers = None
self._sources = None
def _fetch_single_reader(self, name):
try:
r = httpx.get(f"https://xenonnt.lngs.infn.it/api/getstatus/{name}",
params={'api_user': self.api_user, 'api_key': self.api_key })
r.raise_for_status()
resp = r.json()[0]
rates = resp["channels"]
result = {}
result["rates"] = {
"time": [pd.to_datetime(resp["time"])]*len(rates),
"signal_channel": [int(x) for x in rates.keys()],
"rate": list(rates.values()),
}
result["reader_info"] = {k: [v] for k,v in resp.items() if k in self.reader_info_columns}
result["reader_info"]["time"] = [pd.to_datetime(t) for t in result["reader_info"]["time"]]
except Exception as e:
print(e)
print(r.content)
result = {
"rates": self.rates_example[["time", "signal_channel", "rate"]].to_dict(orient="list"),
"reader_info": self.reader_info_example.to_dict(orient="list"),
}
return result
def emit(self, name, data):
logger.debug(f"emitting {name}. columns: {data.keys()} ")
self.sources[name].emit(data)
def data_ready_cb(self, name):
def cb(future):
data = future.result()
self.emit(name, data)
cb.__name__ = str(name) + "_data_ready"
return cb
def all_loaded(self, data):
self.loading = False
return data
@property
def rates_example(self):
return pd.DataFrame({col:[] for col in self.rate_columns}).astype({"time":'datetime64[ns]', "signal_channel": 'int64'})
@property
def reader_info_example(self):
return pd.DataFrame({col:[] for col in self.reader_info_columns})
@property
def sources(self):
if self._sources is None:
self._sources = {name: streamz.Stream() for name in self.reader_names}
return self._sources
@property
def rates(self):
if self._rates is None:
rate_streams = [source.pluck("rates").map(pd.DataFrame).filter(lambda df: len(df)>0) for source in self.sources.values()]
self._rates = streamz.zip(*rate_streams).map(pd.concat).map(self.all_loaded)
return self._rates
def convert_datetime(self, df):
if "time" in df.columns:
df["time"] = pd.to_datetime(df["time"])
return df
@property
def rates_df(self):
example = self.rates_example
stream = self.rates
if self.rates_base_info is not None:
base = self.rates_base_info.copy()
example = base.merge(example, how="outer")[self.rate_columns]
stream = stream.filter(lambda df: len(df)>0).map(lambda df: base.merge(df)).map(lambda df: df[self.rate_columns])
return sDataFrame(stream, example=example)
@property
def readers(self):
if self._readers is None:
reader_streams = [source.pluck("reader_info").map(pd.DataFrame) for source in self.sources.values()]
self._readers = streamz.zip(*reader_streams).map(pd.concat)
return self._readers
@property
def readers_df(self):
example = self.reader_info_example
stream = self.readers
if self.readers_base_info is not None:
base = self.readers_base_info.copy()
columns = example.columns
stream = stream.map(lambda df: df.merge(base)[columns])
return sDataFrame(stream, example=example)
def fetch(self, reader_name, asynchronous=True, timeout=None):
if reader_name in self.reader_names:
f = executor.submit(self._fetch_single_reader, reader_name)
else:
raise ValueError(f"No reader named {reader_name} options are: {self.reader_names}")
if asynchronous:
loop = IOLoop.current()
loop.add_future(f, self.data_ready_cb(reader_name))
return f
else:
data = f.result()
self.emit(reader_name, data)
def fetch_all(self, asynchronous=True, timeout=None):
futures = {}
self.loading = True
for reader, source in self.sources.items():
f = executor.submit(self._fetch_single_reader, reader)
futures[reader] = f
if asynchronous:
loop = IOLoop.current()
for name, f in futures.items():
loop.add_future(f, self.data_ready_cb(name))
return futures
for reader_name, f in futures.items():
data = f.result(timeout=timeout)
self.emit(reader_name, data)
def rate_plots(self, **kwargs):
plots = []
sdf = self.rates_df
nitems = len(self.rates_base_info)
psettings = dict(x=self.xaxis,
y=self.yaxis,
c="rate",
s=180,
cmap=self.colormap,
responsive=True,
backlog=nitems)
psettings.update(kwargs)
if self.rates_base_info is None:
return sdf.hvplot.scatter(**psettings)
groups = self.rates_base_info[self.groupby].unique()
if len(groups)>1:
aspect = 1.2
else:
aspect = 2
for group in groups:
nitems = len(self.rates_base_info[self.rates_base_info[self.groupby]==group])
plot = sdf[sdf[self.groupby]==group].hvplot.scatter(x=self.xaxis,
y=self.yaxis,
c="rate",
s=180,
aspect=aspect,
cmap=self.colormap,
responsive=True,
title=group,
backlog=nitems)
plots.append(plot)
return hv.NdLayout({group: plot for group,plot in zip(groups, plots)},
kdims=self.groupby).cols(2)
# return pn.layout.GridBox(*plots, ncols=2, sizing_mode="stretch_both")
# bsize = 500
# initial = None
# if self.rates_base_info is not None:
# bsize = len(self.rates_base_info)
# return hv.DynamicMap(self._rate_plots,
# streams=[hv.streams.Buffer(self.rates_df, length=bsize)])
def view(self):
return pn.panel(self.rate_plots)
def _repr_mimebundle_(self, include=None, exclude=None):
return self.view()._repr_mimebundle_(include=include, exclude=exclude)
def settings(self):
parameters = ["xaxis", "yaxis", "groupby"]
widgets = {}
return pn.Param(
self.param,
parameters=parameters,
widgets=widgets,
width=250,
)
class LiveDAQStreamz(DAQStreamz):
period = param.Number(2000) # milliseconds
count = param.Integer(default=None)
timeout = param.Number(default=None) #seconds
auto_start = param.Boolean(False)
running = param.Boolean(False, precedence=-1)
_cbs = param.List([], precedence=-1)
_view = param.Parameter(precedence=-1)
start_button = param.Action(lambda self: self.start(), label="Start")
stop_button = param.Action(lambda self: self.stop(), label="Stop")
futures = param.Dict({})
def callback(self):
if self.loading:
return
if not self.running:
return
self.futures = self.fetch_all(asynchronous=True)
def start(self):
self.stop()
cb = pn.state.add_periodic_callback(self.callback,
period=self.period,
count=self.count,
timeout=self.timeout)
self._cbs.append(cb)
self.running = True
def stop(self):
self.running = False
for cb in self._cbs:
if cb.running:
cb.stop()
self._cbs = []
@property
def sources(self):
if self._sources is None:
self._sources = super().sources
if self.auto_start:
self.start()
return self._sources
@param.depends("running")
def controls(self):
button = pn.widgets.Button(align="center", min_width=100,
sizing_mode="stretch_width")
if self.running:
button.name = "Stop"
button.on_click(lambda event: self.stop())
return button
else:
button.name = "Start"
button.on_click(lambda event: self.start())
return pn.Row(
button,
self.param.period,
self.param.timeout,
self.param.count,
align="center",
sizing_mode="stretch_width")
# @param.depends("running")
def stream_view(self):
return self.rate_plots()
# reader_info = pn.pane.DataFrame(self.readers_df, height=200, sizing_mode="stretch_width")
# tabs = pn.Tabs(("Rates", self.rate_plots()),
# # ("Reader info", reader_info),
# sizing_mode="stretch_both")
# if self.running:
# return self.rate_plots()
# else:
# return pn.Column("Not running.")
@param.depends("_view",)
def view(self):
if self._view is None:
# rates = pn.pane.DataFrame(self.rates_df, height=500, sizing_mode="stretch_width")
self._view = pn.Column(self.controls,
self.stream_view(),
height=600,
sizing_mode="stretch_width")
return self._view
def settings(self):
parameters = ["period", "count", "timeout"]
widgets = {}
params = pn.Param(
self.param,
parameters=parameters,
widgets=widgets,
expand_button=False,
width=250,
)
return pn.Column(params,
self.daq_stream.settings(),
width=250,)
def _repr_mimebundle_(self, include=None, exclude=None):
return self.view()._repr_mimebundle_(include=include, exclude=exclude)
class LiveDAQStreamzViewer(param.Parameterized):
CONFIGS = {
"tpc": dict(xaxis="position_x", yaxis="position_y", groupby="array", reader_names=[f"reader{i}_reader_0" for i in range(3)]),
"nveto": dict(xaxis="sector", yaxis="array", groupby="detector", reader_names=["reader6_reader_0", "reader6_reader_1"]),
"muveto": dict(xaxis="sector", yaxis="array", groupby="detector", reader_names=["reader5_reader_0"]),
}
client = param.Parameter(precedence=-1)
api_user = param.String()
api_key = param.String()
detector = param.Selector(list(CONFIGS))
daq_stream = param.Parameter(precedence=-1)
daq_streams = param.Dict({})
add_detector = param.Action(lambda self: self.add_stream(self.detector))
reload_detector = param.Action(lambda self: self.reload_stream(self.detector))
loading = param.Boolean(False, precedence=-1)
def add_stream(self, detector):
if detector not in self.daq_streams:
self.loading = True
try:
streams = self.daq_streams
config = self.CONFIGS[detector]
installs = getattr(self.client, detector).installs.to_dataframe()
installs["signal_channel"] = installs.pmt_index.astype("int64")
stream = LiveDAQStreamz(name=detector,
api_key=self.api_key,
api_user=self.api_user,
rates_base_info=installs,
**config
)
streams[detector] = stream
self.daq_streams = streams
finally:
self.loading = False
self.daq_stream = self.daq_streams[detector]
def reload_stream(self, detector):
stream = self.daq_streams.pop(detector, None)
if stream:
stream.stop()
self.add_stream(detector)
@param.depends("daq_stream", "loading")
def streams_view(self):
if self.loading:
return pn.indicators.LoadingSpinner(value=True)
if self.daq_stream is None:
return pn.Column("## No streams loaded yet")
# streams = pn.Tabs(*[(k, v.view) for k,v in self.daq_streams.items()])
return self.daq_stream.view()
def controls(self):
return pn.Row(self.param.detector,
self.param.add_detector,
self.param.reload_detector,
sizing_mode="stretch_width")
def view(self):
return pn.Column(
self.controls(),
self.streams_view,
sizing_mode="stretch_both"
)
def _repr_mimebundle_(self, include=None, exclude=None):
return self.view()._repr_mimebundle_(include=include, exclude=exclude)
``` |
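A hedged sketch of wiring up the live viewer above inside a notebook; the API credentials are placeholder stand-ins, and the client is assumed to expose `<detector>.installs.to_dataframe()` exactly as `add_stream` requires:
```python
# Sketch only: api_user/api_key are hypothetical, and a logged-in xepmts client is assumed.
import xepmts
from xepmts.streams.daq import LiveDAQStreamzViewer

client = xepmts.login(version='v2')
viewer = LiveDAQStreamzViewer(client=client, api_user="daq_user", api_key="daq_key")
viewer.add_stream("tpc")      # builds a LiveDAQStreamz for the TPC readers
viewer.view()                 # panel layout with start/stop controls
```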
{
"source": "jmosbacher/xestore",
"score": 2
} |
#### File: xestore/client/auth.py
```python
import param
import panel as pn
import secrets
import httpx
import webbrowser
from xestore.settings import config
import time
class Oauth2DeviceFlow(param.Parameterized):
auth_server_uri = param.String(
label="Authentication server",
default=config.AUTH_SERVER_URI,
regex=r"(?i)\b((?:https?://|www\d{0,3}[.]|[a-z0-9.\-]+[.][a-z]{2,4}/)(?:[^\s()<>]+|\(([^\s()<>]+|(\([^\s()<>]+\)))*\))+(?:\(([^\s()<>]+|(\([^\s()<>]+\)))*\)|[^\s`!()\[\]{};:'\".,<>?«»“”‘’]))")
code_request_path = param.String(default="code/", label="Code request path")
token_request_path = param.String(default="token/",label="Token request path")
verification_path = param.String(default="authorize/",label="Verification path")
extra_headers = param.Dict(label="Extra headers")
client_id = param.String(default="xestore_client", label="Client ID")
notify_email = param.String(default="", label="Notify Email")
device_code = param.String()
user_code = param.String()
token = param.String(default=config.API_TOKEN)
_cb = param.Parameter(default=None)
def get_client(self):
return httpx.Client(base_url=self.auth_server_uri, headers=self.extra_headers)
@property
def authorize_url(self):
return f"{self.auth_server_uri.strip('/')}/{self.verification_path.strip('/')}?user_code={self.user_code}"
@property
def authenticated(self):
return bool(self.token)
def initiate_flow(self):
data = {}
with self.get_client() as client:
try:
resp = client.post(self.code_request_path,
data={"client_id": self.client_id,
"notify_email": self.notify_email})
data = resp.json()
except:
pass
self.device_code = data.get("device_code", "")
self.user_code = data.get("user_code", "")
interval = data.get("interval", 3)
timeout = data.get("expires_in", 300)
if not self.user_code:
return
if not self.device_code:
return
if self._cb is not None:
self._cb.stop()
self._cb = pn.state.add_periodic_callback(self.callback,
period=interval*1000,
count=int(timeout/interval)+1,
)
def authorize(self):
return webbrowser.open(self.authorize_url)
def authorize_link(self):
html_pane = pn.pane.HTML(f"""
<a id="log-in-link" class="nav-link" href="{self.authorize_url}" target="_blank">
Authorize
</a>""",
style={"cursor": "pointer",
"border": "1px solid #ddd",
"border-radius": "4px",
"padding": "5px",})
return html_pane
def await_token(self):
with self.get_client() as client:
for _ in range(int(self.timeout/self.interval)+1):
data = {}
try:
resp = client.post(self.token_request_path,
data={"client_id": self.client_id,
"device_code": self.device_code,})
data = resp.json()
except:
pass
token = data.get("access_token", "")
if token:
self.token = token
break
time.sleep(self.interval)
return token
def check_token(self):
data = {}
with self.get_client() as client:
try:
resp = client.post(self.token_request_path,
data={"client_id": self.client_id,
"device_code": self.device_code,})
data = resp.json()
except:
pass
return data.get("access_token", "")
def callback(self):
token = self.check_token()
if token and self._cb is not None:
self.token = token
self._cb.stop()
self._cb = None
@param.depends("_cb", "token")
def credentials_view(self):
init_flow_button = pn.widgets.Button(name="Generate",
button_type="primary",
width=70)
init_flow_button.on_click(lambda event: self.initiate_flow())
params = pn.Param(self.param, parameters=["token", "auth_server_uri",
"client_id","notify_email"],
widgets={"token": {"type":pn.widgets.TextAreaInput,
"width":300}},
max_width=300,
sizing_mode="stretch_width")
buttons = pn.Row(init_flow_button)
if self._cb is not None:
buttons.append(self.authorize_link())
buttons.append(pn.indicators.LoadingSpinner(value=True, width=20, height=20))
return pn.Column(params, buttons, sizing_mode="stretch_width", width=300)
def perform_flow(self):
self.initiate_flow()
return pn.Column(self.view)
def get_headers(self):
if self.token:
return {"Authorization": f"Bearer {self.token}"}
else:
return {}
def set_token(self, token):
self.token = token
def login(self, webbrowser=True):
self.initiate_flow()
print(f"Authorization URL: {self.authorize_url}")
if webbrowser:
self.authorize()
def make_panel(self):
# config = ["auth_server_uri", "client_id"]
# advanced = ["code_request_path", "token_request_path",
# "verification_path", "extra_headers",]
# tabs = pn.Tabs(
# ("settings", pn.Param(self.param, parameters=config)),
# ("Credentials", self.credentials_view),
# )
return pn.panel(self.credentials_view)
def __getstate__(self):
state = super().__getstate__()
state.pop("_cb", None)
return state
``` |
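A short device-flow sketch using the class above; the import path is inferred from the file path, the auth server configured in `xestore.settings.config` is assumed reachable, and outside a live Panel/Tornado event loop the periodic polling may not fire, hence the manual `check_token` call:
```python
# Sketch: start the flow, approve it in the browser, then attach the token to requests.
from xestore.client.auth import Oauth2DeviceFlow

flow = Oauth2DeviceFlow()
flow.login(webbrowser=True)          # prints the authorization URL and opens a browser
# ... after approving in the browser, poll once for the token:
flow.set_token(flow.check_token())
headers = flow.get_headers()         # {"Authorization": "Bearer <token>"} once the token is set
```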
{
"source": "jmosky12/huxley",
"score": 2
} |
#### File: api/tests/test_user.py
```python
import json
from django.core.urlresolvers import reverse
from django.test import TestCase
from django.test.client import Client
from huxley.accounts.models import User
from huxley.api.tests import (CreateAPITestCase, DestroyAPITestCase,
ListAPITestCase, PartialUpdateAPITestCase,
RetrieveAPITestCase)
from huxley.utils.test import TestSchools, TestUsers
class UserDetailGetTestCase(RetrieveAPITestCase):
url_name = 'api:user_detail'
def test_anonymous_user(self):
'''It should reject request from an anonymous user.'''
user = TestUsers.new_user()
response = self.get_response(user.id)
self.assertNotAuthenticated(response)
def test_other_user(self):
'''It should reject request from another user.'''
user1 = TestUsers.new_user(username='user1')
user2 = TestUsers.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(user1.id)
self.assertPermissionDenied(response)
def test_superuser(self):
'''It should return the correct fields for a superuser.'''
user1 = TestUsers.new_user(username='user1')
user2 = TestUsers.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(user1.id)
self.assertEqual(response.data, {
'id': user1.id,
'username': user1.username,
'first_name': user1.first_name,
'last_name': user1.last_name,
'user_type': user1.user_type,
'school': user1.school_id,
'committee': user1.committee_id})
def test_self(self):
'''It should return the correct fields for a single user.'''
school = TestSchools.new_school()
user = school.advisor
self.client.login(username=user.username, password='<PASSWORD>')
response = self.get_response(user.id)
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': user.user_type,
'school': {
'id': school.id,
'registered': school.registered.isoformat(),
'name': school.name,
'address': school.address,
'city': school.city,
'state': school.state,
'zip_code': school.zip_code,
'country': school.country,
'primary_name': school.primary_name,
'primary_gender': school.primary_gender,
'primary_email': school.primary_email,
'primary_phone': school.primary_phone,
'primary_type': school.primary_type,
'secondary_name': school.secondary_name,
'secondary_gender': school.secondary_gender,
'secondary_email': school.secondary_email,
'secondary_phone': school.secondary_phone,
'secondary_type': school.secondary_type,
'program_type': school.program_type,
'times_attended': school.times_attended,
'international': school.international,
'waitlist': school.waitlist,
'beginner_delegates': school.beginner_delegates,
'intermediate_delegates': school.intermediate_delegates,
'advanced_delegates': school.advanced_delegates,
'spanish_speaking_delegates': school.spanish_speaking_delegates,
'country_preferences': school.country_preference_ids,
'prefers_bilingual': school.prefers_bilingual,
'prefers_specialized_regional':
school.prefers_specialized_regional,
'prefers_crisis': school.prefers_crisis,
'prefers_alternative': school.prefers_alternative,
'prefers_press_corps': school.prefers_press_corps,
'registration_comments': school.registration_comments,
'fees_owed': float(school.fees_owed),
'fees_paid': float(school.fees_paid),
},
'committee': user.committee_id})
def test_chair(self):
'''It should have the correct fields for chairs.'''
user = TestUsers.new_user(user_type=User.TYPE_CHAIR,
committee_id=4)
self.client.login(username='testuser', password='<PASSWORD>')
response = self.get_response(user.id)
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': user.user_type,
'school': user.school_id,
'committee': user.committee_id})
class UserDetailDeleteTestCase(DestroyAPITestCase):
url_name = 'api:user_detail'
def setUp(self):
self.user = TestUsers.new_user(username='user1', password='<PASSWORD>')
def test_anonymous_user(self):
'''It should reject the request from an anonymous user.'''
response = self.get_response(self.user.id)
self.assertNotAuthenticated(response)
self.assertTrue(User.objects.filter(id=self.user.id).exists())
def test_other_user(self):
'''It should reject the request from another user.'''
TestUsers.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id)
self.assertPermissionDenied(response)
self.assertTrue(User.objects.filter(id=self.user.id).exists())
def test_self(self):
'''It should allow a user to delete themself.'''
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response(self.user.id)
self.assertEqual(response.status_code, 204)
self.assertFalse(User.objects.filter(id=self.user.id).exists())
def test_superuser(self):
'''It should allow a superuser to delete a user.'''
TestUsers.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id)
self.assertEqual(response.status_code, 204)
self.assertFalse(User.objects.filter(id=self.user.id).exists())
class UserDetailPatchTestCase(PartialUpdateAPITestCase):
url_name = 'api:user_detail'
params = {'first_name': 'first',
'last_name': 'last'}
def setUp(self):
self.user = TestUsers.new_user(username='user1', password='<PASSWORD>')
def test_anonymous_user(self):
'''An anonymous user should not be able to change information.'''
response = self.get_response(self.user.id, params=self.params)
self.assertNotAuthenticated(response)
user = User.objects.get(id=self.user.id)
self.assertEqual(user.first_name, 'Test')
self.assertEqual(user.last_name, 'User')
def test_other_user(self):
'''Another user should not be able to change information about any other user.'''
TestUsers.new_user(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
self.assertPermissionDenied(response)
user = User.objects.get(id=self.user.id)
self.assertEqual(user.first_name, 'Test')
self.assertEqual(user.last_name, 'User')
def test_self(self):
'''A User should be allowed to change information about himself.'''
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
user = User.objects.get(id=self.user.id)
self.assertEqual(response.data['first_name'], user.first_name)
self.assertEqual(response.data['last_name'], user.last_name)
def test_superuser(self):
'''A superuser should be allowed to change information about a user.'''
TestUsers.new_superuser(username='user2', password='<PASSWORD>')
self.client.login(username='user2', password='<PASSWORD>')
response = self.get_response(self.user.id, params=self.params)
user = User.objects.get(id=self.user.id)
self.assertEqual(response.data['first_name'], user.first_name)
self.assertEqual(response.data['last_name'], user.last_name)
class UserListGetTestCase(ListAPITestCase):
url_name = 'api:user_list'
def test_anonymous_user(self):
'''It should reject the request from an anonymous user.'''
TestUsers.new_user(username='user1')
TestUsers.new_user(username='user2')
response = self.get_response()
self.assertNotAuthenticated(response)
def test_user(self):
'''It should reject the request from a regular user.'''
TestUsers.new_user(username='user1', password='<PASSWORD>')
TestUsers.new_user(username='user2')
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response()
self.assertPermissionDenied(response)
def test_superuser(self):
'''It should allow a superuser to list all users.'''
user1 = TestUsers.new_superuser(username='user1', password='<PASSWORD>')
user2 = TestUsers.new_user(username='user2')
self.client.login(username='user1', password='<PASSWORD>')
response = self.get_response()
self.assertEqual(response.data, [
{'id': user1.id,
'username': user1.username,
'first_name': user1.first_name,
'last_name': user1.last_name,
'user_type': user1.user_type,
'school': user1.school_id,
'committee': user1.committee_id},
{'id': user2.id,
'username': user2.username,
'first_name': user2.first_name,
'last_name': user2.last_name,
'user_type': user2.user_type,
'school': user2.school_id,
'committee': user2.committee_id}])
class UserListPostTestCase(CreateAPITestCase):
url_name = 'api:user_list'
params = {'username': 'Kunal',
'password': 'password',
'first_name': 'Kunal',
'last_name': 'Mehta'}
def test_valid(self):
params = self.get_params()
response = self.get_response(params)
user_query = User.objects.filter(id=response.data['id'])
self.assertTrue(user_query.exists())
user = User.objects.get(id=response.data['id'])
self.assertEqual(response.data, {
'id': user.id,
'username': user.username,
'first_name': user.first_name,
'last_name': user.last_name,
'user_type': User.TYPE_ADVISOR,
'school': user.school_id,
'email': user.email})
def test_empty_username(self):
response = self.get_response(params=self.get_params(username=''))
self.assertEqual(response.data, {
'username': ['This field is required.']})
def test_taken_username(self):
TestUsers.new_user(username='_Kunal', password='<PASSWORD>')
response = self.get_response(params=self.get_params(username='_Kunal'))
self.assertEqual(response.data, {
'username': ['This username is already taken.']})
def test_invalid_username(self):
response = self.get_response(params=self.get_params(username='>Kunal'))
self.assertEqual(response.data, {
'username': ['Usernames may contain alphanumerics, underscores, '
'and/or hyphens only.']})
def test_empty_password(self):
response = self.get_response(params=self.get_params(password=''))
self.assertEqual(response.data, {
'password': ['<PASSWORD>.']})
def test_invalid_password(self):
response = self.get_response(params=self.get_params(password='><PASSWORD>'))
self.assertEqual(response.data, {
'password': ['Password contains invalid characters.']})
def test_empty_first_name(self):
response = self.get_response(params=self.get_params(first_name=''))
self.assertEqual(response.data, {
'first_name': ['This field is required.']})
def test_empty_last_name(self):
response = self.get_response(params=self.get_params(last_name=''))
self.assertEqual(response.data, {
'last_name': ['This field is required.']})
def test_username_length(self):
response = self.get_response(params=self.get_params(username='user'))
self.assertEqual(response.data, {
'username': ['Username must be at least 5 characters.']})
def test_password_length(self):
response = self.get_response(params=self.get_params(password='<PASSWORD>'))
self.assertEqual(response.data, {
'password': ['Password must be at least 6 characters.']})
class CurrentUserTestCase(TestCase):
def setUp(self):
self.client = Client()
self.url = reverse('api:current_user')
def get_data(self, url):
return json.loads(self.client.get(url).content)
def test_login(self):
user = TestUsers.new_user(username='lol', password='<PASSWORD>')
user2 = TestUsers.new_user(username='bunny', password='<PASSWORD>')
credentials = {'username': 'lol', 'password': '<PASSWORD>'}
response = self.client.post(self.url,
data=json.dumps(credentials),
content_type='application/json')
self.assertEqual(response.status_code, 201)
self.assertEqual(self.client.session['_auth_user_id'], user.id)
credentials = {'username': 'bunny', 'password': '<PASSWORD>'}
response = self.client.post(self.url,
data=json.dumps(credentials),
content_type='application/json')
self.assertEqual(self.client.session['_auth_user_id'], user.id)
data = json.loads(response.content)
self.assertEqual(data['detail'],
'Another user is currently logged in.')
def test_logout(self):
user = TestUsers.new_user(username='lol', password='<PASSWORD>')
self.client.login(username='lol', password='<PASSWORD>')
self.assertEqual(self.client.session['_auth_user_id'], user.id)
response = self.client.delete(self.url)
self.assertEqual(response.status_code, 204)
self.assertTrue('_auth_user_id' not in self.client.session)
def test_get(self):
data = self.get_data(self.url)
self.assertEqual(len(data.keys()), 1)
self.assertEqual(data['detail'], 'Not found')
school = TestSchools.new_school()
user = school.advisor
self.client.login(username=user.username, password='<PASSWORD>')
data = self.get_data(self.url)
self.assertEqual(len(data.keys()), 7)
self.assertEqual(data['id'], user.id)
self.assertEqual(data['username'], user.username)
self.assertEqual(data['first_name'], user.first_name)
self.assertEqual(data['last_name'], user.last_name)
self.assertEqual(data['user_type'], User.TYPE_ADVISOR)
self.assertEqual(data['school'], {
'id': school.id,
'registered': school.registered.isoformat(),
'name': school.name,
'address': school.address,
'city': school.city,
'state': school.state,
'zip_code': school.zip_code,
'country': school.country,
'primary_name': school.primary_name,
'primary_gender': school.primary_gender,
'primary_email': school.primary_email,
'primary_phone': school.primary_phone,
'primary_type': school.primary_type,
'secondary_name': school.secondary_name,
'secondary_gender': school.secondary_gender,
'secondary_email': school.secondary_email,
'secondary_phone': school.secondary_phone,
'secondary_type': school.secondary_type,
'program_type': school.program_type,
'times_attended': school.times_attended,
'international': school.international,
'waitlist': school.waitlist,
'beginner_delegates': school.beginner_delegates,
'intermediate_delegates': school.intermediate_delegates,
'advanced_delegates': school.advanced_delegates,
'spanish_speaking_delegates': school.spanish_speaking_delegates,
'country_preferences': school.country_preference_ids,
'prefers_bilingual': school.prefers_bilingual,
'prefers_specialized_regional': school.prefers_specialized_regional,
'prefers_crisis': school.prefers_crisis,
'prefers_alternative': school.prefers_alternative,
'prefers_press_corps': school.prefers_press_corps,
'registration_comments': school.registration_comments,
'fees_owed': float(school.fees_owed),
'fees_paid': float(school.fees_paid),
})
```
#### File: huxley/scripts/copyright.py
```python
from datetime import date
from os.path import abspath, dirname
from re import sub
from subprocess import check_output
HUXLEY_ROOT = abspath(dirname(dirname(__file__)))
COPYRIGHT_YEAR = '2011-%d' % date.today().year
COPYRIGHT_TEXT = '\n'.join(('# Copyright (c) %s Berkeley Model United Nations. All rights reserved.' % COPYRIGHT_YEAR,
'# Use of this source code is governed by a BSD License (see LICENSE).\n'))
def update_copyright():
'''Add or update copyright headers for python files.'''
files = check_output(['find', HUXLEY_ROOT, '-type', 'f']).split('\n')
py_files = [f for f in files if f.endswith('.py')]
for filename in py_files:
with open(filename, 'r+') as f:
contents = f.read()
f.seek(0)
if not contents.startswith('# Copyright'):
f.write(COPYRIGHT_TEXT)
if len(contents) > 0:
f.write('\n')
f.write(contents)
else:
f.write(sub('2011\-\d{4}', COPYRIGHT_YEAR, contents))
if __name__ == '__main__':
update_copyright()
``` |
{
"source": "Jmos-Mbugua/Corruption-Feed",
"score": 2
} |
#### File: app/main/views.py
```python
from app.main import main
from flask import render_template, abort
from .forms import UpdateProfile
from .. import db, photos
from flask_login import login_required
from flask import render_template, abort,request,redirect,url_for
from flask_login import login_user,login_required,logout_user
from .forms import UpdateProfile
from app.models import User
from .. import db,photos
@main.route('/')
def index():
return render_template('index.html')
@main.route('/user/<uname>')
def profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
return render_template("profile/profile.html", user = user)
@main.route('/user/<uname>/update', methods = ['GET', 'POST'])
@login_required
def update_profile(uname):
user = User.query.filter_by(username = uname).first()
if user is None:
abort(404)
form = UpdateProfile()
if form.validate_on_submit():
user.bio = form.bio.data
db.session.add(user)
db.session.commit()
return redirect(url_for('.profile', uname=user.username))
return render_template('profile/update.html', form=form)
@main.route('/user/<uname>/update/pic',methods= ['POST'])
@login_required
def update_pic(uname):
user = User.query.filter_by(username = uname).first()
if 'photo' in request.files:
filename = photos.save(request.files['photo'])
path = f'photos/{filename}'
user.profile_pic_path = path
db.session.commit()
return redirect(url_for('main.profile',uname=uname))
``` |
{
"source": "Jmos-Mbugua/News-Highlight",
"score": 3
} |
#### File: News-Highlight/tests/test_sources.py
```python
import unittest
from app.models import Sources
# Sources = sources.Sources
class TestSources(unittest.TestCase):
'''
Test class to test the behaviour of the sources class
'''
def setUp(self):
'''
Setup method that will run before every test
'''
self.new_sources = Sources('cbs-news', 'CBS News', 'CBS News: dedicated to providing the best in journalism under standards it pioneered at the dawn of radio and television and continue in the digital age.', 'http://www.cbsnews.com', 'general', 'en', 'us')
def test_instance(self):
self.assertTrue(isinstance(self.new_sources, Sources))
if __name__ == '__main__':
unittest.main()
``` |
{
"source": "JMosqueraM/algoritmos_y_programacion",
"score": 4
} |
#### File: taller_estructuras_de_control/codigo_python_ejercicios/ejercicio_10.py
```python
def chelin_a_peseta(chelin):
taza = 956.871 / 100
peseta_convertida = round(chelin * taza, 3)
return peseta_convertida
def dracma_a_franco(dracma):
    taza_dracma_peseta = 88.607 / 100  # Conversion rate from Drachmas to Pesetas
    dracma_peseta = dracma * taza_dracma_peseta  # Convert Drachmas to Pesetas
    taza = 1 / 20.110  # Conversion rate from Pesetas to Drachmas
dracma_convertida = round(dracma_peseta * taza, 3)
return dracma_convertida
def franco_a_dracma(franco):
taza_franco_peseta = 323.728 / 100
franco_peseta = franco * taza_franco_peseta
taza = 100 / 88.607
franco_convertida = round(franco_peseta * taza, 3)
return franco_convertida
def peseta_a_dolar(peseta):
taza = 1 / 122.499
peseta_dolar_convertida = round(peseta * taza, 3)
return peseta_dolar_convertida
def peseta_a_lira(peseta):
taza = 100 / 9.289
peseta_lira_convertida = round(peseta * taza, 3)
return peseta_lira_convertida
# Ask the user for the amount of each currency they want to convert
chelines = float(input("Ingrese el numero de Chelines que desea convertir a pesetas: "))
dracmas = float(input("Ingrese el numero de Dracmas Griegos que desea convertir a Francos Franceses: "))
pesetas = float(input("Ingrese el numero de Pesetas que desea convertir a Dolares y Liras Italianas: "))
# Print the conversion results for the user
print(f"{chelines} Chelines equivalen a {chelin_a_peseta(chelines)} pesetas")
print(f"{dracmas} Dracmas Griegos equivalen a {dracma_a_franco(dracmas)} Francos Franceses")
print(f"{pesetas} Pesetas equivalen a {peseta_a_dolar(pesetas)} Dolares o {peseta_a_lira(pesetas)} Liras Italianas")
# The following line can be used to verify the French Franc conversion (round trip back to Drachmas)
# print(f"{dracma_a_franco(dracmas)} Francos Franceses equivalen a {franco_a_dracma(dracma_a_franco(dracmas))} Francos Franceses")
```
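A quick hand-check of the arithmetic above, assuming the two helper functions are defined in the interpreter session (running the script itself prompts for input); the expected values follow directly from the hard-coded rates:
```python
# 200 schillings * (956.871 / 100) pesetas per schilling = 1913.742 pesetas
assert chelin_a_peseta(200) == 1913.742
# 1000 pesetas / 122.499 pesetas per dollar ~= 8.163 dollars (rounded to 3 decimals)
assert peseta_a_dolar(1000) == 8.163
```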
#### File: algoritmos_y_programacion/taller_estructuras_de_control_selectivas/ejercicio_13.py
```python
def zodiaco(DD, MM):
if (((DD >= 22) and (MM == 11)) or ((DD <=21) and (MM == 12))):
return("Sagitario")
if (((DD >= 22) and (MM == 12)) or ((DD <=20) and (MM == 1))):
return("Capricornio")
if (((DD >= 21) and (MM == 1)) or ((DD <=19) and (MM == 2))):
return("Acuario")
if (((DD >= 20) and (MM == 2)) or ((DD <=19) and (MM == 3))):
return("Piscis")
if (((DD >= 21) and (MM == 3)) or ((DD <=20) and (MM == 4))):
return("Aries")
if (((DD >= 21) and (MM == 4)) or ((DD <=21) and (MM == 5))):
return("Tauro")
if (((DD >= 22) and (MM == 5)) or ((DD <=21) and (MM == 6))):
return("Geminis")
if (((DD >= 22) and (MM == 6)) or ((DD <=22) and (MM == 7))):
return("Cancer")
if (((DD >= 23) and (MM == 7)) or ((DD <=23) and (MM == 8))):
return("Leo")
if (((DD >= 24) and (MM == 8)) or ((DD <=22) and (MM == 9))):
return("Virgo")
if (((DD >= 23) and (MM == 9)) or ((DD <=22) and (MM == 10))):
return("Libra")
if (((DD >= 23) and (MM == 10)) or ((DD <=21) and (MM == 11))):
return("Escorpion")
fecha_str = input("Ingrese la fecha de nacimiento (DD/MM/AAAA): ")
fecha = fecha_str.split("/")
fecha_int = []
for elemento in fecha:
fecha_int.append(int(elemento))
dia = fecha_int[0]
mes = fecha_int[1]
ano = fecha_int[2]
signo = zodiaco(dia, mes)
print(f"Siendo que su fecha de nacimiento es {fecha_str}, su signo zodiacal corresponde a {signo} y tiene {abs(ano - 2021)} años")
``` |
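A small spot-check of the `zodiaco` helper above, assuming the function is defined in the interpreter session (the script prompts for a date when run); the dates were checked by hand against the ranges in the code:
```python
assert zodiaco(15, 8) == "Leo"           # 15 August falls in the Leo range
assert zodiaco(25, 12) == "Capricornio"  # 25 December falls in the Capricorn range
```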
{
"source": "jmossberg/ParseBankStatement",
"score": 3
} |
#### File: jmossberg/ParseBankStatement/parsebankstatement.py
```python
import argparse
import re
import time
import os.path
class ErrorInputLineEndsWithCsv(Exception):
def __init__(self, message):
self.message = message
class ErrorOutputFileAlreadyExists(Exception):
def __init__(self, message):
self.message = message
class FileReader:
def __init__(cls, file_name):
cls.f_input = open(file_name, 'r')
def __del__(cls):
cls.f_input.close()
def read_line(cls):
line = cls.f_input.readline()
if line == "":
return None
return line
def __iter__(cls):
return cls
def __next__(cls):
line = cls.read_line()
if line is None:
raise StopIteration
return line
class FileWriter:
def __init__(cls, file_name):
if os.path.isfile(file_name):
raise ErrorOutputFileAlreadyExists("Output file name already exists")
cls.f_output = open(file_name, 'w')
def __del__(cls):
cls.f_output.close()
def write_line(cls, line):
cls.f_output.write(line)
class OutputFileName:
ERROR_MSG_INPUT_FILE_ENDS_WITH_CSV = "Input file must not end with .csv"
def create_output_file_name(cls, input_file):
pcsv = re.compile(r"\.csv$")
if pcsv.search(input_file):
raise ErrorInputLineEndsWithCsv(cls.ERROR_MSG_INPUT_FILE_ENDS_WITH_CSV)
ptxt = re.compile(r"\.txt$")
input_file_elements = ptxt.split(input_file)
input_file_without_postfix = input_file_elements[0]
output_file_name = input_file_without_postfix + ".csv"
return output_file_name
class StatementConverter:
def __init__(cls, statement_line_converter, file_reader, file_writer):
cls.statement_line_converter = statement_line_converter
cls.file_reader = file_reader
cls.file_writer = file_writer
def add_csv_header(cls, file_writer):
out_line = "Date,Payee,Category,Memo,Outflow,Inflow\n"
file_writer.write_line(out_line)
def convert(cls):
cls.add_csv_header(cls.file_writer)
for line in cls.file_reader:
converted_line = cls.statement_line_converter.convert_line(line)
if len(converted_line) > 0:
cls.file_writer.write_line(converted_line)
class GeneralLineConverter:
REGEXP_YEAR_MONTH_DAY = r"\d\d\d\d-\d\d-\d\d"
REGEXP_DAY_MONTHSTRING_YEAR = r"\d\d [a-ö]{3,3} \d\d\d\d"
FORMAT_YEAR_MONTH_DAY = "%Y-%m-%d"
FORMAT_DAY_MONTH_YEAR = "%d/%m/%Y"
FORMAT_DAY_MONTH_YEAR_SPACES = "%d %m %Y"
YEAR_MONTH_DAY_LENGTH = 11
def __init__(self, bank):
self.bank = bank
self.ignore_line = ""
self.regexp_date = self.REGEXP_YEAR_MONTH_DAY
self.format_date = self.FORMAT_YEAR_MONTH_DAY
self.convert_date_with_month_string = False
if "santander" == self.bank:
self.transaction_position = 4
self.transaction_includes_currency = 'kr'
self.payee_position = 2
self.use_second_data = False
self.ignore_line = "Transaktioner ovan har du ännu inte fått på ditt kontoutdrag."
elif "skandia" == self.bank:
self.transaction_position = 2
self.transaction_includes_currency = ''
self.payee_position = 1
self.use_second_data = True
elif "ica" == self.bank:
self.transaction_position = 4
self.transaction_includes_currency = 'kr'
self.use_second_data = False
self.payee_position = 1
elif "ica2" == self.bank:
self.transaction_position = 4
self.transaction_includes_currency = 'kr'
self.use_second_data = False
self.payee_position = 1
self.regexp_date = self.REGEXP_DAY_MONTHSTRING_YEAR
self.convert_date_with_month_string = True
self.format_date = self.FORMAT_DAY_MONTH_YEAR_SPACES
else:
raise Exception("Invalid bank" + self.bank)
def parse_outflow(self, line):
outflow = self.parse_transaction(line)
if '-' == outflow[0]:
return outflow[1:]
else:
return ""
def parse_inflow(self, line):
inflow = self.parse_transaction(line)
if '-' == inflow[0]:
return ""
else:
return inflow
def parse_transaction(self, line):
statement_items = line.split('\t')
outflow = statement_items[self.transaction_position]
outflow = outflow.replace(',', '.')
outflow = outflow.replace(' ', '')
outflow = outflow.replace(self.transaction_includes_currency, '')
outflow = outflow.strip()
return outflow
def remove_date_from_payee(self, line):
regexp = re.compile(self.REGEXP_YEAR_MONTH_DAY)
matches = regexp.findall(line)
if len(matches) > 0:
return line[self.YEAR_MONTH_DAY_LENGTH:] # Remove date at the beginning of Payee
return line
def parse_payee(self, line):
statement_items = line.split('\t')
payee = statement_items[self.payee_position] # Get Payee from list, date is stored in index 0
payee = payee.replace(',', '.')
payee = payee.replace('\\\\', ' ')
payee = payee.replace('\\', '')
payee = payee.strip() # Strip leading/trailing whitespace
payee = self.remove_date_from_payee(payee)
return payee
def parse_date(self, line):
date_year_month_day = self._parse_year_month_day(line)
date_day_month_year = self._convert_date_string(date_year_month_day)
return date_day_month_year
def _parse_year_month_day(self, line):
regexp = re.compile(self.regexp_date)
matches = regexp.findall(line)
date_year_month_day = ""
if (len(matches) == 1):
date_year_month_day = matches[0]
elif (len(matches) == 2):
if self.use_second_data:
date_year_month_day = matches[1]
else:
date_year_month_day = matches[0]
else:
raise Exception("Invalid number of dates found in line: " + line)
return date_year_month_day
def _convert_date_with_month_string(self, extracted_date_as_string):
month_string = self._extract_month_string(extracted_date_as_string)
month_number = self._convert_month_string_to_month_number(month_string)
result = extracted_date_as_string.replace(month_string, month_number)
return result
def _convert_month_string_to_month_number(self, month_string):
if month_string == "jan":
return "01"
if month_string == "feb":
return "02"
if month_string == "mar":
return "03"
if month_string == "apr":
return "04"
if month_string == "maj":
return "05"
if month_string == "jun":
return "06"
if month_string == "jul":
return "07"
if month_string == "aug":
return "08"
if month_string == "sep":
return "09"
if month_string == "okt":
return "10"
if month_string == "nov":
return "11"
if month_string == "dec":
return "12"
raise Exception("Cannot convert month string to month number: " + month_string)
def _extract_month_string(self, extracted_date_as_string):
regexp = re.compile("[a-ö]{3,3}")
matches = regexp.findall(extracted_date_as_string)
month_string = matches[0]
return month_string
def _convert_date_string(self, extracted_date_as_string):
if self.convert_date_with_month_string:
extracted_date_as_string = self._convert_date_with_month_string(extracted_date_as_string)
extracted_date = time.strptime(extracted_date_as_string, self.format_date)
extracted_date_as_string_day_month_year = time.strftime(self.FORMAT_DAY_MONTH_YEAR, extracted_date)
return extracted_date_as_string_day_month_year
def convert_line(self, line):
if ((len(self.ignore_line) > 0) and (self.ignore_line in line)):
return ""
# Date,Payee,Category,Memo,Outflow,Inflow
out_line = ""
out_line += self.parse_date(line) + ","
out_line += self.parse_payee(line) + ","
out_line += "," # Category
out_line += "," # Memo
out_line += self.parse_outflow(line) + ","
out_line += self.parse_inflow(line) + "\n"
return out_line
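# Rough sketch of convert_line() for the "santander" settings above, using a
# made-up tab-separated statement line (the real export format may differ):
#   conv = GeneralLineConverter("santander")
#   conv.convert_line("2015-01-02\tKortkop\tICA Supermarket\tNote\t-123,45 kr")
#   # -> "02/01/2015,ICA Supermarket,,,123.45,\n"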
class IcaLineConverter:
REGEXP_YEAR_MONTH_DAY = r"\d\d\d\d-\d\d-\d\d"
REGEXP_DAY_MONTHSTRING_YEAR = r"\d\d [a-ö]{3,3} \d\d\d\d"
FORMAT_YEAR_MONTH_DAY = "%Y-%m-%d"
FORMAT_DAY_MONTH_YEAR = "%d/%m/%Y"
FORMAT_DAY_MONTH_YEAR_SPACES = "%d %m %Y"
YEAR_MONTH_DAY_LENGTH = 11
def __init__(self, bank):
self.bank = bank
self.ignore_line = ""
self.regexp_date = self.REGEXP_YEAR_MONTH_DAY
self.format_date = self.FORMAT_YEAR_MONTH_DAY
self.convert_date_with_month_string = False
if "ica2" == self.bank:
self.transaction_position = 4
self.transaction_includes_currency = 'kr'
self.use_second_data = False
self.payee_position = 1
else:
raise Exception("Invalid bank" + self.bank)
def parse_outflow(self, line):
outflow = self.parse_transaction(line)
if '-' == outflow[0]:
return outflow[1:]
else:
return ""
def parse_inflow(self, line):
inflow = self.parse_transaction(line)
if '-' == inflow[0]:
return ""
else:
return inflow
def parse_transaction(self, line):
statement_items = line.split(';')
outflow = statement_items[self.transaction_position]
outflow = outflow.replace(',', '.')
outflow = outflow.replace(' ', '')
outflow = outflow.replace(self.transaction_includes_currency, '')
outflow = outflow.strip()
return outflow
def remove_date_from_payee(self, line):
regexp = re.compile(self.REGEXP_YEAR_MONTH_DAY)
matches = regexp.findall(line)
if len(matches) > 0:
return line[self.YEAR_MONTH_DAY_LENGTH:] # Remove date at the beginning of Payee
return line
def parse_payee(self, line):
statement_items = line.split(';')
payee = statement_items[self.payee_position] # Get Payee from list, date is stored in index 0
payee = payee.replace(',', '.')
payee = payee.replace('\\\\', ' ')
payee = payee.replace('\\', '')
payee = payee.strip() # Strip leading/trailing whitespace
payee = self.remove_date_from_payee(payee)
return payee
def parse_date(self, line):
date_year_month_day = self._parse_year_month_day(line)
date_day_month_year = self._convert_date_string(date_year_month_day)
return date_day_month_year
def _parse_year_month_day(self, line):
regexp = re.compile(self.regexp_date)
matches = regexp.findall(line)
date_year_month_day = ""
if (len(matches) == 1):
date_year_month_day = matches[0]
elif (len(matches) == 2):
if self.use_second_data:
date_year_month_day = matches[1]
else:
date_year_month_day = matches[0]
else:
raise Exception("Invalid number of dates found in line: " + line)
return date_year_month_day
def _convert_date_with_month_string(self, extracted_date_as_string):
month_string = self._extract_month_string(extracted_date_as_string)
month_number = self._convert_month_string_to_month_number(month_string)
result = extracted_date_as_string.replace(month_string, month_number)
return result
def _convert_month_string_to_month_number(self, month_string):
if month_string == "jan":
return "01"
if month_string == "feb":
return "02"
if month_string == "mar":
return "03"
if month_string == "apr":
return "04"
if month_string == "maj":
return "05"
if month_string == "jun":
return "06"
if month_string == "jul":
return "07"
if month_string == "aug":
return "08"
if month_string == "sep":
return "09"
if month_string == "okt":
return "10"
if month_string == "nov":
return "11"
if month_string == "dec":
return "12"
raise Exception("Cannot convert month string to month number: " + month_string)
def _extract_month_string(self, extracted_date_as_string):
regexp = re.compile("[a-ö]{3,3}")
matches = regexp.findall(extracted_date_as_string)
month_string = matches[0]
return month_string
def _convert_date_string(self, extracted_date_as_string):
if self.convert_date_with_month_string:
extracted_date_as_string = self._convert_date_with_month_string(extracted_date_as_string)
extracted_date = time.strptime(extracted_date_as_string, self.format_date)
extracted_date_as_string_day_month_year = time.strftime(self.FORMAT_DAY_MONTH_YEAR, extracted_date)
return extracted_date_as_string_day_month_year
def convert_line(self, line):
if ((len(self.ignore_line) > 0) and (self.ignore_line in line)):
return ""
# Date,Payee,Category,Memo,Outflow,Inflow
out_line = ""
out_line += self.parse_date(line) + ","
out_line += self.parse_payee(line) + ","
out_line += "," # Category
out_line += "," # Memo
out_line += self.parse_outflow(line) + ","
out_line += self.parse_inflow(line) + "\n"
return out_line
def parse_command_line_arguments():
# Setup the argument parser
parser = argparse.ArgumentParser()
parser.add_argument("bank", help="valid banks: santander, skandia, ica")
parser.add_argument("input_file", help="text file with bank statement from the bank")
parser.add_argument("--output_file",
help="csv file to be consumed by YNAB (default: same name as input file but with .csv postfix)",
default=None)
args = parser.parse_args()
input_file = args.input_file
output_file = args.output_file
bank = args.bank
return input_file, output_file, bank
def main():
input_file, output_file, bank = parse_command_line_arguments()
output_file_name = OutputFileName()
if output_file is None:
output_file = output_file_name.create_output_file_name(input_file)
print("Input file.: {}".format(input_file))
print("Output file: {}".format(output_file))
print("Bank.......: {}".format(bank))
file_reader = FileReader(input_file)
file_writer = FileWriter(output_file)
statement_line_converter = GeneralLineConverter(bank)
statement_converter = StatementConverter(statement_line_converter, file_reader, file_writer)
statement_converter.convert()
if __name__ == '__main__':
main()
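# Hypothetical command-line invocation (file names are placeholders):
#   python parsebankstatement.py santander statement.txt
# This would write statement.csv next to the input, starting with the YNAB-style
# header row "Date,Payee,Category,Memo,Outflow,Inflow".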
``` |
{
"source": "jmostella/azure-cli-extensions",
"score": 2
} |
#### File: tests/latest/test_attestation_scenario.py
```python
import os
from .. import try_manual, raise_if
from azure.cli.testsdk import JMESPathCheck
from azure.cli.testsdk import JMESPathCheckExists
from azure.cli.testsdk import NoneCheck
from azure.cli.testsdk import ResourceGroupPreparer
from azure.cli.testsdk import ScenarioTest
TEST_DIR = os.path.abspath(os.path.join(os.path.abspath(__file__), '..'))
@try_manual
def setup(test, rg):
pass
# EXAMPLE: AttestationProviders_Create
@try_manual
def step_attestationproviders_create(test, rg):
test.cmd('az attestation create '
'--name "{myattestation}" '
'--resource-group "{rg}" '
'--location "eastus2" '
'--tags aKey=aValue anotherKey=anotherValue '
'--certs-input-path "src/attestation/azext_attestation/tests/latest/policySigningCerts.pem"',
checks=[
JMESPathCheck('name', test.kwargs.get('myattestation', '')),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('location', 'eastus2'),
JMESPathCheck(
'tags', '{\'aKey\': \'aValue\', \'anotherKey\': \'anotherValue\'}')])
# EXAMPLE: AttestationProviders_Get
@try_manual
def step_attestationproviders_get(test, rg):
test.cmd('az attestation show '
'--name "{myattestation}" '
'--resource-group "{rg}"',
checks=[
JMESPathCheck('name', test.kwargs.get('myattestation', '')),
JMESPathCheck('resourceGroup', rg),
JMESPathCheck('location', 'eastus2')
])
# EXAMPLE: AttestationProviders_List
@try_manual
def step_attestationproviders_list(test, rg):
test.cmd('az attestation list '
'--resource-group=',
checks=[
JMESPathCheckExists('value[?name==\'{}\']'.format(
test.kwargs.get('myattestation', '')))
])
# EXAMPLE: AttestationProviders_ListByResourceGroup
@try_manual
def step_attestationproviders_listbyresourcegroup(test, rg):
test.cmd('az attestation list '
'--resource-group "{rg}"',
checks=[
JMESPathCheck('value[0].name',
test.kwargs.get('myattestation', ''))
])
# EXAMPLE: AttestationProviders_Delete
@try_manual
def step_attestationproviders_delete(test, rg):
test.cmd('az attestation delete '
'--name "{myattestation}" '
'--resource-group "{rg}" '
'--yes',
checks=[])
test.cmd('az attestation list '
'--resource-group "{rg}"',
checks=[test.check('length(value)', 0)])
@try_manual
def cleanup(test, rg):
pass
@try_manual
def call_scenario(test, rg):
setup(test, rg)
step_attestationproviders_create(test, rg)
step_attestationproviders_get(test, rg)
step_attestationproviders_list(test, rg)
step_attestationproviders_listbyresourcegroup(test, rg)
step_attestationproviders_delete(test, rg)
cleanup(test, rg)
@try_manual
class AttestationManagementClientScenarioTest(ScenarioTest):
@ResourceGroupPreparer(name_prefix='clitestattestation_MyResourceGroup'[:7], key='rg', parameter_name='rg')
def test_attestation(self, rg):
self.kwargs.update({
'myattestation': self.create_random_name(prefix='clitestattestation'[:9], length=24)
})
call_scenario(self, rg)
raise_if()
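# The scenario above exercises create -> show -> list -> delete for a single
# attestation provider. One assumed way to run it locally is via the azure-cli
# dev tooling, e.g.: azdev test test_attestation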
``` |
{
"source": "jmoswalt/django-things",
"score": 3
} |
#### File: things/bin/create_things_project.py
```python
from __future__ import with_statement
from distutils.dir_util import copy_tree
from optparse import OptionParser
import os
from shutil import move
from uuid import uuid4
from django.utils.importlib import import_module
def create_project():
"""
Copies the contents of the project_template directory to the
current directory.
The logic for this type of project build out came from Mezzanine
https://github.com/stephenmcd/mezzanine/blob/master/mezzanine/bin/mezzanine_project.py
"""
parser = OptionParser(usage="usage: %prog")
project_path = os.path.join(os.getcwd())
# Create the list of packages to build from - at this stage it
# should only be one or two names, things plus an alternate
# package.
packages = ["things"]
for package_name in packages:
try:
__import__(package_name)
except ImportError:
parser.error("Could not import package '%s'" % package_name)
# Build the project up copying over the project_template from
# each of the packages.
for package_name in packages:
package_path = os.path.dirname(os.path.abspath(import_module(package_name).__file__))
copy_tree(os.path.join(package_path, "project_template"), project_path)
move(os.path.join(project_path, ".env_example"),
os.path.join(project_path, ".env"))
# Update the local environment file with custom KEYs
env_path = os.path.join(os.getcwd(), ".env")
# Generate a unique SECRET_KEY for the project's settings module.
with open(env_path, "r") as f:
data = f.read()
with open(env_path, "w") as f:
secret_key = "%s%s" % (uuid4(), uuid4())
f.write(data.replace("your_unique_secret_key", secret_key))
# Clean up pyc files.
for (root, dirs, files) in os.walk(project_path, False):
for f in files:
if f.endswith(".pyc"):
os.remove(os.path.join(root, f))
if __name__ == "__main__":
create_project()
```
#### File: management/commands/clear_static_builds.py
```python
import os
import shutil
from django.core.management.base import BaseCommand
from django.conf import settings
from things.models import StaticBuild
class Command(BaseCommand):
"""
Clears the static build objects from the DB.
"""
def handle(self, **options):
sbs = StaticBuild.objects.all()
sbs.delete()
static_dir = settings.MEDUSA_DEPLOY_DIR
if static_dir:
shutil.rmtree(static_dir)
if not os.path.exists(static_dir):
os.makedirs(static_dir)
print "Static builds cleared"
```
#### File: management/commands/rebuild_static_site.py
```python
import os
from django.core.management.base import BaseCommand
from django.core.management import call_command
from django.conf import settings
from things.models import StaticBuild
class Command(BaseCommand):
"""
Runs the staticsitegen command and the collectstatic command.
"""
def handle(self, **options):
call_command('collectstatic', interactive=False)
call_command('staticsitegen')
if settings.MEDUSA_DEPLOY_DIR:
os.system("sed -i 's|http://testserver|%s|g' %s" % (settings.SERVER_NAME, os.path.join(settings.MEDUSA_DEPLOY_DIR, 'feed', 'index.rss')))
mark_staticbuild = StaticBuild()
mark_staticbuild.save()
print "DONE !!!"
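# Hypothetical usage, assuming the app is installed in a Django project:
#   python manage.py rebuild_static_site
# This collects static files, regenerates the static pages via the staticsitegen
# command (django-medusa), rewrites the feed host name and saves a StaticBuild marker.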
```
#### File: django-things/things/urls.py
```python
from django.conf.urls import patterns, url, include
from django.contrib.sitemaps import views as sitemaps_views
from django.contrib import admin
from django.views.decorators.cache import cache_page
from django.views.generic import TemplateView, base
from .pages.models import Page
from .views import ThingListView, ThingDetailView, ThingImportView, static_build, thing_export
from .feeds import AllThingsFeed, ThingSitemap
admin.autodiscover()
urlpatterns = patterns(
'',
url(r'^$', TemplateView.as_view(template_name='home.html'), name='home'),
url(r'^things/import/$', ThingImportView.as_view(), name='things_import'),
url(r'^things/', include(admin.site.urls)),
url(r'^accounts/login/$', base.RedirectView.as_view(), {'url': '/'}),
url(r'^redactor/', include('redactor.urls')),
url(r'^deploy/$',
static_build,
name='deploy'),
url(r'^feed/$', AllThingsFeed(), name="feed_all"),
url(r'^sitemap\.xml$', cache_page(3600)(sitemaps_views.sitemap), {'sitemaps': {'things': ThingSitemap}}),
url(r'^export/(?P<ct_id>\d+)/$', thing_export, name='things_export'),
)
def auto_app_url_patterns():
from .models import ThingType
items = []
things = ThingType.objects.all()
for t in things:
thing = t.get_class()
label = thing._meta.verbose_name.lower()
label_p = thing._meta.verbose_name_plural.lower()
listname = '%s_list' % label
detailname = '%s_detail' % label
items.append(url(r'^%s/([\d]+)?/?$' % label_p, ThingListView.as_view(model=thing), name=listname))
items.append(url(r'^%s/(?P<slug>[\w\-]+)/$' % label_p, ThingDetailView.as_view(model=thing), name=detailname))
return items
urlpatterns += auto_app_url_patterns()
urlpatterns += patterns('',
url(r'^(?P<slug>[\w\-\/]+)/$',
ThingDetailView.as_view(model=Page),
name='page_detail'),
)
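# Sketch of what auto_app_url_patterns() adds for a ThingType whose verbose names
# are "note"/"notes" (hypothetical example):
#   /notes/ and /notes/<page>/ -> ThingListView (url name "note_list")
#   /notes/<slug>/ -> ThingDetailView (url name "note_detail")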
```
#### File: django-things/things/utils.py
```python
import os
from django.http import Http404
from django.conf import settings
from django.contrib.auth.models import AnonymousUser
from django.shortcuts import get_object_or_404
from django.contrib import admin
def get_thing_object_or_404(cls, slug, **kwargs):
"""
Checks if the object is viewable.
Slug filter MUST be chained so it filters on the
actual Thing object.
"""
objs = cls.objects.filter(**kwargs).filter(slug=slug)
if objs:
return objs[0]
else:
raise Http404
def handle_uploaded_file(obj, f):
internal_path = os.path.join(unicode("uploads"), unicode(obj.obj_type_plural().replace(' ', '_')), unicode(obj.id))
folder_path = os.path.join(settings.MEDIA_ROOT, internal_path)
if not os.path.exists(folder_path):
os.makedirs(folder_path)
file_path = os.path.join(internal_path, f.name)
full_file_path = os.path.join(settings.MEDIA_ROOT, file_path)
with open(full_file_path, 'wb+') as destination:
for chunk in f.chunks():
destination.write(chunk)
return file_path
def get_thing_objects_qs(model, user=AnonymousUser()):
public_filter_out = model.public_filter_out or {}
super_user_order = model.super_user_order or ['-created_at']
public_order = model.public_order or ['-created_at']
if type(public_order) == type(str()):
public_order = [public_order,]
if user.is_superuser:
queryset = model.objects.order_by(*super_user_order)
else:
queryset = model.objects.filter(**public_filter_out).order_by(*public_order)
return queryset
def get_thing_object(model, slug, user=AnonymousUser()):
public_filter_out = model.public_filter_out or {}
if user.is_superuser:
obj = get_object_or_404(model, slug=slug)
else:
filters = public_filter_out
obj = get_thing_object_or_404(
cls=model,
slug=slug,
**filters)
return obj
def load_models(msg=None):
from .attrs import CONTENT, AUTHOR, PUBLISHED_AT, FEATURED
from .types import TYPE_TEXT
from .models import ThingType, register_thing
from .admin import ThingAdmin
new_classes = []
# Add try in case ThingType table has not been created
try:
mods = ThingType.objects.all()
len(mods)
except:
return None
if not mods:
example_type = ThingType()
example_type.name = "Note"
example_type.slug = 'notes'
example_type.json = {
'fields': (
CONTENT,
AUTHOR,
PUBLISHED_AT,
FEATURED,
{
"name": "Category",
"key": "category",
"description": "Add a Category to the {{ model }}.",
"datatype": TYPE_TEXT,
"required": False
}
)
}
example_type.save()
mods = ThingType.objects.all()
for mod in mods:
new_class = mod.get_class()
register_thing(new_class, attrs=mod.json['fields'])
try:
admin.site.register(new_class, ThingAdmin)
except admin.sites.AlreadyRegistered:
admin.site.unregister(new_class)
admin.site.register(new_class, ThingAdmin)
new_classes.append(new_class)
return new_classes
``` |
{
"source": "jmoszx/servnav",
"score": 3
} |
#### File: servnav/Servidor/processaRequisicao.py
```python
import os
import socket
import sys
import magic
from threading import Thread
class reqProcessa(object):
def __init__(self):
super(reqProcessa, self).__init__()
# self.arg = arg
def makeRequest(self,conexao, path, listarHtml, htmlErro):
requisicao = conexao.recv(2048).decode()
requestHandler = False # initially False; will hold the request line (first line of the request)
buildRequisicao = {} # header fields parsed from the request
bufferSaida = 1024 # buffer size used when sending content through the socket
hmtlCodigo = ''
ref_htmlCodigo = ''
content_type = ''
# the variables above are used while processing the header
# parse the request, collecting its components and their parameters
for line in requisicao.split('\n'):
if not requestHandler:
requestHandler = line
continue
itensCabecalho = line.strip('\r').split(':')[0]
paramItens = ':'.join(line.strip('\r').split(':')[1:]).strip()
buildRequisicao[itensCabecalho] = paramItens
#print(itensCabecalho)
try:
httpGET, source, httpVER = requestHandler.split(' ')
except ValueError:
httpGET = 'GET'
httpVER = 'HTTP/1.1'
source = '/'
sourcePath = (path + source).replace('//', '/') # requested resource or directory
if os.path.isfile(sourcePath):
hmtlCodigo = '200'
ref_htmlCodigo = 'ok'
retornoConteudo = '<h1> 200 ok</h1>'
content_type = 'text/html'
# default values for the success case
mime = magic.Magic(mime=True) # detect the mime type
content_type = mime.from_file(sourcePath)
retornoConteudo = open(sourcePath, 'rb').read() # open the file and read its contents
# check whether it is a directory; if so, build the listing (itemLista)
elif os.path.isdir(sourcePath):
itemLista = ''
# list each entry of this directory
for i in os.listdir(sourcePath):
itemLista += '<div class="listall">'
if not os.path.isdir(path+i): # use the file icon when the entry is a file
itemLista += '<img src="https://image.freepik.com/free-icon/ico-file-format-variant_318-45749.jpg" width="15px" height="15px"><a href="' + source.rstrip('/') + '/' + i + '">' + i + '</a></img>'
else: # use the folder icon when the entry is a directory
itemLista += '<img src="http://icons.iconseeker.com/ico/aruzo-dark-blue/folder-blue-12.ico" width="15px" height="15px"><a href="' + source.rstrip('/') + '/' + i + '">' + i + '</a></img>'
itemLista += '</div>'
retornoConteudo = listarHtml.format('SERVER ' + sourcePath,itemLista,'<a href="../"><img src="http://icons.iconseeker.com/ico/aruzo-dark-blue/folder-blue-12.ico" width="15px" height="15px">..</img></a>' if sourcePath.rstrip('/') != path.rstrip('/') else '')
else: # neither a file nor an existing directory: set 404 and return the error HTML
hmtlCodigo = '404'
ref_htmlCodigo = 'Not Found'
retornoConteudo = htmlErro
cabecalhoRetorno = 'HTTP/1.1 {0} {1}\nContent-Type: {2}\n\n'.format(hmtlCodigo,ref_htmlCodigo,content_type)
conexao.send(cabecalhoRetorno.encode()) # send back the response header
if isinstance(retornoConteudo, str): # encode the content before sending; needed for the directory listing case
retornoConteudo = retornoConteudo.encode()
for i in range(0, len(retornoConteudo), bufferSaida):
try:
conexao.send(retornoConteudo[i:i + bufferSaida])
except BrokenPipeError:
pass
# send the content through the socket in chunks
conexao.close() # close the connection
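# Minimal sketch of how this class might be driven; the socket setup below is an
# assumption and listarHtml/htmlErro are HTML templates provided elsewhere in the
# project:
#   srv = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
#   srv.bind(("0.0.0.0", 8080))
#   srv.listen(5)
#   handler = reqProcessa()
#   conn, addr = srv.accept()
#   Thread(target=handler.makeRequest, args=(conn, "./www", listarHtml, htmlErro)).start()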
``` |
{
"source": "jmotis/twitter-networks",
"score": 3
} |
#### File: jmotis/twitter-networks/networkCreationHashtag.py
```python
import csv
# define function
def extract_handles(file):
# open and read the files, create Header Row for export .csv
text = open(file)
entry = csv.reader(text)
export_file_name = file[:-4] + "_edge_list.csv"
handles = open(export_file_name, 'a+')
handles.write('source,target,hashtag\n')
# import entities_str column
for line in entry:
# extract the initial tweeter handle from column B
from_user = line[1]
# extract the entities_str (full tweet) text from column Q
entities_str = line[16]
# split entities_str into a list whose first item is misc. characters and subsequent items start with a twitter handle
entities_str = entities_str.split('"screen_name":"')
# remove the first item of misc. characters
entities_str.pop(0)
# isolate just the twitter handles from subsequent items
for item in entities_str:
item = item.split('"')
item = item[0]
# write initial tweeter handle and entities_str twitter handles to file in network edge format
handles.write(from_user + ',' + item + ',' + hashtag + '\n')
# close files
text.close()
handles.close()
# end function definition
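# Note: `hashtag` comes from module scope (set below from the file name), so
# extract_handles must be called after it is defined. Each output row has the
# form "source,target,hashtag"; e.g. with a file named climate.csv, a tweet by
# alice mentioning bob would yield the row (hypothetical data):
#   alice,bob,climate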
#begin program
file_name = input('What is the exact name of your csv file (include the .csv) ')
hashtag = file_name[:-4]
extract_handles(file_name)
# end program
``` |
{
"source": "jmoudrik/deep-go-wrap",
"score": 3
} |
#### File: deep-go-wrap/deepgo/rank.py
```python
import re
from collections import namedtuple
BrWr = namedtuple('BrWr', 'br wr')
class RankInitExc(Exception):
pass
def argmin(pairs):
return min(pairs, key=lambda x:x[1])[0]
class Rank:
KEYS={'k': lambda x: x, # 1kyu -> 1, 30kyu -> 30
'd': lambda x: -x + 1, # 1dan -> 0, 10dan -> -9
'p': lambda x: -x - 9} # 1pro -> -10, 10pro -> -19
DOMAIN_MAX = { 'k' : 30,
'd' : 10,
'p' : 10 }
@staticmethod
def from_key(number):
# XXX ugly
ranks = list(Rank.iter_all())
dists = [ abs(number - r.key()) for r in ranks ]
return argmin( zip(ranks, dists) )
@staticmethod
def from_string(string, strict=False):
rexp = '^([1-9][0-9]?) ?([kdp]).*'
if strict:
rexp = '^([1-9][0-9]?) ?([kdp])$'
mo = re.match(rexp, string.lower())
if not mo:
return None
try:
return Rank(int(mo.group(1)), mo.group(2))
except (ValueError, RankInitExc):
return None
@staticmethod
def iter_all():
for key, domain in Rank.DOMAIN_MAX.iteritems():
for x in xrange(domain):
yield Rank( x + 1, key )
def __init__(self, number, kdp):
self.number, self.kdp = number, kdp
if not self.kdp in self.KEYS:
raise RankInitExc("kdp must be either 'k' for kyu players,"
" 'd' for dan players or 'p' for proffesionals")
def check_domain(bottom, val, up):
assert bottom <= up
if not( bottom <= val <= up):
raise RankInitExc("Must be %d <= %d <= %d." % (bottom, val, up))
check_domain(1, self.number, self.DOMAIN_MAX[self.kdp])
def as_tuple(self):
return self.number, self.kdp
def key(self):
return self.KEYS[self.kdp](self.number)
def __str__(self):
return "%d%s"%(self.number, self.kdp)
def __repr__(self):
return "Rank(%s, key=%d)"%(self, self.key())
def __hash__(self):
return self.key().__hash__()
def __cmp__(self, other):
if not isinstance(other, Rank):
return -1
return ( - self.key()).__cmp__( - other.key())
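# Small illustration of the key mapping defined in KEYS above:
#   Rank.from_string("5k").key() == 5
#   Rank.from_string("1d").key() == 0
#   Rank.from_string("3p").key() == -12
# so stronger ranks always compare greater (see the assertion below).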
if __name__ == "__main__":
assert Rank(6, 'd') > Rank(2, 'd') > Rank(1, 'k') > Rank(10, 'k')
def print_rank():
print "["
for rank in Rank.iter_all():
value = rank.key()
text = str(rank)
print '{"value" : "%s", "text" : "%s"},' % (value, text)
print "]"
print_rank()
```
#### File: jmoudrik/deep-go-wrap/hdf2deepcl_v2.py
```python
import logging
import argparse
import numpy as np
import h5py
def parse_args():
parser = argparse.ArgumentParser(
description='Converts the HDF5 dataset to the binary'
' format v2 compatible with DeepCL.'
' The v2 format specification is available at:'
' https://github.com/hughperkins/kgsgo-dataset-preprocessor'
' The HDF5 dataset must be created using'
' these two options:'
' "-p clark_storkey_2014_packed -l simple_label"'
' or this will fail.')
parser.add_argument('filename_in', metavar='FILENAME_IN',
help='HDF5 filename to read the dataset to')
parser.add_argument('filename_out', metavar='FILENAME_OUT',
help='deepcl v2 filename to store result to')
parser.add_argument('--x-name', dest='xname',
help='HDF5 dataset name to read the xs from',
default='xs')
parser.add_argument('--y-name', dest='yname',
help='HDF5 dataset name to read the ys from',
default='ys')
return parser.parse_args()
def main():
## ARGS
args = parse_args()
## INIT LOGGING
logging.basicConfig(format='%(asctime)s %(levelname)s: %(message)s',
level=logging.DEBUG) # if not args.quiet else logging.WARN)
logging.info("args: %s"%args)
## INIT dataset
with h5py.File(args.filename_in) as hdf:
dset_x = hdf[args.xname]
dset_y = hdf[args.yname]
if dset_x.attrs['name'] != 'clark_storkey_2014_packed':
logging.error("The input dataset must have planes as specified by opt: -p clark_storkey_2014_packed")
if dset_y.attrs['name'] != 'simple_label':
logging.error("The input dataset must have label as specified by opt: -l simple_label")
assert dset_x.shape[0] == dset_y.shape[0]
num_examples = dset_x.shape[0]
with open(args.filename_out, 'w') as fout:
logging.info("Starting the conversion to v2")
header = '-'.join(["mlv2",
"n=%d" % num_examples,
"numplanes=7",
"imagewidth=%d" % dset_x.attrs['boardsize'],
"imageheight=%d"% dset_x.attrs['boardsize'],
"datatype=int",
"bpp=1\0\n"])
fout.write(header)
# the header is padded
fout.write( chr(0) * (1024 - len(header)))
for i in xrange(num_examples):
if i and i % 10000 == 0:
logging.info("Processed %d / %d = %.1f%%"%(i,
num_examples,
100.0*i/num_examples))
data, label = dset_x[i], dset_y[i]
# each example is prefixed by 'GO' string
fout.write('GO')
# then label
label_high, label_low = label // 256, label % 256
fout.write(chr(label_low))
fout.write(chr(label_high))
fout.write(chr(0) * 2)
# then the planes
# clark_storkey_2014_packed, has just the correct representation
data.tofile(fout)
# finaly, mark the end
fout.write('END')
logging.info("Finished processing %d examples."%(num_examples))
if __name__ == "__main__":
main()
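# Hypothetical invocation (file names are placeholders):
#   python hdf2deepcl_v2.py dataset.hdf5 dataset.v2
# The input HDF5 must have been built with the options
# "-p clark_storkey_2014_packed -l simple_label", as checked in main().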
``` |
{
"source": "jmoudrik/jmpy",
"score": 4
} |
#### File: jmpy/jmpy/utils.py
```python
import collections as _collections
import contextlib as _contextlib
import itertools as _itertools
import math as _math
import re as _re
import sys as _sys
def identity(x):
"""
Return itself
>>> identity(1)
1
"""
return x
def filter_null(iterable):
"""
Filter out elements that do not evaluate to True
>>> list(filter_null((0, None, 1, '', 'cherry')))
[1, 'cherry']
"""
return filter(identity, iterable)
def filter_both(predicate, iterable):
"""
Splits the iterable into two groups, based on the result of
calling `predicate` on each element.
WARN: Consumes the whole iterable in the process. This is the
price for calling the `predicate` function only once for each
element. (See itertools recipes for similar functionality without
this requirement.)
>>> filter_both(lambda x: x%2 == 0, range(4))
([0, 2], [1, 3])
"""
yes, no = [], []
for i in iterable:
if predicate(i):
yes.append(i)
else:
no.append(i)
return yes, no
def flatten(iterables):
"""
>>> list(flatten(((1, 2, 3), (4, 5, 6))))
[1, 2, 3, 4, 5, 6]
"""
for iterable in iterables:
for element in iterable:
yield element
def argmax(pairs):
"""
Given an iterable of pairs (key, value), return the key corresponding to the greatest value.
Raises `ValueError` on empty sequence.
>>> argmax(zip(range(20), range(20, 0, -1)))
0
"""
return max(pairs, key=lambda x: x[1])[0]
def argmin(pairs):
"""
Given an iterable of pairs (key, value), return the key corresponding to the smallest value.
Raises `ValueError` on empty sequence.
>>> argmin(zip(range(20), range(20, 0, -1)))
19
"""
return min(pairs, key=lambda x: x[1])[0]
def argmax_index(values):
"""
Given an iterable of values, return the index of the (first) greatest value.
Raises `ValueError` on empty sequence.
>>> argmax_index([0, 4, 3, 2, 1, 4, 0])
1
"""
return argmax(zip(_itertools.count(), values))
def argmin_index(values):
"""
Given an iterable of values, return the index of the (first) smallest value.
Raises `ValueError` on empty sequence.
>>> argmin_index([10, 4, 0, 2, 1, 0])
2
"""
return argmin(zip(_itertools.count(), values))
def bucket_by_key(iterable, key_fc):
"""
Throws items in @iterable into buckets given by @key_fc function.
e.g.
>>> bucket_by_key([1, 2, -3, 4, 5, 6, -7, 8, -9], lambda num: 'neg' if num < 0 else 'nonneg')
OrderedDict([('nonneg', [1, 2, 4, 5, 6, 8]), ('neg', [-3, -7, -9])])
"""
buckets = _collections.OrderedDict()
for item in iterable:
buckets.setdefault(key_fc(item), []).append(item)
return buckets
def first_true_pred(predicates, value):
"""
Given a list of predicates and a value, return the index of first predicate,
s.t. predicate(value) == True.
If no such predicate found, raises IndexError.
>>> first_true_pred([lambda x: x%2==0, lambda x: x%2==1], 13)
1
"""
for num, pred in enumerate(predicates):
if pred(value):
return num
raise IndexError
def stderr(*args, **kwargs):
kwargs['file'] = _sys.stderr
print(*args, **kwargs)
def cache_into(factory, filename):
"""
Simple pickle caching. Calls `factory`, stores result to `filename` pickle.
Subsequent calls load the obj from the pickle instead of running the `factory` again."""
import os
import pickle
if os.path.exists(filename):
stderr("loading from '%s'" % filename)
with open(filename, 'rb') as fin:
return pickle.load(fin)
obj = factory()
stderr("saving to '%s'" % filename)
with open(filename, 'wb') as fout:
pickle.dump(obj, fout)
return obj
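# Illustrative use of cache_into (the path and factory below are placeholders):
# the first call runs the factory and writes the pickle, later calls just load it.
#   data = cache_into(lambda: expensive_computation(), "/tmp/result.pickle")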
def consuming_length(iterator):
"""
Return length of an iterator, consuming its contents. O(1) memory.
>>> consuming_length(range(10))
10
"""
cnt = 0
for _ in iterator:
cnt += 1
return cnt
def simple_tokenize(txt, sep_rexp=r"\W"):
"""
Iterates through tokens, kwarg `sep_rexp` specifies the whitespace.
O(N) memory.
>>> list(simple_tokenize('23_45 hello, how are you?'))
['23_45', 'hello', 'how', 'are', 'you']
"""
txt = _re.sub(sep_rexp, ' ', txt)
for s in txt.split(' '):
if s:
yield s
def k_grams(iterable, k):
"""
Returns iterator of k-grams of elements from `iterable`.
>>> list(k_grams(range(4), 2))
[(0, 1), (1, 2), (2, 3)]
>>> list(k_grams((), 2))
[]
>>> list(k_grams((1,), 2))
[]
"""
it = iter(iterable)
keep = tuple(_itertools.islice(it, k-1))
# if we do not even have the starting k-1 elements, exit
if len(keep) < k - 1:
return
# every remaining element will yield a k-gram
for e in it:
this = keep + (e,)
yield this
keep = this[1:]
def uniq(iterable, count=False):
"""
Similar to unix `uniq`. Returns counts as well if `count` arg is True.
Has O(1) memory footprint.
>>> list(uniq([1, 1, 1, 2, 3, 3, 2, 2]))
[1, 2, 3, 2]
>>> list(uniq([1, 1, 1, 2, 3, 3, 2, 2], count=True))
[(3, 1), (1, 2), (2, 3), (2, 2)]
>>> list(uniq([1, None]))
[1, None]
>>> list(uniq([None]))
[None]
>>> list(uniq([]))
[]
"""
def output(counter, element):
if count:
return counter, element
return element
it = iter(iterable)
previous = None
counter = 0
first_run = True
for element in it:
if not first_run and element != previous:
yield output(counter, previous)
counter = 0
counter += 1
previous = element
first_run = False
if not first_run:
yield output(counter, element)
def group_consequent(iterator, key=None):
"""
Groups consequent elements from an iterable and returns them
as a sequence.
Has O(maximal groupsize) memory footprint.
>>> list(group_consequent([0, 2, 1, 3, 2, 1], key=lambda x:x%2))
[[0, 2], [1, 3], [2], [1]]
>>> list(group_consequent([None, None]))
[[None, None]]
>>> [len(g) for g in group_consequent([1, 1, 1, 2, 3, 3, 2, 2])]
[3, 1, 2, 2]
"""
if key is None:
key = lambda e: e
prev_key = None
first_run = True
current_group = []
for row in iterator:
current_key = key(row)
if not first_run and current_key != prev_key:
yield current_group
current_group = []
current_group.append(row)
first_run = False
prev_key = current_key
if current_group:
yield current_group
def nonempty_strip(iterable):
"""
>>> list(nonempty_strip(['little ', ' ', '\tpiggy\\n']))
['little', 'piggy']
"""
for txt in iterable:
txt = txt.strip()
if txt:
yield txt
def collapse_whitespace(txt):
"""
>>> collapse_whitespace("bla bla")
'bla bla'
"""
return _re.sub(r'\s+', r' ', txt)
@_contextlib.contextmanager
def timer(name='', verbose=True):
import time
ts = []
def next():
ts.append(time.time())
try:
yield next
finally:
next()
diffs = []
prev = ts[0]
for i in range(1, len(ts)):
diffs.append(ts[i] - prev)
prev = ts[i]
if verbose:
stderr("Timer %s" % (repr(name)))
stats = num_stats(diffs)
units = {k: " s" if k != 'count' else ' iterations' for k in stats.keys()}
print_num_stats(stats, units=units, file=_sys.stderr)
def num_stats(numbers, print=False, print_formats=None):
"""
Computes stats of the `numbers`, returns an OrderedDict with value and suggested print format
>>> num_stats(range(10))
OrderedDict([('count', 10), ('sum', 45), ('mean', 4.5), ('sd', 2.8722813232690143), ('min', 0), ('1%', 0.09), ('5%', 0.45), ('25%', 2.25), ('50%', 4.5), ('75%', 6.75), ('95%', 8.549999999999999), ('99%', 8.91), ('max', 9)])
>>> print_num_stats(num_stats(range(10)))
count 10
sum 45.000
mean 4.500
sd 2.872
min 0.000
1% 0.090
5% 0.450
25% 2.250
50% 4.500
75% 6.750
95% 8.550
99% 8.910
max 9.000
"""
import numpy
def fl(num):
return num
nums = numpy.array(numbers)
ret = _collections.OrderedDict()
ret['count'] = len(numbers)
if len(numbers):
ret.update([
("sum", fl(nums.sum())),
("mean", fl(nums.mean())),
("sd", fl(numpy.std(nums))),
("min", fl(nums.min())),
("1%", fl(numpy.percentile(nums, 1))),
("5%", fl(numpy.percentile(nums, 5))),
("25%", fl(numpy.percentile(nums, 25))),
("50%", fl(numpy.median(nums))),
("75%", fl(numpy.percentile(nums, 75))),
("95%", fl(numpy.percentile(nums, 95))),
("99%", fl(numpy.percentile(nums, 99))),
("max", fl(nums.max()))])
if print:
print_num_stats(ret, formats=print_formats)
return ret
def draw_console_histogram(counts, bins, nums, stats={}, sum_hist=False, count_hist=True, max_bin_width=50):
import numpy
# FIXME
# problem: for small numbers, the %.2f format could hide
# information
# fix: the format for the bins should be variable based on
# some information measure or something
assert nums.min() == bins[0] and nums.max() == bins[-1]
if sum_hist and (nums < 0.0).any():
stderr("WARN: num_sums only makes sense for positive numbers.")
stderr("WARN: plotting counts instead")
sum_hist = False
count_hist = True
histogram_to_print = []
if count_hist:
histogram_to_print.append(False)
if sum_hist:
histogram_to_print.append(True)
def norm_to_width(counts):
max_count = counts.max()
if max_count > max_bin_width:
norm = max_count / max_bin_width
return counts / norm, max_count
return counts, max_count
def fmt_f(f):
return "%.3f" % f
def pad_right(c, size):
return c + " " * max(0, size - len(c))
def pad_left(c, size):
return " " * max(0, size - len(c)) + c
def reduce_info(stuff):
return ''.join(stuff)
def first_or(a, b, add=True):
if a:
return a + (("." if (b and a[-1] != '.') else '') if add else '')
return b
def stat(flag, key, left, right):
if key not in stats:
return ''
val = stats[key]
if not (left <= val <= right):
return ''
return flag
col_pads = [pad_right, pad_left]
def mkrow(s):
return ["%s = %.3f" % (pad_left(key, 3), stats[key]) for key in s.split()]
def print_row(l, pd=10):
if not l:
return
for element in l:
print(pad_right(element, (0 if len(l) == 1 else pd)), end='\t')
print()
size = counts.sum()
print_row(["count = %d" % size])
if size == 0:
return
if stats:
print_row(["sum = %.3f" % stats['sum']])
print_row(["mean = %.3f ± %.3f" % (stats['mean'], stats['sd'])])
print_row(["median = %.3f" % (stats['50%'])])
print_row(mkrow("min 1% 5% 25%"))
print_row(mkrow("max 99% 95% 75%"))
if 'mean' in stats and 'sd' in stats:
mean = stats['mean']
sd = stats['sd']
stats['sd_low'] = mean - sd
stats['sd_hi'] = mean + sd
stars, max_count = norm_to_width(counts)
bin_sums = numpy.zeros(len(counts))
for i in range(len(counts)):
left, right = bins[i], bins[i + 1]
# https://numpy.org/doc/stable/reference/generated/numpy.histogram.html
# the last histogram boundary is not half-open
select_right = nums < right
if i == len(counts) - 1:
select_right = nums <= right
bin_select = (nums >= left) * select_right
bin_sums[i] = (nums * bin_select).sum()
stars_sum, max_count_sum = norm_to_width(bin_sums)
# num digits for count
digits = len(str(max_count))
# num digits for max size of bin (interval on the left)
digits_bin = max(len(fmt_f(b)) for b in bins)
infos = []
for i in range(len(counts)):
left, right = bins[i], bins[i + 1]
info = first_or(''.join((stat('m', 'mean', left, right),
stat('M', '50%', left, right))).strip(),
first_or(
first_or(first_or(stat('-', 'sd_low', left, right),
stat('25', '25%', left, right)),
first_or(stat('5', '5%', left, right),
stat('1', '1%', left, right)),
add=True),
first_or(first_or(stat('-', 'sd_hi', left, right),
stat('75', '75%', left, right)),
first_or(stat('95', '95%', left, right),
stat('99', '99%', left, right)),
add=True),
add=False)
)
infos.append(info)
info_len = max(map(len, infos))
sum_norm = max(1, nums.sum())
count_norm = max(1, counts.sum())
for print_num_sum in histogram_to_print:
print()
count_cumsum = 0.0
cumsum = 0.0
legend = "%s, %s %s %s %s" % (
pad_left("<from", digits_bin),
pad_left("to)", digits_bin),
pad_left("#", digits),
pad_left("", 1 + info_len),
"statistic of bin SUM" if print_num_sum else "statistics of bin count")
print(legend)
print("-" * (len(legend) + int(legend.startswith(" "))))
for i in range(len(counts)):
left, right = bins[i], bins[i + 1]
count_cumsum += counts[i]
bin_sum = bin_sums[i]
cumsum += bin_sum
stars_float = stars[i]
if print_num_sum:
stars_float = stars_sum[i]
stars_here = _math.floor(stars_float)
# <stars, stars+1)
extra = _math.ceil(stars_float) - stars_here
assert extra <= 1
row = "*" * stars_here + ('.' if extra > 0 else '')
suff = ''
is_last_one = i == len(counts) - 1
round = lambda x: x # _math.floor if not is_last_one else _math.ceil
d_cum = round(100 * count_cumsum / count_norm)
d_bin = 100 * counts[i] / count_norm
if print_num_sum:
d_cum = round(100 * cumsum / sum_norm)
d_bin = 100 * bin_sum / sum_norm
# just for pretty print
# this can be lower because of double rounding
if is_last_one:
pass
# d_cum = 100
print("%s, %s %s %s %s %s %s" % (
pad_left(fmt_f(left), digits_bin),
pad_left(fmt_f(right), digits_bin),
pad_left(str(counts[i]), digits),
pad_left(infos[i], 1 + info_len),
pad_left("%.0f%%" % (d_cum), 4),
pad_left("%.0f%%" % (d_bin), 3),
row))
def full_stats(numbers, count_hist=True, sum_hist=False, bins='sturges', **kwargs):
"""
Prints statistics of a list of `numbers` to console.
:param count_hist: prints histogram.
:param sum_hist: prints histogram, but of SUMS of the values in the bins.
:param bins: numpy bins arguments
>>> import numpy as np
>>> np.random.seed(666)
>>> first_peak = np.random.normal(size=100)
>>> second_peak = np.random.normal(loc=4,size=100)
>>> numbers = np.concatenate([first_peak, second_peak])
>>> full_stats(numbers)
count = 200
sum = 403.403
mean = 2.017 ± 2.261
median = 1.874
min = -3.095 1% = -1.870 5% = -0.990 25% = -0.045
max = 7.217 99% = 6.063 95% = 5.404 75% = 4.089
<BLANKLINE>
<from, to) # statistics of bin count
------------------------------------------------
-3.095, -1.949 1 0% 0% *
-1.949, -0.803 18 5. 10% 9% ******************
-0.803, 0.343 48 -. 34% 24% ************************************************
0.343, 1.488 28 48% 14% ****************************
1.488, 2.634 12 mM 54% 6% ************
2.634, 3.780 34 70% 17% **********************************
3.780, 4.926 39 -. 90% 20% ***************************************
4.926, 6.071 18 95. 99% 9% ******************
6.071, 7.217 2 100% 1% **
"""
import numpy
nums = numpy.array(numbers)
stats = num_stats(nums)
counts, bins = numpy.histogram(nums, bins=bins)
draw_console_histogram(counts, bins, nums, stats=stats, count_hist=count_hist, sum_hist=sum_hist, **kwargs)
def print_num_stats(stats, units=None, formats=None, file=None):
"""
Utility function to print results of `num_stats` function.
>>> print_num_stats(num_stats(range(10)), units={'count':'iterations'}, formats={'sum':'%.5f'})
count 10 iterations
sum 45.00000
mean 4.500
sd 2.872
min 0.000
1% 0.090
5% 0.450
25% 2.250
50% 4.500
75% 6.750
95% 8.550
99% 8.910
max 9.000
>>> print_num_stats(num_stats(range(10)), formats={'sum':'', '1%':'', '5%':''})
count 10
mean 4.500
sd 2.872
min 0.000
25% 2.250
50% 4.500
75% 6.750
95% 8.550
99% 8.910
max 9.000
"""
def get_from(d, key, default):
if d is None:
return default
return d.get(key, default)
for key, value in stats.items():
fmt = "%.3f"
if isinstance(value, int):
fmt = "%d"
fmt = get_from(formats, key, fmt)
if fmt == '':
continue
unit = get_from(units, key, '')
if unit:
unit = ' ' + unit
pstr = "%s %s%s" % (key, fmt % value, unit)
if file is None:
print(pstr)
else:
print(pstr, file=file)
@_contextlib.contextmanager
def mod_stdout(transform, redirect_fn=_contextlib.redirect_stdout, print_fn=print):
"""
A context manager that modifies every line printed to stdout.
>>> with mod_stdout(lambda line: line.upper()):
... print("this will be upper")
THIS WILL BE UPPER
"""
import io
f = io.StringIO()
with redirect_fn(f):
yield
out = f.getvalue()
lines = out.split('\n')
for num, line in enumerate(lines):
if not (num == len(lines) - 1 and not line):
print_fn(transform(line))
def prefix_stdout(prefix):
"""
A context manager that prefixes every line printed to stout by `prefix`.
>>> with prefix_stdout(" * "):
... print("bullet")
* bullet
"""
return mod_stdout(lambda line: "%s%s" % (prefix, line))
if __name__ == "__main__":
if True:
print("TESTING")
import doctest
doctest.testmod(optionflags=doctest.NORMALIZE_WHITESPACE,
verbose=True,
report=True)
if False:
print("# test")
with prefix_stdout("\t* "):
print("bullet 1")
print("bullet 2")
print()
with timer("Test") as start_iteration:
for a in range(100):
start_iteration()
j = 0
for i in range(10000):
j += 10
if a == 20:
raise RuntimeError("ble")
``` |
{
"source": "jmoujaes/dpaste",
"score": 2
} |
#### File: dpaste/dpaste/highlight.py
```python
from django.conf import settings
from django.template.defaultfilters import escape
from django.utils.translation import ugettext_lazy as _
from pygments import highlight
from pygments.formatters import HtmlFormatter
from pygments.lexers import *
"""
# Get a list of all lexers, then remove every lexer whose name contains '-', '+',
# or 'with'. Those are too specific and never used. This produces a list of
# (lexer, Lexer Display Name) tuples.
from pygments.lexers import get_all_lexers
ALL_LEXER = set([(i[1][0], i[0]) for i in get_all_lexers()])
LEXER_LIST = [l for l in ALL_LEXER if not (
'-' in l[0]
or '+' in l[0]
or '+' in l[1]
or 'with' in l[1].lower()
or ' ' in l[1]
or l[0] in IGNORE_LEXER
)]
LEXER_LIST = sorted(LEXER_LIST)
"""
# The list of lexers. It is not worth autogenerating this; see above for how to
# retrieve it.
PLAIN_TEXT = 'text' # lexer name that is rendered as plain text (paragraphs)
PLAIN_CODE = 'plain' # lexer name for code with no highlighting
LEXER_LIST = getattr(settings, 'DPASTE_LEXER_LIST', (
(PLAIN_TEXT, 'Text'),
(PLAIN_CODE, 'Code'),
(_('Highlighted'), (
('abap', 'ABAP'),
('apacheconf', 'ApacheConf'),
('applescript', 'AppleScript'),
('as', 'ActionScript'),
('bash', 'Bash'),
('bbcode', 'BBCode'),
('c', 'C'),
('cpp', 'C++'),
('clojure', 'Clojure'),
('cobol', 'COBOL'),
('css', 'CSS'),
('cuda', 'CUDA'),
('dart', 'Dart'),
('delphi', 'Delphi'),
('diff', 'Diff'),
('django', 'Django'),
('erlang', 'Erlang'),
('fortran', 'Fortran'),
('go', 'Go'),
('groovy', 'Groovy'),
('haml', 'Haml'),
('haskell', 'Haskell'),
('html', 'HTML'),
('http', 'HTTP'),
('ini', 'INI'),
('irc', 'IRC'),
('java', 'Java'),
('js', 'JavaScript'),
('json', 'JSON'),
('lua', 'Lua'),
('make', 'Makefile'),
('mako', 'Mako'),
('mason', 'Mason'),
('matlab', 'Matlab'),
('modula2', 'Modula'),
('monkey', 'Monkey'),
('mysql', 'MySQL'),
('numpy', 'NumPy'),
('objc', 'Obj-C'),
('ocaml', 'OCaml'),
('perl', 'Perl'),
('php', 'PHP'),
('postscript', 'PostScript'),
('powershell', 'PowerShell'),
('prolog', 'Prolog'),
('properties', 'Properties'),
('puppet', 'Puppet'),
('python', 'Python'),
('r', 'R'),
('rb', 'Ruby'),
('rst', 'reStructuredText'),
('rust', 'Rust'),
('sass', 'Sass'),
('scala', 'Scala'),
('scheme', 'Scheme'),
('scilab', 'Scilab'),
('scss', 'SCSS'),
('smalltalk', 'Smalltalk'),
('smarty', 'Smarty'),
('sql', 'SQL'),
('tcl', 'Tcl'),
('tcsh', 'Tcsh'),
('tex', 'TeX'),
('text', 'Text'),
('vb.net', 'VB.net'),
('vim', 'VimL'),
('xml', 'XML'),
('xquery', 'XQuery'),
('xslt', 'XSLT'),
('yaml', 'YAML'),
))
))
LEXER_KEYS = [PLAIN_TEXT, PLAIN_CODE] + [i for i in dict(LEXER_LIST[2][1]).keys()]
# The default lexer is python
LEXER_DEFAULT = getattr(settings, 'DPASTE_LEXER_DEFAULT', 'python')
# Lexers which have wordwrap enabled by default
LEXER_WORDWRAP = getattr(settings, 'DPASTE_LEXER_WORDWRAP', ('text', 'rst'))
class NakedHtmlFormatter(HtmlFormatter):
def wrap(self, source, outfile):
return self._wrap_code(source)
def _wrap_code(self, source):
for i, t in source:
yield i, t
def pygmentize(code_string, lexer_name=LEXER_DEFAULT):
# Plain code is not highlighted
if lexer_name == PLAIN_CODE:
return '\n'.join([u'<span class="nn">{}</span>'.format(escape(l))
for l in code_string.splitlines()])
try:
lexer = lexer_name and get_lexer_by_name(lexer_name) \
or PythonLexer()
except Exception:
lexer = PythonLexer()
return highlight(code_string, lexer, NakedHtmlFormatter())
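# Sketch of the two code paths above (highlighted output abridged):
#   pygmentize("print(1)", "python") -> pygments-generated HTML spans
#   pygmentize("print(1)", PLAIN_CODE) -> '<span class="nn">print(1)</span>'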
``` |
{
"source": "jmoujaes/PyCRS",
"score": 3
} |
#### File: pycrs/elements/containers.py
```python
from . import directions
from . import datums
from . import ellipsoids
# the final CRS object which is instantiated with all of the below and parameters
# remember to use +no_defs when outputting to proj4
# ...
class CRS:
def __init__(self, toplevel):
"""
The main CRS class that defines a coordinate reference system and provides access
to all the sub-containers, sub-elements, parameters,
and values of the reference system in a nested structure.
Args:
- **toplevel**: The type of reference system. Can be either a projected (arbitrary coordinates)
or geographic (latitude-longitude coordinates) reference system.
"""
self.toplevel = toplevel
def to_proj4(self):
if isinstance(self.toplevel, ProjCS):
return "%s +no_defs" % self.toplevel.to_proj4()
elif isinstance(self.toplevel, GeogCS):
return "+proj=longlat %s +no_defs" % self.toplevel.to_proj4()
def to_ogc_wkt(self):
return "%s" % self.toplevel.to_ogc_wkt()
def to_esri_wkt(self):
return "%s" % self.toplevel.to_esri_wkt()
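# A CRS instance is normally obtained through the parser/loader modules rather
# than built by hand, e.g. (sketch; from_epsg_code is used the same way in
# loader.py further below):
#   crs = pycrs.parser.from_epsg_code("4326")
#   crs.to_proj4()  # -> "+proj=longlat ... +no_defs"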
##+proj Projection name (see `proj -l`)
class Projection:
proj4 = "+proj"
ogc_wkt = "PROJECTION"
esri_wkt = "PROJECTION"
def __init__(self, value):
"""
A generic container for the specific projection used.
Args:
- **value**: One of the classes defined in pycrs.elements.projections.
"""
self.value = value
def to_proj4(self):
return "+proj=%s" %self.value.proj4
def to_ogc_wkt(self):
return 'PROJECTION["%s"]' %self.value.ogc_wkt
def to_esri_wkt(self):
return 'PROJECTION["%s"]' %self.value.esri_wkt
##+datum Datum name (see `proj -ld`)
class Datum:
proj4 = "+datum"
ogc_wkt = "DATUM"
esri_wkt = "DATUM"
def __init__(self, name, ellipsoid, datumshift=None):
"""
A Datum defines the shape of the earth.
Arguments:
- **name**: One of the classes defined in pycrs.elements.datums.
- **ellipsoid**: A pycrs.elements.containers.Ellipsoid instance.
- **datumshift** (optional): A pycrs.elements.parameters.DatumShift instance.
"""
self.name = name
self.ellips = ellipsoid
self.datumshift = datumshift
def to_proj4(self):
if self.datumshift:
return "%s %s" % (self.ellips.to_proj4(), self.datumshift.to_proj4())
elif isinstance(self.name, datums.Unknown):
return "%s" % self.ellips.to_proj4()
elif not self.name.proj4:
# has no proj4 equivalent and is better left unspecified, so only return ellips
return "%s" % self.ellips.to_proj4()
else:
return "+datum=%s %s" % (self.name.proj4, self.ellips.to_proj4())
def to_ogc_wkt(self):
if self.datumshift:
return 'DATUM["%s", %s, %s]' % (self.name.ogc_wkt, self.ellips.to_ogc_wkt(), self.datumshift.to_ogc_wkt())
else:
return 'DATUM["%s", %s]' % (self.name.ogc_wkt, self.ellips.to_ogc_wkt())
def to_esri_wkt(self):
if self.datumshift:
return 'DATUM["%s", %s, %s]' % (self.name.esri_wkt, self.ellips.to_esri_wkt(), self.datumshift.to_esri_wkt())
else:
return 'DATUM["%s", %s]' % (self.name.esri_wkt, self.ellips.to_esri_wkt())
def to_geotiff(self):
pass
#return "GeogGeodeticDatum"
##+ellps Ellipsoid name (see `proj -le`)
class Ellipsoid:
proj4 = "+ellps"
ogc_wkt = "SPHEROID"
esri_wkt = "SPHEROID"
def __init__(self, name, semimaj_ax=None, inv_flat=None):
"""
The ellipsoid that defines the shape of the earth.
Arguments:
- **name**: One of the classes defined in pycrs.elements.ellipsoids.
- **semimaj_ax**: A float giving the length of the ellipsoid's semimajor axis.
- **inv_flat**: A float representing the inverse flattening factor.
"""
self.name = name
# get default values if not specified
if semimaj_ax == None:
semimaj_ax = self.name.semimaj_ax
if inv_flat == None:
inv_flat = self.name.inv_flat
self.semimaj_ax = semimaj_ax
self.inv_flat = inv_flat
def to_proj4(self):
if isinstance(self.name, ellipsoids.Unknown):
# has no proj4 equivalent and is better left unspecified
return "+a=%s +f=%s" % (self.semimaj_ax, self.inv_flat)
elif not self.name.proj4:
# has no proj4 equivalent and is better left unspecified
return "+a=%s +f=%s" % (self.semimaj_ax, self.inv_flat)
else:
return "+ellps=%s +a=%s +f=%s" % (self.name.proj4, self.semimaj_ax, self.inv_flat)
def to_ogc_wkt(self):
return 'SPHEROID["%s", %s, %s]' % (self.name.ogc_wkt, self.semimaj_ax, self.inv_flat)
def to_esri_wkt(self):
return 'SPHEROID["%s", %s, %s]' % (self.name.esri_wkt, self.semimaj_ax, self.inv_flat)
def to_geotiff(self):
pass
#return "GeogEllipsoid"
#GEOGCS
class GeogCS:
ogc_wkt = "GEOGCS"
esri_wkt = "GEOGCS"
def __init__(self, name, datum, prime_mer, angunit, twin_ax=None):
"""
A geographic coordinate system where the coordinates are in the latitude-longitude space.
Arguments:
- **name**: An arbitrary name given to this geographic coordinate system, to represent its unique
configuration of datum, prime meridian, angular unit, and twin axes. The actual name
is just for human readability, and does not actually have any implication.
- **datum**: A pycrs.elements.container.Datum instance, representing the shape of the earth.
- **prime_mer**: A pycrs.elements.parameters.PrimeMeridian instance, representing the prime meridian
coordinate where the longitude is considered to be 0.
- **angunit**: A pycrs.elements.parameters.AngularUnit instance, representing the angular unit in which
coordinates are measured.
- **twin_ax**: A pair of pycrs.elements.directions.North/South/East/West instances, one for each axis,
representing the compass direction in which each axis increases. Defaults to East and North.
"""
self.name = name
self.datum = datum
self.prime_mer = prime_mer
self.angunit = angunit
if twin_ax == None:
# default axes
twin_ax = directions.East(), directions.North()
self.twin_ax = twin_ax
def to_proj4(self):
# dont parse axis to proj4, because in proj4, axis only applies to the cs, ie the projcs (not the geogcs, where wkt can specify with axis)
return "%s %s %s" % (self.datum.to_proj4(), self.prime_mer.to_proj4(), self.angunit.to_proj4() )
def to_ogc_wkt(self):
return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_ogc_wkt(), self.prime_mer.to_ogc_wkt(), self.angunit.to_ogc_wkt(), self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
def to_esri_wkt(self):
return 'GEOGCS["%s", %s, %s, %s, AXIS["Lon", %s], AXIS["Lat", %s]]' % (self.name, self.datum.to_esri_wkt(), self.prime_mer.to_esri_wkt(), self.angunit.to_esri_wkt(), self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
#PROJCS
class ProjCS:
ogc_wkt = "PROJCS"
esri_wkt = "PROJCS"
def __init__(self, name, geogcs, proj, params, unit, twin_ax=None):
"""
Arguments:
- **name**: Arbitrary name of the projected coordinate system.
- **geogcs**: A pycrs.elements.containers.GeogCS instance.
- **proj**: A pycrs.elements.containers.Projection instance.
- **params**: A list of custom parameters from the pycrs.elements.parameters module.
- **unit**: A pycrs.elements.parameters.Unit instance, representing the angular unit in which
coordinates are measured.
- **twin_ax**: A pair of pycrs.elements.directions.North/South/East/West instances, one for each axis,
representing the compass direction in which each axis increases. Defaults to East and North.
"""
self.name = name
self.geogcs = geogcs
self.proj = proj
self.params = params
self.unit = unit
if twin_ax == None:
# default axes
twin_ax = directions.East(), directions.North()
self.twin_ax = twin_ax
def to_proj4(self):
string = "%s %s " % (self.proj.to_proj4(), self.geogcs.to_proj4())
string += " ".join(param.to_proj4() for param in self.params)
string += " %s" % self.unit.to_proj4()
string += " +axis=" + self.twin_ax[0].proj4 + self.twin_ax[1].proj4 + "u" # up set as default because only proj4 can set it I think...
return string
def to_ogc_wkt(self):
string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_ogc_wkt(), self.proj.to_ogc_wkt() )
string += ", ".join(param.to_ogc_wkt() for param in self.params)
string += ', %s' % self.unit.to_ogc_wkt()
string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].ogc_wkt, self.twin_ax[1].ogc_wkt )
return string
def to_esri_wkt(self):
string = 'PROJCS["%s", %s, %s, ' % (self.name, self.geogcs.to_esri_wkt(), self.proj.to_esri_wkt() )
string += ", ".join(param.to_esri_wkt() for param in self.params)
string += ', %s' % self.unit.to_esri_wkt()
string += ', AXIS["X", %s], AXIS["Y", %s]]' % (self.twin_ax[0].esri_wkt, self.twin_ax[1].esri_wkt )
return string
```
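As a quick orientation before the loader and test scripts below, here is a minimal usage sketch of these container classes. It assumes the pycrs package from this repository is installed; the EPSG code and the printed strings are only illustrative, and `from_epsg_code` is the parser helper referenced later in loader.py and testbatch.py.
```python
# Minimal sketch (not part of the original sources): obtain a CRS through the
# parser and serialize it with the to_* methods defined by the containers above.
import pycrs

crs = pycrs.parser.from_epsg_code("4326")   # returns a container-based CRS object
print(crs.to_proj4())                       # e.g. "+proj=longlat +datum=WGS84 ..."
print(crs.to_ogc_wkt())                     # GEOGCS["...", DATUM[...], PRIMEM[...], ...]
```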
#### File: PyCRS/pycrs/loader.py
```python
import json
import sys
try:
import urllib.request as urllib2
except ImportError:
import urllib2
from . import parser
PY3 = (int(sys.version_info[0]) > 2)
#################
# USER FUNCTIONS
#################
def from_url(url, format=None):
"""
    Returns the crs object parsed from a string of the specified format, read from the given URL.
Arguments:
- *url*: The url where the crs string is to be read from.
- *format* (optional): Which format to parse the crs string as. One of "ogc wkt", "esri wkt", or "proj4".
If None, tries to autodetect the format for you (default).
Returns:
- CRS object.
"""
# first get string from url
string = urllib2.urlopen(url).read()
if PY3 is True:
        # decode bytes into str
string = string.decode('utf-8')
# then determine parser
if format:
# user specified format
format = format.lower().replace(" ", "_")
        func = getattr(parser, "from_%s" % format)
else:
# unknown format
func = parser.from_unknown_text
# then load
crs = func(string)
return crs
def from_file(filepath):
"""
Returns the crs object from a file, with the format determined from the filename extension.
Arguments:
- *filepath*: filepath to be loaded, including extension.
"""
if filepath.endswith(".prj"):
string = open(filepath, "r").read()
return parser.from_esri_wkt(string)
elif filepath.endswith((".geojson",".json")):
raw = open(filepath).read()
geoj = json.loads(raw)
if "crs" in geoj:
crsinfo = geoj["crs"]
if crsinfo["type"] == "name":
string = crsinfo["properties"]["name"]
return parser.from_unknown_text(string)
elif crsinfo["type"] == "link":
url = crsinfo["properties"]["name"]
type = crsinfo["properties"].get("type")
                return from_url(url, format=type)
else: raise Exception("invalid geojson crs type: must be either name or link")
else:
# assume default wgs84 as per the spec
return parser.from_epsg_code("4326")
## elif filepath.endswith((".tif",".tiff",".geotiff")):
## pass
## # ...
```
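For completeness, a minimal sketch of the two loader entry points defined above; the file name and URL are hypothetical, and the format is auto-detected when `format` is left as None.
```python
# Minimal sketch (hypothetical paths): from_file dispatches on the extension,
# from_url fetches the string and hands it to the matching parser function.
from pycrs import loader

crs = loader.from_file("area_of_interest.prj")           # .prj -> parsed as ESRI WKT
# crs = loader.from_url("http://example.com/crs.prj")    # format auto-detected
print(crs.to_proj4())
```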
#### File: jmoujaes/PyCRS/testbatch.py
```python
import pycrs
import traceback
import logging
###########################
# Drawing routine for testing
raw = None
def render_world(crs, savename):
import urllib2
import json
import pygeoj
import pyagg
import pyproj
import random
# load world borders
global raw
if not raw:
raw = urllib2.urlopen("https://raw.githubusercontent.com/johan/world.geo.json/master/countries.geo.json").read()
rawdict = json.loads(raw)
data = pygeoj.load(data=rawdict)
# convert coordinates
fromproj = pyproj.Proj("+init=EPSG:4326")
toproj = pyproj.Proj(crs.to_proj4())
for feat in data:
if feat.geometry.type == "Polygon":
feat.geometry.coordinates = [zip(*pyproj.transform(fromproj, toproj, zip(*ring)[0], zip(*ring)[1]))
for ring in feat.geometry.coordinates]
elif feat.geometry.type == "MultiPolygon":
feat.geometry.coordinates = [
[zip(*pyproj.transform(fromproj, toproj, zip(*ring)[0], zip(*ring)[1]))
for ring in poly]
for poly in feat.geometry.coordinates]
feat.geometry.update_bbox() # important to clear away old bbox
# get zoom area
data.add_all_bboxes()
data.update_bbox()
bbox = data.bbox
## # to avoid inf bounds and no render in satellite view
## xmins, ymins, xmaxs, ymaxs = zip(*(feat.geometry.bbox for feat in data))
## inf = float("inf")
## xmaxs = (xmax for xmax in xmaxs if xmax != inf)
## ymaxs = (ymax for ymax in ymaxs if ymax != inf)
## bbox = (min(xmins), min(ymins), max(xmaxs), max(ymaxs))
# set up drawing
c = pyagg.Canvas(1000,1000)
c.geographic_space()
c.zoom_bbox(*bbox)
c.zoom_out(1.3)
# draw countries
for feat in data:
try: c.draw_geojson(feat.geometry,
fillcolor=tuple(random.randrange(255) for _ in range(3)),
outlinecolor="white")
except:
# NOTE: feat.__geo_interface__ is one level too high maybe??
print("unable to draw?", feat.geometry)
# draw text of the proj4 string used
c.percent_space()
c.draw_text(crs.to_proj4(), (50,10))
# save
c.save("testrenders/"+savename+".png")
# Source string generator
def sourcestrings(format):
# commonly used projections on global scale
# from http://www.remotesensing.org/geotiff/proj_list/
##Albers Equal-Area Conic
yield pycrs.utils.crscode_to_string("sr-org", 62, format)
##Azimuthal Equidistant
yield pycrs.utils.crscode_to_string("esri", 54032, format)
##Cassini-Soldner
# ...ignore, too specific
##Cylindrical Equal Area
yield pycrs.utils.crscode_to_string("sr-org", 8287, format)
##Eckert IV
yield pycrs.utils.crscode_to_string("esri", 54012, format)
##Eckert VI
yield pycrs.utils.crscode_to_string("esri", 54010, format)
##Equidistant Conic
yield pycrs.utils.crscode_to_string("esri", 54027, format)
##Equidistant Cylindrical
yield pycrs.utils.crscode_to_string("epsg", 3786, format)
##Equirectangular
yield pycrs.utils.crscode_to_string("sr-org", 8270, format)
##Gauss-Kruger
# ...not found???
##Gall Stereographic
yield pycrs.utils.crscode_to_string("esri", 54016, format)
##GEOS - Geostationary Satellite View
yield pycrs.utils.crscode_to_string("sr-org", 81, format)
##Gnomonic
# ...not found
##Hotine Oblique Mercator
yield pycrs.utils.crscode_to_string("esri", 54025, format)
##Krovak
yield pycrs.utils.crscode_to_string("sr-org", 6688, format)
##Laborde Oblique Mercator
# ...not found # yield pycrs.utils.crscode_to_string("epsg", 9813, format)
##Lambert Azimuthal Equal Area
yield pycrs.utils.crscode_to_string("sr-org", 28, format)
##Lambert Conic Conformal (1SP)
# ...not found
##Lambert Conic Conformal (2SP)
yield pycrs.utils.crscode_to_string("sr-org", 29, format) # yield pycrs.utils.crscode_to_string("epsg", 9802, format)
##Lambert Conic Conformal (2SP Belgium)
# ...ignore, too specific
##Lambert Cylindrical Equal Area
yield pycrs.utils.crscode_to_string("sr-org", 8287, format)
##Mercator (1SP)
yield pycrs.utils.crscode_to_string("sr-org", 16, format)
##Mercator (2SP)
yield pycrs.utils.crscode_to_string("sr-org", 7094, format)
##Miller Cylindrical
yield pycrs.utils.crscode_to_string("esri", 54003, format)
##Mollweide
yield pycrs.utils.crscode_to_string("esri", 54009, format)
##New Zealand Map Grid
# ...ignore, too specific
##Oblique Mercator
yield pycrs.utils.crscode_to_string("esri", 54025, format)
##Oblique Stereographic
yield pycrs.utils.crscode_to_string("epsg", 3844, format)
##Orthographic
yield pycrs.utils.crscode_to_string("sr-org", 6980, format)
##Polar Stereographic
yield pycrs.utils.crscode_to_string("sr-org", 8243, format)
##Polyconic
yield pycrs.utils.crscode_to_string("esri", 54021, format)
##Robinson
yield pycrs.utils.crscode_to_string("esri", 54030, format)
##Rosenmund Oblique Mercator
# ...not found
##Sinusoidal
yield pycrs.utils.crscode_to_string("sr-org", 6965, format)
##Swiss Oblique Cylindrical
# ...ignore, too specific
##Swiss Oblique Mercator
# ...ignore, too specific
##Stereographic
yield pycrs.utils.crscode_to_string("sr-org", 6711, format)
##Transverse Mercator
# ...not found???
##Transverse Mercator (Modified Alaska)
# ...ignore, too specific
##Transverse Mercator (South Oriented)
# ...ignore, too specific
##Tunisia Mining Grid
# ...ignore, too specific
##VanDerGrinten
yield pycrs.utils.crscode_to_string("sr-org", 6978, format)
# bunch of randoms
#yield pycrs.utils.crscode_to_string("esri", 54030, format)
#yield pycrs.utils.crscode_to_string("sr-org", 7898, format)
#yield pycrs.utils.crscode_to_string("sr-org", 6978, format)
#yield pycrs.utils.crscode_to_string("epsg", 4324, format)
#yield pycrs.utils.crscode_to_string("sr-org", 6618, format)
#yield pycrs.utils.crscode_to_string("sr-org", 22, format)
#yield pycrs.utils.crscode_to_string("esri", 54031, format)
# add more...
# Misc other crs for testing
#crs = pycrs.utils.crscode_to_string("esri", 54030, "proj4")
#crs = pycrs.utils.crscode_to_string("sr-org", 6978, "proj4")
#crs = pycrs.parser.from_sr_code(7898)
#crs = pycrs.parser.from_epsg_code(4324)
#crs = pycrs.parser.from_sr_code(6618)
#crs = pycrs.parser.from_sr_code(22)
#crs = pycrs.parser.from_esri_code(54031)
#proj4 = "+proj=longlat +ellps=WGS84 +datum=WGS84"
#proj4 = "+proj=aea +lat_1=24 +lat_2=31.5 +lat_0=24 +lon_0=-84 +x_0=400000 +y_0=0 +ellps=GRS80 +units=m +no_defs "
#proj4 = "+proj=larr +datum=WGS84 +lon_0=0 +lat_ts=45 +x_0=0 +y_0=0 +ellps=WGS84 +units=m +no_defs"
#proj4 = "+proj=nsper +datum=WGS84 +ellps=WGS84 +lon_0=-60 +lat_0=40 +h=2000000000000000000000000"
# Testing format outputs
def testoutputs(crs):
print("ogc_wkt:\n")
try:
print(crs.to_ogc_wkt()+"\n")
global ogcwkt_outputs
ogcwkt_outputs += 1
except: logging.warn(traceback.format_exc())
print("esri_wkt:\n")
try:
print(crs.to_esri_wkt()+"\n")
global esriwkt_outputs
esriwkt_outputs += 1
except: logging.warn(traceback.format_exc())
print("proj4:\n")
try:
print(crs.to_proj4()+"\n")
global proj4_outputs
proj4_outputs += 1
except: logging.warn(traceback.format_exc())
#############################################################################
#############################################################################
#############################################################################
###########################
# From OGC WKT
print("--------")
print("Testing from ogc wkt:")
print("")
totals = 0
loaded = 0
ogcwkt_outputs = 0
esriwkt_outputs = 0
proj4_outputs = 0
renders = 0
for wkt in sourcestrings("ogcwkt"):
totals += 1
# test parsing
try:
print("From:\n")
print(wkt)
print("")
crs = pycrs.parser.from_ogc_wkt(wkt)
loaded += 1
# test outputs
print("To:\n")
testoutputs(crs)
# test render
try:
print("Rendering...")
savename = "%i_from_ogcwkt" % totals
render_world(crs, savename)
renders += 1
            print("Successfully rendered! \n")
except:
logging.warn(traceback.format_exc()+"\n")
except:
logging.warn(traceback.format_exc()+"\n")
print("Summary results:")
print(" Loaded: %f%%" % (loaded/float(totals)*100) )
print(" Outputs (OGC WKT): %f%%" % (ogcwkt_outputs/float(totals)*100) )
print(" Outputs (ESRI WKT): %f%%" % (esriwkt_outputs/float(totals)*100) )
print(" Outputs (Proj4): %f%%" % (proj4_outputs/float(totals)*100) )
print(" Renders: %f%%" % (renders/float(totals)*100) )
###########################
# From PROJ4
print("--------")
print("Testing from proj4:")
print("")
totals = 0
loaded = 0
ogcwkt_outputs = 0
esriwkt_outputs = 0
proj4_outputs = 0
renders = 0
for proj4 in sourcestrings("proj4"):
totals += 1
# test parsing
try:
print("From:\n")
print(proj4)
print("")
crs = pycrs.parser.from_proj4(proj4)
loaded += 1
# test outputs
print("To:\n")
testoutputs(crs)
# test render
try:
print("Rendering...")
savename = "%i_from_proj4" % totals
render_world(crs, savename)
renders += 1
            print("Successfully rendered! \n")
except:
logging.warn(traceback.format_exc()+"\n")
except:
logging.warn(traceback.format_exc()+"\n")
print("Summary results:")
print(" Loaded: %f%%" % (loaded/float(totals)*100) )
print(" Outputs (OGC WKT): %f%%" % (ogcwkt_outputs/float(totals)*100) )
print(" Outputs (ESRI WKT): %f%%" % (esriwkt_outputs/float(totals)*100) )
print(" Outputs (Proj4): %f%%" % (proj4_outputs/float(totals)*100) )
print(" Renders: %f%%" % (renders/float(totals)*100) )
###########################
# From ESRI WKT/PRJ FILE
print("--------")
print("Testing from esri wkt:")
print("")
totals = 0
loaded = 0
ogcwkt_outputs = 0
esriwkt_outputs = 0
proj4_outputs = 0
renders = 0
for wkt in sourcestrings("esriwkt"):
totals += 1
# test parsing
try:
print("From:\n")
print(wkt)
print("")
crs = pycrs.parser.from_esri_wkt(wkt)
loaded += 1
# test outputs
print("To:\n")
testoutputs(crs)
# test render
try:
print("Rendering...")
savename = "%i_from_esriwkt" % totals
render_world(crs, savename)
renders += 1
            print("Successfully rendered! \n")
except:
logging.warn(traceback.format_exc()+"\n")
except:
logging.warn(traceback.format_exc()+"\n")
print("Summary results:")
print(" Loaded: %f%%" % (loaded/float(totals)*100) )
print(" Outputs (OGC WKT): %f%%" % (ogcwkt_outputs/float(totals)*100) )
print(" Outputs (ESRI WKT): %f%%" % (esriwkt_outputs/float(totals)*100) )
print(" Outputs (Proj4): %f%%" % (proj4_outputs/float(totals)*100) )
print(" Renders: %f%%" % (renders/float(totals)*100) )
```
{
"source": "jmou/kn",
"score": 2
} |
#### File: flow/mdnote/md-to-steps.py
```python
import os
import sys
cellbuf = []
def scan_to_cell():
for line in sys.stdin:
if line.startswith('# '):
global cellbuf
cellbuf = [line]
return line[2:].strip()
def driver_ref(driver):
with open(f'inref/drivers/{driver}') as fh:
return fh.read().strip()
def make_step(name, prereqs, driver, script):
with open(f'out/scripts/{name}', 'w') as fh:
fh.write(script)
step = [
'process=command:chmod +x in/driver && ./in/driver',
f'in/driver={driver_ref(driver)}',
f'in/script=file:scripts/{name}',
]
for prereq in prereqs:
step.append(f'in/inputs/{prereq}/=_pos:{prereq}:out/')
return step
def main():
    os.mkdir('out/scripts')  # needed by make_step(), which writes each cell's script here
os.mkdir('out/cells') # needed to write cellbuf
steps = {}
while True:
name = scan_to_cell()
if not name:
break
prereqs = []
for line in sys.stdin:
cellbuf.append(line)
line = line.strip()
if line.startswith('- '):
prereqs.append(line[2:])
else:
assert line.startswith('```'), 'expected - prereq or ```driver'
driver = line[3:]
break
script = []
for line in sys.stdin:
cellbuf.append(line)
if line.startswith('```'):
break
script.append(line)
script = ''.join(script)
steps[name] = make_step(name, prereqs, driver, script)
with open(f'out/cells/{name}', 'w') as fh:
fh.write(''.join(cellbuf))
with open('out/order', 'w') as fh:
print('\n'.join(steps), file=fh)
with open('out/plan', 'w') as fh:
for name, step in steps.items():
print(f'_pos={name}', file=fh)
for line in step:
print(line, file=fh)
print(file=fh)
print('_pos=main', file=fh)
print('process=identity', file=fh)
print('in/order=file:order', file=fh)
for name in steps:
print(f'in/outs/{name}/=_pos:{name}:out/', file=fh)
print(f'in/cells/{name}=file:cells/{name}', file=fh)
print(file=fh)
if __name__ == '__main__':
main()
```
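The markdown layout that main() expects on stdin can be read off scan_to_cell() and the parsing loop above: each cell opens with a `# name` heading, lists zero or more `- prereq` lines, and then carries a fenced block whose info string names a driver under inref/drivers/. A hedged sketch of such an input follows, with hypothetical cell and driver names; the fence marker is assembled from three backtick characters so it does not collide with the surrounding code fence.
```python
# Hedged sketch of the input md-to-steps.py consumes on stdin; the cell name
# "build", prereq "fetch" and driver "sh" are made-up examples.
fence = "`" * 3                  # avoid writing a literal fence in this block
sample = "\n".join([
    "# build",                   # cell heading -> scan_to_cell() returns "build"
    "- fetch",                   # prerequisite cell, wired in as in/inputs/fetch/
    fence + "sh",                # driver name -> resolved via inref/drivers/sh
    "echo building",             # script body -> written to out/scripts/build
    fence,                       # closes the cell's script block
])
# The script itself reads sys.stdin, e.g.:  python md-to-steps.py < note.md
```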
{
"source": "jmount1992/TerminalUI",
"score": 3
} |
#### File: TerminalUI/examples/asynchronous_read_write.py
```python
import time
import threading
from typing import Union
from TerminalUI import TerminalUI
### USER DEFINED FUNCTIONS ###
def command_entered_testing(terminal_ui : TerminalUI, command : str):
"""An example function that could be passed into the TerminalUI command_entered_callback argument, which will be called everytime a user enters a command.
Args:
terminal_ui (TerminalUI): The TerminalUI object that called the command_entered_callback function
command (str): The command entered by the user
"""
# Convert command string into a byte array
byte_array = bytearray([ord(x) for x in command])
byte_array_hex_string = ("".join(" 0x%02x"%(x) for x in byte_array)).strip()
# Set the command debug textbox to contain the command entered by the user as plain text and as a hex coded byte array
txt = " Input Text: %s"%command
txt += "\n HEX Bytes Sent: %s"%byte_array_hex_string
terminal_ui.set_command_debug_text(txt)
def option_item_selected_testing(terminal_ui : TerminalUI, option_name : str, value : Union[int, float, bool, str], index : int):
"""An example function that could be passed into the TerminalUI option_item_selected_callback argument, which will be called everytime an option fires its change event.
Args:
terminal_ui (TerminalUI): The TerminalUI object that called the option_item_selected_callback function
option_name (str): The name of the option that fired the change event
value (Union[int, float, bool, str]): The value of the option
index (int): The index of the value if the option is a list of selectable values
"""
global enable_read
# enable/disable enable_read
if option_name == 'read_enabled':
enable_read = value
terminal_ui.set_receive_text('Read Enabled: %s'%str(value), False)
def read_thread_callback():
global threads_enabled, enable_read
while threads_enabled:
while threads_enabled and enable_read:
            # Create text to dump into the receive textbox
txt = time.strftime("%Y-%m-%d %H:%M:%S - Read", time.gmtime(time.time()))
terminal_ui.set_receive_text(txt, False)
# sleep for 1 second
time.sleep(1)
### MAIN FUNCTION ###
threads_enabled = True
enable_read = True
read_thread = None
if __name__ == "__main__":
# Options
options = {'Read Settings': {'read_enabled': ([True, False], 'Read Enabled', False)}}
# Create TerminalUI object with the title 'Terminal UI v0.1', command_entered_testing function callback,
# the options specified and the option_item_selected_testing callback function.
terminal_ui = TerminalUI('Terminal UI v0.1', command_entered_testing, options, option_item_selected_testing)
# Start serial read thread
read_thread = threading.Thread(target=read_thread_callback, args=())
read_thread.start()
    # Run the terminal, catching the ctrl+c keyboard interrupt to close everything appropriately
try:
terminal_ui.run()
except KeyboardInterrupt:
pass
# Close appropriately
enable_read = False
threads_enabled = False
```
{
"source": "jmourelos/doconv",
"score": 2
} |
#### File: doconv/plugin/docbooktodita.py
```python
from os import path
# doconv imports
from doconv.plugin import base
from doconv.util import xslt_process
class DocBookToDita(base.PluginBase):
def get_supported_conversions(self):
return [("docbook", "dita")]
def check_dependencies(self):
pass
def convert(self, input_file, input_format, output_format,
output_file=None):
self.logger.debug("docbooktodita plugin converting...")
current_dir = path.dirname(__file__)
xsl_file = path.join(
current_dir, "docbooktodita/db2dita/docbook2dita.xsl")
xslt_process(input_file, output_file, xsl_file)
self.logger.debug("Generated temporary file: {0}".format(output_file))
return output_file
```
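A minimal usage sketch of the plugin above; the file names are hypothetical, and it assumes that base.PluginBase can be instantiated without arguments and provides the self.logger used inside convert().
```python
# Hedged sketch (hypothetical files); the XSLT stylesheet shipped under
# docbooktodita/db2dita/ performs the actual DocBook -> DITA transformation.
from doconv.plugin.docbooktodita import DocBookToDita

plugin = DocBookToDita()
print(plugin.get_supported_conversions())        # [("docbook", "dita")]
plugin.convert("manual.xml", "docbook", "dita", output_file="manual.dita")
```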
{
"source": "jmouroux/video-sdk",
"score": 2
} |
#### File: ffmpeg/tutorials/13_ffmpeg_transcode_only_split_stitch.py
```python
ASPECT_RATIO = (16/9)
import subprocess
from optparse import OptionParser
import time
from datetime import datetime
import json
import re
def count_substrings(string, substring):
string_size = len(string)
substring_size = len(substring)
count = 0
for i in range(0,string_size-substring_size+1):
if string[i:i+substring_size] == substring:
count+=1
return count
def main():
(filename, ofilename, input_encoder, output_encoder, bitrate) = parse_options()
output = subprocess.Popen("xbutil scan",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
result = outputS.find('Found total ')
if (result == -1):
print ("Can't determine number of U30s in the system, exiting ...")
raise SystemExit
num_devices = int(re.search(r'\d+', outputS).group())
print ("There are " + str(int(num_devices/2)) + " cards, " + str(num_devices) + " devices in the system")
xres = int(re.search(r'\d+', outputS).group())
if input_encoder == "h265":
input_encoder = "hevc"
if input_encoder != "hevc" and input_encoder != "h264":
print ("Input encoder needs to be h264, h265 or hevc")
raise SystemExit
if output_encoder == "h265":
output_encoder = "hevc"
if output_encoder != "hevc" and output_encoder != "h264":
print ("Output encoder needs to be h264, h265 or hevc")
raise SystemExit
if bitrate < 1.0 or bitrate > 25.0:
print ("Bitrate should be between 1.0 ... 25.0 Mbit/s")
raise SystemExit
br =str(bitrate)
if ofilename[-4:] != ".mp4":
print ("Only mp4 output file format supported")
raise SystemExit
if filename[-4:] != ".mp4" and filename[-4:] != ".mov" and filename[-4:] != ".mkv" and filename[-4:] != ".MOV":
print ("Only mp4 & mov & mkv input file format supported")
raise SystemExit
if filename == ofilename:
print ("Source and destination filename cannot be the same")
raise SystemExit
startSec = time.time()
#ffprobe -v error -select_streams v:0 -show_entries stream=width,height,duration,r_frame_rate -of default=nw=1
output = subprocess.Popen("ffprobe -v error -select_streams v:0 -show_entries stream=width -of default=nw=1 "+filename+" 2>&1",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
result = outputS.find('width=')
if (result == -1):
print ("Can't determine clip resolution, exiting ...")
raise SystemExit
xres = int(re.search(r'\d+', outputS).group())
output = subprocess.Popen("ffprobe -v error -select_streams v:0 -show_entries stream=height -of default=nw=1 "+filename+" 2>&1",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
result = outputS.find('height=')
if (result == -1):
print ("Can't determine clip resolution, exiting ...")
raise SystemExit
yres = int(re.search(r'\d+', outputS).group())
# find out length of the clip such that we can determine segments sizes
output = subprocess.Popen("ffprobe "+filename+" 2>&1",
shell = True,
stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
#extract the framerate from the string
result = outputS.find('fps, ')
if (result == -1):
print ("Can't determine framerate, exiting ...")
raise SystemExit
tmpS = outputS[result+5:result+14]
framerateS = tmpS.split()
framerate = float (framerateS[0])
print("")
#extract the video duration from the string
result = outputS.find('Duration: ')
if (result == -1):
print ("Can't determine video length, exiting ...")
raise SystemExit
video_lengthS = outputS[result+10:result+18]
try:
pt = datetime.strptime(video_lengthS,'%H:%M:%S')
video_length = pt.second + pt.minute*60 + pt.hour*3600
print("Video clip parameters:")
print (" length in seconds : "+str(video_length))
print (" length in hh:mm:ss: "+video_lengthS)
except ValueError:
print ("Can't determine video length, exiting ...")
raise SystemExit
print(" resolution: "+ str(xres)+"x"+str(yres))
print(" framerate: "+ str(framerate))
totFrames = video_length * framerate
if float((xres/yres)/(ASPECT_RATIO)) != 1.0 :
print ("Example script only supports 16:9 aspect ratios (e.g. 4k, 1080p, 720p)")
raise SystemExit
elif xres == 3840:
device_split_count = 1 * (int(60/framerate))
maxFPS=num_devices * 60
elif xres == 1920:
device_split_count = 4 * (int(60/framerate))
maxFPS=num_devices * 240
elif xres == 1280:
device_split_count = 9 * (int(60/framerate))
maxFPS=num_devices * 540
else:
print ("Resolutions lower than 720p not implemented, exiting!")
raise SystemExit
split_count = device_split_count * num_devices
framesinClip = framerate * video_length / split_count
split_length = int(video_length / split_count) + 1
print ("")
print ("Start splitting clip in " + str(split_count)+ " segments")
# creating cmd to be run for splitting into segments
if split_count != 1:
split_cmd = "ffmpeg -nostdin -loglevel info -vsync 0 -i " + filename + " -c copy -f segment -segment_time " \
+ str(split_length) + " -y tmpfile" + "%2d." + filename[-3:] + " > stdout.log 2>&1 \n"
else:
split_cmd = "cp " + filename + " tmpfile00." + filename[-3:]
# run the command in a blocking way
output = subprocess.Popen(split_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
# check if the number of segments written equals the desired split_count
output = subprocess.Popen("ls tmpfile* | wc -l", shell = True, stdout = subprocess.PIPE).stdout.read()
if int(output) < split_count:
print ("Video file may not be splittable ...")
print ("Only able to create " + str(int(output)) + " segments for parallel processing")
raise SystemExit
if int(output) > split_count:
print ("Too many tmpfiles; Please delete old tmpfiles ...")
raise SystemExit
print ("")
clipNum = 0
for n in range(0, num_devices):
for m in range(0, device_split_count):
transcode_cmd = "ffmpeg -loglevel info -xlnx_hwdev "+ str(n)+" -vsync 0 -c:v mpsoc_vcu_" + input_encoder + " -i tmpfile" + \
format(clipNum, '02d') + filename[-4:] + \
" -periodicity-idr 120 -b:v " + br + "M -max-bitrate " + \
br + "M -c:v mpsoc_vcu_" \
+ output_encoder + " -y tmpfileout" + \
format(clipNum, '02d') + ofilename[-4:] + " > stdout" +str(n)+".log 2>&1 & \n"
output = subprocess.Popen(transcode_cmd, shell = True)
time.sleep(0.1)
clipNum += 1
print ("Start transcoding segments")
# wait until all ffmpeg processes are done
pidsExist = True
tail_cmd = "tail -1 stdout0.log"
ps_cmd = "ps -ef | grep ffmpeg"
percentDone = 10
print("")
print(" 0 percent of transcoding completed")
while pidsExist:
time.sleep(0.1)
output = subprocess.Popen(ps_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
nr = count_substrings(str(output), "ffmpeg -loglevel info -xlnx_hwdev")
if nr == 0:
pidsExist = False
output = subprocess.Popen(tail_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
outputS = str(output)
outputpartS = outputS[-150:]
result = outputpartS.find('frame=')
if result != -1:
frameS = outputpartS[result+6:result+20].split()
frame = int(frameS[0])
if int(100.0 * frame/framesinClip) > percentDone:
if percentDone > 95:
percentDone = 150
else:
print(" " + str(percentDone) + " percent of transcoding completed")
if percentDone > 89:
percentDone = percentDone + 5
else:
percentDone = percentDone + 10
print("100 percent of transcoding completed")
#start concatenating the transcoded files
print("")
print ("Start concatenating segments into final clip")
cmd = "printf \"file '%s'\\n\" tmpfileout* > mylist.txt"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm -f " + ofilename
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "ffmpeg -f concat -safe 0 -i mylist.txt -c copy " + ofilename + " > stdout.log 2>&1"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm tmpfile*"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm mylist.txt"
output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
cmd = "rm stdout*.log"
# output = subprocess.Popen(cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
endSec = time.time()
totSec = int(endSec-startSec)
print(" ")
if totSec > 119:
print("Time from start to completion : "+ str(totSec) + \
" seconds (" + str(int(totSec/60)) + " minutes and " + \
str(totSec - 60*(int(totSec/60))) + " seconds)")
elif totSec > 59:
print("Time from start to completion : "+ str(totSec) + \
" seconds (1 minute and " + \
str(totSec - 60) + " seconds)")
else:
print("Time from start to completion : "+ str(totSec) + \
" seconds")
print(" ")
print("This clip was processed "+str(round(1.0*video_length/totSec,1))+" times faster than realtime")
print(" ")
print("This clip was effectively processed at " + str(round(totFrames/totSec,2)) + " FPS")
print(" ")
print("Efficiency=" + str(round((totFrames/totSec)/maxFPS,2)*100) + "%")
def destroy():
# Release resource
print("Exiting ...")
def parse_options():
parser = OptionParser()
parser.add_option("-s", "--sourcefile",
dest = "ifilename",
help = "input file to convert",
type = "string",
action = "store"
)
parser.add_option("-d", "--destinationfile",
dest = "ofilename",
help = "output file",
type = "string",
action = "store"
)
parser.add_option("-i", "--icodec",
dest = "input_encoder",
                      help = "input encoding standard <h264, hevc, h265> \
default h264",
type = "string",
action = "store", default = "h264"
)
parser.add_option("-o", "--ocodec",
dest = "output_encoder",
                      help = "output encoding standard <h264, hevc, h265> \
default hevc",
type = "string",
action = "store", default = "hevc"
)
parser.add_option("-b", "--bitrate",
dest = "bitrate",
help = "output bitrate in Mbit/s. Must be a float or integer value between 1.0 and 25.0 (example: use -b 3 to specify an output bitrate of 3Mbits/sec) \
default 5.0",
type = "float",
action = "store", default = 5.0
)
(options, args) = parser.parse_args()
if options.ifilename and options.ofilename:
return (options.ifilename, options.ofilename, \
options.input_encoder, options.output_encoder,options.bitrate)
else:
parser.print_help()
raise SystemExit
if __name__ == '__main__':
try:
main()
# When 'Ctrl+C' is pressed, the child program
# destroy() will be executed.
except KeyboardInterrupt:
destroy()
```
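The script above is driven entirely by the OptionParser flags defined in parse_options(); a minimal invocation sketch from Python follows. The file names are hypothetical and the flag values simply mirror the defaults documented in the help strings.
```python
# Hedged invocation sketch; assumes the tutorial script is saved under this
# name and that a U30 setup with xbutil and the mpsoc_vcu ffmpeg plugins exists.
import subprocess

subprocess.run([
    "python3", "13_ffmpeg_transcode_only_split_stitch.py",
    "-s", "input.mp4",    # source clip (mp4/mov/mkv)
    "-d", "output.mp4",   # destination clip (mp4 only)
    "-i", "h264",         # input codec: h264, h265 or hevc
    "-o", "hevc",         # output codec: h264, h265 or hevc
    "-b", "5.0",          # output bitrate in Mbit/s (1.0 ... 25.0)
], check=True)
```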
#### File: examples/xma/smokeTest_xmaApp.py
```python
import re
import math
import subprocess
from optparse import OptionParser
import time
import os
import array
#statusPrint 0 == do nothing, 1 == regular print, 2 == logfile, 3 == regular print and logfile
statusPrint = 1
def logPrint(*args):
message = ""
for arg in args:
message += arg.__str__() + " "
if (statusPrint == 1) or (statusPrint == 3):
print(message)
if (statusPrint == 2) or (statusPrint == 3):
fLog = open("smoketest.log", "a")
message = message + "\n"
fLog.write(message)
fLog.close()
def createClip(width, height, frames, filename):
f = open(filename, "wb")
nrframes = frames
speedLineH = 0.5 * width / frames
speedLineV = 0.5 * height / frames
widthY = width
heightY = height
widthUV = int(widthY/2)
heightUV = int(heightY/2)
widthUV2 = int(widthUV/2)
arrY = bytearray(widthY * heightY)
arrU = bytearray(2 * widthUV * heightUV)
#arrV = bytearray(widthUV * heightUV)
startSec = time.time()
# create a zoneplate in a resolution 2 x h and 2 x v of the clip size
# this way we can easily make it a moving zoneplate
arrZP = bytearray(4 * widthY * heightY)
for y in range(0, 2 * heightY):
tmp1 = y * 0.0000003
tmp1 = y * 0.00000278
ytmp2 = y * 2 * widthY
for x in range(0, 2 * widthY):
tmp = math.cos(tmp1 * x * x)
Y = int(127 * (1.0 + tmp))
arrZP[x + ytmp2] = Y
for fr in range(0, nrframes):
for z in range(0, heightY):
            # make the zoneplate look like it is moving in h and v direction
htmp = int((fr * widthY) / frames)
vtmp = int((fr * heightY) / frames)
arrY[z*widthY:z*widthY+widthY] = arrZP[htmp+vtmp*2*widthY+z*2*widthY:htmp+vtmp*2*widthY+z*2*widthY+widthY]
ufrtmp = (128 + int((255 / frames) * fr)) % 256
vfrtmp = (128 - int((255 / frames) * fr)) % 256
for y in range(0,heightUV):
if y < heightUV/2 + 60 and y > heightUV/2 - 60:
uvtmp1 = True
else:
uvtmp1 = False
uvtmp2 = 2 * y * widthUV
if y == (int(speedLineV*fr)):
uvtmp3 = True
else:
uvtmp3 = False
uvtmp4 = 2 * y * widthY
uvtmp5 = (2 * y + 1) * widthY
uvtmp8 = int(speedLineH*fr)
for x in range(0,widthUV):
U = ufrtmp
V = vfrtmp
uvtmp6 = x + x
uvtmp7 = x + x + 1
if uvtmp3 or x == uvtmp8:
U = 84
V = 255
arrY[uvtmp6 + uvtmp4] = 76
arrY[uvtmp7 + uvtmp4] = 76
arrY[uvtmp6 + uvtmp5] = 76
arrY[uvtmp7 + uvtmp5] = 76
if uvtmp1 and x < widthUV2 + 60 and x > widthUV2 - 60:
fr255 = fr & 0xFF
U = fr255
V = fr255
arrY[uvtmp6 + uvtmp4] = fr255
arrY[uvtmp7 + uvtmp4] = fr255
arrY[uvtmp6 + uvtmp5] = fr255
arrY[uvtmp7 + uvtmp5] = fr255
arrU[2*x + uvtmp2] = U
arrU[2*x + uvtmp2 + 1] = V
#arrV[x + uvtmp2] = V
f.write(arrY)
f.write(arrU)
#f.write(arrV)
f.close()
endSec = time.time()
totSec = int(endSec-startSec)
print("Time to create clip : " + str(totSec) + " seconds")
def testTranscode(frames, nrfiles, dir, logdir):
xstart = 1920
fail = 0
if (nrfiles < 1):
print("aborting; nr files needs to be at least 1")
raise SystemExit(1)
if (nrfiles == 1):
xstep = 0
else:
xstep = int((1920 - 320) / (nrfiles-1))
fps = [i for i in range(nrfiles)]
#decode with U30
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
startSec = time.time()
# check if file exists already
inputfile = dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".265"
fe = os.path.exists(inputfile)
if (fe == False):
logPrint("File " + inputfile + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
fps[step] = 0
logPrint("Transcoding HEVC "+str(x).zfill(4)+"x"+str(y).zfill(4)+" to h.264 960x540")
transcode_cmd = "u30_xma_transcode -c:v mpsoc_vcu_hevc -i " + inputfile + \
" -multiscale_xma -num-output 1 -out_1_width 960 -out_1_height 540 -c:v mpsoc_vcu_h264 -control-rate 0 -qp-mode 0 -slice-qp 20 -o " \
+dir+"/transcode"+str(x).zfill(4)+"x"+str(y).zfill(4)+".264" \
" > "+logdir+"/transcodestdout" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log 2>> "+logdir+"/transcodestderr"+str(x).zfill(4)+"x"+str(y).zfill(4)+".log"
subprocess.Popen(transcode_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
endSec = time.time()
logfile = open(logdir+"/transcodestderr" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log")
allNumbers = re.findall(r"[-+]?\d*\.\d+|\d+", logfile.read())
if len(allNumbers) == 0:
logPrint("Transcoder Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: No fps stats found!")
fps[step] = -1
fail = 1
else:
fps[step] = allNumbers[-1]
output = subprocess.Popen("rm "+dir+"/transcode*.yuv", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
#decode the transcoded file and check for correctness
file_name = dir+"/transcode"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv"
decode_cmd = "ffmpeg -nostdin -i " + dir+"/transcode"+str(x).zfill(4)+"x"+str(y).zfill(4)+ \
".264 -c:v rawvideo -pix_fmt nv12 "+file_name
output = subprocess.Popen(decode_cmd, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
xo = 960
yo = 540
fe = os.path.exists(file_name)
if fe:
file_stats = os.stat(file_name)
frames_mod = int (file_stats.st_size / (xo * yo * 1.5))
if file_stats.st_size != int(xo * yo * frames * 1.5):
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: Number of frames is " + str(frames_mod) + " instead of "+ str(frames))
fail = fail + 1
#logPrint("Exiting ...")
#raise SystemExit(1)
f = open(file_name, "rb")
else:
logPrint("File " + file_name + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
if fps[step] != 0:
testPassY = True
testPassUV = True
firstframe = 0
for i in range(0, frames_mod):
arrY = array.array('B')
arrU = array.array('B')
#arrV = array.array('B')
arrY.fromfile(f, xo*yo)
arrU.fromfile(f, int(xo*yo/2))
#arrV.fromfile(f, int(xo*yo/4))
xval = int((xo/2)+ (xo) * (yo/2))
uval = int((xo/2)+(xo/2) * (yo/2))
#vval = int((xo/4)+(xo/2) * (yo/4))
#if (i != arrY[xval]) or (i != arrU[uval]) or (i != arrV[vval]):
# ignoring UV for now as we know it fails
if (i != arrY[xval]) and testPassY == True:
#if testPassY == True:
# logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
# logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
testPassY = False
firstframe = i
if ((i != arrU[uval]) or (i != arrU[uval + 1])) and testPassUV == True:
#if testPassUV == True:
# logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
# logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
testPassUV = False
firstframe = i
if testPassY == True and testPassUV == True:
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" passed. Processed @ "+str(fps[step])+" fps" )
elif testPassY == True:
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Luma passed. Processed @ "+str(fps[step])+" fps" )
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Chroma FAILED. Processed @ "+str(fps[step])+" fps" )
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" First Mismatch detected in frame " + str(firstframe))
fail = fail + 1
else:
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED. Processed @ "+str(fps[step])+" fps" )
logPrint("Transcode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED. First Mismatch detected in frame " + str(firstframe))
fail = fail + 1
f.close()
return fail
def testDecodeHEVC(frames, nrfiles, dir, logdir):
xstart = 1920
fail = 0
if (nrfiles < 1):
print("aborting; nr files needs to be at least 1")
raise SystemExit(1)
if (nrfiles == 1):
xstep = 0
else:
xstep = int((1920 - 320) / (nrfiles-1))
fps = [i for i in range(nrfiles)]
#decode with U30
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
startSec = time.time()
# check if file exists already
inputfile = dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".265"
fe = os.path.exists(inputfile)
if (fe == False):
logPrint("File " + inputfile + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
fps[step] = 0
logPrint("HEVC decoding "+str(x).zfill(4)+"x"+str(y).zfill(4))
decode_cmd = "u30_xma_decode -i " + inputfile + " -c:v mpsoc_vcu_h265 -o " + \
dir+"/decodehevc" + str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv" \
" > "+logdir+"/decodestdout" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log 2>> "+logdir+"/decodestderr"+str(x).zfill(4)+"x"+str(y).zfill(4)+".log"
subprocess.Popen(decode_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
endSec = time.time()
logfile = open(logdir+"/decodestderr" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log")
allNumbers = re.findall(r"[-+]?\d*\.\d+|\d+", logfile.read())
if len(allNumbers) == 0:
logPrint("Decoder Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: No fps stats found!")
fps[step] = -1
fail = 1
else:
fps[step] = allNumbers[-1]
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
#cmp the U30 decoded mp4 file with the RAW YUV420 output of the encoded file
#they should be the same
decode_cmd = "cmp " +dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv " + dir+"/decodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv"
output = subprocess.Popen(decode_cmd, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
if output != b'':
logPrint("Decode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED with " + str(output))
fail = 1
else:
logPrint("Decode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" passed. Processed @ "+str(fps[step])+" fps" )
return fail
def testEncodeHEVC(frames, nrfiles, dir, logdir):
xstart = 1920
fail = 0
if (nrfiles < 1):
print("aborting; nr files needs to be at least 1")
raise SystemExit(1)
if (nrfiles == 1):
xstep = 0
else:
xstep = int((1920 - 320) / (nrfiles-1))
fps = [i for i in range(nrfiles)]
#encode with U30
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
startSec = time.time()
# check if file exists already
fe = os.path.exists(dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".265")
fps[step] = 0
if (fe == False):
logPrint("HEVC encoding "+str(x).zfill(4)+"x"+str(y).zfill(4))
encode_cmd = "u30_xma_enc -w "+str(x).zfill(4)+" -h "+str(y).zfill(4)+ \
" -i "+dir+"/scale"+str(x).zfill(4)+"x"+str(y).zfill(4)+ \
".yuv -c:v mpsoc_vcu_hevc -control-rate 0 -qp-mode 0 -slice-qp 20 -o "+dir+"/encodehevc" \
+str(x).zfill(4)+"x"+str(y).zfill(4)+".265" \
" > "+logdir+"/encodestdout" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log 2>> "+logdir+"/encodestderr"+str(x).zfill(4)+"x"+str(y).zfill(4)+".log"
subprocess.Popen(encode_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
endSec = time.time()
logfile = open(logdir+"/encodestderr" +str(x).zfill(4)+"x"+str(y).zfill(4)+".log")
allNumbers = re.findall(r"[-+]?\d*\.\d+|\d+", logfile.read())
if len(allNumbers) == 0:
logPrint("Encoder Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: No fps stats found!")
fps[step] = -1
fail = 1
else:
fps[step] = allNumbers[-1]
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
#decode the encoded file for correctness checking
decode_cmd = "ffmpeg -nostdin -loglevel info -i "+dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+ \
".265 -pix_fmt nv12 -y "+dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+ \
".yuv > /dev/null 2>> /dev/null"
subprocess.Popen(decode_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
file_name = dir+"/encodehevc"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv"
fe = os.path.exists(file_name)
frames_mod = frames
if fe:
file_stats = os.stat(file_name)
if file_stats.st_size != int(x * y * frames * 1.5):
frames_mod = int (file_stats.st_size / (x * y * 1.5))
logPrint("Encode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Failure: Number of frames is " + str(frames_mod) + " instead of " + str(frames))
fail = fail + 1
#logPrint("Exiting ...")
#raise SystemExit(1)
f = open(file_name, "rb")
else:
logPrint("File " + file_name + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
'''
if fe:
file_stats = os.stat(file_name)
frames_mod = int (file_stats.st_size / (x * y * 1.5))
f = open(file_name, "rb")
else:
logPrint("File " + file_name + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
'''
if fps[step] != 0:
testPassY = True
testPassUV = True
for i in range(0, frames_mod):
arrY = array.array('B')
arrU = array.array('B')
#arrV = array.array('B')
arrY.fromfile(f, x*y)
arrU.fromfile(f, int(x*y/2))
#arrV.fromfile(f, int(x*y/4))
xval = int((x/2)+ (x) * (y/2))
uval = int((x/2)+(x/2) * (y/2))
#vval = int((x/4)+(x/2) * (y/4))
#if (i != arrY[xval]) or (i != arrU[uval]) or (i != arrV[vval]):
# ignoring UV for now as we know it fails
if (i != arrY[xval]):
#if testPassY == True:
# logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
# logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
testPassY = False
if (i != arrU[uval]) or (i != arrU[uval + 1]):
#if testPassUV == True:
# logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
# logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
testPassUV = False
if testPassY == True and testPassUV == True:
logPrint("Encode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" passed. Processed @ "+str(fps[step])+" fps" )
elif testPassY == True:
logPrint("Encode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Luma passed. Processed @ "+str(fps[step])+" fps" )
logPrint("Encode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Chroma FAILED. Processed @ "+str(fps[step])+" fps" )
fail = fail + 1
else:
logPrint("Encode Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED. Processed @ "+str(fps[step])+" fps" )
fail = fail + 1
f.close()
return fail
def testScaler(width, height, frames, nrfiles, filename, dir, logdir):
xstart = 1920
fail = 0
if (nrfiles < 1):
print("aborting; nr files needs to be at least 1")
raise SystemExit(1)
if (nrfiles == 1):
xstep = 0
else:
xstep = int((1920 - 320) / (nrfiles-1))
fps = [i for i in range(nrfiles)]
#scale with U30
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
#y = 8 * int((ystart - (step*ystep)) / 8)
y = 4 * int(((x * 1080) / 1920) / 4)
startSec = time.time()
# check if file exists already
        fe = os.path.exists(dir+"/scale"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv")
fps[step] = 0
if (fe == False):
#scale with U30
logPrint("scaling to "+str(x).zfill(4)+"x"+str(y).zfill(4))
scale_cmd = "u30_xma_scale -w "+str(width)+" -h "+str(height)+" -i "+str(filename)+ \
" -w "+str(x)+" -h "+str(y)+" -o "+dir+"/scale"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv" \
" > " + logdir + "/scalestdout.log 2>> " + logdir + "/scalestderr.log"
subprocess.Popen(scale_cmd, shell = True, stdout = subprocess.PIPE).stdout.read()
endSec = time.time()
logfile = open(logdir + "/scalestderr.log")
allNumbers = re.findall(r"[-+]?\d*\.\d+|\d+", logfile.read())
if len(allNumbers) == 0:
logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED: No fps stats found!")
fps[step] = -1
fail = 1
else:
fps[step] = allNumbers[-1]
for step in range(0, nrfiles):
x = 4 * int((xstart - (step*xstep)) / 4)
y = 4 * int(((x * 1080) / 1920) / 4)
file_name = dir+"/scale"+str(x).zfill(4)+"x"+str(y).zfill(4)+".yuv"
fe = os.path.exists(file_name)
frames_mod = frames
if fe:
file_stats = os.stat(file_name)
if file_stats.st_size != int(x * y * frames * 1.5):
frames_mod = int (file_stats.st_size / (x * y * 1.5))
logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Failure: Number of frames is " + str(frames_mod) + " instead of " + str(frames))
fail = fail + 1
#logPrint("Exiting ...")
#raise SystemExit(1)
f = open(file_name, "rb")
else:
logPrint("File " + file_name + " doesn't exist")
logPrint("Exiting ...")
raise SystemExit(1)
if fps[step] != 0:
testPassY = True
testPassUV = True
for i in range(0, frames_mod):
arrY = array.array('B')
arrU = array.array('B')
#arrV = array.array('B')
arrY.fromfile(f, x*y)
arrU.fromfile(f, int(x*y/2))
#arrV.fromfile(f, int(x*y/4))
xval = int((x/2)+ (x) * (y/2))
uval = int((x/2)+ (x/2) * (y/2))
#vval = int((x/4)+(x/2) * (y/4))
#if (i != arrY[xval]) or (i != arrU[uval]) or (i != arrV[vval]):
# ignoring UV for now as we know it fails
if (i != arrY[xval]):
#if testPassY == True:
# logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
# logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrV[vval])
testPassY = False
if (i != arrU[uval]) or (i != arrU[uval + 1]):
#if testPassUV == True:
# logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" FAILED" )
# logPrint("Mismatch :",x,y,i, arrY[xval], arrU[uval],arrU[uval+1])
testPassUV = False
if testPassY == True and testPassUV == True:
logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" passed. Processed @ "+str(fps[step])+" fps" )
elif testPassY == True:
logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Luma passed. Processed @ "+str(fps[step])+" fps" )
logPrint("Scale Test: "+str(x).zfill(4)+"x"+str(y).zfill(4)+" Chroma FAILED. Processed @ "+str(fps[step])+" fps" )
fail = fail + 1
f.close()
return fail
def main():
(nrfiles, tmpdir, logdir, removefiles, iterations, minutes, frames, quit) = parse_options()
startTest = time.time()
# defaults to keep
width = 960
height = 540
if (frames > 255):
print("upper limit of nrframes is 255")
raise SystemExit(1)
if (frames < 10):
print("lower limit of nrframes is 10")
raise SystemExit(1)
#let's check for the presence of U30 boards
print("")
output = subprocess.Popen("lspci | grep Xilinx", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
substring = "Xilinx"
count = str(output).count(substring)
if count == 0:
print("No U30 boards detected. Exiting...")
raise SystemExit(1)
else:
print("Number of U30 boards detected: "+str(count/4.0))
if (minutes != 0):
iterations = 0
print("Running time bound smoketest of: "+ str(minutes)+" minutes")
elif (iterations == 1):
print("Running one iteration of smoketest")
else:
print("Running " + str(iterations)+ " iterations of smoketest")
print("Testing with " + str(frames) + " video frames per clip")
time.sleep(1)
# check if tmpdir exists already
fe = os.path.exists(tmpdir)
if (fe == False):
output = subprocess.Popen("mkdir " + tmpdir, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
if output != b'':
            print("Error occurred. Exiting ...")
print("ERROR: "+str(output))
raise SystemExit(1)
else:
print(tmpdir + " directory already exists. Removing old files..")
output = subprocess.Popen("rm "+ tmpdir + "/tmp*.yuv", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/scale*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/encode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/decode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/transcode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("touch "+ tmpdir + "/checkforaccess123", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
fe = os.path.exists(tmpdir + "/checkforaccess123")
if fe == False:
print("Can't create files in directory "+tmpdir)
print("Exiting ...")
raise SystemExit(1)
else:
output = subprocess.Popen("rm "+ tmpdir + "/checkforaccess123", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
# check if log exists already
fe = os.path.exists(logdir)
if (fe == False):
output = subprocess.Popen("mkdir " + logdir, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
if output != b'':
            print("Error occurred. Exiting ...")
print("ERROR: "+str(output))
raise SystemExit(1)
else:
print(logdir + " directory already exists. Removing old files..")
output = subprocess.Popen("rm "+ logdir + "/scale*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/encode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/decode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/transcode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("touch "+ logdir + "/checkforaccess123", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
fe = os.path.exists(logdir + "/checkforaccess123")
if fe == False:
print("Can't create files in directory "+logdir)
print("Exiting ...")
raise SystemExit(1)
else:
output = subprocess.Popen("rm "+ logdir + "/checkforaccess123", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
# check if test clip is already present
# if so, and md5sum matches, dont recreate
# otherwise, recreate the clip
filename = tmpdir+"/clip"+str(width)+"x"+str(height)+"xmaApp.yuv"
if os.path.exists(filename):
output = subprocess.Popen("md5sum " + filename, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
checkSum255 = b'5f6b4013c40a227062574e30e0b2c784'
checkSum10 = b'73938e7d6820efc965b2c1b54c85a5ec'
if checkSum255 == output[:32] and frames == 255 and width == 960 and height == 540:
print("Testclip is present; no need to generate again")
elif checkSum10 == output[:32] and frames == 10 and width == 960 and height == 540:
print("Testclip is present; no need to generate again")
else:
print("Creating test clip with size "+str(width)+"x"+str(height))
print("(this can take up to 30 seconds)")
createClip(width, height, frames, filename)
else:
print("Testclip doesn't exist")
print("Creating test clip with size "+str(width)+"x"+str(height))
print("(this can take up to 30 seconds)")
createClip(width, height, frames, filename)
run = True
runNumber = 1
failSE = 0
failS = 0
failE = 0
failD = 0
failT = 0
fail = 0
while run:
# remove intermediate temporary files
output = subprocess.Popen("rm "+ tmpdir + "/scale*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/decode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/encode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/transcode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/scale*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/decode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/encode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/transcode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
print("Starting RUN: "+ str(runNumber))
if (runNumber == iterations) and (iterations != 0):
run = False
runNumber = runNumber+1
# test scaler
# 1 x RAW YUV420 file --> scaler --> "nrfiles" RAW YUV420 video files
# check whether the scaled results are as expected
startSec = time.time()
logPrint(" ")
logPrint("SCALER test: "+str(nrfiles)+" resolutions")
failure = testScaler(width, height, frames, nrfiles, filename, tmpdir, logdir)
endSec = time.time()
totSec = int(endSec-startSec)
print("Scale test time: " + str(totSec) + " seconds")
print(" ")
failS = failS + failure
startSec = time.time()
logPrint("HEVC ENCODER test: "+str(nrfiles)+" resolutions")
failure = testEncodeHEVC(frames, nrfiles, tmpdir, logdir)
endSec = time.time()
totSec = int(endSec-startSec)
print("Encode test time : " + str(totSec) + " seconds")
print(" ")
failE = failE + failure
startSec = time.time()
logPrint("HEVC DECODER test: "+str(nrfiles)+" resolutions")
failure = testDecodeHEVC(frames, nrfiles, tmpdir, logdir)
endSec = time.time()
totSec = int(endSec-startSec)
print("Decode test time : " + str(totSec) + " seconds")
print(" ")
failD = failD + failure
startSec = time.time()
logPrint("TRANSCODER test: "+str(nrfiles)+" resolutions")
failure = testTranscode(frames, nrfiles, tmpdir, logdir)
endSec = time.time()
totSec = int(endSec-startSec)
print("Transcode test time : " + str(totSec) + " seconds")
print(" ")
failT = failT + failure
endTest = time.time()
totSec = int(endTest-startTest)
print("Complete test time: " + str(totSec) + " seconds")
fail = failS + failE + failD + failT + failSE
if quit == "yes" and fail != 0:
print("Failure detected. Exiting as per commandline flag")
raise SystemExit(1)
if quit == "scale" and failS != 0:
print("Scale Failure detected. Exiting as per commandline flag")
raise SystemExit(1)
if quit == "encode" and failE != 0:
print("Encode Failure detected. Exiting as per commandline flag")
raise SystemExit(1)
if quit == "scaleencode" and failSE != 0:
print("Scale+Encode Failure detected. Exiting as per commandline flag")
raise SystemExit(1)
if quit == "decode" and failD != 0:
print("Decode Failure detected. Exiting as per commandline flag")
raise SystemExit(1)
if quit == "transcode" and failT != 0:
print("Transcode Failure detected. Exiting as per commandline flag")
raise SystemExit(1)
if (minutes != 0) and totSec > (minutes * 60 ):
run = False
#if needed, remove temporary log & tmp files before exiting
if (removefiles == "yes"):
print("Removing log and tmp files ...")
output = subprocess.Popen("rm "+ tmpdir + "/clip*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/scale*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/decode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/encode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ tmpdir + "/transcode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/scale*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/decode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/encode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rm "+ logdir + "/transcode*", shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rmdir "+ logdir, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
output = subprocess.Popen("rmdir "+ tmpdir, shell = True, stderr=subprocess.STDOUT, stdout = subprocess.PIPE).stdout.read()
print("")
print("Number of Smoketests completed : "+str(runNumber-1))
print("")
print("Number of failures in Scale tests : "+str(failS))
print("Number of failures in Encode tests : "+str(failE))
print("Number of failures in Scale + Encode tests: "+str(failSE))
print("Number of failures in Decode tests : "+str(failD))
print("Number of failures in Transcode tests : "+str(failT))
def destroy():
# Release resource
print("Exiting ...")
def parse_options():
parser = OptionParser()
parser.add_option("-f", "--files",
dest = "nrfiles",
help = "#files to generate per test",
type = "int",
action = "store"
)
parser.add_option("-t", "--tmpdir",
dest = "tmpdir",
help = "directory for storing temporary YUV and mp4 files" \
"(best to keep the dir local on the machine for speed)",
type = "string",
action = "store"
)
parser.add_option("-l", "--logdir",
dest = "logdir",
help = "directory for log files",
type = "string",
action = "store"
)
parser.add_option("-r", "--removefiles",
dest = "removefiles",
help = "remove files after completion of all tests (yes/no): default = yes",
type = "string",
action = "store",
default = "yes"
)
parser.add_option("-i", "--iterations",
dest = "iterations",
help = "number of iterations to run (0 = continuous): default = 1",
type = "int",
action = "store",
default = "1"
)
parser.add_option("-m", "--minutes",
dest = "minutes",
help = "number of minutes to run (0 = ignore setting): default = 0",
type = "int",
action = "store",
default = "0"
)
parser.add_option("-p", "--pictures",
dest = "frames",
help = "number of video frames per test: default = 255",
type = "int",
action = "store",
default = 255
)
parser.add_option("-q", "--quit",
dest = "quit",
                      help = "quit on failure: default = no; other options: yes, scale, encode, scaleencode, decode, transcode",
type = "string",
action = "store",
default = "no"
)
(options, args) = parser.parse_args()
if options.nrfiles and options.tmpdir and options.logdir:
return (options.nrfiles, options.tmpdir, options.logdir, options.removefiles, options.iterations, options.minutes, options.frames, options.quit)
else:
parser.print_help()
raise SystemExit(1)
if __name__ == '__main__':
try:
main()
        # When 'Ctrl+C' is pressed, destroy() is executed to clean up before exiting.
except KeyboardInterrupt:
destroy()
``` |
{
"source": "JMousqueton/ransomwatch",
"score": 3
} |
#### File: JMousqueton/ransomwatch/parsers.py
```python
import os
import json
from sys import platform
from datetime import datetime
from sharedutils import openjson
from sharedutils import runshellcmd
from sharedutils import todiscord, totwitter, toteams
from sharedutils import stdlog, dbglog, errlog, honk
# on macOS we use 'grep -oE' over 'grep -oP'
if platform == 'darwin':
fancygrep = 'grep -oE'
else:
fancygrep = 'grep -oP'
def posttemplate(victim, group_name, timestamp):
'''
assuming we have a new post - form the template we will use for the new entry in posts.json
'''
schema = {
'post_title': victim,
'group_name': group_name,
'discovered': timestamp
}
dbglog(schema)
return schema
def existingpost(post_title, group_name):
'''
check if a post already exists in posts.json
'''
posts = openjson('posts.json')
# posts = openjson('posts.json')
for post in posts:
if post['post_title'] == post_title and post['group_name'] == group_name:
#dbglog('post already exists: ' + post_title)
return True
dbglog('post does not exist: ' + post_title)
return False
def appender(post_title, group_name):
'''
append a new post to posts.json
'''
if len(post_title) == 0:
errlog('post_title is empty')
return
# limit length of post_title to 90 chars
if len(post_title) > 90:
post_title = post_title[:90]
if existingpost(post_title, group_name) is False:
posts = openjson('posts.json')
newpost = posttemplate(post_title, group_name, str(datetime.today()))
stdlog('adding new post - ' + 'group:' + group_name + ' title:' + post_title)
posts.append(newpost)
with open('posts.json', 'w', encoding='utf-8') as outfile:
'''
            ensure_ascii=False keeps non-ASCII characters (e.g. Cyrillic 🇷🇺) as-is in posts.json
https://pynative.com/python-json-encode-unicode-and-non-ascii-characters-as-is/
'''
dbglog('writing changes to posts.json')
json.dump(posts, outfile, indent=4, ensure_ascii=False)
# if socials are set try post
if os.environ.get('DISCORD_WEBHOOK') is not None:
todiscord(newpost['post_title'], newpost['group_name'])
if os.environ.get('TWITTER_ACCESS_TOKEN') is not None:
totwitter(newpost['post_title'], newpost['group_name'])
if os.environ.get('MS_TEAMS_WEBHOOK') is not None:
toteams(newpost['post_title'], newpost['group_name'])
'''
all parsers here are shell - mix of grep/sed/awk & perl - runshellcmd is a wrapper for subprocess.run
'''
def synack():
stdlog('parser: ' + 'synack')
parser='''
grep 'card-title' source/synack-*.html --no-filename | cut -d ">" -f2 | cut -d "<" -f1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('synack: ' + 'parsing fail')
for post in posts:
appender(post, 'synack')
def everest():
stdlog('parser: ' + 'everest')
parser = '''
grep '<h2 class="entry-title' source/everest-*.html | cut -d '>' -f3 | cut -d '<' -f1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('everest: ' + 'parsing fail')
for post in posts:
appender(post, 'everest')
def suncrypt():
stdlog('parser: ' + 'suncrypt')
parser = '''
cat source/suncrypt-*.html | tr '>' '\n' | grep -A1 '<a href="client?id=' | sed -e '/^--/d' -e '/^<a/d' | cut -d '<' -f1 | sed -e 's/[ \t]*$//' "$@" -e '/Read more/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('suncrypt: ' + 'parsing fail')
for post in posts:
appender(post, 'suncrypt')
def lorenz():
stdlog('parser: ' + 'lorenz')
parser = '''
grep 'h3' source/lorenz-*.html --no-filename | cut -d ">" -f2 | cut -d "<" -f1 | sed -e 's/^ *//g' -e '/^$/d' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('lorenz: ' + 'parsing fail')
for post in posts:
appender(post, 'lorenz')
def lockbit2():
stdlog('parser: ' + 'lockbit2')
# egrep -h -A1 'class="post-title"' source/lockbit2-* | grep -v 'class="post-title"' | grep -v '\--' | cut -d'<' -f1 | tr -d ' '
parser = '''
awk -v lines=2 '/post-title-block/ {for(i=lines;i;--i)getline; print $0 }' source/lockbit2-*.html | cut -d '<' -f1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//' | sort | uniq
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('lockbit2: ' + 'parsing fail')
for post in posts:
appender(post, 'lockbit2')
'''
used to fetch the description of a lb2 post - not used
def lockbit2desc():
stdlog('parser: ' + 'lockbit2desc')
# sed -n '/post-block-text/{n;p;}' source/lockbit2-*.html | sed '/^</d' | cut -d "<" -f1
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('lockbit2: ' + 'parsing fail')
for post in posts:
appender(post, 'lockbit2')
'''
def arvinclub():
stdlog('parser: ' + 'arvinclub')
parser = '''
grep 'bookmark' source/arvinclub-*.html --no-filename | cut -d ">" -f3 | cut -d "<" -f1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('arvinclub: ' + 'parsing fail')
for post in posts:
appender(post, 'arvinclub')
def hiveleak():
stdlog('parser: ' + 'hiveleak')
# grep 'bookmark' source/hive-*.html --no-filename | cut -d ">" -f3 | cut -d "<" -f1
# egrep -o 'class="">([[:alnum:]]| |\.)+</h2>' source/hiveleak-hiveleak*.html | cut -d '>' -f 2 | cut -d '<' -f 1 && egrep -o 'class="lines">([[:alnum:]]| |\.)+</h2>' source/hiveleak-hiveleak*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sort -u
# egrep -o 'class="lines">.*?</h2>' source/hiveleak-hiveleak*.html | cut -d '>' -f 2 | cut -d '<' -f 1 && egrep -o 'class="lines">.*?</h2>' source/hiveleak-hiveleak*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sort -u
parser = '''
jq -r '.[].title' source/hiveleak-hiveapi*.html || true
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('hiveleak: ' + 'parsing fail')
for post in posts:
appender(post, 'hiveleak')
def avaddon():
stdlog('parser: ' + 'avaddon')
parser = '''
grep 'h6' source/avaddon-*.html --no-filename | cut -d ">" -f3 | sed -e s/'<\/a'//
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('avaddon: ' + 'parsing fail')
for post in posts:
appender(post, 'avaddon')
def xinglocker():
stdlog('parser: ' + 'xinglocker')
parser = '''
grep "h3" -A1 source/xinglocker-*.html --no-filename | grep -v h3 | awk -v n=4 'NR%n==1' | sed -e 's/^[ \t]*//' -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('xinglocker: ' + 'parsing fail')
for post in posts:
appender(post, 'xinglocker')
def ragnarlocker():
stdlog('parser: ' + 'ragnarlocker')
json_parser = '''
grep 'var post_links' source/ragnarlocker-*.html --no-filename | sed -e s/" var post_links = "// -e "s/ ;//"
'''
posts = runshellcmd(json_parser)
post_json = json.loads(posts[0])
with open('source/ragnarlocker.json', 'w', encoding='utf-8') as f:
json.dump(post_json, f, indent=4)
f.close()
if len(post_json) == 1:
errlog('ragnarlocker: ' + 'parsing fail')
for post in post_json:
try:
appender(post['title'], 'ragnarlocker')
except TypeError:
errlog('ragnarlocker: ' + 'parsing fail')
def clop():
stdlog('parser: ' + 'clop')
parser = '''
grep 'PUBLISHED' source/clop-*.html --no-filename | sed -e s/"<strong>"// -e s/"<\/strong>"// -e s/"<\/p>"// -e s/"<p>"// -e s/"<br>"// -e s/"<strong>"// -e s/"<\/strong>"// -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('clop: ' + 'parsing fail')
for post in posts:
appender(post, 'clop')
def revil():
stdlog('parser: ' + 'revil')
# grep 'href="/posts' source/revil-*.html --no-filename | cut -d '>' -f2 | sed -e s/'<\/a'// -e 's/^[ \t]*//'
parser = '''
grep 'justify-content-between' source/revil-*.html --no-filename | cut -d '>' -f 3 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('revil: ' + 'parsing fail')
for post in posts:
appender(post, 'revil')
def conti():
stdlog('parser: ' + 'conti')
# grep 'class="title">&' source/conti-*.html --no-filename | cut -d ";" -f2 | sed -e s/"&rdquo"//
parser = '''
grep 'newsList' source/conti-continewsnv5ot*.html --no-filename | sed -e 's/ newsList(//g' -e 's/);//g' | jq '.[].title' -r || true
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('conti: ' + 'parsing fail')
for post in posts:
appender(post, 'conti')
def pysa():
stdlog('parser: ' + 'pysa')
parser = '''
grep 'icon-chevron-right' source/pysa-*.html --no-filename | cut -d '>' -f3 | sed 's/^ *//g'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('pysa: ' + 'parsing fail')
for post in posts:
appender(post, 'pysa')
def nefilim():
stdlog('parser: ' + 'nefilim')
parser = '''
grep 'h2' source/nefilim-*.html --no-filename | cut -d '>' -f3 | sed -e s/'<\/a'//
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('nefilim: ' + 'parsing fail')
for post in posts:
appender(post, 'nefilim')
def mountlocker():
stdlog('parser: ' + 'mountlocker')
parser = '''
grep '<h3><a href=' source/mount-locker-*.html --no-filename | cut -d '>' -f5 | sed -e s/'<\/a'// -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('mountlocker: ' + 'parsing fail')
for post in posts:
appender(post, 'mountlocker')
def babuklocker():
stdlog('parser: ' + 'babuklocker')
parser = '''
grep '<h5>' source/babuk-locker-*.html --no-filename | sed 's/^ *//g' | cut -d '>' -f2 | cut -d '<' -f1 | grep -wv 'Hospitals\|Non-Profit\|Schools\|Small Business' | sed '/^[[:space:]]*$/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('babuklocker: ' + 'parsing fail')
for post in posts:
appender(post, 'babuklocker')
def ransomexx():
stdlog('parser: ' + 'ransomexx')
parser = '''
grep 'card-title' source/ransomexx-*.html --no-filename | cut -d '>' -f2 | sed -e s/'<\/h5'// -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('ransomexx: ' + 'parsing fail')
for post in posts:
appender(post, 'ransomexx')
def cuba():
stdlog('parser: ' + 'cuba')
# grep '<p>' source/cuba-*.html --no-filename | cut -d '>' -f3 | cut -d '<' -f1
parser = '''
grep '<a href="http://' source/cuba-cuba4i* | cut -d '/' -f 4 | sort -u
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('cuba: ' + 'parsing fail')
for post in posts:
appender(post, 'cuba')
def pay2key():
stdlog('parser: ' + 'pay2key')
parser = '''
grep 'h3><a href' source/pay2key-*.html --no-filename | cut -d '>' -f3 | sed -e s/'<\/a'//
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('pay2key: ' + 'parsing fail')
for post in posts:
appender(post, 'pay2key')
def azroteam():
stdlog('parser: ' + 'azroteam')
parser = '''
grep "h3" -A1 source/aztroteam-*.html --no-filename | grep -v h3 | awk -v n=4 'NR%n==1' | sed -e 's/^[ \t]*//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('azroteam: ' + 'parsing fail')
for post in posts:
appender(post, 'azroteam')
def lockdata():
stdlog('parser: ' + 'lockdata')
parser = '''
grep '<a href="/view.php?' source/lockdata-*.html --no-filename | cut -d '>' -f2 | cut -d '<' -f1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('lockdata: ' + 'parsing fail')
for post in posts:
appender(post, 'lockdata')
def blacktor():
stdlog('parser: ' + 'blacktor')
# sed -n '/tr/{n;p;}' source/bl@cktor-*.html | grep 'td' | cut -d '>' -f2 | cut -d '<' -f1
parser = '''
grep '>Details</a></td>' source/blacktor-*.html --no-filename | cut -f2 -d '"' | cut -f 2- -d- | cut -f 1 -d .
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('blacktor: ' + 'parsing fail')
for post in posts:
appender(post, 'blacktor')
def darkleakmarket():
stdlog('parser: ' + 'darkleakmarket')
parser = '''
grep 'page.php' source/darkleakmarket-*.html --no-filename | sed -e 's/^[ \t]*//' | cut -d '>' -f3 | sed '/^</d' | cut -d '<' -f1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('darkleakmarket: ' + 'parsing fail')
for post in posts:
appender(post, 'darkleakmarket')
def blackmatter():
stdlog('parser: ' + 'blackmatter')
parser = '''
grep '<h4 class="post-announce-name" title="' source/blackmatter-*.html --no-filename | cut -d '"' -f4 | sort -u
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('blackmatter: ' + 'parsing fail')
for post in posts:
appender(post, 'blackmatter')
def payloadbin():
stdlog('parser: ' + 'payloadbin')
parser = '''
grep '<h4 class="h4' source/payloadbin-*.html --no-filename | cut -d '>' -f3 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('payloadbin: ' + 'parsing fail')
for post in posts:
appender(post, 'payloadbin')
def groove():
stdlog('parser: ' + 'groove')
parser = '''
egrep -o 'class="title">([[:alnum:]]| |\.)+</a>' source/groove-*.html | cut -d '>' -f2 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('groove: ' + 'parsing fail')
for post in posts:
appender(post, 'groove')
def bonacigroup():
stdlog('parser: ' + 'bonacigroup')
parser = '''
grep 'h5' source/bonacigroup-*.html --no-filename | cut -d '>' -f 3 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('bonacigroup: ' + 'parsing fail')
for post in posts:
appender(post, 'bonacigroup')
def karma():
stdlog('parser: ' + 'karma')
parser = '''
grep "h2" source/karma-*.html --no-filename | cut -d '>' -f 3 | cut -d '<' -f 1 | sed '/^$/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('karma: ' + 'parsing fail')
for post in posts:
appender(post, 'karma')
def blackbyte():
stdlog('parser: ' + 'blackbyte')
# grep "h1" source/blackbyte-*.html --no-filename | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e '/^$/d' -e 's/[[:space:]]*$//'
# grep "display-4" source/blackbyte-*.html --no-filename | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e 's/^[ \t]*//' -e 's/^ *//g' -e 's/[[:space:]]*$//'
parser = '''
grep '<h1 class="h_font"' source/blackbyte-*.html | cut -d '>' -f 2 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('blackbyte: ' + 'parsing fail')
for post in posts:
appender(post, 'blackbyte')
def spook():
stdlog('parser: ' + 'spook')
parser = '''
grep 'h2 class' source/spook-*.html --no-filename | cut -d '>' -f 3 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e '/^$/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('spook: ' + 'parsing fail')
for post in posts:
appender(post, 'spook')
def quantum():
stdlog('parser: ' + 'quantum')
parser = '''
awk '/h2/{getline; print}' source/quantum-*.html | sed -e 's/^ *//g' -e '/<\/a>/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('quantum: ' + 'parsing fail')
for post in posts:
appender(post, 'quantum')
def atomsilo():
stdlog('parser: ' + 'atomsilo')
parser = '''
cat source/atomsilo-*.html | grep "h4" | cut -d '>' -f 3 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('atomsilo: ' + 'parsing fail')
for post in posts:
appender(post, 'atomsilo')
def lv():
stdlog('parser: ' + 'lv')
parser = '''
%s "blog-post-title.*?</a>" source/lv-rbvuetun*.html | cut -d '>' -f 3 | cut -d '<' -f 1
''' % (fancygrep)
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('lv: ' + 'parsing fail')
for post in posts:
appender(post, 'lv')
def five4bb47h():
stdlog('parser: ' + 'sabbath')
parser = '''
%s "aria-label.*?>" source/sabbath-*.html | cut -d '"' -f 2 | sed -e '/Search button/d' -e '/Off Canvas Menu/d' -e '/Close drawer/d' -e '/Close search modal/d' -e '/Header Menu/d' | tr "..." ' ' | grep "\S" | cat
''' % (fancygrep)
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('sabbath: ' + 'parsing fail')
for post in posts:
appender(post, 'sabbath')
def midas():
stdlog('parser: ' + 'midas')
parser = '''
grep "/h3" source/midas-*.html --no-filename | sed -e 's/<\/h3>//' -e 's/^ *//g' -e '/^$/d' -e 's/^ *//g' -e 's/[[:space:]]*$//' -e '/^$/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('midas: ' + 'parsing fail')
for post in posts:
appender(post, 'midas')
def snatch():
stdlog('parser: ' + 'snatch')
parser = '''
%s "a-b-n-name.*?</div>" source/snatch-*.html | cut -d '>' -f 2 | cut -d '<' -f 1
''' % (fancygrep)
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('snatch: ' + 'parsing fail')
for post in posts:
appender(post, 'snatch')
def marketo():
stdlog('parser: ' + 'marketo')
parser = '''
cat source/marketo-*.html | grep '<a href="/lot' | sed -e 's/^ *//g' -e '/Show more/d' -e 's/<strong>//g' | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e '/^$/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('marketo: ' + 'parsing fail')
for post in posts:
appender(post, 'marketo')
def rook():
stdlog('parser: ' + 'rook')
parser = '''
grep 'class="post-title"' source/rook-*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sed '/^"/d'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('rook: ' + 'parsing fail')
for post in posts:
appender(post, 'rook')
def cryp70n1c0d3():
stdlog('parser: ' + 'cryp70n1c0d3')
parser = '''
grep '<td class="selection"' source/cryp70n1c0d3-*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('cryp70n1c0d3: ' + 'parsing fail')
for post in posts:
appender(post, 'cryp70n1c0d3')
def mosesstaff():
stdlog('parser: ' + 'mosesstaff')
parser = '''
grep '<h2 class="entry-title">' source/moses-moses-staff.html -A 3 --no-filename | grep '</a>' | sed 's/^ *//g' | cut -d '<' -f 1 | sed 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('mosesstaff: ' + 'parsing fail')
for post in posts:
appender(post, 'mosesstaff')
def alphv():
stdlog('parser: ' + 'alphv')
# egrep -o 'class="mat-h2">([[:alnum:]]| |\.)+</h2>' source/alphv-*.html | cut -d '>' -f 2 | cut -d '<' -f 1
# grep -o 'class="mat-h2">[^<>]*<\/h2>' source/alphv-*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//' -e '/No articles here yet, check back later./d'
parser = '''
jq -r '.items[].title' source/alphv-alphvmmm27*.html | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('alphv: ' + 'parsing fail')
for post in posts:
appender(post, 'alphv')
def nightsky():
stdlog('parser: ' + 'nightsky')
parser = '''
grep 'class="mdui-card-primary-title"' source/nightsky-*.html --no-filename | cut -d '>' -f 3 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('nightsky: ' + 'parsing fail')
for post in posts:
appender(post, 'nightsky')
def vicesociety():
stdlog('parser: ' + 'vicesociety')
parser = '''
grep '<tr><td valign="top"><br><font size="4" color="#FFFFFF"><b>' source/vicesociety-*.html --no-filename | cut -d '>' -f 6 | cut -d '<' -f 1 | sed -e '/ato District Health Boa/d' -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('vicesociety: ' + 'parsing fail')
for post in posts:
appender(post, 'vicesociety')
def pandora():
stdlog('parser: ' + 'pandora')
parser = '''
grep '<span class="post-title gt-c-content-color-first">' source/pandora-*.html | cut -d '>' -f 2 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('pandora: ' + 'parsing fail')
for post in posts:
appender(post, 'pandora')
def stormous():
stdlog('parser: ' + 'stormous')
# grep '<p> <h3> <font color="' source/stormous-*.html | grep '</h3>' | cut -d '>' -f 4 | cut -d '<' -f 1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
# grep '<h3>' source/stormous-*.html | sed -e 's/^ *//g' -e 's/[[:space:]]*$//' | grep "^<h3> <font" | cut -d '>' -f 3 | cut -d '<' -f 1 | sed 's/[[:space:]]*$//'
parser = '''
awk '/<h3>/{getline; print}' source/stormous-*.html | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('stormous: ' + 'parsing fail')
for post in posts:
appender(post, 'stormous')
def leaktheanalyst():
stdlog('parser: ' + 'leaktheanalyst')
parser = '''
grep '<label class="news-headers">' source/leaktheanalyst-*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e 's/Section //' -e 's/#//' -e 's/^ *//g' -e 's/[[:space:]]*$//' | sort -n | uniq
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('leaktheanalyst: ' + 'parsing fail')
for post in posts:
appender(post, 'leaktheanalyst')
def kelvinsecurity():
stdlog('parser: ' + 'kelvinsecurity')
parser = '''
egrep -o '<span style="font-size:20px;">([[:alnum:]]| |\.)+</span>' source/kelvinsecurity-*.html | cut -d '>' -f 2 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('kelvinsecurity: ' + 'parsing fail')
for post in posts:
appender(post, 'kelvinsecurity')
def blackbasta():
stdlog('parser: ' + 'blackbasta')
parser = '''
egrep -o 'fqd.onion/\?id=([[:alnum:]]| |\.)+"' source/blackbasta-*.html | cut -d = -f 2 | cut -d '"' -f 1 | sed -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('blackbasta: ' + 'parsing fail')
for post in posts:
appender(post, 'blackbasta')
def onyx():
stdlog('parser: ' + 'onyx')
parser = '''
grep '<h6 class=' source/onyx-*.html | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e '/Connect with us/d' -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('onyx: ' + 'parsing fail')
for post in posts:
appender(post, 'onyx')
def mindware():
stdlog('parser: ' + 'mindware')
parser = '''
grep '<div class="card-header">' source/mindware-*.html | cut -d '>' -f 2 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('mindware: ' + 'parsing fail')
for post in posts:
appender(post, 'mindware')
def ransomhouse():
stdlog('parser: ' + 'ransomhouse')
parser = '''
egrep -o 'class="cls_recordTop"><p>([[:alnum:]]| |\.)+</p>' source/ransomhhouse-*.html | cut -d '>' -f 3 | cut -d '<' -f 1
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('ransomhouse: ' + 'parsing fail')
for post in posts:
appender(post, 'ransomhouse')
def cheers():
stdlog('parser: ' + 'cheers')
parser = '''
cat source/cheers-*.html | grep '<a href="' | grep -v title | cut -d '>' -f 2 | cut -d '<' -f 1 | sed -e '/Cheers/d' -e '/Home/d' -e 's/^ *//g' -e 's/[[:space:]]*$//'
'''
posts = runshellcmd(parser)
if len(posts) == 1:
errlog('cheers: ' + 'parsing fail')
for post in posts:
appender(post, 'cheers')
``` |
{
"source": "jmoussa/covid-data-be",
"score": 3
} |
#### File: endpoints/data_acquisition/post.py
```python
from api.endpoints.data_acquisitions.data_sources.covid import CovidAggregator
from fastapi.responses import JSONResponse
from loguru import logger
# pylint: disable=E0611
from pydantic import BaseModel
# pylint: enable=E0611
DOC = {
200: {
"description": "API response successfully",
"content": {
"application/json": {"example": {"data_source": "covid", "limit": 0}}
},
}
}
class Payload(BaseModel):
data_source: str
limit: int
def post(payload: Payload):
"""
POST /api/v1/data_acquisition/
The entry point for data acquisition.
Send a query in JSON format.
    The query will be parsed, the proper data source will be queried, and the data will be returned in JSON format.
"""
logger.info(f"{payload}")
payload.data_source = payload.data_source.lower()
if payload.data_source == "covid":
aggregator = CovidAggregator()
data = aggregator.get_data(payload.limit)
else:
return JSONResponse({"error": "Data source not found"}, status_code=404)
return JSONResponse(data, status_code=200)
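# Hedged usage sketch (the route prefix is taken from the docstring above; adjust it to
# however this handler is actually mounted in the FastAPI app):
#
#   POST /api/v1/data_acquisition/  with body {"data_source": "covid", "limit": 10}
#   -> CovidAggregator().get_data(10) serialised as JSON
#   any other data_source -> 404 with {"error": "Data source not found"}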
```
#### File: api/tests/__init__.py
```python
from dataclasses import dataclass
from typing import Any, Union
from app import APP
from fastapi.testclient import TestClient
CLIENT = TestClient(APP)
@dataclass
class AssertRequest:
"""
API request assertion dataclass
    headers (Union[dict, None]): The expected headers of the request
    payload (Union[dict, None]): The expected payload (JSON or GET parameters) of the request
"""
headers: Union[dict, None]
payload: Union[dict, None]
@dataclass
class AssertResponse:
"""
API response assertion dataclass
body (Any): The expected body of response
status_code (int): The expected status code of response
"""
body: Any = "OK"
status_code: int = 200
def assert_request(
method: str, route: str, request: AssertRequest, response: AssertResponse
):
if method.upper() == "GET":
resp = CLIENT.request(
method,
f"{route}",
headers=request.headers,
params=request.payload,
)
else:
resp = CLIENT.request(
method,
f"{route}",
headers=request.headers,
json=request.payload,
)
try:
assert (
resp.json() == response.body
), f"{resp.json} does not match {response.body}"
except Exception:
        assert resp.text == response.body, f"{resp.text} does not match {response.body}"
assert (
resp.status_code == response.status_code
), f"{resp.status_code} does not match {response.status_code}"
``` |
{
"source": "jmoutte/pipump",
"score": 2
} |
#### File: jmoutte/pipump/main.py
```python
from config import Config
import signal
import sys
import os
import logging
import asyncio
logging.basicConfig(
level=logging.DEBUG,
format="%(asctime)s [%(levelname)s] %(message)s",
handlers=[
logging.FileHandler(os.path.join(os.path.abspath(os.path.dirname(__file__)), 'debug.log'))
]
)
mode = 'AUTO'
auto_task = None
emulate_pi = False
try:
import RPi.GPIO as GPIO
GPIO.setmode(GPIO.BOARD)
except ImportError:
emulate_pi = True
def signal_handler(sig, frame):
logging.info('Exiting cleanly')
loop.stop()
if not emulate_pi:
GPIO.cleanup()
sys.exit(0)
signal.signal(signal.SIGINT, signal_handler)
def on_mode_changed(new_mode):
global auto_task,mode
if mode == new_mode:
return mode
logging.info(f'changing operation mode from {mode} to {new_mode}')
if mode == 'AUTO':
if auto_task is not None:
auto_task.cancel()
if new_mode == 'AUTO':
auto_task = loop.create_task(auto_loop())
else:
for p in pumps:
p.turn_off()
mode = new_mode
return mode
def on_switch_command(pump, command):
if mode != 'MANUAL':
logging.debug('ignoring switch command in non MANUAL modes')
return
else:
if command == 'ON':
pump.turn_on()
elif command == 'OFF':
pump.turn_off()
else:
logging.warning(f'Invalid command {command} for pump {pump.name}')
async def auto_loop():
try:
while True:
for p in pumps:
p.update() # Potentially stops a pump that reached desired runtime, update counters
availability = device.update()
if availability <= 0:
if aux_pump.is_running():
aux_pump.turn_off()
del device.consumption
elif main_pump.is_running():
main_pump.turn_off()
del device.consumption
else:
for p in pumps:
start = False
if p.should_run():
start, availability = p.can_run(availability)
if start and not p.is_running():
p.turn_on()
del device.consumption
await asyncio.sleep(60)
except asyncio.CancelledError:
logging.debug('auto_loop task cancelled')
raise
config = Config('config.yaml')
pumps = config.load_pumps()
device = config.load_pvsystem()
mqtt_client = config.load_mqttclient()
mqtt_client.attach(pumps, on_mode_changed, on_switch_command)
# FIXME: need to find a better way to implement that logic
main_pump = None
aux_pump = None
for p in pumps:
if p.is_chained():
aux_pump = p
else:
main_pump = p
if __name__ == '__main__':
loop = asyncio.get_event_loop()
if mode == 'AUTO':
auto_task = loop.create_task(auto_loop())
mqtt_task = loop.create_task(mqtt_client.task())
loop.run_forever()
loop.close()
``` |
{
"source": "jmox0351/chemEPy",
"score": 3
} |
#### File: chemEPy/chemEPy/equations.py
```python
import pandas as pd
import math
import pkg_resources
antDf = pd.read_csv(pkg_resources.resource_filename(__name__, 'data/antoine.csv'))
def antoine(**kwargs):
name = kwargs['name']
info = []
try:
kwargs['P'] #if unknown is P
except:
for index, row in antDf.iterrows():
if(name == row[0]): #We have multiple sources here so we are going to need to try multiple rows for the same name
if(kwargs['T'] >= row[4] and kwargs['T'] <= row[5]):
info = row
break
        if(len(info) == 0):  # if no acceptable options are found
return('Temperature is outside of acceptable range or name not found see antoineNames for names and ranges')
return(10**(info[1]-info[2]/(info[3]+kwargs['T']))) #else return pressure in mmHg
try:
kwargs['T']
except:
for index, row in antDf.iterrows(): #we are going with the first valid row for now
if(name == row[0]):
info = row
break
if(len(info) == 0):
return('name not found see antoineNames for names and temperature ranges')
else:
return(-1*info[2]/(math.log10(kwargs['P'])-info[1]) - info[3])
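# Hedged usage sketch (the compound name is a placeholder; see antoineNames() for the
# names and temperature ranges actually present in data/antoine.csv):
#
#   antoine(name='water', T=25)   # -> vapour pressure in mmHg at 25 C
#   antoine(name='water', P=760)  # -> temperature in C at 760 mmHg
#
# Both forms return an explanatory string instead of a number when the name is not found
# (and the T form also when the temperature lies outside the tabulated range).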
def antoineNames():
for index, row in antDf.iterrows():
print('Name:', row[0], ' Min temp (C):', row[4], ' Max temp (C):', row[5])
def antoineUnits():
print('P is in mmHg and T is in C')
```
#### File: chemEPy/chemEPy/fluidNumbers.py
```python
def biot(**kwargs):
return(kwargs['h']*kwargs['k']/kwargs['L'])
def biotInfo():
print('arguments are h, k, and L')
def graetz(**kwargs):
try:
kwargs['rho']
return(kwargs['D']**2 * kwargs['rho'] * kwargs['u'] * kwargs['cp'] / (kwargs['L']*kwargs['k']))
except:
pass
try:
kwargs['nu']
return(kwargs['D']**2 * kwargs['mu'] * kwargs['u'] * kwargs['cp'] / (kwargs['L']*kwargs['k']*kwargs['nu']))
except:
pass
return(kwargs['D']*kwargs['Re']*kwargs['Pr']/kwargs['L'])
def graetzInfo():
print('arguments are D, rho, u, cp, L, k OR D, mu, u, cp, L, k, nu OR D, Re, Pr, L')
def grashoff(**kwargs):
if (kwargs['idealGas']==True): #if an ideal gas then use beta = 1/T_avg approximation
return(kwargs['g']*(1/((kwargs['Ts']+kwargs['Tinf'])/2)*(kwargs['Ts']-kwargs['Tinf'])*kwargs['L']**3/kwargs['nu']**2))
else:
return(kwargs['g']*kwargs['beta']*(kwargs['Ts']-kwargs['Tinf'])*kwargs['L']**3/kwargs['nu']**2)
def grashoffInfo():
print('arguments are g, Ts, Tinf, L, nu, idealGas=True OR g, beta, Ts, Tinf, L, nu, idealGas=False')
def nusselt(**kwargs):
return(kwargs['h']*kwargs['k']/kwargs['L'])
def nusseltInfo():
print('arguments are h, k, L')
def peclet(**kwargs):
try:
kwargs['Re']
return(kwargs['Re']*kwargs['Pr'])
except:
pass
try:
kwargs['alpha']
return(kwargs['L']*kwargs['u']/kwargs['alpha'])
except:
pass
return(kwargs['L']*kwargs['u']*kwargs['rho']*kwargs['cp']/kwargs['k'])
def pecletInfo():
print('arguments are Re, Pr OR L, u, alpha, OR L, u, rho, cp, k')
def rayleigh(**kwargs):
try:
kwargs['Gr']
return(kwargs['Gr']*kwargs['Pr'])
except:
return(kwargs['g']*kwargs['beta']*(kwargs['Ts']-kwargs['Tinf'])*kwargs['L']**3/(kwargs['nu']*kwargs['alpha']))
def rayleighInfo():
print('arguments are Gr, Pr OR g, beta, Ts, Tinf, L, nu, alpha')
def prandtl(**kwargs):
try:
kwargs['alpha']
return(kwargs['nu']/kwargs['alpha'])
except:
return(kwargs['cp']*kwargs['mu']/kwargs['k'])
def prandtlInfo():
print('arguments are nu, alpha OR cp, mu, k')
def reynolds(**kwargs):
try:
kwargs['rho']
return(kwargs['rho']*kwargs['u']*kwargs['L']/kwargs['mu'])
except:
return(kwargs['u']*kwargs['L']/kwargs['nu'])
def reynoldsInfo():
print('arguments are rho, u, L, mu OR u, L, nu')
def thermalDiffusivity(**kwargs):
return(kwargs['k']/(kwargs['rho']*kwargs['cp']))
def thermalDiffusivityInfo():
print('arguments are k, rho, cp')
def archimedes(**kwargs):
return(kwargs['g']*kwargs['L']**3*kwargs['rhoL']*(kwargs['rhoB']-kwargs['rhoL'])/kwargs['mu']**2)
def archimedesInfo():
print('arguments are g, L, rhoL, rhoB, mu')
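# Hedged usage sketch with made-up water-like property values (the helpers are
# unit-agnostic; consistent SI units are assumed here):
#
#   Re = reynolds(rho=998.0, u=1.2, L=0.05, mu=1.0e-3)   # ~59,880
#   Pr = prandtl(cp=4182.0, mu=1.0e-3, k=0.6)            # ~6.97
#   Pe = peclet(Re=Re, Pr=Pr)                            # product of the two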
```
#### File: chemEPy/chemEPy/insulation.py
```python
import math
import numpy as np
import scipy as sp
from scipy import optimize
from .fluidNumbers import reynolds, prandtl, graetz
from .nusseltCor import nu as nu
def areaLM(r1, r2, l1, l2):
a1 = math.pi*r1**2*l1
a2 = math.pi*r2**2*l2
return((a2-a1)/math.log(a2/a1))
def cylFunc(r3, rho, u, mu, Pr, kOuter, r1, r2, L, kInner, tInner, tOuter, q, hInner):
    # Root-finding target: the total thermal resistance of the insulated pipe (outer
    # convection/insulation term, inner convection, pipe-wall conduction) minus the
    # resistance implied by the duty, (tInner - tOuter)/q. The original expression used
    # '^' and implicit multiplication (likely pasted from a CAS); rewritten here as valid
    # Python with '**' and explicit '*', keeping the constants and grouping unchanged.
    re_outer = (rho * r3 * u) / mu
    return ((2/(0.3 + (0.0004202729100991299*Pr**(1/3)*math.sqrt(re_outer)
                       * (14100 + 10**(1/8)*141**(3/8)*re_outer**(5/8))**(4/5))
                / (1 + 0.5428835233189814*(1/Pr)**(2/3))**(1/4))
             + math.log(r3/r2))/(2*kOuter*L/math.pi)
            + 1/(2*r1*L*math.pi*hInner)
            + (r2-r1)/(kInner*areaLM(r1, r2, L, L))
            - (tInner-tOuter)/q)
def singlePipe(**kwargs):
try:
ub = kwargs['ub']
except:
ub = 1
need = ['rhoInner', 'rhoOuter', 'uInner', 'uOuter', 'muInner', 'muOuter', 'PrInner', 'PrOuter', 'kInner', 'kOuter',\
'r1', 'r2', 'L', 'tInner', 'tOuter', 'q']
allNeeds = True
for check in need: #checks all the required args are included if not print and then return
if(check in kwargs):
continue
else:
print('you are missing argument:', check)
allNeeds = False
if(not allNeeds):
return
rhoInner = kwargs['rhoInner']
rhoOuter = kwargs['rhoOuter']
uInner = kwargs['uInner']
uOuter = kwargs['uOuter']
muInner = kwargs['muInner']
muOuter = kwargs['muOuter']
PrInner = kwargs['PrInner']
PrOuter = kwargs['PrOuter']
r1 = kwargs['r1']
r2 = kwargs['r2']
L = kwargs['L']
kInner = kwargs['kInner']
kOuter = kwargs['kOuter']
tInner = kwargs['tInner']
tOuter = kwargs['tOuter']
q = kwargs['q']
    ReInner = reynolds(rho = rhoInner, u = uInner, L = 2*r1, mu = muInner)  # use the functions imported at the top of the module
    Gz = graetz(D = 2*r1, Re = ReInner, Pr = PrInner, L = L)
a1 = 2*math.pi*L*r1
if(tInner > tOuter):
heat = True
else:
heat = False
    # Assumption: 'tube' and 'Ts' are meant as string options and Re as the ReInner computed
    # above; the bare names tube, Ts and Re in the original would raise NameError here.
    hInner = nu(forced = True, shape = 'tube', uniform = 'Ts', Re = ReInner, Gz = Gz, heating = heat)*kInner/(2*r1)
rGuess = np.linspace(0,ub,101)
stepSize = rGuess[1] - rGuess[0]
#now we run the cylFunc with each rGuess, look for the sign change and then use the midpoint as an initial guess
prev = cylFunc(0, rhoOuter, uOuter, muOuter, PrOuter, kOuter, r1, r2, L, kInner, tInner, tOuter, q, hInner)
rUpper, rLower = 1, 1
for i in rGuess:
        nex = cylFunc(i, rhoOuter, uOuter, muOuter, PrOuter, kOuter, r1, r2, L, kInner, tInner, tOuter, q, hInner)  # evaluate at the current guess, not 0
if(prev * nex <= 0):
rUpper = i # we have found an good initial guess
rLower = i - stepSize
break
else:
prev = nex
if(i == ub): #if we get to the upper bound and still have not found a sign change
print('upper bound is not large enough')
return
sol = sp.optimize.root_scalar(cylFunc, args = (rhoOuter, uOuter, muOuter, PrOuter, kOuter, r1, r2, L, kInner,\
tInner, tOuter, q, hInner), bracket = [rLower, rUpper], method = 'toms748')
if(not sol.flag): #if this did not converge
print('bracket did not converge')
else:
return(sol.root)
def singlePipeInfo():
print('need the following args. Will return r3 the radius to the outer edge of the insulation \n\
rhoInner, rhoOuter, uInner, uOuter, muInner, muOuter, PrInner, PrOuter, kInner, kOuter, r1, r2, L, tInner, tOuter, q')
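# Hedged usage sketch -- every value below is a placeholder chosen only to show the call
# shape, not a validated design case:
#
#   r3 = singlePipe(rhoInner=998, rhoOuter=1.2, uInner=1.0, uOuter=3.0,
#                   muInner=1e-3, muOuter=1.8e-5, PrInner=7.0, PrOuter=0.7,
#                   kInner=0.6, kOuter=0.04, r1=0.025, r2=0.03, L=2.0,
#                   tInner=80.0, tOuter=20.0, q=150.0)
#
# singlePipe() brackets a sign change of cylFunc over [0, ub] and returns the outer
# insulation radius r3; it prints a message and returns None if no bracket is found.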
``` |
{
"source": "jmp1617/launchpad_gol",
"score": 3
} |
#### File: jmp1617/launchpad_gol/launchpad_gol.py
```python
import launchpad_py as launchpad
from pygame import time
L_SIZE = 8
color = (0, 64, 64)
def display_grid(grid, lp):
"""
display the grid on the launchpad
:param grid: the grid to display
:param lp: the launchpad object
:return: void
"""
for y in range(0, L_SIZE):
for x in range(0, L_SIZE):
if grid[y][x] > 0:
lp.LedCtrlXY(x, y+1, color[0], color[1], color[2])
else:
lp.LedCtrlXY(x, y+1, 0, 0, 0)
def check_type(lp):
"""
check the type of the launchpad
:param lp: launchpad object
:return: name of launchpad type
"""
if lp.Check(0, "pro"):
print("Launchpad Pro")
return "Pro"
elif lp.Check(0, "mk2"):
print("Launchpad Mk2")
return "Mk2"
else:
print("Launchpad Mk1/S/Mini")
return "Mk1"
def init_lp():
"""
initialize the launchpad
:return: launchpad object
"""
# create launchpad instance
lp = launchpad.Launchpad()
mode = check_type(lp)
success = False
# check for the correct launchpad
if mode == "Pro":
lp = launchpad.LaunchpadPro()
if lp.Open(0, "pro"):
success = True
elif mode == "Mk2":
lp = launchpad.LaunchpadMk2()
if lp.Open(0, "mk2"):
success = True
else:
if lp.Open():
success = True
    lp.LedCtrlXY(0, 0, 10, 30, 64)  # top-row button 0: slow down (increase delay)
    lp.LedCtrlXY(1, 0, 10, 30, 64)  # top-row button 1: speed up (decrease delay)
    lp.LedCtrlXY(2, 0, 64, 30, 0)   # top-row button 2: pause
    lp.LedCtrlXY(3, 0, 0, 64, 0)    # top-row button 3: play
    lp.LedCtrlXY(7, 0, 64, 0, 0)    # top-row button 7: quit
if not success:
exit(1)
else:
return lp
def check_neighbors(x, y, grid):
"""
check the count of neighbors
:param x: row of live cell
:param y: col of live cell
:param grid: grid at the time
:return: number of neighbors
"""
neighbors = 0
c_y = [-1, -1, -1, 1, 1, 1, 0, 0]
c_x = [-1, 1, 0, -1, 1, 0, -1, 1]
for n in range(0, 8):
if grid[(c_y[n]+y) % 8][(c_x[n]+x) % 8] > 0:
neighbors += 1
return neighbors
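# Note: the modulo-8 indexing above wraps the 8x8 grid into a torus, so edge cells treat
# cells on the opposite edge as neighbours. For example, the corner cell at row 0, col 0
# counts (7,7), (7,0), (7,1), (0,7), (0,1), (1,7), (1,0) and (1,1) as its neighbours.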
def life_cycle(grid):
"""
function to to perform a life cycle
:param grid: life grid
:return: new grid
"""
newg = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
for y in range(0, 8):
for x in range(0, 8):
n = check_neighbors(x, y, grid)
            if grid[y][x] > 0:  # if it's alive
if n == 2 or n == 3:
newg[y][x] = 1
else: # or if it is dead
if n == 3:
newg[y][x] = 1
return newg
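# Hedged example: a blinker away from the edges oscillates between horizontal and
# vertical under life_cycle(). With live cells at row 3, columns 2-4:
#
#   g = [[0] * 8 for _ in range(8)]
#   g[3][2] = g[3][3] = g[3][4] = 1
#   g = life_cycle(g)   # live cells are now (2,3), (3,3), (4,3)
#   g = life_cycle(g)   # and back to (3,2), (3,3), (3,4)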
def main():
cont = 1
lp = init_lp()
grid = [[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0]]
play = True
speed = 100
while cont:
# lp.LedAllOn(0)
but = lp.ButtonStateXY()
if but: # check for play pause and speed control
if but[0] == 3 and but[1] == 0 and but[2] == 127:
play = True
elif but[0] == 2 and but[1] == 0 and but[2] == 127:
play = False
elif but[0] == 1 and but[1] == 0 and but[2] == 127:
if speed != 20:
speed -= 20
elif but[0] == 0 and but[1] == 0 and but[2] == 127:
if speed != 1000:
speed += 20
elif but[0] == 7 and but[1] == 0 and but[2] == 127:
cont = False
else:
if not play: # if its paused allow editing via grid buttons
if grid[but[1]-1][but[0]] == 0 and but[2] == 127:
grid[but[1]-1][but[0]] = 1
lp.LedCtrlXY(but[0], but[1], color[0], color[1], color[2])
elif grid[but[1]-1][but[0]] == 1 and but[2] == 127:
grid[but[1]-1][but[0]] = 0
lp.LedCtrlXY(but[0], but[1], 0, 0, 0)
if play:
grid = life_cycle(grid)
display_grid(grid, lp)
time.wait(speed)
time.wait(speed)
lp.LedAllOn(0)
lp.ButtonFlush()
if __name__ == '__main__':
main()
``` |
{
"source": "jmp1617/L.L.U.V.",
"score": 3
} |
#### File: L.L.U.V./lluv/lluv_classes.py
```python
class UsbStorageDevice:
"""
object to hold define a usb device
"""
def __init__(self, name: str, size: int, path: str, key: int):
self._name = name
self._size = size
self._path = path
self._key = key
def __str__(self) -> str:
return "USB DEVICE[ name:\'"+self._name+"\' size:"+str(self._size)+" path:"+self._path+" ]"
def __repr__(self) -> str:
return self.__str__()
def get_name(self) -> str:
return self._name
def get_size(self) -> int:
return self._size
def get_path(self) -> str:
return self._path
def get_key(self) -> int:
return self._key
class Image:
"""
object to define an image
"""
def __init__(self, name: str, size: str, rsize: str, cat: str):
self._name = name
self._size = size
self._rsize = rsize
self._cat = cat
def __str__(self) -> str:
return "IMAGE[ name:\'"+self._name+"\' size:"+str(self._size) + \
" recommended size:"+self._rsize+" category: "+self._cat+"]"
def __repr__(self) -> str:
return self.__str__()
def get_name(self) -> str:
return self._name
def get_size(self) -> str:
return self._size
def get_rsize(self) -> str:
return self._rsize
def get_cat(self) -> str:
return self._cat
class Category:
"""
object do define a category
contains name and list of images in category
"""
def __init__(self, name: str, images: dict):
self._name = name
self._images = images
def __str__(self) -> str:
return "CATEGORY [ name:"+self._name+" images: "+str(self._images)+" ]"
def __repr__(self) -> str:
return self.__str__()
def get_name(self) -> str:
return self._name
def get_images(self) -> dict:
return self._images
```
#### File: L.L.U.V./lluv/lluv_cli.py
```python
import lluv.lluv as lluv
def display_curr_choices(selected_usb: str, selected_iso: str, p_usb_devices: dict, images: dict):
"""
print selected
:param selected_usb:
:param selected_iso:
:param p_usb_devices:
:param images:
:return:
"""
if selected_usb is not "":
print("\nSelected USB Device:", p_usb_devices[selected_usb].get_name())
else:
print("\nSelected USB Device: Not Yet Selected")
if selected_iso is not "":
print("Selected Image:", images[selected_iso].get_name(), "\n")
else:
print("Selected Image: Not Yet Selected\n")
def start():
"""
run the CLI
"""
"""
main lluv routine
:return: None
"""
selected_usb = ""
selected_iso = ""
selected_block_size = "512K"
lluv.check_config()
print("Type start to begin (anything else to exit)\n")
begin = input("lluv -> ")
if begin == 'start':
print("\nStarting... ")
iso_dir_path = lluv.get_path()
p_usb_devices = lluv.fetch_usb()
categories = lluv.fetch_images(iso_dir_path)
images = lluv.generate_image_master(categories)
print("Done")
done_step_one = False
done_step_two = False
done_step_three = False
not_finished = True
while not_finished:
while not done_step_one: # Step One
done_step_one = True
done_step_two = False
done_step_three = False
display_curr_choices(selected_usb, selected_iso, p_usb_devices, images)
print("STEP ONE - Select a USB storage Device:")
key_num = 0
for key, device in p_usb_devices.items():
print("\t", key, ") ", device.get_name(), "-", str(int(device.get_size()) / 1000000) + "MB")
key_num = key
print("\t", key_num + 1, ") Refresh Storage Devices")
print("\t 0 ) QUIT")
try:
choice = int(input("\nlluv -> "))
if choice < 0 or choice > (key_num + 1):
print("\nNot a valid number, choose a number 0 -", key_num + 1, "\n")
done_step_one = False
elif choice == key_num + 1:
print("\nRefreshing Devices...")
p_usb_devices = lluv.fetch_usb()
print("Done")
done_step_one = False
elif choice == 0:
exit()
else:
selected_usb = choice
except ValueError:
print("\nNot a number, choose a number 0 -", key_num + 1, "\n")
done_step_one = False
while not done_step_two and done_step_one:
done_step_two = True
done_step_three = False
display_curr_choices(selected_usb, selected_iso, p_usb_devices, images)
print("STEP TWO - Select an image")
print("Categories:")
key_num = 0
for cat in categories:
print("\t" + cat.get_name())
for key, image in cat.get_images().items():
print("\t\t", key, ") ", image.get_name())
key_num += 1
print("\tOther Options")
print("\t\t", key_num + 1, ") Refresh Images")
print("\t\t", key_num + 2, ") Go Back")
print("\t\t 0 ) QUIT")
try:
choice = int(input("\nlluv -> "))
                    if choice < 0 or choice > (key_num + 2):
                        print("\nNot a valid number, choose a number 0 -", key_num + 2, "\n")
                        done_step_two = False
                    elif choice == key_num + 1:  # Refresh Images (listed as key_num + 1 above)
                        print("\nRefreshing Images...")
                        categories = lluv.fetch_images(iso_dir_path)
                        images = lluv.generate_image_master(categories)
                        print("Done")
                        done_step_two = False
                    elif choice == key_num + 2:  # Go Back
done_step_one = False
done_step_two = False
p_usb_devices = lluv.fetch_usb()
categories = lluv.fetch_images(iso_dir_path)
images = lluv.generate_image_master(categories)
elif choice == 0:
exit()
else:
selected_iso = choice
except ValueError:
print("\nNot a number, choose a number 0 -", key_num + 1, "\n")
done_step_two = False
if selected_iso is not "" and done_step_one and done_step_two:
print("\nRunning Compatibility Check...")
if lluv.check_compatibility(p_usb_devices[selected_usb].get_size(), images[selected_iso].get_rsize()):
print("Selected Device Compatible with Selected Image")
else:
print("WARNING: devices may not be compatible")
print("Image recommended size:", images[selected_iso].get_rsize())
print("Selected USB size:", p_usb_devices[selected_usb].get_size() / 1000000000, " GB")
print("\nCalculating Block Size for " + p_usb_devices[selected_usb].get_name() + "...")
selected_block_size = lluv.calculate_block_size(p_usb_devices[selected_usb].get_path())
if selected_block_size == '':
print("Could not calculate optimal block size\n"
"This could be because the drive is write protected\n"
"(ex. already a live usb).\n"
"It could also be because the drive is unallocated, or it\n"
"was not able to be un mounted.\n"
"A default block size of 512K will be used.")
selected_block_size = "512K"
else:
print("Using: " + selected_block_size + " as it is an optimal bs")
while not done_step_three and done_step_two:
done_step_three = True
display_curr_choices(selected_usb, selected_iso, p_usb_devices, images)
print("STEP THREE - Write")
print("\t1 ) Write To Device")
print("\t2 ) Go Back to Step One")
print("\t3 ) Go Back to Step Two")
print("\t0 ) QUIT")
try:
choice = int(input("\nlluv -> "))
if choice < 0 or choice > 3:
print("\nNot a valid number, choose a number 0 - 3\n")
done_step_three = False
elif choice == 2:
done_step_one = False
done_step_three = False
p_usb_devices = lluv.fetch_usb()
categories = lluv.fetch_images(iso_dir_path)
images = lluv.generate_image_master(categories)
break
elif choice == 3:
done_step_two = False
done_step_three = False
p_usb_devices = lluv.fetch_usb()
categories = lluv.fetch_images(iso_dir_path)
images = lluv.generate_image_master(categories)
break
elif choice == 0:
exit()
else:
print("\nAre you sure you want to write:")
print("\t", images[selected_iso].get_name())
print("To USB device:")
print("\t", p_usb_devices[selected_usb].get_name(), "\n")
print("WARNING: This will destroy everything on selected device\n")
final = input("(Y/N) -> ")
if final in ("Y", "y"):
print("Beginning Write...\n")
lluv.write_to_device(images[selected_iso].get_cat() + "/" + images[selected_iso].get_name(),
# Account for iso category
p_usb_devices[selected_usb].get_path(),
selected_block_size,
images[selected_iso].get_size()[:len(images[selected_iso].get_size()) - 2],
True, "")
print("Done")
exit()
else:
done_step_three = False
except ValueError:
print("\nNot a number, choose a number 0 - 3\n")
done_step_three = False
elif begin == 'debug':
print("[DEBUG]")
iso_dir_path = lluv.get_path()
p_usb_devices = lluv.fetch_usb()
images = lluv.fetch_images(iso_dir_path)
print("Path to images:", iso_dir_path)
print("Possible USB storage devices:", p_usb_devices)
print("Possible Images to write:", images)
else:
exit()
def main():
start()
if __name__ == '__main__':
main()
```
#### File: L.L.U.V./lluv/lluv_tui.py
```python
import npyscreen
import lluv.lluv as lluv
import multiprocessing
import sys
# WIDGET SUBCLASSES
class FilePicker(npyscreen.FilenameCombo):
def __init__(self, *args, **keywords):
super(FilePicker, self).__init__(*args, **keywords)
self.must_exist=False
self.sort_by_extansion=True
self.select_dir=True
class ImgSel(npyscreen.MultiLine):
def __init__(self, *args, **keywords):
super(ImgSel, self).__init__(*args, **keywords)
def display_value(self, vl):
if type(vl) is str:
return vl
else:
return vl.get_name()
class USBSel(npyscreen.MultiLine):
def __init__(self, *args, **keywords):
super(USBSel, self).__init__(*args, **keywords)
def display_value(self, vl):
return vl.get_name() + " - " + str(vl.get_size() / 1000000000).split(".")[0] + " GB"
class CatagorySel(npyscreen.MultiLine):
def __init__(self, *args, **keywords):
super(CatagorySel, self).__init__(*args, **keywords)
def display_value(self, vl):
return vl.get_name()
class BSSlider(npyscreen.Slider):
def __init__(self, *args, **keywords):
super(BSSlider, self).__init__(*args, **keywords)
self.out_of = 17 # 18 dif bs
self.color = 'NO_EDIT'
def translate_value(self):
block_sizes = ["512b", "1K", "2K", "4k", "8K", "16K", "32K", "64K", "128K", "256K", "512K", "1M", "2M",
"4M", "8M", "16M", "32M", "64M"]
selval = int(round(self.value))
return block_sizes[selval]
class ProgressBar(npyscreen.SliderPercent):
def __init__(self, *args, **keywords):
super(ProgressBar, self).__init__(*args, **keywords)
self.editable = False
self.accuracy = 0
self.color = 'NO_EDIT'
class TextItem(npyscreen.FixedText):
def __init__(self, *args, **keywords):
super(TextItem, self).__init__(*args, **keywords)
self.editable = False
# BOX WRAPPERS
class FilePickerBox(npyscreen.BoxTitle):
_contained_widget = FilePicker
class ImgSelectionBox(npyscreen.BoxTitle):
_contained_widget = ImgSel
class UsbSelectionBox(npyscreen.BoxTitle):
_contained_widget = USBSel
class CatSelectionBox(npyscreen.BoxTitle):
_contained_widget = CatagorySel
class SliderBox(npyscreen.BoxTitle):
_contained_widget = BSSlider
class CheckBoxBox(npyscreen.BoxTitle):
_contained_widget = npyscreen.SelectOne
class NewBox(npyscreen.BoxTitle):
_contained_widget = TextItem
class ProgressBarBox(npyscreen.BoxTitle):
_contained_widget = ProgressBar
# FORM THEME
class NewTheme(npyscreen.ThemeManager):
def __init__(self):
super().__init__()
default_colors = {
'DEFAULT': 'WHITE_BLACK',
'FORMDEFAULT': 'WHITE_BLACK',
'NO_EDIT': 'BLUE_BLACK',
'STANDOUT': 'CYAN_BLACK',
'CURSOR': 'WHITE_BLACK',
'CURSOR_INVERSE': 'BLACK_WHITE',
'LABEL': 'GREEN_BLACK',
'LABELBOLD': 'WHITE_BLACK',
'CONTROL': 'YELLOW_BLACK',
'IMPORTANT': 'GREEN_BLACK',
'SAFE': 'GREEN_BLACK',
'WARNING': 'MAGENTA_BLACK',
'DANGER': 'RED_BLACK',
'CRITICAL': 'BLACK_RED',
'GOOD': 'GREEN_BLACK',
'GOODHL': 'GREEN_BLACK',
'VERYGOOD': 'BLACK_GREEN',
'CAUTION': 'YELLOW_BLACK',
'CAUTIONHL': 'BLACK_YELLOW',
}
# SPLASH SCREEN FORM
class TitleForm(npyscreen.Form):
def create(self):
l_space = 12
the1 = " " * 5 + "╔╦╗┬ ┬┌─┐"
the2 = " " * 5 + " ║ ├─┤├┤ "
the3 = " " * 5 + " ╩ ┴ ┴└─┘"
mac1 = " " * 75 + "╔╦╗┌─┐┌─┐┬ ┬┬┌┐┌┌─┐"
mac2 = " " * 75 + "║║║├─┤│ ├─┤││││├┤ "
mac3 = " " * 75 + "╩ ╩┴ ┴└─┘┴ ┴┴┘└┘└─┘"
title_block1 = " " * l_space + " ,gggg, ,gggg, ,ggg, gg ,ggg, ,gg "
title_block2 = " " * l_space + " d8\" \"8I d8\" \"8I dP\"\"Y8a 88 dP\"\"Y8a ,8P "
title_block3 = " " * l_space + " 88 ,dP 88 ,dP Yb, `88 88 Yb, `88 d8' "
title_block4 = " " * l_space + " 8888888P\" 8888888P\" `\" 88 88 `\" 88 88 "
title_block5 = " " * l_space + " 88 88 88 88 88 88 "
title_block6 = " " * l_space + " 88 88 88 88 I8 8I "
title_block7 = " " * l_space + " ,aa,_88 ,aa,_88 88 88 `8, ,8' "
title_block8 = " " * l_space + " dP\" \"88P dP\" \"88P 88 88 Y8, ,8P "
title_block9 = " " * l_space + " Yb,_,d88b,,_ Yb,_,d88b,,_ Y8b,____,d88, Yb,_,dP "
title_block10 = " " * l_space + " \"Y8P\" \"Y88888 \"Y8P\" \"Y88888 \"Y888888P\"Y8 \"Y8P\" "
box = self.add(npyscreen.BoxTitle,
name="Welcome To",
max_width=105,
relx=20,
max_height=36,
rely=5,
contained_widget_arguments={
'color': "WARNING",
'widgets_inherit_color': True, }
)
box.footer = "by: <NAME> (jpotter)"
box.values = ["", "", the1, the2, the3, "", "", "", "", "", "", title_block1, title_block2, title_block3,
title_block4, title_block5, title_block6, title_block7, title_block8, title_block9,
title_block10, "", " " * l_space * 3 + "( Linux Live USB Vending )", "", "", "", "", "", "", mac1,
mac2, mac3]
box.editable = False
def afterEditing(self):
self.parentApp.setNextForm('Selection')
# MAIN FORM
class SelectForm(npyscreen.ActionForm):
def create(self):
self.keypress_timeout = 10
# CREATE WIDGETS
# category selection
self.cat = self.add(CatSelectionBox,
name="Select an Category:",
max_width=30,
relx=3,
rely=2,
max_height=39,
values=self.parentApp.img_categories)
# usb device selection
self.usb = self.add(UsbSelectionBox,
name="Select a USB Device:",
max_width=40,
max_height=12,
relx=35,
rely=2,
values=self.parentApp.usb_list)
# image selection box - becomes editable once a category
self.img = self.add(ImgSelectionBox,
name="Select an Image",
max_width=70,
max_height=22,
relx=35,
rely=15,
values=["- Select a Category First -"])
# isodir path selection widget
self.file_pick = self.add(FilePickerBox,
name="Change which image directory the config points to :",
max_width=70,
relx=35,
rely=38,
max_height=3,
value=lluv.get_path(),
footer="Selection will be saved to the config")
# progress bar - an altered slider widget
self.pbar = self.add(ProgressBarBox,
max_height=3,
max_width=115,
out_of=100,
name="Write Progress:",
relx=3, rely=42)
# box to show how much data has been written
self.written = self.add(NewBox,
name="Written:",
max_width=20,
rely=42, relx=120,
max_height=3,
value="0 / 0 MB")
# box to show selected usb
self.selected_usb_box = self.add(NewBox,
name="Selected USB Device:",
max_width=33,
max_height=4,
relx=107,
rely=7,
value="Not Yet Selected")
# box to show selected image
self.selected_img_box = self.add(NewBox,
name="Selected Image:",
max_width=63,
max_height=4,
relx=77,
rely=2,
value="Not Yet Selected")
# box to show block size and minimum recommended usb size
self.display_block = self.add(npyscreen.BoxTitle,
name="Block Size",
max_width=28,
max_height=7,
relx=77,
rely=7,
footer="Minimum USB Size")
self.display_block.values = [" ▼", " Selected: " + self.parentApp.selected_block, "",
" -Rec. Size For Image-", " ▲"]
# box to display block options - check box field
self.block_check = self.add(CheckBoxBox,
name="Block Size Options:",
max_width=33,
max_height=6,
relx=107,
rely=12,
value=[0, ],
values=["Use Default (512K)", "Auto Detect", "Use Block Size Slider"])
# slider to choose block size
self.bs_slide = self.add(SliderBox,
name="Block Size Slider:",
max_width=33,
max_height=3,
relx=107,
rely=19)
# How to box
self.para = self.add(npyscreen.BoxTitle,
name="How To:",
max_width=33,
max_height=18,
relx=107,
rely=23,
contained_widget_arguments={
'color': "WARNING",
'widgets_inherit_color': True, }
)
self.para.values = ["", " STEP 1:", " │Select an Image Category.", " │Select an Image.",
" │Select an USB Device", "", " STEP 2:", " │Configure Block Size", " │(optional)",
"", " STEP 3:", " │Select the Yellow OK", " │Write", " │Profit $$"]
# Lists of widgets sel_widgets: widgets that can have states
self.sel_widgets = [self.cat, self.usb, self.img, self.block_check, self.bs_slide]
self.all_widgets = [self.cat, self.usb, self.img, self.block_check, self.bs_slide, self.display_block,
self.selected_img_box, self.selected_usb_box, self.written, self.pbar, self.img, self.usb,
self.cat]
# set certain widgets as un editable
self.display_block.editable = False # Block
self.para.editable = False # How to box
self.bs_slide.editable = False # Temp block size slider
self.img.editable = False # Temp image box
# Add the handler - handles progressbar and written value changes
self.add_event_hander("DISPLAY", self.update_prog_handler)
# EVENT HANDLER FOR PROGRESS
def update_prog_handler(self, event):
if self.parentApp.IS_WRITING: # Double check that the write process is active
self.written.value = str(self.parentApp.current_write) + " / " + \
str(self.parentApp.selected_image.get_size()) # current write out of img size
self.pbar.value = self.parentApp.percent # update the progress bar
self.written.display() # redraw both widgets
self.pbar.display()
# IF OK IS PRESSED
def on_ok(self):
if not self.parentApp.IS_WRITING: # Disable ok button
# fetch selected data
image = self.parentApp.selected_image
usb = self.parentApp.selected_usb
block = self.parentApp.selected_block
everything_selected = True # all selected flag
compat = True # all compatible flag
# Check if everything is selected
if image is None and usb is None:
self.err_pop("image or usb device")
everything_selected = False
else:
if image is None:
self.err_pop("image")
everything_selected = False
if usb is None:
self.err_pop("usb device")
everything_selected = False
# Check if devices are compatible
if everything_selected:
compat = lluv.check_compatibility(usb.get_size(), image.get_rsize())
if not compat:
self.aux_pop("The selected usb device is not large enough for the selected image. ("
+ image.get_name() + ") has a recommended size of " + image.get_rsize(),
"Not Compatible")
# Initialize write popup
if everything_selected and compat:
result = self.warning_yesno(image.get_name(), usb.get_name(), block) # ask the user if they are sure
# BEGIN WRITE
if result: # if they confirmed
for widget in self.all_widgets: # disable all widgets
widget.editable = False
self.parentApp.IS_WRITING = True # Flag as ready to write
p = multiprocessing.Process(target=lluv_write_ex, args=( # spawn write process
self.parentApp.selected_image.get_cat() + "/" + self.parentApp.selected_image.get_name(),
self.parentApp.selected_usb.get_path(),
self.parentApp.selected_block,
self.parentApp.selected_image.get_size(),))
p.start()
# IF CANCEL IS CLICKED
def on_cancel(self):
if not self.parentApp.IS_WRITING: # Disable cancel button
if self.parentApp.is_kiosk:
self.full_reset() # reset the form # only if kiosk mode
else:
sys.stdout.write("\x1b[8;{rows};{cols}t".format(rows=24, cols=80))
print("Exiting TUI")
exit() # if not kiosk mode, exit the app after resizing
# CALLED EVERY TIME THE USER PRESSES A BUTTON
# will only update values to avoid slow down
# no redraws except path update
def adjust_widgets(self):
# Check to see if the file path has been changed
if self.file_pick.value != lluv.get_path(): # if the path was changed
lluv.set_image_path(self.file_pick.value)
self.parentApp.img_categories = lluv.fetch_images(self.file_pick.value)
self.cat.values = self.parentApp.img_categories
self.cat.display()
# The category has been selected and the parent isn't writing allow images to be altered
if self.parentApp.selected_category is not None and not self.parentApp.IS_WRITING:
self.img.editable = True
# Set the size of the image in the written box
if self.parentApp.selected_image is not None:
self.written.value = str(self.parentApp.current_write) + " / " + str(
self.parentApp.selected_image.get_size())
# Update the selected values in the parent
if self.cat.value is not None:
self.parentApp.selected_category = self.parentApp.img_categories[self.cat.value]
if self.usb.value is not None:
self.parentApp.selected_usb = self.parentApp.usb_list[self.usb.value]
if self.img.value is not None:
self.parentApp.selected_image = self.parentApp.img_list[self.img.value]
# Update image listing
if self.parentApp.selected_category is not None:
self.parentApp.img_list = lluv.generate_list(self.parentApp.selected_category.get_images())
self.img.values = self.parentApp.img_list
# Update selected views
if self.parentApp.selected_image is not None:
self.selected_img_box.value = self.parentApp.selected_image.get_name()
if self.parentApp.selected_usb is not None:
self.selected_usb_box.value = self.parentApp.selected_usb.get_name()
# Update block size
self.update_block_selection()
# Update the block display
if self.parentApp.selected_image is not None:
self.display_block.values = [" ▼", " Selected: " + self.parentApp.selected_block, "",
" Minimum Size: " + self.parentApp.selected_image.get_rsize(),
" ▲"]
# FUNCTION TO UPDATE BLOCK SELECTION
def update_block_selection(self):
block_sizes = ["512b", "1K", "2K", "4k", "8K", "16K", "32K", "64K", "128K", "256K", "512K", "1M", "2M",
"4M", "8M", "16M", "32M", "64M"]
if self.block_check.value == [2]: # If use slider is selected
if not self.parentApp.IS_WRITING: # If the parent is not writing
self.bs_slide.editable = True # Activate the slider
                self.parentApp.haspoped = False  # Flag so that the auto-BS popup can appear again
self.parentApp.selected_block = block_sizes[int(round(self.bs_slide.value))] # Get the value of the slider
elif self.block_check.value == [1]: # If auto bs is selected
self.bs_slide.editable = False # Shut off the slider
            if not self.parentApp.haspoped:  # If the popup has not popped up already
self.spawn_autobs_pop() # Spawn the popup
if self.parentApp.selected_usb is not None:
result = lluv.calculate_block_size(self.parentApp.selected_usb.get_path()) # Backend find optimal
if result == '': # if cant find an optimal
self.spawn_cantauto_pop()
else: # if can find an optimal
self.spawn_canauto_pop()
self.parentApp.selected_block = result # set optimal as selected block
else: # if there is no usb selected
self.spawn_nousb_pop()
                self.parentApp.haspoped = True  # tell the parent that this has popped so it doesn't repeat indefinitely
elif self.block_check.value == [0]: # if selected is use default
self.bs_slide.editable = False
self.parentApp.haspoped = False
self.parentApp.selected_block = "512K" # set to default
# CALLED WHEN THE FORM IS WAITING FOR A EVENT
# called less frequently so can be used for redraws
    # however, it will not be called while the user is rapidly pressing keys
def while_waiting(self):
# Refresh usb listing and image listing then redisplay specified widgets
self.parentApp.refresh() # refresh parent
self.usb.values = self.parentApp.usb_list
self.update_displays()
# Check to see if the write process was complete
if self.parentApp.percent == 100: # Writing process complete
self.pbar.value = 100
# Make sure 100 is displayed
if self.parentApp.selected_image is not None:
                self.written.value = \
str(self.parentApp.selected_image.get_size()[:len(self.parentApp.selected_image.get_size()) - 2]) \
+ " / " + \
str(self.parentApp.selected_image.get_size())
self.pbar.display()
self.written.display()
# Reset Flags
self.parentApp.IS_WRITING = False
self.parentApp.running = False
# Alert the user that the write was successful
self.aux_pop(
self.parentApp.selected_image.get_name() + " was written to " +
self.parentApp.selected_usb.get_name() + " successfully!", "Writing Successful")
# Begin Cancel Form
self.full_reset()
# FUNCTION TO REDRAW SELECTED WIDGETS
def update_displays(self):
self.selected_usb_box.display()
self.selected_img_box.display()
self.display_block.display()
self.usb.display()
# FUNCTION TO RESET THE FORM FOR REUSE
def full_reset(self):
# Switch the form to title screen
self.parentApp.switchForm('MAIN')
# Reset parent
self.parentApp.reset_values()
# Reset values
self.selected_usb_box.value = "Not Yet Selected"
self.selected_img_box.value = "Not Yet Selected"
self.img.values = ["- Select a Category First -"]
self.bs_slide.value = 0
self.pbar.value = 0
self.written.value = "0 / 0 MB"
self.display_block.values = [" ▼", " Selected: " + self.parentApp.selected_block, "",
"-Rec. Size For Image-", " ▲"]
# Reset all of the widgets states
for widget in self.sel_widgets:
widget.value = None
# Reset default check
self.block_check.value = [0]
# Unlock the form
self.unlock()
# FUNCTION TO SET INITIALLY EDITABLE WIDGETS AS EDITABLE TO UNLOCK THE FORM
def unlock(self):
self.cat.editable = True
self.usb.editable = True
self.block_check.editable = True
# A BUNCH OF POPUPS
def spawn_autobs_pop(self):
message = "You have selected auto block size. This should work on your storage device if it is" \
"allocated and partition one is writable. LLUV will now find an optimal block size..."
npyscreen.notify_confirm(message, title="AUTO BLOCK SIZE", wrap=True)
def spawn_nousb_pop(self):
message = "No USB device is selected..."
npyscreen.notify_confirm(message, title="AUTO BLOCK SIZE", wrap=True)
def spawn_cantauto_pop(self):
message = "LLUV was not able to generate an optimal block size, This could be because the drive is " \
"un allocated or the drive is read only (ex. already a live usb)"
npyscreen.notify_confirm(message, title="AUTO BLOCK SIZE", wrap=True)
def spawn_canauto_pop(self):
message = "An optimal block size was found. Setting as selected block size..."
npyscreen.notify_confirm(message, title="AUTO BLOCK SIZE", wrap=True)
def err_pop(self, forgot: str):
message = "There was no " + forgot + " selected"
npyscreen.notify_confirm(message, title="Error", wrap=True)
def aux_pop(self, message: str, title: str):
npyscreen.notify_confirm(message, title=title, wrap=True)
def warning_yesno(self, image_name: str, usb_name: str, block: str) -> bool:
message = "You are about to write:\n(" + image_name + ")\n ▼\n(" + usb_name + ")\nWith block size: " \
+ block + "\nThis operation can't be undone and anything on the storage device " \
"will be destroyed\n" \
"Are you sure?"
return npyscreen.notify_yes_no(message, title="WARNING", form_color="DANGER", wrap=True)
# THE APPLICATION CLASS
class LluvTui(npyscreen.StandardApp):
def __init__(self):
super(LluvTui, self).__init__()
# DATA
self.img_categories = lluv.fetch_images(lluv.get_path()) # category List
self.usb_list = lluv.generate_list(lluv.fetch_usb()) # usb list - lluv.fetch_usb by default returns a dict
self.img_list = [] # to be populated after the category has been selected
# SELECTIONS AND STATUS
self.selected_category = None # Hold the objects
self.selected_image = None
self.selected_usb = None
self.selected_block = "512K" # Default BS
self.current_write = 0
self.percent = 0
# FLAGS
self.haspoped = False
self.IS_WRITING = False
self.is_kiosk = lluv.isKiosk() # Check to see if the kiosk option was selected in the config
def onStart(self):
# Form initialization
npyscreen.setTheme(NewTheme) # set the theme
        if self.is_kiosk:
self.addForm('MAIN', TitleForm, name="The L.L.U.V. Machine - Page (1/2)", ) # Title form
name_for_second = "Configure and Write - Page (2/2)"
title = "Selection"
else:
name_for_second = "Configure and Write"
title = "MAIN"
self.addForm(title, SelectForm, name=name_for_second, ) # 47 x 143 Main selection form
# while the form is waiting, if DD is working, send the event to update the progress
def while_waiting(self):
if self.IS_WRITING:
# update parent percent
self.percent = lluv.dd_status(int(self.selected_image.get_size()[:len(self.selected_image.get_size()) - 2]))
# update parent current_write
self.current_write = round(int(self.selected_image.get_size()[:len(self.selected_image.get_size()) - 2]) *
(self.percent / 100))
# send the event to the child
self.queue_event(npyscreen.Event("DISPLAY"))
# function to reset all values for form loop
def reset_values(self):
self.refresh()
# SEL
self.selected_category = None
self.selected_image = None
self.selected_usb = None
self.selected_block = "512K" # Default BS
self.current_write = 0
self.percent = 0
# FLA
self.haspoped = False
self.IS_WRITING = False
# function to refresh the usb devices
def refresh(self):
self.usb_list = lluv.generate_list(lluv.fetch_usb())
# USED TO SPAWN AND PERFORM DD
def lluv_write_ex(i_name, usb_path, block, i_size):
"""
spawned as parallel process
calls the backend dd routine
:param i_name: image name and category
:param usb_path: usb device
:param block: selected block
:param i_size: size of image
:return:
"""
lluv.write_to_device(i_name, usb_path, block, i_size, False, "") # Backend Write
def start():
LluvTui().run()
if __name__ == '__main__':
start()
``` |
{
"source": "jmp1985/dials",
"score": 2
} |
#### File: algorithms/integration/test_processor.py
```python
from __future__ import absolute_import, division, print_function
import math
import mock
import pytest
from dxtbx.model.experiment_list import ExperimentListFactory
import dials.algorithms.integration.processor
from dials.algorithms.profile_model.gaussian_rs import Model
from dials.array_family import flex
from dials_algorithms_integration_integrator_ext import JobList
def test_shoebox_memory_is_a_reasonable_guesstimate(dials_data):
path = dials_data("centroid_test_data").join("experiments.json").strpath
exlist = ExperimentListFactory.from_json_file(path)[0]
exlist.profile = Model(
None,
n_sigma=3,
sigma_b=0.024 * math.pi / 180.0,
sigma_m=0.044 * math.pi / 180.0,
)
rlist = flex.reflection_table.from_predictions(exlist)
rlist["id"] = flex.int(len(rlist), 0)
rlist["bbox"] = flex.int6(rlist.size(), (0, 1, 0, 1, 0, 1))
jobs = JobList()
jobs.add((0, 1), (0, 9), 9)
for flatten in (True, False):
assumed_memory_usage = list(jobs.shoebox_memory(rlist, flatten))
assert len(assumed_memory_usage) == 1
assert assumed_memory_usage[0] == pytest.approx(23952, abs=3000)
@mock.patch("dials.algorithms.integration.processor.flex.max")
@mock.patch("dials.algorithms.integration.processor.psutil.virtual_memory")
@mock.patch("dials.algorithms.integration.processor.psutil.swap_memory")
def test_runtime_error_raised_when_not_enough_memory(
mock_psutil_swap, mock_psutil_vm, mock_flex_max
):
mock_flex_max.return_value = 750001
mock_psutil_vm.return_value.available = 1000000
mock_psutil_swap.return_value.free = 0
phil_mock = mock.Mock()
phil_mock.mp.method = "multiprocessing"
phil_mock.mp.nproc = 4
phil_mock.block.max_memory_usage = 0.75
reflections = {"bbox": flex.int6(1000, (0, 1, 0, 1, 0, 1))}
manager = dials.algorithms.integration.processor._Manager(
None, reflections, phil_mock
)
manager.jobs = mock.Mock(autospec=JobList)
with pytest.raises(MemoryError) as exc_info:
manager.compute_processors()
assert "Not enough memory to run integration jobs." in exc_info.value.args[0]
mock_flex_max.assert_called_once_with(manager.jobs.shoebox_memory.return_value)
# Reduce memory usage by 1 byte, should then pass
mock_flex_max.return_value = 750000
manager.compute_processors()
mock_flex_max.assert_called_with(manager.jobs.shoebox_memory.return_value)
```
#### File: test/command_line/test_preservation_of_experiment_identifiers.py
```python
import procrunner
from dxtbx.serialize import load
from dials.array_family import flex
def test_preservation_of_identifiers(dials_data, tmpdir):
"""Run the dials processing workflow, checking for preservation of identifiers.
This is just a simple case. The individual programs that are expected to
change the identifiers are tested separately, this is to check that the
other programs maintain the identifiers through processing.
"""
# First import - should set a unique id.
image_files = dials_data("centroid_test_data").listdir("centroid*.cbf", sort=True)
result = procrunner.run(
["dials.import", "output.experiments=imported.expt"]
+ [f.strpath for f in image_files],
working_directory=tmpdir.strpath,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("imported.expt").check(file=1)
imported_exp_path = tmpdir.join("imported.expt").strpath
experiments = load.experiment_list(imported_exp_path)
import_expt_id = experiments[0].identifier
assert import_expt_id != ""
# Now find spots.
result = procrunner.run(
["dials.find_spots", imported_exp_path, "output.reflections=strong.refl"],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("strong.refl").check(file=1)
strong_refl_path = tmpdir.join("strong.refl").strpath
reflections = flex.reflection_table.from_file(strong_refl_path)
assert dict(reflections.experiment_identifiers()) == {0: import_expt_id}
# Now index
result = procrunner.run(
[
"dials.index",
strong_refl_path,
imported_exp_path,
"output.reflections=indexed.refl",
"output.experiments=indexed.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("indexed.refl").check(file=1)
assert tmpdir.join("indexed.expt").check(file=1)
indexed_exp_path = tmpdir.join("indexed.expt").strpath
experiments = load.experiment_list(indexed_exp_path)
indexed_refl_path = tmpdir.join("indexed.refl").strpath
reflections = flex.reflection_table.from_file(indexed_refl_path)
indexed_expt_id = experiments[0].identifier
assert indexed_expt_id != ""
assert list(experiments.identifiers()) == [indexed_expt_id]
assert dict(reflections.experiment_identifiers()) == {0: indexed_expt_id}
# Now refine bravais setting
result = procrunner.run(
["dials.refine_bravais_settings", indexed_refl_path, indexed_exp_path],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("bravais_setting_9.expt").check(file=1)
bravais_exp_path = tmpdir.join("bravais_setting_9.expt").strpath
experiments = load.experiment_list(bravais_exp_path)
assert experiments[0].identifier == indexed_expt_id
# Now reindex
result = procrunner.run(
[
"dials.reindex",
indexed_refl_path,
indexed_exp_path,
"change_of_basis_op=b,c,a",
"output.reflections=reindexed.refl",
"output.experiments=reindexed.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("reindexed.expt").check(file=1)
assert tmpdir.join("reindexed.refl").check(file=1)
reindexed_exp_path = tmpdir.join("reindexed.expt").strpath
experiments = load.experiment_list(reindexed_exp_path)
reindexed_refl_path = tmpdir.join("reindexed.refl").strpath
reflections = flex.reflection_table.from_file(reindexed_refl_path)
assert list(experiments.identifiers()) == [indexed_expt_id]
assert dict(reflections.experiment_identifiers()) == {0: indexed_expt_id}
# Now refine
result = procrunner.run(
[
"dials.refine",
reindexed_refl_path,
reindexed_exp_path,
"output.reflections=refined.refl",
"output.experiments=refined.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("refined.expt").check(file=1)
assert tmpdir.join("refined.refl").check(file=1)
refined_exp_path = tmpdir.join("refined.expt").strpath
experiments = load.experiment_list(refined_exp_path)
refined_refl_path = tmpdir.join("refined.refl").strpath
reflections = flex.reflection_table.from_file(refined_refl_path)
assert list(experiments.identifiers()) == [indexed_expt_id]
assert dict(reflections.experiment_identifiers()) == {0: indexed_expt_id}
# Now integrate
result = procrunner.run(
[
"dials.integrate",
refined_refl_path,
refined_exp_path,
"output.reflections=integrated.refl",
"output.experiments=integrated.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("integrated.expt").check(file=1)
assert tmpdir.join("integrated.refl").check(file=1)
integrated_exp_path = tmpdir.join("integrated.expt").strpath
experiments = load.experiment_list(integrated_exp_path)
integrated_refl_path = tmpdir.join("integrated.refl").strpath
reflections = flex.reflection_table.from_file(integrated_refl_path)
assert list(experiments.identifiers()) == [indexed_expt_id]
assert dict(reflections.experiment_identifiers()) == {0: indexed_expt_id}
# Now run cosym (symmetry fails due to small amount of data)
result = procrunner.run(
[
"dials.symmetry",
integrated_refl_path,
integrated_exp_path,
"output.reflections=symmetrized.refl",
"output.experiments=symmetrized.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("symmetrized.expt").check(file=1)
assert tmpdir.join("symmetrized.refl").check(file=1)
symmetrized_exp_path = tmpdir.join("symmetrized.expt").strpath
experiments = load.experiment_list(symmetrized_exp_path)
symmetrized_refl_path = tmpdir.join("symmetrized.refl").strpath
reflections = flex.reflection_table.from_file(symmetrized_refl_path)
assert list(experiments.identifiers()) == [indexed_expt_id]
assert dict(reflections.experiment_identifiers()) == {0: indexed_expt_id}
# Now scale
result = procrunner.run(
[
"dials.scale",
symmetrized_refl_path,
symmetrized_exp_path,
"output.reflections=scaled.refl",
"output.experiments=scaled.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("scaled.expt").check(file=1)
assert tmpdir.join("scaled.refl").check(file=1)
scaled_exp_path = tmpdir.join("scaled.expt").strpath
experiments = load.experiment_list(scaled_exp_path)
scaled_refl_path = tmpdir.join("scaled.refl").strpath
reflections = flex.reflection_table.from_file(scaled_refl_path)
assert list(experiments.identifiers()) == [indexed_expt_id]
assert dict(reflections.experiment_identifiers()) == {0: indexed_expt_id}
# Now do two-theta refine
result = procrunner.run(
[
"dials.two_theta_refine",
scaled_refl_path,
scaled_exp_path,
"output.experiments=tt.expt",
],
working_directory=tmpdir,
)
assert not result.returncode and not result.stderr
assert tmpdir.join("tt.expt").check(file=1)
tt_exp_path = tmpdir.join("tt.expt").strpath
experiments = load.experiment_list(tt_exp_path)
assert list(experiments.identifiers()) == [indexed_expt_id]
``` |
{
"source": "jmp1985/maptools",
"score": 2
} |
#### File: maptools/maptools/fsc.py
```python
import logging
import numpy
import scipy.ndimage
import yaml
from matplotlib import pylab, ticker
from maptools.util import read, read_axis_order
from maptools.reorder import reorder
from math import sqrt
# Get the logger
logger = logging.getLogger(__name__)
def resolution_from_fsc(bins, fsc, value=0.5):
"""
Compute the resolution from the FSC curve
Args:
bins (array): The resolution bins (ordered from low resolution to high)
        fsc (array): The fsc in that resolution bin
        value (float): The FSC threshold used to define the resolution (default 0.5)
Returns:
(bin index, bin value, fsc value)
"""
assert len(bins) == len(fsc)
bin_index = len(bins) - 1
bin_value = bins[bin_index]
fsc_value = fsc[bin_index]
for i, (b, f) in enumerate(zip(bins, fsc)):
        if f < value:
bin_index = i
bin_value = b
fsc_value = f
break
return bin_index, bin_value, fsc_value
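# Worked example (illustrative numbers only): with bins = [0.1, 0.2, 0.3] (in 1/d^2)
# and fsc = [0.9, 0.6, 0.4], the first bin falling below the 0.5 threshold is index 2,
# so resolution_from_fsc returns (2, 0.3, 0.4) and the reported resolution estimate is
# 1 / sqrt(0.3) ~= 1.83 A, matching the convention used in mapfile_fsc below.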
def array_fsc(
data1,
data2,
nbins=20,
resolution=None,
voxel_size=(1, 1, 1),
axis=None,
method="binned",
**kwargs
):
"""
Compute the local FSC of the map
Args:
data1 (array): The input map 1
data2 (array): The input map 2
nbins (int): The number of bins
resolution (float): The resolution limit
axis (tuple): The axis of the plane to compute the FSC
method (str): Method to use (binned or averaged)
Returns:
array: The FSC
"""
# Check the axis
if type(axis) in [int, float]:
axis = (axis,)
# Get the subset of data
logger.info("Computing FSC")
# Average along the remaining axes
if axis is not None:
assert all(a in (0, 1, 2) for a in axis)
voxel_size = tuple(voxel_size[a] for a in axis)
axis = tuple(set((0, 1, 2)).difference(axis))
data1 = numpy.mean(data1, axis=axis)
data2 = numpy.mean(data2, axis=axis)
# Normalize the data
data1 = (data1 - numpy.mean(data1)) / numpy.std(data1)
data2 = (data2 - numpy.mean(data2)) / numpy.std(data2)
# Compute the radius
shape = data1.shape
indices = [
(1 / v) * (numpy.arange(s) - s // 2) / s for s, v in zip(shape, voxel_size)
]
R = numpy.fft.fftshift(
numpy.sum(numpy.array(numpy.meshgrid(*indices, indexing="ij")) ** 2, axis=0)
)
# Compute the FFT of the data
X = numpy.fft.fftn(data1)
Y = numpy.fft.fftn(data2)
# Flatten the array
X = X.flatten()
Y = Y.flatten()
R = R.flatten()
# Get the max resolution
max_resolution = 1.0 / sqrt(R.max())
# Create a resolution mask
if resolution is not None:
if resolution < max_resolution:
resolution = max_resolution
mask = R < 1.0 / resolution ** 2
X = X[mask]
Y = Y[mask]
R = R[mask]
else:
resolution = max_resolution
# Multiply X and Y together
XX = numpy.abs(X) ** 2
YY = numpy.abs(Y) ** 2
XY = numpy.real(X * numpy.conj(Y))
# Compute local variance and covariance by binning with resolution
if method == "binned":
bin_index = numpy.floor(nbins * R * resolution ** 2).astype("int32")
varX = numpy.bincount(bin_index, XX)
varY = numpy.bincount(bin_index, YY)
covXY = numpy.bincount(bin_index, XY)
elif method == "averaged":
bin_index = numpy.floor((sum(shape) // 2) * R * resolution ** 2).astype("int32")
varX = numpy.bincount(bin_index, XX)
varY = numpy.bincount(bin_index, YY)
covXY = numpy.bincount(bin_index, XY)
varX = scipy.ndimage.uniform_filter(varX, size=nbins, mode="nearest")
varY = scipy.ndimage.uniform_filter(varY, size=nbins, mode="nearest")
covXY = scipy.ndimage.uniform_filter(covXY, size=nbins, mode="nearest")
else:
raise RuntimeError('Expected "binned" or "averaged", got %s' % method)
# Compute the FSC
tiny = 1e-5
mask = (varX > tiny) & (varY > tiny)
fsc = numpy.zeros(covXY.shape)
fsc[mask] = covXY[mask] / (numpy.sqrt(varX[mask]) * numpy.sqrt(varY[mask]))
bins = (1 / resolution ** 2) * numpy.arange(1, covXY.size + 1) / (covXY.size)
# Print some output
logger.info("Resolution, FSC")
for b, f in zip(bins, fsc):
logger.info("%.2f, %.2f" % (1 / sqrt(b), f))
# Return the fsc
return bins, fsc
def mapfile_fsc(
input_filename1,
input_filename2,
output_filename=None,
output_data_filename=None,
nbins=20,
resolution=None,
axis=None,
method="binned",
):
"""
Compute the local FSC of the map
Args:
input_filename1 (str): The input map filename
input_filename2 (str): The input map filename
output_filename (str): The output map filename
nbins (int): The number of bins
resolution (float): The resolution limit
axis (tuple): The axis of the plane to compute the FSC
method (str): Method to use (binned or averaged)
"""
# Check the axis
if type(axis) in [int, float]:
axis = (axis,)
# Open the input files
infile1 = read(input_filename1)
infile2 = read(input_filename2)
# Get the data
data1 = infile1.data
data2 = infile2.data
# Reorder data2 to match data1
data1 = reorder(data1, read_axis_order(infile1), (0, 1, 2))
data2 = reorder(data2, read_axis_order(infile2), (0, 1, 2))
# Compute the FSC
bins, fsc = array_fsc(
data1,
data2,
voxel_size=tuple(infile1.voxel_size[a] for a in ["z", "y", "x"]),
nbins=nbins,
resolution=resolution,
axis=axis,
method=method,
)
# Compute the resolution
bin_index, bin_value, fsc_value = resolution_from_fsc(bins, fsc)
logger.info("Estimated resolution = %f A" % (1 / sqrt(bin_value)))
# Write the FSC curve
fig, ax = pylab.subplots(figsize=(8, 6))
ax.plot(bins, fsc)
ax.set_xlabel("Resolution (A)")
ax.set_ylabel("FSC")
ax.set_ylim(0, 1)
ax.axvline(bin_value, color="black")
ax.xaxis.set_major_formatter(
ticker.FuncFormatter(lambda x, p: "%.1f" % (1 / sqrt(x)) if x > 0 else None)
)
fig.savefig(output_filename, dpi=300, bbox_inches="tight")
pylab.close(fig)
# Write a data file
if output_data_filename is not None:
with open(output_data_filename, "w") as outfile:
yaml.safe_dump(
{
"table": {
"bin": list(map(float, bins)),
"fsc": list(map(float, fsc)),
},
"resolution": {
"bin_index": int(bin_index),
"bin_value": float(bin_value),
"fsc_value": float(fsc_value),
"estimate": float(1 / sqrt(bin_value)),
},
},
outfile,
)
def fsc(*args, **kwargs):
"""
Compute the FSC of the map
"""
    if (len(args) > 0 and isinstance(args[0], str)) or "input_filename1" in kwargs:
func = mapfile_fsc
else:
func = array_fsc
return func(*args, **kwargs)
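if __name__ == "__main__":
    # Minimal runnable sketch of the dispatcher above: bare arrays are routed to
    # array_fsc, while keyword filenames (input_filename1=...) would be routed to
    # mapfile_fsc instead. The random volumes below are placeholders, not real half-maps.
    logging.basicConfig(level=logging.INFO)
    rng = numpy.random.default_rng(0)
    volume = rng.standard_normal((32, 32, 32))
    noisy = volume + 0.5 * rng.standard_normal((32, 32, 32))
    demo_bins, demo_fsc = fsc(volume, noisy, nbins=10)
    print(resolution_from_fsc(demo_bins, demo_fsc))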
```
#### File: maptools/tests/test_fft.py
```python
import os.path
import tempfile
import maptools
def test_fft(ideal_map_filename):
for mode in ["real", "imaginary", "amplitude", "phase", "power"]:
for shift in [True, False]:
for normalize in [True, False]:
_, output_filename = tempfile.mkstemp()
maptools.fft(
input_filename=ideal_map_filename,
output_filename=output_filename,
mode=mode,
shift=shift,
normalize=normalize,
)
assert os.path.exists(output_filename)
```
#### File: maptools/tests/test_map2mtz.py
```python
import os.path
import tempfile
import maptools
def test_map2mtz(ideal_map_filename):
_, output_filename = tempfile.mkstemp()
maptools.map2mtz(
input_filename=ideal_map_filename,
output_filename=output_filename,
resolution=8,
)
assert os.path.exists(output_filename)
``` |
{
"source": "jmp1985/mrcfile",
"score": 3
} |
#### File: mrcfile/mrcfile/command_line.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import argparse
import mrcfile
def print_headers(names=None, print_file=None):
"""
Print the MRC header contents from a list of files.
This function opens files in permissive mode to allow headers of invalid
files to be examined.
Args:
names: A list of file names. If not given or :data:`None`, the names
are taken from the command line arguments.
print_file: The output text stream to use for printing the headers.
This is passed directly to the ``print_file`` argument of
:meth:`~mrcfile.mrcobject.MrcObject.print_header`. The default is
:data:`None`, which means output will be printed to
:data:`sys.stdout`.
"""
if names is None:
parser = argparse.ArgumentParser(
description="Print the MRC header contents from a list of files."
)
parser.add_argument("filename", nargs='*', help="Input MRC file")
args = parser.parse_args()
names = args.filename
for name in names:
with mrcfile.open(name, permissive=True, header_only=True) as mrc:
mrc.print_header(print_file=print_file)
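# Example (hypothetical file name):
#     print_headers(["EMD-1234.mrc"])
# prints the header fields of that file to stdout; when called with no arguments the
# file names are taken from the command-line arguments instead.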
```
#### File: mrcfile/mrcfile/utils.py
```python
from __future__ import (absolute_import, division, print_function,
unicode_literals)
import sys
import numpy as np
from .constants import IMAGE_STACK_SPACEGROUP
def data_dtype_from_header(header):
"""Return the data dtype indicated by the given header.
This function calls :func:`dtype_from_mode` to get the basic dtype, and
then makes sure that the byte order of the new dtype matches the byte order
of the header's ``mode`` field.
Args:
header: An MRC header as a :class:`numpy record array
<numpy.recarray>`.
Returns:
The :class:`numpy dtype <numpy.dtype>` object for the data array
corresponding to the given header.
Raises:
:exc:`ValueError`: If there is no corresponding dtype for the given
mode.
"""
mode = header.mode
return dtype_from_mode(mode).newbyteorder(mode.dtype.byteorder)
def data_shape_from_header(header):
"""Return the data shape indicated by the given header.
Args:
header: An MRC header as a :class:`numpy record array
<numpy.recarray>`.
Returns:
The shape tuple for the data array corresponding to the given header.
"""
nx = int(header.nx)
ny = int(header.ny)
nz = int(header.nz)
mz = int(header.mz)
if spacegroup_is_volume_stack(header.ispg):
shape = (nz // mz, mz, ny, nx)
elif header.ispg == IMAGE_STACK_SPACEGROUP and nz == 1:
# Use a 2D array for a single image
shape = (ny, nx)
else:
shape = (nz, ny, nx)
return shape
_dtype_to_mode = dict(f2=12, f4=2, i1=0, i2=1, u1=6, u2=6, c8=4)
def mode_from_dtype(dtype):
"""Return the MRC mode number corresponding to the given :class:`numpy
dtype <numpy.dtype>`.
The conversion is as follows:
* float16 -> mode 12
* float32 -> mode 2
* int8 -> mode 0
* int16 -> mode 1
* uint8 -> mode 6 (data will be widened to 16 bits in the file)
* uint16 -> mode 6
* complex64 -> mode 4
Note that there is no numpy dtype which corresponds to MRC mode 3.
Args:
dtype: A :class:`numpy dtype <numpy.dtype>` object.
Returns:
The MRC mode number.
Raises:
:exc:`ValueError`: If there is no corresponding MRC mode for the given
dtype.
"""
kind_and_size = dtype.kind + str(dtype.itemsize)
if kind_and_size in _dtype_to_mode:
return _dtype_to_mode[kind_and_size]
raise ValueError("dtype '{0}' cannot be converted "
"to an MRC file mode".format(dtype))
_mode_to_dtype = { 0: np.int8,
1: np.int16,
2: np.float32,
4: np.complex64,
6: np.uint16,
12: np.float16 }
def dtype_from_mode(mode):
"""Return the :class:`numpy dtype <numpy.dtype>` corresponding to the given
MRC mode number.
The mode parameter may be given as a Python scalar, numpy scalar or
single-item numpy array.
The conversion is as follows:
* mode 0 -> int8
* mode 1 -> int16
* mode 2 -> float32
* mode 4 -> complex64
* mode 6 -> uint16
* mode 12 -> float16
Note that mode 3 is not supported as there is no matching numpy dtype.
Args:
mode: The MRC mode number. This may be given as any type which can be
converted to an int, for example a Python scalar (``int`` or
``float``), a numpy scalar or a single-item numpy array.
Returns:
The :class:`numpy dtype <numpy.dtype>` object corresponding to the
given mode.
Raises:
:exc:`ValueError`: If there is no corresponding dtype for the given
mode.
"""
mode = int(mode)
if mode in _mode_to_dtype:
return np.dtype(_mode_to_dtype[mode])
else:
raise ValueError("Unrecognised mode '{0}'".format(mode))
def pretty_machine_stamp(machst):
"""Return a human-readable hex string for a machine stamp."""
return " ".join("0x{:02x}".format(byte) for byte in machst)
def byte_order_from_machine_stamp(machst):
"""Return the byte order corresponding to the given machine stamp.
Args:
machst: The machine stamp, as a :class:`bytearray` or a :class:`numpy
array <numpy.ndarray>` of bytes.
Returns:
``<`` if the machine stamp represents little-endian data, or ``>`` if
it represents big-endian.
Raises:
:exc:`ValueError`: If the machine stamp is invalid.
"""
if machst[0] == 0x44 and machst[1] in (0x44, 0x41):
return '<'
elif (machst[0] == 0x11 and machst[1] == 0x11):
return '>'
else:
pretty_bytes = pretty_machine_stamp(machst)
raise ValueError("Unrecognised machine stamp: " + pretty_bytes)
_byte_order_to_machine_stamp = {'<': bytearray((0x44, 0x44, 0, 0)),
'>': bytearray((0x11, 0x11, 0, 0))}
def machine_stamp_from_byte_order(byte_order='='):
"""Return the machine stamp corresponding to the given byte order
indicator.
Args:
byte_order: The byte order indicator: one of ``=``, ``<`` or ``>``, as
defined and used by numpy dtype objects.
Returns:
The machine stamp which corresponds to the given byte order, as a
:class:`bytearray`. This will be either ``(0x44, 0x44, 0, 0)`` for
little-endian or ``(0x11, 0x11, 0, 0)`` for big-endian. If the given
byte order indicator is ``=``, the native byte order is used.
Raises:
:exc:`ValueError`: If the byte order indicator is unrecognised.
"""
# If byte order is '=', replace it with the system-native order
byte_order = normalise_byte_order(byte_order)
return _byte_order_to_machine_stamp[byte_order]
def byte_orders_equal(a, b):
"""Work out if the byte order indicators represent the same endianness.
Args:
a: The first byte order indicator: one of ``=``, ``<`` or ``>``, as
defined and used by :class:`numpy dtype <numpy.dtype>` objects.
b: The second byte order indicator.
Returns:
:data:`True` if the byte order indicators represent the same
endianness.
Raises:
:exc:`ValueError`: If the byte order indicator is not recognised.
"""
return normalise_byte_order(a) == normalise_byte_order(b)
def normalise_byte_order(byte_order):
"""Convert a numpy byte order indicator to one of ``<`` or ``>``.
Args:
byte_order: One of ``=``, ``<`` or ``>``.
Returns:
``<`` if the byte order indicator represents little-endian data, or
``>`` if it represents big-endian. Therefore on a little-endian
machine, ``=`` will be converted to ``<``, but on a big-endian machine
it will be converted to ``>``.
Raises:
:exc:`ValueError`: If ``byte_order`` is not one of ``=``, ``<`` or
``>``.
"""
if byte_order not in ('<', '>', '='):
raise ValueError("Unrecognised byte order indicator '{0}'"
.format(byte_order))
if byte_order == '=':
return '<' if sys.byteorder == 'little' else '>'
return byte_order
def spacegroup_is_volume_stack(ispg):
"""Identify if the given space group number represents a volume stack.
Args:
ispg: The space group number, as an integer, numpy scalar or single-
element numpy array.
Returns:
:data:`True` if the space group number is in the range 401--630.
"""
return 401 <= ispg <= 630
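if __name__ == '__main__':
    # Small self-check sketch of the helpers above; the values are taken from the
    # conversion tables in this module, not from any particular MRC file.
    assert mode_from_dtype(np.dtype(np.float32)) == 2
    assert dtype_from_mode(2) == np.dtype(np.float32)
    stamp = machine_stamp_from_byte_order('<')
    print(pretty_machine_stamp(stamp))  # 0x44 0x44 0x00 0x00
    assert byte_order_from_machine_stamp(stamp) == '<'
    assert spacegroup_is_volume_stack(401) and not spacegroup_is_volume_stack(1)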
``` |
{
"source": "jmp1985/obsidian",
"score": 3
} |
#### File: obsidian/obsidian/labeller.py
```python
from glob import glob
import os
import numpy as np
import matplotlib.pyplot as plt
import pickle
from obsidian.utils.data_handling import read_header
from obsidian.fex.extractor import radius_from_res
def get_rmax(file_dir):
    h = os.path.join(file_dir, 'header.txt')
    return int(radius_from_res(7, h))
def main():
file_dir = input("\nEnter directory containing files to be labelled: ")
while not os.path.exists(os.path.join(file_dir, 'keys.txt')):
print("\n Invalid directory or directory does not contain keys.txt.")
file_dir = input("Try again: ")
dest = input("\nEnter destination directory for storing classifications: ")
if not os.path.exists(dest):
os.makedirs(dest)
with open(os.path.join(file_dir, 'keys.txt')) as k:
keys = {line.split()[0] : line.split()[1] for line in k}
ID = ''.join(file_dir.split(os.sep)[-3:])
print("ID: ".format(ID))
print("\nClassifying directory {}...... \n".format(file_dir))
    rmax = get_rmax(file_dir)
file_dict = {}
plt.ion()
allblank = (input("All files blanks (background)? [y/n, default n]") == 'y')
if not allblank:
vmax = int(input("Enter vmax: "))
cropped = (input("Already cropped? [y/n]: ") == 'y')
counter = 1
total = len(keys)
for f in keys:
if not allblank:
name = os.path.splitext(os.path.basename(f))[0]
img = np.load(f)[1300-rmax:1300+rmax,1200-rmax:1200+rmax] if not cropped else np.load(f)
print("image {}/{}".format(counter, total))
plt.imshow(img, cmap='binary', interpolation='None', vmin=-1, vmax=vmax)
plt.draw()
decision = input(name+": can you see protein rings? [y/n] ")
clss = int( decision == 'y' ) # 1-> protein; 0-> no protein
else:
clss = 0
file_dict[keys[f]] = {'Path':f, 'Class':clss}
counter += 1
file_dict_save = open(os.path.join(dest, "{}_classifications.pickle".format(ID)), "wb")
pickle.dump(file_dict, file_dict_save)
file_dict_save.close()
print("Classifications saved as '{}' in {}".format("{}_classifications.pickle".format(ID), dest))
if __name__=='__main__':
main()
```
#### File: obsidian/learn/convnet.py
```python
import sys, getopt, os
from glob import glob
import itertools
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import pickle
# Machine learning
from keras.models import Sequential, load_model
from keras.layers import Dense, Conv1D, MaxPooling1D, Flatten, Dropout
from sklearn.metrics import confusion_matrix
from sklearn.utils import shuffle
# Obsidian modules
from obsidian.utils.imgdisp import ImgDisp
from obsidian.utils.data_handling import pickle_get, pickle_put, new_database
from obsidian.learn.metrics import precision, weighted_binary_crossentropy
# Directories for saving and loading models and databases
models_dir = os.path.join(os.path.dirname(__file__), 'models')
data_dir = os.path.dirname(__file__)
class ProteinClassifier():
'''
Class for specifying, building, training and testing diffraction image classifiers
:ivar data_table: database of all available images with image path, extracted data and class
:ivar train_data:
:ivar test_data:
:ivar model: Keras model object
:ivar history: epoch-wise training history, created when model trained
:ivar split: number of data samples to be used for training (split fraction is 0.8)
**Default parameters:**
| number of layers: 6
| kernel sizes: min 3, max 100
| dropout: 0.3
| padding: same
| number of epochs: 30
| batch size: 20
| loss weight: 0.5
'''
def __init__(self):
'''When a ProteinClassifier object is instantiated,
data_table, model and history are set to None to
ensure that methods are called in the appropriate order.
'''
self.data_table = None
self.model = None
self.history = None
@staticmethod
def make_database(data_folder, labels_folder, name=''):
'''Load data and classes out of relevant folders and store in a data frame along with
file path for each image.
        :param str data_folder: Directory containing the extracted profile data
        :param str labels_folder: Directory containing the corresponding classification pickles
        :param str name: Database title. Database will be saved as "namedatabase.pickle"
'''
global data_dir
        save_path = os.path.join(data_dir, '{}database.pickle'.format(name))
        return new_database(data_folder, labels_folder, save_path=save_path)
def load_database(self, path):
'''Load data from pre-pickled database and extract separate lists for inputs and classes
:param str path: path of stored data file
'''
self.data_table = pickle_get(path)
def massage(self, split_val=0.8):
'''Massage data into shape, prepare for model fitting, populate member variables
:param float split_val: fraction of data to use for training, the remainder used for testing
'''
        assert self.data_table is not None, "Load data first!"
# Reshape data to have dimensions (nsamples, ndata_points, 1)
if len(np.stack(self.data_table['Data'].values).shape) < 3:
self.data_table['Data'] = list(np.expand_dims(np.stack(self.data_table['Data'].values), 2))
self.data_table = shuffle(self.data_table)
# Split into train and test sets
self.split = int(round(split_val*len(self.data_table)))
self.train_data = self.data_table[:self.split]
self.test_data = self.data_table[self.split:]
# Extract training and test inputs and targets
if len(self.train_data)!=0:
self.X_train, self.y_train = np.stack(self.train_data['Data'].values), np.stack(self.train_data['Class'].values)
if len(self.test_data)!=0:
self.X_test, self.y_test = np.stack(self.test_data['Data'].values), np.stack(self.test_data['Class'].values)
def print_summary(self):
'''Print summary of loaded samples and build model.
'''
print("Data loaded!")
print("Total number of samples: {0}\nBalance: class 1 - {1:.2f}%".format(len(self.data_table),
(np.array(self.classes) == 1).sum()/len(self.classes)))
print("Network to train:\n", self.model.summary())
def build_model(self, nlayers=6,
min_kern_size=3, max_kern_size=100,
dropout=0.3,
padding='same',
custom_loss=False, loss_weight=0.5,
name='classifier_model'):
'''Construct and compile a keras Sequential model according to spec
        :param int nlayers: number of 1D convolution layers (default 6)
:param int min_kern_size: smallest kernel size (default 3)
:param int max_kern_size: largest kernel size (default 100)
:param float dropout: Dropout rate (default 0.3)
:param str padding: padding mode for convolution layers (default 'same')
:param bool custom_loss: if true, use weighted_binary_crossentropy
:param float loss_weight: weighting of custom loss. A value higher than 1 will bias the
model towards positive predictions, a value lower than 1 will bias
the model towards negative predictions.
:return: created and compiled keras model
'''
kernel_sizes = np.linspace(min_kern_size, max_kern_size, nlayers, dtype=int)
nfilters = np.linspace(30, 50, nlayers, dtype=int)
model = Sequential()
# Input layer
model.add(Conv1D(filters = nfilters[0], kernel_size=kernel_sizes[0].item(),
padding=padding, activation='relu', input_shape=(2000, 1)))
model.add(MaxPooling1D())
model.add(Dropout(dropout))
for i in range(1,nlayers):
model.add(Conv1D(filters=nfilters[i], kernel_size=kernel_sizes[i].item(),
padding=padding, activation='relu'))
model.add(MaxPooling1D())
model.add(Dropout(dropout))
model.add(Flatten())
model.add(Dense(200, activation='relu'))
model.add(Dropout(dropout))
model.add(Dense(50, activation='relu'))
model.add(Dense(1, activation='sigmoid'))
        loss = weighted_binary_crossentropy(weight=loss_weight) if custom_loss else 'binary_crossentropy'
model.compile(loss=loss, optimizer='adam', metrics=['accuracy', precision])
self.model = model
global models_dir
with open(os.path.join(models_dir, '{}.txt'.format(name)), 'w') as f:
f.write("loss "+("weighted_binary_crossentropy(weight=loss_weight)\n" if custom_loss else "binary_crossentropy\n"))
f.write("loss_weight "+str(loss_weight)+"\n")
return model
def train_model(self, epochs=30, batch_size=20,
name='classifier_model',
update=False, end=-1):
'''Shuffle and split data, then feed to model
:param int epochs: number of training epochs (default 30)
:param int batch_size: number of samples per weight update computation (default 20)
:param int end: number of training samples (default use whole training set)
'''
assert self.model is not None, "Build model first!"
if update:
assert self.history is not None, "Load pre-trained model to use update option"
self.massage()
self.print_summary()
history = self.model.fit(self.X_train[:end], self.y_train[:end],
validation_data=(self.X_test, self.y_test),
epochs=epochs, batch_size=batch_size).history
if update:
for key in self.history.keys():
self.history[key].extend(history[key])
else:
self.history = history
self.model.save(os.path.join(models_dir, '{}.h5'.format(name)))
pickle_put(os.path.join(models_dir, '{}_history.pickle'.format(name)), self.history)
# Plot training history
self.plot_train_performance(self.history)
def model_from_save(self, name='classifier_model'):
'''Load a prebuilt model from file as instance model.
Model must have an associated .txt file recording its loss
function.
:param str name: name of saved model to be loaded
'''
print("WARNING: expect inaccurate performance readings if testing pre-trained models on seen data")
global models_dir
with open(os.path.join(models_dir, '{}.txt'.format(name))) as f:
params = {line.split(' ')[0] : line.split(' ')[1] for line in f}
loss_weight = eval(params['loss_weight'])
try:
loss = eval(params['loss'])
custom_objects = {'precision':precision, 'weighted_loss':loss}
except NameError:
custom_objects = {'precision':precision}
self.model = load_model(os.path.join(models_dir, '{}.h5'.format(name)),
custom_objects=custom_objects)
self.history = pickle_get(os.path.join(models_dir, '{}_history.pickle'.format(name)))
# Plot training history
self.plot_train_performance(self.history)
def test_model(self, batch_size=20):
'''Test model after training. Display training stats and test results.
:param int batch_size:
'''
# Score model on test data
score = self.model.evaluate(self.X_test, self.y_test, batch_size=batch_size)
print("Loss: {0:.2f}, Accuracy: {1:.2f}, Precision: {2:.2f}".format(score[0], score[1], score[2]))
# Analyse performance, add predictions to loaded database
predicted = self.model.predict_classes(self.X_test)
probs = self.model.predict_proba(self.X_test)
self.test_data['Prediction'] = probs
# Display results graphically
cm = confusion_matrix(self.y_test, predicted)
self.show_confusion(cm, [0,1])
self.plot_test_results()
def plot_train_performance(self, history):
'''Plot evolution of metrics such as accuracy and loss as a function of epoch number
:param history: keras history object containing metric info from training
'''
fig, (ax1, ax2, ax3) = plt.subplots(ncols=3, nrows=1)
fig.dpi = 300
ax1.plot(history['loss'], label = 'train', color = '#73ba71')
ax1.plot(history['val_loss'], label = 'validation', color = '#5e9ec4')
ax1.set_xlabel('epoch')
ax1.set_ylabel('loss')
ax2.plot(history['acc'], label = 'train', color = '#73ba71')
ax2.plot(history['val_acc'], label = 'validation', color = '#5e9ec4')
ax2.set_xlabel('epoch')
ax2.set_ylabel('accuracy')
ax3.plot(history['precision'], label = 'train', color = '#73ba71')
ax3.plot(history['val_precision'], label = 'validation', color = '#5e9ec4')
ax3.set_xlabel('epoch')
ax3.set_ylabel('precision')
plt.legend(loc='lower right')
plt.tight_layout()
def plot_test_results(self):
'''Produce a plot of model predictions against true labels
'''
# Sort samples by predicted probability
sort = self.test_data.sort_values('Prediction', ascending=False)
x = np.arange(len(sort))
preds = sort['Prediction'].values
# Determine indices of true and false predictions
truex = np.where(sort['Class'] == np.rint(sort['Prediction']))[0]
trues = sort.iloc[sort['Class'].values == np.rint(sort['Prediction'].values)]
falsex = np.where(sort['Class'] != np.rint(sort['Prediction']))[0]
falses = sort.iloc[sort['Class'].values != np.rint(sort['Prediction'].values)]
# Construct figure
plt.figure(dpi=300)
plt.plot(x, preds, linewidth=0.5) # Predictions
plt.plot(truex, trues['Class'].values, 'g.', markersize=1.5) # True labels, correct
plt.plot(falsex, falses['Class'].values, 'r.', markersize=1.5) # True labels, incorrect
plt.xticks([0, len(sort)])
plt.yticks([0, 0.5, 1])
plt.xlabel('Sample number')
plt.ylabel('Probability')
def show_confusion(self, cm, classes):
'''Display confusion plot and stats
        :param array cm: calculated confusion matrix
:param classes: list of class labels
'''
# Stats
[[tn, fp],[fn, tp]] = cm
tpr = tp / (fn + tp) # True positive rate, Sensitivity, recall
tnr = tn / (tn + fp) # True negative rate, Specificity
ppv = tp / (tp + fp) # Positive predictive value, Precision
npv = tn / (tn + fn) # Negative predictive value
f1 = 2 * (ppv * tpr) / (ppv + tpr)
stats = '{0:<20}{1:>10.2f}\n{2:<20}{3:>10.2f}\n{4:<20}{5:>10.2f}\n{6:<20}{7:>10.2f}\n{8:<20}{9:>10.2f}'.format('Sensitivity:', tpr, 'Specificity:', tnr, 'PPV (Precision):', ppv, 'NPV:', npv, 'F1 score:', f1)
print(stats)
# Construct figure
plt.figure(dpi=300)
plt.imshow(cm, interpolation='nearest', cmap='magma_r')
plt.colorbar()
tick_marks = np.arange(len(classes))
plt.xticks(tick_marks, classes)
plt.yticks(tick_marks, classes)
# Matrix numerical values
thresh = cm.max()/2
for i, j in itertools.product(range(cm.shape[0]), range(cm.shape[1])):
plt.text(j, i, format(cm[i, j], 'd'), horizontalalignment='center',
color = 'black' if cm[i,j]<thresh else 'white')
plt.xlabel('Predicted class')
plt.ylabel('True class')
def show_wrongs(self, n=10):
        '''Display wrongly classified images
together with their true class and predicted class probabilities.
Print wrongly predicted images in tabular form.
:param int n: number of wrongs to display
'''
wrongs = self.test_data.iloc[self.test_data['Class'].values != np.rint(self.test_data['Prediction'].values)]
num = len(wrongs)
w = ImgDisp([np.load(f) for f in wrongs['Path'].values][:n])
fig1, ax1 = w.disp()
fig1.subplots_adjust(wspace=0.02, hspace=0.02)
try:
for i in range(n):
ax1.flat[i].set_title((str(wrongs.iloc[i]['Class'])+' '+str(wrongs.iloc[i]['Prediction'])))
except Exception as e:
print("Couldn't label plots: \n", e)
pd.set_option('display.max_colwidth', 80)
print(wrongs[['Path','Class','Prediction']])
def main(argv):
global data_dir
build_kwargs = {}
train_kwargs = {}
mode = 'normal_testing'
remake = False
name = 'classifier_model'
# Parse command line options
try:
opts, args = getopt.getopt(argv, 'n:b:e:d:p:w:o:', ['mode=', 'remake', 'name=', 'custom_loss', 'data='])
except getopt.GetoptError as e:
print(e)
print("convnet.py \
-n <num layers> \
-b <batch size> \
-e <num epochs> \
-d <size train data> \
--mode <default: 'normal_testing'> \
--name <model name (if name pre-exists will overwrite old model)> \
--remake to rebuild database")
sys.exit(2)
for opt, arg in opts:
if opt=='-n':
build_kwargs['nlayers'] = int(arg)
elif opt=='-p':
build_kwargs['padding'] = arg
elif opt=='--custom_loss':
build_kwargs['custom_loss'] = True
elif opt=='-w':
build_kwargs['loss_weight'] = float(arg)
elif opt=='-o':
build_kwargs['dropout'] = float(arg)
elif opt=='-b':
train_kwargs['batch_size'] = int(arg)
elif opt=='-e':
train_kwargs['epochs'] = int(arg)
elif opt=='-d':
train_kwargs['end'] = int(arg)
elif opt=='--mode':
mode = arg
elif opt=='--remake':
remake = True
elif opt=='--name':
train_kwargs['name'] = arg
build_kwargs['name'] = arg
name = arg
elif opt=='--data':
data_dir = arg
if remake:
ProteinClassifier.make_database()
PC = ProteinClassifier()
avail = glob(os.path.join(data_dir, '*database.pickle'))
if os.path.isfile(data_dir):
data_path = data_dir
elif len(avail) == 1:
data_path = avail[0]
else:
data_path = input("Available databases:\n\t"+"\n\t".join(name for name in avail)+"\nSelect from above: ")
PC.load_database(data_path)
# Build and train model with default parameters except where
# specified otherwise
if mode=='saved':
PC.massage(split_val=0)
PC.model_from_save(name=name)
PC.test_model()
PC.show_wrongs()
elif mode=='update':
PC.model_from_save(name=name)
PC.train_model(update=True, **train_kwargs)
PC.test_model()
else:
go_ahead = True
if os.path.isfile(os.path.join(models_dir, '{}.h5'.format(name))):
go_ahead = (input("'{}' is an existing model. \
Press 'y' to overwrite, any other key to cancel. ".format(name)) == 'y')
if go_ahead:
PC.build_model(**build_kwargs)
PC.train_model(**train_kwargs)
PC.test_model()
#PC.show_wrongs()
else:
sys.exit(2)
plt.show()
if __name__ == '__main__':
main(sys.argv[1:])
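# Example invocations (model names and data paths are placeholders, not part of the project):
#   python convnet.py -n 6 -e 30 -b 20 --name demo_model
#   python convnet.py --mode saved --name demo_model
#   python convnet.py --mode update --name demo_model -e 10 --data /path/to/databases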
```
#### File: obsidian/learn/metrics.py
```python
import keras.backend as K
from theano.tensor import basic as T
from theano.tensor import nnet, clip
def precision(y_true, y_pred):
'''Returns batch-wise average of precision.
Precision is a metric of how many selected items are relevant, corresponding
to sum(true positives)/sum(predicted positives)
(see: https://en.wikipedia.org/wiki/Confusion_matrix)
:param y_true: True values
:param y_pred: Predictied output values
'''
true_positives = K.sum(K.round(K.clip(y_true * y_pred, 0, 1)))
predicted_positives = K.sum(K.round(K.clip(y_pred, 0, 1)))
precision = true_positives / (predicted_positives + K.epsilon())
return precision
def weighted_binary_crossentropy(weight, from_logits=False):
'''Custom loss function to bias training towards a class.
:param float weight: A value higher than 1 will bias the model towards positive predictions,
a value lower than 1 will bias the model towards negative predictions.
:returns: weighted loss function
'''
def weighted_loss(target, output):
        # Convert logits to probabilities first, mirroring the Keras backend implementation of binary_crossentropy
if from_logits:
output = nnet.sigmoid(output)
output = clip(output, K.epsilon(), 1.0 - K.epsilon())
# Modified log loss equation with weight for target positive
return -(weight * target * T.log(output) + (1.0 - target) * T.log(1.0 - output))
return weighted_loss
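if __name__ == '__main__':
    # Minimal sketch (assumes Keras with the Theano backend, as imported above) showing
    # how these metrics plug into a model; the layer sizes are illustrative only.
    from keras.models import Sequential
    from keras.layers import Dense
    demo = Sequential()
    demo.add(Dense(8, activation='relu', input_shape=(4,)))
    demo.add(Dense(1, activation='sigmoid'))
    # A weight above 1 penalises missed positives more heavily than false positives
    demo.compile(loss=weighted_binary_crossentropy(weight=2.0),
                 optimizer='adam',
                 metrics=['accuracy', precision])
    demo.summary()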
```
#### File: obsidian/oimp/oimp_main.py
```python
import os, os.path, sys, getopt
from glob import glob
import pickle
import numpy as np
import matplotlib.pyplot as plt
from obsidian.utils.imgdisp import ImgDisp
from obsidian.utils.data_handling import pickle_get, pickle_put, join_files, split_data, read_header
from obsidian.fex.trace import Trace
from obsidian.fex.extractor import FeatureExtractor as Fex, radius_from_res
from obsidian.oimp.processor import Processor
def get_img_dirs(root):
'''Walk through root directory compiling a list of all bottom level directories
(that is directories that contain only files and no subdirectories)
:param str root: directory to walk through
:returns: dictionary of image data directories with generated IDs based on local directory tree
'''
bottom_dirs = {}
for dirname, subdirList, fileList in os.walk(root, topdown=False):
if len(subdirList)==0:
ID = ''.join(dirname.split(os.sep)[-3:])
bottom_dirs[dirname] = ID
return bottom_dirs
def locate_background_file(img_data_dir):
'''Ascend img_dir until the first candidate for a background
file is found.
:param str img_data_dir: bottom directory of diffraction images
:returns: path to background file
'''
current_path = img_data_dir
while True:
next_path = os.path.realpath(os.path.join(current_path, '..'))
if 'background.npy' in os.listdir(current_path):
return os.path.join(current_path, 'background.npy')
elif next_path == current_path:
return None
else:
current_path = next_path
def pipe(top, dump_path, max_res):
'''Takes a single top level directory as input and processes all nested files.
IDs are derived from directory paths. Background files are searched for in a bottom-up manner.
:param str top: Top level directory containing all image files (which may be in several subdirectories)
    :param str dump_path: Directory to dump processed data into
    :param max_res: Resolution limit in Angstrom used to derive the maximum profile radius (optional)
'''
last = ''
# Find all relevant image directories within top
bottom_dirs = get_img_dirs(top)
# Process each image directory in turn
for img_data_dir in bottom_dirs.keys():
assert os.path.exists(img_data_dir), "{} not found".format(img_data_dir)
ID = bottom_dirs[img_data_dir]
print("\n###### Working on {}... ######".format(ID))
# Skip directory if already processed
if os.path.exists(os.path.join(dump_path, '{}_profiles.pickle'.format(ID))):
print("\t{} already processed, skipping".format(ID))
continue
# Background file
print("\tLooking for background data for {}...".format(img_data_dir))
background_path = locate_background_file(img_data_dir)
if background_path is None:
print("\t\tNo background file found for {}, skipping folder".format(ID))
continue
else:
background = np.load(background_path)
print("\tBackground data loaded from: {}".format(background_path))
# Batch data processing to avoid memory issues
batched_files = split_data( glob(img_data_dir+'/*.npy'), 400 ) # batch size of 400
batchIDs = ['{}-{}'.format(ID, i) for i in range(len(batched_files))]
i = 0
# Extract max radius in pixels from image header
header = os.path.join(img_data_dir, 'header.txt')
rmax = radius_from_res(max_res, header) if max_res is not None else None
# Open keys file to link each image with unique original image path
with open(os.path.join(img_data_dir, 'keys.txt')) as k:
keys = {line.split()[0] : line.split()[1] for line in k}
# Batchwise processing
for files in batched_files:
batchID = batchIDs[i]
print('\tBatch nr: ', i)
############## Read in data files ###############
print("\t\tLoading image data...")
data = {f : np.load(f) for f in files if 'background' not in f}
############ Processing ############
print("\t\tPre-prossessing images...")
process = Processor(data, background)
process.rm_artifacts(value=500)
process.background()
data = process.processedData
############## Feature analysis ##############
print("\t\tExtracting profiles...")
fex = Fex(data)
mean_traces = fex.mean_traces(rmax=rmax, nangles=20)
############# Saving ##############
# Create an indexed dictionary with the keys derived from keys.txt
indexed_data = {keys[path] : {'Path' : path, 'Data' : mean_traces[path]} for path in mean_traces }
print("\t\tSaving profiles to {}/{}_profiles.pickle...".format(dump_path, batchID))
            pickle_put(os.path.join(dump_path, '{}_profiles.pickle'.format(batchID)), indexed_data)
#fex.dump_save(batchID, dump_path)
del data
del fex
del process
i += 1
############# Join batches to single file #############
paths = [os.path.join(dump_path, '{}_profiles.pickle'.format(batchID)) for batchID in batchIDs]
last = os.path.join(dump_path, '{}_profiles.pickle'.format(ID))
join_files(last, paths)
# Return path of last processed file
return last
def run(argv):
top, dump, max_res = None, None, None
# Parse command line options
try:
opts, args = getopt.getopt(argv, 't:d:r:')
except getopt.GetoptError as e:
print(e)
sys.exit(2)
for opt, arg in opts:
if opt=='-t':
top = os.path.abspath(arg)
elif opt=='-d':
dump = arg
elif opt=='-r':
max_res = int(arg)
if not all((top, dump)):
print("-t, -d are required")
sys.exit(2)
if not os.path.exists(dump):
os.makedirs(dump)
pipe(top, dump, max_res)
if __name__ == '__main__':
run(sys.argv[1:])
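# Example invocation (paths are placeholders): process every image folder under -t,
# writing batched profile pickles to -d and truncating profiles at 7 Angstrom:
#   python oimp_main.py -t /data/diffraction_images -d /data/obsidian_dump -r 7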
```
#### File: obsidian/oimp/processor.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pickle
import os
class Processor():
'''Encapsulation of all image processing, to produce data that can be
passed onto the feature extraction stage of Obsidian.
:ivar dict processedData: storage dict for processed images / images to be
processed in form {filepath : image array}
:ivar array bg: background image data
'''
def __init__(self, coll, background=None):
'''
:param dict coll: dict of filepath : image pairs to be processed
'''
self.processedData = coll
self.bg = background
def background(self, bg=None):
'''Remove background from image collection
:param bg: Background image to be removed (provide only if not specified on instantiation)
:returns: modified files
'''
assert self.bg is not None or bg is not None, "No background image provided"
bg = self.bg if bg is None else bg
for name, f in self.processedData.items():
assert (f.shape == bg.shape), "Background subtraction: images must be equal size!"
self.processedData[name] = (np.subtract(f, bg))
if self.bg is None:
self.bg = bg
def rm_artifacts(self, value=600):
        '''Set pixel values above a reasonable photon count to -1
:param int value: cutoff value, pixels with higher counts assumed artifacts (default 600)
'''
for name, image in self.processedData.items():
image[image > value] = -1
if self.bg is not None:
self.bg[self.bg > value] = -1
def dump_save(self, ID, path=None):
        '''Save processed images to pickle file
:param str ID: identification label
:param str path: destination path (if None, default is obsidian/datadump)
'''
default = 'obsidian/datadump'
if not os.path.exists(default) and path is None:
os.makedirs(default)
path = os.path.join(default if path is None else path,
"{}_processed.pickle".format(ID))
data_save = open(path, "wb")
pickle.dump(self.processedData, data_save, protocol=-1)
data_save.close()
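# Typical usage, mirroring the pipeline in oimp_main.pipe (the variable names here are
# illustrative only):
#   process = Processor(image_dict, background_image)
#   process.rm_artifacts(value=500)
#   process.background()
#   cleaned = process.processedData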
```
#### File: obsidian/utils/build_bg_files.py
```python
import numpy as np
import os, sys
import pickle
def bg_from_blanks(top, dest):
'''Make background file by averaging images classified as
blanks (without rings)
:param str top: top level directory containing image folders
:param str dest: destination for background file
'''
bottoms = {}
for folder, subdirs, files in os.walk(top):
if len(subdirs)==0:
ID = ''.join(folder.split(os.sep)[-3:])
bottoms[folder] = {'ID':ID, 'files':files}
print(ID)
for d in bottoms.keys():
ID = bottoms[d]['ID']
if os.path.exists(os.path.join(dest, '{}_background.npy'.format(ID))):
print("Background file for {} already exists, skipping".format(ID))
continue
try:
labels = pickle.load(open('/media/Elements/obsidian/classes/small/{}_classifications.pickle'.format(ID), 'rb'))
except IOError:
print("no labels yet for {}".format(ID))
continue
        try:
            bg_data = [np.load(sample) for sample in labels.keys() if labels[sample]==0]
        except Exception as e:
            print(e)
            continue
if len(bg_data)!=0:
print("making bg file")
try:
mean_bg_data = np.mean(np.dstack(bg_data), axis=2)
except MemoryError:
half = int(round(len(bg_data)/2))
mean1 = np.mean(np.dstack(bg_data[:half]), axis=2)
mean2 = np.mean(np.dstack(bg_data[half:]), axis=2)
mean_bg_data = np.mean(np.dstack((mean1, mean2)), axis=2)
np.save(os.path.join(dest, '{}_background.npy'.format(ID)), mean_bg_data, allow_pickle=False)
def bg_from_scan(top, dest, folders):
'''Seek out background scan directories and build background files
:param str top: top level directory containing image folders
:param str dest: destination for background file
:param collection folders: list or tuple of folder strings to demarcate background scans (e.g. ('g1', 'f1') )
'''
bottoms = {}
for folder, subdirs, files in os.walk(top):
if len(subdirs)==0 and any(f in folder for f in folders):
ID = ''.join(folder.split(os.sep)[-3:])
tray = ''.join(folder.split(os.sep)[-4:-2])
bottoms[folder] = {'ID':ID, 'tray':tray, 'files':files}
for d in bottoms.keys():
ID = bottoms[d]['ID']
tray = bottoms[d]['tray']
if os.path.exists(os.path.join(dest, '{}_background.npy'.format(ID))):
print("Background file for {} already exists, skipping".format(ID))
else:
try:
bg_data = [np.load(os.path.join(d, sample)) for sample in bottoms[d]['files'] if sample.endswith('.npy')]
print(len(bg_data))
except Exception as e:
print(e)
continue
print("making bg file for {}".format(ID))
try:
mean_bg_data = np.mean(np.dstack(bg_data), axis=2)
except MemoryError: # if too much data for a single numpy array, split in half
half = int(round(len(bg_data)/2))
mean1 = np.mean(np.dstack(bg_data[:half]), axis=2)
mean2 = np.mean(np.dstack(bg_data[half:]), axis=2)
mean_bg_data = np.mean(np.dstack((mean1, mean2)), axis=2)
print("MemoryError handled")
np.save(os.path.join(dest, '{}_background.npy'.format(ID)), mean_bg_data, allow_pickle=False)
if os.path.exists(os.path.join(dest, '{}_background.npy'.format(tray))):
print("Background file for {} already exists, skipping".format(tray))
else:
try:
bg_data = [np.load(os.path.join(folder, sample)) for folder in bottoms.keys()
for sample in bottoms[folder]['files']
if bottoms[folder]['tray']==tray
if sample.endswith('.npy')]
print(len(bg_data))
except Exception as e:
print(e)
continue
print("making bg file for {}".format(tray))
try:
mean_bg_data = np.mean(np.dstack(bg_data), axis=2)
except MemoryError: # if too much data for a single numpy array, split in half
half = int(round(len(bg_data)/2))
mean1 = np.mean(np.dstack(bg_data[:half]), axis=2)
mean2 = np.mean(np.dstack(bg_data[half:]), axis=2)
mean_bg_data = np.mean(np.dstack((mean1, mean2)), axis=2)
print("MemoryError handled")
np.save(os.path.join(dest, '{}_background.npy'.format(tray)), mean_bg_data, allow_pickle=False)
if __name__ == '__main__':
top = '/media/Elements/obsidian/diffraction_data/lysozyme_small'
dest = 'obsidian/datadump'
folders = ('g1', 'f1')
bg_from_scan(top, dest, folders)
bg_from_blanks(top, dest)
```
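The `MemoryError` fallback used repeatedly above averages two half-stacks and then averages those two means. A small self-contained check of that strategy is sketched below; note it matches the true mean exactly only when the two halves contain the same number of frames.
```python
import numpy as np

arrays = [np.random.rand(4, 4) for _ in range(10)]  # stand-ins for background frames

true_mean = np.mean(np.dstack(arrays), axis=2)

half = int(round(len(arrays) / 2))
mean1 = np.mean(np.dstack(arrays[:half]), axis=2)
mean2 = np.mean(np.dstack(arrays[half:]), axis=2)
split_mean = np.mean(np.dstack((mean1, mean2)), axis=2)

print(np.allclose(true_mean, split_mean))  # True here, because 10 frames split evenly
```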
#### File: obsidian/utils/data_handling.py
```python
import pandas as pd
import pickle
import os
from glob import glob
def pickle_get(path):
'''Wrapper function for fetching pickled file contents
:param str path: path of pickle file
:returns: contents of pickle file
'''
    with open(path, 'rb') as pickle_in:
        return pickle.load(pickle_in)
def pickle_put(path, data):
'''Wrapper function for dumping data to pickle file
:param str path: destination path
:param data: object to be pickled
'''
    with open(path, 'wb') as pickle_out:
        pickle.dump(data, pickle_out)
def join_files(end_path, paths):
'''Combine multiple pickled dictionaries into a single pickle file
:param str end_path: path of target combined file
:param list paths: list of paths of pickle files to be combined
'''
all_data = {}
for path in paths:
all_data.update(pickle_get(path))
os.remove(path)
pickle_put(end_path, all_data)
def split_data(data_list, chunk_size):
'''Handle reading and processing data in chunks to avoid the process being killed
:param list data_list: list of items to be split into chunks
:param int chunk_size: number of items per chunk
:returns: list of sublists of size chunk_size (except the final sublist, which contains remainder)
'''
return [data_list[i:i+chunk_size] for i in range(0, len(data_list), chunk_size)]
def make_frame(datadict):
'''Create dataframe out of paths contained in datadict
    :param dict datadict: dict containing a 'Data' entry that maps to a list of pickle file paths
'''
data = {}
for path in datadict['Data']:
data.update(pickle_get(path))
df = pd.DataFrame.from_dict(data, orient='index')
return df
def read_header(f, params):
'''Extract desired parameters from header file. Will not work correctly if params contain any spaces
:param str f: header file path
:param list params: List of strings, each the name of a parameter found in the header
:return: dict of param:values where values is a list of all subsequent space separated strings
Example::
>>> read_header(<header/file/path>, ['Beam_xy', 'Detector_distance'])
{'Beam_xy' : ['(1251.51,', '1320.12)', 'pixels'], 'Detector_distance':['0.49906','m']}
'''
    info = {}
    # Read the header file line by line
    with open(f, 'r') as head:
        for l in head:
            if any(param in l for param in params):
                p = [param for param in params if param in l][0]
                info[p] = l.split(' ')[2:]  # extract all info following the parameter keyword
    return info
def update_database(old_database, new_data, save=False):
    '''Add new data to an existing database
    :param str old_database: path to the existing pickled database
    :param list new_data: paths to new data files
    :param bool save: if True, overwrite old_database with the combined result
    :returns: the combined dataframe
    '''
    old = pickle_get(old_database)
    for path in new_data:
        new = pickle_get(path)
        try:
            old = old.append(new, ignore_index=True)
        except TypeError as e:
            print("Failed to add {}:".format(path), e)
    if save:
        pickle_put(old_database, old)
    return old
def new_database(data_dir, classes_dir, save_path=''):
'''Build a pandas dataframe, pickle and save from a list of profiles
and labels files
:param str data_dir: directory containing \*profiles.pickle files
:param str classes_dir: directory containing \*classifications.pickle files
:param str save_path: if specified, resulting database will be saved to save_path
'''
all_data = {}
for f in glob(os.path.join(data_dir, '*profiles.pickle')):
name = os.path.basename(f)
data = pickle_get(f)
try:
labels = pickle_get(os.path.join(classes_dir, name.replace('profiles', 'classifications')))
except IOError:
print("No classifications found for {}".format(name))
continue
for key in data:
try:
data[key].update(labels[key])
except KeyError:
continue
all_data.update(data)
df = pd.DataFrame.from_dict(all_data, orient='index')
if save_path:
pickle_put(save_path, df)
return df
```
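A small round-trip sketch of the helpers above (`pickle_put`, `pickle_get`, `join_files`, `split_data`), assuming they are importable from the module; the batch data is made up and written to a temporary directory.
```python
import os
import tempfile

with tempfile.TemporaryDirectory() as tmp:
    # Write two hypothetical per-batch profile files
    paths = []
    for batch in range(2):
        data = {'img_{}_{}.npy'.format(batch, i): {'Data': [batch, i]} for i in range(3)}
        path = os.path.join(tmp, '{}_profiles.pickle'.format(batch))
        pickle_put(path, data)
        paths.append(path)

    # Combine them into a single file (the inputs are removed) and read it back
    combined = os.path.join(tmp, 'all_profiles.pickle')
    join_files(combined, paths)
    print(len(pickle_get(combined)))      # 6 entries
    print(split_data(list(range(7)), 3))  # [[0, 1, 2], [3, 4, 5], [6]]
```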
#### File: obsidian/utils/import_cbf.py
```python
try:
from dxtbx import load
from dxtbx.format.FormatCBF import FormatCBF
except ImportError:
print("Couldn't import dxtbx, sorry :(")
import numpy as np
import glob, os, sys, getopt
import math
def read_header(f, params, file_string=False):
'''Extract desired parameters from header file. Will not work correctly if params contain any spaces
:param str f: header file path
:param list params: List of strings, each the name of a parameter found in the header
:return: dict of param:values where values is a list of all subsequent space separated strings
Example:
.. code-block:: python
>>> file_path = 'path/to/header.txt'
>>> read_header(file_path, ['Beam_xy', 'Detector_distance'])
{'Beam_xy' : ['(1251.51,', '1320.12)', 'pixels'], 'Detector_distance':['0.49906','m']}
'''
head = f.splitlines() if file_string else open(f, 'r')
info = {}
# Read header file line by line
for l in head:
if any(param in l for param in params):
p = [param for param in params if param in l][0]
info[p] = l.split(' ')[2:] # extract all info following parameter keyword
return info
def get_params(header):
    ''' Use read_header to extract the parameters Wavelength, Detector_distance and Pixel_size,
    which are required to calculate the resolution per pixel
:param str header: path to header file
:returns: values for wavelength, detector distance and pixel size
'''
fields = ['Wavelength', 'Detector_distance', 'Pixel_size']
defaults = False
try:
        # Read parameter values from the header
info = read_header(header, fields)
except Exception as e:
defaults = True
# Raise exception which, if handled, will allow code to continue
# with default values
raise e
if defaults:
wl = 0.96863
L = 0.49906
pixel_size = 172*10**(-6)
else:
# Extract raw parameter values
wl = float(info['Wavelength'][0])
L = float(info['Detector_distance'][0])
pixel_size = float(info['Pixel_size'][0])
return wl, L, pixel_size
def radius_from_res(max_res, header):
    '''Calculate the radius in pixels corresponding to a given maximum resolution. Used to compute rmax when cropping images
:param float max_res: Maximum resolution at which user expects to find protein rings, in Angstrom
:param str header: path to header file
:return: Radius in pixels
'''
wl, L, pixel_size = get_params(header)
    # Convert parameters to meters (pixel_size is already in meters)
l = wl*10**(-10) # Wavelength
d = max_res*10**(-10) # Resolution
r = L * math.tan(2*math.asin(l/(2*d))) # Radius in meters
return r/pixel_size # Radius in pixels
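# Worked example (editorial note, not part of the original module): using the
# fallback values above -- wavelength 0.96863 A, detector distance 0.49906 m,
# pixel size 172 um -- a 5 Angstrom resolution limit gives
#   r = 0.49906 * tan(2 * asin(0.96863 / (2 * 5))) ~= 0.098 m,
# i.e. roughly 0.098 / 172e-6 ~= 570 pixels from the beam centre.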
def progress(n, tot):
'''Print a progress bar to terminal
:param int n: number of processed items
:param int tot: total number of items
'''
    bar_len = 35
    prog = int(round(bar_len * n/tot))
    # End
    if prog == bar_len:
        sys.stdout.write("\r[{0}] {1:.1f}%".format("=" * bar_len, 100*n/tot))
    # Beginning
    elif prog == 0:
        sys.stdout.write("\r[{0}] {1:.1f}%".format("-" * bar_len, 100*n/tot))
    # In between
    else:
        sys.stdout.write("\r[{0}{1}{2}] {3:.1f}%".format("=" * (prog-1), ">", "-" * (bar_len-prog), 100*n/tot))
    sys.stdout.flush()
class Cbf2Np():
    '''Encapsulation of the cbf to npy conversion process
'''
def __init__(self, root, destination, box=False, centre=None, max_res=None):
'''
:param str root: directory containing subdirectories of files to be converted
:param str destination: directory to house new files
:param bool box: if True, save cropped images up to specified resolution
        :param float max_res: maximum resolution to crop images to, in Angstrom
:param tuple centre: beam centre in form (beam_y, beam_x) (in pixel coordinates)
'''
self.root = root
self.dest = destination
self.all_dirs = self.get_dirs() # create dictionary of directory paths and files
self.box = box
if box:
assert max_res is not None, "Cannot crop image without maximum resolution value"
self.centre = centre # in pixel coords, (row, col)
self.max_res = max_res
def get_box(self, header):
'''Get box indices for cropped image
:param str header: file path for header txt file
:returns: start and stop row and col indices
'''
rmax = radius_from_res(self.max_res, header)
        if self.centre is None:
            # If centre tuple not provided manually, extract it from the header
            info = read_header(header, ['Beam_xy'])
            centre = tuple(reversed(eval(''.join(info['Beam_xy'][:2]))))
        else:
            centre = self.centre
        y, x = centre[0], centre[1]
# Return r0, r1, c0, c1
return int(round(y-rmax)), int(round(y+rmax)), int(round(x-rmax)), int(round(x+rmax))
def get_dirs(self):
'''Scan root for image folders
'''
bottom_dirs = {}
for (dirName, subdirList, fileList) in os.walk(self.root, topdown=True):
# Collect bottom most directory names and files
if len(subdirList)==0:
bottom_dirs[dirName]=fileList
return bottom_dirs
def get_npy_filedir(self, cbf_filedir):
        '''Generate the destination directory from the cbf file directory. Create it if not
preexisting
:param str cbf_filedir: cbf file directory to base destination directory off
'''
rel = os.path.relpath(cbf_filedir, self.root)
npy_filedir = os.path.join(self.dest, '' if rel=='.' else rel)
# Create destination directory
if not os.path.isdir(npy_filedir):
os.makedirs(npy_filedir)
return npy_filedir
def cbf_to_npfile(self, cbf_filepath, npy_filedir, header=False):
'''Convert single cbf file contents to numpy array and save in specified
directory
        :param str cbf_filepath: input cbf file path
        :param str npy_filedir: output directory for the converted npy file
:param bool header: if True, will also extract header info from cbf file
:returns: path of newly created npy file
'''
# File and directory names
cbf_filedir, cbf_filename = os.path.split(cbf_filepath)
npy_filename = cbf_filename.replace('.cbf','.npy')
npy_filepath = os.path.join(npy_filedir, npy_filename)
# Extract header data
if header:
self.extract_header(cbf_filepath, npy_filedir)
# Extract image data:
image = load(cbf_filepath)
if self.box:
h = os.path.join(npy_filedir, "header.txt")
r0, r1, c0, c1 = self.get_box(h)
# Extract cropped image data
data = image.get_raw_data().as_numpy_array()[r0:r1, c0:c1]
else:
# Extract uncropped image data
data = image.get_raw_data().as_numpy_array()
np.save(npy_filepath, data, allow_pickle=False)
return npy_filepath
def read_data_directory(self):
'''Read directory and parse each file into a numpy array, save in destination directory
'''
for directory in self.all_dirs.keys():
print("\nWorking through directory {}".format(directory))
header = True
# Track progress
i = 0
tot = len(self.all_dirs[directory])
progress(i, tot)
# Determine destination directory for current image directory
npy_filedir = self.get_npy_filedir(directory)
# Open file for writing image keys
with open(os.path.join(npy_filedir, 'keys.txt'), 'w') as keys:
for cbf_file in self.all_dirs[directory]:
if os.path.splitext(cbf_file)[1] == '.cbf':
cbf_filepath = os.path.join(directory, cbf_file)
# Extract image data
npy_filepath = self.cbf_to_npfile(cbf_filepath, npy_filedir, header=header)
# Extract and write image key ('File_path' in header)
keys.write(self.get_image_key(cbf_filepath, npy_filepath)+'\n')
header = False # Extract header for first file only
i+=1
progress(i, tot)
def get_image_key(self, cbf_filepath, npy_filepath):
'''Keep the original image path from the header of each image as a means of
tracing data back to original image data.
:param str cbf_filepath: cbf image path
:param str npy_filepath: new numpy image path
:returns: string containing npy path followed by ramdisk path
'''
cbf_filedir, cbf_filename = os.path.split(cbf_filepath)
key = 'Image_path'
head = FormatCBF.get_cbf_header(cbf_filepath)
value = read_header(head, [key], file_string=True)
ans = os.path.join(value['Image_path'][0], cbf_filename)
return '{} {}'.format(npy_filepath, ans)
def extract_header(self, cbf_filepath, npy_dir, bg=False):
'''Assume all headers (mostly) the same for a directory of image files (i.e directory
        contains data from a single acquisition experiment)
:param str cbf_filepath: path to cbf image
:param str npy_dir: path to destination
:param bool bg: if True, add 'background' to file name
'''
header = open(os.path.join(npy_dir, "{}header.txt".format("bg" if bg else "")), "w")
header.write(FormatCBF.get_cbf_header(cbf_filepath))
header.close()
def read_bg_directory(self, bg_dir):
'''Read background directory and save mean background file to
destination directory.
:param str bg_dir: path to background directory
'''
print("Importing background data...")
# Read all files in bg_dir into list of np arrays
bgData = []
files = glob.glob(os.path.join(bg_dir,'*.cbf'))
i = 0
tot = len(files)
for f in files:
img = load(f)
self.extract_header(f, self.dest, bg=True)
if self.box:
h = os.path.join(self.dest, "bgheader.txt")
r0, r1, c0, c1 = self.get_box(h)
# Extract cropped image data
bgData.append(img.get_raw_data().as_numpy_array()[r0:r1, c0:c1])
else:
bgData.append(img.get_raw_data().as_numpy_array())
i += 1
progress(i, tot)
bgData = np.dstack(bgData)
bgMean = np.mean(bgData, axis=2)
np.save(os.path.join(self.dest,"background.npy"), bgMean, allow_pickle=False)
def main(argv):
data_root = ''
data_dest = ''
bg_directory = ''
kwargs = {}
help_message = "cbf_to_npy.py \
--root <directory containing cbf files (incl subdirs)> \
--dest <directory to store npy files in> \
-h (print this message)\
-b <background directory> \
-c <beam centre in pixels as tuple '(row, col)'> \
        -r <maximum resolution to crop images to, in Angstrom>"
try:
opts, args = getopt.getopt(argv, 'hc:r:b:', ['root=', 'dest='])
except getopt.GetoptError as e:
print(e)
print(help_message)
sys.exit(2)
for opt, arg in opts:
if opt=='-h':
print(help_message)
sys.exit()
elif opt=='--root':
data_root = os.path.abspath(arg)
elif opt=='--dest':
data_dest = arg
elif opt=='-c':
kwargs['centre'] = eval(arg) # tuple
elif opt=='-b':
bg_directory = arg
elif opt=='-r':
kwargs['max_res'] = float(arg)
kwargs['box'] = True
assert os.path.exists(data_root), "Invalid data root directory"
if not os.path.exists(data_dest):
os.makedirs(data_dest)
# Instantiate
do_thing = Cbf2Np(data_root, data_dest, **kwargs)
# Background data, saved as single averaged file (if specified)
if bg_directory:
do_thing.read_bg_directory(bg_directory)
# Measurement data
do_thing.read_data_directory()
print("\nAll done!")
if __name__ == '__main__':
main(sys.argv[1:])
``` |
{
"source": "jmp448/gp_fates",
"score": 2
} |
#### File: gp_fates/GPfates/GPfates.py
```python
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import GPy
import sys
sys.path.insert(1, '../GPclust/')
from GPclust import OMGP
from gp_utils import bifurcation_statistics
from gp_utils import identify_bifurcation_point
class GPfates(object):
''' An object for GPfates analysis
'''
def __init__(self, sample_info=None, expression_matrix=None, pseudotime_column=None):
super(GPfates, self).__init__()
self.s = sample_info
self.e = expression_matrix[sample_info.index]
self.dr_models = {}
def _gene_filter(self, gene_filter=None):
        ''' Helper for the common operation of optionally filtering genes.
'''
if not gene_filter:
Y = self.e
else:
Y = self.e.loc[gene_filter]
return Y
def infer_pseudotime(self, priors=None, gene_filter=None, s_columns=None):
        ''' Infer pseudotime using a 1-dimensional Bayesian GPLVM
'''
if s_columns:
Y = self.s[s_columns].as_matrix()
else:
Y = self._gene_filter(gene_filter).as_matrix().T
self.time_model = GPy.models.BayesianGPLVM(Y, 1, init='random')
self.time_model.rbf.lengthscale.constrain_fixed(2., warning=False)
self.time_model.rbf.variance.constrain_fixed(200., warning=False)
if priors is not None:
for i, p in enumerate(priors):
prior = GPy.priors.Gaussian(p, 2.)
self.time_model.X.mean[i, [0]].set_prior(prior, warning=False)
self.time_model.optimize(max_iters=2000, messages=True)
self.s['pseudotime'] = self.time_model.X.mean[:, [0]]
def plot_psuedotime_uncertainty(self, **kwargs):
yerr = 2 * np.sqrt(self.time_model.X.variance)
plt.errorbar(self.s['pseudotime'], self.s['pseudotime'], yerr=yerr, fmt='none')
plt.scatter(self.s['pseudotime'], self.s['pseudotime'], zorder=2, **kwargs)
def dimensionality_reduction(self, gene_filter=None, name='bgplvm'):
''' Use a Bayesian GPLVM to infer a low-dimensional representation
'''
Y = self._gene_filter(gene_filter).as_matrix().T
gplvm = GPy.models.BayesianGPLVM(Y, 5)
self.dr_models[name] = gplvm
gplvm.optimize(max_iters=2000, messages=True)
def store_dr(self, name='bgplvm', dims=[0, 1]):
''' Put a low-dimensional representation in the sample table.
'''
gplvm = self.dr_models[name]
for d in dims:
self.s['{}_{}'.format(name, d)] = gplvm.X.mean[:, [d]]
def model_fates(self, t='pseudotime', X=['bgplvm_0', 'bgplvm_1'], C=2, step_length=0.01):
''' Model multiple cell fates using OMGP
'''
self.fate_model = OMGP(self.s[[t]].as_matrix(), self.s[X].as_matrix(), K=C, prior_Z='DP')
self.fate_model.variance.constrain_fixed(0.05)
self.fate_model['(.*)lengthscale'].constrain_fixed(1.)
self.fate_model.hyperparam_interval = 1e3
self.fate_model.optimize(maxiter=1000, step_length=step_length)
def make_fates_viz(self, s_columns=['bgplvm_0', 'bgplvm_1']):
''' Make an OMGP model based on the fate model which visualizes the
trends in a representative space.
'''
XY = self.s[s_columns].as_matrix()
self.fates_viz = OMGP(self.fate_model.X, XY, prior_Z='DP')
self.fates_viz[:] = self.fate_model[:]
self.fates_viz.phi = self.fate_model.phi
def identify_bifurcation_point(self, n_splits=30):
''' Linear breakpoint model to infer drastic likelihood decrease
'''
omgp = self.fate_model
return identify_bifurcation_point(omgp, n_splits=n_splits)
def calculate_bifurcation_statistics(self, gene_filter=None):
''' Calculate the bifurcation statistics for all or a subset of genes.
'''
        return bifurcation_statistics(self.fate_model, self.e)
``` |
{
"source": "jmp75/pyrefcount",
"score": 3
} |
#### File: pyrefcount/refcount/putils.py
```python
import os
import sys
from glob import glob
from ctypes.util import find_library as ctypes_find_library
from typing import List, Union
def library_short_filename(library_name: str) -> str:
"""Based on the library name, return the platform-specific expected library short file name
Args:
library_name (str): name of the library, for instance 'R', which results out of this
function as 'libR.so' on Linux and 'R.dll' on Windows
Raises:
Exception: invalid argument
Returns:
str: expected short file name for the library, for this platform
"""
if library_name is None:
raise Exception("library_name cannot be None")
else:
if sys.platform == "win32":
return "{}.dll".format(library_name)
else:
return "lib{}.so".format(library_name)
def find_full_path(name: str) -> Union[str, None]:
"""Find the full path of a library in under the python
installation directory, or as devised by ctypes.find_library
Args:
name (str): Library name, e.g. 'R' for the R programming language.
Returns:
Union[str, None]: First suitable library full file name.
Examples:
>>> from refcount.putils import *
>>> find_full_path('gfortran')
'/home/xxxyyy/anaconda3/envs/wqml/lib/libgfortran.so'
>>> find_full_path('R')
'libR.so'
"""
full_libpath = None
if name is None:
return None
else:
lib_short_fname = library_short_filename(name)
prefixed_lib_pat = os.path.join(sys.prefix, "lib*", lib_short_fname)
prefixed_libs = glob(prefixed_lib_pat)
if prefixed_libs:
full_libpath = prefixed_libs[0]
if not full_libpath:
full_libpath = ctypes_find_library(name)
return full_libpath
def find_full_paths(dll_short_name: str, directories: List[str] = None) -> List[str]:
"""Find the full paths to library files, if they exist
Args:
        dll_short_name (str): Short file name of the library to search for, e.g. 'libgfortran.so'
directories (List[str], optional): directories under which to look for this file. Defaults to None.
Returns:
List[str]: zero or more matches, full paths to candidate files
"""
if directories is None:
directories = []
full_paths = [os.path.join(d, dll_short_name) for d in directories]
return [x for x in full_paths if os.path.exists(x)]
def find_full_paths_env_var(
dll_short_name: str, env_var_name: str = "PATH"
) -> List[str]:
"""Find the full paths to library files, if they exist
Args:
        dll_short_name (str): Short file name of the library to search for, e.g. 'libgfortran.so'
        env_var_name (str, optional): Environment variable with paths to search under. Defaults to "PATH".
Returns:
List[str]: zero or more matches, full paths to candidate files
"""
x = os.environ.get(env_var_name)
if x is not None:
search_paths = x.split(os.pathsep)
else:
search_paths = [""]
return find_full_paths(dll_short_name, search_paths)
def prepend_path_env(
added_paths: Union[str, List[str]], subfolder: str = None, to_env: str = "PATH"
) -> str:
"""Build a new list of directory paths, prepending prior to an existing env var with paths.
Args:
added_paths (Union[str,List[str]]): paths prepended
subfolder (str, optional): Optional subfolder name to append to each in path prepended. Useful for 64/32 bits variations. Defaults to None.
to_env (str, optional): Environment variable with existing Paths to start with. Defaults to 'PATH'.
Returns:
str: Content (set of paths), typically for a updating/setting an environment variable
"""
path_sep = os.pathsep
if isinstance(added_paths, str):
added_paths = [added_paths]
prior_path_env = os.environ.get(to_env)
if prior_path_env is not None:
prior_paths = prior_path_env.split(path_sep)
else:
prior_paths = []
if subfolder is not None:
added_paths = [os.path.join(x, subfolder) for x in added_paths]
added_paths = [x for x in added_paths if os.path.exists(x)]
new_paths = prior_paths + added_paths
# TODO: check for duplicate folders, perhaps.
new_env_val = path_sep.join(new_paths)
return new_env_val
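# Illustrative usage (editorial note, not part of the original module): build a
# PATH-like string with two hypothetical directories prepended, without touching
# the environment itself.
#   new_path = prepend_path_env(["/opt/mylib", "/usr/local/mylib"], to_env="PATH")
#   os.environ["PATH"] = new_path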
# TODO: is that of any use still?? refactored out from uchronia and co. , but appears unused.
# def find_first_full_path(native_lib_file_name, readable_lib_name = "native library", env_var_name = ""):
# if os.path.isabs(native_lib_file_name):
# if (not os.path.exists(native_lib_file_name)):
# raise FileNotFoundError("Could not find specified file {0} to load for {1}".format(native_lib_file_name, readable_lib_name))
# return native_lib_file_name
# if (native_lib_file_name is None or native_lib_file_name == ''):
# raise FileNotFoundError("Invalid empty file name to load for {0}".format(readable_lib_name))
# native_lib_file_name = _find_first_full_path(native_lib_file_name, env_var_name)
# return native_lib_file_name
# def _find_first_full_path(short_file_name, env_var_name = ""):
# if (none_or_empty(short_file_name)):
# raise Exception("short_file_name")
# lib_search_path_env_var = env_var_name
# if (none_or_empty(lib_search_path_env_var)):
# if(sys.platform == 'win32'):
# lib_search_path_env_var = "PATH"
# else:
# lib_search_path_env_var = "LD_LIBRARY_PATH"
# candidates = find_full_path_env_var(short_file_name, lib_search_path_env_var)
# if ((len(candidates) == 0) and (sys.platform == 'win32')):
# if (os.path.exists(short_file_name)):
# candidates = [short_file_name]
# if (len(candidates) == 0):
# raise FileNotFoundError("Could not find native library named '{0}' within the directories specified in the '{1}' environment variable".format(short_file_name, lib_search_path_env_var))
# else:
# return candidates[0]
# def find_full_path_env_var(dll_short_name, env_var_name="PATH"):
# x = os.environ.get(env_var_name)
# if x is not None:
# search_paths = x.split(os.pathsep)
# else:
# search_pathsPathUpdater = [""]
# return find_full_paths(dll_short_name, search_paths)
# def find_full_paths(dll_short_name, directories = []):
# full_paths = [os.path.join(d, dll_short_name) for d in directories]
# return [x for x in full_paths if os.path.exists(x)]
# def none_or_empty(x):
# return (x is None or x == '')
# # The following is useful, but idiosyncratic. Consider and rethink.
def build_new_path_env (from_env='LIBRARY_PATH', to_env='PATH', lib_short_fname='unknown.dll') -> str:
"""Propose an update to an existing environment variable, based on the path(s) specified in another environment variable. This function is effectively meant to be useful on Windows only.
Args:
from_env (str, optional): name of the source environment variable specifying the location(s) of custom libraries to load. Defaults to 'LIBRARY_PATH'.
to_env (str, optional): environment variable to update, most likely the Windows PATH env var. Defaults to 'PATH'.
lib_short_fname (str, optional): short file name of the custom library to load. This information is optional and used only for possible warning/log output messages. Defaults to 'unknown.dll'.
Returns:
str: the proposed updated content for the 'to_env' environment variable.
"""
if(sys.platform == 'win32'):
path_sep = ';'
shared_lib_paths = os.environ.get(from_env)
if(shared_lib_paths is not None):
# startup_msg = appendstartup_msg(paste0('Found env var ', from_env, '=', shared_lib_paths), startup_msg)
arch = os.environ["PROCESSOR_ARCHITECTURE"]
if arch == 'AMD64':
subfolder = '64'
else:
subfolder = '32'
shared_lib_paths_vec = shared_lib_paths.split(path_sep)
return prepend_path_env(shared_lib_paths_vec, subfolder, to_env=to_env)
        else:
            print("WARNING: a function was called to look for environment variable '{0}' to update the environment variable '{1}', but it was not found. This may be fine, but if the package fails to load because '{2}' is not found, this is a likely cause.".format(from_env, to_env, lib_short_fname))
    # Fall back to the current value so that callers always receive a string
    return os.environ.get(to_env, '')
def update_path_windows (from_env='LIBRARY_PATH', to_env='PATH', lib_short_fname='unknown.dll') -> None:
"""If called on Windows, append an environment variable, based on the path(s) specified in another environment variable. This function is effectively meant to be useful on Windows only.
Args:
from_env (str, optional): name of the source environment variable specifying the location(s) of custom libraries to load. Defaults to 'LIBRARY_PATH'.
to_env (str, optional): environment variable to update, most likely the Windows PATH env var. Defaults to 'PATH'.
lib_short_fname (str, optional): short file name of the custom library to load. This information is optional and used only for possible warning/log output messages. Defaults to 'unknown.dll'.
Returns:
None
"""
if(sys.platform == 'win32'):
os.environ[to_env] = build_new_path_env(from_env, to_env, lib_short_fname)
``` |
{
"source": "jmp75/spotpy",
"score": 2
} |
#### File: spotpy/examples/spot_setup_hymod_exe.py
```python
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np
import spotpy
import os
import multiprocessing as mp
from distutils.dir_util import copy_tree, remove_tree
#from shutil import rmtree
import sys
class spot_setup(object):
def __init__(self,parallel='seq'):
self.params = [spotpy.parameter.Uniform('cmax',low=1.0 , high=500, optguess=412.33),
spotpy.parameter.Uniform('bexp',low=0.1 , high=2.0, optguess=0.1725),
spotpy.parameter.Uniform('alpha',low=0.1 , high=0.99, optguess=0.8127),
spotpy.parameter.Uniform('Ks',low=0.0 , high=0.10, optguess=0.0404),
spotpy.parameter.Uniform('Kq',low=0.1 , high=0.99, optguess=0.5592)]
self.curdir = os.getcwd()
self.owd = os.path.realpath(__file__)+os.sep+'..'
self.hymod_path = self.owd+os.sep+'hymod_exe'
self.evals = list(np.genfromtxt(self.hymod_path+os.sep+'bound.txt',skip_header=65)[:,3])[:730]
self.Factor = 1944 * (1000 * 1000 ) / (1000 * 60 * 60 * 24)
self.parallel = parallel
def parameters(self):
return spotpy.parameter.generate(self.params)
def simulation(self,x):
if self.parallel == 'seq':
call = ''
elif self.parallel == 'mpi':
call = str(int(os.environ['OMPI_COMM_WORLD_RANK'])+2)
copy_tree(self.hymod_path, self.hymod_path+call)
elif self.parallel == 'mpc':
call =str(os.getpid())
copy_tree(self.hymod_path, self.hymod_path+call)
else:
            raise Exception('No call variable was assigned')
os.chdir(self.hymod_path+call)
try:
if sys.version_info.major == 2:
params = file('Param.in', 'w')
elif sys.version_info.major == 3:
params = open('Param.in','w')
for i in range(len(x)):
                if i == len(x) - 1:
params.write(str(round(x[i],5)))
else:
params.write(str(round(x[i],5))+' ')
params.close()
os.system('HYMODsilent.exe')
#try:
if sys.version_info.major == 2:
SimRR = file('Q.out', 'r')
elif sys.version_info.major == 3:
SimRR = open('Q.out', 'r')
else:
raise Exception("Your python is too old for this example")
simulations=[]
for i in range(64):
SimRR.readline()
for i in range(730):
val= SimRR.readline()
#print(i,val)
simulations.append(float(val)*self.Factor)
SimRR.close()
#except:
# SimRR = 795 * [0]
except:
            print('Model has failed')
simulations=[np.nan]*795 #Assign bad values - model might have crashed
os.chdir(self.curdir)
if self.parallel == 'mpi' or self.parallel == 'mpc':
remove_tree(self.hymod_path+call)
return simulations
def evaluation(self):
return self.evals
def objectivefunction(self,simulation,evaluation, params=None):
#like = spotpy.likelihoods.gaussianLikelihoodMeasErrorOut(evaluation,simulation) # Works good
like = spotpy.objectivefunctions.nashsutcliffe(evaluation,simulation) # Works good
return like
``` |
{
"source": "jmpalk/argus",
"score": 2
} |
#### File: argus/argus/forms.py
```python
from django import forms
import datetime
from datetime import date, datetime, timezone
import ipaddress
import time
class LoginForm(forms.Form):
username = forms.CharField(label='username', max_length=32)
password = forms.CharField(label='password', max_length=32, widget=forms.PasswordInput())
class HostSearchForm(forms.Form):
host_ip = forms.CharField(label='IP Address', max_length=18)
from_scan_date = forms.DateField(
widget = forms.SelectDateWidget(years={'2020': 2020, '2019': 2019, '2018': 2018, '2017': 2017, '2016': 2016}, months={ '01':('Jan'), '02':('Feb'), '03':('Mar'), '04':('Apr'),
'05':('May'), '06':('Jun'), '07':('Jul'), '08':('Aug'),
'09':('Sep'), '10':('Oct'), '11':('Nov'), '12':('Dec')}, empty_label=("Choose Year", "Choose Month", "Choose Day")),
)
to_scan_date = forms.DateField(
widget = forms.SelectDateWidget(years={'2020': 2020, '2019': 2019, '2018': 2018, '2017': 2017, '2016': 2016}, months={ '01':('Jan'), '02':('Feb'), '03':('Mar'), '04':('Apr'),
'05':('May'), '06':('Jun'), '07':('Jul'), '08':('Aug'),
'09':('Sep'), '10':('Oct'), '11':('Nov'), '12':('Dec')}, empty_label=("Choose Year", "Choose Month", "Choose Day")),
)
def clean_host_ip(self):
data = self.cleaned_data['host_ip']
try:
ipaddress.ip_network(data)
except:
print("IP Error")
raise forms.ValidationError('Invalid IP Address')
return data
def clean_from_scan_date(self):
data = self.cleaned_data['from_scan_date']
print("clean from date")
print(data.toordinal())
print(date.fromtimestamp(time.time()).toordinal())
if data > date.fromtimestamp(time.time()):
raise forms.ValidationError('Invalid date - Date is in the future')
return data
def clean_to_scan_date(self):
data = self.cleaned_data['to_scan_date']
print("clean to date")
print(data.toordinal())
print(date.fromtimestamp(time.time()).toordinal())
if data > date.fromtimestamp(time.time()):
print("foo")
            raise forms.ValidationError('Invalid date - Date is in the future')
return data
    def clean(self):
        cleaned_data = super().clean()
        from_scan_date = cleaned_data.get('from_scan_date')
        to_scan_date = cleaned_data.get('to_scan_date')
        if from_scan_date and to_scan_date and from_scan_date > to_scan_date:
            print("bar")
            raise forms.ValidationError('Invalid dates - Search range is inverted')
        return cleaned_data
```
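The `clean_host_ip` check above leans on `ipaddress.ip_network` to accept either a single address or a CIDR range. A standalone sketch of that behaviour, independent of Django, is shown below; the helper name is hypothetical.
```python
import ipaddress

def looks_like_ip_or_network(value: str) -> bool:
    """Mirror of the form-level check: True if value parses as an address or network."""
    try:
        ipaddress.ip_network(value)
        return True
    except ValueError:
        return False

print(looks_like_ip_or_network('10.0.0.0/24'))  # True
print(looks_like_ip_or_network('10.0.0.1'))     # True
print(looks_like_ip_or_network('not-an-ip'))    # False
```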
#### File: argus/argus/views.py
```python
from django.http import HttpResponseRedirect
from django.shortcuts import render, get_object_or_404, redirect
from argus.models import Scan, Host, Port, Service
from django.views import generic
from django.urls import reverse
from django.template import loader
from django.contrib.auth import authenticate, login, logout
from django.conf import settings
from django.contrib.auth.decorators import login_required
import os
import sys
import subprocess
import re
import ipaddress
from pathlib import Path
from .forms import HostSearchForm, LoginForm
import base64
import datetime
from datetime import datetime, timezone
#helper and data validation functions
def check_for_ip_bad_chars(test_string):
working_string = test_string
print(working_string)
    searchObj = re.sub(r"[\.,/\-0-9]", "", working_string)
print(searchObj)
print(len(searchObj))
if len(searchObj) > 0:
return(True)
return(False)
def check_for_bad_port_chars(test_string):
working_string = test_string
    searchObj = re.sub(r"[,\-0-9]", "", working_string)
if len(searchObj) > 0:
return(True)
return(False)
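# Quick illustration (editorial note, not part of the original module) of the two
# character filters above; they return True when the string contains anything
# beyond the characters expected in an IP or port specification:
#   check_for_ip_bad_chars("10.0.0.0/24,192.168.1.1")  -> False (clean)
#   check_for_ip_bad_chars("10.0.0.1; rm -rf /")        -> True  (rejected)
#   check_for_bad_port_chars("80,443,8000-8100")        -> False (clean)
#   check_for_bad_port_chars("80;whoami")               -> True  (rejected)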
# Create your views here.
#class HomeView(generic.ListView):
# template_name = 'home.html'
# context_object_name = 'latest_scan_list'
#
#
# def get_queryset(self):
# return Scan.objects.order_by('-start_date')[:5]
def logout_page(request):
logout(request)
return redirect('argus:login')
def login_page(request):
if request.method == 'POST':
login_form = LoginForm(request.POST)
if login_form.is_valid():
username = login_form.cleaned_data['username']
password = login_form.cleaned_data['password']
user = authenticate(request, username=username, password=password)
if user is not None:
login(request, user)
return redirect('argus:scan_home')
else:
login_form = LoginForm()
login_failed = True
return render(request, 'login_page.html', {'login_form': login_form, 'login_failed': login_failed})
else:
login_form = LoginForm()
return render(request, 'login_page.html', { 'login_form': login_form, })
@login_required(login_url='/argus/login/')
def home_page(request):
latest_scan_list = Scan.objects.order_by('-start_date')[:5]
host_search_form = HostSearchForm()
return render(request, 'home.html', {'latest_scan_list': latest_scan_list, 'host_search_form': host_search_form})
def validate_date(date_string):
if len(date_string) != 8:
working_date = datetime.now(timezone.utc)
return(working_date, "BADDATE")
year = int(date_string[0:4])
month = int(date_string[4:6])
day = int(date_string[6:8])
print("date: %d %d %d" %(day, month, year))
try:
working_date = datetime(year, month, day, tzinfo=timezone.utc)
except:
working_date = datetime.now(timezone.utc)
print("WTF2")
return(working_date, "BADDATE")
return(working_date, "GOODDATE")
@login_required(login_url='/argus/login/')
def host_search_results(request):
if request.method == 'POST':
host_search_form = HostSearchForm(request.POST)
if host_search_form.is_valid():
host_ip = host_search_form.cleaned_data['host_ip']
from_scan_date = host_search_form.cleaned_data['from_scan_date']
to_scan_date = host_search_form.cleaned_data['to_scan_date']
from_scan_date_raw = int(from_scan_date.strftime("%Y%m%d"))
to_scan_date_raw = int(to_scan_date.strftime("%Y%m%d"))
redirect_url = reverse('argus:hostsearchresults') + "?host_ip=%s&fsd=%d&tsd=%d&s=%d&c=%d" % (host_ip, from_scan_date_raw, to_scan_date_raw, 0, 5)
return HttpResponseRedirect(redirect_url)
else:
print("foobar")
return render(request, 'hostsearcherror.html')
else:
host_search_form = HostSearchForm()
host_ip = request.GET.get('host_ip', '0.0.0.0')
from_scan_date_raw = request.GET.get('fsd', '00000000')
to_scan_date_raw = request.GET.get('tsd', '00000000')
start = int(request.GET.get('s', 0))
count = int(request.GET.get('c', 5))
if from_scan_date_raw == '00000000':
from_scan_date = datetime.now(timezone.utc)
else:
(from_scan_date, result_code) = validate_date(from_scan_date_raw)
if result_code == "BADDATE":
return render(request, 'hostsearcherror.html', {'BADDATE': True})
if to_scan_date_raw == '00000000':
to_scan_date = datetime.now(timezone.utc)
else:
(to_scan_date, result_code) = validate_date(to_scan_date_raw)
if result_code == "BADDATE":
return render(request, 'hostsearcherror.html', {'BADDATE': True})
if from_scan_date > to_scan_date:
print("WTF")
return render(request, 'hostsearcherror.html', {'FLIPPEDDATES': True})
host_set = Host.objects.filter(host_scan_time__gt=from_scan_date, host_scan_time__lt=to_scan_date, ip_address=host_ip).order_by('-host_scan_time')
num_hosts = host_set.count()
if start - count < 0:
prev = 0
else:
prev = start - count
if start + count >= num_hosts:
next_set = -1
else:
next_set = start+count
latest_scan_list = Scan.objects.order_by('-start_date')[:5]
host_set = Host.objects.filter(host_scan_time__gt=from_scan_date, host_scan_time__lt=to_scan_date, ip_address=host_ip).order_by('-host_scan_time')[start:start+count]
return render(request, 'host_search_results.html', {'host_set': host_set, 'host_ip': host_ip, 'from_scan_date': from_scan_date, 'to_scan_date': to_scan_date, 'start': start, 'count': count, 'num_hosts': num_hosts, 'latest_scan_list': latest_scan_list, 'prev': prev, 'next_set': next_set, 'host_search_form': host_search_form})
@login_required(login_url='/argus/login/')
def host_search_error(request, baddata):
host_ip = base64.b64decode(baddata).strip()
return render(request, 'hostsearcherror.html', {'host_ip': host_ip})
@login_required(login_url='/argus/login/')
def scan_detail(request, scan_id):
start = int(request.GET.get('s', 0))
count = int(request.GET.get('c', 5))
if start - count < 0:
prev = 0
else:
prev = start - count
num_hosts = Scan.objects.get(pk=scan_id).host_set.all().count()
if start + count >= num_hosts:
next_set = -1
else:
next_set = start+count
latest_scan_list = Scan.objects.order_by('-start_date')[:5]
scan = get_object_or_404(Scan, pk=scan_id)
hosts = scan.host_set.order_by('ip_address')[start:start+count]
host_search_form = HostSearchForm()
return render(request, 'scan_detail.html', {'start': start, 'count': count, 'prev': prev, 'next_set': next_set, 'scan': scan, 'hosts': hosts, 'latest_scan_list': latest_scan_list, 'host_search_form': host_search_form})
@login_required(login_url='/argus/login/')
def scan_list(request):
start = int(request.GET.get('s', 0))
count = int(request.GET.get('c', 10))
if start - count < 0:
prev = 0
else:
prev = start - count
num_scans = Scan.objects.all().count()
if start + count >= num_scans:
next_set = -1
else:
next_set = start+count
scan_list = Scan.objects.order_by('-start_date')[start:start+count]
latest_scan_list = Scan.objects.order_by('-start_date')[:5]
host_search_form = HostSearchForm()
return render(request, 'scan_list.html', {'start': start, 'count': count, 'scan_list': scan_list, 'latest_scan_list': latest_scan_list, 'prev': prev, 'next_set': next_set, 'host_search_form': host_search_form})
@login_required(login_url='/argus/login/')
def search_result_scan_list(request):
start = int(request.GET.get('s', 0))
count = int(request.GET.get('c', 10))
if start - count < 0:
prev = 0
else:
prev = start - count
num_scans = Scan.objects.all().count()
if start + count >= num_scans:
next_set = -1
else:
next_set = start+count
scan_list = Scan.objects.order_by('-start_date')[start:start+count]
latest_scan_list = Scan.objects.order_by('-start_date')[:5]
host_search_form = HostSearchForm()
return render(request, 'scan_list.html', {'start': start, 'count': count, 'scan_list': scan_list, 'latest_scan_list': latest_scan_list, 'prev': prev, 'next_set': next_set, 'host_search_form': host_search_form})
@login_required(login_url='/argus/login/')
def host_detail(request, host_id):
latest_scan_list = Scan.objects.order_by('-start_date')[:5]
host = get_object_or_404(Host, pk=host_id)
ports = host.port_set.all()
host_search_form = HostSearchForm()
return render(request, 'host_detail.html', {'host': host, 'ports': ports, 'latest_scan_list': latest_scan_list, 'host_search_form': host_search_form})
@login_required(login_url='/argus/login/')
def launch(request):
# BASE_PATH = str(Path(__file__)).split('/')[1:-1]
# TOOLS_PATH = '/'
# for part in BASE_PATH:
# TOOLS_PATH = TOOLS_PATH + part + '/'
#
#
# TOOLS_PATH = TOOLS_PATH + 'tools/'
#sys.path.append(str(TOOLS_PATH))
#print(sys.path)
scan_range = request.POST['scan_range']
port_range = request.POST['port_range']
exclude_range = request.POST['exclude_range']
if check_for_ip_bad_chars(scan_range):
return render(request, 'scanerror.html', {'scan_range': scan_range})
if check_for_ip_bad_chars(exclude_range):
return render(request, 'scanerror.html', {'exclude_range': exclude_range})
if check_for_bad_port_chars(port_range):
return render(request, 'scanerror.html', {'port_range': port_range})
ip_ranges = scan_range.split(',')
for ip_range in ip_ranges:
try:
ipaddress.ip_network(ip_range)
except:
return render(request, 'scanerror.html', {'scan_range': scan_range})
ip_ranges = exclude_range.split(',')
for ip_range in ip_ranges:
try:
ipaddress.ip_network(ip_range)
except:
return render(request, 'scanerror.html', {'exclude_range': exclude_range})
port_set = port_range.split(',')
for port in port_set:
if port.find("-") != -1:
(p1, p2) = port.split("-")
p1 = int(p1)
p2 = int(p2)
if p1 >= p2:
return render(request, 'scanerror.html', {'port_range': port_range})
            if not (0 < p1 < 65536):
                return render(request, 'scanerror.html', {'port_range': port_range})
            if not (0 < p2 < 65536):
                return render(request, 'scanerror.html', {'port_range': port_range})
        elif not (0 < int(port) < 65536):
            return render(request, 'scanerror.html', {'port_range': port_range})
command = ("python3 run_masscan.py %s %s %s &" % (scan_range, port_range, exclude_range))
subprocess.Popen(command, shell=True)
return render(request, 'launch.html', {'scan_range': scan_range, 'exclude_range': exclude_range, 'port_range': port_range})
``` |
{
"source": "jmparelman/dynamic-nmf",
"score": 2
} |
#### File: jmparelman/dynamic-nmf/create-dynamic-partition.py
```python
import os, sys
import logging as log
from optparse import OptionParser
from prettytable import PrettyTable
import unsupervised.nmf, unsupervised.rankings
# --------------------------------------------------------------
def main():
parser = OptionParser(usage="usage: %prog [options] dynamic_topics window_topics1 window_topics2...")
parser.add_option("-o", action="store", type="string", dest="out_path", help="output path", default=None)
(options, args) = parser.parse_args()
if( len(args) < 3 ):
parser.error( "Must specify at least a dynamic topic file, followed by two or more window topic files (in order of time window)" )
log.basicConfig(level=20, format='%(message)s')
# Load dynamic results: (doc_ids, terms, term_rankings, partition, W, H, labels)
dynamic_in_path = args[0]
dynamic_res = unsupervised.nmf.load_nmf_results( dynamic_in_path )
dynamic_k = len(dynamic_res[2])
dynamic_partition = dynamic_res[3]
log.info( "Loaded model with %d dynamic topics from %s" % (dynamic_k, dynamic_in_path) )
# Create a map of window topic label -> dynamic topic
assigned_window_map = {}
dynamic_partition = dynamic_res[3]
for idx, window_topic_label in enumerate(dynamic_res[0]):
assigned_window_map[window_topic_label] = dynamic_partition[idx]
all_partition = []
all_doc_ids = []
# Process each window topic model
window_num = 0
for in_path in args[1:]:
window_num += 1
log.info( "Reading window topics for window %d from %s ..." % ( window_num, in_path ) )
# Load window results: (doc_ids, terms, term_rankings, partition, W, H, labels)
window_res = unsupervised.nmf.load_nmf_results( in_path )
window_doc_ids = window_res[0]
window_k = len(window_res[2])
window_partition = window_res[3]
for window_topic_idx, window_topic_label in enumerate(window_res[6]):
dynamic_topic_idx = assigned_window_map[window_topic_label]
for i, doc_id in enumerate(window_doc_ids):
if window_partition[i] == window_topic_idx:
all_doc_ids.append( doc_id )
all_partition.append( dynamic_topic_idx )
log.info("Created overall partition covering %d documents" % len(all_doc_ids) )
# TODO: fix W and H
if options.out_path is None:
results_out_path = "dynamic-combined.pkl"
else:
results_out_path = options.out_path
unsupervised.nmf.save_nmf_results( results_out_path, all_doc_ids, dynamic_res[1], dynamic_res[2], all_partition, None, None, dynamic_res[6] )
# --------------------------------------------------------------
if __name__ == "__main__":
main()
```
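The core of the script above is a two-step lookup: window-topic label to dynamic topic, then window document to window topic to dynamic topic. A tiny synthetic illustration of that mapping, with made-up labels, is sketched below.
```python
# Dynamic model output: each window-topic label was assigned a dynamic topic index
assigned_window_map = {'w1_t0': 0, 'w1_t1': 1, 'w2_t0': 1}

# One window model: its topic labels, documents, and per-document topic assignment
window_labels = ['w1_t0', 'w1_t1']
window_doc_ids = ['doc_a', 'doc_b', 'doc_c']
window_partition = [0, 1, 0]

all_doc_ids, all_partition = [], []
for window_topic_idx, window_topic_label in enumerate(window_labels):
    dynamic_topic_idx = assigned_window_map[window_topic_label]
    for i, doc_id in enumerate(window_doc_ids):
        if window_partition[i] == window_topic_idx:
            all_doc_ids.append(doc_id)
            all_partition.append(dynamic_topic_idx)

print(list(zip(all_doc_ids, all_partition)))
# [('doc_a', 0), ('doc_c', 0), ('doc_b', 1)]
```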
#### File: jmparelman/dynamic-nmf/prep-text.py
```python
import os, sys
from pathlib import Path
import logging as log
from optparse import OptionParser
import text.util
# --------------------------------------------------------------
def main():
parser = OptionParser(usage="usage: %prog [options] directory1 directory2 ...")
parser.add_option("--df", action="store", type="int", dest="min_df", help="minimum number of documents for a term to appear", default=10)
parser.add_option("--tfidf", action="store_true", dest="apply_tfidf", help="apply TF-IDF term weight to the document-term matrix")
parser.add_option("--norm", action="store_true", dest="apply_norm", help="apply unit length normalization to the document-term matrix")
parser.add_option("--minlen", action="store", type="int", dest="min_doc_length", help="minimum document length (in characters)", default=10)
parser.add_option("-s", action="store", type="string", dest="stoplist_file", help="custom stopword file path", default=None)
parser.add_option("-o","--outdir", action="store", type="string", dest="dir_out", help="output directory (default is current directory)", default=None)
parser.add_option("--lem", action="store", type="string", dest="lem_file", help="lemmatizer dictionary file path", default=None)
parser.add_option("--ngram", action="store", type="int", dest="max_ngram", help="maximum ngram range (default is 1, i.e. unigrams only)", default=1)
# Parse command line arguments
(options, args) = parser.parse_args()
if( len(args) < 1 ):
parser.error( "Must specify at least one directory" )
log.basicConfig(level=20, format='%(message)s')
if options.dir_out is None:
dir_out = Path.cwd()
else:
dir_out = Path(options.dir_out)
# need to create the output directory?
if not dir_out.exists():
try:
log.info("Creating directory %s" % dir_out)
dir_out.mkdir(parents=True, exist_ok=True)
except:
log.error("Error: Invalid output directory %s" % dir_out)
sys.exit(1)
# Load stopwords
if options.stoplist_file is None:
stopwords = text.util.load_stopwords()
else:
log.info( "Using custom stopwords from %s" % options.stoplist_file )
stopwords = text.util.load_stopwords( options.stoplist_file )
if stopwords is None:
stopwords = set()
log.info("No stopword list available")
else:
log.info("Loaded %d stopwords" % len(stopwords))
# Load lemmatization dictionary, if specified
lemmatizer = None
if not options.lem_file is None:
log.info("Loading lemmatization dictionary from %s ..." % options.lem_file)
lemmatizer = text.util.DictLemmatizer(options.lem_file)
# add any missing lemmatized stopwords
extra_stopwords = set()
for stopword in stopwords:
extra_stopwords.add(lemmatizer.apply(stopword))
stopwords = extra_stopwords
log.info("Using %d stopwords after lemmatization" % len(stopwords))
# Process each directory
for in_path in args:
if not os.path.exists(in_path):
log.warning("Warning: Skipping %s - path does not exist" % in_path)
continue
dir_name = os.path.basename( in_path )
# Read content of all documents in the directory
docgen = text.util.DocumentBodyGenerator( [in_path], options.min_doc_length )
docs = []
doc_ids = []
for doc_id, body in docgen:
docs.append(body)
doc_ids.append(doc_id)
# check for no documents
if len(docs) == 0:
log.warning("Warning: Skipping %s - contains no documents" % in_path)
continue
log.info( "Found %d documents to parse" % len(docs) )
# Pre-process the documents
log.info( "Pre-processing documents (%d stopwords, tfidf=%s, normalize=%s, min_df=%d, max_ngram=%d) ..." % (len(stopwords), options.apply_tfidf, options.apply_norm, options.min_df, options.max_ngram ) )
(X,terms) = text.util.preprocess( docs, stopwords=stopwords, min_df = options.min_df, apply_tfidf = options.apply_tfidf,
apply_norm = options.apply_norm, ngram_range = (1,options.max_ngram), lemmatizer=lemmatizer )
log.info( "Created %dx%d document-term matrix" % X.shape )
# Save the pre-processed documents
out_prefix = os.path.join( dir_out, dir_name )
text.util.save_corpus( out_prefix, X, terms, doc_ids )
# --------------------------------------------------------------
if __name__ == "__main__":
main()
```
#### File: dynamic-nmf/unsupervised/coherence.py
```python
class ModelSimilarity:
'''
Uses a model (e.g. Word2Vec model) to calculate the similarity between two terms.
'''
def __init__( self, model ):
self.model = model
def similarity( self, ranking_i, ranking_j ):
sim = 0.0
pairs = 0
for term_i in ranking_i:
for term_j in ranking_j:
try:
sim += self.model.similarity(term_i, term_j)
pairs += 1
except:
#print "Failed pair (%s,%s)" % (term_i,term_j)
pass
if pairs == 0:
return 0.0
return sim/pairs
# --------------------------------------------------------------
class WithinTopicMeasure:
'''
Measures within-topic coherence for a topic model, based on a set of term rankings.
'''
def __init__( self, metric ):
self.metric = metric
def evaluate_ranking( self, term_ranking ):
return self.metric.similarity( term_ranking, term_ranking )
def evaluate_rankings( self, term_rankings ):
scores = []
overall = 0.0
for topic_index in range(len(term_rankings)):
score = self.evaluate_ranking( term_rankings[topic_index] )
scores.append( score )
overall += score
overall /= len(term_rankings)
return overall
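# Illustrative usage (editorial note, not part of the original module): any object
# exposing a gensim-style similarity(term_i, term_j) method can drive these classes;
# the toy model below is only a stand-in for a real Word2Vec model.
class _ToyModel:
    def similarity(self, a, b):
        # pretend that terms sharing a first letter are similar
        return 1.0 if a[0] == b[0] else 0.0
_metric = ModelSimilarity(_ToyModel())
_measure = WithinTopicMeasure(_metric)
# _measure.evaluate_rankings([['cat', 'cow', 'car'], ['dog', 'cat', 'fish']])
# returns the average within-topic similarity of the two rankings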
``` |